This page collects typical usage examples of the Python method note.Note.text_list. If you have been wondering what exactly Note.text_list does, how to call it, or what real-world uses of it look like, the curated code examples below may help. You can also explore further usage examples for the containing class, note.Note.
Two code examples of the Note.text_list method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
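At a glance, text_list() is called on a Note instance after a file has been read and returns the raw text of each record. A minimal call site might look like the following (the file path here is purely illustrative):
from note import Note

note = Note()
note.read('data/demo.tsv')   # hypothetical input file
texts = note.text_list()     # raw text of each record, as a list
ids = note.sid_list()        # the corresponding record ids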
Example 1: main
# Required imports: from note import Note [as alias]
# Or: from note.Note import text_list [as alias]
import argparse
import glob
import os
import pickle
import sys

from note import Note
# predict() (and the optional normalize_data_matrix()) are defined elsewhere
# in the same module and are not part of this excerpt.

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i",
                        dest="txt",
                        help="The files to be predicted on (e.g. data/demo.tsv)")
    parser.add_argument("-m",
                        dest="model",
                        help="The file to store the pickled model (e.g. models/demo.model)")
    parser.add_argument("-o",
                        dest="out",
                        help="The directory to output predicted files (e.g. data/predictions)")

    # Parse the command line arguments
    args = parser.parse_args()
    if (not args.txt) or (not args.model) or (not args.out):
        parser.print_help()
        sys.exit(1)

    # Decode arguments
    txt_files = glob.glob(args.txt)
    model_path = args.model
    out_dir = args.out

    # Available data
    if not txt_files:
        print('no predicting files :(')
        sys.exit(1)

    # Load the pickled classifier and feature dictionary (vectorizer)
    with open(model_path + '.model', 'rb') as fid:
        clf = pickle.load(fid)
    with open(model_path + '.dict', 'rb') as fid:
        vec = pickle.load(fid)

    # Predict labels for each file
    for pfile in txt_files:
        note = Note()
        note.read(pfile)

        # Pair each record id with its raw text
        XNotNormalized = zip(note.sid_list(), note.text_list())
        X = XNotNormalized
        #X = normalize_data_matrix(XNotNormalized)

        # Predict
        labels = predict(X, clf, vec)

        # Output predictions under the input file's base name
        outfile = os.path.join(out_dir, os.path.basename(pfile))
        note.write(outfile, labels)
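Neither example shows the note module itself. For orientation only, below is a minimal sketch of a Note-like class, assuming a tab-separated input format of id, text, and an optional label per line; the real note.Note may store and serialize its data quite differently.
class Note(object):
    """Hypothetical minimal stand-in for note.Note (the real class may differ)."""

    def __init__(self):
        self._sids = []    # record identifiers, one per line
        self._texts = []   # raw text, one per line
        self._labels = []  # gold or predicted labels, one per line

    def read(self, path):
        # Assumed format: id <TAB> text [<TAB> label]
        with open(path) as f:
            for line in f:
                fields = line.rstrip('\n').split('\t')
                self._sids.append(fields[0])
                self._texts.append(fields[1])
                if len(fields) > 2:
                    self._labels.append(fields[2])

    def sid_list(self):
        return list(self._sids)

    def text_list(self):
        return list(self._texts)

    def label_list(self):
        return list(self._labels)

    def write(self, path, labels):
        # Write one id <TAB> text <TAB> label row per record
        with open(path, 'w') as f:
            for sid, text, label in zip(self._sids, self._texts, labels):
                f.write('%s\t%s\t%s\n' % (sid, text, label))
Under that assumption, sid_list(), text_list() and label_list() are simply parallel lists, which is why Example 2 below can concatenate labels and text across files and keep them aligned.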
Example 2: main
# Required imports: from note import Note [as alias]
# Or: from note.Note import text_list [as alias]
import argparse
import glob
import os
import sys

import helper
from note import Note
# BASE_DIR, evaluate() and error_analysis() are defined elsewhere in the
# same module and are not part of this excerpt.

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-t",
                        help="Files containing predictions",
                        dest="txt",
                        default=os.path.join(BASE_DIR, 'data/predictions/*'))
    parser.add_argument("-r",
                        help="The directory that contains reference gold standard concept files",
                        dest="ref",
                        default=os.path.join(BASE_DIR, 'data'))
    parser.add_argument("-o",
                        help="Write the evaluation to a file rather than STDOUT",
                        dest="output",
                        default=None)
    parser.add_argument("-e",
                        help="Do error analysis",
                        dest="error",
                        action='store_true')

    # Parse command line arguments
    args = parser.parse_args()

    # Is an output destination specified?
    if args.output:
        args.output = open(args.output, "w")
    else:
        args.output = sys.stdout

    # Pair each prediction file with its gold-standard counterpart
    txt_files = glob.glob(args.txt)
    txt_files_map = helper.map_files(txt_files)

    ref_directory = args.ref
    ref_files = os.listdir(ref_directory)
    ref_files = [os.path.join(args.ref, f) for f in ref_files]
    ref_files_map = helper.map_files(ref_files)

    files = []
    for k in txt_files_map:
        if k in ref_files_map:
            files.append((txt_files_map[k], ref_files_map[k]))
    print(files)

    # Useful for error analysis
    text = []

    # One list of all labels
    pred_labels = []
    gold_labels = []

    # txt <- predicted labels
    # ref <- actual labels
    for txt, ref in files:
        # A note that represents the model's predictions
        pnote = Note()
        pnote.read(txt)

        # A note that holds the actual concept labels
        gnote = Note()
        gnote.read(ref)

        # Accumulate all predictions
        pred_labels += pnote.label_list()
        gold_labels += gnote.label_list()

        # Collect text for error analysis
        text += pnote.text_list()

    # Compute results
    evaluate(pred_labels, gold_labels, out=args.output)

    # Error analysis
    if args.error:
        print('\n\n\n')
        error_analysis(text, pred_labels, gold_labels)
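Example 2 pairs prediction files with gold-standard files through helper.map_files, which is not reproduced on this page. A plausible minimal version, assuming the matching key is simply the file name without its extension, would be:
import os

def map_files(paths):
    # Hypothetical stand-in for helper.map_files: key each path by its
    # base name (without extension) so prediction and gold-standard
    # files that share a name can be matched up.
    mapping = {}
    for path in paths:
        key = os.path.splitext(os.path.basename(path))[0]
        mapping[key] = path
    return mapping
With a helper of that shape, a prediction file such as data/predictions/record-13.tsv and a reference file such as data/record-13.tsv (names here purely illustrative) would share the key record-13, and the loop in Example 2 would pair them as a (prediction, reference) tuple.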