This page collects typical usage examples of the util.make_summary method in Python. If you are wondering how exactly util.make_summary is used, how to call it, or what real examples look like, the curated code examples below may help. You can also explore further usage examples from the util module in which this method is defined.
Six code examples of the util.make_summary method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
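Note that util.make_summary itself is not shown in these snippets. In the projects they come from, it is typically a small TensorFlow 1.x helper that packs a dict of scalar metrics into a tf.Summary protobuf for TensorBoard logging. A minimal sketch, assuming TensorFlow 1.x (the exact body of your util module may differ):

import tensorflow as tf

def make_summary(value_dict):
  # Pack each scalar metric (e.g. "Average F1 (py)") into a tf.Summary.Value
  # so the whole dict can be written to TensorBoard as a single summary.
  return tf.Summary(value=[tf.Summary.Value(tag=k, simple_value=v)
                           for k, v in value_dict.items()])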
Example 1: evaluate
# Required module: import util [as alias]
# Or: from util import make_summary [as alias]
def evaluate(self, session, official_stdout=False):
  self.load_eval_data()
  coref_predictions = {}
  coref_evaluator = metrics.CorefEvaluator()
  for example_num, (tensorized_example, example) in enumerate(self.eval_data):
    _, _, _, _, _, _, _, _, _, gold_starts, gold_ends, _ = tensorized_example
    feed_dict = {i: t for i, t in zip(self.input_tensors, tensorized_example)}
    # Run the model to get candidate mentions, the pruned top spans and their antecedent scores.
    candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = session.run(self.predictions, feed_dict=feed_dict)
    predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
    coref_predictions[example["doc_key"]] = self.evaluate_coref(top_span_starts, top_span_ends, predicted_antecedents, example["clusters"], coref_evaluator)
    if example_num % 10 == 0:
      print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data)))
  # Collect all metrics in a dict, then convert it to a tf.Summary via util.make_summary.
  summary_dict = {}
  conll_results = conll.evaluate_conll(self.config["conll_eval_path"], coref_predictions, official_stdout)
  average_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results)
  summary_dict["Average F1 (conll)"] = average_f1
  print("Average F1 (conll): {:.2f}%".format(average_f1))
  p, r, f = coref_evaluator.get_prf()
  summary_dict["Average F1 (py)"] = f
  print("Average F1 (py): {:.2f}%".format(f * 100))
  summary_dict["Average precision (py)"] = p
  print("Average precision (py): {:.2f}%".format(p * 100))
  summary_dict["Average recall (py)"] = r
  print("Average recall (py): {:.2f}%".format(r * 100))
  return util.make_summary(summary_dict), average_f1
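The summary returned by evaluate is usually written to TensorBoard during training, and the second return value drives checkpoint selection. A usage sketch, assuming a TF 1.x session; names such as writer, log_dir, model, tf_global_step and max_f1 are illustrative, not part of the example above:

# Inside the training loop: periodically evaluate and log the dev metrics.
writer = tf.summary.FileWriter(log_dir, session.graph)
max_f1 = 0.0

eval_summary, eval_f1 = model.evaluate(session)
writer.add_summary(eval_summary, tf_global_step)  # dev metrics appear in TensorBoard
if eval_f1 > max_f1:
  max_f1 = eval_f1  # track the best dev F1 for checkpoint selection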
Example 2: evaluate
# Required module: import util [as alias]
# Or: from util import make_summary [as alias]
def evaluate(self, session, global_step=None, official_stdout=False, keys=None, eval_mode=False):
  self.load_eval_data()
  coref_predictions = {}
  coref_evaluator = metrics.CorefEvaluator()
  losses = []
  doc_keys = []
  num_evaluated = 0
  for example_num, (tensorized_example, example) in enumerate(self.eval_data):
    _, _, _, _, _, _, gold_starts, gold_ends, _, _ = tensorized_example
    feed_dict = {i: t for i, t in zip(self.input_tensors, tensorized_example)}
    # if tensorized_example[0].shape[0] <= 9:
    if keys is not None and example['doc_key'] not in keys:
      # print('Skipping...', example['doc_key'], tensorized_example[0].shape)
      continue
    doc_keys.append(example['doc_key'])
    loss, (candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores) = session.run([self.loss, self.predictions], feed_dict=feed_dict)
    # losses.append(session.run(self.loss, feed_dict=feed_dict))
    losses.append(loss)
    predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
    coref_predictions[example["doc_key"]] = self.evaluate_coref(top_span_starts, top_span_ends, predicted_antecedents, example["clusters"], coref_evaluator)
    if example_num % 10 == 0:
      print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data)))
  summary_dict = {}
  if eval_mode:
    conll_results = conll.evaluate_conll(self.config["conll_eval_path"], coref_predictions, self.subtoken_maps, official_stdout)
    average_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results)
    summary_dict["Average F1 (conll)"] = average_f1
    print("Average F1 (conll): {:.2f}%".format(average_f1))
  p, r, f = coref_evaluator.get_prf()
  summary_dict["Average F1 (py)"] = f
  print("Average F1 (py): {:.2f}% on {} docs".format(f * 100, len(doc_keys)))
  summary_dict["Average precision (py)"] = p
  print("Average precision (py): {:.2f}%".format(p * 100))
  summary_dict["Average recall (py)"] = r
  print("Average recall (py): {:.2f}%".format(r * 100))
  return util.make_summary(summary_dict), f
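All of these examples delegate per-document scoring to self.evaluate_coref, which is not shown on this page. In the coreference codebases these snippets resemble, it converts the predicted antecedents into clusters and feeds both gold and predicted clusters to the evaluator. A rough sketch under that assumption (get_predicted_clusters is the companion helper that builds clusters from antecedent links):

def evaluate_coref(self, top_span_starts, top_span_ends, predicted_antecedents, gold_clusters, evaluator):
  gold_clusters = [tuple(tuple(m) for m in gc) for gc in gold_clusters]
  mention_to_gold = {}
  for gc in gold_clusters:
    for mention in gc:
      mention_to_gold[mention] = gc
  # Turn antecedent links into predicted clusters, then update the running P/R/F1.
  predicted_clusters, mention_to_predicted = self.get_predicted_clusters(
      top_span_starts, top_span_ends, predicted_antecedents)
  evaluator.update(predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold)
  return predicted_clusters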
Example 3: evaluate
# Required module: import util [as alias]
# Or: from util import make_summary [as alias]
def evaluate(self, session, official_stdout=False, pprint=False, test=False):
  self.load_eval_data()
  coref_predictions = {}
  coref_evaluator = metrics.CorefEvaluator()
  if not test:
    session.run(self.switch_to_test_mode_op)
  for example_num, (tensorized_example, example) in enumerate(self.eval_data):
    _, _, _, _, _, _, _, _, _, gold_starts, gold_ends, _ = tensorized_example
    feed_dict = {i: t for i, t in zip(self.input_tensors, tensorized_example)}
    candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = session.run(
        self.predictions, feed_dict=feed_dict)
    predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
    coref_predictions[example["doc_key"]] = self.evaluate_coref(top_span_starts, top_span_ends,
                                                                predicted_antecedents, example["clusters"],
                                                                coref_evaluator)
    if pprint:
      tokens = util.flatten(example["sentences"])
      print("GOLD CLUSTERS:")
      util.coref_pprint(tokens, example["clusters"])
      print("PREDICTED CLUSTERS:")
      util.coref_pprint(tokens, coref_predictions[example["doc_key"]])
      print('==================================================================')
    if example_num % 10 == 0:
      print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data)))
  if not test:
    session.run(self.switch_to_train_mode_op)
  summary_dict = {}
  p, r, f = coref_evaluator.get_prf()
  average_f1 = f * 100
  summary_dict["Average F1 (py)"] = average_f1
  print("Average F1 (py): {:.2f}%".format(average_f1))
  summary_dict["Average precision (py)"] = p
  print("Average precision (py): {:.2f}%".format(p * 100))
  summary_dict["Average recall (py)"] = r
  print("Average recall (py): {:.2f}%".format(r * 100))
  # if test:
  #   conll_results = conll.evaluate_conll(self.config["conll_eval_path"], coref_predictions, official_stdout)
  #   average_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results)
  #   summary_dict["Average F1 (conll)"] = average_f1
  #   print("Average F1 (conll): {:.2f}%".format(average_f1))
  return util.make_summary(summary_dict), average_f1
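Examples 3 and 4 additionally use util.flatten and util.coref_pprint to pretty-print gold and predicted clusters. flatten is usually nothing more than a one-line list helper; a sketch, assuming sentences is a list of token lists (coref_pprint is project-specific and not reconstructed here):

def flatten(l):
  # Collapse a list of sentences (each a list of tokens) into one flat token list,
  # so that cluster span indices can be looked up over the whole document.
  return [item for sublist in l for item in sublist]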
Example 4: evaluate
# Required module: import util [as alias]
# Or: from util import make_summary [as alias]
def evaluate(self, session, official_stdout=False, pprint=False, test=False):
  self.load_eval_data()
  coref_predictions = {}
  coref_evaluator = metrics.CorefEvaluator()
  for example_num, (tensorized_example, example) in enumerate(self.eval_data):
    feed_dict = {self.input_tensors[k]: tensorized_example[k] for k in self.input_tensors}
    candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = session.run(
        self.predictions, feed_dict=feed_dict)
    predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
    coref_predictions[example["doc_key"]] = self.evaluate_coref(top_span_starts, top_span_ends,
                                                                predicted_antecedents, example["clusters"],
                                                                coref_evaluator)
    if pprint:
      tokens = util.flatten(example["sentences"])
      print("GOLD CLUSTERS:")
      util.coref_pprint(tokens, example["clusters"])
      print("PREDICTED CLUSTERS:")
      util.coref_pprint(tokens, coref_predictions[example["doc_key"]])
      print("==================================================================")
    if example_num % 10 == 0:
      print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data)))
  summary_dict = {}
  p, r, f = coref_evaluator.get_prf()
  average_f1 = f * 100
  summary_dict["Average F1 (py)"] = average_f1
  print("Average F1 (py): {:.2f}%".format(average_f1))
  summary_dict["Average precision (py)"] = p
  print("Average precision (py): {:.2f}%".format(p * 100))
  summary_dict["Average recall (py)"] = r
  print("Average recall (py): {:.2f}%".format(r * 100))
  # if test:
  #   conll_results = conll.evaluate_conll(self.config["conll_eval_path"], coref_predictions, official_stdout)
  #   average_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results)
  #   summary_dict["Average F1 (conll)"] = average_f1
  #   print("Average F1 (conll): {:.2f}%".format(average_f1))
  return util.make_summary(summary_dict), average_f1
Example 5: evaluate
# Required module: import util [as alias]
# Or: from util import make_summary [as alias]
def evaluate(self, session, global_step=None, official_stdout=False, keys=None, eval_mode=False):
  self.load_eval_data()
  coref_predictions = {}
  coref_evaluator = metrics.CorefEvaluator()
  losses = []
  doc_keys = []
  num_evaluated = 0
  for example_num, (tensorized_example, example) in enumerate(self.eval_data):
    _, _, _, _, _, _, gold_starts, gold_ends, _, _ = tensorized_example
    feed_dict = {i: t for i, t in zip(self.input_tensors, tensorized_example)}
    # if tensorized_example[0].shape[0] <= 9:
    # if keys is not None and example['doc_key'] in keys:
    #   print('Skipping...', example['doc_key'], tensorized_example[0].shape)
    #   continue
    doc_keys.append(example['doc_key'])
    loss, (candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores) = session.run([self.loss, self.predictions], feed_dict=feed_dict)
    # losses.append(session.run(self.loss, feed_dict=feed_dict))
    losses.append(loss)
    predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
    coref_predictions[example["doc_key"]] = self.evaluate_coref(top_span_starts, top_span_ends, predicted_antecedents, example["clusters"], coref_evaluator)
    if example_num % 10 == 0:
      print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data)))
  summary_dict = {}
  # with open('doc_keys_512.txt', 'w') as f:
  #   for key in doc_keys:
  #     f.write(key + '\n')
  if eval_mode:
    conll_results = conll.evaluate_conll(self.config["conll_eval_path"], coref_predictions, self.subtoken_maps, official_stdout)
    average_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results)
    summary_dict["Average F1 (conll)"] = average_f1
    print("Average F1 (conll): {:.2f}%".format(average_f1))
  p, r, f = coref_evaluator.get_prf()
  summary_dict["Average F1 (py)"] = f
  print("Average F1 (py): {:.2f}% on {} docs".format(f * 100, len(doc_keys)))
  summary_dict["Average precision (py)"] = p
  print("Average precision (py): {:.2f}%".format(p * 100))
  summary_dict["Average recall (py)"] = r
  print("Average recall (py): {:.2f}%".format(r * 100))
  return util.make_summary(summary_dict), f
Example 6: evaluate
# Required module: import util [as alias]
# Or: from util import make_summary [as alias]
def evaluate(self, session, global_step=None, official_stdout=False, keys=None, eval_mode=False):
  self.load_eval_data()
  coref_predictions = {}
  coref_evaluator = metrics.CorefEvaluator()
  losses = []
  doc_keys = []
  num_evaluated = 0
  for example_num, (tensorized_example, example) in enumerate(self.eval_data):
    _, _, _, _, _, _, _, _, gold_starts, gold_ends, _, _ = tensorized_example
    feed_dict = {i: t for i, t in zip(self.input_tensors, tensorized_example)}
    # if tensorized_example[0].shape[0] <= 9:
    # if keys is not None and example['doc_key'] in keys:
    #   print('Skipping...', example['doc_key'], tensorized_example[0].shape)
    #   continue
    doc_keys.append(example['doc_key'])
    loss, (candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores) = session.run([self.loss, self.predictions], feed_dict=feed_dict)
    # losses.append(session.run(self.loss, feed_dict=feed_dict))
    losses.append(loss)
    predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
    coref_predictions[example["doc_key"]] = self.evaluate_coref(top_span_starts, top_span_ends, predicted_antecedents, example["clusters"], coref_evaluator)
    if example_num % 10 == 0:
      print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data)))
  summary_dict = {}
  # with open('doc_keys_512.txt', 'w') as f:
  #   for key in doc_keys:
  #     f.write(key + '\n')
  if eval_mode:
    conll_results = conll.evaluate_conll(self.config["conll_eval_path"], coref_predictions, self.subtoken_maps, official_stdout)
    average_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results)
    summary_dict["Average F1 (conll)"] = average_f1
    print("Average F1 (conll): {:.2f}%".format(average_f1))
  p, r, f = coref_evaluator.get_prf()
  summary_dict["Average F1 (py)"] = f
  print("Average F1 (py): {:.2f}% on {} docs".format(f * 100, len(doc_keys)))
  summary_dict["Average precision (py)"] = p
  print("Average precision (py): {:.2f}%".format(p * 100))
  summary_dict["Average recall (py)"] = r
  print("Average recall (py): {:.2f}%".format(r * 100))
  return util.make_summary(summary_dict), f
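Finally, every example relies on get_predicted_antecedents to turn the antecedent score matrix into one predicted antecedent per top span. The original method is not shown on this page; a common implementation, sketched here under the assumption that the first column of each score row is the dummy "no antecedent" option:

import numpy as np

def get_predicted_antecedents(self, antecedents, antecedent_scores):
  predicted_antecedents = []
  # Subtract 1 so that the dummy antecedent (column 0) maps to index -1.
  for i, index in enumerate(np.argmax(antecedent_scores, axis=1) - 1):
    if index < 0:
      predicted_antecedents.append(-1)  # the span starts a new cluster
    else:
      predicted_antecedents.append(antecedents[i, index])
  return predicted_antecedents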