This page collects typical usage examples of the Python method seqeval.metrics.recall_score. If you are wondering what metrics.recall_score does, how to call it, and what real-world usage looks like, the hand-picked code samples below should help. You can also browse further usage examples from its containing module, seqeval.metrics.
The following presents 10 code examples of metrics.recall_score, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
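Before the examples, a minimal self-contained sketch of what recall_score computes may be useful: seqeval scores at the entity level over BIO/BIOES-tagged sequences, so recall is the fraction of gold entities that were recovered exactly. The toy tags below are illustrative only, not taken from any example on this page:

from seqeval.metrics import recall_score

y_true = [['O', 'B-PER', 'I-PER', 'O', 'B-LOC'],
          ['B-ORG', 'I-ORG', 'O']]
y_pred = [['O', 'B-PER', 'I-PER', 'O', 'O'],   # the LOC entity is missed
          ['B-ORG', 'I-ORG', 'O']]

# 2 of the 3 gold entities are recovered exactly -> recall = 2/3
print(recall_score(y_true, y_pred))  # 0.666...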
Example 1: evaluate_results
# Required import: from seqeval import metrics [as alias]
# Or: from seqeval.metrics import recall_score [as alias]
def evaluate_results(net, test_loader, pad_id, cuda):
    logger.info("Evaluating test samples...")
    acc = 0; out_labels = []; true_labels = []
    net.eval()
    with torch.no_grad():
        for i, data in tqdm(enumerate(test_loader), total=len(test_loader)):
            x, e1_e2_start, labels, _, _, _ = data
            attention_mask = (x != pad_id).float()
            token_type_ids = torch.zeros((x.shape[0], x.shape[1])).long()
            if cuda:
                x = x.cuda()
                labels = labels.cuda()
                attention_mask = attention_mask.cuda()
                token_type_ids = token_type_ids.cuda()
            classification_logits = net(x, token_type_ids=token_type_ids, attention_mask=attention_mask, Q=None,
                                        e1_e2_start=e1_e2_start)
            accuracy, (o, l) = evaluate_(classification_logits, labels, ignore_idx=-1)
            out_labels.append([str(i) for i in o]); true_labels.append([str(i) for i in l])
            acc += accuracy
    accuracy = acc/(i + 1)
    results = {
        "accuracy": accuracy,
        "precision": precision_score(true_labels, out_labels),
        "recall": recall_score(true_labels, out_labels),
        "f1": f1_score(true_labels, out_labels)
    }
    logger.info("***** Eval results *****")
    for key in sorted(results.keys()):
        logger.info("  %s = %s", key, str(results[key]))
    return results
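Example 1 depends on a helper evaluate_ that this page does not show. A hypothetical minimal reconstruction, assuming logits of shape (batch, num_classes) and an ignore index for padded labels (names and behavior are assumptions, not the original code):

import torch

def evaluate_(logits, labels, ignore_idx=-1):
    # Hypothetical reconstruction of the unshown helper.
    preds = logits.max(dim=1)[1]             # predicted class id per sample
    mask = labels != ignore_idx              # drop ignored/padded positions
    o = preds[mask].cpu().numpy().tolist()   # predicted ids
    l = labels[mask].cpu().numpy().tolist()  # gold ids
    accuracy = (preds[mask] == labels[mask]).float().mean().item()
    return accuracy, (o, l)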
Example 2: _eval_end
# Required import: from seqeval import metrics [as alias]
# Or: from seqeval.metrics import recall_score [as alias]
def _eval_end(self, outputs):
    "Evaluation called for both Val and Test"
    val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
    preds = np.concatenate([x["pred"] for x in outputs], axis=0)
    preds = np.argmax(preds, axis=2)
    out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
    label_map = {i: label for i, label in enumerate(self.labels)}
    out_label_list = [[] for _ in range(out_label_ids.shape[0])]
    preds_list = [[] for _ in range(out_label_ids.shape[0])]
    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            if out_label_ids[i, j] != self.pad_token_label_id:
                out_label_list[i].append(label_map[out_label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    results = {
        "val_loss": val_loss_mean,
        "precision": precision_score(out_label_list, preds_list),
        "recall": recall_score(out_label_list, preds_list),
        "f1": f1_score(out_label_list, preds_list),
    }
    if self.is_logger():
        logger.info("***** Eval results *****")
        for key in sorted(results.keys()):
            logger.info("  %s = %s", key, str(results[key]))
    tensorboard_logs = results
    ret = {k: v for k, v in results.items()}
    ret["log"] = tensorboard_logs
    return ret, preds_list, out_label_list
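Example 2 masks out padded positions via self.pad_token_label_id before mapping ids back to tag strings. A hypothetical setup for those attributes inside the module's __init__, with an illustrative tag set (this mirrors a common convention but is not shown in the original):

from torch.nn import CrossEntropyLoss

self.labels = ['O', 'B-PER', 'I-PER', 'B-LOC', 'I-LOC']     # illustrative tag set
self.pad_token_label_id = CrossEntropyLoss().ignore_index   # -100, the usual padding label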
Example 3: get_slot_metrics
# Required import: from seqeval import metrics [as alias]
# Or: from seqeval.metrics import recall_score [as alias]
def get_slot_metrics(preds, labels):
    assert len(preds) == len(labels)
    return {
        "slot_precision": precision_score(labels, preds),
        "slot_recall": recall_score(labels, preds),
        "slot_f1": f1_score(labels, preds)
    }
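A quick way to exercise get_slot_metrics on toy BIO slot labels (purely illustrative data):

labels = [['O', 'B-time', 'I-time', 'O']]
preds  = [['O', 'B-time', 'I-time', 'O']]
print(get_slot_metrics(preds, labels))
# {'slot_precision': 1.0, 'slot_recall': 1.0, 'slot_f1': 1.0}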
Example 4: f1_pre_rec
# Required import: from seqeval import metrics [as alias]
# Or: from seqeval.metrics import recall_score [as alias]
def f1_pre_rec(labels, preds):
    return {
        "precision": precision_score(labels, preds, suffix=True),
        "recall": recall_score(labels, preds, suffix=True),
        "f1": f1_score(labels, preds, suffix=True)
    }
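The suffix=True flag tells seqeval to parse tags whose chunk prefix comes last, e.g. PER-B/PER-I rather than B-PER/I-PER. An illustrative call on toy data:

y_true = [['PER-B', 'PER-I', 'O', 'LOC-B']]
y_pred = [['PER-B', 'PER-I', 'O', 'O']]  # the LOC entity is missed
print(f1_pre_rec(y_true, y_pred))
# {'precision': 1.0, 'recall': 0.5, 'f1': 0.666...}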
Example 5: evaluate
# Required import: from seqeval import metrics [as alias]
# Or: from seqeval.metrics import recall_score [as alias]
def evaluate(self, data: List[List[str]], labels: List[List[str]]) -> float:
    """Evaluate the performance of the NER model with the given data and labels, and
    return the F1 score.

    Args:
        data: List of list of str. Tokenized (char-level) texts,
            like ``[['我', '在', '上', '海', '上', '学'], ...]``.
        labels: List of list of str. The corresponding labels, usually in BIO or BIOES
            format, like ``[['O', 'O', 'B-LOC', 'I-LOC', 'O', 'O'], ...]``.

    Returns:
        Float. The F1 score.
    """
    features, y = self.preprocessor.prepare_input(data, labels)
    pred_probs = self.model.predict(features)
    if self.preprocessor.use_bert:
        pred_probs = pred_probs[:, 1:-1, :]  # strip the [CLS] and [SEP] positions
    lengths = [min(len(label), pred_prob.shape[0])
               for label, pred_prob in zip(labels, pred_probs)]
    y_pred = self.preprocessor.label_decode(pred_probs, lengths)
    r = metrics.recall_score(labels, y_pred)
    p = metrics.precision_score(labels, y_pred)
    f1 = metrics.f1_score(labels, y_pred)
    logging.info('Recall: {}, Precision: {}, F1: {}'.format(r, p, f1))
    logging.info(metrics.classification_report(labels, y_pred))
    return f1
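Assuming a trained wrapper object exposing this method (call it ner_model, a hypothetical name), evaluation reduces to:

data = [['我', '在', '上', '海', '上', '学']]
labels = [['O', 'O', 'B-LOC', 'I-LOC', 'O', 'O']]
f1 = ner_model.evaluate(data, labels)  # logs recall/precision/F1 plus a full classification report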
Example 6: on_epoch_end
# Required import: from seqeval import metrics [as alias]
# Or: from seqeval.metrics import recall_score [as alias]
def on_epoch_end(self, epoch, logs=None):
    pred_probs = self.model.predict(self.valid_features)
    if self.preprocessor.use_bert:
        pred_probs = pred_probs[:, 1:-1, :]  # strip the [CLS] and [SEP] positions
    y_pred = self.preprocessor.label_decode(pred_probs, self.get_lengths(pred_probs))
    r = metrics.recall_score(self.valid_labels, y_pred)
    p = metrics.precision_score(self.valid_labels, y_pred)
    f1 = metrics.f1_score(self.valid_labels, y_pred)
    logs['val_r'] = r
    logs['val_p'] = p
    logs['val_f1'] = f1
    print('Epoch {}: val_r: {}, val_p: {}, val_f1: {}'.format(epoch + 1, r, p, f1))
    print(metrics.classification_report(self.valid_labels, y_pred))
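on_epoch_end is a Keras callback hook; assuming the surrounding class (hypothetically named SeqevalCallback) subclasses keras.callbacks.Callback and stores preprocessor, valid_features, and valid_labels in its constructor, it would be wired in roughly like this:

callback = SeqevalCallback(preprocessor, valid_features, valid_labels)  # hypothetical constructor
model.fit(train_features, train_y, epochs=10, callbacks=[callback])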
Example 7: reduce_aggregated_logs
# Required import: from seqeval import metrics [as alias]
# Or: from seqeval.metrics import recall_score [as alias]
def reduce_aggregated_logs(self, aggregated_logs):
    """Reduces aggregated logs over validation steps."""
    label_class = aggregated_logs['label_class']
    predict_class = aggregated_logs['predict_class']
    return {
        'f1': seqeval_metrics.f1_score(label_class, predict_class),
        'precision': seqeval_metrics.precision_score(label_class, predict_class),
        'recall': seqeval_metrics.recall_score(label_class, predict_class),
        'accuracy': seqeval_metrics.accuracy_score(label_class, predict_class),
    }
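Note that seqeval's accuracy_score is token-level while precision/recall/f1 are entity-level, so the two can diverge on boundary errors (toy data):

from seqeval.metrics import accuracy_score, recall_score

y_true = [['B-LOC', 'I-LOC', 'O', 'O']]
y_pred = [['B-LOC', 'O', 'O', 'O']]    # boundary error truncates the entity
print(accuracy_score(y_true, y_pred))  # 0.75: 3 of 4 tokens correct
print(recall_score(y_true, y_pred))    # 0.0:  0 of 1 gold entities matched exactly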
Example 8: evaluate_results
# Required import: from seqeval import metrics [as alias]
# Or: from seqeval.metrics import recall_score [as alias]
def evaluate_results(net, data_loader, cuda, g_mask1, g_mask2, args, create_masks, create_trg_mask, ignore_idx2=7):
    acc = 0; acc2 = 0
    print("Evaluating...")
    out_labels = []; true_labels = []
    with torch.no_grad():
        net.eval()
        for i, data in tqdm(enumerate(data_loader), total=len(data_loader)):
            if args.model_no == 0:
                src_input, trg_input, trg2_input = data[0], data[1][:, :-1], data[2][:, :-1]
                labels = data[1][:, 1:].contiguous().view(-1)
                labels2 = data[2][:, 1:].contiguous().view(-1)
                src_mask, trg_mask = create_masks(src_input, trg_input)
                trg2_mask = create_trg_mask(trg2_input, ignore_idx=ignore_idx2)
                if cuda:
                    src_input = src_input.cuda().long(); trg_input = trg_input.cuda().long(); labels = labels.cuda().long()
                    src_mask = src_mask.cuda(); trg_mask = trg_mask.cuda(); trg2_mask = trg2_mask.cuda()
                    trg2_input = trg2_input.cuda().long(); labels2 = labels2.cuda().long()
                outputs, outputs2 = net(src_input, trg_input, trg2_input, src_mask, trg_mask, trg2_mask)
            elif args.model_no == 1:
                src_input, trg_input, trg2_input = data[0], data[1][:, :-1], data[2][:, :-1]
                labels = data[1][:, 1:].contiguous().view(-1)
                labels2 = data[2][:, 1:].contiguous().view(-1)
                if cuda:
                    src_input = src_input.cuda().long(); trg_input = trg_input.cuda().long(); labels = labels.cuda().long()
                    trg2_input = trg2_input.cuda().long(); labels2 = labels2.cuda().long()
                outputs, outputs2 = net(src_input, trg_input, trg2_input)
            outputs = outputs.view(-1, outputs.size(-1))
            outputs2 = outputs2.view(-1, outputs2.size(-1))
            acc += evaluate(outputs, labels, ignore_idx=1)[0]
            cal_acc, (o, l) = evaluate(outputs2, labels2, ignore_idx=ignore_idx2)
            out_labels.append([str(i) for i in o]); true_labels.append([str(i) for i in l])
            acc2 += cal_acc
    accuracy = (acc/(i + 1) + acc2/(i + 1))/2
    results = {
        "accuracy": accuracy,
        "precision": precision_score(true_labels, out_labels),
        "recall": recall_score(true_labels, out_labels),
        "f1": f1_score(true_labels, out_labels)
    }
    logger.info("***** Eval results *****")
    for key in sorted(results.keys()):
        logger.info("  %s = %s", key, str(results[key]))
    return accuracy
Example 9: evaluate_results
# Required import: from seqeval import metrics [as alias]
# Or: from seqeval.metrics import recall_score [as alias]
def evaluate_results(net, data_loader, cuda, g_mask1, g_mask2, args, ignore_idx, idx2pos):
    acc = 0
    print("Evaluating...")
    out_labels = []; true_labels = []
    with torch.no_grad():
        net.eval()
        for i, data in tqdm(enumerate(data_loader), total=len(data_loader)):
            if args.model_no == 0:
                if len(data) == 4:
                    src_input = data[0]
                    src_mask = data[1]
                    token_type = data[2]
                    labels = data[3].contiguous().view(-1)
                else:
                    src_input = data[0]
                    labels = data[1].contiguous().view(-1)
                    src_mask = (src_input != 0).long()
                    token_type = torch.zeros((src_input.shape[0], src_input.shape[1]), dtype=torch.long)
                if cuda:
                    src_input = src_input.cuda().long(); labels = labels.cuda().long()
                    src_mask = src_mask.cuda(); token_type = token_type.cuda()
                outputs = net(src_input, attention_mask=src_mask, token_type_ids=token_type)
                outputs = outputs[0]
            elif args.model_no == 1:
                src_input, trg_input = data[0], data[1][:, :-1]
                labels = data[1][:, 1:].contiguous().view(-1)
                if cuda:
                    src_input = src_input.cuda().long(); trg_input = trg_input.cuda().long(); labels = labels.cuda().long()
                outputs = net(src_input, trg_input)
            #print(outputs.shape); print(labels.shape)
            outputs = outputs.reshape(-1, outputs.size(-1))
            cal_acc, (o, l) = evaluate(outputs, labels, ignore_idx)
            out_labels.append([idx2pos[i] for i in o]); true_labels.append([idx2pos[i] for i in l])
            acc += cal_acc
    eval_acc = acc/(i + 1)
    results = {
        "accuracy": eval_acc,
        "precision": precision_score(true_labels, out_labels),
        "recall": recall_score(true_labels, out_labels),
        "f1": f1_score(true_labels, out_labels)
    }
    logger.info("***** Eval results *****")
    for key in sorted(results.keys()):
        logger.info("  %s = %s", key, str(results[key]))
    return results
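Examples 9 and 10 (below) assume an index-to-tag mapping (idx2pos / idx2ner) built elsewhere. A hypothetical construction, with an illustrative tag inventory:

pos_tags = ['O', 'B-NP', 'I-NP', 'B-VP', 'I-VP']      # illustrative tag set
idx2pos = {i: tag for i, tag in enumerate(pos_tags)}  # id -> tag string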
Example 10: evaluate_results
# Required import: from seqeval import metrics [as alias]
# Or: from seqeval.metrics import recall_score [as alias]
def evaluate_results(net, data_loader, cuda, g_mask1, g_mask2, args, ignore_idx, idx2ner):
    acc = 0
    print("Evaluating...")
    out_labels = []; true_labels = []
    with torch.no_grad():
        net.eval()
        for i, data in tqdm(enumerate(data_loader), total=len(data_loader)):
            if args.model_no == 0:
                if len(data) == 4:
                    src_input = data[0]
                    src_mask = data[1]
                    token_type = data[2]
                    labels = data[3].contiguous().view(-1)
                else:
                    src_input = data[0]
                    labels = data[1].contiguous().view(-1)
                    src_mask = (src_input != 0).long()
                    token_type = torch.zeros((src_input.shape[0], src_input.shape[1]), dtype=torch.long)
                if cuda:
                    src_input = src_input.cuda().long(); labels = labels.cuda().long()
                    src_mask = src_mask.cuda(); token_type = token_type.cuda()
                outputs = net(src_input, attention_mask=src_mask, token_type_ids=token_type)
                outputs = outputs[0]
            elif args.model_no == 1:
                src_input, trg_input = data[0], data[1][:, :-1]
                labels = data[1][:, 1:].contiguous().view(-1)
                if cuda:
                    src_input = src_input.cuda().long(); trg_input = trg_input.cuda().long(); labels = labels.cuda().long()
                outputs = net(src_input, trg_input)
            #print(outputs.shape); print(labels.shape)
            outputs = outputs.reshape(-1, outputs.size(-1))
            cal_acc, (o, l) = evaluate(outputs, labels, ignore_idx)
            out_labels.append([idx2ner[i] for i in o]); true_labels.append([idx2ner[i] for i in l])
            acc += cal_acc
    eval_acc = acc/(i + 1)
    results = {
        "accuracy": eval_acc,
        "precision": precision_score(true_labels, out_labels),
        "recall": recall_score(true_labels, out_labels),
        "f1": f1_score(true_labels, out_labels)
    }
    logger.info("***** Eval results *****")
    for key in sorted(results.keys()):
        logger.info("  %s = %s", key, str(results[key]))
    return results