This article collects typical usage examples of the Python method sklearn.metrics.precision_recall_fscore_support. If you are unsure how metrics.precision_recall_fscore_support is used in practice, the curated code examples below may help. You can also explore the containing module, sklearn.metrics, for related functionality.
The following presents 15 code examples of metrics.precision_recall_fscore_support, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
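As a quick orientation before the examples (this sketch is not one of the 15 below, and the labels in it are made up for illustration), here is a minimal, self-contained look at what precision_recall_fscore_support returns for the common `average` settings:

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

# hypothetical ground-truth and predicted labels for a 3-class problem
y_true = np.array([0, 1, 2, 2, 1, 0, 1, 2])
y_pred = np.array([0, 2, 2, 2, 1, 0, 0, 1])

# average=None returns four per-class arrays: precision, recall, F-beta, support
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)

# with an average requested, a single (precision, recall, F-beta) triple is
# returned and the support slot is None
p_macro, r_macro, f_macro, _ = precision_recall_fscore_support(
    y_true, y_pred, average='macro')
p_weighted, r_weighted, f_weighted, _ = precision_recall_fscore_support(
    y_true, y_pred, average='weighted')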
Example 1: accuracy
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def accuracy(y_true, y_pred):
    # compute the confusion matrix
    y = np.zeros(len(y_true))
    y_ = np.zeros(len(y_true))
    for i in range(len(y_true)):
        y[i] = np.argmax(y_true[i, :])
        y_[i] = np.argmax(y_pred[i, :])
    cnf_mat = confusion_matrix(y, y_)
    # Acc = 1.0*(cnf_mat[1][1]+cnf_mat[0][0])/len(y_true)
    # Sens = 1.0*cnf_mat[1][1]/(cnf_mat[1][1]+cnf_mat[1][0])
    # Spec = 1.0*cnf_mat[0][0]/(cnf_mat[0][0]+cnf_mat[0][1])
    # # plot the ROC curve
    # fpr, tpr, thresholds = roc_curve(y_true[:,0], y_pred[:,0])
    # Auc = auc(fpr, tpr)
    # compute multi-class evaluation metrics
    Sens = recall_score(y, y_, average='macro')
    Prec = precision_score(y, y_, average='macro')
    F1 = f1_score(y, y_, average='weighted')
    Support = precision_recall_fscore_support(y, y_, beta=0.5, average=None)
    return Sens, Prec, F1, cnf_mat
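Note that in Example 1 the call with beta=0.5 and average=None yields four per-class arrays (precision, recall, F0.5, support) assigned to Support, but only the averaged scores and the confusion matrix are returned. A minimal sketch of that per-class call, with illustrative labels:

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

y = np.array([0, 0, 1, 1, 2, 2])   # hypothetical true class indices
y_ = np.array([0, 1, 1, 1, 2, 0])  # hypothetical predicted class indices

# one array per quantity, one entry per class; the third array is the F0.5 score
prec, rec, f_half, support = precision_recall_fscore_support(y, y_, beta=0.5, average=None)
print(support)  # array([2, 2, 2]) -- number of true samples per class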
Example 2: calc_test_result
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def calc_test_result(result, test_label, test_mask):
    true_label = []
    predicted_label = []
    for i in range(result.shape[0]):
        for j in range(result.shape[1]):
            if test_mask[i, j] == 1:
                true_label.append(np.argmax(test_label[i, j]))
                predicted_label.append(np.argmax(result[i, j]))
    print("Confusion Matrix :")
    print(confusion_matrix(true_label, predicted_label))
    print("Classification Report :")
    print(classification_report(true_label, predicted_label, digits=4))
    print("Accuracy ", accuracy_score(true_label, predicted_label))
    print("Macro Classification Report :")
    print(precision_recall_fscore_support(true_label, predicted_label, average='macro'))
    print("Weighted Classification Report :")
    print(precision_recall_fscore_support(true_label, predicted_label, average='weighted'))
    # print("Normal Classification Report :")
    # print(precision_recall_fscore_support(true_label, predicted_label))
Example 3: test_fbeta_multiclass_with_weighted_average
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def test_fbeta_multiclass_with_weighted_average(self, device: str):
    self.predictions = self.predictions.to(device)
    self.targets = self.targets.to(device)
    labels = [0, 1]
    fbeta = FBetaMeasure(average="weighted", labels=labels)
    fbeta(self.predictions, self.targets)
    metric = fbeta.get_metric()
    precisions = metric["precision"]
    recalls = metric["recall"]
    fscores = metric["fscore"]
    weighted_precision, weighted_recall, weighted_fscore, _ = precision_recall_fscore_support(
        self.targets.cpu().numpy(),
        self.predictions.argmax(dim=1).cpu().numpy(),
        labels=labels,
        average="weighted",
    )
    # check values against scikit-learn
    assert_allclose(precisions, weighted_precision)
    assert_allclose(recalls, weighted_recall)
    assert_allclose(fscores, weighted_fscore)
Example 4: test_precision_recall_f1_score_binary_averaged
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def test_precision_recall_f1_score_binary_averaged():
    y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
    y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
    # compute scores with default labels introspection
    ps, rs, fs, _ = precision_recall_fscore_support(y_true, y_pred,
                                                    average=None)
    p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
                                                 average='macro')
    assert_equal(p, np.mean(ps))
    assert_equal(r, np.mean(rs))
    assert_equal(f, np.mean(fs))
    p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
                                                 average='weighted')
    support = np.bincount(y_true)
    assert_equal(p, np.average(ps, weights=support))
    assert_equal(r, np.average(rs, weights=support))
    assert_equal(f, np.average(fs, weights=support))
Example 5: test_precision_recall_f1_no_labels
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def test_precision_recall_f1_no_labels(beta, average):
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)
    p, r, f, s = assert_warns(UndefinedMetricWarning,
                              precision_recall_fscore_support,
                              y_true, y_pred, average=average,
                              beta=beta)
    assert_almost_equal(p, 0)
    assert_almost_equal(r, 0)
    assert_almost_equal(f, 0)
    assert_equal(s, None)
    fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                         y_true, y_pred,
                         beta=beta, average=average)
    assert_almost_equal(fbeta, 0)
Example 6: test_precision_recall_f1_no_labels_average_none
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def test_precision_recall_f1_no_labels_average_none():
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)
    beta = 1
    # tp = [0, 0, 0]
    # fn = [0, 0, 0]
    # fp = [0, 0, 0]
    # support = [0, 0, 0]
    # |y_hat_i inter y_i | = [0, 0, 0]
    # |y_i| = [0, 0, 0]
    # |y_hat_i| = [0, 0, 0]
    p, r, f, s = assert_warns(UndefinedMetricWarning,
                              precision_recall_fscore_support,
                              y_true, y_pred, average=None, beta=beta)
    assert_array_almost_equal(p, [0, 0, 0], 2)
    assert_array_almost_equal(r, [0, 0, 0], 2)
    assert_array_almost_equal(f, [0, 0, 0], 2)
    assert_array_almost_equal(s, [0, 0, 0], 2)
    fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                         y_true, y_pred, beta=beta, average=None)
    assert_array_almost_equal(fbeta, [0, 0, 0], 2)
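Examples 5 and 6 rely on UndefinedMetricWarning being raised when a class has no true or no predicted samples. In more recent scikit-learn releases (0.22 and later, if memory serves) the zero_division argument lets you silence that warning and choose the value used instead; a small sketch under that assumption:

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

y_true = np.zeros((20, 3))       # all-negative multilabel ground truth
y_pred = np.zeros_like(y_true)   # all-negative predictions

# zero_division=0 returns 0.0 for the undefined ratios without warning
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None, zero_division=0)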
Example 7: print_result
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def print_result(fold, y, y_predicted, id_class_mapping):
    """Print the per-class result matrix."""
    n_classes = len(np.unique(y))
    p, r, f, s = precision_recall_fscore_support(y, y_predicted, labels=None, pos_label=1, average=None)
    a = [accuracy_score(y[y == c], y_predicted[y == c]) for c in range(n_classes)]
    # count occurrences of classes
    count = Counter(y)
    print("\n")
    if fold is not None:
        print("Results on fold %d" % fold)
    print("\n")
    print("%30s | %s | %5s | %4s | %4s | %4s |" % ("LABEL", "CNT", "ACC ", "PR ", "RE ", "F1 "))
    print('-' * 70)
    for c in range(n_classes):
        print("%30s | %03d | %0.3f | %.2f | %.2f | %.3f |" % (id_class_mapping[c], count[c], a[c], p[c], r[c], f[c]))
    print('-' * 70)
    print("%30s | %03d | %0.3f | %.2f | %.2f | %.3f |" % ('average', len(y), np.mean(a), np.mean(p), np.mean(r), np.mean(f)))
    print('=' * 70)
    print("Overall Accuracy: %.3f %%" % (100.0 * accuracy_score(y, y_predicted)))
    print('=' * 70)
Example 8: _update_onco_metrics
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def _update_onco_metrics(self, y_true, y_pred, prob):
    self.onco_gene_pred = pd.Series(y_pred, self.y.index)
    self.onco_gene_score = pd.Series(prob, self.y.index)
    # compute metrics for classification
    self.onco_gene_count[self.num_pred] = sum(y_pred)
    prec, recall, fscore, support = metrics.precision_recall_fscore_support(y_true, y_pred)
    self.onco_precision[self.num_pred] = prec[self.onco_num]
    self.onco_recall[self.num_pred] = recall[self.onco_num]
    self.onco_f1_score[self.num_pred] = fscore[self.onco_num]
    self.logger.debug('Onco Iter %d: Precision=%s, Recall=%s, f1_score=%s' % (
        self.num_pred + 1, str(prec), str(recall), str(fscore)))
    # compute ROC curve metrics
    fpr, tpr, thresholds = metrics.roc_curve(y_true, prob)
    self.onco_tpr_array[self.num_pred, :] = interp(self.onco_fpr_array, fpr, tpr)
    #self.onco_mean_tpr[0] = 0.0
    # compute Precision-Recall curve metrics
    p, r, thresh = metrics.precision_recall_curve(y_true, prob)
    p, r, thresh = p[::-1], r[::-1], thresh[::-1]  # reverse order of results
    thresh = np.insert(thresh, 0, 1.0)
    self.onco_precision_array[self.num_pred, :] = interp(self.onco_recall_array, r, p)
    self.onco_threshold_array[self.num_pred, :] = interp(self.onco_recall_array, r, thresh)
Example 9: _update_tsg_metrics
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def _update_tsg_metrics(self, y_true, y_pred, prob):
    self.tsg_gene_pred = pd.Series(y_pred, self.y.index)
    self.tsg_gene_score = pd.Series(prob, self.y.index)
    # compute metrics for classification
    self.tsg_gene_count[self.num_pred] = sum(y_pred)
    prec, recall, fscore, support = metrics.precision_recall_fscore_support(y_true, y_pred)
    tsg_col = 1  # column for metrics related to tsg
    self.tsg_precision[self.num_pred] = prec[tsg_col]
    self.tsg_recall[self.num_pred] = recall[tsg_col]
    self.tsg_f1_score[self.num_pred] = fscore[tsg_col]
    self.logger.debug('Tsg Iter %d: Precision=%s, Recall=%s, f1_score=%s' % (
        self.num_pred + 1, str(prec), str(recall), str(fscore)))
    # compute ROC curve metrics
    fpr, tpr, thresholds = metrics.roc_curve(y_true, prob)
    self.tsg_tpr_array[self.num_pred, :] = interp(self.tsg_fpr_array, fpr, tpr)
    #self.tsg_tpr_array[0] = 0.0
    # compute Precision-Recall curve metrics
    p, r, thresh = metrics.precision_recall_curve(y_true, prob)
    p, r, thresh = p[::-1], r[::-1], thresh[::-1]  # reverse order of results
    self.tsg_precision_array[self.num_pred, :] = interp(self.tsg_recall_array, r, p)
Example 10: metrics
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def metrics(y_pred, y_true):
    """Calculate evaluation metrics: precision, recall, and F1.

    Arguments
    ---------
    y_pred: ndarray, the predicted result list
    y_true: ndarray, the ground-truth label list

    Returns
    -------
    precision: float, precision value
    recall: float, recall value
    f1: float, F1 measure value
    """
    precision, recall, f1, _ = precision_recall_fscore_support(y_true, y_pred, average='binary')
    return precision, recall, f1
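Example 10 passes average='binary', which only applies to two-class targets: it reports precision, recall and F1 for the positive class alone (pos_label, 1 by default) rather than averaging over both classes. A quick sketch with made-up labels to show the effect:

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

y_true = np.array([0, 1, 1, 0, 1])
y_pred = np.array([0, 1, 0, 0, 1])

# scores for the positive class only; the support slot is None with average='binary'
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred, average='binary')
print(p, r, f)  # 1.0, 0.666..., 0.8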
Example 11: validation
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def validation(classifier, data, y_data, y_target, class_names, title):
    #kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
    #cv = kfold
    t = 'Confusion matrix: ' + str(title)
    x = np.transpose(data)
    if classifier is None:
        print("No accuracy to be computed")
    else:
        accuracy = model_selection.cross_val_score(classifier, x, y_target, scoring='accuracy')
        print("Accuracy: " + str(accuracy))
    #precision = model_selection.cross_val_score(self.classifier, x, target, scoring='precision')
    #precision_score(y_true, y_pred, average='macro')
    #recall = model_selection.cross_val_score(self.classifier, x, target, scoring='recall')
    precision, recall, fscore, m = precision_recall_fscore_support(y_target, y_data, average='macro')
    cnf_matrix = confusion_matrix(y_target, y_data)
    print("Precision: " + str(precision) + ", Recall:" + str(recall) + ", f-score:" + str(fscore))
    np.set_printoptions(precision=2)
    # Plot non-normalized confusion matrix
    plt.figure()
    plot_confusion_matrix(cnf_matrix, classes=class_names, title=t)
    print("... finishing matrix plot")
    plt.show()
Example 12: evaluate
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def evaluate(model, data_iterator, num_steps, metric_labels):
    """Evaluate the model on `num_steps` batches."""
    # set model to evaluation mode
    model.eval()
    output_labels = list()
    target_labels = list()
    # compute metrics over the dataset
    for _ in range(num_steps):
        # fetch the next evaluation batch
        batch_data, batch_labels = next(data_iterator)
        # compute model output
        batch_output = model(batch_data)  # batch_size x num_labels
        batch_output_labels = torch.max(batch_output, dim=1)[1]
        output_labels.extend(batch_output_labels.data.cpu().numpy().tolist())
        target_labels.extend(batch_labels.data.cpu().numpy().tolist())
    # calculate precision, recall and F1 for all relation categories
    p_r_f1_s = precision_recall_fscore_support(target_labels, output_labels, labels=metric_labels, average='micro')
    p_r_f1 = {'precision': p_r_f1_s[0] * 100,
              'recall': p_r_f1_s[1] * 100,
              'f1': p_r_f1_s[2] * 100}
    return p_r_f1
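In Example 12 the labels=metric_labels argument restricts the micro average to the listed relation categories, which is a common way to keep a "no relation" or negative class out of the score. A small sketch of the effect, with invented label ids (0 standing in for the excluded negative class):

from sklearn.metrics import precision_recall_fscore_support

y_true = [0, 1, 2, 2, 0, 1]
y_pred = [0, 1, 2, 0, 1, 1]

# micro-average computed only over labels 1 and 2; label 0 is ignored
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred, labels=[1, 2], average='micro')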
Example 13: summary_util
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def summary_util(self, type):
    if type == "test":
        Y_hat = self.model.predict(self.X_test)
        Y = self.Y_test
    elif type == "train":
        Y_hat = self.model.predict(self.X_train)
        Y = self.Y_train
    elif type == "val":
        Y_hat = self.model.predict(self.X_val)
        Y = self.Y_val
    elif type == "forecast":
        Y_hat = self.model.predict(self.X_forecast)
        Y = self.Y_forecast
    Y_pred = Y_hat > 0.5
    precision, recall, F1, junk = precision_recall_fscore_support(Y, Y_pred)
    out = dict()
    out['precision'] = precision[1]
    out['recall'] = recall[1]
    out['F1'] = F1[1]
    return out
Example 14: boot_human
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def boot_human(i, sample_size=sample_size):
    np.random.seed(seed=i)
    random_pids = np.random.choice(pt_list_unique_sub, size=sample_size, replace=True)
    test = np.array([p_2_id_sub[pid] for pid in random_pids])
    boot_list = []
    for ids in test:
        size = len(ids)
        boot_list.append(np.random.choice(ids))
    y_pred_sub = human_authored[boot_list, :]
    y_true_sub = subset_y[boot_list, :]
    # evaluate model
    # print('calculating')
    output = precision_recall_fscore_support(y_true_sub.flatten(), y_pred_sub.flatten())
    precision = output[0][2]
    recall = output[1][2]
    f1 = output[2][2]
    # print('done')
    return precision, recall, f1
Example 15: boot_human_clinic
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import precision_recall_fscore_support [as alias]
def boot_human_clinic(i, sample_size=sample_size):
    np.random.seed(seed=i)
    random_pids = np.random.choice(pt_list_unique_sub, size=sample_size, replace=True)
    test = np.array([p_2_id_sub[pid] for pid in random_pids])
    boot_list = []
    for ids in test:
        size = len(ids)
        boot_list.append(np.random.choice(ids))
    y_pred_sub = subset_ClinicNet[boot_list, :]
    y_true_sub = subset_y[boot_list, :]
    auroc = roc_auc_score(y_true_sub, y_pred_sub, average='micro')
    avg_precision = average_precision_score(y_true_sub, y_pred_sub, average='micro')
    y_pred_sub[y_pred_sub < threshold_clinicnet] = 0
    y_pred_sub[y_pred_sub >= threshold_clinicnet] = 1
    # evaluate model
    # print('calculating')
    output = precision_recall_fscore_support(y_true_sub.flatten(), y_pred_sub.flatten())
    precision = output[0][1]
    recall = output[1][1]
    f1 = output[2][1]
    # print('done')
    return auroc, avg_precision, precision, recall, f1