This article collects typical usage examples of the Python method sklearn.metrics.roc_curve. If you have been wondering what metrics.roc_curve does, how to call it, or what real usage looks like, the curated examples below should help. You can also explore further usage of the module it belongs to, sklearn.metrics.
The following presents 14 code examples of the metrics.roc_curve method, ordered by popularity by default.
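Before the examples, here is a minimal, self-contained sketch of the method's input/output contract (the toy labels and scores are made up for illustration):

import numpy as np
from sklearn.metrics import roc_curve, auc

y_true = np.array([0, 0, 1, 1])             # binary ground-truth labels
y_score = np.array([0.1, 0.4, 0.35, 0.8])   # scores for the positive class
fpr, tpr, thresholds = roc_curve(y_true, y_score)
print(fpr)             # [0.  0.  0.5 0.5 1. ]
print(tpr)             # [0.  0.5 0.5 1.  1. ]
print(auc(fpr, tpr))   # 0.75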
Example 1: fit_model
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_curve [as alias]
def fit_model(self, data, cross_val_data, cross_val_labels):
    eval_metrics = []
    for i in range(self.n_ensemble):
        # Hold out fold i for testing; train on the remaining folds
        train_sm = np.concatenate(cross_val_data[:i] +
                                  cross_val_data[(i + 1):])
        test_sm = cross_val_data[i]
        train_labels = np.concatenate(cross_val_labels[:i] +
                                      cross_val_labels[(i + 1):])
        test_labels = cross_val_labels[i]
        fp_train = get_fp(train_sm)
        fp_test = get_fp(test_sm)
        self.model[i].fit(fp_train, train_labels.ravel())
        predicted = self.model[i].predict(fp_test)
        if self.model_type == 'classifier':
            fpr, tpr, thresholds = metrics.roc_curve(test_labels, predicted)
            eval_metrics.append(metrics.auc(fpr, tpr))
            metrics_type = 'AUC'
        elif self.model_type == 'regressor':
            r2 = metrics.r2_score(test_labels, predicted)
            eval_metrics.append(r2)
            metrics_type = 'R^2 score'
    return eval_metrics, metrics_type
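Note that this example feeds hard predict() outputs to metrics.roc_curve, which produces a coarse ROC with a single interior point; probability scores trace the full curve. A minimal sketch of the difference, assuming a hypothetical RandomForestClassifier on made-up data:

import numpy as np
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier

X = np.random.RandomState(0).rand(100, 5)
y = (X[:, 0] > 0.5).astype(int)
clf = RandomForestClassifier(random_state=0).fit(X[:80], y[:80])
hard = clf.predict(X[80:])               # 0/1 labels -> coarse ROC
soft = clf.predict_proba(X[80:])[:, 1]   # scores -> full ROC
for scores in (hard, soft):
    fpr, tpr, _ = metrics.roc_curve(y[80:], scores)
    print(len(fpr), metrics.auc(fpr, tpr))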
Example 2: plot_roc_curve
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_curve [as alias]
def plot_roc_curve(y_true, y_score, size=None):
    """Plot the ROC curve and report the AUC in the title."""
    false_positive_rate, true_positive_rate, thresholds = roc_curve(
        y_true, y_score)
    if size is not None:
        plt.figure(figsize=(size, size))
        plt.axis('equal')
    plt.plot(false_positive_rate, true_positive_rate, lw=2, color='navy')
    plt.plot([0, 1], [0, 1], color='gray', lw=1, linestyle='--')
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.ylim([-0.05, 1.05])
    plt.xlim([-0.05, 1.05])
    plt.grid()
    plt.title('Receiver operating characteristic AUC={0:0.2f}'.format(
        roc_auc_score(y_true, y_score)))
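A possible invocation with made-up scores (the imports mirror what the function's module must already provide):

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, roc_auc_score

y_true = np.array([0, 1, 1, 0, 1, 0, 1, 1])
y_score = np.array([0.2, 0.7, 0.6, 0.4, 0.9, 0.1, 0.8, 0.3])
plot_roc_curve(y_true, y_score, size=5)
plt.show()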
Example 3: compute_roc
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_curve [as alias]
def compute_roc(y_true, y_pred, plot=False):
    """
    Compute the ROC curve and its AUC score, optionally plotting the curve.
    :param y_true: ground truth labels
    :param y_pred: predicted scores
    :param plot: if True, display the ROC curve
    :return: fpr, tpr, auc_score
    """
    fpr, tpr, _ = roc_curve(y_true, y_pred)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()
    return fpr, tpr, auc_score
Example 4: compute_roc_rfeinman
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_curve [as alias]
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    Compute the ROC curve from separate negative and positive score arrays,
    optionally plotting the curve.
    :param probs_neg: scores assigned to negative samples
    :param probs_pos: scores assigned to positive samples
    :param plot: if True, display the ROC curve
    :return: fpr, tpr, auc_score
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()
    return fpr, tpr, auc_score
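A usage sketch with synthetic detector scores (the beta distributions are an assumption chosen so that negatives score low and positives high):

import numpy as np
probs_neg = np.random.RandomState(0).beta(2, 5, size=200)  # negatives: low scores
probs_pos = np.random.RandomState(1).beta(5, 2, size=200)  # positives: high scores
fpr, tpr, auc_score = compute_roc_rfeinman(probs_neg, probs_pos, plot=False)
print('AUC = %.3f' % auc_score)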
Example 5: computeFROC
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_curve [as alias]
def computeFROC(FROCGTList, FROCProbList, totalNumberOfImages, excludeList):
    # Remove excluded candidates
    FROCGTList_local = []
    FROCProbList_local = []
    for i in range(len(excludeList)):
        if excludeList[i] == False:
            FROCGTList_local.append(FROCGTList[i])
            FROCProbList_local.append(FROCProbList[i])
    numberOfDetectedLesions = sum(FROCGTList_local)
    totalNumberOfLesions = sum(FROCGTList)
    totalNumberOfCandidates = len(FROCProbList_local)
    fpr, tpr, thresholds = skl_metrics.roc_curve(FROCGTList_local, FROCProbList_local)
    if sum(FROCGTList) == len(FROCGTList):  # Handle the border case in which there are no false positives and the ROC analysis yields NaN values
        print("WARNING, this system has no false positives..")
        fps = np.zeros(len(fpr))
    else:
        # Rescale FPR to false positives per image
        fps = fpr * (totalNumberOfCandidates - numberOfDetectedLesions) / totalNumberOfImages
    sens = (tpr * numberOfDetectedLesions) / totalNumberOfLesions
    return fps, sens, thresholds
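The rescaling can be sanity-checked on a tiny made-up candidate list: fpr is measured per negative candidate, so multiplying by the number of non-lesion candidates and dividing by the image count converts it to false positives per image.

# Toy check (made-up numbers): 6 candidates over 2 images, 2 true lesions, none excluded
FROCGTList = [1, 0, 0, 1, 0, 0]
FROCProbList = [0.9, 0.8, 0.3, 0.7, 0.2, 0.1]
fps, sens, thr = computeFROC(FROCGTList, FROCProbList, 2, [False] * 6)
# At threshold 0.8: one false positive over two images -> 0.5 FPs/image,
# one of two lesions found -> sensitivity 0.5
print(fps, sens)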
Example 6: print_roc
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_curve [as alias]
def print_roc(self, y_true, y_scores, filename):
    '''
    Prints the ROC for this model.
    '''
    fpr, tpr, thresholds = metrics.roc_curve(y_true, y_scores)
    plt.figure()
    plt.plot(fpr, tpr, color='darkorange', label='ROC curve (area = %0.2f)' % self.roc_auc)
    plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right")
    plt.savefig(filename)
    plt.close()
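Note that the legend reads self.roc_auc, so the AUC must already be computed before this method runs; a sketch of that prerequisite on toy data (the attribute name follows the example, the data is made up):

import numpy as np
from sklearn import metrics
y_true = np.array([0, 1, 1, 0, 1])
y_scores = np.array([0.1, 0.8, 0.7, 0.4, 0.9])
roc_auc = metrics.roc_auc_score(y_true, y_scores)  # the value self.roc_auc must hold
print('%0.2f' % roc_auc)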
Example 7: compute_auc
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_curve [as alias]
def compute_auc(y_true, y_pred, label_index):
    """Compute the Area Under the Curve (AUC) metric.
    Args:
        y_true: true classes
        y_pred: per-class probabilities
        label_index:
            label_index == 1 => laughter (class1) vs. others (class0)
            label_index == 2 => filler (class1) vs. others (class0)
    Returns:
        auc_val: AUC value
    """
    # Binarize: label_index becomes the positive class, everything else negative
    for i in range(y_true.shape[0]):
        y_true[i] = 0 if y_true[i] != label_index else 1
    y_true = np.reshape(y_true, (-1,))
    y_pred = np.reshape(y_pred[:, label_index], (-1,))
    try:
        fpr, tpr, _ = roc_curve(y_true, y_pred, pos_label=1)
    except UndefinedMetricWarning:
        # Reached only when warnings are escalated to errors; the ROC is undefined here
        return np.nan
    auc_val = auc(fpr, tpr)
    return auc_val
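The except clause only fires if UndefinedMetricWarning has been escalated to an exception beforehand, which Python's warnings machinery supports:

import warnings
from sklearn.exceptions import UndefinedMetricWarning
# Turn the warning into a raisable exception so the try/except above can catch it
warnings.simplefilter('error', UndefinedMetricWarning)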
Example 8: roc
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_curve [as alias]
def roc(self, data, model, tt, name):
    scores = self.get_predictions_loss(data, model, tt)[0]
    labels = [prot["label"][:, 2] for prot in data[tt]]
    fprs = []
    tprs = []
    roc_aucs = []
    for s, l in zip(scores, labels):
        fpr, tpr, _ = roc_curve(l, s)
        roc_auc = auc(fpr, tpr)
        fprs.append(fpr)
        tprs.append(tpr)
        roc_aucs.append(roc_auc)
    auc_prot_med = np.median(roc_aucs)
    auc_prot_ave = np.mean(roc_aucs)
    printt("{} average protein auc: {:0.3f}".format(name, auc_prot_ave))
    printt("{} median protein auc: {:0.3f}".format(name, auc_prot_med))
    return ["auc_prot_ave_" + tt, "auc_prot_med_" + tt], [auc_prot_ave, auc_prot_med]
Example 9: get_all_metrics
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_curve [as alias]
def get_all_metrics(model, eval_data, eval_labels, pred_labels):
    fpr, tpr, thresholds_keras = roc_curve(eval_labels, pred_labels)
    auc_ = auc(fpr, tpr)
    print("auc_keras:" + str(auc_))
    score = model.evaluate(eval_data, eval_labels, verbose=0)
    print("Test accuracy: " + str(score[1]))  # assumes accuracy is the model's first compiled metric
    precision = precision_score(eval_labels, pred_labels)
    print('Precision score: {0:0.2f}'.format(precision))
    recall = recall_score(eval_labels, pred_labels)
    print('Recall score: {0:0.2f}'.format(recall))
    f1 = f1_score(eval_labels, pred_labels)
    print('F1 score: {0:0.2f}'.format(f1))
    average_precision = average_precision_score(eval_labels, pred_labels)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))
    return auc_, score[1], precision, recall, f1, average_precision, fpr, tpr
Example 10: get_all_metrics_
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_curve [as alias]
def get_all_metrics_(eval_labels, pred_labels):
    fpr, tpr, thresholds_keras = roc_curve(eval_labels, pred_labels)
    auc_ = auc(fpr, tpr)
    print("auc_keras:" + str(auc_))
    precision = precision_score(eval_labels, pred_labels)
    print('Precision score: {0:0.2f}'.format(precision))
    recall = recall_score(eval_labels, pred_labels)
    print('Recall score: {0:0.2f}'.format(recall))
    f1 = f1_score(eval_labels, pred_labels)
    print('F1 score: {0:0.2f}'.format(f1))
    average_precision = average_precision_score(eval_labels, pred_labels)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))
    return auc_, precision, recall, f1, average_precision, fpr, tpr
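A usage sketch for this model-free variant with made-up labels (the sklearn.metrics imports mirror what the source module must already have):

import numpy as np
from sklearn.metrics import (roc_curve, auc, precision_score, recall_score,
                             f1_score, average_precision_score)

eval_labels = np.array([0, 1, 1, 0, 1, 0])
pred_labels = np.array([0, 1, 0, 0, 1, 1])
results = get_all_metrics_(eval_labels, pred_labels)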
Example 11: accuracy
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_curve [as alias]
def accuracy(y_true, y_pred):
    # Compute the confusion matrix
    y = np.zeros(len(y_true))
    y_ = np.zeros(len(y_true))
    for i in range(len(y_true)):
        y[i] = np.argmax(y_true[i, :])
        y_[i] = np.argmax(y_pred[i, :])
    cnf_mat = confusion_matrix(y, y_)
    # Acc = 1.0*(cnf_mat[1][1]+cnf_mat[0][0])/len(y_true)
    # Sens = 1.0*cnf_mat[1][1]/(cnf_mat[1][1]+cnf_mat[1][0])
    # Spec = 1.0*cnf_mat[0][0]/(cnf_mat[0][0]+cnf_mat[0][1])
    # # Plot the ROC curve
    # fpr, tpr, thresholds = roc_curve(y_true[:, 0], y_pred[:, 0])
    # Auc = auc(fpr, tpr)
    # Compute multi-class evaluation metrics
    Sens = recall_score(y, y_, average='macro')
    Prec = precision_score(y, y_, average='macro')
    F1 = f1_score(y, y_, average='weighted')
    Support = precision_recall_fscore_support(y, y_, beta=0.5, average=None)
    return Sens, Prec, F1, cnf_mat
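The per-row loop can be replaced with a single vectorized argmax; an equivalent sketch on toy one-hot arrays:

import numpy as np
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = np.array([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1], [0.2, 0.3, 0.5]])
y = np.argmax(y_true, axis=1)   # class indices, same result as the loop
y_ = np.argmax(y_pred, axis=1)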
Example 12: compute_eer
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_curve [as alias]
def compute_eer(loss_file, reverse, smoothing):
    if not os.path.isdir(loss_file):
        loss_file_list = [loss_file]
    else:
        loss_file_list = os.listdir(loss_file)
        loss_file_list = [os.path.join(loss_file, sub_loss_file) for sub_loss_file in loss_file_list]
    optimal_results = RecordResult(auc=np.inf)
    for sub_loss_file in loss_file_list:
        dataset, scores, labels = get_scores_labels(sub_loss_file, reverse, smoothing)
        fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)
        eer = cal_eer(fpr, tpr)
        results = RecordResult(fpr, tpr, eer, dataset, sub_loss_file)
        if optimal_results > results:
            optimal_results = results
        if os.path.isdir(loss_file):
            print(results)
    print('##### optimal result and model EER = {}'.format(optimal_results))
    return optimal_results
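cal_eer is not shown in this snippet; a common implementation picks the operating point where the false positive rate equals the false negative rate (1 - TPR). A minimal sketch under that assumption:

import numpy as np

def cal_eer(fpr, tpr):
    # Equal error rate: the point on the ROC where FPR == FNR (= 1 - TPR)
    idx = np.nanargmin(np.abs(fpr - (1 - tpr)))
    return (fpr[idx] + (1 - tpr[idx])) / 2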
Example 13: test_roc_returns_consistency
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_curve [as alias]
def test_roc_returns_consistency():
    # Test whether the returned thresholds match up with tpr
    # make small toy dataset
    y_true, _, probas_pred = make_prediction(binary=True)
    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
    # use the given thresholds to determine the tpr
    tpr_correct = []
    for t in thresholds:
        tp = np.sum((probas_pred >= t) & y_true)
        p = np.sum(y_true)
        tpr_correct.append(1.0 * tp / p)
    # compare tpr and tpr_correct to see if the thresholds' order was correct
    assert_array_almost_equal(tpr, tpr_correct, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
Example 14: test_roc_curve_one_label
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_curve [as alias]
def test_roc_curve_one_label():
    y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
    # assert there are warnings
    w = UndefinedMetricWarning
    fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
    # all true labels, all fpr should be nan
    assert_array_equal(fpr, np.full(len(thresholds), np.nan))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
    # assert there are warnings
    fpr, tpr, thresholds = assert_warns(w, roc_curve,
                                        [1 - x for x in y_true],
                                        y_pred)
    # all negative labels, all tpr should be nan
    assert_array_equal(tpr, np.full(len(thresholds), np.nan))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)