This article collects typical usage examples of the sklearn.metrics.auc method in Python. If you are wondering how metrics.auc is called, what its arguments look like, or how it is used in real projects, the curated examples below should help. You can also explore further usage examples from its parent module, sklearn.metrics.
Fifteen code examples of metrics.auc are shown below, sorted by popularity by default.
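Before the project examples, here is a minimal, self-contained sketch of the usual roc_curve + auc pattern that most of them follow; the toy labels and scores are illustrative only.

import numpy as np
from sklearn import metrics

# Toy ground-truth labels and classifier scores (illustrative values only).
y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])

# metrics.auc integrates any monotonic (x, y) curve with the trapezoidal rule;
# here the curve is the ROC curve produced by metrics.roc_curve.
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score)
roc_auc = metrics.auc(fpr, tpr)
print('AUC = %.3f' % roc_auc)  # 0.750 for this toy data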
Example 1: fit_model
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def fit_model(self, data, cross_val_data, cross_val_labels):
    eval_metrics = []
    for i in range(self.n_ensemble):
        train_sm = np.concatenate(cross_val_data[:i] +
                                  cross_val_data[(i + 1):])
        test_sm = cross_val_data[i]
        train_labels = np.concatenate(cross_val_labels[:i] +
                                      cross_val_labels[(i + 1):])
        test_labels = cross_val_labels[i]
        fp_train = get_fp(train_sm)
        fp_test = get_fp(test_sm)
        self.model[i].fit(fp_train, train_labels.ravel())
        predicted = self.model[i].predict(fp_test)
        if self.model_type == 'classifier':
            fpr, tpr, thresholds = metrics.roc_curve(test_labels, predicted)
            eval_metrics.append(metrics.auc(fpr, tpr))
            metrics_type = 'AUC'
        elif self.model_type == 'regressor':
            r2 = metrics.r2_score(test_labels, predicted)
            eval_metrics.append(r2)
            metrics_type = 'R^2 score'
    return eval_metrics, metrics_type
Example 2: validate_on_lfw
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def validate_on_lfw(model, lfw_160_path):
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs('validation-LFW-pairs.txt')
    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)
    num_pairs = len(actual_issame)

    all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
    for k in tqdm.trange(num_pairs):
        img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]
        img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]
        batch = np.stack([img1, img2], axis=0)
        embeddings = model.eval_embeddings(batch)
        all_embeddings[k * 2: k * 2 + 2, :] = embeddings

    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)
    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer)
Example 3: compute_roc
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def compute_roc(y_true, y_pred, plot=False):
    """
    Compute the ROC curve and its AUC.
    :param y_true: ground truth
    :param y_pred: predictions
    :param plot: if True, plot the ROC curve
    :return: (fpr, tpr, auc_score)
    """
    fpr, tpr, _ = roc_curve(y_true, y_pred)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()
    return fpr, tpr, auc_score
Example 4: compute_roc_rfeinman
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    Compute the ROC curve from scores on negative and positive samples.
    :param probs_neg: scores for the negative (label 0) samples
    :param probs_pos: scores for the positive (label 1) samples
    :param plot: if True, plot the ROC curve
    :return: (fpr, tpr, auc_score)
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()
    return fpr, tpr, auc_score
Example 5: compute_auc
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def compute_auc(y_true, y_pred, label_index):
    """Compute Area Under the Curve (AUC) metric.

    Args:
        y_true: true class
        y_pred: probabilities for a class
        label_index:
            label_index == 1 => laughter (class1) vs. others (class0)
            label_index == 2 => filler (class1) vs. others (class0)
    Returns:
        auc_val: AUC metric accuracy
    """
    for i in range(y_true.shape[0]):
        y_true[i] = 0 if y_true[i] != label_index else 1
    y_true = np.reshape(y_true, (-1,))
    y_pred = np.reshape(y_pred[:, label_index], (-1,))
    try:
        fpr, tpr, _ = roc_curve(y_true, y_pred, pos_label=1)
    except UndefinedMetricWarning:
        pass
    auc_val = auc(fpr, tpr)
    return auc_val
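A note on Example 5: UndefinedMetricWarning is a warning, not an exception, so the except clause above only fires when warnings have been escalated to errors, and fpr/tpr would then be unbound at the auc call. A minimal sketch of one way a caller could make that pattern explicit (the helper name safe_roc_auc and the NaN fallback are assumptions, not part of the original project):

import warnings
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import roc_curve, auc

def safe_roc_auc(y_true, y_score):
    """Return ROC AUC, or NaN when the curve is undefined (only one class present)."""
    with warnings.catch_warnings():
        # Escalate the warning to an exception so it can actually be caught.
        warnings.filterwarnings('error', category=UndefinedMetricWarning)
        try:
            fpr, tpr, _ = roc_curve(y_true, y_score, pos_label=1)
        except UndefinedMetricWarning:
            return float('nan')
    return auc(fpr, tpr)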
Example 6: roc
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def roc(self, data, model, tt, name):
    scores = self.get_predictions_loss(data, model, tt)[0]
    labels = [prot["label"][:, 2] for prot in data[tt]]
    fprs = []
    tprs = []
    roc_aucs = []
    for s, l in zip(scores, labels):
        fpr, tpr, _ = roc_curve(l, s)
        roc_auc = auc(fpr, tpr)
        fprs.append(fpr)
        tprs.append(tpr)
        roc_aucs.append(roc_auc)
    auc_prot_med = np.median(roc_aucs)
    auc_prot_ave = np.mean(roc_aucs)
    printt("{} average protein auc: {:0.3f}".format(name, auc_prot_ave))
    printt("{} median protein auc: {:0.3f}".format(name, auc_prot_med))
    return ["auc_prot_ave_" + tt, "auc_prot_med_" + tt], [auc_prot_ave, auc_prot_med]
Example 7: get_all_metrics
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def get_all_metrics(model, eval_data, eval_labels, pred_labels):
    fpr, tpr, thresholds_keras = roc_curve(eval_labels, pred_labels)
    auc_ = auc(fpr, tpr)
    print("auc_keras:" + str(auc_))
    score = model.evaluate(eval_data, eval_labels, verbose=0)
    print("Test accuracy: " + str(score[1]))
    precision = precision_score(eval_labels, pred_labels)
    print('Precision score: {0:0.2f}'.format(precision))
    recall = recall_score(eval_labels, pred_labels)
    print('Recall score: {0:0.2f}'.format(recall))
    f1 = f1_score(eval_labels, pred_labels)
    print('F1 score: {0:0.2f}'.format(f1))
    average_precision = average_precision_score(eval_labels, pred_labels)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))
    return auc_, score[1], precision, recall, f1, average_precision, fpr, tpr
Example 8: get_all_metrics_
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def get_all_metrics_(eval_labels, pred_labels):
    fpr, tpr, thresholds_keras = roc_curve(eval_labels, pred_labels)
    auc_ = auc(fpr, tpr)
    print("auc_keras:" + str(auc_))
    precision = precision_score(eval_labels, pred_labels)
    print('Precision score: {0:0.2f}'.format(precision))
    recall = recall_score(eval_labels, pred_labels)
    print('Recall score: {0:0.2f}'.format(recall))
    f1 = f1_score(eval_labels, pred_labels)
    print('F1 score: {0:0.2f}'.format(f1))
    average_precision = average_precision_score(eval_labels, pred_labels)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))
    return auc_, precision, recall, f1, average_precision, fpr, tpr
Example 9: compute_roc_auc_scores
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def compute_roc_auc_scores(y, y_pred):
    """Compute the ROC AUC score, falling back to 0.5 when it is undefined.

    Parameters
    ----------
    y: array_like
        True binary labels.
    y_pred: array_like
        Predicted scores or probabilities.
    """
    try:
        score = roc_auc_score(y, y_pred)
    except ValueError:
        warnings.warn("ROC AUC score calculation failed.")
        score = 0.5
    return score
Example 10: accuracy
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def accuracy(y_true, y_pred):
    # Compute the confusion matrix
    y = np.zeros(len(y_true))
    y_ = np.zeros(len(y_true))
    for i in range(len(y_true)):
        y[i] = np.argmax(y_true[i, :])
        y_[i] = np.argmax(y_pred[i, :])
    cnf_mat = confusion_matrix(y, y_)

    # Acc = 1.0*(cnf_mat[1][1]+cnf_mat[0][0])/len(y_true)
    # Sens = 1.0*cnf_mat[1][1]/(cnf_mat[1][1]+cnf_mat[1][0])
    # Spec = 1.0*cnf_mat[0][0]/(cnf_mat[0][0]+cnf_mat[0][1])

    # # Plot the ROC curve
    # fpr, tpr, thresholds = roc_curve(y_true[:,0], y_pred[:,0])
    # Auc = auc(fpr, tpr)

    # Compute multi-class evaluation metrics
    Sens = recall_score(y, y_, average='macro')
    Prec = precision_score(y, y_, average='macro')
    F1 = f1_score(y, y_, average='weighted')
    Support = precision_recall_fscore_support(y, y_, beta=0.5, average=None)
    return Sens, Prec, F1, cnf_mat
Example 11: compute_eer
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def compute_eer(loss_file, reverse, smoothing):
    if not os.path.isdir(loss_file):
        loss_file_list = [loss_file]
    else:
        loss_file_list = os.listdir(loss_file)
        loss_file_list = [os.path.join(loss_file, sub_loss_file) for sub_loss_file in loss_file_list]

    optimal_results = RecordResult(auc=np.inf)
    for sub_loss_file in loss_file_list:
        dataset, scores, labels = get_scores_labels(sub_loss_file, reverse, smoothing)
        fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)
        eer = cal_eer(fpr, tpr)
        results = RecordResult(fpr, tpr, eer, dataset, sub_loss_file)
        if optimal_results > results:
            optimal_results = results
        if os.path.isdir(loss_file):
            print(results)
    print('##### optimal result and model EER = {}'.format(optimal_results))
    return optimal_results
Example 12: __call__
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def __call__(self, pos_triples, neg_triples=None):
    triples = pos_triples + neg_triples
    labels = [1 for _ in range(len(pos_triples))] + [0 for _ in range(len(neg_triples))]

    Xr, Xe = [], []
    for (s_idx, p_idx, o_idx), label in zip(triples, labels):
        Xr += [[p_idx]]
        Xe += [[s_idx, o_idx]]

    ascores = self.scoring_function([Xr, Xe])
    ays = np.array(labels)

    if self.rescale_predictions:
        diffs = np.diff(np.sort(ascores))
        min_diff = min(abs(diffs[np.nonzero(diffs)]))
        if min_diff < 1e-8:
            ascores = (ascores * (1e-7 / min_diff)).astype(np.float64)

    aucroc_value = metrics.roc_auc_score(ays, ascores)
    precision, recall, thresholds = metrics.precision_recall_curve(ays, ascores, pos_label=1)
    aucpr_value = metrics.auc(recall, precision)
    return aucroc_value, aucpr_value
Example 13: test_auc
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def test_auc():
    # Test Area Under Curve (AUC) computation
    x = [0, 1]
    y = [0, 1]
    assert_array_almost_equal(auc(x, y), 0.5)
    x = [1, 0]
    y = [0, 1]
    assert_array_almost_equal(auc(x, y), 0.5)
    x = [1, 0, 0]
    y = [0, 1, 1]
    assert_array_almost_equal(auc(x, y), 0.5)
    x = [0, 1]
    y = [1, 1]
    assert_array_almost_equal(auc(x, y), 1)
    x = [0, 0.5, 1]
    y = [0, 0.5, 1]
    assert_array_almost_equal(auc(x, y), 0.5)
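As the assertions above suggest, metrics.auc is plain trapezoidal integration of y over x, and x must be monotonic (increasing or decreasing). A quick check with toy values, not part of the original test suite:

import numpy as np
from sklearn.metrics import auc

# The last case in test_auc is ordinary trapezoidal integration.
x = np.array([0, 0.5, 1])
y = np.array([0, 0.5, 1])
print(auc(x, y), np.trapz(y, x))  # both 0.5

# Non-monotonic x is rejected with a ValueError.
try:
    auc([0, 1, 0.5], [0, 1, 1])
except ValueError as err:
    print(err)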
Example 14: test_auc_gold_labels_behaviour
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def test_auc_gold_labels_behaviour(self, device: str):
    # Check that it works with different pos_label
    auc = Auc(positive_label=4)

    predictions = torch.randn(8, device=device)
    labels = torch.randint(3, 5, (8,), dtype=torch.long, device=device)
    # We make sure that the positive label is always present.
    labels[0] = 4
    auc(predictions, labels)
    computed_auc_value = auc.get_metric(reset=True)

    false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
        labels.cpu().numpy(), predictions.cpu().numpy(), pos_label=4
    )
    real_auc_value = metrics.auc(false_positive_rates, true_positive_rates)
    assert_allclose(real_auc_value, computed_auc_value)

    # Check that it errs on getting more than 2 labels.
    with pytest.raises(ConfigurationError) as _:
        labels = torch.tensor([3, 4, 5, 6, 7, 8, 9, 10], device=device)
        auc(predictions, labels)
Example 15: compute_aupr
# Required imports: from sklearn import metrics
# Or: from sklearn.metrics import auc
def compute_aupr(all_targets, all_predictions):
    aupr_array = []
    for i in range(all_targets.shape[1]):
        try:
            precision, recall, thresholds = metrics.precision_recall_curve(all_targets[:, i], all_predictions[:, i], pos_label=1)
            auPR = metrics.auc(recall, precision, reorder=True)
            if not math.isnan(auPR):
                aupr_array.append(numpy.nan_to_num(auPR))
        except:
            pass
    aupr_array = numpy.array(aupr_array)
    mean_aupr = numpy.mean(aupr_array)
    median_aupr = numpy.median(aupr_array)
    var_aupr = numpy.var(aupr_array)
    return mean_aupr, median_aupr, var_aupr, aupr_array
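One caveat on Example 15: the reorder keyword of metrics.auc was deprecated and has been removed in newer scikit-learn releases, so the call above fails there. A hedged sketch of an equivalent per-column AUPR computation without reorder (the helper name column_aupr is illustrative, not part of the original project):

from sklearn import metrics

def column_aupr(targets_col, predictions_col):
    """Area under the precision-recall curve for a single label column.

    precision_recall_curve already returns recall in monotonically
    decreasing order, so metrics.auc handles it directly and no
    reorder flag is needed.
    """
    precision, recall, _ = metrics.precision_recall_curve(
        targets_col, predictions_col, pos_label=1)
    return metrics.auc(recall, precision)

Alternatively, metrics.average_precision_score summarizes the same precision-recall curve without trapezoidal interpolation and is often preferred for this purpose.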