This article collects typical usage examples of Python's sklearn.metrics.roc_auc_score method. If you are unsure what metrics.roc_auc_score does or how to call it, the curated examples below may help; you can also explore further usage examples from the sklearn.metrics module.
The following 15 code examples show metrics.roc_auc_score in use, sorted by popularity by default.
Example 1: multi_class_classification
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def multi_class_classification(data_X, data_Y):
    '''
    Calculate multi-class classification metrics and print them.
    Also requires: from sklearn import svm; from sklearn.model_selection import cross_val_predict
    '''
    svc = svm.SVC(C=1, kernel='linear')
    # X_train, X_test, y_train, y_test = train_test_split(data_X, data_Y, test_size=0.4, random_state=0)
    clf = svc.fit(data_X, data_Y)  # fit the SVM
    # array = svc.coef_
    # print(array)
    predicted = cross_val_predict(clf, data_X, data_Y, cv=2)
    print("accuracy", metrics.accuracy_score(data_Y, predicted))
    print("f1 score macro", metrics.f1_score(data_Y, predicted, average='macro'))
    print("f1 score micro", metrics.f1_score(data_Y, predicted, average='micro'))
    print("precision score", metrics.precision_score(data_Y, predicted, average='macro'))
    print("recall score", metrics.recall_score(data_Y, predicted, average='macro'))
    print("hamming_loss", metrics.hamming_loss(data_Y, predicted))
    print("classification_report", metrics.classification_report(data_Y, predicted))
    # jaccard_similarity_score was renamed to jaccard_score in recent scikit-learn releases
    print("jaccard_score", metrics.jaccard_score(data_Y, predicted, average='macro'))
    # print("log_loss", metrics.log_loss(data_Y, predicted))
    print("zero_one_loss", metrics.zero_one_loss(data_Y, predicted))
    # roc_auc_score needs probability scores (and multi_class='ovr'/'ovo' for multi-class),
    # so it is left disabled for these hard label predictions:
    # print("AUC&ROC", metrics.roc_auc_score(data_Y, predicted))
    # print("matthews_corrcoef", metrics.matthews_corrcoef(data_Y, predicted))
Example 2: evaluation_analysis
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def evaluation_analysis(true_label, predicted):
    '''
    Print all metric results for the given true labels and predictions.
    '''
    print("accuracy", metrics.accuracy_score(true_label, predicted))
    print("f1 score macro", metrics.f1_score(true_label, predicted, average='macro'))
    print("f1 score micro", metrics.f1_score(true_label, predicted, average='micro'))
    print("precision score", metrics.precision_score(true_label, predicted, average='macro'))
    print("recall score", metrics.recall_score(true_label, predicted, average='macro'))
    print("hamming_loss", metrics.hamming_loss(true_label, predicted))
    print("classification_report", metrics.classification_report(true_label, predicted))
    # jaccard_similarity_score was renamed to jaccard_score in recent scikit-learn releases
    print("jaccard_score", metrics.jaccard_score(true_label, predicted))
    print("log_loss", metrics.log_loss(true_label, predicted))
    print("zero_one_loss", metrics.zero_one_loss(true_label, predicted))
    print("AUC&ROC", metrics.roc_auc_score(true_label, predicted))
    print("matthews_corrcoef", metrics.matthews_corrcoef(true_label, predicted))
Example 3: test
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def test(self, z, pos_edge_index, neg_edge_index):
    """Evaluates node embeddings :obj:`z` on positive and negative test
    edges by computing AUC and F1 scores.

    Args:
        z (Tensor): The node embeddings.
        pos_edge_index (LongTensor): The positive edge indices.
        neg_edge_index (LongTensor): The negative edge indices.
    """
    with torch.no_grad():
        pos_p = self.discriminate(z, pos_edge_index)[:, :2].max(dim=1)[1]
        neg_p = self.discriminate(z, neg_edge_index)[:, :2].max(dim=1)[1]
    pred = (1 - torch.cat([pos_p, neg_p])).cpu()
    y = torch.cat(
        [pred.new_ones((pos_p.size(0))),
         pred.new_zeros(neg_p.size(0))])
    pred, y = pred.numpy(), y.numpy()

    auc = roc_auc_score(y, pred)
    f1 = f1_score(y, pred, average='binary') if pred.sum() > 0 else 0

    return auc, f1
Example 4: test
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def test(self, z, pos_edge_index, neg_edge_index):
    r"""Given latent variables :obj:`z`, positive edges
    :obj:`pos_edge_index` and negative edges :obj:`neg_edge_index`,
    computes area under the ROC curve (AUC) and average precision (AP)
    scores.

    Args:
        z (Tensor): The latent space :math:`\mathbf{Z}`.
        pos_edge_index (LongTensor): The positive edges to evaluate
            against.
        neg_edge_index (LongTensor): The negative edges to evaluate
            against.
    """
    pos_y = z.new_ones(pos_edge_index.size(1))
    neg_y = z.new_zeros(neg_edge_index.size(1))
    y = torch.cat([pos_y, neg_y], dim=0)

    pos_pred = self.decoder(z, pos_edge_index, sigmoid=True)
    neg_pred = self.decoder(z, neg_edge_index, sigmoid=True)
    pred = torch.cat([pos_pred, neg_pred], dim=0)

    y, pred = y.detach().cpu().numpy(), pred.detach().cpu().numpy()

    return roc_auc_score(y, pred), average_precision_score(y, pred)
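Independent of the graph model, the core pattern here is to concatenate the scores for positive and negative edges, pair them with 1/0 labels, and hand both to roc_auc_score and average_precision_score. A standalone sketch of just that pattern with made-up scores:

import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score

pos_pred = np.array([0.9, 0.8, 0.7])   # decoder scores for held-out true edges
neg_pred = np.array([0.4, 0.3, 0.6])   # decoder scores for sampled non-edges
y = np.concatenate([np.ones_like(pos_pred), np.zeros_like(neg_pred)])
pred = np.concatenate([pos_pred, neg_pred])
print(roc_auc_score(y, pred), average_precision_score(y, pred))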
Example 5: score_binary_classification
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def score_binary_classification(y, y_hat, report=True):
    """
    Create binary classification output.
    :param y: true labels
    :param y_hat: class 1 probabilities
    :param report: if True, print the report string
    :return: tuple of (AUC score, report string)
    """
    y_hat_class = [1 if x >= 0.5 else 0 for x in y_hat]  # convert probability to class for classification report

    report_string = "---Binary Classification Score--- \n"
    report_string += classification_report(y, y_hat_class)
    score = roc_auc_score(y, y_hat)
    report_string += "\nAUC = " + str(score)

    if report:
        print(report_string)
    return score, report_string
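A hypothetical call (not part of the original source), assuming classification_report and roc_auc_score are imported in the defining module:

y = [0, 0, 1, 1, 1]                  # true labels
y_hat = [0.2, 0.6, 0.4, 0.8, 0.9]    # predicted class-1 probabilities
score, report_string = score_binary_classification(y, y_hat)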
Example 6: roc_auc_score
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def roc_auc_score(gold, probs, ignore_in_gold=[], ignore_in_pred=[]):
    """Compute the ROC AUC score, given the gold labels and predicted probs.

    Args:
        gold: A 1d array-like of gold labels
        probs: A 2d array-like of predicted probabilities
        ignore_in_gold: A list of labels for which elements having that gold
            label will be ignored.
        ignore_in_pred: Not supported; must be left empty (see below).

    Returns:
        roc_auc_score: The (float) roc_auc score
    """
    gold = arraylike_to_numpy(gold)

    # Filter out the ignore_in_gold (but not ignore_in_pred)
    # Note the current sub-functions (below) do not handle this...
    if len(ignore_in_pred) > 0:
        raise ValueError("ignore_in_pred not defined for ROC-AUC score.")

    keep = [x not in ignore_in_gold for x in gold]
    gold = gold[keep]
    probs = probs[keep, :]

    # Convert gold to one-hot indicator format, using the k inferred from probs
    gold_s = pred_to_prob(torch.from_numpy(gold), k=probs.shape[1]).numpy()

    return skm.roc_auc_score(gold_s, probs)
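The wrapper leans on helpers from its own code base (arraylike_to_numpy, pred_to_prob). The underlying pattern of one-hot gold labels scored against a probability matrix can be reproduced with NumPy and scikit-learn alone; a sketch, assuming labels are 1-indexed as in the original:

import numpy as np
from sklearn import metrics as skm

gold = np.array([1, 2, 3, 2])                    # gold labels in {1, ..., k}
probs = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.8, 0.1],
                  [0.2, 0.2, 0.6],
                  [0.3, 0.5, 0.2]])              # shape (n, k)
gold_s = np.eye(probs.shape[1])[gold - 1]        # one-hot, k inferred from probs
print(skm.roc_auc_score(gold_s, probs))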
Example 7: auroc
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def auroc(X, genes, labels, focus, background=None):
    assert(len(genes) == X.shape[1])

    focus_idx = focus == labels
    if background is None:
        background_idx = range(X.shape[0])
    else:
        background_idx = background == labels

    for g, gene in enumerate(genes):
        x_gene = X[:, g]
        x_focus = x_gene[focus_idx]
        x_background = x_gene[background_idx]
        y_score = np.concatenate([ x_focus, x_background ])
        y_true = np.zeros(len(x_focus) + len(x_background))
        y_true[:len(x_focus)] = 1
        auroc = roc_auc_score(y_true, y_score)
        print('{}\t{}'.format(gene, auroc))
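An illustrative call on synthetic data (the expression matrix, gene names and labels below are made up); with background=None every cell is used as background:

import numpy as np
from sklearn.metrics import roc_auc_score

rng = np.random.RandomState(0)
X = rng.rand(100, 3)                              # cells x genes
genes = ['GENE_A', 'GENE_B', 'GENE_C']
labels = rng.choice(['typeA', 'typeB'], size=100)
auroc(X, genes, labels, focus='typeA')            # prints one AUROC per gene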
Example 8: compute_roc_auc_scores
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def compute_roc_auc_scores(y, y_pred):
    """Computes the ROC AUC score, falling back to 0.5 when it is undefined.

    Parameters
    ----------
    y: array-like
        True binary labels.
    y_pred: array-like
        Predicted scores or probabilities.
    """
    try:
        score = roc_auc_score(y, y_pred)
    except ValueError:
        warnings.warn("ROC AUC score calculation failed.")
        score = 0.5
    return score
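roc_auc_score raises a ValueError when y contains only one class, which is exactly what the try/except guards against. A quick illustration of the fallback, assuming warnings and roc_auc_score are imported as in the source module:

import numpy as np

y_single_class = np.array([1, 1, 1])                    # only positives -> AUC undefined
y_pred = np.array([0.2, 0.7, 0.9])
print(compute_roc_auc_scores(y_single_class, y_pred))   # warns and returns 0.5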
Example 9: eval_all_scores
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def eval_all_scores(y_true_dict, y_score_dict):
    # when calculating RMSE, make sure y_true_dict is the full dict of lists
    aps = []              # average precisions
    ndcgs = [[], [], []]  # NDCG at 1, 3, 5

    for q in y_true_dict:
        if q not in y_score_dict:
            raise ValueError("Prediction has missing items.")
        if np.sum(y_true_dict[q]) != 0:
            aps.append(average_precision(y_true_dict[q], y_score_dict[q]))
            ndcgs[0].append(ndcg_score(y_true_dict[q], y_score_dict[q], k=1))
            ndcgs[1].append(ndcg_score(y_true_dict[q], y_score_dict[q], k=3))
            ndcgs[2].append(ndcg_score(y_true_dict[q], y_score_dict[q], k=5))
    ndcgs = np.asarray(ndcgs)

    y_true_list = trans_dict_to_list(y_true_dict, y_true_dict)
    y_score_list = trans_dict_to_list(y_true_dict, y_score_dict)
    auc = roc_auc_score(y_true_list, y_score_list)
    rmse = np.mean((y_true_list - y_score_list) ** 2)  # mean squared error; the square root is taken below

    # map, ndcg@1, ndcg@3, ndcg@5, auc, rmse
    return sum(aps) / len(aps), np.mean(ndcgs[0, :]), np.mean(ndcgs[1, :]), np.mean(ndcgs[2, :]), auc, np.sqrt(rmse)
    # including MAP and AUC
Example 10: on_epoch_end
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def on_epoch_end(self, epoch, logs={}):
    if epoch % self.interval == 0:
        y_pred = self.model.predict(self.X_val, verbose=0)
        # print(np.sum(y_pred[:, 1]))
        # y_true = np.argmax(self.y_val, axis=1)
        # y_pred = np.argmax(y_pred, axis=1)
        # print(y_true.shape, y_pred.shape)
        if self.mymil:
            score = roc_auc_score(self.y_val.max(axis=1), y_pred.max(axis=1))
        else:
            score = roc_auc_score(self.y_val[:, 1], y_pred[:, 1])
        print("interval evaluation - epoch: {:d} - auc: {:.2f}".format(epoch, score))
        if score > self.auc:
            self.auc = score
            # keep only the best checkpoint: delete previously saved AUC-tagged files, then save
            for f in os.listdir('./'):
                if f.startswith(self.filepath + 'auc'):
                    os.remove(f)
            self.model.save(self.filepath + 'auc' + str(score) + 'ep' + str(epoch) + '.hdf5')
Example 11: perform
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def perform(self, node, inputs, output_storage):
    """
    Calculate ROC AUC score.

    Parameters
    ----------
    node : Apply instance
        Symbolic inputs and outputs.
    inputs : list
        Sequence of inputs.
    output_storage : list
        List of mutable 1-element lists.
    """
    if roc_auc_score is None:
        raise RuntimeError("Could not import from sklearn.")
    y_true, y_score = inputs
    try:
        roc_auc = roc_auc_score(y_true, y_score)
    except ValueError:
        roc_auc = np.nan
    # rvalue = np.array((roc_auc, prec, reca, f1))
    # [0][0]
    output_storage[0][0] = theano._asarray(roc_auc, dtype=config.floatX)
Example 12: __call__
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def __call__(self, pos_triples, neg_triples=None):
    triples = pos_triples + neg_triples
    labels = [1 for _ in range(len(pos_triples))] + [0 for _ in range(len(neg_triples))]

    Xr, Xe = [], []
    for (s_idx, p_idx, o_idx), label in zip(triples, labels):
        Xr += [[p_idx]]
        Xe += [[s_idx, o_idx]]

    ascores = self.scoring_function([Xr, Xe])
    ays = np.array(labels)

    if self.rescale_predictions:
        diffs = np.diff(np.sort(ascores))
        min_diff = min(abs(diffs[np.nonzero(diffs)]))
        if min_diff < 1e-8:
            ascores = (ascores * (1e-7 / min_diff)).astype(np.float64)

    aucroc_value = metrics.roc_auc_score(ays, ascores)
    precision, recall, thresholds = metrics.precision_recall_curve(ays, ascores, pos_label=1)
    aucpr_value = metrics.auc(recall, precision)

    return aucroc_value, aucpr_value
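Here AUC-PR is obtained by integrating the precision-recall curve with metrics.auc; scikit-learn's average_precision_score summarizes the same curve with a step-wise rule and typically gives a close but not identical value. A small comparison sketch with made-up scores:

import numpy as np
from sklearn import metrics

ays = np.array([1, 1, 0, 0, 1, 0])
ascores = np.array([0.9, 0.8, 0.7, 0.4, 0.3, 0.2])
precision, recall, _ = metrics.precision_recall_curve(ays, ascores, pos_label=1)
print(metrics.auc(recall, precision))                 # trapezoidal area under the PR curve
print(metrics.average_precision_score(ays, ascores))  # step-wise summary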
Example 13: evaluate_print
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def evaluate_print(clf_name, y, y_pred):
    """Utility function for evaluating and printing the results for examples.
    Default metrics include ROC and Precision @ n.

    Parameters
    ----------
    clf_name : str
        The name of the detector.
    y : list or numpy array of shape (n_samples,)
        The ground truth. Binary (0: inliers, 1: outliers).
    y_pred : list or numpy array of shape (n_samples,)
        The raw outlier scores as returned by a fitted model.
    """
    y = column_or_1d(y)
    y_pred = column_or_1d(y_pred)
    check_consistent_length(y, y_pred)

    print('{clf_name} ROC:{roc}, precision @ rank n:{prn}'.format(
        clf_name=clf_name,
        roc=np.round(roc_auc_score(y, y_pred), decimals=4),
        prn=np.round(precision_n_scores(y, y_pred), decimals=4)))
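A hypothetical call with synthetic outlier scores (column_or_1d, check_consistent_length and precision_n_scores come from scikit-learn and pyod utilities in the original code base):

import numpy as np

y = np.array([0, 0, 0, 1, 1])                    # 1 marks outliers
y_scores = np.array([0.1, 0.3, 0.2, 0.9, 0.8])   # raw outlier scores
evaluate_print('KNN', y, y_scores)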
Example 14: _evaluate_final
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def _evaluate_final(self, model, xy_test, batch_size, history):
    res = {}
    pred_test = None

    if 'val_acc' in history.history:
        res['val_acc'] = max(history.history['val_acc'])
        rev_ix = -1 - list(reversed(history.history['val_acc'])).index(res['val_acc'])
        res['val_loss'] = history.history['val_loss'][rev_ix]
    res['acc'] = history.history['acc'][-1]
    res['loss'] = history.history['loss'][-1]

    if len(xy_test[0]):
        from sklearn.metrics import classification_report, roc_auc_score

        # evaluate with test data
        x_test, y_test = xy_test
        pred_test = model.predict(x_test, batch_size=batch_size, verbose=0)
        test_loss, test_acc = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=0)
        res['test_loss'] = test_loss
        res['test_acc'] = test_acc

        report = classification_report(y_true=np.argmax(y_test, axis=1),
                                       y_pred=np.argmax(pred_test, axis=1),
                                       target_names=self.labels,
                                       digits=4,
                                       output_dict=True)
        # np.int was removed from recent NumPy releases; the builtin int works the same here
        res['auc'] = roc_auc_score(y_test.astype(int), pred_test)
        for label in self.labels:
            stats = report[label]
            res[label + "-precision"] = stats['precision']
            res[label + "-recall"] = stats['recall']
            res[label + "-f1"] = stats['f1-score']

    return pred_test, res
Example 15: perf
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import roc_auc_score [as alias]
def perf(y_true, y_pred, y_score):
    """Print accuracy, ROC AUC, average precision and a classification report, then plot diagnostics."""
    print('Accuracy: %.2f' % accuracy_score(y_true, y_pred))
    print(' AUC ROC: %.2f' % roc_auc_score(y_true, y_score))
    print('  AUC AP: %.2f' % average_precision_score(y_true, y_score))
    print()
    print('Classification Report:')
    print(classification_report(y_true, y_pred))
    print()
    plot_confusion_matrices(y_true, y_pred, size=int(len(set(y_true)) * 2.5))
    print()
    plot_aucs(y_true, y_score, size=10)