

Python metrics.auc Method Code Examples

This article collects typical usage examples of the Python method sklearn.metrics.auc. If you are unsure what metrics.auc does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from sklearn.metrics, the module this method belongs to.


The following presents 15 code examples of the metrics.auc method, sorted by popularity.
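Before the individual examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: compute an ROC curve with sklearn.metrics.roc_curve, then integrate it with metrics.auc. The labels and scores below are toy data, for illustration only.

# Minimal sketch: ROC AUC via roc_curve + auc (toy data)
import numpy as np
from sklearn import metrics

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])

# roc_curve returns the false/true positive rates at each score threshold;
# auc then integrates TPR over FPR with the trapezoidal rule.
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score)
roc_auc = metrics.auc(fpr, tpr)
print('AUC = %0.3f' % roc_auc)  # 0.750 for this toy data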

Example 1: fit_model

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def fit_model(self, data, cross_val_data, cross_val_labels):
        eval_metrics = []
        for i in range(self.n_ensemble):
            # Leave fold i out: train on all other folds, test on fold i.
            train_sm = np.concatenate(cross_val_data[:i] +
                                      cross_val_data[(i + 1):])
            test_sm = cross_val_data[i]
            train_labels = np.concatenate(cross_val_labels[:i] +
                                          cross_val_labels[(i + 1):])
            test_labels = cross_val_labels[i]
            # Compute fingerprint features for the train/test sets.
            fp_train = get_fp(train_sm)
            fp_test = get_fp(test_sm)
            self.model[i].fit(fp_train, train_labels.ravel())
            predicted = self.model[i].predict(fp_test)
            if self.model_type == 'classifier':
                # ROC AUC for classifiers.
                fpr, tpr, thresholds = metrics.roc_curve(test_labels, predicted)
                eval_metrics.append(metrics.auc(fpr, tpr))
                metrics_type = 'AUC'
            elif self.model_type == 'regressor':
                # Coefficient of determination for regressors.
                r2 = metrics.r2_score(test_labels, predicted)
                eval_metrics.append(r2)
                metrics_type = 'R^2 score'
        return eval_metrics, metrics_type 
Developer: Mariewelt, Project: OpenChem, Lines: 24, Source: vanilla_model.py

Example 2: validate_on_lfw

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def validate_on_lfw(model, lfw_160_path):
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs('validation-LFW-pairs.txt')
    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)
    num_pairs = len(actual_issame)

    all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
    for k in tqdm.trange(num_pairs):
        img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]
        img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]
        batch = np.stack([img1, img2], axis=0)
        embeddings = model.eval_embeddings(batch)
        all_embeddings[k * 2: k * 2 + 2, :] = embeddings

    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer) 
Developer: ppwwyyxx, Project: Adversarial-Face-Attack, Lines: 27, Source: face_attack.py

Example 3: compute_roc

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def compute_roc(y_true, y_pred, plot=False):
    """
    TODO
    :param y_true: ground truth
    :param y_pred: predictions
    :param plot:
    :return:
    """
    fpr, tpr, _ = roc_curve(y_true, y_pred)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 23, Source: util.py

Example 4: compute_roc_rfeinman

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    TODO
    :param probs_neg:
    :param probs_pos:
    :param plot:
    :return:
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 25, Source: util.py

Example 5: compute_auc

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def compute_auc(y_true, y_pred, label_index):
    """Compute Area Under the Curve (AUC) metric.
    Args:
        y_true: true class
        y_pred: probabilities for a class
        label_index:
            label_index == 1 => laughter (class1) vs. others (class0)
            label_index == 2 => filler (class1) vs. others (class0)
    Returns:
        auc_val: AUC metric accuracy
    """
    # Binarize the labels in place: 1 for label_index, 0 for everything else.
    for i in range(y_true.shape[0]):
        y_true[i] = 0 if y_true[i] != label_index else 1

    y_true = np.reshape(y_true, (-1,))
    y_pred = np.reshape(y_pred[:, label_index], (-1,))

    try:
        fpr, tpr, _ = roc_curve(y_true, y_pred, pos_label=1)
    except UndefinedMetricWarning:
        # Only reachable when warnings are escalated to errors; in that case
        # there are no valid rates to integrate, so bail out instead of
        # falling through to auc() with fpr/tpr undefined (a bug upstream).
        return None
    auc_val = auc(fpr, tpr)
    return auc_val 
Developer: hirofumi0810, Project: tensorflow_end2end_speech_recognition, Lines: 25, Source: metric.py
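
A note on Example 5: catching UndefinedMetricWarning as an exception only has an effect when warnings have been escalated to errors. A minimal sketch of that escalation (illustrative; not part of the original project):

import warnings
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import roc_curve

with warnings.catch_warnings():
    # Promote the warning class to an exception so it becomes catchable.
    warnings.simplefilter('error', category=UndefinedMetricWarning)
    try:
        # Only one class present, so the FPR is undefined and sklearn warns.
        roc_curve([1, 1, 1], [0.1, 0.5, 0.9])
    except UndefinedMetricWarning:
        print('caught UndefinedMetricWarning')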

Example 6: roc

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def roc(self, data, model, tt, name):
        # Per-protein ROC AUC: one curve per protein, then aggregate.
        scores = self.get_predictions_loss(data, model, tt)[0]
        labels = [prot["label"][:, 2] for prot in data[tt]]
        fprs = []
        tprs = []
        roc_aucs = []
        for s, l in zip(scores, labels):
            fpr, tpr, _ = roc_curve(l, s)
            roc_auc = auc(fpr, tpr)
            fprs.append(fpr)
            tprs.append(tpr)
            roc_aucs.append(roc_auc)
        # Report both the median and the mean across proteins.
        auc_prot_med = np.median(roc_aucs)
        auc_prot_ave = np.mean(roc_aucs)
        printt("{} average protein auc: {:0.3f}".format(name, auc_prot_ave))
        printt("{} median protein auc: {:0.3f}".format(name, auc_prot_med))
        return ["auc_prot_ave_" + tt, "auc_prot_med_" + tt], [auc_prot_ave, auc_prot_med] 
Developer: fouticus, Project: pipgcn, Lines: 19, Source: results_processor.py

Example 7: get_all_metrics

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def get_all_metrics(model, eval_data, eval_labels, pred_labels):
    fpr, tpr, thresholds_keras = roc_curve(eval_labels, pred_labels)
    auc_ = auc(fpr, tpr)
    print("auc_keras:" + str(auc_))

    score = model.evaluate(eval_data, eval_labels, verbose=0)
    print("Test accuracy: " + str(score[1]))

    precision = precision_score(eval_labels, pred_labels)
    print('Precision score: {0:0.2f}'.format(precision))

    recall = recall_score(eval_labels, pred_labels)
    print('Recall score: {0:0.2f}'.format(recall))

    f1 = f1_score(eval_labels, pred_labels)
    print('F1 score: {0:0.2f}'.format(f1))

    average_precision = average_precision_score(eval_labels, pred_labels)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))

    return auc_, score[1], precision, recall, f1, average_precision, fpr, tpr 
Developer: tushartushar, Project: DeepLearningSmells, Lines: 23, Source: metrics_util.py

Example 8: get_all_metrics_

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def get_all_metrics_(eval_labels, pred_labels):
    fpr, tpr, thresholds_keras = roc_curve(eval_labels, pred_labels)
    auc_ = auc(fpr, tpr)
    print("auc_keras:" + str(auc_))

    precision = precision_score(eval_labels, pred_labels)
    print('Precision score: {0:0.2f}'.format(precision))

    recall = recall_score(eval_labels, pred_labels)
    print('Recall score: {0:0.2f}'.format(recall))

    f1 = f1_score(eval_labels, pred_labels)
    print('F1 score: {0:0.2f}'.format(f1))

    average_precision = average_precision_score(eval_labels, pred_labels)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))

    return auc_, precision, recall, f1, average_precision, fpr, tpr 
Developer: tushartushar, Project: DeepLearningSmells, Lines: 20, Source: metrics_util.py

Example 9: compute_roc_auc_scores

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def compute_roc_auc_scores(y, y_pred):
  """Transforms the results dict into roc-auc-scores and prints scores.

  Parameters
  ----------
  results: dict
  task_types: dict
    dict mapping task names to output type. Each output type must be either
    "classification" or "regression".
  """
  try:
    score = roc_auc_score(y, y_pred)
  except ValueError:
    warnings.warn("ROC AUC score calculation failed.")
    score = 0.5
  return score 
Developer: simonfqy, Project: PADME, Lines: 18, Source: __init__.py

Example 10: accuracy

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def accuracy(y_true, y_pred):
    # Compute the confusion matrix from argmax class indices.
    y = np.zeros(len(y_true))
    y_ = np.zeros(len(y_true))
    for i in range(len(y_true)):
        y[i] = np.argmax(y_true[i, :])
        y_[i] = np.argmax(y_pred[i, :])
    cnf_mat = confusion_matrix(y, y_)

    # Acc = 1.0*(cnf_mat[1][1]+cnf_mat[0][0])/len(y_true)
    # Sens = 1.0*cnf_mat[1][1]/(cnf_mat[1][1]+cnf_mat[1][0])
    # Spec = 1.0*cnf_mat[0][0]/(cnf_mat[0][0]+cnf_mat[0][1])

    # # Plot the ROC curve
    # fpr, tpr, thresholds = roc_curve(y_true[:,0], y_pred[:,0])
    # Auc = auc(fpr, tpr)

    # Compute multi-class evaluation metrics.
    Sens = recall_score(y, y_, average='macro')
    Prec = precision_score(y, y_, average='macro')
    F1 = f1_score(y, y_, average='weighted')
    Support = precision_recall_fscore_support(y, y_, beta=0.5, average=None)
    return Sens, Prec, F1, cnf_mat 
Developer: xyj77, Project: MCF-3D-CNN, Lines: 26, Source: conv_featuremaps_visualization.py

Example 11: compute_eer

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def compute_eer(loss_file, reverse, smoothing):
    # Accept either a single loss file or a directory of them.
    if not os.path.isdir(loss_file):
        loss_file_list = [loss_file]
    else:
        loss_file_list = os.listdir(loss_file)
        loss_file_list = [os.path.join(loss_file, sub_loss_file) for sub_loss_file in loss_file_list]

    optimal_results = RecordResult(auc=np.inf)
    for sub_loss_file in loss_file_list:
        dataset, scores, labels = get_scores_labels(sub_loss_file, reverse, smoothing)
        fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)
        eer = cal_eer(fpr, tpr)

        results = RecordResult(fpr, tpr, eer, dataset, sub_loss_file)

        # Keep the better result according to RecordResult's ordering.
        if optimal_results > results:
            optimal_results = results

        if os.path.isdir(loss_file):
            print(results)
    print('##### optimal result and model EER = {}'.format(optimal_results))
    return optimal_results 
Developer: fjchange, Project: object_centric_VAD, Lines: 24, Source: evaluate.py

Example 12: __call__

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def __call__(self, pos_triples, neg_triples=None):
        triples = pos_triples + neg_triples
        labels = [1 for _ in range(len(pos_triples))] + [0 for _ in range(len(neg_triples))]

        # Split each (subject, predicate, object) triple into relation and entity inputs.
        Xr, Xe = [], []
        for (s_idx, p_idx, o_idx), label in zip(triples, labels):
            Xr += [[p_idx]]
            Xe += [[s_idx, o_idx]]

        ascores = self.scoring_function([Xr, Xe])
        ays = np.array(labels)

        if self.rescale_predictions:
            # Rescale so the smallest nonzero score gap is at least ~1e-7.
            diffs = np.diff(np.sort(ascores))
            min_diff = min(abs(diffs[np.nonzero(diffs)]))

            if min_diff < 1e-8:
                ascores = (ascores * (1e-7 / min_diff)).astype(np.float64)

        aucroc_value = metrics.roc_auc_score(ays, ascores)
        precision, recall, thresholds = metrics.precision_recall_curve(ays, ascores, pos_label=1)
        aucpr_value = metrics.auc(recall, precision)

        return aucroc_value, aucpr_value 
Developer: uclnlp, Project: inferbeddings, Lines: 26, Source: metrics.py
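
Example 12 also demonstrates the other common use of metrics.auc: area under the precision-recall curve, obtained by passing recall as x and precision as y. A minimal standalone sketch of that pattern (toy data, illustrative only):

# AUC-PR pattern (toy data)
import numpy as np
from sklearn import metrics

y_true = np.array([0, 1, 1, 0, 1])
y_score = np.array([0.2, 0.9, 0.6, 0.4, 0.7])

# Argument order matters: recall is the x-axis, precision the y-axis.
precision, recall, thresholds = metrics.precision_recall_curve(y_true, y_score, pos_label=1)
aucpr = metrics.auc(recall, precision)

Note that sklearn's average_precision_score summarizes the same curve with a step-wise sum rather than trapezoidal interpolation, so the two values generally differ slightly.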

Example 13: test_auc

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def test_auc():
    # Test Area Under Curve (AUC) computation
    x = [0, 1]
    y = [0, 1]
    assert_array_almost_equal(auc(x, y), 0.5)
    x = [1, 0]
    y = [0, 1]
    assert_array_almost_equal(auc(x, y), 0.5)
    x = [1, 0, 0]
    y = [0, 1, 1]
    assert_array_almost_equal(auc(x, y), 0.5)
    x = [0, 1]
    y = [1, 1]
    assert_array_almost_equal(auc(x, y), 1)
    x = [0, 0.5, 1]
    y = [0, 0.5, 1]
    assert_array_almost_equal(auc(x, y), 0.5) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 19, Source: test_ranking.py
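
As Example 13 makes explicit, metrics.auc is a general trapezoidal-rule integrator over any curve supplied as x/y arrays, not only ROC curves. The x values must be monotonic (increasing or decreasing); otherwise scikit-learn raises a ValueError. A quick illustration:

from sklearn import metrics

# Trapezoidal area under a curve; x must be monotonic.
print(metrics.auc([0, 0.5, 1], [0, 0.5, 1]))  # 0.5 (triangle)
print(metrics.auc([0, 1], [1, 1]))            # 1.0 (rectangle)
# metrics.auc([0, 1, 0.5], [0, 1, 1]) raises a ValueError
# (x is neither increasing nor decreasing)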

Example 14: test_auc_gold_labels_behaviour

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def test_auc_gold_labels_behaviour(self, device: str):
        # Check that it works with different pos_label
        auc = Auc(positive_label=4)

        predictions = torch.randn(8, device=device)
        labels = torch.randint(3, 5, (8,), dtype=torch.long, device=device)
        # We make sure that the positive label is always present.
        labels[0] = 4
        auc(predictions, labels)
        computed_auc_value = auc.get_metric(reset=True)

        false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
            labels.cpu().numpy(), predictions.cpu().numpy(), pos_label=4
        )
        real_auc_value = metrics.auc(false_positive_rates, true_positive_rates)
        assert_allclose(real_auc_value, computed_auc_value)

        # Check that it errs on getting more than 2 labels.
        with pytest.raises(ConfigurationError) as _:
            labels = torch.tensor([3, 4, 5, 6, 7, 8, 9, 10], device=device)
            auc(predictions, labels) 
Developer: allenai, Project: allennlp, Lines: 23, Source: auc_test.py

Example 15: compute_aupr

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import auc [as alias]
def compute_aupr(all_targets, all_predictions):
    # Per-label area under the precision-recall curve for multi-label targets.
    aupr_array = []
    for i in range(all_targets.shape[1]):
        try:
            precision, recall, thresholds = metrics.precision_recall_curve(all_targets[:, i], all_predictions[:, i], pos_label=1)
            # Note: the reorder argument was deprecated and then removed in
            # scikit-learn 0.22; on newer versions use metrics.auc(recall, precision).
            auPR = metrics.auc(recall, precision, reorder=True)
            if not math.isnan(auPR):
                aupr_array.append(numpy.nan_to_num(auPR))
        except:
            # Skip labels for which the PR curve cannot be computed.
            pass

    aupr_array = numpy.array(aupr_array)
    mean_aupr = numpy.mean(aupr_array)
    median_aupr = numpy.median(aupr_array)
    var_aupr = numpy.var(aupr_array)
    return mean_aupr, median_aupr, var_aupr, aupr_array 
Developer: QData, Project: LaMP, Lines: 18, Source: evals.py


Note: The sklearn.metrics.auc examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Consult each project's license before using or redistributing the code; do not reproduce this compilation without permission.