

Python metrics.average_precision_score Method Code Examples

This article collects typical usage examples of the Python method sklearn.metrics.average_precision_score. If you are wondering what metrics.average_precision_score does, or how to use it, the curated code examples below should help. You can also explore further usage examples from the sklearn.metrics module.


The following presents 15 code examples of the metrics.average_precision_score method, ordered by popularity by default.
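Before the individual examples, here is a minimal sketch of the call itself: average_precision_score takes binary ground-truth labels together with continuous scores and summarizes the precision-recall curve as a single number. The labels and scores below are made up purely for illustration.

# Minimal illustration of average_precision_score with toy data
import numpy as np
from sklearn.metrics import average_precision_score

y_true = np.array([0, 0, 1, 1])             # binary ground truth
y_scores = np.array([0.1, 0.4, 0.35, 0.8])  # continuous scores, e.g. probabilities

print('AP: {0:0.2f}'.format(average_precision_score(y_true, y_scores)))  # AP: 0.83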

Example 1: test

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def test(self, z, pos_edge_index, neg_edge_index):
        r"""Given latent variables :obj:`z`, positive edges
        :obj:`pos_edge_index` and negative edges :obj:`neg_edge_index`,
        computes area under the ROC curve (AUC) and average precision (AP)
        scores.

        Args:
            z (Tensor): The latent space :math:`\mathbf{Z}`.
            pos_edge_index (LongTensor): The positive edges to evaluate
                against.
            neg_edge_index (LongTensor): The negative edges to evaluate
                against.
        """
        pos_y = z.new_ones(pos_edge_index.size(1))
        neg_y = z.new_zeros(neg_edge_index.size(1))
        y = torch.cat([pos_y, neg_y], dim=0)

        pos_pred = self.decoder(z, pos_edge_index, sigmoid=True)
        neg_pred = self.decoder(z, neg_edge_index, sigmoid=True)
        pred = torch.cat([pos_pred, neg_pred], dim=0)

        y, pred = y.detach().cpu().numpy(), pred.detach().cpu().numpy()

        return roc_auc_score(y, pred), average_precision_score(y, pred) 
Author: rusty1s, Project: pytorch_geometric, Lines: 26, Source: autoencoder.py
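The method above assumes a trained pytorch_geometric autoencoder, but the label/score concatenation pattern it uses can be shown standalone. The tensors below are synthetic stand-ins for the decoder outputs, for illustration only:

import torch
from sklearn.metrics import roc_auc_score, average_precision_score

# Stand-ins for decoder(z, pos_edge_index) and decoder(z, neg_edge_index)
pos_pred = torch.rand(100)
neg_pred = torch.rand(400)

# Positives labeled 1 and negatives 0, in the same order as the predictions
y = torch.cat([torch.ones(100), torch.zeros(400)])
pred = torch.cat([pos_pred, neg_pred])

print(roc_auc_score(y.numpy(), pred.numpy()),
      average_precision_score(y.numpy(), pred.numpy()))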

Example 2: get_all_metrics

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def get_all_metrics(model, eval_data, eval_labels, pred_labels):
    fpr, tpr, thresholds_keras = roc_curve(eval_labels, pred_labels)
    auc_ = auc(fpr, tpr)
    print("auc_keras:" + str(auc_))

    score = model.evaluate(eval_data, eval_labels, verbose=0)
    print("Test accuracy: " + str(score[1]))

    precision = precision_score(eval_labels, pred_labels)
    print('Precision score: {0:0.2f}'.format(precision))

    recall = recall_score(eval_labels, pred_labels)
    print('Recall score: {0:0.2f}'.format(recall))

    f1 = f1_score(eval_labels, pred_labels)
    print('F1 score: {0:0.2f}'.format(f1))

    average_precision = average_precision_score(eval_labels, pred_labels)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))

    return auc_, score[1], precision, recall, f1, average_precision, fpr, tpr 
Author: tushartushar, Project: DeepLearningSmells, Lines: 23, Source: metrics_util.py
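Note that average_precision_score is designed for continuous scores (probabilities or decision values); passing hard 0/1 predictions such as pred_labels, as done here, collapses the precision-recall curve to a single operating point. The same caveat applies to Examples 3 and 5.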

Example 3: get_all_metrics_

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def get_all_metrics_(eval_labels, pred_labels):
    fpr, tpr, thresholds_keras = roc_curve(eval_labels, pred_labels)
    auc_ = auc(fpr, tpr)
    print("auc_keras:" + str(auc_))

    precision = precision_score(eval_labels, pred_labels)
    print('Precision score: {0:0.2f}'.format(precision))

    recall = recall_score(eval_labels, pred_labels)
    print('Recall score: {0:0.2f}'.format(recall))

    f1 = f1_score(eval_labels, pred_labels)
    print('F1 score: {0:0.2f}'.format(f1))

    average_precision = average_precision_score(eval_labels, pred_labels)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))

    return auc_, precision, recall, f1, average_precision, fpr, tpr 
Author: tushartushar, Project: DeepLearningSmells, Lines: 20, Source: metrics_util.py

Example 4: test

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def test(data, model, epoch, args):
    model.eval()

    n_iters = 0
    ap_sum = 0.0

    progress_bar = tqdm(data)
    for batch_idx, sample_batched in enumerate(progress_bar):
        img, target = load_tensor_data(sample_batched, args.cuda, volatile=True)
        
        output = model(img)
        ap = average_precision_score(target.data, output.data) 
        n_iters += 1
        ap_sum += ap
        if batch_idx % args.log_interval == 0:
            m_ap = ap_sum / n_iters
            progress_bar.set_postfix(dict(AP='{:.2}'.format(m_ap)))

    m_ap = ap_sum / n_iters
    print('Test Epoch {}: Avg. Precision Score = {:.2};'.format(epoch, m_ap)) 
Author: mesnico, Project: RelationNetworks-CLEVR, Lines: 22, Source: cnn_train.py

Example 5: calculate_scores

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def calculate_scores(y_predicted, y_true):
    """
    Function to calculate different performance scores
    """
    accuracy = accuracy_score(y_pred=y_predicted, y_true=y_true)
    precision = precision_score(y_pred=y_predicted, y_true=y_true)
    average_precision_score1 = average_precision_score(y_score=y_predicted, y_true=y_true)
    f1_score1 = f1_score(y_pred=y_predicted, y_true=y_true)

    print("Accuracy score:", accuracy)
    print("Precision score:", precision)
    print("Average Precision score:", average_precision_score1)
    print("F1 score:", f1_score1)
    print("Outlier detection and/or treatment completed.")

    return {"accuracy": accuracy,
            "precision": precision,
            "average_precision_score": average_precision_score1,
            "f1_score": f1_score1,
            } 
Author: MateLabs, Project: AutoOut, Lines: 22, Source: main.py

Example 6: _average_precision_slow

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def _average_precision_slow(y_true, y_score):
    """A second alternative implementation of average precision that closely
    follows the Wikipedia article's definition (see References). This should
    give results identical to `average_precision_score` for all inputs.

    References
    ----------
    .. [1] `Wikipedia entry for the Average precision
       <https://en.wikipedia.org/wiki/Average_precision>`_
    """
    precision, recall, threshold = precision_recall_curve(y_true, y_score)
    precision = list(reversed(precision))
    recall = list(reversed(recall))
    average_precision = 0
    for i in range(1, len(precision)):
        average_precision += precision[i] * (recall[i] - recall[i - 1])
    return average_precision 
Author: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 19, Source: test_ranking.py
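For reference, the summation implemented by the loop above corresponds to the standard definition

    \mathrm{AP} = \sum_{n} (R_n - R_{n-1}) \, P_n

where P_n and R_n denote the precision and recall at the n-th threshold of the precision-recall curve.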

Example 7: test_average_precision_score_pos_label_errors

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def test_average_precision_score_pos_label_errors():
    # Raise an error when pos_label is not in binary y_true
    y_true = np.array([0, 1])
    y_pred = np.array([0, 1])
    error_message = ("pos_label=2 is invalid. Set it to a label in y_true.")
    assert_raise_message(ValueError, error_message, average_precision_score,
                         y_true, y_pred, pos_label=2)
    # Raise an error for multilabel-indicator y_true with
    # pos_label other than 1
    y_true = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])
    y_pred = np.array([[0.9, 0.1], [0.1, 0.9], [0.8, 0.2], [0.2, 0.8]])
    error_message = ("Parameter pos_label is fixed to 1 for multilabel"
                     "-indicator y_true. Do not set pos_label or set "
                     "pos_label to 1.")
    assert_raise_message(ValueError, error_message, average_precision_score,
                         y_true, y_pred, pos_label=0) 
Author: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 18, Source: test_ranking.py

Example 8: test_score_scale_invariance

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def test_score_scale_invariance():
    # Test that average_precision_score and roc_auc_score are invariant by
    # the scaling or shifting of probabilities
    # This test was expanded (added scaled_down) in response to github
    # issue #3864 (and others), where overly aggressive rounding was causing
    # problems for users with very small y_score values
    y_true, _, probas_pred = make_prediction(binary=True)

    roc_auc = roc_auc_score(y_true, probas_pred)
    roc_auc_scaled_up = roc_auc_score(y_true, 100 * probas_pred)
    roc_auc_scaled_down = roc_auc_score(y_true, 1e-6 * probas_pred)
    roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
    assert_equal(roc_auc, roc_auc_scaled_up)
    assert_equal(roc_auc, roc_auc_scaled_down)
    assert_equal(roc_auc, roc_auc_shifted)

    pr_auc = average_precision_score(y_true, probas_pred)
    pr_auc_scaled_up = average_precision_score(y_true, 100 * probas_pred)
    pr_auc_scaled_down = average_precision_score(y_true, 1e-6 * probas_pred)
    pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
    assert_equal(pr_auc, pr_auc_scaled_up)
    assert_equal(pr_auc, pr_auc_scaled_down)
    assert_equal(pr_auc, pr_auc_shifted) 
Author: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 25, Source: test_ranking.py
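Both metrics depend only on the ordering of the scores, not their absolute values, which is why scaling or shifting probas_pred leaves the results unchanged.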

Example 9: score_link_prediction

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def score_link_prediction(labels, scores):
    """
    Calculates the area under the ROC curve and the average precision score.

    Parameters
    ----------
    labels : array-like, shape [N]
        The ground truth labels
    scores : array-like, shape [N]
        The (unnormalized) scores indicating how likely each instance is to be a link

    Returns
    -------
    roc_auc : float
        Area under the ROC curve score
    ap : float
        Average precision score
    """

    return roc_auc_score(labels, scores), average_precision_score(labels, scores) 
Author: abojchevski, Project: graph2gauss, Lines: 22, Source: utils.py

Example 10: average_precision_compute_fn

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def average_precision_compute_fn(y_preds, y_targets, mask, activation=None):
    try:
        from sklearn.metrics import average_precision_score
    except ImportError:
        raise RuntimeError("This contrib module requires sklearn to be installed.")

    y_true = y_targets.numpy()
    if activation is not None:
        y_preds = activation(y_preds)
    y_pred = y_preds.numpy()

    if mask is not None:
        y_true = y_true[:, mask]
        y_pred = y_pred[:, mask]

    return average_precision_score(y_true, y_pred) 
Author: leokarlin, Project: LaSO, Lines: 18, Source: metrics.py

Example 11: get_ap

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def get_ap(self, q_name, sorted_idx):
        rel   = self.__relevants[q_name]
        junk  = self.__junk[q_name]

        # construct ground-truth and scores:
        y_scores = np.zeros(self.N_images)
        y_true   = np.zeros(self.N_images)
        for e,i in enumerate(sorted_idx): y_scores[i] = self.N_images - e
        for i in rel: y_true[i] = 1

        # remove junk:
        y_scores = np.delete(y_scores, junk)
        y_true   = np.delete(y_true, junk)

        # compute ap:
        return average_precision_score(y_true, y_scores) 
Author: almazan, Project: paiss, Lines: 18, Source: oxford.py
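Note the rank-to-score conversion: each retrieved image is assigned the score N_images - e, where e is its rank, so the score ordering matches the ranking and average_precision_score reproduces the usual retrieval AP once junk images are removed.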

Example 12: plot_PR_curve

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def plot_PR_curve(classifier):
    
    precision, recall, thresholds = precision_recall_curve(DataPrep.test_news['Label'], classifier)
    average_precision = average_precision_score(DataPrep.test_news['Label'], classifier)
    
    plt.step(recall, precision, color='b', alpha=0.2,
             where='post')
    plt.fill_between(recall, precision, step='post', alpha=0.2,
                     color='b')
    
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('2-class Random Forest Precision-Recall curve: AP={0:0.2f}'.format(
              average_precision)) 
Author: nishitpatel01, Project: Fake_News_Detection, Lines: 18, Source: classifier.py

Example 13: report_metrics

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def report_metrics(y_dset, y_pred, batch_size, dset='Val'):
    # Print additional metrics involving predictions
    n_rows = (y_dset.shape[0] // batch_size) * batch_size  # integer division so the slice below gets an int
    y_true = y_dset[0:n_rows, :].flatten()
    y_pred = y_pred.flatten()

    val_ap = average_precision_score(y_true, y_pred)
    val_roc = roc_auc_score(y_true, y_pred)

    n = y_true.size
    n_pos = y_true.sum()
    idx_sorted = np.argsort(-y_pred)
    val_rec = []

    logging.info(dset + "-AP {:.6f}".format(val_ap))
    logging.info(dset + "-ROC {:.6f}".format(val_roc))
    for i, v in enumerate([10, 25, 50, 75, 100]):
        tp = y_true[idx_sorted[:int(v * n / 100)]].sum()
        val_rec.append(tp * 1.0 / n_pos)
        logging.info(dset + "-R{} {:.6f}".format(v, val_rec[i]))
    return val_ap, val_rec[2]


# ############################## Main program ################################# 
Author: escorciav, Project: deep-action-proposals, Lines: 26, Source: learning.py
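The loop reports recall at fixed ranking depths: for each percentage v it keeps the top v% of predictions by score and measures the fraction of all positives recovered. The function returns the AP together with recall at the 50% depth (val_rec[2]).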

Example 14: evaluate_embedding_link_prediction

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def evaluate_embedding_link_prediction(adj_matrix, node_pairs, embedding_matrix, norm=False):
    """Evaluate the node embeddings on the link prediction task.

    :param adj_matrix: sp.csr_matrix, shape [n_nodes, n_nodes]
        Adjacency matrix of the graph
    :param node_pairs:
    :param embedding_matrix: np.ndarray, shape [n_nodes, embedding_dim]
        Embedding matrix
    :param norm: bool
        Whether to normalize the embeddings
    :return: float, float
        Area under ROC curve (AUC) score and average precision (AP) score
    """
    if norm:
        embedding_matrix = normalize(embedding_matrix)

    true = adj_matrix[node_pairs[:, 0], node_pairs[:, 1]].A1
    scores = (embedding_matrix[node_pairs[:, 0]] * embedding_matrix[node_pairs[:, 1]]).sum(1)

    auc_score, ap_score = roc_auc_score(true, scores), average_precision_score(true, scores)

    return auc_score, ap_score 
Author: abojchevski, Project: node_embedding_attack, Lines: 24, Source: utils.py

Example 15: __call__

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import average_precision_score [as alias]
def __call__(self, args, env):

        import numpy as np
        import matplotlib.pyplot as plt
        from sklearn.metrics import average_precision_score
        from sklearn.metrics import precision_recall_curve
        from vergeml.plots import load_labels, load_predictions

        try:
            labels = load_labels(env)
        except FileNotFoundError:
            raise VergeMLError("Can't plot PR curve - not supported by model.")

        nclasses = len(labels)
        if args['class'] not in labels:
            raise VergeMLError("Unknown class: " + args['class'])

        try:
            y_test, y_score = load_predictions(env, nclasses)
        except FileNotFoundError:
            raise VergeMLError("Can't plot PR curve - not supported by model.")

        # From:
        # https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html#sphx-glr-auto-examples-model-selection-plot-precision-recall-py

        ix = labels.index(args['class'])
        y_test = y_test[:,ix].astype(int)  # np.int was removed in recent NumPy; use the builtin int
        y_score = y_score[:,ix]

        precision, recall, _ = precision_recall_curve(y_test, y_score)
        average_precision = average_precision_score(y_test, y_score)

        plt.step(recall, precision, color='b', alpha=0.2, where='post')
        plt.fill_between(recall, precision, alpha=0.2, color='b', step='post')

        plt.xlabel('Recall ({})'.format(args['class']))
        plt.ylabel('Precision ({})'.format(args['class']))
        plt.ylim([0.0, 1.05])
        plt.xlim([0.0, 1.0])
        plt.title('Precision-Recall curve for @{0}: AP={1:0.2f}'.format(args['@AI'], average_precision))
        plt.show() 
Author: mme, Project: vergeml, Lines: 43, Source: pr.py


Note: The sklearn.metrics.average_precision_score examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Please do not reproduce this page without permission.