

Python metrics.label_ranking_average_precision_score method code examples

This article collects typical usage examples of the Python method sklearn.metrics.label_ranking_average_precision_score. If you are wondering what metrics.label_ranking_average_precision_score does, or how to call it, the curated code examples below may help. You can also explore further usage examples from its parent module, sklearn.metrics.


Below are 8 code examples of metrics.label_ranking_average_precision_score, ordered by popularity by default.
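Before the project-specific examples, here is a minimal usage sketch of the metric itself on a tiny toy problem (the values are chosen purely for illustration). The ground truth is a binary indicator array of shape (n_samples, n_labels), the scores are real-valued with the same shape, and the metric averages, over samples and over each sample's relevant labels, the fraction of labels ranked at or above that label which are themselves relevant.

import numpy as np
from sklearn.metrics import label_ranking_average_precision_score

# Two samples, three labels. Sample 1's only relevant label is ranked 2nd
# (precision 1/2); sample 2's only relevant label is ranked 3rd (precision 1/3).
y_true = np.array([[1, 0, 0],
                   [0, 0, 1]])
y_score = np.array([[0.75, 0.5, 1.0],
                    [1.0, 0.2, 0.1]])

# (1/2 + 1/3) / 2 ≈ 0.4167
print(label_ranking_average_precision_score(y_true, y_score))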

Example 1: check_alternative_lrap_implementation

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import label_ranking_average_precision_score [as alias]
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
                                          n_samples=20, random_state=0):
    _, y_true = make_multilabel_classification(n_features=1,
                                               allow_unlabeled=False,
                                               random_state=random_state,
                                               n_classes=n_classes,
                                               n_samples=n_samples)

    # Score with ties
    y_score = sparse_random_matrix(n_components=y_true.shape[0],
                                   n_features=y_true.shape[1],
                                   random_state=random_state)

    if hasattr(y_score, "toarray"):
        y_score = y_score.toarray()
    score_lrap = label_ranking_average_precision_score(y_true, y_score)
    score_my_lrap = _my_lrap(y_true, y_score)
    assert_almost_equal(score_lrap, score_my_lrap)

    # Uniform score
    random_state = check_random_state(random_state)
    y_score = random_state.uniform(size=(n_samples, n_classes))
    score_lrap = label_ranking_average_precision_score(y_true, y_score)
    score_my_lrap = _my_lrap(y_true, y_score)
    assert_almost_equal(score_lrap, score_my_lrap) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 27, Source file: test_ranking.py

Example 2: test_lrap_sample_weighting_zero_labels

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import label_ranking_average_precision_score [as alias]
def test_lrap_sample_weighting_zero_labels():
    # Degenerate sample labeling (e.g., zero labels for a sample) is a valid
    # special case for lrap (the sample is considered to achieve perfect
    # precision), but this case is not tested in test_common.
    # For these test samples, the APs are 0.5, 0.75, and 1.0 (default for zero
    # labels).
    y_true = np.array([[1, 0, 0, 0], [1, 0, 0, 1], [0, 0, 0, 0]],
                      dtype=bool)
    y_score = np.array([[0.3, 0.4, 0.2, 0.1], [0.1, 0.2, 0.3, 0.4],
                        [0.4, 0.3, 0.2, 0.1]])
    samplewise_lraps = np.array([0.5, 0.75, 1.0])
    sample_weight = np.array([1.0, 1.0, 0.0])

    assert_almost_equal(
        label_ranking_average_precision_score(y_true, y_score,
                                              sample_weight=sample_weight),
        np.sum(sample_weight * samplewise_lraps) / np.sum(sample_weight)) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 19, Source file: test_ranking.py
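To make the expected value explicit (this just restates the arithmetic inside the assert above; it is not part of the original test): the third sample has zero labels, so its AP defaults to 1.0, but its sample weight is 0.0, so it drops out of the weighted mean.

import numpy as np

samplewise_lraps = np.array([0.5, 0.75, 1.0])  # per-sample APs from the test above
sample_weight = np.array([1.0, 1.0, 0.0])      # the zero-label sample is weighted out
expected = np.sum(sample_weight * samplewise_lraps) / np.sum(sample_weight)
print(expected)  # (1.0*0.5 + 1.0*0.75 + 0.0*1.0) / 2.0 = 0.625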

Example 3: lwlrap

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import label_ranking_average_precision_score [as alias]
def lwlrap(truth, scores):
  """Calculate the overall lwlrap using sklearn.metrics.lrap."""
  # sklearn doesn't correctly apply weighting to samples with no labels, so just skip them.
  sample_weight = np.sum(truth > 0, axis=1)
  nonzero_weight_sample_indices = np.flatnonzero(sample_weight > 0)
  overall_lwlrap = label_ranking_average_precision_score(
      truth[nonzero_weight_sample_indices, :] > 0,
      scores[nonzero_weight_sample_indices, :],
      sample_weight=sample_weight[nonzero_weight_sample_indices])
  return overall_lwlrap 
Developer: ex4sperans, Project: freesound-classification, Lines of code: 12, Source file: utils.py
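A quick, hypothetical way to exercise the lwlrap helper above (the arrays are made up for illustration, and the snippet assumes the lwlrap definition from this example and its sklearn import are already in scope):

import numpy as np
from sklearn.metrics import label_ranking_average_precision_score  # used inside lwlrap

# Three samples, four classes; the last sample has no positive labels, so lwlrap
# skips it (its weight, the number of true labels, is zero).
truth = np.array([[1, 0, 0, 1],
                  [0, 1, 0, 0],
                  [0, 0, 0, 0]])
scores = np.array([[0.9, 0.1, 0.2, 0.8],
                   [0.3, 0.6, 0.1, 0.2],
                   [0.5, 0.5, 0.5, 0.5]])

# Every true label outranks every false one here, so the weighted LRAP is 1.0.
print(lwlrap(truth, scores))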

Example 4: test_lrap_error_raised

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import label_ranking_average_precision_score [as alias]
def test_lrap_error_raised():
    check_lrap_error_raised(label_ranking_average_precision_score) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 4, Source file: test_ranking.py

Example 5: test_alternative_lrap_implementation

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import label_ranking_average_precision_score [as alias]
def test_alternative_lrap_implementation(n_samples, n_classes, random_state):

    check_alternative_lrap_implementation(
               label_ranking_average_precision_score,
               n_classes, n_samples, random_state) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 7, Source file: test_ranking.py

Example 6: lrap

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import label_ranking_average_precision_score [as alias]
def lrap(prediction, target):

    # Calculate the label ranking average precision (LRAP) for every sample
    return label_ranking_average_precision_score(target, prediction) 
Developer: kahst, Project: BirdCLEF-Baseline, Lines of code: 6, Source file: metrics.py

Example 7: test_label_ranking_avp

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import label_ranking_average_precision_score [as alias]
def test_label_ranking_avp():
    for fn in [label_ranking_average_precision_score, _my_lrap]:
        yield check_lrap_toy, fn
        yield check_lrap_without_tie_and_increasing_score, fn
        yield check_lrap_only_ties, fn
        yield check_zero_or_all_relevant_labels, fn
        yield check_lrap_error_raised, label_ranking_average_precision_score

    for n_samples, n_classes, random_state in product((1, 2, 8, 20),
                                                      (2, 5, 10),
                                                      range(1)):
        yield (check_alternative_lrap_implementation,
               label_ranking_average_precision_score,
               n_classes, n_samples, random_state) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines of code: 16, Source file: test_ranking.py

Example 8: evaluate

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import label_ranking_average_precision_score [as alias]
def evaluate(experiment_path, meta_data=False, xml_dir="", train_dir="",
             submission_file=""):
    pickle_path = os.path.join(experiment_path, "predictions.pkl")
    with open(pickle_path, 'rb') as input:
        y_trues = pickle.load(input)
        y_scores = pickle.load(input)
        training_segments = pickle.load(input)

    if meta_data:
        elevation_scores = compute_elevation_scores(training_segments, xml_dir,
                                                   train_dir)

        ## Combine the scores using Bayes Thm.
        normalize = np.array([np.sum(y_s * e_s) for y_s, e_s in zip(y_scores,
                                                                elevation_scores)])
        y_scores = y_scores * elevation_scores / normalize[:, None]

    if submission_file:
        write_to_submission_file(submission_file, y_scores, training_segments,
                                 train_dir)
        return

    map_score = mean_average_precision(y_trues, y_scores)
    auroc_score = area_under_roc_curve(y_trues, y_scores)

    # coverage error
    coverage_error = metrics.coverage_error(y_trues, y_scores)
    # label ranking average precision
    lrap = metrics.label_ranking_average_precision_score(y_trues, y_scores)
    # ranking loss
    ranking_loss = metrics.label_ranking_loss(y_trues, y_scores)

    print("")
    print("- Top 1:", top_n(y_trues, y_scores, 1))
    print("- Top 2:", top_n(y_trues, y_scores, 2))
    print("- Top 3:", top_n(y_trues, y_scores, 3))
    print("- Top 4:", top_n(y_trues, y_scores, 4))
    print("- Top 5:", top_n(y_trues, y_scores, 5))
    print("")
    print("Mean Average Precision: ", map_score)
    print("Area Under ROC Curve: ", auroc_score)
    print("Coverage Error: ", coverage_error)
    print("Label Ranking Average Precision: ", lrap)
    print("Ranking Loss: ", ranking_loss)
    print("Total predictions: ", len(y_scores))

    return {
        "map":map_score,
        "auroc":auroc_score,
        "coverage_error":coverage_error,
        "lrap":lrap,
        "ranking_loss": ranking_loss,
        "top_1":top_n(y_trues, y_scores, 1),
        "top_5":top_n(y_trues, y_scores, 5),
    } 
Developer: johnmartinsson, Project: bird-species-classification, Lines of code: 57, Source file: evaluate.py


Note: The sklearn.metrics.label_ranking_average_precision_score examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Please do not republish without permission.