This article collects typical usage examples of the Python method sklearn.metrics.label_ranking_loss. If you are unsure what metrics.label_ranking_loss does or how to call it, the curated code examples below should help; you can also read further about its parent module, sklearn.metrics.
The sections below present 4 code examples of metrics.label_ranking_loss, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
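Before the examples, here is a minimal, self-contained sketch of how label_ranking_loss is typically called; the y_true/y_score values are made up purely for illustration.

import numpy as np
from sklearn.metrics import label_ranking_loss

# Toy multilabel problem: 2 samples, 3 labels.
# y_true marks the relevant labels, y_score holds the predicted scores.
y_true = np.array([[1, 0, 0], [0, 0, 1]])
y_score = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])

# Average fraction of (relevant, irrelevant) label pairs that are ordered
# incorrectly; 0.0 means every relevant label outranks every irrelevant one.
print(label_ranking_loss(y_true, y_score))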
Example 1: test_ranking_appropriate_input_shape
# Required module import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import label_ranking_loss [as alias]
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
    assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
    assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
    assert_raises(ValueError, label_ranking_loss,
                  [[0, 1], [0, 1]], [[0], [1]])
    assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, label_ranking_loss,
                  [[0], [1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
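assert_raises here is a testing helper from the original test suite and is not shown in the excerpt. Outside that suite, the same shape check can be written with pytest; the snippet below is a sketch with a test name of my own choosing.

import pytest
from sklearn.metrics import label_ranking_loss

def test_shape_mismatch_raises():
    # Two rows in y_true but only one score row: shapes disagree,
    # so label_ranking_loss is expected to raise ValueError.
    with pytest.raises(ValueError):
        label_ranking_loss([[0, 1], [0, 1]], [[0, 1]])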
Example 2: test_ranking_loss_ties_handling
# Required module import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import label_ranking_loss [as alias]
def test_ranking_loss_ties_handling():
    # Tie handling
    assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
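These cases suggest that a tie between a relevant and an irrelevant label is counted as an incorrectly ordered pair. As a rough cross-check, the per-sample loss can be recomputed by brute-force pairwise comparison; the helper below (pairwise_ranking_loss is my own name, and this is not scikit-learn's actual algorithm) reproduces the first fractional case above.

import numpy as np
from sklearn.metrics import label_ranking_loss

def pairwise_ranking_loss(y_true, y_score):
    """Naive single-sample ranking loss: fraction of (relevant, irrelevant)
    label pairs where the relevant label does NOT score strictly higher.
    Assumes at least one relevant and one irrelevant label; sanity-check
    sketch only, sklearn uses a faster rank-based computation."""
    y_true, y_score = np.asarray(y_true), np.asarray(y_score)
    relevant = np.flatnonzero(y_true == 1)
    irrelevant = np.flatnonzero(y_true == 0)
    bad_pairs = sum(y_score[r] <= y_score[i]
                    for r in relevant for i in irrelevant)
    return bad_pairs / (len(relevant) * len(irrelevant))

print(pairwise_ranking_loss([0, 0, 1], [0.25, 0.5, 0.5]))    # 0.5
print(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]))   # 0.5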
Example 3: test_label_ranking_loss
# Required module import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import label_ranking_loss [as alias]
def test_label_ranking_loss():
    assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
    assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)

    assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
                        0)
    assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
                        0)
    assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
                        2 / 2)
    assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
                        2 / 2)

    # Undefined metrics - the ranking doesn't matter
    assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
    assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
    assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
    assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
    assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
                        0)
    assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
                        0)
    assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
                        0)
    assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)

    # Non-trivial case
    assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
                                           [[0.1, 10., -3], [0, 1, 3]]),
                        (0 + 2 / 2) / 2.)

    assert_almost_equal(label_ranking_loss(
        [[0, 1, 0], [1, 1, 0], [0, 1, 1]],
        [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
        (0 + 2 / 2 + 1 / 2) / 3.)

    assert_almost_equal(label_ranking_loss(
        [[0, 1, 0], [1, 1, 0], [0, 1, 1]],
        [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
        (0 + 2 / 2 + 1 / 2) / 3.)

    # Sparse csr matrices
    assert_almost_equal(label_ranking_loss(
        csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
        [[0.1, 10, -3], [3, 1, 3]]),
        (0 + 2 / 2) / 2.)
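For the second non-trivial assertion, the expected value (0 + 2/2 + 1/2) / 3 can be traced by hand; the sketch below spells out the per-sample pair counts (my own annotation, not part of the original test).

from sklearn.metrics import label_ranking_loss

y_true = [[0, 1, 0], [1, 1, 0], [0, 1, 1]]
y_score = [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]

# Sample 1: relevant label 1 (score 10) outranks both irrelevant labels -> 0 / 2
# Sample 2: relevant labels 0 and 1 (scores 0, 1) are both outranked by the
#           irrelevant label 2 (score 3)                                 -> 2 / 2
# Sample 3: relevant label 1 (score 2) outranks label 0, but relevant label 2
#           ties with irrelevant label 0 (both 0), which counts as wrong -> 1 / 2
print(label_ranking_loss(y_true, y_score))  # (0 + 2/2 + 1/2) / 3 = 0.5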
Example 4: evaluate
# Required module import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import label_ranking_loss [as alias]
def evaluate(experiment_path, meta_data=False, xml_dir="", train_dir="",
submission_file=""):
pickle_path = os.path.join(experiment_path, "predictions.pkl")
with open(pickle_path, 'rb') as input:
y_trues = pickle.load(input)
y_scores = pickle.load(input)
training_segments = pickle.load(input)
if meta_data:
elevation_scores = compute_elevation_scores(training_segments, xml_dir,
train_dir)
## Combine the scores using Bayes Thm.
normalize = np.array([np.sum(y_s * e_s) for y_s, e_s in zip(y_scores,
elevation_scores)])
y_scores = y_scores * elevation_scores / normalize[:, None]
if submission_file:
write_to_submission_file(submission_file, y_scores, training_segments,
train_dir)
return
map_score = mean_average_precision(y_trues, y_scores)
auroc_score = area_under_roc_curve(y_trues, y_scores)
# coverage error
coverage_error = metrics.coverage_error(y_trues, y_scores)
# label ranking average precision
lrap = metrics.label_ranking_average_precision_score(y_trues, y_scores)
# ranking loss
ranking_loss = metrics.label_ranking_loss(y_trues, y_scores)
print("")
print("- Top 1:", top_n(y_trues, y_scores, 1))
print("- Top 2:", top_n(y_trues, y_scores, 2))
print("- Top 3:", top_n(y_trues, y_scores, 3))
print("- Top 4:", top_n(y_trues, y_scores, 4))
print("- Top 5:", top_n(y_trues, y_scores, 5))
print("")
print("Mean Average Precision: ", map_score)
print("Area Under ROC Curve: ", auroc_score)
print("Coverage Error: ", coverage_error)
print("Label Ranking Average Precision: ", lrap)
print("Ranking Loss: ", ranking_loss)
print("Total predictions: ", len(y_scores))
return {
"map":map_score,
"auroc":auroc_score,
"coverage_error":coverage_error,
"lrap":lrap,
"ranking_loss": ranking_loss,
"top_1":top_n(y_trues, y_scores, 1),
"top_5":top_n(y_trues, y_scores, 5),
}
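The helpers mean_average_precision, area_under_roc_curve, top_n, compute_elevation_scores, and write_to_submission_file come from the surrounding project and are not shown in this excerpt. The scikit-learn part of the evaluation can still be reproduced on toy data; the snippet below is a self-contained sketch with made-up y_trues/y_scores standing in for the pickled predictions.

import numpy as np
from sklearn import metrics

# Made-up multilabel ground truth and scores, 3 samples x 3 labels.
y_trues = np.array([[1, 0, 0], [0, 1, 1], [0, 0, 1]])
y_scores = np.array([[0.8, 0.3, 0.1], [0.2, 0.6, 0.7], [0.4, 0.5, 0.9]])

print("Coverage Error: ", metrics.coverage_error(y_trues, y_scores))
print("Label Ranking Average Precision: ",
      metrics.label_ranking_average_precision_score(y_trues, y_scores))
print("Ranking Loss: ", metrics.label_ranking_loss(y_trues, y_scores))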