

Python metrics.fbeta_score Method Code Examples

This article collects typical usage examples of the Python method sklearn.metrics.fbeta_score. If you are wondering what metrics.fbeta_score does, how to call it, or what real uses look like, the curated code examples below may help. You can also browse further usage examples from the containing module, sklearn.metrics.


The following presents 15 code examples of the metrics.fbeta_score method, sorted by popularity by default.
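As a quick orientation before the examples: fbeta_score computes the F-beta measure, the weighted harmonic mean of precision and recall, where beta > 1 favors recall and beta < 1 favors precision. The minimal sketch below uses made-up labels purely for illustration and is not taken from any of the projects that follow:

# Minimal, hypothetical usage sketch of sklearn.metrics.fbeta_score (toy data)
from sklearn.metrics import fbeta_score

y_true = [0, 1, 1, 0, 1, 1]   # toy ground-truth labels (made up)
y_pred = [0, 1, 0, 0, 1, 1]   # toy predictions (made up)

print(fbeta_score(y_true, y_pred, beta=2))    # F2: weights recall more than precision
print(fbeta_score(y_true, y_pred, beta=0.5))  # F0.5: weights precision more than recall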

Example 1: test_precision_recall_f1_no_labels

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def test_precision_recall_f1_no_labels(beta, average):
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)

    p, r, f, s = assert_warns(UndefinedMetricWarning,
                              precision_recall_fscore_support,
                              y_true, y_pred, average=average,
                              beta=beta)
    assert_almost_equal(p, 0)
    assert_almost_equal(r, 0)
    assert_almost_equal(f, 0)
    assert_equal(s, None)

    fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                         y_true, y_pred,
                         beta=beta, average=average)
    assert_almost_equal(fbeta, 0) 
Developer: PacktPublishing; Project: Mastering-Elasticsearch-7.0; Lines: 19; Source: test_classification.py

Example 2: test_precision_recall_f1_no_labels_average_none

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def test_precision_recall_f1_no_labels_average_none():
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)

    beta = 1

    # tp = [0, 0, 0]
    # fn = [0, 0, 0]
    # fp = [0, 0, 0]
    # support = [0, 0, 0]
    # |y_hat_i inter y_i | = [0, 0, 0]
    # |y_i| = [0, 0, 0]
    # |y_hat_i| = [0, 0, 0]

    p, r, f, s = assert_warns(UndefinedMetricWarning,
                              precision_recall_fscore_support,
                              y_true, y_pred, average=None, beta=beta)
    assert_array_almost_equal(p, [0, 0, 0], 2)
    assert_array_almost_equal(r, [0, 0, 0], 2)
    assert_array_almost_equal(f, [0, 0, 0], 2)
    assert_array_almost_equal(s, [0, 0, 0], 2)

    fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                         y_true, y_pred, beta=beta, average=None)
    assert_array_almost_equal(fbeta, [0, 0, 0], 2) 
Developer: PacktPublishing; Project: Mastering-Elasticsearch-7.0; Lines: 27; Source: test_classification.py

Example 3: test_prf_average_binary_data_non_binary

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def test_prf_average_binary_data_non_binary():
    # Error if user does not explicitly set non-binary average mode
    y_true_mc = [1, 2, 3, 3]
    y_pred_mc = [1, 2, 3, 1]
    msg_mc = ("Target is multiclass but average='binary'. Please "
              "choose another average setting, one of ["
              "None, 'micro', 'macro', 'weighted'].")
    y_true_ind = np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])
    y_pred_ind = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    msg_ind = ("Target is multilabel-indicator but average='binary'. Please "
               "choose another average setting, one of ["
               "None, 'micro', 'macro', 'weighted', 'samples'].")

    for y_true, y_pred, msg in [
        (y_true_mc, y_pred_mc, msg_mc),
        (y_true_ind, y_pred_ind, msg_ind),
    ]:
        for metric in [precision_score, recall_score, f1_score,
                       partial(fbeta_score, beta=2)]:
            assert_raise_message(ValueError, msg,
                                 metric, y_true, y_pred) 
Developer: PacktPublishing; Project: Mastering-Elasticsearch-7.0; Lines: 23; Source: test_classification.py

Example 4: score

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def score(self,
              actual: np.array,
              predicted: np.array,
              sample_weight: typing.Optional[np.array] = None,
              labels: typing.Optional[np.array] = None,
              **kwargs) -> float:
        lb = LabelEncoder()
        labels = lb.fit_transform(labels)
        actual = lb.transform(actual)
        method = "binary"
        if len(labels) > 2:
            predicted = np.argmax(predicted, axis=1)
            method = "micro"
        else:
            predicted = (predicted > self._threshold)
        f4_score = fbeta_score(actual, predicted, labels=labels, average=method, sample_weight=sample_weight, beta=4)
        return f4_score 
Developer: h2oai; Project: driverlessai-recipes; Lines: 19; Source: f4_score.py

Example 5: score

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def score(self,
              actual: np.array,
              predicted: np.array,
              sample_weight: typing.Optional[np.array] = None,
              labels: typing.Optional[np.array] = None,
              **kwargs) -> float:
        lb = LabelEncoder()
        labels = lb.fit_transform(labels)
        actual = lb.transform(actual)
        method = "binary"
        if len(labels) > 2:
            predicted = np.argmax(predicted, axis=1)
            method = "micro"
        else:
            predicted = (predicted > self._threshold)
        f3_score = fbeta_score(actual, predicted, labels=labels, average=method, sample_weight=sample_weight, beta=3)
        return f3_score 
Developer: h2oai; Project: driverlessai-recipes; Lines: 19; Source: f3_score.py
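Examples 4 and 5 differ only in the beta value (4 vs. 3); a larger beta pushes the score closer to recall. A hedged sketch with toy data, not part of the original recipes, illustrates the effect:

# Hedged sketch: larger beta shifts F-beta toward recall (toy data, not from the recipes above)
from sklearn.metrics import fbeta_score

y_true = [1, 1, 1, 1, 0, 0]
y_pred = [1, 1, 0, 0, 0, 0]   # precision = 1.0, recall = 0.5

for beta in (1, 3, 4):
    print(beta, fbeta_score(y_true, y_pred, beta=beta))
# The score moves from ~0.67 (F1) toward recall: ~0.53 for F3, ~0.52 for F4.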

Example 6: test_fbeta_op

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def test_fbeta_op(generator_fn, y_true_all, y_pred_all, pos_indices,
                  average, beta):
    # F-beta score on the whole dataset (reference value from scikit-learn)
    pr_sk = fbeta_score(
        y_true_all, y_pred_all, beta=beta, labels=pos_indices, average=average)

    # Create Tensorflow graph
    ds = tf.data.Dataset.from_generator(
        generator_fn, (tf.int32, tf.int32), ([None], [None]))
    y_true, y_pred = ds.make_one_shot_iterator().get_next()
    pr_tf = tf_metrics.fbeta(y_true, y_pred, 4, pos_indices,
                             average=average, beta=beta)

    with tf.Session() as sess:
        # Initialize and run the update op on each batch
        sess.run(tf.local_variables_initializer())
        while True:
            try:
                sess.run(pr_tf[1])
            except OutOfRangeError as e:
                break

        # Check final value
        assert np.allclose(sess.run(pr_tf[0]), pr_sk) 
Developer: guillaumegenthial; Project: tf_metrics; Lines: 26; Source: test_fbeta.py

Example 7: optimise_f2_thresholds

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def optimise_f2_thresholds(y, p, verbose=True, resolution=100):
    def mf(x):
        p2 = np.zeros_like(p)
        for i in range(17):
            p2[:, i] = (p[:, i] > x[i]).astype(int)  # np.int was removed in NumPy >= 1.24; the builtin int is equivalent
        score = fbeta_score(y, p2, beta=2, average='samples')
        return score

    x = [0.2] * 17
    for i in range(17):
        best_i2 = 0
        best_score = 0
        for i2 in range(resolution):
            i2 /= resolution
            x[i] = i2
            score = mf(x)
            if score > best_score:
                best_i2 = i2
                best_score = score
        x[i] = best_i2
        if verbose:
            print(i, best_i2, best_score)

    return x 
Developer: mctigger; Project: KagglePlanetPytorch; Lines: 26; Source: find_best_threshold.py

Example 8: test_fscore_warnings

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def test_fscore_warnings():
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')

        for score in [f1_score, partial(fbeta_score, beta=2)]:
            score(np.array([[1, 1], [1, 1]]),
                  np.array([[0, 0], [0, 0]]),
                  average='micro')
            assert_equal(str(record.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no predicted samples.')
            score(np.array([[0, 0], [0, 0]]),
                  np.array([[1, 1], [1, 1]]),
                  average='micro')
            assert_equal(str(record.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no true samples.') 
Developer: alvarobartt; Project: twitter-stock-recommendation; Lines: 20; Source: test_classification.py

Example 9: fbeta_score

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def fbeta_score(self, beta, idx):
        beta_2 = np.power(beta, 2)
        precision = self.precision(idx)
        recall = self.recall(idx)
        nom = (1 + beta_2) * precision * recall
        den = (beta_2 * precision) + recall
        if den == 0 or np.isnan(den):  # note: 'den == np.nan' is always False; np.isnan is the intended check
            return 0
        else:
            return nom / den 
Developer: uber; Project: ludwig; Lines: 12; Source: metrics_utils.py
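Example 9 implements the closed form F_beta = (1 + beta^2) * precision * recall / (beta^2 * precision + recall) directly. A hedged check with toy numbers (not taken from Ludwig) confirms it agrees with sklearn.metrics.fbeta_score:

# Hedged sketch: the closed-form F-beta agrees with sklearn (toy data)
import numpy as np
from sklearn.metrics import fbeta_score, precision_score, recall_score

y_true = [0, 1, 1, 1, 0, 1]
y_pred = [0, 1, 0, 1, 1, 1]
beta = 2

p = precision_score(y_true, y_pred)
r = recall_score(y_true, y_pred)
manual = (1 + beta ** 2) * p * r / (beta ** 2 * p + r)
assert np.isclose(manual, fbeta_score(y_true, y_pred, beta=beta))  # both equal 0.75 here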

Example 10: f1_score

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def f1_score(self, idx):
        return self.fbeta_score(1, idx) 
Developer: uber; Project: ludwig; Lines: 4; Source: metrics_utils.py

Example 11: avg_fbeta_score

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def avg_fbeta_score(self, beta, average='macro'):
        return metrics.fbeta_score(self.conditions, self.predictions, beta=beta,
                                   average=average) 
Developer: uber; Project: ludwig; Lines: 5; Source: metrics_utils.py

Example 12: test_precision_recall_f1_score_binary

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def test_precision_recall_f1_score_binary():
    # Test Precision Recall and F1 Score for binary classification task
    y_true, y_pred, _ = make_prediction(binary=True)

    # detailed measures for each class
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.73, 0.85], 2)
    assert_array_almost_equal(r, [0.88, 0.68], 2)
    assert_array_almost_equal(f, [0.80, 0.76], 2)
    assert_array_equal(s, [25, 25])

    # individual scoring function that can be used for grid search: in the
    # binary class case the score is the value of the measure for the positive
    # class (e.g. label == 1). This is deprecated for average != 'binary'.
    for kwargs, my_assert in [({}, assert_no_warnings),
                              ({'average': 'binary'}, assert_no_warnings)]:
        ps = my_assert(precision_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(ps, 0.85, 2)

        rs = my_assert(recall_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(rs, 0.68, 2)

        fs = my_assert(f1_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(fs, 0.76, 2)

        assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
                                      **kwargs),
                            (1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2) 
Developer: PacktPublishing; Project: Mastering-Elasticsearch-7.0; Lines: 30; Source: test_classification.py

Example 13: test_fbeta

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def test_fbeta(generator_fn, pos_indices, average, beta):
    for y_true, y_pred in generator_fn():
        pr_tf = tf_metrics.fbeta(
            y_true, y_pred, 4, pos_indices, average=average, beta=beta)
        pr_sk = fbeta_score(
            y_true, y_pred, beta=beta, labels=pos_indices, average=average)
        with tf.Session() as sess:
            sess.run(tf.local_variables_initializer())
            assert np.allclose(sess.run(pr_tf[1]), pr_sk) 
Developer: guillaumegenthial; Project: tf_metrics; Lines: 11; Source: test_fbeta.py

Example 14: f2_score

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def f2_score(output, target, threshold):
    output = (output > threshold)
    return fbeta_score(target, output, beta=2, average='samples') 
Developer: rwightman; Project: pytorch-planet-amazon; Lines: 5; Source: validate.py

Example 15: optimise_f2_thresholds

# Required imports: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import fbeta_score [as alias]
def optimise_f2_thresholds(y, p, verbose=True, resolution=100):
    """ Find optimal threshold values for f2 score. Thanks Anokas
    https://www.kaggle.com/c/planet-understanding-the-amazon-from-space/discussion/32475
    """
    size = y.shape[1]

    def mf(x):
        p2 = np.zeros_like(p)
        for i in range(size):
            p2[:, i] = (p[:, i] > x[i]).astype(int)  # np.int was removed in NumPy >= 1.24; the builtin int is equivalent
        score = fbeta_score(y, p2, beta=2, average='samples')
        return score

    x = [0.2] * size
    for i in range(size):
        best_i2 = 0
        best_score = 0
        for i2 in range(resolution):
            i2 /= resolution
            x[i] = i2
            score = mf(x)
            if score > best_score:
                best_i2 = i2
                best_score = score
        x[i] = best_i2
        if verbose:
            print(i, best_i2, best_score)

    return x, best_score 
Developer: rwightman; Project: pytorch-planet-amazon; Lines: 31; Source: validate.py
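A hedged usage sketch of the per-class threshold search above, on synthetic multilabel data (the shapes and probabilities are invented; it assumes optimise_f2_thresholds from Example 15 and its numpy/sklearn imports are in scope):

# Hedged usage sketch for optimise_f2_thresholds (synthetic data, assumes the function above is defined)
import numpy as np

rng = np.random.RandomState(0)
y = (rng.rand(200, 17) > 0.6).astype(int)              # synthetic multilabel targets
p = np.clip(y * 0.6 + rng.rand(200, 17) * 0.4, 0, 1)   # synthetic per-class probabilities

thresholds, best = optimise_f2_thresholds(y, p, verbose=False, resolution=20)
print(thresholds[:5], best)   # one learned threshold per class, plus the best F2 found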


Note: The sklearn.metrics.fbeta_score examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, who retain copyright of the source code; please consult each project's License before distributing or using the code. Do not reproduce this article without permission.