

Python metrics.f1_score Method Code Examples

This article collects typical usage examples of the sklearn.metrics.f1_score method in Python. If you are wondering what metrics.f1_score does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from its parent module, sklearn.metrics.


The following presents 15 code examples of the metrics.f1_score method, sorted by popularity by default.
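
Before the project examples, here is a minimal, self-contained sketch (not drawn from any of the projects below; the toy label lists are invented for illustration) of the most common averaging modes of sklearn.metrics.f1_score:

from sklearn.metrics import f1_score

# Toy ground-truth and predicted labels, for illustration only.
y_true = [0, 1, 2, 2, 1, 0]
y_pred = [0, 2, 2, 2, 1, 1]

print(f1_score(y_true, y_pred, average='micro'))  # F1 from global TP/FP/FN counts
print(f1_score(y_true, y_pred, average='macro'))  # unweighted mean of per-class F1
print(f1_score(y_true, y_pred, average=None))     # per-class F1 scores as an array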

Example 1: classification_scores

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def classification_scores(gts, preds, labels):
    accuracy        = metrics.accuracy_score(gts,  preds)
    class_accuracies = []
    for lab in labels: # TODO Fix
        class_accuracies.append(metrics.accuracy_score(gts[gts == lab], preds[gts == lab]))
    class_accuracies = np.array(class_accuracies)

    f1_micro        = metrics.f1_score(gts,        preds, average='micro')
    precision_micro = metrics.precision_score(gts, preds, average='micro')
    recall_micro    = metrics.recall_score(gts,    preds, average='micro')
    f1_macro        = metrics.f1_score(gts,        preds, average='macro')
    precision_macro = metrics.precision_score(gts, preds, average='macro')
    recall_macro    = metrics.recall_score(gts,    preds, average='macro')

    # class wise score
    f1s        = metrics.f1_score(gts,        preds, average=None)
    precisions = metrics.precision_score(gts, preds, average=None)
    recalls    = metrics.recall_score(gts,    preds, average=None)

    confusion = metrics.confusion_matrix(gts,preds, labels=labels)

    #TODO confusion matrix, recall, precision
    return accuracy, f1_micro, precision_micro, recall_micro, f1_macro, precision_macro, recall_macro, confusion, class_accuracies, f1s, precisions, recalls 
Author: ozan-oktay | Project: Attention-Gated-Networks | Lines: 25 | Source: utils.py
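
A hypothetical call to classification_scores could look like the sketch below; the arrays and label list are made up for illustration, and it assumes the function above is in scope with numpy imported as np and sklearn.metrics as metrics:

import numpy as np
from sklearn import metrics

gts    = np.array([0, 0, 1, 1, 2, 2])   # invented ground-truth labels
preds  = np.array([0, 1, 1, 1, 2, 0])   # invented predictions
labels = [0, 1, 2]

(accuracy, f1_micro, precision_micro, recall_micro,
 f1_macro, precision_macro, recall_macro, confusion,
 class_accuracies, f1s, precisions, recalls) = classification_scores(gts, preds, labels)
print(accuracy, f1_micro, f1_macro)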

Example 2: eval_class

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def eval_class(ids_to_eval, model, z_obs):
    """
    Evaluate the model's classification performance.

    Parameters
    ----------
    ids_to_eval: np.array
        The indices of the nodes whose predictions will be evaluated.

    model: GCN
        The model to evaluate.

    z_obs: np.array
        The labels of the nodes in ids_to_eval

    Returns
    -------
    [f1_micro, f1_macro] scores

    """
    test_pred = model.predictions.eval(session=model.session, feed_dict={model.node_ids: ids_to_eval}).argmax(1)
    test_real = z_obs[ids_to_eval]

    return f1_score(test_real, test_pred, average='micro'), f1_score(test_real, test_pred, average='macro') 
Author: danielzuegner | Project: nettack | Lines: 26 | Source: GCN.py

Example 3: multi_class_classification

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def multi_class_classification(data_X,data_Y):
    '''
    calculate multi-class classification and return related evaluation metrics
    '''

    svc = svm.SVC(C=1, kernel='linear')
    # X_train, X_test, y_train, y_test = train_test_split( data_X, data_Y, test_size=0.4, random_state=0) 
    clf = svc.fit(data_X, data_Y) #svm
    # array = svc.coef_
    # print(array)
    predicted = cross_val_predict(clf, data_X, data_Y, cv=2)
    print("accuracy", metrics.accuracy_score(data_Y, predicted))
    print("f1 score macro", metrics.f1_score(data_Y, predicted, average='macro'))
    print("f1 score micro", metrics.f1_score(data_Y, predicted, average='micro'))
    print("precision score", metrics.precision_score(data_Y, predicted, average='macro'))
    print("recall score", metrics.recall_score(data_Y, predicted, average='macro'))
    print("hamming_loss", metrics.hamming_loss(data_Y, predicted))
    print("classification_report", metrics.classification_report(data_Y, predicted))
    print("jaccard_similarity_score", metrics.jaccard_similarity_score(data_Y, predicted))
    # print("log_loss", metrics.log_loss(data_Y, predicted))
    print("zero_one_loss", metrics.zero_one_loss(data_Y, predicted))
    # print("AUC&ROC", metrics.roc_auc_score(data_Y, predicted))
    # print("matthews_corrcoef", metrics.matthews_corrcoef(data_Y, predicted)) 
Author: RoyZhengGao | Project: edge2vec | Lines: 25 | Source: multi_class_classification.py
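
One caveat for newer environments: metrics.jaccard_similarity_score (used above and in Example 4) was deprecated in scikit-learn 0.21 and removed in later releases. On recent versions the closest replacement is metrics.jaccard_score with an explicit average argument; a minimal, self-contained sketch with invented labels:

from sklearn import metrics

y_true = [0, 1, 2, 2]   # invented labels for illustration
y_pred = [0, 2, 2, 2]
print(metrics.jaccard_score(y_true, y_pred, average='macro'))

Note the behaviour is not identical: for multiclass label vectors the old function reduced to plain accuracy, while jaccard_score computes a per-class Jaccard index before averaging.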

Example 4: evaluation_analysis

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def evaluation_analysis(true_label,predicted): 
    '''
    return all metrics results
    '''
    print "accuracy",metrics.accuracy_score(true_label, predicted)
    print "f1 score macro",metrics.f1_score(true_label, predicted, average='macro')     
    print "f1 score micro",metrics.f1_score(true_label, predicted, average='micro') 
    print "precision score",metrics.precision_score(true_label, predicted, average='macro') 
    print "recall score",metrics.recall_score(true_label, predicted, average='macro') 
    print "hamming_loss",metrics.hamming_loss(true_label, predicted)
    print "classification_report", metrics.classification_report(true_label, predicted)
    print "jaccard_similarity_score", metrics.jaccard_similarity_score(true_label, predicted)
    print "log_loss", metrics.log_loss(true_label, predicted)
    print "zero_one_loss", metrics.zero_one_loss(true_label, predicted)
    print "AUC&ROC",metrics.roc_auc_score(true_label, predicted)
    print "matthews_corrcoef", metrics.matthews_corrcoef(true_label, predicted) 
Author: RoyZhengGao | Project: edge2vec | Lines: 18 | Source: link_prediction.py
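
Two of the calls above deserve a caveat: metrics.roc_auc_score and metrics.log_loss are meant to receive continuous scores or class-probability estimates rather than hard 0/1 predictions; feeding them hard labels (as the predicted array here) only works for binary targets and throws away ranking information. A minimal, self-contained sketch (with invented data and a plain LogisticRegression standing in for the project's own model) of passing probabilities instead:

from sklearn import metrics
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, random_state=0)       # illustrative data
proba = LogisticRegression().fit(X, y).predict_proba(X)[:, 1]   # P(class 1)

print("AUC&ROC", metrics.roc_auc_score(y, proba))
print("log_loss", metrics.log_loss(y, proba))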

Example 5: test

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def test(self, z, pos_edge_index, neg_edge_index):
        """Evaluates node embeddings :obj:`z` on positive and negative test
        edges by computing AUC and F1 scores.

        Args:
            z (Tensor): The node embeddings.
            pos_edge_index (LongTensor): The positive edge indices.
            neg_edge_index (LongTensor): The negative edge indices.
        """
        with torch.no_grad():
            pos_p = self.discriminate(z, pos_edge_index)[:, :2].max(dim=1)[1]
            neg_p = self.discriminate(z, neg_edge_index)[:, :2].max(dim=1)[1]
        pred = (1 - torch.cat([pos_p, neg_p])).cpu()
        y = torch.cat(
            [pred.new_ones((pos_p.size(0))),
             pred.new_zeros(neg_p.size(0))])
        pred, y = pred.numpy(), y.numpy()

        auc = roc_auc_score(y, pred)
        f1 = f1_score(y, pred, average='binary') if pred.sum() > 0 else 0

        return auc, f1 
Author: rusty1s | Project: pytorch_geometric | Lines: 24 | Source: signed_gcn.py

Example 6: test_classification_2classes_small

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def test_classification_2classes_small():
    X, y = make_classification(n_samples=1000,
                               n_features=10,
                               n_classes=2,
                               n_clusters_per_class=1,
                               random_state=0)
    X = pd.DataFrame(X)
    y = pd.Series(y)
    cls = MALSS('classification').fit(X, y,
                                      'test_classification_2classes_small')
    cls.generate_module_sample()

    from sklearn.metrics import f1_score
    pred = cls.predict(X)
    print(f1_score(y, pred, average=None))

    assert len(cls.algorithms) == 5
    assert cls.algorithms[0].best_score is not None 
Author: canard0328 | Project: malss | Lines: 20 | Source: test.py

Example 7: test_classification_2classes_small_jp

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def test_classification_2classes_small_jp():
    X, y = make_classification(n_samples=1000,
                               n_features=10,
                               n_classes=2,
                               n_clusters_per_class=1,
                               random_state=0)
    X = pd.DataFrame(X)
    y = pd.Series(y)
    cls = MALSS('classification',
                lang='jp').fit(X, y, 'test_classification_2classes_small_jp')
    cls.generate_module_sample()

    from sklearn.metrics import f1_score
    pred = cls.predict(X)
    print(f1_score(y, pred, average=None))

    assert len(cls.algorithms) == 5
    assert cls.algorithms[0].best_score is not None 
Author: canard0328 | Project: malss | Lines: 20 | Source: test.py

Example 8: test_classification_multiclass_small

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def test_classification_multiclass_small():
    X, y = make_classification(n_samples=1000,
                               n_features=10,
                               n_classes=3,
                               n_clusters_per_class=1,
                               random_state=0)
    X = pd.DataFrame(X)
    y = pd.Series(y)
    cls = MALSS('classification').fit(X, y,
                                      'test_classification_multiclass_small')
    cls.generate_module_sample()

    from sklearn.metrics import f1_score
    pred = cls.predict(X)
    print(f1_score(y, pred, average=None))

    assert len(cls.algorithms) == 5
    assert cls.algorithms[0].best_score is not None 
Author: canard0328 | Project: malss | Lines: 20 | Source: test.py

Example 9: test_classification_2classes_medium

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def test_classification_2classes_medium():
    X, y = make_classification(n_samples=100000,
                               n_features=10,
                               n_classes=2,
                               n_clusters_per_class=1,
                               random_state=0)
    X = pd.DataFrame(X)
    y = pd.Series(y)
    cls = MALSS('classification').fit(X, y,
                                      'test_classification_2classes_medium')

    from sklearn.metrics import f1_score
    pred = cls.predict(X)
    print(f1_score(y, pred, average=None))

    assert len(cls.algorithms) == 4
    assert cls.algorithms[0].best_score is not None 
Author: canard0328 | Project: malss | Lines: 19 | Source: test.py

Example 10: test_classification_2classes_big

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def test_classification_2classes_big():
    X, y = make_classification(n_samples=200000,
                               n_features=20,
                               n_classes=2,
                               n_clusters_per_class=1,
                               random_state=0)
    X = pd.DataFrame(X)
    y = pd.Series(y)
    cls = MALSS('classification').fit(X, y,
                                      'test_classification_2classes_big')
    cls.generate_module_sample()

    from sklearn.metrics import f1_score
    pred = cls.predict(X)
    print(f1_score(y, pred, average=None))

    assert len(cls.algorithms) == 1
    assert cls.algorithms[0].best_score is not None 
Author: canard0328 | Project: malss | Lines: 20 | Source: test.py

Example 11: test_ndarray

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def test_ndarray():
    data = pd.read_csv('http://faculty.marshall.usc.edu/gareth-james/ISL/Heart.csv',
                       index_col=0, na_values=[''])

    y = data['AHD']
    del data['AHD']

    cls = MALSS('classification').fit(np.array(data), np.array(y),
                                      'test_ndarray')
    cls.generate_module_sample()

    from sklearn.metrics import f1_score
    pred = cls.predict(np.array(data))
    print(f1_score(y, pred, average=None))

    assert len(cls.algorithms) == 5
    assert cls.algorithms[0].best_score is not None 
Author: canard0328 | Project: malss | Lines: 19 | Source: test.py

Example 12: f1_score

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def f1_score(y_true, y_pred):
	"""
	Compute the micro f(b) score with b=1.
	"""
	y_true = tf.cast(y_true, "float32")
	y_pred = tf.cast(tf.round(y_pred), "float32") # implicit 0.5 threshold via tf.round
	y_correct = y_true * y_pred


	sum_true = tf.reduce_sum(y_true, axis=1)
	sum_pred = tf.reduce_sum(y_pred, axis=1)
	sum_correct = tf.reduce_sum(y_correct, axis=1)


	precision = sum_correct / sum_pred
	recall = sum_correct / sum_true
	f_score = 2 * precision * recall / (precision + recall)
	f_score = tf.where(tf.is_nan(f_score), tf.zeros_like(f_score), f_score)


	return tf.reduce_mean(f_score) 
Author: AlexGidiotis | Project: Document-Classifier-LSTM | Lines: 23 | Source: classifier.py
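
Two remarks on this example. First, tf.round and tf.is_nan reflect the TensorFlow 1.x API (in TensorFlow 2.x the latter lives at tf.math.is_nan). Second, although the docstring describes a micro F1, precision and recall are computed per row (axis=1) and then averaged over the batch, which for multi-label targets corresponds to scikit-learn's average='samples' rather than a true micro average. A minimal reference computation under that reading, with invented multi-label matrices:

import numpy as np
from sklearn.metrics import f1_score

# Invented multi-label ground truth and thresholded predictions (rows = samples).
y_true = np.array([[1, 0, 1], [0, 1, 0]])
y_pred = np.array([[1, 0, 0], [0, 1, 1]])

print(f1_score(y_true, y_pred, average='samples'))  # per-sample F1, then averaged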

Example 13: load_model

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def load_model(stamp):
	"""
	"""

	json_file = open(stamp+'.json', 'r')
	loaded_model_json = json_file.read()
	json_file.close()
	model = model_from_json(loaded_model_json, {'AttentionWithContext': AttentionWithContext})

	model.load_weights(stamp+'.h5')
	print("Loaded model from disk")

	model.summary()


	adam = Adam(lr=0.001)
	model.compile(loss='binary_crossentropy',
		optimizer=adam,
		metrics=[f1_score])


	return model 
Author: AlexGidiotis | Project: Document-Classifier-LSTM | Lines: 24 | Source: classifier.py

Example 14: get_all_metrics

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def get_all_metrics(model, eval_data, eval_labels, pred_labels):
    fpr, tpr, thresholds_keras = roc_curve(eval_labels, pred_labels)
    auc_ = auc(fpr, tpr)
    print("auc_keras:" + str(auc_))

    score = model.evaluate(eval_data, eval_labels, verbose=0)
    print("Test accuracy: " + str(score[1]))

    precision = precision_score(eval_labels, pred_labels)
    print('Precision score: {0:0.2f}'.format(precision))

    recall = recall_score(eval_labels, pred_labels)
    print('Recall score: {0:0.2f}'.format(recall))

    f1 = f1_score(eval_labels, pred_labels)
    print('F1 score: {0:0.2f}'.format(f1))

    average_precision = average_precision_score(eval_labels, pred_labels)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))

    return auc_, score[1], precision, recall, f1, average_precision, fpr, tpr 
Author: tushartushar | Project: DeepLearningSmells | Lines: 23 | Source: metrics_util.py

Example 15: get_all_metrics_

# Required module: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import f1_score [as alias]
def get_all_metrics_(eval_labels, pred_labels):
    fpr, tpr, thresholds_keras = roc_curve(eval_labels, pred_labels)
    auc_ = auc(fpr, tpr)
    print("auc_keras:" + str(auc_))

    precision = precision_score(eval_labels, pred_labels)
    print('Precision score: {0:0.2f}'.format(precision))

    recall = recall_score(eval_labels, pred_labels)
    print('Recall score: {0:0.2f}'.format(recall))

    f1 = f1_score(eval_labels, pred_labels)
    print('F1 score: {0:0.2f}'.format(f1))

    average_precision = average_precision_score(eval_labels, pred_labels)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))

    return auc_, precision, recall, f1, average_precision, fpr, tpr 
Author: tushartushar | Project: DeepLearningSmells | Lines: 20 | Source: metrics_util.py


Note: the sklearn.metrics.f1_score examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. When redistributing or using the code, please refer to the corresponding project's license, and do not republish without permission.