

Python metrics.accuracy_score Method Code Examples

This article collects typical usage examples of the Python method sklearn.metrics.accuracy_score. If you are wondering exactly how metrics.accuracy_score is used, or want to see it in real code, the curated examples below should help. You can also explore further usage examples from the sklearn.metrics module.


The following presents 15 code examples of the metrics.accuracy_score method, sorted by popularity by default.
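Before the individual examples, here is a minimal usage sketch (the toy labels are invented purely for illustration and are not taken from any of the projects below): accuracy_score compares two label sequences and returns the fraction of positions that match, or the raw count of matches when normalize=False.

# Minimal sketch: basic accuracy_score call on toy labels
from sklearn.metrics import accuracy_score

y_true = [0, 1, 2, 2, 1]   # ground-truth labels (toy data)
y_pred = [0, 1, 1, 2, 1]   # model predictions (toy data)
print(accuracy_score(y_true, y_pred))                   # 0.8 -> 4 of 5 labels match
print(accuracy_score(y_true, y_pred, normalize=False))  # 4   -> raw count of matches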

Example 1: classify_1nn

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def classify_1nn(data_train, data_test):
    '''
    Classification using 1NN
    Inputs: data_train, data_test: paths to the train and test csv files
    Outputs: predicted labels and accuracy
    '''
    import numpy as np
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.metrics import accuracy_score
    from sklearn.preprocessing import StandardScaler
    data = {'src': np.loadtxt(data_train, delimiter=','),
            'tar': np.loadtxt(data_test, delimiter=','),
            }
    Xs, Ys = data['src'][:, :-1], data['src'][:, -1]
    Xt, Yt = data['tar'][:, :-1], data['tar'][:, -1]
    Xs = StandardScaler(with_mean=False, with_std=True).fit_transform(Xs)
    Xt = StandardScaler(with_mean=False, with_std=True).fit_transform(Xt)
    clf = KNeighborsClassifier(n_neighbors=1)
    clf.fit(Xs, Ys)
    ypred = clf.predict(Xt)
    acc = accuracy_score(y_true=Yt, y_pred=ypred)
    print('Acc: {:.4f}'.format(acc))
    return ypred, acc 
Developer: jindongwang, Project: transferlearning, Lines: 24, Source: main.py

Example 2: calculate

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def calculate(self):
        if not self._predictions:
            raise PipelineError("You need to add predictions for calculating the accuracy first")

        y_pred = np.concatenate(self._predictions)
        y_true = np.concatenate(self._true_labels)

        if y_pred.shape[-1] == 1:
            # Binary classification
            y_pred = (y_pred >= self._border).astype("int")
        else:
            y_pred = np.argmax(y_pred, -1)

        if len(y_true.shape) != 1:
            y_true = np.argmax(y_true, -1)

        result = accuracy_score(y_true, y_pred)
        return {"accuracy": result} 
Developer: PavelOstyakov, Project: pipeline, Lines: 20, Source: accuracy.py

Example 3: classification_scores

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def classification_scores(gts, preds, labels):
    accuracy        = metrics.accuracy_score(gts,  preds)
    class_accuracies = []
    for lab in labels: # TODO Fix
        class_accuracies.append(metrics.accuracy_score(gts[gts == lab], preds[gts == lab]))
    class_accuracies = np.array(class_accuracies)

    f1_micro        = metrics.f1_score(gts,        preds, average='micro')
    precision_micro = metrics.precision_score(gts, preds, average='micro')
    recall_micro    = metrics.recall_score(gts,    preds, average='micro')
    f1_macro        = metrics.f1_score(gts,        preds, average='macro')
    precision_macro = metrics.precision_score(gts, preds, average='macro')
    recall_macro    = metrics.recall_score(gts,    preds, average='macro')

    # class wise score
    f1s        = metrics.f1_score(gts,        preds, average=None)
    precisions = metrics.precision_score(gts, preds, average=None)
    recalls    = metrics.recall_score(gts,    preds, average=None)

    confusion = metrics.confusion_matrix(gts,preds, labels=labels)

    #TODO confusion matrix, recall, precision
    return accuracy, f1_micro, precision_micro, recall_micro, f1_macro, precision_macro, recall_macro, confusion, class_accuracies, f1s, precisions, recalls 
Developer: ozan-oktay, Project: Attention-Gated-Networks, Lines: 25, Source: utils.py

Example 4: multi_class_classification

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def multi_class_classification(data_X,data_Y):
    '''
    calculate multi-class classification and return related evaluation metrics
    '''

    svc = svm.SVC(C=1, kernel='linear')
    # X_train, X_test, y_train, y_test = train_test_split( data_X, data_Y, test_size=0.4, random_state=0) 
    clf = svc.fit(data_X, data_Y) #svm
    # array = svc.coef_
    # print array
    predicted = cross_val_predict(clf, data_X, data_Y, cv=2)
    print "accuracy",metrics.accuracy_score(data_Y, predicted)
    print "f1 score macro",metrics.f1_score(data_Y, predicted, average='macro') 
    print "f1 score micro",metrics.f1_score(data_Y, predicted, average='micro') 
    print "precision score",metrics.precision_score(data_Y, predicted, average='macro') 
    print "recall score",metrics.recall_score(data_Y, predicted, average='macro') 
    print "hamming_loss",metrics.hamming_loss(data_Y, predicted)
    print "classification_report", metrics.classification_report(data_Y, predicted)
    print "jaccard_similarity_score", metrics.jaccard_similarity_score(data_Y, predicted)
    # print "log_loss", metrics.log_loss(data_Y, predicted)
    print "zero_one_loss", metrics.zero_one_loss(data_Y, predicted)
    # print "AUC&ROC",metrics.roc_auc_score(data_Y, predicted)
    # print "matthews_corrcoef", metrics.matthews_corrcoef(data_Y, predicted) 
Developer: RoyZhengGao, Project: edge2vec, Lines: 25, Source: multi_class_classification.py

Example 5: evaluation_analysis

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def evaluation_analysis(true_label,predicted): 
    '''
    return all metrics results
    '''
    print "accuracy",metrics.accuracy_score(true_label, predicted)
    print "f1 score macro",metrics.f1_score(true_label, predicted, average='macro')     
    print "f1 score micro",metrics.f1_score(true_label, predicted, average='micro') 
    print "precision score",metrics.precision_score(true_label, predicted, average='macro') 
    print "recall score",metrics.recall_score(true_label, predicted, average='macro') 
    print "hamming_loss",metrics.hamming_loss(true_label, predicted)
    print "classification_report", metrics.classification_report(true_label, predicted)
    print "jaccard_similarity_score", metrics.jaccard_similarity_score(true_label, predicted)
    print "log_loss", metrics.log_loss(true_label, predicted)
    print "zero_one_loss", metrics.zero_one_loss(true_label, predicted)
    print "AUC&ROC",metrics.roc_auc_score(true_label, predicted)
    print "matthews_corrcoef", metrics.matthews_corrcoef(true_label, predicted) 
Developer: RoyZhengGao, Project: edge2vec, Lines: 18, Source: link_prediction.py

Example 6: get_label_accuracy

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def get_label_accuracy(predictions_file_path, gold_labels_file_path, saved_model_path):
    with open(os.path.join(saved_model_path,
                           global_config.label_to_index_dict_file), 'r') as json_file:
        label_to_index_map = json.load(json_file)

    gold_labels = list()
    prediction_labels = list()

    with open(gold_labels_file_path) as gold_labels_file:
        for text_label in gold_labels_file:
            gold_labels.append(label_to_index_map[text_label.strip()])

    with open(predictions_file_path) as predictions_file:
        for label in predictions_file:
            prediction_labels.append(int(label.strip()))

    accuracy = metrics.accuracy_score(y_true=gold_labels, y_pred=prediction_labels)
    logger.info("Classification Accuracy: {}".format(accuracy)) 
Developer: vineetjohn, Project: linguistic-style-transfer, Lines: 20, Source: label_accuracy.py

Example 7: compute_scores

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def compute_scores(optimize):
  maml.restore()
  y_true = []
  y_pred = []
  losses = []
  for task in range(learner.n_training_tasks, n_tasks):
    learner.set_task_index(task)
    if optimize:
      maml.train_on_current_task(restore=True)
    inputs = learner.get_batch()
    loss, prediction = maml.predict_on_batch(inputs)
    y_true.append(inputs[1])
    y_pred.append(prediction[0][:, 0])
    losses.append(loss)
  y_true = np.concatenate(y_true)
  y_pred = np.concatenate(y_pred)
  print()
  print('Cross entropy loss:', np.mean(losses))
  print('Prediction accuracy:', accuracy_score(y_true, y_pred > 0.5))
  print('ROC AUC:', dc.metrics.roc_auc_score(y_true, y_pred))
  print() 
Developer: deepchem, Project: deepchem, Lines: 23, Source: toxcast_maml.py

Example 8: score_multiclass_classification

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def score_multiclass_classification(y, y_hat, report=True):
    """
    Create multiclass classification score
    :param y:
    :param y_hat:
    :return:
    """
    report_string = "---Multiclass Classification Score--- \n"
    report_string += classification_report(y, y_hat)
    score = accuracy_score(y, y_hat)
    report_string += "\nAccuracy = " + str(score)

    if report:
        print(report_string)

    return score, report_string 
Developer: mbernico, Project: snape, Lines: 18, Source: score_dataset.py

Example 9: test

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def test(data, model, optimizer, logger, config):
    test_batches = (data.DATA_SIZE[1] + config["batch_size"] - 1) // config["batch_size"]
    for param in model.parameters():
        param.requires_grad = False
    model.eval()

    prediction = np.zeros(data.DATA_SIZE[1], dtype=np.uint8)
    for i in range(test_batches):
        start = i * config["batch_size"]
        end = min((i + 1) * config["batch_size"], data.DATA_SIZE[1])
        inputs = Variable(torch.from_numpy(data.data_test[start:end, :]),
                          requires_grad=False).view(-1, 1, 45, 45)
        if config["cuda"] and torch.cuda.is_available():
            inputs = inputs.cuda()
        outputs = model(inputs)
        prediction[start:end] = np.argmax(outputs.data.cpu().numpy(), axis=1)

    print('Accuracy: %0.2f' % (100 * accuracy_score(data.label_test, prediction)))
    init_dir(config['output_dir'])
    np.save(os.path.join(config['output_dir'], '%s_pred.npy' % config['method']), prediction) 
Developer: cxy1997, Project: MNIST-baselines, Lines: 19, Source: drop_connect_trainer.py

Example 10: test

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def test(data, model, optimizer, logger, config):
    test_batches = (data.DATA_SIZE[1] + config["batch_size"] - 1) // config["batch_size"]
    for param in model.parameters():
        param.requires_grad = False
    model.eval()

    prediction = np.zeros(data.DATA_SIZE[1], dtype=np.uint8)
    for i in range(test_batches):
        start = i * config["batch_size"]
        end = min((i + 1) * config["batch_size"], data.DATA_SIZE[1])
        inputs = Variable(torch.from_numpy(data.data_test[start:end, :]),
                          requires_grad=False).view(-1, 1, 45, 45)
        if config["cuda"] and torch.cuda.is_available():
            inputs = inputs.cuda()
        outputs, probs = model(inputs)
        prediction[start:end] = np.argmax(probs.data.cpu().numpy(), axis=1)

    print('Accuracy: %0.2f' % (100 * accuracy_score(data.label_test, prediction)))
    init_dir(config['output_dir'])
    np.save(os.path.join(config['output_dir'], '%s_pred.npy' % config['method']), prediction) 
Developer: cxy1997, Project: MNIST-baselines, Lines: 19, Source: capsnet_trainer.py

Example 11: print_evaluation

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def print_evaluation(model,data,ls,log=None):
    features,actual = data
    predictions = predict(model, features, 500).data.numpy().reshape(-1).tolist()

    labels = [ls.idx[i] for i, _ in enumerate(ls.idx)]

    actual = [labels[i] for i in actual]
    predictions = [labels[i] for i in predictions]

    print(accuracy_score(actual, predictions))
    print(classification_report(actual, predictions))
    print(confusion_matrix(actual, predictions))

    data = zip(actual,predictions)
    if log is not None:
        f = open(log, "w+")
        for a,p in data:
            f.write(json.dumps({"actual": a, "predicted": p}) + "\n")
        f.close() 
Developer: sheffieldnlp, Project: fever-naacl-2018, Lines: 21, Source: run.py

Example 12: run_evaluate

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def run_evaluate(self, test):
        """Evaluates performance on test set

        Args:
            test: dataset that yields tuple of (sentences, relation tags)

        Returns:
            metrics: (dict) metrics["acc"] = 98.4, ...

        """
        y_true, y_pred = [], []
        for data in minibatches(test, self.config.batch_size):
            word_batch, pos1_batch, pos2_batch, pos_batch, y_batch = data
            relations_pred = self.predict_batch(word_batch, pos1_batch, pos2_batch, pos_batch)
            assert len(relations_pred) == len(y_batch)
            y_true += y_batch
            y_pred += relations_pred.tolist()

        acc = accuracy_score(y_true, y_pred)
        p   = precision_score(y_true, y_pred, average='macro')
        r   = recall_score(y_true, y_pred, average='macro')
        f1  = f1_score(y_true, y_pred, average='macro')

        return {"acc":acc, "p":p, "r":r, "f1":f1} 
Developer: pencoa, Project: PCNN, Lines: 26, Source: pcnn_model.py

Example 13: evaluate

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def evaluate(trueValues, predicted, decimals, note):
    print(note)
    label = 1
    avg = 'weighted'
    a = accuracy_score(trueValues, predicted)
    p = precision_score(trueValues, predicted, pos_label=label, average=avg)
    r = recall_score(trueValues, predicted, pos_label=label, average=avg)
    avg_f1 = f1_score(trueValues, predicted, pos_label=label, average=avg)
    fclasses = f1_score(trueValues, predicted, average=None)
    f1c1 = fclasses[0]
    f1c2 = fclasses[1]
    fw = (f1c1 + f1c2) / 2.0

    print('accuracy:\t', str(round(a, decimals)))
    print('precision:\t', str(round(p, decimals)))
    print('recall:\t', str(round(r, decimals)))
    print('avg f1:\t', str(round(avg_f1, decimals)))
    print('c1 f1:\t', str(round(f1c1, decimals)))
    print('c2 f1:\t', str(round(f1c2, decimals)))
    print('avg(c1,c2):\t', str(round(fw, decimals)))
    print('------------')

###################################################################################


# split a parallel or comparable corpus into two parts 
Developer: motazsaad, Project: comparable-text-miner, Lines: 27, Source: textpro.py

Example 14: evaluate

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def evaluate(config, model, data_iter, test=False):
    model.eval()
    loss_total = 0
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    with torch.no_grad():
        for texts, labels in data_iter:
            outputs = model(texts)
            loss = F.cross_entropy(outputs, labels)
            loss_total += loss
            labels = labels.data.cpu().numpy()
            predic = torch.max(outputs.data, 1)[1].cpu().numpy()
            labels_all = np.append(labels_all, labels)
            predict_all = np.append(predict_all, predic)

    acc = metrics.accuracy_score(labels_all, predict_all)
    if test:
        report = metrics.classification_report(labels_all, predict_all, target_names=config.class_list, digits=4)
        confusion = metrics.confusion_matrix(labels_all, predict_all)
        return acc, loss_total / len(data_iter), report, confusion
    return acc, loss_total / len(data_iter) 
Developer: 649453932, Project: Bert-Chinese-Text-Classification-Pytorch, Lines: 23, Source: train_eval.py

Example 15: evaluate_model

# Required module: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import accuracy_score [as alias]
def evaluate_model(model, iterator):
    all_preds = []
    all_y = []
    for idx,batch in enumerate(iterator):
        if torch.cuda.is_available():
            batch = [Variable(record).cuda() for record in batch]
        else:
            # keep tensors on the CPU when CUDA is unavailable
            batch = [Variable(record) for record in batch]
        x, y = batch
        y_pred = model(x)
        predicted = torch.max(y_pred.cpu().data, 1)[1]
        all_preds.extend(predicted.numpy())
        all_y.extend(y.cpu().numpy())
        
    score = accuracy_score(all_y, np.array(all_preds).flatten())
    return score 
Developer: AnubhavGupta3377, Project: Text-Classification-Models-Pytorch, Lines: 18, Source: utils.py


Note: The sklearn.metrics.accuracy_score examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright for the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not republish without permission.