

Python metrics.accuracy_score Method Code Examples

This article collects and summarizes typical usage examples of the sklearn.metrics.accuracy_score method in Python. If you are wondering what metrics.accuracy_score does, how to call it, or what a working example looks like, the curated code examples below may help. You can also explore further usage examples from the sklearn.metrics module.


The following presents 15 code examples of the metrics.accuracy_score method, sorted by popularity by default.
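Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what accuracy_score computes: the fraction of predictions that exactly match the true labels, or the raw number of correct predictions when normalize=False.

from sklearn.metrics import accuracy_score

y_true = [0, 1, 2, 2, 1]
y_pred = [0, 1, 1, 2, 1]

# Fraction of exact matches: 4 of the 5 labels agree -> 0.8
print(accuracy_score(y_true, y_pred))

# Raw count of correct predictions instead of a fraction -> 4
print(accuracy_score(y_true, y_pred, normalize=False))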

Example 1: classify_1nn

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def classify_1nn(data_train, data_test):
    '''
    Classification using 1NN
    Inputs: data_train, data_test: train and test csv file paths
    Outputs: prediction (ypred) and accuracy
    '''
    import numpy as np
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.metrics import accuracy_score
    from sklearn.preprocessing import StandardScaler
    data = {'src': np.loadtxt(data_train, delimiter=','),
            'tar': np.loadtxt(data_test, delimiter=','),
            }
    # The last column holds the label; the remaining columns are features
    Xs, Ys = data['src'][:, :-1], data['src'][:, -1]
    Xt, Yt = data['tar'][:, :-1], data['tar'][:, -1]
    Xs = StandardScaler(with_mean=0, with_std=1).fit_transform(Xs)
    Xt = StandardScaler(with_mean=0, with_std=1).fit_transform(Xt)
    clf = KNeighborsClassifier(n_neighbors=1)
    clf.fit(Xs, Ys)
    ypred = clf.predict(Xt)
    acc = accuracy_score(y_true=Yt, y_pred=ypred)
    print('Acc: {:.4f}'.format(acc))
    return ypred, acc 
Developer: jindongwang, Project: transferlearning, Lines of code: 24, Source file: main.py

Example 2: calculate

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def calculate(self):
        if not self._predictions:
            raise PipelineError("You need to add predictions for calculating the accuracy first")

        y_pred = np.concatenate(self._predictions)
        y_true = np.concatenate(self._true_labels)

        if y_pred.shape[-1] == 1:
            # Binary classification
            y_pred = (y_pred >= self._border).astype("int")
        else:
            y_pred = np.argmax(y_pred, -1)

        if len(y_true.shape) != 1:
            y_true = np.argmax(y_true, -1)

        result = accuracy_score(y_true, y_pred)
        return {"accuracy": result} 
Developer: PavelOstyakov, Project: pipeline, Lines of code: 20, Source file: accuracy.py

Example 3: classification_scores

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def classification_scores(gts, preds, labels):
    accuracy        = metrics.accuracy_score(gts,  preds)
    class_accuracies = []
    for lab in labels: # TODO Fix
        class_accuracies.append(metrics.accuracy_score(gts[gts == lab], preds[gts == lab]))
    class_accuracies = np.array(class_accuracies)

    f1_micro        = metrics.f1_score(gts,        preds, average='micro')
    precision_micro = metrics.precision_score(gts, preds, average='micro')
    recall_micro    = metrics.recall_score(gts,    preds, average='micro')
    f1_macro        = metrics.f1_score(gts,        preds, average='macro')
    precision_macro = metrics.precision_score(gts, preds, average='macro')
    recall_macro    = metrics.recall_score(gts,    preds, average='macro')

    # class wise score
    f1s        = metrics.f1_score(gts,        preds, average=None)
    precisions = metrics.precision_score(gts, preds, average=None)
    recalls    = metrics.recall_score(gts,    preds, average=None)

    confusion = metrics.confusion_matrix(gts,preds, labels=labels)

    #TODO confusion matrix, recall, precision
    return accuracy, f1_micro, precision_micro, recall_micro, f1_macro, precision_macro, recall_macro, confusion, class_accuracies, f1s, precisions, recalls 
Developer: ozan-oktay, Project: Attention-Gated-Networks, Lines of code: 25, Source file: utils.py

Example 4: multi_class_classification

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def multi_class_classification(data_X, data_Y):
    '''
    Run multi-class classification (linear SVM + 2-fold cross-validated predictions)
    and print the related evaluation metrics.
    '''

    svc = svm.SVC(C=1, kernel='linear')
    # X_train, X_test, y_train, y_test = train_test_split(data_X, data_Y, test_size=0.4, random_state=0)
    clf = svc.fit(data_X, data_Y)  # svm
    # array = svc.coef_
    # print(array)
    predicted = cross_val_predict(clf, data_X, data_Y, cv=2)
    print("accuracy", metrics.accuracy_score(data_Y, predicted))
    print("f1 score macro", metrics.f1_score(data_Y, predicted, average='macro'))
    print("f1 score micro", metrics.f1_score(data_Y, predicted, average='micro'))
    print("precision score", metrics.precision_score(data_Y, predicted, average='macro'))
    print("recall score", metrics.recall_score(data_Y, predicted, average='macro'))
    print("hamming_loss", metrics.hamming_loss(data_Y, predicted))
    print("classification_report", metrics.classification_report(data_Y, predicted))
    # Note: jaccard_similarity_score was removed in newer scikit-learn releases; use metrics.jaccard_score there
    print("jaccard_similarity_score", metrics.jaccard_similarity_score(data_Y, predicted))
    # print("log_loss", metrics.log_loss(data_Y, predicted))
    print("zero_one_loss", metrics.zero_one_loss(data_Y, predicted))
    # print("AUC&ROC", metrics.roc_auc_score(data_Y, predicted))
    # print("matthews_corrcoef", metrics.matthews_corrcoef(data_Y, predicted))
Developer: RoyZhengGao, Project: edge2vec, Lines of code: 25, Source file: multi_class_classification.py

Example 5: evaluation_analysis

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def evaluation_analysis(true_label, predicted):
    '''
    Print all metric results.
    '''
    print("accuracy", metrics.accuracy_score(true_label, predicted))
    print("f1 score macro", metrics.f1_score(true_label, predicted, average='macro'))
    print("f1 score micro", metrics.f1_score(true_label, predicted, average='micro'))
    print("precision score", metrics.precision_score(true_label, predicted, average='macro'))
    print("recall score", metrics.recall_score(true_label, predicted, average='macro'))
    print("hamming_loss", metrics.hamming_loss(true_label, predicted))
    print("classification_report", metrics.classification_report(true_label, predicted))
    # Note: jaccard_similarity_score was removed in newer scikit-learn releases; use metrics.jaccard_score there
    print("jaccard_similarity_score", metrics.jaccard_similarity_score(true_label, predicted))
    print("log_loss", metrics.log_loss(true_label, predicted))
    print("zero_one_loss", metrics.zero_one_loss(true_label, predicted))
    print("AUC&ROC", metrics.roc_auc_score(true_label, predicted))
    print("matthews_corrcoef", metrics.matthews_corrcoef(true_label, predicted))
Developer: RoyZhengGao, Project: edge2vec, Lines of code: 18, Source file: link_prediction.py

Example 6: get_label_accuracy

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def get_label_accuracy(predictions_file_path, gold_labels_file_path, saved_model_path):
    with open(os.path.join(saved_model_path,
                           global_config.label_to_index_dict_file), 'r') as json_file:
        label_to_index_map = json.load(json_file)

    gold_labels = list()
    prediction_labels = list()

    with open(gold_labels_file_path) as gold_labels_file:
        for text_label in gold_labels_file:
            gold_labels.append(label_to_index_map[text_label.strip()])

    with open(predictions_file_path) as predictions_file:
        for label in predictions_file:
            prediction_labels.append(int(label.strip()))

    accuracy = metrics.accuracy_score(y_true=gold_labels, y_pred=prediction_labels)
    logger.info("Classification Accuracy: {}".format(accuracy)) 
Developer: vineetjohn, Project: linguistic-style-transfer, Lines of code: 20, Source file: label_accuracy.py

Example 7: compute_scores

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def compute_scores(optimize):
  maml.restore()
  y_true = []
  y_pred = []
  losses = []
  for task in range(learner.n_training_tasks, n_tasks):
    learner.set_task_index(task)
    if optimize:
      maml.train_on_current_task(restore=True)
    inputs = learner.get_batch()
    loss, prediction = maml.predict_on_batch(inputs)
    y_true.append(inputs[1])
    y_pred.append(prediction[0][:, 0])
    losses.append(loss)
  y_true = np.concatenate(y_true)
  y_pred = np.concatenate(y_pred)
  print()
  print('Cross entropy loss:', np.mean(losses))
  print('Prediction accuracy:', accuracy_score(y_true, y_pred > 0.5))
  print('ROC AUC:', dc.metrics.roc_auc_score(y_true, y_pred))
  print() 
Developer: deepchem, Project: deepchem, Lines of code: 23, Source file: toxcast_maml.py

Example 8: score_multiclass_classification

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def score_multiclass_classification(y, y_hat, report=True):
    """
    Create multiclass classification score
    :param y:
    :param y_hat:
    :return:
    """
    report_string = "---Multiclass Classification Score--- \n"
    report_string += classification_report(y, y_hat)
    score = accuracy_score(y, y_hat)
    report_string += "\nAccuracy = " + str(score)

    if report:
        print(report_string)

    return score, report_string 
Developer: mbernico, Project: snape, Lines of code: 18, Source file: score_dataset.py

Example 9: test

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def test(data, model, optimizer, logger, config):
    test_batches = (data.DATA_SIZE[1] + config["batch_size"] - 1) // config["batch_size"]
    for param in model.parameters():
        param.requires_grad = False
    model.eval()

    prediction = np.zeros(data.DATA_SIZE[1], dtype=np.uint8)
    for i in range(test_batches):
        inputs = Variable(torch.from_numpy(data.data_test[i * config["batch_size"]: min((i + 1) * config["batch_size"], data.DATA_SIZE[1]), :]), requires_grad=False).view(-1, 1, 45, 45)
        if config["cuda"] and torch.cuda.is_available():
            inputs = inputs.cuda()
        outputs = model(inputs)
        prediction[i * config["batch_size"]: min((i + 1) * config["batch_size"], data.DATA_SIZE[1])] = np.argmax(outputs.data.cpu().numpy(), axis=1)

    print('Accuracy: %0.2f' % (100 * accuracy_score(data.label_test, prediction)))
    init_dir(config['output_dir'])
    np.save(os.path.join(config['output_dir'], '%s_pred.npy' % config['method']), prediction) 
Developer: cxy1997, Project: MNIST-baselines, Lines of code: 19, Source file: drop_connect_trainer.py

Example 10: test

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def test(data, model, optimizer, logger, config):
    test_batches = (data.DATA_SIZE[1] + config["batch_size"] - 1) // config["batch_size"]
    for param in model.parameters():
        param.requires_grad = False
    model.eval()

    prediction = np.zeros(data.DATA_SIZE[1], dtype=np.uint8)
    for i in range(test_batches):
        inputs = Variable(torch.from_numpy(data.data_test[i * config["batch_size"]: min((i + 1) * config["batch_size"], data.DATA_SIZE[1]), :]), requires_grad=False).view(-1, 1, 45, 45)
        if config["cuda"] and torch.cuda.is_available():
            inputs = inputs.cuda()
        outputs, probs = model(inputs)
        prediction[i * config["batch_size"]: min((i + 1) * config["batch_size"], data.DATA_SIZE[1])] = np.argmax(probs.data.cpu().numpy(), axis=1)

    print('Accuracy: %0.2f' % (100 * accuracy_score(data.label_test, prediction)))
    init_dir(config['output_dir'])
    np.save(os.path.join(config['output_dir'], '%s_pred.npy' % config['method']), prediction) 
Developer: cxy1997, Project: MNIST-baselines, Lines of code: 19, Source file: capsnet_trainer.py

Example 11: print_evaluation

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def print_evaluation(model,data,ls,log=None):
    features,actual = data
    predictions = predict(model, features, 500).data.numpy().reshape(-1).tolist()

    labels = [ls.idx[i] for i, _ in enumerate(ls.idx)]

    actual = [labels[i] for i in actual]
    predictions = [labels[i] for i in predictions]

    print(accuracy_score(actual, predictions))
    print(classification_report(actual, predictions))
    print(confusion_matrix(actual, predictions))

    data = zip(actual, predictions)
    if log is not None:
        with open(log, "w+") as f:
            for a, p in data:
                f.write(json.dumps({"actual": a, "predicted": p}) + "\n")
Developer: sheffieldnlp, Project: fever-naacl-2018, Lines of code: 21, Source file: run.py

Example 12: run_evaluate

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def run_evaluate(self, test):
        """Evaluates performance on test set

        Args:
            test: dataset that yields tuple of (sentences, relation tags)

        Returns:
            metrics: (dict) metrics["acc"] = 98.4, ...

        """
        y_true, y_pred = [], []
        for data in minibatches(test, self.config.batch_size):
            word_batch, pos1_batch, pos2_batch, pos_batch, y_batch = data
            relations_pred = self.predict_batch(word_batch, pos1_batch, pos2_batch, pos_batch)
            assert len(relations_pred) == len(y_batch)
            y_true += y_batch
            y_pred += relations_pred.tolist()

        acc = accuracy_score(y_true, y_pred)
        p   = precision_score(y_true, y_pred, average='macro')
        r   = recall_score(y_true, y_pred, average='macro')
        f1  = f1_score(y_true, y_pred, average='macro')

        return {"acc":acc, "p":p, "r":r, "f1":f1} 
Developer: pencoa, Project: PCNN, Lines of code: 26, Source file: pcnn_model.py

Example 13: evaluate

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def evaluate(trueValues, predicted, decimals, note):
	print(note)
	label = 1
	avg = 'weighted'
	a = accuracy_score(trueValues, predicted)
	p = precision_score(trueValues, predicted, pos_label=label, average=avg)
	r = recall_score(trueValues, predicted, pos_label=label, average=avg)
	avg_f1 = f1_score(trueValues, predicted, pos_label=label, average=avg)
	fclasses = f1_score(trueValues, predicted, average=None)
	f1c1 = fclasses[0]; f1c2 = fclasses[1]
	fw = (f1c1 + f1c2) / 2.0

	print('accuracy:\t', str(round(a, decimals)))
	print('precision:\t', str(round(p, decimals)))
	print('recall:\t', str(round(r, decimals)))
	print('avg f1:\t', str(round(avg_f1, decimals)))
	print('c1 f1:\t', str(round(f1c1, decimals)))
	print('c2 f1:\t', str(round(f1c2, decimals)))
	print('avg(c1,c2):\t', str(round(fw, decimals)))
	print('------------')

###################################################################################


# split a parallel or comparable corpus into two parts 
Developer: motazsaad, Project: comparable-text-miner, Lines of code: 27, Source file: textpro.py

Example 14: evaluate

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def evaluate(config, model, data_iter, test=False):
    model.eval()
    loss_total = 0
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    with torch.no_grad():
        for texts, labels in data_iter:
            outputs = model(texts)
            loss = F.cross_entropy(outputs, labels)
            loss_total += loss
            labels = labels.data.cpu().numpy()
            predic = torch.max(outputs.data, 1)[1].cpu().numpy()
            labels_all = np.append(labels_all, labels)
            predict_all = np.append(predict_all, predic)

    acc = metrics.accuracy_score(labels_all, predict_all)
    if test:
        report = metrics.classification_report(labels_all, predict_all, target_names=config.class_list, digits=4)
        confusion = metrics.confusion_matrix(labels_all, predict_all)
        return acc, loss_total / len(data_iter), report, confusion
    return acc, loss_total / len(data_iter) 
Developer: 649453932, Project: Bert-Chinese-Text-Classification-Pytorch, Lines of code: 23, Source file: train_eval.py

Example 15: evaluate_model

# Required import: from sklearn import metrics [as alias]
# Or alternatively: from sklearn.metrics import accuracy_score [as alias]
def evaluate_model(model, iterator):
    all_preds = []
    all_y = []
    for idx, batch in enumerate(iterator):
        if torch.cuda.is_available():
            batch = [Variable(record).cuda() for record in batch]
        else:
            # Keep tensors on the CPU when CUDA is not available
            batch = [Variable(record) for record in batch]
        x, y = batch
        y_pred = model(x)
        predicted = torch.max(y_pred.cpu().data, 1)[1]
        all_preds.extend(predicted.numpy())
        all_y.extend(y.cpu().numpy())
        
    score = accuracy_score(all_y, np.array(all_preds).flatten())
    return score 
Developer: AnubhavGupta3377, Project: Text-Classification-Models-Pytorch, Lines of code: 18, Source file: utils.py


Note: The sklearn.metrics.accuracy_score examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.