当前位置: 首页>>代码示例>>Python>>正文


Python metrics.recall_score函数代码示例

本文整理汇总了Python中sklearn.metrics.recall_score函数的典型用法代码示例。如果您正苦于以下问题:Python recall_score函数的具体用法?Python recall_score怎么用?Python recall_score使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。


在下文中一共展示了recall_score函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: stratified_k_fold

def stratified_k_fold(clf,features,labels):
    skf = StratifiedKFold( labels, n_folds=3 )
    precisions = []
    recalls = []
    for train_idx, test_idx in skf:
        features_train = []
        features_test  = []
        labels_train   = []
        labels_test    = []
        for ii in train_idx:
            features_train.append( features[ii] )
            labels_train.append( labels[ii] )
        for jj in test_idx:
            features_test.append( features[jj] )
            labels_test.append( labels[jj] )

        ### fit the classifier using training set, and test on test set
        clf.fit(features_train, labels_train)
        pred = clf.predict(features_test)


        ### for each fold, print some metrics
        print
        print "precision score: ", precision_score( labels_test, pred )
        print "recall score: ", recall_score( labels_test, pred )

        precisions.append( precision_score(labels_test, pred) )
        recalls.append( recall_score(labels_test, pred) )

    ### aggregate precision and recall over all folds
    print "average precision: ", sum(precisions)/2.
    print "average recall: ", sum(recalls)/2.
开发者ID:rahulravindran0108,项目名称:ml-udacity,代码行数:32,代码来源:cleaner.py

示例2: extratreeclassifier

def extratreeclassifier(input_file,Output,test_size):
    lvltrace.lvltrace("LVLEntree dans extratreeclassifier split_test")
    ncol=tools.file_col_coma(input_file)
    data = np.loadtxt(input_file, delimiter=',', usecols=range(ncol-1))
    X = data[:,1:]
    y = data[:,0]
    n_samples, n_features = X.shape
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
    print X_train.shape, X_test.shape
    clf = ExtraTreesClassifier(n_estimators=10)
    clf.fit(X_train,y_train)
    y_pred = clf.predict(X_test)
    print "Extremely Randomized Trees"
    print "classification accuracy:", metrics.accuracy_score(y_test, y_pred)
    print "precision:", metrics.precision_score(y_test, y_pred)
    print "recall:", metrics.recall_score(y_test, y_pred)
    print "f1 score:", metrics.f1_score(y_test, y_pred)
    print "\n"
    results = Output+"_Extremely_Random_Forest_metrics_test.txt"
    file = open(results, "w")
    file.write("Extremely Random Forest Classifier estimator accuracy\n")
    file.write("Classification Accuracy Score: %f\n"%metrics.accuracy_score(y_test, y_pred))
    file.write("Precision Score: %f\n"%metrics.precision_score(y_test, y_pred))
    file.write("Recall Score: %f\n"%metrics.recall_score(y_test, y_pred))
    file.write("F1 Score: %f\n"%metrics.f1_score(y_test, y_pred))
    file.write("\n")
    file.write("True Value, Predicted Value, Iteration\n")
    for n in xrange(len(y_test)):
        file.write("%f,%f,%i\n"%(y_test[n],y_pred[n],(n+1)))
    file.close()
    title = "Extremely Randomized Trees %f"%test_size
    save = Output + "Extremely_Randomized_Trees_confusion_matrix"+"_%s.png"%test_size
    plot_confusion_matrix(y_test, y_pred,title,save)
    lvltrace.lvltrace("LVLSortie dans extratreeclassifier split_test")
开发者ID:xaviervasques,项目名称:Neuron_Morpho_Classification_ML,代码行数:34,代码来源:supervised_split_test.py

示例3: SVC_linear

def SVC_linear(input_file,Output,test_size):
    lvltrace.lvltrace("LVLEntree dans SVC_linear split_test")
    ncol=tools.file_col_coma(input_file)
    data = np.loadtxt(input_file, delimiter=',', usecols=range(ncol-1))
    X = data[:,1:]
    y = data[:,0]
    n_samples, n_features = X.shape
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
    print X_train.shape, X_test.shape
    clf=svm.SVC(kernel='linear')
    clf.fit(X_train,y_train)
    y_pred = clf.predict(X_test)
    print "C-Support Vector Classifcation (with RBF linear) "
    print "y_test, y_pred, iteration"
    print "classification accuracy:", metrics.accuracy_score(y_test, y_pred)
    print "precision:", metrics.precision_score(y_test, y_pred)
    print "recall:", metrics.recall_score(y_test, y_pred)
    print "f1 score:", metrics.f1_score(y_test, y_pred)
    print "\n"
    results = Output+"SVM_Linear_Kernel_metrics_test.txt"
    file = open(results, "w")
    file.write("Support Vector Machine with Linear Kernel estimator accuracy\n")
    file.write("Classification Accuracy Score: %f\n"%metrics.accuracy_score(y_test, y_pred))
    file.write("Precision Score: %f\n"%metrics.precision_score(y_test, y_pred))
    file.write("Recall Score: %f\n"%metrics.recall_score(y_test, y_pred))
    file.write("F1 Score: %f\n"%metrics.f1_score(y_test, y_pred))
    file.write("\n")
    file.write("True Value, Predicted Value, Iteration\n")
    for n in xrange(len(y_test)):
        file.write("%f,%f,%i\n"%(y_test[n],y_pred[n],(n+1)))
    file.close()
    title = "SVC linear %f"%test_size
    save = Output + "SVC_linear_confusion_matrix"+"_%s.png"%test_size
    plot_confusion_matrix(y_test, y_pred,title,save)
    lvltrace.lvltrace("LVLsortie dans SVC_linear split_test")
开发者ID:xaviervasques,项目名称:Neuron_Morpho_Classification_ML,代码行数:35,代码来源:supervised_split_test.py

示例4: nearest_centroid

def nearest_centroid(input_file,Output,test_size):
    ncol=tools.file_col_coma(input_file)
    data = np.loadtxt(input_file, delimiter=',', usecols=range(ncol-1))
    X = data[:,1:]
    y = data[:,0]
    n_samples, n_features = X.shape
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
    print X_train.shape, X_test.shape
    clf = NearestCentroid()
    clf.fit(X_train,y_train)
    y_pred = clf.predict(X_test)
    print "Nearest Centroid Classifier "
    print "classification accuracy:", metrics.accuracy_score(y_test, y_pred)
    print "precision:", metrics.precision_score(y_test, y_pred)
    print "recall:", metrics.recall_score(y_test, y_pred)
    print "f1 score:", metrics.f1_score(y_test, y_pred)
    print "\n"
    results = Output+"Nearest_Centroid_metrics_test.txt"
    file = open(results, "w")
    file.write("Nearest Centroid Classifier estimator accuracy\n")
    file.write("Classification Accuracy Score: %f\n"%metrics.accuracy_score(y_test, y_pred))
    file.write("Precision Score: %f\n"%metrics.precision_score(y_test, y_pred))
    file.write("Recall Score: %f\n"%metrics.recall_score(y_test, y_pred))
    file.write("F1 Score: %f\n"%metrics.f1_score(y_test, y_pred))
    file.write("\n")
    file.write("True Value, Predicted Value, Iteration\n")
    for n in xrange(len(y_test)):
        file.write("%f,%f,%i\n"%(y_test[n],y_pred[n],(n+1)))
    file.close()
    title = "Nearest Centroid %f"%test_size
    save = Output + "Nearest_Centroid_confusion_matrix"+"_%s.png"%test_size
    plot_confusion_matrix(y_test, y_pred,title,save)
    lvltrace.lvltrace("LVLSortie dans stochasticGD split_test")
开发者ID:xaviervasques,项目名称:Neuron_Morpho_Classification_ML,代码行数:33,代码来源:supervised_split_test.py

示例5: trainModel

    def trainModel(self,folds):
        """Run stratified k-fold cross-validation of self.clf over the
        instance data (self.X_total / self.y_total).

        folds -- number of stratified folds.

        For each fold: prints the true labels, the predictions, the
        confusion matrix and macro-averaged precision/recall plus accuracy.
        Side effects: overwrites self.X_train/X_test/y_train/y_test each
        fold, and rebinds self.clf to the estimator fitted on the last fold.
        """
        # NOTE(review): a fresh random_state is drawn per call, so fold
        # membership is not reproducible between runs — confirm intended.
        kf = cross_validation.StratifiedKFold(self.y_total,n_folds=folds,shuffle=True,random_state=random.randint(1,100))

        for (train_index,test_index) in (kf):
            # Materialize this fold's split from the full data by index.
            self.X_train = [self.X_total[i] for i in train_index]
            self.X_test = [self.X_total[i] for i in test_index]
            self.y_train = [self.y_total[i] for i in train_index]
            self.y_test = [self.y_total[i] for i in test_index]

            print "################"
            print "Original"
            print np.array(self.y_test)
            print "################"
            # Refit the classifier on this fold's training portion.
            self.clf = self.clf.fit(self.X_train,self.y_train)
            print "Predicted"
            y_pred = self.clf.predict(self.X_test)
            print y_pred
            print "################"
            print "Evaluation\n"
            cm = confusion_matrix(self.y_test,y_pred)
            print cm
            print "Precision Score:"
            print precision_score(self.y_test,y_pred,average="macro")
            print "Recall Score:"
            print recall_score(self.y_test,y_pred,average="macro")
            print "Accuracy Score:"
            print accuracy_score(self.y_test,y_pred)
开发者ID:massi92,项目名称:DIP-Project_MGFF,代码行数:29,代码来源:FeaturesLearning.py

示例6: predictSVD

def predictSVD(svd, row, column, d):
    # start = timeit.default_timer()
    u = svd[0] #clf.components_ 
    s = svd[1] #clf.explained_variance_
    vt = svd[2] #clf.fit_transform(X)
    # print "   fitting done.";
    # stop = timeit.default_timer()
    # print "   runtime: " + str(stop - start)
    # print "d:"
    # print d

    # matrixY = clf.components_ 
    probsY = []
    # print "dot products:"
    for i in range(len(row)):
        # print np.dot(u[:,column[i]], v[row[i],:])
        prob = np.sum(u[column[i],:]*s*vt[:,row[i]])
        if(prob < 0): prob = 0
        if(prob > 1): prob = 1
        probsY.append(prob)

    probsY = np.array(probsY)
    preds = np.zeros(shape=len(probsY))
    preds[probsY >= 0.5] = 1

    print "Precision"
    print precision_score(d, preds)
    print "Recall"
    print recall_score(d, preds)
    print "F-Score"
    print f1_score(d, preds)

    return probsY, preds
开发者ID:noraw,项目名称:COS424-Assignment3,代码行数:33,代码来源:main.py

示例7: _clf_mlp

def _clf_mlp(trX,teX,trY,teY):
	"""Interactively train a TfMultiLayerPerceptron and print its metrics.

	trX/trY -- training features and labels; teX/teY -- test features/labels.

	INTERACTIVE: reads the hidden-layer spec and the mini-batch count from
	stdin via input(), so this cannot run unattended.
	Prints positive-class counts, the processed confusion matrix, and
	precision / recall / ROC-AUC of the predictions on the test set.
	"""
	print "MLP"
	print trX.shape,"trX shape"
	# The eval'd input() result is used directly as the hidden-layer sizes
	# (e.g. typing [100, 50] yields two hidden layers).
	print "Enter Layer for MLP"
	layer=input()
	# print "enter delIdx"
	# delIdx=input()
	# while(delIdx):
	# 	trX=np.delete(trX,-1,axis=0)
	# 	trY=np.delete(trY,-1,axis=0)
	# 	delIdx=delIdx-1
	# Show the factorizations of the sample count so the user can pick a
	# mini-batch count that divides it evenly.
	print "factors",factors(trX.shape[0])
	teY=teY.astype(np.int32)
	trY=trY.astype(np.int32)
	print trX.shape,"trX shape"
	print "enter no of mini batch"
	mini_batch=int(input())
	# One ReLU activation per hidden layer entered above.
	mlp = TfMultiLayerPerceptron(eta=0.01, 
                             epochs=100, 
                             hidden_layers=layer,
                             activations=['relu' for i in range(len(layer))],
                             print_progress=3, 
                             minibatches=mini_batch, 
                             optimizer='adam',
                             random_seed=1)
	mlp.fit(trX,trY)
	pred=mlp.predict(teX)
	print _f_count(teY),"test f count"
	pred=pred.astype(np.int32)
	print _f_count(pred),"pred f count"
	conf_mat=confusion_matrix(teY, pred)
	process_cm(conf_mat, to_print=True)
	print precision_score(teY,pred),"Precision Score"
	print recall_score(teY,pred),"Recall Score"
	print roc_auc_score(teY,pred), "ROC_AUC"
开发者ID:nthakor,项目名称:imbalance_algorithms,代码行数:35,代码来源:clf_utils.py

示例8: main

def main():
    """Extract CNN features from image data, train a linear SVM on them,
    and print classification metrics on the test set.
    """
    resize_shape = 64  # images are resized to resize_shape x resize_shape
    print "data is loading..."
    train_X, train_Y, test_X, test_Y = load_data(resize_shape)
    print "data is loaded"
    print "feature engineering..."
    learning_rate = 0.01
    training_iters = 100000
    batch_size = 128
    display_step = 10

    # Network Parameters
    n_input = resize_shape*resize_shape # flattened input size (64*64, not MNIST's 28*28 despite the template this came from)
    n_classes = 62 # 62 classes — presumably digits plus upper/lowercase letters; TODO confirm against load_data
    dropout = 0.5 # Dropout, probability to keep units

    # Use the CNN only as a fixed feature extractor for both splits.
    with tf.Session() as sess:
        cnn = CNN(sess, learning_rate, training_iters, batch_size, display_step, n_input, n_classes, dropout,resize_shape)
        train_X = cnn.inference(train_X)
        test_X = cnn.inference(test_X)

    print "feature engineering is complete"

    print 'training phase'
    clf = svm.LinearSVC().fit(train_X, train_Y)
    print 'test phase'
    predicts = clf.predict(test_X)

    # Report confusion matrix, per-class F1/precision/recall and accuracy.
    print 'measure phase'
    print confusion_matrix(test_Y, predicts)
    print f1_score(test_Y, predicts, average=None)
    print precision_score(test_Y, predicts, average=None)
    print recall_score(test_Y, predicts, average=None)
    print accuracy_score(test_Y, predicts)
开发者ID:SkyoIn,项目名称:LFD2016_Proj2_Team3,代码行数:35,代码来源:project2_svm.py

示例9: stochasticGD

def stochasticGD(input_file,Output):
    lvltrace.lvltrace("LVLEntree dans stochasticGD")
    ncol=tools.file_col_coma(input_file)
    data = np.loadtxt(input_file, delimiter=',', usecols=range(ncol-1))
    X = data[:,1:]
    y = data[:,0]
    n_samples, n_features = X.shape
    clf = SGDClassifier(loss="hinge", penalty="l2")
    clf.fit(X,y)
    y_pred = clf.predict(X)
    print "#########################################################################################################\n"
    print "Stochastic Gradient Descent "
    print "classification accuracy:", metrics.accuracy_score(y, y_pred)
    print "precision:", metrics.precision_score(y, y_pred)
    print "recall:", metrics.recall_score(y, y_pred)
    print "f1 score:", metrics.f1_score(y, y_pred)
    print "\n"
    print "#########################################################################################################\n"
    results = Output+"Stochastic_GD_metrics.txt"
    file = open(results, "w")
    file.write("Stochastic Gradient Descent estimator accuracy\n")
    file.write("Classification Accuracy Score: %f\n"%metrics.accuracy_score(y, y_pred))
    file.write("Precision Score: %f\n"%metrics.precision_score(y, y_pred))
    file.write("Recall Score: %f\n"%metrics.recall_score(y, y_pred))
    file.write("F1 Score: %f\n"%metrics.f1_score(y, y_pred))
    file.write("\n")
    file.write("True Value, Predicted Value, Iteration\n")
    for n in xrange(len(y)):
        file.write("%f,%f,%i\n"%(y[n],y_pred[n],(n+1)))
    file.close()
    title = "Stochastic Gradient Descent"
    save = Output + "Stochastic_GD_confusion_matrix.png"
    plot_confusion_matrix(y, y_pred,title,save)
    lvltrace.lvltrace("LVLSortie dans stochasticGD")
开发者ID:xaviervasques,项目名称:Neuron_Morpho_Classification_ML,代码行数:34,代码来源:supervised.py

示例10: randomforest

def randomforest(input_file,Output):
    lvltrace.lvltrace("LVLEntree dans randomforest")
    ncol=tools.file_col_coma(input_file)
    data = np.loadtxt(input_file, delimiter=',', usecols=range(ncol-1))
    X = data[:,1:]
    y = data[:,0]
    n_samples, n_features = X.shape
    clf = RandomForestClassifier(n_estimators=10)
    clf.fit(X,y)
    y_pred = clf.predict(X)
    print "#########################################################################################################\n"
    print "The Random forest algo "
    print "classification accuracy:", metrics.accuracy_score(y, y_pred)
    print "precision:", metrics.precision_score(y, y_pred)
    print "recall:", metrics.recall_score(y, y_pred)
    print "f1 score:", metrics.f1_score(y, y_pred)
    print "\n"
    print "#########################################################################################################\n"
    results = Output+"Random_Forest_metrics.txt"
    file = open(results, "w")
    file.write("Random Forest Classifier estimator accuracy\n")
    file.write("Classification Accuracy Score: %f\n"%metrics.accuracy_score(y, y_pred))
    file.write("Precision Score: %f\n"%metrics.precision_score(y, y_pred))
    file.write("Recall Score: %f\n"%metrics.recall_score(y, y_pred))
    file.write("F1 Score: %f\n"%metrics.f1_score(y, y_pred))
    file.write("\n")
    file.write("True Value, Predicted Value, Iteration\n")
    for n in xrange(len(y)):
        file.write("%f,%f,%i\n"%(y[n],y_pred[n],(n+1)))
    file.close()
    title = "The Random forest"
    save = Output + "Random_Forest_confusion_matrix.png"
    plot_confusion_matrix(y, y_pred,title,save)
    lvltrace.lvltrace("LVLSortie dans randomforest")
开发者ID:xaviervasques,项目名称:Neuron_Morpho_Classification_ML,代码行数:34,代码来源:supervised.py

示例11: SVC_linear

def SVC_linear(input_file,Output):
    lvltrace.lvltrace("LVLEntree dans SVC_linear")
    ncol=tools.file_col_coma(input_file)
    data = np.loadtxt(input_file, delimiter=',', usecols=range(ncol-1))
    X = data[:,1:]
    y = data[:,0]
    n_samples, n_features = X.shape
    clf=svm.SVC(kernel='linear')
    clf.fit(X,y)
    y_pred = clf.predict(X)
    print "#########################################################################################################\n"
    print "C-Support Vector Classifcation (with linear kernel) "
    print "classification accuracy:", metrics.accuracy_score(y, y_pred)
    print "precision:", metrics.precision_score(y, y_pred)
    print "recall:", metrics.recall_score(y, y_pred)
    print "f1 score:", metrics.f1_score(y, y_pred)
    print "\n"
    print "#########################################################################################################\n"
    results = Output+"SVM_Linear_Kernel_metrics.txt"
    file = open(results, "w")
    file.write("Support Vector Machine with Linear Kernel estimator accuracy\n")
    file.write("Classification Accuracy Score: %f\n"%metrics.accuracy_score(y, y_pred))
    file.write("Precision Score: %f\n"%metrics.precision_score(y, y_pred))
    file.write("Recall Score: %f\n"%metrics.recall_score(y, y_pred))
    file.write("F1 Score: %f\n"%metrics.f1_score(y, y_pred))
    file.write("\n")
    file.write("True Value, Predicted Value, Iteration\n")
    for n in xrange(len(y)):
        file.write("%f,%f,%i\n"%(y[n],y_pred[n],(n+1)))
    file.close()
    title = "SVC - linear Kernel"
    save = Output + "SVC_linear_confusion_matrix.png"
    plot_confusion_matrix(y, y_pred,title,save)
    lvltrace.lvltrace("LVLSortie dans SVC_linear")
开发者ID:xaviervasques,项目名称:Neuron_Morpho_Classification_ML,代码行数:34,代码来源:supervised.py

示例12: run_model

def run_model(X_test, X_train, y_test, y_train, prob_threshold = 20, layers = 5, nodes = 64, dropout = 50):
    
    print "run_model RUNNING"
    # Grab the model 
    model = get_model(X_test, layers =layers, dropout = dropout)
    model.fit(X_train, y_train, nb_epoch=20, batch_size=16, verbose = 0)

    # Get the training and test predictions from our model fit. 
    train_predictions  = model.predict_proba(X_train)
    test_predictions = model.predict_proba(X_test)
    # Set these to either 0 or 1 based off the probability threshold we 
    # passed in (divide by 100 becuase we passed in intergers). 
    train_preds = (train_predictions) >= prob_threshold / 100.0
    test_preds = (test_predictions) >= prob_threshold / 100.0

    # Calculate the precision and recall. Only output until 
    precision_score_train = precision_score(y_train, train_preds)
    precision_score_test = precision_score(y_test, test_preds)
    acc_train = accuracy_score(y_train, train_preds)
    acc_test = accuracy_score(y_test, test_preds)

    recall_score_train = recall_score(y_train, train_preds)
    recall_score_test = recall_score(y_test, test_preds)

    return precision_score_train, precision_score_test, recall_score_train, recall_score_test, acc_train, acc_test, model
开发者ID:jessedow24,项目名称:-Clutch-Pitch-PITCHf-x,代码行数:25,代码来源:run_neural_net.py

示例13: create_all_eval_results

def create_all_eval_results(y_true,y_pred,key,system_features,sampling,replacement,num_of_samples):
    """Compute bug-class and overall precision/recall/F2/PR-AUC metrics and
    append them as one row to the module-level ``results`` DataFrame.

    y_true/y_pred -- aligned label sequences (1 marks a bug)
    key           -- lookup key into data_names for the dataset's display name
    remaining args are recorded verbatim in the results row.
    """
    # precision = metrics.precision_score(y_true, y_pred, average='weighted')
    # recall = metrics.recall_score(y_true, y_pred, average='weighted')
    # F2 = calculateF2(precision, recall)
    name = data_names[key]

    # Restrict to the positions whose true label is 1 (the "bug" class).
    y_true_bugs, y_pred_bugs = zip(*[[y_true[i], y_pred[i]] for i in range(len(y_true)) if y_true[i] == 1])
    # precision_bug, recall_bug, F_measure_bug ,_ = metrics.precision_recall_fscore_support(y_true_bugs,
    #                                                                                                  y_pred_bugs,
    #                                                                                                  average='micro')
    precision_bug =metrics.precision_score(y_true_bugs,y_pred_bugs,average='micro')
    recall_bug =metrics.recall_score(y_true_bugs,y_pred_bugs,average='micro')
    F2_bug = calculateF2(precision_bug,recall_bug)
    precision_bug_all, recall_bug_all,_ = metrics.precision_recall_curve(y_true_bugs, y_pred_bugs)
    prc_area_bug = metrics.auc(recall_bug_all, precision_bug_all)

    # precision, recall, F_measure,_ = metrics.precision_recall_fscore_support(y_true,
    #                                                                                                 y_pred,
    #                                                                                                 average='micro')
    # NOTE(review): "precision" here is average_precision_score (area under
    # the PR curve), not precision_score as for the bug-only block above —
    # confirm this asymmetry is intentional.
    precision = metrics.average_precision_score(y_true, y_pred, average='micro')
    recall = metrics.recall_score(y_true, y_pred, average='micro')
    F2 = calculateF2(precision, recall)
    precision_all, recall_all, _ = metrics.precision_recall_curve(y_true, y_pred)
    prc_area = metrics.auc(recall_all, precision_all)

    # Append one summary row to the shared results table.
    global results
    results.loc[len(results)] = [name,precision_bug,recall_bug,F2_bug,prc_area_bug, precision, recall,F2,prc_area,str(system_features),str(sampling),str(replacement),str(num_of_samples)]
开发者ID:amir9979,项目名称:Debugger,代码行数:27,代码来源:gan.py

示例14: score

def score(y_true, y_pred):
    """Format precision/recall/f1 for *y_pred* against *y_true*.

    Two aggregations are reported: sklearn's support-weighted average, and
    the mean of the per-class scores taken with a stride of 12
    (presumably one representative per pitch class — TODO confirm).

    Returns the report as a single newline-joined string.
    """
    def weighted(metric_fn):
        # Support-weighted average over all classes.
        return metric_fn(y_true, y_pred, average='weighted')

    def strided_mean(metric_fn):
        # Per-class scores, every 12th class, averaged.
        return np.mean(metric_fn(y_true, y_pred, average=None)[::12])

    precision_weighted = weighted(metrics.precision_score)
    precision_ave = strided_mean(metrics.precision_score)

    recall_weighted = weighted(metrics.recall_score)
    recall_ave = strided_mean(metrics.recall_score)

    f1_weighted = weighted(metrics.f1_score)
    f1_ave = strided_mean(metrics.f1_score)

    stat_line = "  Precision: %0.4f\t Recall: %0.4f\tf1: %0.4f"
    res1 = "Weighted: " + stat_line % (100*precision_weighted,
                                       100*recall_weighted,
                                       100*f1_weighted)

    res2 = "Averaged: " + stat_line % (100*precision_ave,
                                       100*recall_ave,
                                       100*f1_ave)
    res3 = "-"*72
    return "\n".join([res3, res1, res2, res3])
开发者ID:agangzz,项目名称:dl4mir,代码行数:27,代码来源:chroma.py

示例15: evaluate

def evaluate(ytest, ypred, filename='metrics.txt'):
    """Print binary-classification metrics and append them to *filename*.

    ytest/ypred -- sequences of scores; values > 0.5 count as class 1
                   ("win"), everything else as class 0 ("loss").
    filename    -- text file the metrics are appended to.
    """
    true_result = [1 if item > 0.5 else 0 for item in ytest]
    pred_result = [1 if item > 0.5 else 0 for item in ypred]

    cm = confusion_matrix(true_result, pred_result)
    print('\nConfusion matrix:')
    print(cm)
    print("\nLoss classified as loss", cm[0][0])
    print("Wins classified as wins", cm[1][1])
    print("Wins classified as loss", cm[1][0])
    print("Loss classified as wins", cm[0][1])
    print('\nAccuracy:\t', accuracy_score(true_result, pred_result))
    print('Precision:\t', precision_score(true_result, pred_result))
    print('Recall: \t', recall_score(true_result, pred_result))
    print('F1 score:\t', f1_score(true_result, pred_result))
    print('Mean absolute error:\t', mean_absolute_error(ytest, ypred))

    # BUG FIX: the original called open(filename, "a") once per print and
    # never closed any of the nine handles; open the file a single time and
    # let the context manager close it. Output content is unchanged.
    with open(filename, "a") as out:
        print("Loss classified as loss", cm[0][0], file=out)
        print("Wins classified as wins", cm[1][1], file=out)
        print("Wins classified as loss", cm[1][0], file=out)
        print("Loss classified as wins", cm[0][1], file=out)
        print('\nAccuracy:\t', accuracy_score(true_result, pred_result), file=out)
        print('Precision:\t', precision_score(true_result, pred_result), file=out)
        print('Recall: \t', recall_score(true_result, pred_result), file=out)
        print('F1 score:\t', f1_score(true_result, pred_result), file=out)
        print('Mean absolute error:\t', mean_absolute_error(ytest, ypred), file=out)
开发者ID:alexandremcosta,项目名称:pucker,代码行数:27,代码来源:learn.py


注:本文中的sklearn.metrics.recall_score函数示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。