

Python VotingClassifier.score Method Code Examples

This article collects typical usage examples of the sklearn.ensemble.VotingClassifier.score method in Python. If you have been wondering what VotingClassifier.score does, how to call it, or what real-world usage looks like, the curated examples below should help. For more context, you can also explore the other usage examples of sklearn.ensemble.VotingClassifier.


The following presents 10 code examples of VotingClassifier.score, ordered by popularity.
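Before the collected examples, here is a minimal, self-contained sketch of the method (score returns the mean accuracy of the ensemble on the given test data). The dataset and estimator choices here are illustrative, not taken from the examples below:

from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)

# Hard voting: each member casts one vote, the majority class wins.
eclf = VotingClassifier(estimators=[
    ('lr', LogisticRegression(max_iter=1000)),
    ('rf', RandomForestClassifier(n_estimators=50, random_state=0)),
    ('gnb', GaussianNB())], voting='hard')
eclf.fit(X_train, y_train)

# score() reports the fraction of correctly predicted test samples.
print(eclf.score(X_test, y_test))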

Example 1: main

# Required import: from sklearn.ensemble import VotingClassifier [as alias]
# Or: from sklearn.ensemble.VotingClassifier import score [as alias]
def main(path, filename):

	batchsT = ['histogramaByN','histogramaColor','patrones2x2ByN','patrones3x3ByN','patronesCirculaesByN_2_5','patronesCirculaesByN_2_9','patronesCirculaesByN_3_9','patronesCirculaesByN_5_9','patronesCirculaesByN_3_5']
	batchsAux = ['histogramaByN','histogramaColor','patronesCirculaesByN_2_5','patrones2x2ByN','patrones3x3ByN','patronesCirculaesByN_2_9','patronesCirculaesByN_3_9','patronesCirculaesByN_5_9','patronesCirculaesByN_3_5','patronesCirculaesByN_6_12','patronesCirculaesByN_8_12']
	#batchs = ['patrones2x2ByN','patrones3x3ByN','patronesCirculaesByN_2_5','patronesCirculaesByN_2_9']
	#batchs = ['patrones2x2ByN','patrones3x3ByN','patronesCirculaesByN_2_5','patronesCirculaesByN_3_5']
	#for batch in batchsAux:


	#print(batch)
	batchs = batchsAux
	#batchs.remove(batch)
	X = []
	y = []
	load_batch(y, path, 'clases', filename)
	y = [j for i in y for j in i]  # flatten the nested label lists
	for batch in batchs:
		load_batch(X, path, batch, filename)

	#X, y = load_images('/tmp/train/')
	est = [RandomForest(), Boosting()]
	for i in range(0, 15):
		est.append(Gradient(i))
	for i in range(0, 4):
		est.append(SVM(i))

	#scores = cross_validation.cross_val_score(clf, X, y, cv=5)
	#print(scores)
	clf = VotingClassifier(estimators=est)

	clf.fit(X, y)
	pickle.dump(clf, open("clf_grande.p", "wb"))
	return
	# NOTE: everything below the return above is unreachable -- leftovers from earlier
	# experiments. sklearn.cross_validation is the legacy module name; its contents moved
	# to sklearn.model_selection in scikit-learn 0.18.
	X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, y, test_size=0.2, random_state=777)
	#print(clf.sub_score(X_test, Y_test))
	print('start')
	conf_matrix = metrics.confusion_matrix(Y_test, clf.predict(X_test))
	print('confusion matrix')
	print(conf_matrix)
	return
	for name, estim in est:
		print(name)
		#estim.fit(X_train, Y_train)
		#print(estim.score(X_test, Y_test))
		print(cross_validation.cross_val_score(estim, X, y, cv=5, n_jobs=-1))
	print('voter')
	print(cross_validation.cross_val_score(clf, X, y, cv=5, n_jobs=-1))
	return
	#clf.fit(X_train, Y_train)
	print(clf.score(X_test, Y_test))

	return
Author ID: fcanay, Project: MachineLearning, Lines of code: 54, Source file: src.py
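VotingClassifier expects estimators as a list of (name, estimator) pairs, which is why the loop near the end can unpack for name, estim in est: the helpers RandomForest(), Gradient(i), SVM(i), etc. presumably return such pairs. A minimal sketch of building the list programmatically, with illustrative names:

from sklearn.ensemble import VotingClassifier
from sklearn.svm import SVC

# One (name, estimator) pair per hyperparameter setting, as in the loops above.
est = [('svc_%d' % i, SVC(C=10.0 ** i)) for i in range(4)]
clf = VotingClassifier(estimators=est)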

Example 2: vclas

# Required import: from sklearn.ensemble import VotingClassifier [as alias]
# Or: from sklearn.ensemble.VotingClassifier import score [as alias]
def vclas(w1, w2, w3, w4, w5):
    # cv here is the legacy sklearn.cross_validation module (sklearn.model_selection since 0.18).
    Xtrain, Xtest, ytrain, ytest = cv.train_test_split(trainX, trainY, test_size=0.4)

    clf1 = LogisticRegression()
    clf2 = GaussianNB()
    clf3 = RandomForestClassifier(n_estimators=10, bootstrap=True)
    clf4 = ExtraTreesClassifier(n_estimators=10, bootstrap=True)
    clf5 = GradientBoostingClassifier(n_estimators=10)

    clfes = [clf1, clf2, clf3, clf4, clf5]

    eclf = VotingClassifier(estimators=[('lr', clf1), ('gnb', clf2), ('rf', clf3), ('et', clf4), ('gb', clf5)],
                            voting='soft',
                            weights=[w1, w2, w3, w4, w5])

    [c.fit(Xtrain, ytrain) for c in (clf1, clf2, clf3, clf4, clf5, eclf)]

    N = 6
    ind = np.arange(N)
    width = 0.3
    fig, ax = plt.subplots()

    for i, clf in enumerate(clfes):
        print(clf, i)
        p1 = ax.bar(i, clfes[i].score(Xtrain, ytrain), width=width, color="blue", alpha=0.5)
        p2 = ax.bar(i + width, clfes[i].score(Xtest, ytest), width=width, color="red", alpha=0.5)
    ax.bar(len(clfes) + width, eclf.score(Xtrain, ytrain), width=width, color="blue", alpha=0.5)
    ax.bar(len(clfes) + width * 2, eclf.score(Xtest, ytest), width=width, color="red", alpha=0.5)
    plt.axvline(4.8, color='k', linestyle='dashed')
    ax.set_xticks(ind + width)
    ax.set_xticklabels(['LogisticRegression',
                        'GaussianNB',
                        'RandomForestClassifier',
                        'ExtraTrees',
                        'GradientBoosting',
                        'VotingClassifier'],
                       rotation=40,
                       ha='right')
    plt.title('Training and Test Score for Different Classifiers')
    plt.legend([p1[0], p2[0]], ['training', 'test'], loc='lower left')
    plt.show()
Author ID: JPico6, Project: National-Survey-on-Drug-Use-and-Health, Lines of code: 43, Source file: Step3_ImprovingModels.py
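For reference, voting='soft' as used here averages the members' class probabilities, weighted by weights, and predicts the argmax. A sketch of the equivalent computation, assuming every member implements predict_proba (scikit-learn additionally maps the argmax index back to the original labels via classes_):

import numpy as np

def soft_vote(members, weights, X):
    # Stack each member's (n_samples, n_classes) probability estimates ...
    probas = np.asarray([clf.predict_proba(X) for clf in members])
    # ... average them with the given weights ...
    avg = np.average(probas, axis=0, weights=weights)
    # ... and predict the class column with the highest averaged probability.
    return np.argmax(avg, axis=1)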

Example 3: run_voting

# Required import: from sklearn.ensemble import VotingClassifier [as alias]
# Or: from sklearn.ensemble.VotingClassifier import score [as alias]
def run_voting(training_set, train_set_labels, validation_set, validation_set_labels):
    from sklearn.ensemble import VotingClassifier
    standard_train_inputs = standard_data(training_set)
    standard_valid_inputs = standard_data(validation_set)
    kknn_class = KNeighborsClassifier(weights='uniform', n_neighbors=5)

    logistic_regression_solver = sklearn.linear_model.LogisticRegression(penalty='l2', dual=False, tol=0.01, C=1.0, fit_intercept=True,
                                                                         intercept_scaling=1, class_weight=None, random_state=None, solver='newton-cg',
                                                                         max_iter=100, multi_class='ovr', verbose=0, warm_start=False, n_jobs=2)
    svm_class = svm.SVC(decision_function_shape='ovo', tol=0.001)
    # voting='hard' takes a majority vote over the three members' predicted labels.
    eclf1 = VotingClassifier(estimators=[('knn', kknn_class), ('lr', logistic_regression_solver), ('svm', svm_class)], voting='hard')
    eclf1.fit(standard_train_inputs, train_set_labels.ravel())

    accuracy = eclf1.score(standard_valid_inputs, validation_set_labels.ravel())
    print(accuracy)
Author ID: Myers-Omri, Project: CSC411_Project, Lines of code: 17, Source file: run_classifier.py
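Here the inputs are standardized up front by standard_data. An alternative that keeps the scaling inside the model, so it is re-fit on the training portion of any split, is to give each scale-sensitive member its own Pipeline; a sketch under that assumption:

from sklearn.ensemble import VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

# Each member scales its own inputs; no separate standard_data step is needed.
eclf = VotingClassifier(estimators=[
    ('knn', make_pipeline(StandardScaler(), KNeighborsClassifier(n_neighbors=5))),
    ('svm', make_pipeline(StandardScaler(), SVC(decision_function_shape='ovo', tol=0.001)))],
    voting='hard')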

Example 4: do_ml

# Required import: from sklearn.ensemble import VotingClassifier [as alias]
# Or: from sklearn.ensemble.VotingClassifier import score [as alias]
def do_ml(ticker):
    X, y, df = extract_featuresets(ticker)
    # cross_validation is the legacy module name (sklearn.model_selection since 0.18).
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.25)

    #clf = neighbors.KNeighborsClassifier()
    clf = VotingClassifier([('lsvc', svm.LinearSVC()),
                            ('knn', neighbors.KNeighborsClassifier()),
                            ('rfor', RandomForestClassifier())])

    clf.fit(X_train, y_train)
    confidence = clf.score(X_test, y_test)
    print('Accuracy', confidence)
    predictions = clf.predict(X_test)
    print('Predicted spread:', Counter(predictions))

    return confidence
Author ID: wpr101, Project: SentiSmart, Lines of code: 19, Source file: PreProcessData.py
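The default hard voting is the right fit here: LinearSVC does not implement predict_proba, so voting='soft' would raise an error with this member list. If probability averaging were wanted anyway, one option is to wrap LinearSVC in CalibratedClassifierCV, which supplies calibrated probabilities; a sketch:

from sklearn.calibration import CalibratedClassifierCV
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC

clf = VotingClassifier([('lsvc', CalibratedClassifierCV(LinearSVC())),  # calibration adds predict_proba
                        ('knn', KNeighborsClassifier()),
                        ('rfor', RandomForestClassifier())],
                       voting='soft')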

Example 5: runModel

# Required import: from sklearn.ensemble import VotingClassifier [as alias]
# Or: from sklearn.ensemble.VotingClassifier import score [as alias]
model = runModel(model=model, trainX=X_train[0:30000], trainY=y_train[0:30000],
                 optimize=False, parameters=None, scoring='roc_auc')


print("Applying Model ...")
start = time()
y_pred = model.predict(X_test)
print("Model took %.2f seconds to predict vals" % (time() - start))


### Evaluation
print("Scoring Classifier...")
start = time()

score = model.score(X_test, y_test)
recall = metrics.recall_score(y_test, y_pred, average='binary')
auc = metrics.roc_auc_score(y_test, y_pred, average='macro')
confusion = metrics.confusion_matrix(y_test, y_pred, labels=[0, 1])

print("Score: \t \t Recall: \t AUC:\n", score, recall, auc)
print("Model took %.2f seconds to score" % (time() - start))

if plot_roc:

    fpr, tpr, thrsh = metrics.roc_curve(y_test, y_pred, pos_label=1)

    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % auc)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
Author ID: migueltp, Project: machinelearning, Lines of code: 32, Source file: main.py
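One caveat in the evaluation above: roc_auc_score and roc_curve are fed the hard 0/1 predictions, which collapses the ROC curve to a single operating point. If the fitted model exposes probability estimates (e.g. a soft-voting ensemble), scoring with them yields the full curve; a sketch assuming model implements predict_proba:

# Use the positive-class probability rather than the hard label.
y_score = model.predict_proba(X_test)[:, 1]
auc = metrics.roc_auc_score(y_test, y_score)
fpr, tpr, thrsh = metrics.roc_curve(y_test, y_score, pos_label=1)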

Example 6: print

# Required import: from sklearn.ensemble import VotingClassifier [as alias]
# Or: from sklearn.ensemble.VotingClassifier import score [as alias]
"orig_destination_distance", "srch_ci", "srch_co"]
features = [column for column in features if column not in removelist]
print("The features considered are:")
print(features)

start_time = timeit.default_timer()

# Create and fit a decision tree to the set of data in those features
y = trainFull["hotel_cluster"] 
X = trainFull[features]

rf = RandomForestClassifier(n_estimators=20, n_jobs=-1, max_features=None, min_samples_split=250)
ovr = OneVsRestClassifier(RandomForestClassifier(n_estimators=10, n_jobs=-1, max_features=None, min_samples_split=250), n_jobs=-1)
dt = DecisionTreeClassifier(min_samples_split=250, criterion="entropy")

vc = VotingClassifier(estimators=[('rf', rf), ('ovr', ovr), ('dt', dt)], voting='hard')
vc.fit(X, y)

# Measure ability to predict the right hotel clust for a new subset
testX = test_set[features]
testy = test_set["hotel_cluster"]
prediction = vc.predict(testX)

report = classification_report(testy, prediction, digits=5)
print(report)

elapsed = timeit.default_timer() - start_time
print(elapsed)

score = vc.score(testX, testy)
print("Score is " + str(score))
Author ID: bomanimc, Project: ExpediaChallenge, Lines of code: 33, Source file: voting.py

Example 7: myclassify

# Required import: from sklearn.ensemble import VotingClassifier [as alias]
# Or: from sklearn.ensemble.VotingClassifier import score [as alias]
def myclassify(numfiers, xtrain, ytrain, xtest, ytest):
    count = 0
    print(numfiers)

    ytrain = np.ravel(ytrain)
    ytest = np.ravel(ytest)


    bagging2 = BaggingClassifier(ETC(), bootstrap=False, bootstrap_features=False)
    bagging2.fit(xtrain, ytrain)
    #print(bagging2.score(xtest, ytest))
    count += 1
    classifiers = [bagging2.score(xtest, ytest)]
    print("percentage classification complete: %s" % str(round(100 * (float(count) / numfiers))) + "%")


    if count < numfiers:

        tree2 = ETC()
        tree2.fit(xtrain, ytrain)
        #print(tree2.fit(xtrain, ytrain))
        #print(tree2.score(xtest, ytest))
        count += 1
        classifiers = np.append(classifiers, tree2.score(xtest, ytest))
        print("percentage classification complete: %s" % str(round(100 * (float(count) / numfiers))) + "%" + "   " + str(numfiers - count) + " classifiers left to train")

    if count < numfiers:
        bagging1 = BaggingClassifier(ETC())
        bagging1.fit(xtrain, ytrain)
        #print(bagging1.score(xtest, ytest))
        count += 1
        classifiers = np.append(classifiers, bagging1.score(xtest, ytest))
        print("percentage classification complete: %s" % str(round(100 * (float(count) / numfiers))) + "%" + "   " + str(numfiers - count) + " classifiers left to train")

    if count < numfiers:
        # VotingClassifier combines completely different machine learning classifiers and uses a majority vote
        clff1 = SVC()
        clff2 = RFC(bootstrap=False)
        clff3 = ETC()
        clff4 = neighbors.KNeighborsClassifier()
        clff5 = quadda()



        eclf = VotingClassifier(estimators=[('svc', clff1), ('rfc', clff2), ('etc', clff3), ('knn', clff4), ('qda', clff5)])
        eclf = eclf.fit(xtrain, ytrain)
        #print(eclf.score(xtest, ytest))
        # for claf, label in zip([clff1,clff2,clff3,clff4,clff5,eclf],['SVC','RFC','ETC','KNN','QDA','Ensemble']):
        #     cla
        #     scores = crossvalidation.cross_val_score(claf,xtrain,ytrain,scoring='accuracy')
        #     print ()
        count += 1
        classifiers = np.append(classifiers, eclf.score(xtest, ytest))
        print("percentage classification complete: %s" % str(round(100 * (float(count) / numfiers))) + "%" + "   " + str(numfiers - count) + " classifiers left to train")


    if count < numfiers:
        svc1 = SVC()
        svc1.fit(xtrain, ytrain)
        dec = svc1.score(xtest, ytest)
        count += 1
        classifiers = np.append(classifiers, svc1.score(xtest, ytest))
        print("percentage classification complete: %s" % str(round(100 * (float(count) / numfiers))) + "%" + "   " + str(numfiers - count) + " classifiers left to train")

    if count < numfiers:
        # Quadratic discriminant analysis - classifier with a quadratic decision boundary
        qda = quadda()
        qda.fit(xtrain, ytrain)
        #print(qda.score(xtest, ytest))
        count += 1
        classifiers = np.append(classifiers, qda.score(xtest, ytest))
        print("percentage classification complete: %s" % str(round(100 * (float(count) / numfiers))) + "%" + "   " + str(numfiers - count) + " classifiers left to train")



    if count < numfiers:

        tree1 = DTC()
        tree1.fit(xtrain, ytrain)
        #print(tree1.fit(xtrain, ytrain))
        #print(tree1.score(xtest, ytest))
        count += 1
        classifiers = np.append(classifiers, tree1.score(xtest, ytest))
        print("percentage classification complete: %s" % str(round(100 * (float(count) / numfiers))) + "%" + "   " + str(numfiers - count) + " classifiers left to train")

    if count < numfiers:
        knn1 = neighbors.KNeighborsClassifier()  # classifies based on the k nearest neighbors, where k is defined by the user
        knn1.fit(xtrain, ytrain)
        #print(knn1.score(xtest, ytest))
        count += 1
        classifiers = np.append(classifiers, knn1.score(xtest, ytest))
        print("percentage classification complete: %s" % str(round(100 * (float(count) / numfiers))) + "%" + "   " + str(numfiers - count) + " classifiers left to train")


    if count < numfiers:
        # Linear discriminant analysis - classifier with a linear decision boundary
        lda = linda()
        lda.fit(xtrain, ytrain)
        #print(lda.score(xtest, ytest))
        count += 1
#......... part of the code omitted here .........
Author ID: nsantacruz, Project: Senior-Project, Lines of code: 103, Source file: classtest.py

Example 8: GBC

# Required import: from sklearn.ensemble import VotingClassifier [as alias]
# Or: from sklearn.ensemble.VotingClassifier import score [as alias]
tree6 = GBC()
tree6.fit(xtrain, ytrain1)
print(tree6.score(xtest, ytest1))
# look at n_estimators and change that along with setting warm_start to True


# In[31]:

# VotingClassifier combines completely different machine learning classifiers and uses a majority vote
clff1 = SVC()
clff2 = RFC(bootstrap=False)
clff3 = ETC()
clff4 = neighbors.KNeighborsClassifier()
clff5 = quadda()
from sklearn.ensemble import VotingClassifier
from sklearn import cross_validation  # legacy module; sklearn.model_selection since scikit-learn 0.18
eclf = VotingClassifier(estimators=[('svc', clff1), ('rfc', clff2), ('etc', clff3), ('knn', clff4), ('qda', clff5)])
eclf = eclf.fit(xtrain, ytrain1)
print(eclf.score(xtest, ytest1))
# for claf, label in zip([clff1,clff2,clff3,clff4,clff5,eclf],['SVC','RFC','ETC','KNN','QDA','Ensemble']):
#     cla
#     scores = crossvalidation.cross_val_score(claf,xtrain,ytrain1,scoring='accuracy')
#     print ()
    


# In[ ]:



Author ID: nsantacruz, Project: Senior-Project, Lines of code: 29, Source file: Classification+stuff.py
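The commented-out loop at the end of this example sketches the standard member-vs-ensemble comparison. Completed with the modern module layout, it would read roughly as follows:

from sklearn.model_selection import cross_val_score

for claf, label in zip([clff1, clff2, clff3, clff4, clff5, eclf],
                       ['SVC', 'RFC', 'ETC', 'KNN', 'QDA', 'Ensemble']):
    # 5-fold cross-validated accuracy of each member and of the ensemble.
    scores = cross_val_score(claf, xtrain, ytrain1, cv=5, scoring='accuracy')
    print("Accuracy: %0.3f (+/- %0.3f) [%s]" % (scores.mean(), scores.std(), label))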

Example 9: GradientBoostingClassifier

# Required import: from sklearn.ensemble import VotingClassifier [as alias]
# Or: from sklearn.ensemble.VotingClassifier import score [as alias]
cl3 = GradientBoostingClassifier(n_estimators=1000, learning_rate=1,
                                 max_depth=10, random_state=0, min_samples_split=5)
cl4 = GaussianNB()
# NOTE: algorithm= is the pre-release MLPClassifier spelling; current scikit-learn
# releases call this parameter solver.
cl5 = MLPClassifier(algorithm='adam', alpha=0.01, max_iter=500,
                    learning_rate='constant', hidden_layer_sizes=(400,),
                    random_state=0, learning_rate_init=1e-2,
                    activation='logistic')


eclf1 = VotingClassifier(estimators=[
    ('rf', cl1), ('svc', cl2), ('gbc', cl3),
    ('gnb', cl4), ('mlp', cl5)
], voting='hard')

eclf1 = eclf1.fit(X, Y.values.ravel())
print("Accuracy of Voting Ensemble: " + str(eclf1.score(P, Q)))



clf5 = SGDClassifier(loss="perceptron", penalty="elasticnet",
                     random_state=0).fit(X, Y.values.ravel())
print("Accuracy of SGDClassifier: " + str(clf5.score(P, Q)))

gbc = GradientBoostingClassifier(loss='exponential').fit(X, Y.values.ravel())
adaboost = AdaBoostClassifier(n_estimators=10000, learning_rate=100).fit(X, Y.values.ravel())
print("Accuracy of GBC: " + str(gbc.score(P, Q)))
print("Accuracy of Adaboost: " + str(adaboost.score(P, Q)))


### Calculate MSE of different models
rf = clf.predict(P)
Author ID: alifar76, Project: TFMicrobiome, Lines of code: 33, Source file: extras.py
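Instead of refitting each member separately to compare accuracies, the fitted members can be read back from the ensemble itself: after fit, VotingClassifier exposes them as named_estimators_. A sketch against the variables above:

# Score each fitted member of the ensemble on the same holdout set (P, Q).
for name, member in eclf1.named_estimators_.items():
    print("Accuracy of " + name + ": " + str(member.score(P, Q)))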

Example 10: SelectFwe

# Required import: from sklearn.ensemble import VotingClassifier [as alias]
# Or: from sklearn.ensemble.VotingClassifier import score [as alias]
# NOTE: the opening of this pipeline expression (e.g. clf3 = make_pipeline(...) is truncated in the source snippet
    SelectFwe(score_func=f_classif, alpha=0.04),
    RandomForestClassifier(criterion="entropy", max_features=0.6000000000000001, min_samples_split=5, n_estimators=100)
)

# 0.82
#clf4 = exported_pipeline = make_pipeline(
#    StackingEstimator(estimator=LogisticRegression(C=1.0, dual=True)),
#    RandomForestClassifier(max_features=0.6000000000000001, min_samples_leaf=20, min_samples_split=18)
#)

#eclf1 = VotingClassifier(estimators=[
#         ('lr', clf1), ('rf', clf2), ('gnb', clf3), ('rnd', clf4)], voting='hard')
eclf1 = VotingClassifier(estimators=[
    ('lr', clf1), ('gnb', clf2), ('rnd', clf3)], voting='hard')
eclf1 = eclf1.fit(X_train, y_train)
print(eclf1.score(X_test, y_test))

model1 = clf1.fit(X_train, y_train)
print(model1.score(X_test, y_test))

model2 = clf2.fit(X_train, y_train)
print(model2.score(X_test, y_test))

model3 = clf3.fit(X_train, y_train)
print(model3.score(X_test, y_test))

#model4 = clf4.fit(X_train, y_train)
#print(model4.score(X_test, y_test))

#tpot = TPOTClassifier(generations=20, population_size=50, verbosity=2)
#tpot.fit(X_train, y_train)
Author ID: jaimevalero, Project: jupyter-learning, Lines of code: 33, Source file: 2-teaport-VotingClasiffier.py
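Because VotingClassifier supports nested parameters via the '<member name>__<parameter>' convention, the whole ensemble can also be tuned with GridSearchCV. A sketch, assuming the member named 'lr' is a LogisticRegression as its label suggests:

from sklearn.model_selection import GridSearchCV

# Tune a member's hyperparameter through the ensemble.
grid = GridSearchCV(estimator=eclf1, param_grid={'lr__C': [0.1, 1.0, 10.0]}, cv=5)
grid.fit(X_train, y_train)
print(grid.best_params_, grid.score(X_test, y_test))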


Note: The sklearn.ensemble.VotingClassifier.score examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution or reuse should follow each project's license. Please do not reproduce this article without permission.