当前位置: 首页>>代码示例>>Python>>正文


Python BaggingClassifier.score方法代码示例

本文整理汇总了Python中sklearn.ensemble.BaggingClassifier.score方法的典型用法代码示例。如果您正苦于以下问题:Python BaggingClassifier.score方法的具体用法?Python BaggingClassifier.score怎么用?Python BaggingClassifier.score使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在sklearn.ensemble.BaggingClassifier的用法示例。


在下文中一共展示了BaggingClassifier.score方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_oob_score_classification

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]
def test_oob_score_classification():
    """Check that the OOB estimate tracks the held-out generalization score."""
    rng = check_random_state(0)
    # Hold out a test split so the OOB score has an independent reference.
    X_tr, X_te, y_tr, y_te = train_test_split(iris.data,
                                              iris.target,
                                              random_state=rng)

    for estimator in (DecisionTreeClassifier(), SVC()):
        bagger = BaggingClassifier(base_estimator=estimator,
                                   n_estimators=100,
                                   bootstrap=True,
                                   oob_score=True,
                                   random_state=rng)
        bagger.fit(X_tr, y_tr)

        held_out = bagger.score(X_te, y_te)

        # The OOB estimate should be within 0.1 of the true test accuracy.
        assert_less(abs(held_out - bagger.oob_score_), 0.1)

        # With a single estimator most samples never land out-of-bag,
        # so fitting with oob_score=True must emit a UserWarning.
        tiny_ensemble = BaggingClassifier(base_estimator=estimator,
                                          n_estimators=1,
                                          bootstrap=True,
                                          oob_score=True,
                                          random_state=rng)
        assert_warns(UserWarning, tiny_ensemble.fit, X_tr, y_tr)
开发者ID:daniel-perry,项目名称:scikit-learn,代码行数:30,代码来源:test_bagging.py

示例2: main

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]
def main():
    """Fit a bagged decision-tree ensemble on iris and print its score."""
    classifier = BaggingClassifier(DecisionTreeClassifier())
    dataset = load_iris()
    features, labels = dataset.data, dataset.target
    # NOTE: fits and evaluates on the same (first two) samples — no held-out
    # split is used here.
    classifier.fit(features, labels)
    classifier.predict(features[:2])
    print(classifier.score(features[:2], labels[:2]))
开发者ID:anmousyon,项目名称:uni,代码行数:12,代码来源:bagging.py

示例3: train_bagging

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]
def train_bagging():
	"""Train a bagging ensemble around the configured base model and report
	its training error.

	Relies on module-level configuration and data defined elsewhere in this
	script: build_model, bagging_num_estimator, bagging_sample_fraction,
	bagging_use_oob, persist_model, model_file_directory, model_file_prefix,
	and the training set (XC, yc) — TODO confirm where these are loaded.
	"""
	model = build_model()
	bagging_model = BaggingClassifier(base_estimator=model,n_estimators=bagging_num_estimator,
	max_samples=bagging_sample_fraction,oob_score=bagging_use_oob)
	
	#train model
	bagging_model.fit(XC, yc) 
	
	#persist model: dump each fitted member estimator to its own numbered .mod file
	if persist_model:
		models = bagging_model.estimators_
		for m in zip(range(0, len(models)), models):
			model_file = model_file_directory + "/" + model_file_prefix + "_" + str(m[0] + 1) + ".mod"
			joblib.dump(m[1], model_file) 

	# Score on the training data itself (not a held-out set); the printed
	# "average error" is 1 - score.
	score = bagging_model.score(XC, yc)
	print "average error %.3f" %(1.0 - score)
开发者ID:pranab,项目名称:avenir,代码行数:19,代码来源:svml.py

示例4: bagging

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]
def bagging(df, dep_var, features, test):
    print 'Bagging'
    best = []
    #best_maxfeautures = []
    #best_n_estimators = []
    for sample in [1, 2, 3, 4, 5]:
        for x in range(1, 6):
            for n_est in range(3, 21, 3):
                start_time = time.time()
                clf = BaggingClassifier(max_features=x, max_samples=sample, n_estimators=n_est)
                clf.fit (df[features], df[dep_var])
                score = clf.score(test[features], test[dep_var])
                print 'sample: ', sample, ' Max_F: ', x, 'n estimators: ', n_est, score
                end_time = time.time()
                tm =end_time-start_time
                print 'Time: ', tm
                best.append([score, (x, sample, n_est, tm), clf])
    best.sort(reverse=True)
    print best[0]
    best = best[0]
    return {'score': best[0], 'max_features': best[1][0], 'max_sample': best[1][1] , 'n_estimators': best[1][2], 'time': best[1][3], 'clf': clf}
开发者ID:slarrain,项目名称:MachineLearning,代码行数:23,代码来源:pa3.py

示例5: cross_val_score

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]
        br.fit(X, y)
        print 'Score BaggingRegressor = %s' % (br.score(X, y))
        scores_br = cross_val_score(br, X, y, cv=5)
        print 'Cross Val Scores of BR = %s' %(np.mean(scores_br))
        
    if name=='Iris' or name=='Digits': # Classificaiton problem
    
        rfc = RandomForestClassifier(**params)
        rfc.fit(X, y)
        print 'Score RandomForestClassifier = %s' % (rfc.score(X, y))
        scores_rfc = cross_val_score(rfc, X, y ,cv=5)
        print 'Corss Val Scores of RandomForestClassifier = %s' %(np.mean(scores_rfc))

        bc = BaggingClassifier(base_estimator=DecisionTreeClassifier(max_depth=max_depth), n_estimators=n_estimators)
        bc.fit(X, y)        
        print 'Score BaggingClassifier == %s' % (bc.score(X, y))
        scores_bc = cross_val_score(bc, X, y, cv=5)
        print 'Cross Val Scores of BaggingClassifier = %s' %(np.mean(scores_bc))

# *************************************
# Question 15
# *************************************

from utils import *
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.ensemble import (RandomForestClassifier, RandomForestRegressor)
from sklearn.ensemble import (BaggingClassifier, BaggingRegressor)
from sklearn.tree import (DecisionTreeClassifier, DecisionTreeRegressor)
from sklearn.utils import shuffle
开发者ID:Banaei,项目名称:ces-ds,代码行数:33,代码来源:random_forest.py

示例6: MultinomialNB

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]
# Load the raw ad-impression training data (tab-separated). `data` appears
# unused below — presumably a leftover; confirm before removing.
data = []
tfile = '../exp2_raw_data/train11w.data'
train = pd.read_csv(tfile,sep = '\t')

#preprocess: build a creative_id -> category_id lookup from rows that already
#carry a positive category, then propagate it to every row
cateMap = {}
tmp = np.array(train[train['category_id']>0][['creative_id','category_id']])
for i,j in tmp:
    cateMap[i] = j

train['category_id'] = train['creative_id'].map(cateMap)
# Drop rows whose creative_id had no known category (NaN after the map).
train = train.dropna(axis = 0)

#init train: features = every column except identifiers/URLs/free text and
#the target itself; target = click_num
x = np.array(train.drop(['qq','description','imp_time','pic_url','web_url', 'product_id','advertiser_id','series_id','creative_id','product_type','click_num', 'pos_id'], axis = 1))
y = np.array(train['click_num'])

# 10% held out for evaluation; fixed seed for reproducibility.
xTrain, xTest, yTrain, yTest = cross_validation.train_test_split(x, y, test_size=0.1, random_state=0)

# some model — the commented lines below are alternative classifiers that
# were tried; the bagged KNN is the active choice.

if __name__ == '__main__':
    # clf = MultinomialNB(alpha = 0.1)
    # clf = tree.DecisionTreeClassifier(criterion='entropy', max_depth= 20, min_samples_split = 100 , class_weight = 'balanced')
    clf = BaggingClassifier(KNeighborsClassifier(),max_samples=0.5, max_features=0.5)
    # clf = AdaBoostClassifier(n_estimators=350, learning_rate=0.03)
    #clf = xgb.XGBClassifier(missing=np.nan, max_depth=5, n_estimators=350, learning_rate=0.03, nthread=4, subsample=0.95, colsample_bytree=0.85, seed=4242)

    # Report both train and test scores to eyeball overfitting.
    clf.fit(xTrain, yTrain)
    print clf.score(xTrain, yTrain)
    print clf.score(xTest, yTest)
开发者ID:haleylu,项目名称:DIP,代码行数:33,代码来源:code.py

示例7: myclassify

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]
def myclassify(numfiers=5,xtrain=xtrain,ytrain=ytrain,xtest=xtest,ytest=ytest):
    count = 0



    bagging2 = BaggingClassifier(ETC(),bootstrap=False,bootstrap_features=False)
    bagging2.fit(xtrain,ytrain)
    #print bagging2.score(xtest,ytest)
    count += 1
    classifiers = [bagging2.score(xtest,ytest)]

    if count < numfiers:

        tree2 = ETC()
        tree2.fit(xtrain,ytrain)
        #print tree2.fit(xtrain,ytrain)
        #print tree2.score(xtest,ytest)
        count+=1
        classifiers = np.append(classifiers,tree2.score(xtest,ytest))
        print "1"
        print tree2.score(xtest,ytest)

    if count < numfiers:
        bagging1 = BaggingClassifier(ETC())
        bagging1.fit(xtrain,ytrain)
        #print bagging1.score(xtest,ytest)
        count+=1
        classifiers = np.append(classifiers,bagging1.score(xtest,ytest))
        print "2"
        print bagging1.score(xtest,ytest)

#     if count < numfiers:
#         # votingClassifiers combine completely different machine learning classifiers and use a majority vote
#         clff1 = SVC()
#         clff2 = RFC(bootstrap=False)
#         clff3 = ETC()
#         clff4 = neighbors.KNeighborsClassifier()
#         clff5 = quadda()
#         print"3"


#         eclf = VotingClassifier(estimators = [('svc',clff1),('rfc',clff2),('etc',clff3),('knn',clff4),('qda',clff5)])
#         eclf = eclf.fit(xtrain,ytrain)
#         #print(eclf.score(xtest,ytest))
#         # for claf, label in zip([clff1,clff2,clff3,clff4,clff5,eclf],['SVC','RFC','ETC','KNN','QDA','Ensemble']):
#         #     cla
#         #     scores = crossvalidation.cross_val_score(claf,xtrain,ytrain,scoring='accuracy')
#         #     print ()
#         count+=1
#         classifiers = np.append(classifiers,eclf.score(xtest,ytest))


#     if count < numfiers:
#         svc1 = SVC()
#         svc1.fit(xtrain,ytrain)
#         dec = svc1.score(xtest,ytest)
#         count+=1
#         classifiers = np.append(classifiers,svc1.score(xtest,ytest))
#         print "3"

    if count < numfiers:
        # Quadradic discriminant analysis - classifier with quadratic decision boundary -
        qda = quadda()
        qda.fit(xtrain,ytrain)
        #print(qda.score(xtest,ytest))
        count+=1
        classifiers = np.append(classifiers,qda.score(xtest,ytest))
        print "4"


    if count < numfiers:

        tree1 = DTC()
        tree1.fit(xtrain,ytrain)
        #print tree1.fit(xtrain,ytrain)
        #print tree1.score(xtest,ytest)
        count+=1
        classifiers = np.append(classifiers,tree1.score(xtest,ytest))

    if count < numfiers:
        knn1 = neighbors.KNeighborsClassifier() # this classifies based on the #k nearest neighbors, where k is definted by the user.
        knn1.fit(xtrain,ytrain)
        #print(knn1.score(xtest,ytest))
        count+=1
        classifiers = np.append(classifiers,knn1.score(xtest,ytest))

    if count < numfiers:
        # linear discriminant analysis - classifier with linear decision boundary -
        lda = linda()
        lda.fit(xtrain,ytrain)
        #print(lda.score(xtest,ytest))
        count+=1
        classifiers = np.append(classifiers,lda.score(xtest,ytest))

    if count < numfiers:
        tree3 = RFC()
        tree3.fit(xtrain,ytrain)
        #print tree3.score(xtest,ytest)
        count+=1
        classifiers = np.append(classifiers,tree3.score(xtest,ytest))
#.........这里部分代码省略.........
开发者ID:nsantacruz,项目名称:Senior-Project,代码行数:103,代码来源:featureextract1122.py

示例8: range

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]
from sklearn.ensemble import BaggingClassifier
from sklearn import datasets


if __name__ == '__main__':
    # Hold out the final 20 digits samples for evaluation.
    digits = datasets.load_digits()
    X_train, y_train = digits.data[:-20], digits.target[:-20]
    X_test, y_test = digits.data[-20:], digits.target[-20:]
    # Sweep ensemble sizes 1..5, fitting with 4 worker processes each time.
    for size in range(1, 6):
        model = BaggingClassifier(n_estimators=size, n_jobs=4)
        model.fit(X_train, y_train)
        #y_pred = model.predict(X_test)
        held_out = model.score(X_test, y_test)
        print(size, held_out)
开发者ID:imaculate,项目名称:scikit-learn-tutorials,代码行数:18,代码来源:bagging_classifier_multiprocessing.py

示例9: ETC

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]
# Jupyter-notebook export (the "In[NN]" markers are cell boundaries).
# Assumes xtrain/xtest and ytrain1/ytest1 are defined in earlier cells — TODO
# confirm against the source notebook.
# In[22]:

# Single extra tree as a baseline; printing fit() shows the configuration.
from sklearn.tree import ExtraTreeClassifier as ETC
tree2 = ETC()
print tree2
tree2.fit(xtrain,ytrain1)
print tree2.fit(xtrain,ytrain1)
print tree2.score(xtest,ytest1)


# In[23]:

# Bagged extra trees with default bootstrap sampling.
from sklearn.ensemble import BaggingClassifier
bagging1 = BaggingClassifier(ETC())
bagging1.fit(xtrain,ytrain1)
print bagging1.score(xtest,ytest1)


# In[24]:

# Same ensemble but without bootstrapping samples or features.
from sklearn.ensemble import BaggingClassifier
bagging2 = BaggingClassifier(ETC(),bootstrap=False,bootstrap_features=False)
bagging2.fit(xtrain,ytrain1)
print bagging2.score(xtest,ytest1)


# In[25]:

# Random forest for comparison (scored in a later cell, not shown here).
from sklearn.ensemble import RandomForestClassifier as RFC
tree3 = RFC()
tree3.fit(xtrain,ytrain1)
开发者ID:nsantacruz,项目名称:Senior-Project,代码行数:33,代码来源:Classification+stuff.py

示例10: SVM

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]

#.........这里部分代码省略.........
    master_faces = master_faces.reshape(len(master_faces), 32, 32)
    faces_test = faces_test.reshape(len(faces_test), 32, 32)
    plt.subplot(122), plt.imshow(faces_test[3], cmap="gray")
    plt.title("Normal"), plt.xticks([]), plt.yticks([])
    # plt.show()

    # Gamma correction
    hidden_faces = all_gamma(hidden_faces)

    master_faces = all_gamma(master_faces)
    faces_test = all_gamma(faces_test)
    plt.subplot(122), plt.imshow(faces_test[3], cmap="gray")
    plt.title("Gamma correction"), plt.xticks([]), plt.yticks([])
    # plt.show()

    # #Dog filter
    # master_faces -= cv2.GaussianBlur(master_faces, (3, 3),1)
    # faces_test -= cv2.GaussianBlur(faces_test, (3, 3),1)
    # plt.subplot(122),plt.imshow(faces_test[1], cmap='gray')
    # plt.title('Dog Filter'), plt.xticks([]), plt.yticks([])
    # plt.show()

    # #Rescale intensity
    # master_faces = testing(master_faces)
    # faces_test = testing(faces_test)
    #
    # plt.subplot(122),plt.imshow(faces_test[15], cmap='gray')
    # plt.title('Rescale'), plt.xticks([]), plt.yticks([])
    # plt.show()

    # Equalization of variance TODO
    hidden_faces = EQ(hidden_faces)
    master_faces = EQ(master_faces)

    faces_test = EQ(faces_test)
    plt.subplot(122), plt.imshow(faces_test[3], cmap="gray")
    plt.title("Equalization"), plt.xticks([]), plt.yticks([])
    # plt.show()

    # Reshape
    master_faces = master_faces.reshape((master_faces.shape[0], -1))
    faces_test = faces_test.reshape((faces_test.shape[0], -1))
    hidden_faces = hidden_faces.reshape((hidden_faces.shape[0], -1))

    tuples = kfold(master_faces, master_labels, master_ident, 13)
    success_rates_train = []
    success_rate_valid = []
    if not submit:
        for tuple in tuples:

            train_data, test_data, train_targets, test_targets, train_ident, test_ident = tuple
            # train_data = pca.transform(train_data)
            # test_data = pca.transform(test_data)

            classifier = svm.SVC(gamma=0.5, C=1, kernel="poly")
            model = BaggingClassifier(classifier, n_estimators=10, bootstrap=True, verbose=1)
            model.fit(train_data, train_targets)

            # Train
            score = model.score(train_data, train_targets)
            valid_score = model.score(test_data, test_targets)

            print("Training :")
            print(score)
            success_rates_train.append(score)

            # Validation
            print("Validation :")
            print(valid_score)
            success_rate_valid.append(valid_score)

        print("Training rates :")
        print(success_rates_train)
        print("Training average :")
        print(np.average(success_rates_train))

        print("Validation rates :")
        print(success_rate_valid)
        print("Validation average :")
        print(np.average(success_rate_valid))
    if submit:
        classification = svm.SVC(gamma=0.5, C=1, kernel="poly")
        model = BaggingClassifier(classification, n_estimators=20, bootstrap_features=True, bootstrap=True, verbose=1)
        model.fit(master_faces, master_labels)
        test_predictions = model.predict(faces_test)
        hidden_predictions = model.predict(hidden_faces)

        # Test predictions

        ascending = np.zeros(1253)

        for i in range(len(ascending)):
            ascending[i] = i + 1
        ascending = ascending.astype(int)
        hidden_guesses = hidden_predictions
        test_predictions = np.concatenate([test_predictions, hidden_guesses])
        test_predictions = test_predictions.astype(int)
        csv = np.column_stack((ascending, test_predictions))
        np.savetxt("hidden.csv", csv, delimiter=",")
    return
开发者ID:c2mcwate,项目名称:CSC411_A3,代码行数:104,代码来源:trainedmodel.py

示例11: export_graphviz

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]
#0.904761904762
#0.904761904762
# Render the fitted entropy decision tree (ctree, fitted elsewhere in this
# file) to Graphviz .dot and PNG. NOTE(review): the variable is named
# graph_gini but the files are the entropy tree — confirm intent.
export_graphviz(ctree, out_file='ctree_entropy.dot',
                feature_names=words, class_names=author_names,
                filled=True, rounded=True,
                special_characters=True)
graph_gini = pydot.graph_from_dot_file('ctree_entropy.dot')
graph_gini.write_png('ctree_entropy.png')
# feature evaluation: words ranked by importance, most important first
ind_entropy = np.argsort(ctree.feature_importances_)
features_entropy = np.array(words)[ind_entropy][::-1]

###############################################################################
# Bagging
# NOTE(review): despite the err_* names these hold the value returned by
# .score() on train/test (the inline numbers below), not an error rate.
bagging = BaggingClassifier()
bagging.fit(training_data, training_label)
err_bag_tr =  bagging.score(training_data, training_label)
err_bag_ts =  bagging.score(test_data,test_label)
#0.996604414261
#0.94444444444


###############################################################################
# Boosting
# AdaBoost with default settings, scored the same way as bagging above.
adaboost = AdaBoostClassifier()
adaboost.fit(training_data, training_label)
err_ada_tr =  adaboost.score(training_data, training_label)
err_ada_ts =  adaboost.score(test_data,test_label)
#0.9015280135823429
#0.8134920634920634
# Feature ranking by AdaBoost importances (ascending indices).
ind_adaboost = np.argsort(adaboost.feature_importances_)
开发者ID:AkiraKane,项目名称:STAT640_Statistical_Machine_Learning,代码行数:33,代码来源:trees_hw04.py

示例12: skrc

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]
# ROC/AUC for the random forest `rf` fitted earlier in this file: column 1 of
# predict_proba is the positive-class probability.
proba=pd.DataFrame(rf.predict_proba(x_test))[1]
false_positive_rate, true_positive_rate, thresholds = skrc(y_test,proba)
auc(false_positive_rate, true_positive_rate)

#Extra Trees Accuracy (not as good as random forest)
et = ExtraTreesClassifier(class_weight='balanced')
et.fit(x_train,y_train)
et.score(x_test,y_test)
proba=pd.DataFrame(et.predict_proba(x_test))[1]
false_positive_rate, true_positive_rate, thresholds = skrc(y_test,proba)
auc(false_positive_rate, true_positive_rate)

#Bagging Accuracy (Competitive for best depending on features)
# `dt` is a decision tree defined elsewhere in this file — confirm.
bc = BaggingClassifier(dt)
bc.fit(x_train,y_train)
bc.score(x_test,y_test)
proba=pd.DataFrame(bc.predict_proba(x_test))[1]
false_positive_rate, true_positive_rate, thresholds = skrc(y_test,proba)
auc(false_positive_rate, true_positive_rate)

#Boosting Accuracy (worst)
#also takes too long to build model, avoid
ab = AdaBoostClassifier(dt)
ab.fit(x_train,y_train)
ab.score(x_test,y_test)
proba=pd.DataFrame(ab.predict_proba(x_test))[1]
false_positive_rate, true_positive_rate, thresholds = skrc(y_test,proba)
auc(false_positive_rate, true_positive_rate)

#Gradient Boosting Accuracy (Competitive for best depending on features)
gb = GradientBoostingClassifier()

示例13: read_data

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]
# this file tests bagging on various algorithms

from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier
from dlinghu_functions import *


# read_data/print_cv_scores come from dlinghu_functions (star import above).
# Baseline: a single RBF SVM with hand-tuned C and gamma.
x_train, y_train, x_test = read_data()
svm = SVC(C=128.0, gamma=8.0)
svm.fit(x_train, y_train)
print_cv_scores(svm, x_train, y_train)
#########################################################
# test bagging sample ratio, without replacement: sweep the fraction of
# samples drawn per estimator from 0.1 to 0.9
for max_sample in np.arange(0.1, 1.0, 0.1):
    print 'max_sample ratio = %s' % max_sample
    svm_bagging = BaggingClassifier(svm, bootstrap=False, max_samples=max_sample, n_estimators=50)
    svm_bagging.fit(x_train, y_train)
    # test bagging: in-sample score alongside cross-validated scores
    print "In-sample score = %s" % svm_bagging.score(x_train, y_train)
    print_cv_scores(svm_bagging, x_train, y_train)
#########################################################
# Same ensemble size but with bootstrap (sampling with replacement).
svm_bagging = BaggingClassifier(svm, bootstrap=True, n_estimators=50)
svm_bagging.fit(x_train, y_train)
print_cv_scores(svm_bagging, x_train, y_train)
开发者ID:chrinide,项目名称:us,代码行数:26,代码来源:dlinghu_Bagging.py

示例14: RandomForestClassifier

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]
            print 'Cross Val : std = %s' %(diabetes[i,6])
            
        if name=='Iris': # Classificaiton problem
        
            rfc = RandomForestClassifier(**params)
            rfc.fit(X, y)
            scores_rfc = cross_val_score(rfc, X, y ,cv=5)

            bc = BaggingClassifier(base_estimator=DecisionTreeClassifier(max_depth=max_depth), n_estimators=n_estimators)
            bc.fit(X, y)        
            scores_bc = cross_val_score(bc, X, y, cv=5)

            iris[i,1] = rfc.score(X, y)
            iris[i,2] = np.mean(scores_rfc)
            iris[i,3] = np.std(scores_rfc)
            iris[i,4] = bc.score(X, y)
            iris[i,5] = np.mean(scores_bc)
            iris[i,6] = np.std(scores_bc)

            print 'Score RandomForestClassifier = %s' % (iris[i,1])
            print 'Corss Val : mean = %s' %(iris[i,2])
            print 'Corss Val : std = %s' %(iris[i,3])
            print 'Score BaggingClassifier == %s' % (iris[i,4])
            print 'Cross Val : mean = %s' %(iris[i,5])
            print 'Cross Val : std = %s' %(iris[i,6])
            
        if name=='Digits': # Classificaiton problem
        
            rfc = RandomForestClassifier(**params)
            rfc.fit(X, y)
            scores_rfc = cross_val_score(rfc, X, y ,cv=5)
开发者ID:Banaei,项目名称:ces-ds,代码行数:33,代码来源:rendu_eval_Alireza_Banaei.py

示例15: RandomForestClassifier

# 需要导入模块: from sklearn.ensemble import BaggingClassifier [as 别名]
# 或者: from sklearn.ensemble.BaggingClassifier import score [as 别名]
                           max_samples=0.1)
bagged.fit(x_train, y_train)


# initialize a random forest classifier 
print 'Training random forest...'
rfc = RandomForestClassifier(n_estimators=200,
                             max_features=40,
                             min_samples_split=2,
                             min_samples_leaf=1)
rfc.fit(x_train, y_train)

# training scores
print "Training scores..."
print bdt.score(x_train, y_train)
print bagged.score(x_train, y_train)
print rfc.score(x_train, y_train)

# score the classfier on the test set 
# print "Scoring..."
# print bdt.score(x_test, y_test)
# print bagged.score(x_test, y_test)
# print rfc.score(x_test, y_test)

# print "Writing predictions..."
predictions1 = bdt.predict(x_test)
predictions2 = bagged.predict(x_test)
predictions3 = rfc.predict(x_test)
predictions = []

for i in range(100):
开发者ID:leiyama,项目名称:MICCAI_2014_MLC,代码行数:33,代码来源:training.py


注:本文中的sklearn.ensemble.BaggingClassifier.score方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。