

Python ExtraTreesRegressor.score Method Code Examples

This article collects typical usage examples of the sklearn.ensemble.ExtraTreesRegressor.score method in Python. If you are wondering what ExtraTreesRegressor.score does, how to call it, or what real-world uses look like, the curated examples below may help. You can also browse further usage examples of the containing class, sklearn.ensemble.ExtraTreesRegressor.


Eight code examples of the ExtraTreesRegressor.score method are shown below, ordered by popularity by default.
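Before the collected examples, here is a minimal, self-contained sketch of a typical ExtraTreesRegressor.score call. The synthetic dataset, split, and variable names are illustrative only and are not taken from the projects cited below; score returns the coefficient of determination (R^2) on the given test data.

from sklearn.datasets import make_regression
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.model_selection import train_test_split

# Build a small synthetic regression problem (illustrative only).
X, y = make_regression(n_samples=200, n_features=10, noise=0.1, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Fit the ensemble, then report the R^2 that score() computes on held-out data.
model = ExtraTreesRegressor(n_estimators=100, random_state=0)
model.fit(X_train, y_train)
print("R^2 on test set:", model.score(X_test, y_test))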

Example 1: dummie_columns_extra_trees

# Required import: from sklearn.ensemble import ExtraTreesRegressor [as alias]
# Or: from sklearn.ensemble.ExtraTreesRegressor import score [as alias]
def dummie_columns_extra_trees(train, test):
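    # Fit an ExtraTreesRegressor on every numeric column except the two price
    # targets, then score it and generate predictions on the held-out test frame.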
    from sklearn.ensemble import ExtraTreesRegressor
    print "-- {} --".format("Extremely Randomized Trees Regression using all but remarks")
    predicting_columns = list(train._get_numeric_data().columns.values)
    predicting_columns.remove("LISTPRICE")
    predicting_columns.remove("SOLDPRICE")
    rf = ExtraTreesRegressor(
        n_estimators=300, n_jobs=-1)
    rf.fit(train[predicting_columns], train["SOLDPRICE"])
    score = rf.score(test[predicting_columns], test["SOLDPRICE"])
    predictions = rf.predict(test[predicting_columns])
    sample_predictions(test, predictions)
    print "Accuracy: {}\n".format(score)
    return score, predictions
Author: CurleySamuel | Project: Thesis | Lines of code: 16 | Source file: first_pass.py

Example 2: simple_extremely_random_trees

# Required import: from sklearn.ensemble import ExtraTreesRegressor [as alias]
# Or: from sklearn.ensemble.ExtraTreesRegressor import score [as alias]
def simple_extremely_random_trees(data_train_x, data_test_x, data_train_y, data_test_y):
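    # Fit an ExtraTreesRegressor on the pre-split feature matrices, then report
    # the held-out R^2 along with 5-fold cross-validated scores on the test set.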
    from sklearn.ensemble import ExtraTreesRegressor
    from sklearn.model_selection import cross_val_score  # needed for the 5-fold scores below
    print "-- {} --".format("Extremely Randomized Trees Regression using all but remarks")
    rf = ExtraTreesRegressor(
        n_estimators=300,
        n_jobs=-1
    )
    rf.fit(data_train_x, data_train_y)
    sample_predictions(rf.predict(data_test_x), data_test_y)
    score = rf.score(data_test_x, data_test_y)
    cross_validated_scores = cross_val_score(
        rf, data_test_x, data_test_y, cv=5)
    print "MSE Accuracy: {}".format(score)
    print "MSE Across 5 Folds: {}".format(cross_validated_scores)
    print "95%% Confidence Interval: %0.3f (+/- %0.3f)\n" % (cross_validated_scores.mean(), cross_validated_scores.std() * 1.96)
Author: CurleySamuel | Project: Thesis | Lines of code: 17 | Source file: second_pass.py

Example 3: trainRegressorsAndSave

# Required import: from sklearn.ensemble import ExtraTreesRegressor [as alias]
# Or: from sklearn.ensemble.ExtraTreesRegressor import score [as alias]
def trainRegressorsAndSave(computeScore=False):
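    # For each database, train and cache an ExtraTreesRegressor unless a saved
    # model already exists; optionally reload it and report its score on the
    # data loaded from "<db>.csv".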
    for db in dbs:
        if (not os.path.exists("clfs/" + db)):
            clf = ExtraTreesRegressor(n_estimators=500, random_state=1, n_jobs=-1)
            saveTrainedClassifier(db, clf)
        elif (computeScore):
            clf = joblib.load("clfs/" + db)

        if (computeScore):
            print("Loading test data...")
            loaded = loadDB(db + ".csv")
            X_test = loaded[:, 0:-1]
            y_test = loaded[:, -1]

            print("Normalized score is {}".format(clf.score(X_test, y_test)))
            X_test = y_test = 0
Author: dtaralla | Project: hearthstone | Lines of code: 18 | Source file: hearthstone_utils.py

Example 4: GradientBoostingRegressor

# Required import: from sklearn.ensemble import ExtraTreesRegressor [as alias]
# Or: from sklearn.ensemble.ExtraTreesRegressor import score [as alias]
etr_y_predict = etr.predict(X_test)

# Train a GradientBoostingRegressor and predict on the test data; store the predictions in gbr_y_predict.
gbr = GradientBoostingRegressor()
gbr.fit(X_train, y_train)
gbr_y_predict = gbr.predict(X_test)

from sklearn.metrics import mean_absolute_error,mean_squared_error
# Evaluate the default RandomForestRegressor on the test set with R-squared, MSE, and MAE.
print('R-squared value of RandomForestRegressor:', rfr.score(X_test, y_test))
print('The mean squared error of RandomForestRegressor:', mean_squared_error(y_test, rfr_y_predict))
print('The mean absolute error of RandomForestRegressor:', mean_absolute_error(y_test, rfr_y_predict))


# Evaluate the default ExtraTreesRegressor on the test set with R-squared, MSE, and MAE.
print('R-squared value of ExtraTreesRegressor:', etr.score(X_test, y_test))
print('The mean squared error of ExtraTreesRegressor:', mean_squared_error(y_test, etr_y_predict))
print('The mean absolute error of ExtraTreesRegressor:', mean_absolute_error(y_test, etr_y_predict))

# Report how much each feature contributes to the trained extra-trees model's predictions.
print(list(zip(etr.feature_importances_, boston.feature_names)))
feature_importance = zip(etr.feature_importances_, boston.feature_names)
print(np.sort(list(feature_importance), axis=0))
# Evaluate the default GradientBoostingRegressor on the test set with R-squared, MSE, and MAE.
print('R-squared value of GradientBoostingRegressor:', gbr.score(X_test, y_test))
print('The mean squared error of GradientBoostingRegressor:', mean_squared_error(y_test, gbr_y_predict))
print('The mean absolute error of GradientBoostingRegressor:', mean_absolute_error(y_test, gbr_y_predict))


# Many practitioners building commercial analytics systems favor ensemble models,
# and often use their performance as the baseline against which newly designed models are compared.
Author: yunsong86 | Project: book1 | Lines of code: 33 | Source file: rf_er_gbr.py

Example 5: open

# Required import: from sklearn.ensemble import ExtraTreesRegressor [as alias]
# Or: from sklearn.ensemble.ExtraTreesRegressor import score [as alias]
with open('model.txt','wt') as f:
  print >> f, xfr
with open('estimators_.txt','wt') as f:
  #f.write(xfr.estimators_)
  print >> f, xfr.estimators_
with open('feature_importances_.txt','wt') as f:
  print >> f, xfr.feature_importances_
#with open('oob_score_.txt','wt') as f:
  #print >> f, xfr.oob_score_
#with open('oob_prediction_.txt','wt') as f:
  #print >> f, xfr.oob_prediction_

predict_loc_regres = xfr.predict(data_test)
if 'target_test' in locals():
  score = xfr.score(data_test,target_test)
  gn = normalized_weighted_gini(target_test,predict_loc_regres,data_test.var11)
end = time.clock()

#outdf = pd.DataFrame([data_test.ix[:,'id']])
if 'target_test' in locals():
  target_test.columns = ['true_target']
  outdf = pd.concat([data_test.ix[:,'id'].astype(int),pd.DataFrame(predict_loc_regres,columns=['target']),target_test],axis=1)
else:
  outdf = pd.concat([data_test.ix[:,'id'].astype(int),pd.DataFrame(predict_loc_regres,columns=['target'])],axis=1)

out_filename = (os.path.splitext(os.path.basename(sys.argv[1]))[0]+"_predict.csv")
outdf.to_csv(out_filename,index=0)
if 'target_test' in locals():
  print out_filename, score , gn
else:
Author: kirilligum | Project: cdips-fire | Lines of code: 32 | Source file: splitcheckxfc.py

Example 6: print

# Required import: from sklearn.ensemble import ExtraTreesRegressor [as alias]
# Or: from sklearn.ensemble.ExtraTreesRegressor import score [as alias]
    score = gbr.score(X_test, Y_test)
    print('Problem 2 part 4 Test score : {}'.format(score))
    
    
    
    etr = ExtraTreesRegressor(n_estimators=100, max_depth=8,min_samples_leaf=2 )
    
    etr.fit(X_train, Y_train)
    
    Y_etr = etr.predict(X_test)    
    score = r2_score(Y_test.values, Y_etr)
    print('Problem 2 part 5a Test score : {}'.format(score))
    

    score = etr.score(X_test, Y_test)
    print('Problem 2 part 5b Test score : {}'.format(score))
    



if(runProblem3):
    from keras.models import Sequential
    from keras.layers.core import Activation, Dense, Dropout
    from keras.callbacks import EarlyStopping


    #X = dataset[['Feature_5', 'Feature_7','Ret_MinusTwo', 'Ret_MinusOne']+['Ret_{}'.format(i) for i in range(2,121)]]
    #Y = dataset['Ret_MinusZero']

    #X['Feature_5'] = (X['Feature_5'] - np.mean(X['Feature_5']))/np.std(X['Feature_5'])
Author: stavka | Project: ml-hw | Lines of code: 32 | Source file: final_project.py

Example 7: __init__

# Required import: from sklearn.ensemble import ExtraTreesRegressor [as alias]
# Or: from sklearn.ensemble.ExtraTreesRegressor import score [as alias]
class mixmodels:
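    # Ensemble of ensembles: fit() trains four sub-models (GBR and ExtraTrees on
    # importance-selected features, plus GBR and ExtraTrees restricted to the 'var'
    # columns), predict() averages their predictions, and score()/gini() average
    # their respective metrics across the four sub-models.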
    def __init__(self,nest=10):
        self.nest = nest
    def fit(self,data_train,target):
        self.target_train = target
        self.catcol = data_train.filter(like='var').columns.tolist()
        #start_gbr_tr = time.clock()
        self.gbr = GradientBoostingRegressor(n_estimators =self.nest,max_depth=7)
        self.gbr.fit(data_train,self.target_train)
        self.transformed_train_gbr = self.gbr.transform(data_train,threshold="0.35*mean")
        self.gbr_tr_fit = GradientBoostingRegressor(n_estimators =self.nest,max_depth=7)
        self.gbr_tr_fit.fit(self.transformed_train_gbr,self.target_train)
        #end_gbr_tr = time.clock()
        #print >> log, "time_gbr_tr = ", end_gbr_tr-start_gbr_tr

        #start_xfr_tr = time.clock()
        self.xfr= ExtraTreesRegressor(n_estimators =self.nest,max_depth=7)
        self.xfr.fit(data_train,self.target_train)
        self.transformed_train_xfr = self.xfr.transform(data_train,threshold="0.35*mean")
        self.xfr_tr_fit = ExtraTreesRegressor(n_estimators =self.nest,max_depth=7)
        self.xfr_tr_fit.fit(self.transformed_train_xfr,self.target_train)
        #end_xfr_tr = time.clock()
        #print >> log, "time_xfr_tr = ", end_xfr_tr-start_xfr_tr

        #start_gbr_cat = time.clock()
        self.gbr_cat_fit = GradientBoostingRegressor(n_estimators =self.nest,max_depth=7)
        self.gbr_cat_fit.fit(data_train[self.catcol],self.target_train)
        #end_gbr_cat = time.clock()
        #print >> log, "time_gbr_cat = ", end_gbr_cat-start_gbr_cat

        #start_xfr_cat = time.clock()
        self.xfr_cat_fit = ExtraTreesRegressor(n_estimators =self.nest,max_depth=7)
        self.xfr_cat_fit.fit(data_train[self.catcol],self.target_train)
        #end_xfr_cat = time.clock()
        #print >> log, "time_xfr_cat = ", end_xfr_cat-start_xfr_cat
        return self

    def predict(self,data_test):
        mix_test_list = []

        transformed_test_gbr = self.gbr.transform(data_test,threshold="0.35*mean")
        mix_test_list += [pd.Series(self.gbr_tr_fit.predict(transformed_test_gbr))]

        transformed_test_xfr = self.xfr.transform(data_test,threshold="0.35*mean")
        mix_test_list += [pd.Series(self.xfr_tr_fit.predict(transformed_test_xfr))]

        mix_test_list += [pd.Series(self.gbr_cat_fit.predict(data_test[self.catcol]))]

        mix_test_list += [pd.Series(self.xfr_cat_fit.predict(data_test[self.catcol]))]

        mix_test = pd.concat(mix_test_list,1)

        mix_ave = mix_test.mean(1)
        mix_ave.name='target'

        return mix_ave
    def score(self,data_test,target_test):
        total_score = []
        transformed_test_gbr = self.gbr.transform(data_test,threshold="0.35*mean")
        total_score += [ self.gbr_tr_fit.score(transformed_test_gbr,target_test) ]
        transformed_test_xfr = self.xfr.transform(data_test,threshold="0.35*mean")
        total_score += [ self.xfr_tr_fit.score(transformed_test_xfr,target_test) ]
        total_score += [ self.gbr_cat_fit.score(data_test[self.catcol],target_test) ]
        total_score += [ self.xfr_cat_fit.score(data_test[self.catcol],target_test) ]
        return sum(total_score)/float(len(total_score))

    def gini(self,data_test,target_test):
        weight = data_test.var11
        gns = []
        transformed_test_gbr = self.gbr.transform(data_test,threshold="0.35*mean")
        gns += [normalized_weighted_gini(target_test.tolist(),self.gbr_tr_fit.predict(transformed_test_gbr).tolist(),weight.tolist()) ]
        transformed_test_xfr = self.xfr.transform(data_test,threshold="0.35*mean")
        gns += [normalized_weighted_gini(target_test.tolist(),self.xfr_tr_fit.predict(transformed_test_xfr).tolist(),weight.tolist()) ]
        gns += [normalized_weighted_gini(target_test.tolist(),self.gbr_cat_fit.predict(data_test[self.catcol]).tolist(),weight.tolist()) ]
        gns += [normalized_weighted_gini(target_test.tolist(),self.xfr_cat_fit.predict(data_test[self.catcol]).tolist(),weight.tolist()) ]
        return sum(gns)/float(len(gns))
Author: kirilligum | Project: cdips-fire | Lines of code: 78 | Source file: cvbari.py

Example 8: timeit

# Required import: from sklearn.ensemble import ExtraTreesRegressor [as alias]
# Or: from sklearn.ensemble.ExtraTreesRegressor import score [as alias]
X = scaler.transform(X)

timeit("Standardizing the data")
'''

from sklearn.ensemble import ExtraTreesRegressor
#from sklearn.neighbors import KNeighborsRegressor

clf = ExtraTreesRegressor(n_estimators=10)
#clf = KNeighborsRegressor()

clf.fit(X_train, Y_train)

timeit("Training")

print "Validation score: " + str(clf.score(X_test, Y_test))

timeit("Validation")

#score = 0.
#wrong = []
#for i, item in enumerate(X_test):
#	if unconvert(clf.predict(item)[0]) == unconvert(Y_test[i]):
#		score += 1
#	else:
#		wrong.append((unconvert(clf.predict(item)[0]),unconvert(Y_test[i])))
#score /= len(X_test)
#print "Manual validation score: " + str(score)

#timeit("Manual validation")
Author: c0d3rman | Project: Conner-SpellNet | Lines of code: 32 | Source file: regression.py


Note: The sklearn.ensemble.ExtraTreesRegressor.score examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.