This article collects typical usage examples of the Python method sklearn.ensemble.GradientBoostingRegressor.staged_decision_function. If you have been wondering what exactly GradientBoostingRegressor.staged_decision_function does, how to call it, or where to find working examples of it, the curated code samples here should help. You can also explore further usage examples of the containing class, sklearn.ensemble.GradientBoostingRegressor.
The following shows 4 code examples of the GradientBoostingRegressor.staged_decision_function method, sorted by popularity by default.
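Before the full examples, here is a minimal sketch of the call pattern. The synthetic data and hyperparameters are illustrative assumptions, not taken from the examples below; note also that staged_decision_function was deprecated for regressors in scikit-learn 0.19, where staged_predict became the equivalent:

# Minimal illustrative sketch (synthetic data; not from the examples below).
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error

rng = np.random.RandomState(0)
X = rng.rand(200, 5)
y = 3.0 * X[:, 0] + 0.1 * rng.randn(200)

gbr = GradientBoostingRegressor(n_estimators=50, learning_rate=0.1).fit(X, y)

# On scikit-learn < 0.19 this loop could use gbr.staged_decision_function(X);
# staged_predict yields one prediction array per boosting stage.
for i, y_pred in enumerate(gbr.staged_predict(X)):
    mse = mean_squared_error(y, y_pred)  # error after i + 1 boosting stages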
Example 1: testingGBM
# Required import: from sklearn.ensemble import GradientBoostingRegressor [as alias]
# Or: from sklearn.ensemble.GradientBoostingRegressor import staged_decision_function [as alias]
# Imports assumed by this example (not shown in the original snippet):
# import math
# import numpy as np
# import matplotlib.pyplot as plt
# from sklearn.linear_model import Ridge
# from sklearn.metrics import mean_squared_error
def testingGBM(X_train, Y_train, X_test, Y_test):
    params = {'verbose': 2, 'n_estimators': 100, 'max_depth': 50, 'min_samples_leaf': 20,
              'learning_rate': 0.1, 'loss': 'ls', 'max_features': None}
    # Note: test_init is created but never passed to the model below (and
    # Ridge's normalize parameter was removed in scikit-learn 1.2).
    test_init = Ridge(alpha=0.1, normalize=True, fit_intercept=True)
    gbm2 = GradientBoostingRegressor(**params)
    gbm2.fit(X_train, Y_train["Ca"])
    yhat_gbm = gbm2.predict(X_test)
    # The original computed these without keeping the results; assign and print.
    mse = mean_squared_error(Y_test["Ca"], yhat_gbm)
    rmse = math.sqrt(mse)
    print("Test RMSE: %.4f" % rmse)

    # Compute the test-set error after each boosting stage.
    # staged_decision_function was deprecated for regressors in scikit-learn
    # 0.19; on modern versions use staged_predict instead.
    test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
    for i, y_pred in enumerate(gbm2.staged_decision_function(X_test)):
        test_score[i] = mean_squared_error(Y_test["Ca"], y_pred)

    plt.figure(figsize=(12, 6))
    plt.subplot(1, 2, 1)
    plt.title('Deviance')
    plt.plot(np.arange(params['n_estimators']) + 1, gbm2.train_score_, 'b-',
             label='Training Set Deviance')
    plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
             label='Test Set Deviance')
    plt.legend(loc='upper right')
    plt.xlabel('Boosting Iterations')
    plt.ylabel('Deviance')
    plt.show()
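As a hedged follow-up to Example 1 (illustrative only; the refit below is not part of the original code, and the names refer to the snippet above), the staged test error can be used to pick the number of boosting iterations:

# Illustrative sketch: choose the stage with the lowest test MSE and
# refit a final model with that many estimators.
best_n = int(np.argmin(test_score)) + 1
final_params = dict(params, n_estimators=best_n)
gbm_final = GradientBoostingRegressor(**final_params).fit(X_train, Y_train["Ca"])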
Example 2: GradBoost
# Required import: from sklearn.ensemble import GradientBoostingRegressor [as alias]
# Or: from sklearn.ensemble.GradientBoostingRegressor import staged_decision_function [as alias]
# Assumes: import numpy as np; import pylab as pl;
# from sklearn.metrics import mean_absolute_error
def GradBoost(X, y):
    # Load dataset (kept from the original, commented out):
    # boston = datasets.load_boston()
    # X, y = shuffle(boston.data, boston.target, random_state=13)
    X = X.astype(np.float32)
    y = y.astype(np.float32)

    # 70/30 sequential train/test split.
    offset = int(X.shape[0] * 0.7)
    train_X, train_y = X[:offset], y[:offset]
    test_X, test_y = X[offset:], y[offset:]

    # Model building.
    params = {'n_estimators': 1500, 'max_depth': 4, 'min_samples_split': 2,
              'learning_rate': 0.1, 'loss': 'ls'}
    cls = GradientBoostingRegressor(**params)
    cls.fit(train_X, train_y)
    MAE = mean_absolute_error(test_y, cls.predict(test_X))
    # print("MAE: %f" % MAE)

    # Plot training deviance: evaluate the model's loss on the test set
    # after each boosting stage.
    test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
    for i, pred in enumerate(cls.staged_decision_function(test_X)):
        test_score[i] = cls.loss_(test_y, pred)
    pl.figure(figsize=(12, 6))
    pl.subplot(1, 2, 1)
    pl.plot(np.arange(params['n_estimators']) + 1, cls.train_score_, 'b-',
            label='Training Set Deviance')
    pl.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
            label='Test Set Deviance')

    # Plot feature importance, rescaled so the largest importance is 100.
    feature_importance = cls.feature_importances_
    feature_importance = (feature_importance / feature_importance.max()) * 100.0
    sorted_idx = np.argsort(feature_importance)
    pos = np.arange(sorted_idx.shape[0]) + .5
    pl.subplot(1, 2, 2)
    pl.barh(pos, feature_importance[sorted_idx], align='center')
    # pl.yticks(pos, boston.feature_names[sorted_idx])
    pl.xlabel("Relative importance")
    pl.title("Variable importance")
    pl.show()
    return MAE, cls
Author: colinsongf, Project: Solar-Energy-Prediction-Contest, Lines: 40, Source file: Gradient_Boosting(with+sklearn).py
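A minimal usage sketch for GradBoost (the data below is synthetic and purely illustrative):

# Hypothetical invocation of GradBoost; X_demo/y_demo are made-up data.
import numpy as np
rng = np.random.RandomState(42)
X_demo = rng.rand(500, 8)
y_demo = X_demo @ rng.rand(8) + 0.05 * rng.randn(500)
mae, model = GradBoost(X_demo, y_demo)
print("Test MAE: %.4f" % mae)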
Example 3: RandomForestRegressor
# Required import: from sklearn.ensemble import GradientBoostingRegressor [as alias]
# Or: from sklearn.ensemble.GradientBoostingRegressor import staged_decision_function [as alias]
# Script fragment: svr and whichones are defined earlier in the original file.
svr.fit(train_X[:, whichones[0]], train_Y)
#%%
# Try bagging/boosting etc.
# rfr = RandomForestRegressor(n_estimators=30, n_jobs=2)
# rfr.fit(train_X, train_Y)
gbr = GradientBoostingRegressor(loss='ls', n_estimators=100, learning_rate=0.1,
                                max_leaf_nodes=9, verbose=True, subsample=0.5)
gbr.fit(train_X, train_Y)
# Per-stage test deviance. Note that loss_ expects (y_true, y_pred); the
# original passed the arguments reversed, which is corrected here.
test_deviance = [gbr.loss_(test_Y, y_pred)
                 for y_pred in gbr.staged_decision_function(test_X)]
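# --- Added illustrative sketch (not part of the original fragment): plot the
# --- per-stage test deviance computed above to see where overfitting begins.
# --- Assumes numpy as np and matplotlib are available.
import matplotlib.pyplot as plt
plt.plot(np.arange(1, len(test_deviance) + 1), test_deviance, 'r-',
         label='Test Set Deviance')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
plt.legend(loc='upper right')
plt.show()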
#%%
####################################
# DECOMPOSITION STARTS HERE
####################################
# Try approximate PCA, probabilistic PCA, sparse PCA, and kernel PCA,
# with both linear and non-linear methods.
# Try the decomposition on the unlagged audio, then re-lag it.
# No normalization of X is needed here.
# For 33 features:
nfeat = 33
unlagged_stimuli = recover_stimuli(X, nfeat)  # recover_stimuli is defined elsewhere in the original script
nstims = [stim.shape[1] for stim in unlagged_stimuli]
Example 4: GradientBoostingRegressor
# Required import: from sklearn.ensemble import GradientBoostingRegressor [as alias]
# Or: from sklearn.ensemble.GradientBoostingRegressor import staged_decision_function [as alias]
# Assumes numpy (np), matplotlib.pyplot (plt), mean_squared_error and
# r2_score are imported, and that samplesTrain/classifiersTrain and
# samplesTest/classifiersTest are built earlier in the original script.
# (np.float is the deprecated alias for float; it was removed in NumPy 1.24.)
samplesTest = np.array(samplesTest, np.float)
classifiersTest = np.array(classifiersTest, np.float)
params = {'n_estimators': 110, 'max_depth': 3, 'learning_rate': 0.05, 'loss': 'huber', 'alpha': 0.95}
clf = GradientBoostingRegressor(**params).fit(samplesTrain, classifiersTrain)
mse = mean_squared_error(classifiersTest, clf.predict(samplesTest))
r2 = r2_score(classifiersTest, clf.predict(samplesTest))
print("MSE: %.4f" % mse)
print("R2: %.4f" % r2)

# Compute test set deviance after each boosting stage. The original loop
# variable was named classifiersTrain, silently overwriting the training
# labels; it is renamed y_staged here.
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_staged in enumerate(clf.staged_decision_function(samplesTest)):
    test_score[i] = clf.loss_(classifiersTest, y_staged)

plt.figure(figsize=(12, 6))
plt.subplot(1, 1, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-', label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-', label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')

feature_importance = clf.feature_importances_
# Make importances relative to the maximum importance.
feature_importance = 100.0 * (feature_importance / feature_importance.max())
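Example 4 ends mid-plot. Judging from Example 2, the likely continuation is a horizontal bar chart of the relative importances; the sketch below is an assumed reconstruction, not the original author's code (feature names are omitted because none are given):

# Assumed continuation: visualize the relative importances computed above.
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + 0.5
plt.figure(figsize=(6, 6))
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()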