

Python GradientBoostingRegressor.loss_ Method Code Examples

This article collects typical usage examples of the Python method sklearn.ensemble.GradientBoostingRegressor.loss_, drawn from open-source projects. If you are wondering what GradientBoostingRegressor.loss_ does or how to use it, the curated code examples below should help. You can also explore further usage examples of the containing class, sklearn.ensemble.GradientBoostingRegressor.


Below are 5 code examples of the GradientBoostingRegressor.loss_ method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
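Before the examples, it may help to know what loss_ actually is: on a fitted estimator it is the loss-function object itself (for example LeastSquaresError when loss='ls'), and calling it as model.loss_(y_true, y_pred) returns the average deviance. Note that the examples below target older scikit-learn releases; in recent versions the fitted loss_ attribute has been deprecated and removed. A minimal sketch, using a synthetic dataset from make_regression (not part of any example below):

import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor

# Synthetic data, for illustration only
X, y = make_regression(n_samples=500, n_features=10, noise=10.0, random_state=0)

model = GradientBoostingRegressor(n_estimators=100, loss='ls')
model.fit(X, y)

# loss_ is the fitted loss object; calling it computes the deviance
# between the true targets and a set of predictions
deviance = model.loss_(y, model.predict(X))
print("Training deviance: %.4f" % deviance)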

Example 1: GradBoost

# Required import: from sklearn.ensemble import GradientBoostingRegressor [as alias]
# Or: from sklearn.ensemble.GradientBoostingRegressor import loss_ [as alias]
import numpy as np
import pylab as pl
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_absolute_error

def GradBoost(X, y):
    # Split into 70% train / 30% test
    X = X.astype(np.float32)
    y = y.astype(np.float32)
    offset = int(X.shape[0] * 0.7)
    train_X, train_y = X[:offset], y[:offset]
    test_X, test_y = X[offset:], y[offset:]

    # Model building
    params = {'n_estimators': 1500, 'max_depth': 4, 'min_samples_split': 2,
              'learning_rate': 0.1, 'loss': 'ls'}
    cls = GradientBoostingRegressor(**params)
    cls.fit(train_X, train_y)
    MAE = mean_absolute_error(test_y, cls.predict(test_X))

    # Plot training/test deviance against boosting iterations
    test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
    for i, pred in enumerate(cls.staged_decision_function(test_X)):
        test_score[i] = cls.loss_(test_y, pred)
    pl.figure(figsize=(12, 6))
    pl.subplot(1, 2, 1)
    pl.plot(np.arange(params['n_estimators']) + 1, cls.train_score_, 'b-',
            label='Training Set Deviance')
    pl.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
            label='Test Set Deviance')

    # Plot feature importance, scaled relative to the maximum
    feature_importance = cls.feature_importances_
    feature_importance = (feature_importance / feature_importance.max()) * 100.0
    sorted_idx = np.argsort(feature_importance)
    pos = np.arange(sorted_idx.shape[0]) + .5
    pl.subplot(1, 2, 2)
    pl.barh(pos, feature_importance[sorted_idx], align='center')
    pl.xlabel("Relative importance")
    pl.title("Variable importance")
    pl.show()
    return MAE, cls
Developer: colinsongf | Project: Solar-Energy-Prediction-Contest | Lines: 40 | Source: Gradient_Boosting(with+sklearn).py
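The original function loaded the Boston housing dataset (commented out in the source); a minimal usage sketch, assuming make_regression as a hypothetical stand-in dataset:

from sklearn.datasets import make_regression

# Hypothetical stand-in for the Boston housing data
X, y = make_regression(n_samples=1000, n_features=13, noise=15.0, random_state=13)
mae, model = GradBoost(X, y)
print("MAE: %.4f" % mae)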

Example 2: RandomForestRegressor

# Required import: from sklearn.ensemble import GradientBoostingRegressor [as alias]
# Or: from sklearn.ensemble.GradientBoostingRegressor import loss_ [as alias]
# svr and whichones are defined earlier in the source file
svr.fit(train_X[:, whichones[0]], train_Y)

#%%
# Try bagging/boosting etc.
#rfr = RandomForestRegressor(n_estimators=30, n_jobs=2)
#rfr.fit(train_X, train_Y)

gbr = GradientBoostingRegressor(loss='ls', n_estimators=100, learning_rate=0.1,
                                max_leaf_nodes=9, verbose=True, subsample=0.5)

gbr.fit(train_X, train_Y)

# loss_ expects the true targets first, then the predictions
test_deviance = [gbr.loss_(test_Y, y_pred)
                 for y_pred in gbr.staged_decision_function(test_X)]

#%%
####################################
# DECOMPOSITION STARTS HERE
####################################
# Try approximate PCA, probabilistic PCA, sparse PCA, kernel PCA
# with linear and non-linear methods.

# Try the decomposition on the unlagged audio, then re-lag it;
# no normalization of X is needed here.

# For 33 features; recover_stimuli is defined elsewhere in the project
nfeat = 33
unlagged_stimuli = recover_stimuli(X, nfeat)
nstims = [stim.shape[1] for stim in unlagged_stimuli]
Developer: mjboos | Project: auditoryBSC | Lines: 32 | Source: RealDataAnalysis.py
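The staged deviance list above is useful for choosing an effective number of boosting iterations; a minimal sketch, assuming the test_deviance list computed in the snippet:

import numpy as np

# The iteration with the lowest held-out deviance suggests a good
# stopping point for n_estimators
best_iter = int(np.argmin(test_deviance)) + 1
print("Best number of boosting iterations: %d" % best_iter)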

Example 3: int

# Required import: from sklearn.ensemble import GradientBoostingRegressor [as alias]
# Or: from sklearn.ensemble.GradientBoostingRegressor import loss_ [as alias]
offset = int(x_train.shape[0] * 0.9)
x_train_fit, y_train_fit = x_train[:offset], y_train[:offset]
x_test_fit, y_test_fit = x_train[offset:], y_train[offset:]

clf = GradientBoostingRegressor(n_estimators=700, max_depth=6,
                                learning_rate=0.01, subsample=1.0)

clf.fit(x_train_fit, y_train_fit)

# fmean_squared_error is a custom RMSE scorer defined elsewhere in the source file
rmse = fmean_squared_error(y_test_fit, clf.predict(x_test_fit))

print("RMSE: %.4f" % rmse)

# Plotting: with learning rate 0.01, see which n_estimators to choose,
# then grid-search max_depth, then do bagging on top of that
test_score = np.zeros((700,), dtype=np.float64)

for i, y_pred in enumerate(clf.staged_predict(x_test_fit)):
    test_score[i] = clf.loss_(y_test_fit, y_pred)

plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(700) + 1, clf.train_score_, 'b-',
         label='Training Set Deviance')
plt.plot(np.arange(700) + 1, test_score, 'r-',
         label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
plt.show()

from sklearn import pipeline, grid_search
import time
Developer: arslanoqads | Project: PyMe | Lines: 33 | Source: counting_feature+jcd_dice_distance+modeling.py
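The comment in the snippet outlines the tuning plan: fix a small learning rate, read a good n_estimators off the deviance plot, then grid-search max_depth. A minimal sketch of that grid-search step using the deprecated sklearn.grid_search API the snippet imports; the parameter grid is an illustrative assumption (in modern scikit-learn, use sklearn.model_selection.GridSearchCV instead):

# Illustrative grid; tune around the depth the deviance plot suggests
param_grid = {'max_depth': [4, 6, 8]}
gbr = GradientBoostingRegressor(n_estimators=700, learning_rate=0.01, subsample=1.0)
gs = grid_search.GridSearchCV(gbr, param_grid, cv=3)
gs.fit(x_train_fit, y_train_fit)
print(gs.best_params_)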

Example 4: GradientBoostingRegressor

# Required import: from sklearn.ensemble import GradientBoostingRegressor [as alias]
# Or: from sklearn.ensemble.GradientBoostingRegressor import loss_ [as alias]
# np.float is deprecated in newer NumPy; use np.float64 explicitly
classifiersTest = np.array(classifiersTest, np.float64)

params = {'n_estimators': 110, 'max_depth': 3, 'learning_rate': 0.05, 'loss': 'huber', 'alpha': 0.95}
clf = GradientBoostingRegressor(**params).fit(samplesTrain, classifiersTrain)

mse = mean_squared_error(classifiersTest, clf.predict(samplesTest))
r2 = r2_score(classifiersTest, clf.predict(samplesTest))

print("MSE: %.4f" % mse)
print("R2: %.4f" % r2)

# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)

# Use a distinct loop variable so the training labels are not overwritten
for i, y_pred in enumerate(clf.staged_decision_function(samplesTest)):
    test_score[i] = clf.loss_(classifiersTest, y_pred)

plt.figure(figsize=(12, 6))
plt.subplot(1, 1, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-', label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-', label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')


feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
Developer: IdeaHaven | Project: quora-challenge | Lines: 33 | Source: gbmApproach2.py
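The excerpt ends right after sorting the importances; a plausible continuation that draws the bar chart, mirroring the standard scikit-learn feature-importance plot (a sketch, not part of the original file):

# Continuation sketch: plot the relative importances computed above
pos = np.arange(sorted_idx.shape[0]) + .5
plt.figure(figsize=(6, 6))
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()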

Example 5: GradientBoostingRegressor

# Required import: from sklearn.ensemble import GradientBoostingRegressor [as alias]
# Or: from sklearn.ensemble.GradientBoostingRegressor import loss_ [as alias]
# Set the GBM parameters (these should mirror the R source as closely as
# possible, but the exact mapping is unclear)
params = {'n_estimators': 6000, 'max_depth': 1, 'learning_rate': 0.01, 'warm_start': False, 'loss': 'lad'}

# Model fit
gbm0 = GradientBoostingRegressor(**params)
gbm0.fit(x_train, y_train)

# Create the score list (one entry per estimator)
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)

best_score = np.inf
bestY_pred = []

# While iterating, keep the staged prediction with the lowest test
# deviance; that one should be the best
for i, y_pred in enumerate(gbm0.staged_predict(x_test)):
    test_score[i] = gbm0.loss_(y_test, y_pred)
    if test_score[i] < best_score:
        best_score = test_score[i]
        bestY_pred = y_pred

# bestY_pred is the best prediction; pass it on as the prediction set
print(bestY_pred)


########################################################################
# From here on: optional plotting

plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, gbm0.train_score_, 'b-', label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-', label='Test Set Deviance')
Developer: tj0822 | Project: Python | Lines: 32 | Source: gbmConvert.py
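The excerpt cuts off before the figure is finished; a likely completion, following the same pattern as the plots in the earlier examples (a sketch, not part of the original file):

plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
plt.show()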


Note: The sklearn.ensemble.GradientBoostingRegressor.loss_ method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.