本文整理匯總了Python中sklearn.ensemble.AdaBoostRegressor方法的典型用法代碼示例。如果您正苦於以下問題:Python ensemble.AdaBoostRegressor方法的具體用法?Python ensemble.AdaBoostRegressor怎麽用?Python ensemble.AdaBoostRegressor使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類sklearn.ensemble
的用法示例。
在下文中一共展示了ensemble.AdaBoostRegressor方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: Train
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def Train(data, modelcount, censhu, yanzhgdata):
    """Fit an AdaBoost regression ensemble and report train/validation MSE.

    Parameters
    ----------
    data : ndarray whose last column is the target (training split).
    modelcount : number of boosting rounds (``n_estimators``).
    censhu : max depth of each decision-tree base learner.
    yanzhgdata : validation array with the same column layout as ``data``.

    Returns
    -------
    tuple : (training MSE, validation MSE); both are also printed.
    """
    regressor = AdaBoostRegressor(
        DecisionTreeRegressor(max_depth=censhu),
        n_estimators=modelcount,
        learning_rate=0.8,
    )
    features, target = data[:, :-1], data[:, -1]
    regressor.fit(features, target)
    # Error on the data the model was fitted on.
    fit_error = mse(target, regressor.predict(features))
    # Error on the held-out validation split.
    val_error = mse(yanzhgdata[:, -1], regressor.predict(yanzhgdata[:, :-1]))
    print(fit_error, val_error)
    return fit_error, val_error
# 最終確定組合的函數
示例2: test_gridsearch
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def test_gridsearch():
    """Base trees of both AdaBoost estimators can be grid-searched."""
    # Classification: search estimator count, base-tree depth and algorithm.
    clf = GridSearchCV(
        AdaBoostClassifier(base_estimator=DecisionTreeClassifier()),
        {'n_estimators': (1, 2),
         'base_estimator__max_depth': (1, 2),
         'algorithm': ('SAMME', 'SAMME.R')},
    )
    clf.fit(iris.data, iris.target)
    # Regression: same search without the algorithm parameter.
    clf = GridSearchCV(
        AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
                          random_state=0),
        {'n_estimators': (1, 2),
         'base_estimator__max_depth': (1, 2)},
    )
    clf.fit(boston.data, boston.target)
示例3: test_sample_weight_adaboost_regressor
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def test_sample_weight_adaboost_regressor():
    """AdaBoostRegressor must accept base estimators whose ``fit`` takes no
    ``sample_weight`` argument: the random weighted sampling is done
    internally in AdaBoostRegressor's ``_boost`` method.
    """
    class DummyEstimator(BaseEstimator):
        # Minimal regressor: ignores the data and predicts all zeros.
        def fit(self, X, y):
            pass

        def predict(self, X):
            return np.zeros(X.shape[0])

    boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
    boost.fit(X, y_regr)
    assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
示例4: test_multidimensional_X
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def test_multidimensional_X():
    """AdaBoost estimators accept n-dimensional data matrices, provided the
    base estimator can handle them."""
    from sklearn.dummy import DummyClassifier, DummyRegressor

    rng = np.random.RandomState(0)
    # NOTE: draw order (randn, choice, randn) is significant for the seed.
    features = rng.randn(50, 3, 3)
    labels = rng.choice([0, 1], 50)
    targets = rng.randn(50)

    clf = AdaBoostClassifier(DummyClassifier(strategy='most_frequent'))
    clf.fit(features, labels)
    clf.predict(features)
    clf.predict_proba(features)

    reg = AdaBoostRegressor(DummyRegressor())
    reg.fit(features, targets)
    reg.predict(features)
示例5: run_sklearn
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def run_sklearn():
    """Train a panel of scikit-learn regressors through jhkaggle's
    TrainSKLearn wrapper and print the total elapsed wall-clock time.
    """
    # BUG FIX: this is a regression run, but the original list used
    # ExtraTreesClassifier, which cannot fit a continuous target.
    from sklearn.ensemble import ExtraTreesRegressor

    n_trees = 100  # NOTE(review): unused; kept for backward compatibility
    n_folds = 3    # NOTE(review): unused; kept for backward compatibility
    # https://www.analyticsvidhya.com/blog/2015/06/tuning-random-forest-model/
    alg_list = [
        ['lreg', LinearRegression()],
        ['rforest', RandomForestRegressor(n_estimators=1000, n_jobs=-1, max_depth=3)],
        ['extree', ExtraTreesRegressor(n_estimators=1000, max_depth=2)],
        ['adaboost', AdaBoostRegressor(base_estimator=None, n_estimators=600, learning_rate=1.0)],
        ['knn', sklearn.neighbors.KNeighborsRegressor(n_neighbors=5)],
    ]
    start_time = time.time()
    for name, alg in alg_list:
        train = jhkaggle.train_sklearn.TrainSKLearn("1", name, alg, False)
        train.run()
        train = None  # release the trainer before the next fit
    elapsed_time = time.time() - start_time
    print("Elapsed time: {}".format(jhkaggle.util.hms_string(elapsed_time)))
示例6: sample_1031_4
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def sample_1031_4():
    """
    10.3.1_4 Predict the stock price with ensemble learning:
    AdaBoost versus RandomForest regression.
    :return:
    """
    (train_x, train_y_regress, train_y_classification, pig_three_feature,
     test_x, test_y_regress, test_y_classification,
     kl_another_word_feature_test) = sample_1031_1()

    # AdaBoost regression run.
    from sklearn.ensemble import AdaBoostRegressor
    regress_process(AdaBoostRegressor(n_estimators=100),
                    train_x, train_y_regress, test_x, test_y_regress)
    plt.show()

    # RandomForest regression run.
    from sklearn.ensemble import RandomForestRegressor
    regress_process(RandomForestRegressor(n_estimators=100),
                    train_x, train_y_regress, test_x, test_y_regress)
    plt.show()
示例7: test_sample_weight_adaboost_regressor
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def test_sample_weight_adaboost_regressor():
    """
    AdaBoostRegressor works even when the base estimator's ``fit`` has no
    ``sample_weight`` parameter; the weighted resampling happens inside
    AdaBoostRegressor's ``_boost`` method.
    """
    class DummyEstimator(BaseEstimator):
        def fit(self, X, y):  # training data is ignored entirely
            pass

        def predict(self, X):  # constant zero prediction
            return np.zeros(X.shape[0])

    model = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
    model.fit(X, y_regr)
    n_weights = len(model.estimator_weights_)
    n_errors = len(model.estimator_errors_)
    assert_equal(n_weights, n_errors)
示例8: Adaboost_First
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def Adaboost_First(self, data, max_depth=5, n_estimators=320):
    """First-layer AdaBoost model of the stacking pipeline.

    Fits on ``data['train']`` (last column = target), predicts on the
    train/test/predict splits, appends the test and predict predictions to
    the stacking buffers, records per-split RMSE under the 'AdaBoost' key,
    and captures the true targets of the test and predict splits.
    """
    booster = AdaBoostRegressor(DecisionTreeRegressor(max_depth=max_depth),
                                n_estimators=n_estimators, learning_rate=0.8)
    booster.fit(data['train'][:, :-1], data['train'][:, -1])

    # Predictions per split; note that validation ('test') and prediction
    # results are stored differently from the training ones.
    train_pred = booster.predict(data['train'][:, :-1])
    test_pred = booster.predict(data['test'][:, :-1])
    final_pred = booster.predict(data['predict'][:, :-1])
    self.yanzhneg_pr.append(test_pred)
    self.predi.append(final_pred)

    # After each fold, record the train/validation/prediction errors.
    self.error_dict['AdaBoost'] = [
        self.RMSE(train_pred, data['train'][:, -1]),
        self.RMSE(test_pred, data['test'][:, -1]),
        self.RMSE(final_pred, data['predict'][:, -1]),
    ]

    # True outputs of the validation and prediction splits.
    self.yanzhneg_real = data['test'][:, -1]
    self.preal = data['predict'][:, -1]
    return print('1層中的AdaBoost運行完畢')
# GBDT
示例9: recspre
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def recspre(exstr, predata, datadict, zhe, count=100):
    """Refit the chosen parameter combination and plot predicted vs. true.

    ``exstr`` encodes '<n_estimators>-<max_depth>'. ``predata`` is the
    prediction set (last column = target) and ``datadict[zhe]['train']`` the
    training fold. A random sample of ``count`` rows is charted and the
    figure saved to disk; returns a completion message.
    """
    tree, te = exstr.split('-')
    booster = AdaBoostRegressor(DecisionTreeRegressor(max_depth=int(te)),
                                n_estimators=int(tree), learning_rate=0.8)
    booster.fit(datadict[zhe]['train'][:, :-1], datadict[zhe]['train'][:, -1])

    predicted = booster.predict(predata[:, :-1])

    # Sample `count` rows at random so the chart stays readable.
    chosen = np.random.choice(np.arange(len(predicted)), count, replace=False)
    shown_pred = list(np.array(predicted)[chosen])
    shown_true = list(np.array(predata[:, -1])[chosen])

    plt.figure(figsize=(17, 9))
    plt.subplot(2, 1, 1)
    plt.plot(list(range(len(shown_pred))), shown_pred, 'r--', label='預測', lw=2)
    plt.scatter(list(range(len(shown_true))), shown_true, c='b', marker='.', label='真實', lw=2)
    plt.xlim(-1, count + 1)
    plt.legend()
    plt.title('預測和真實值對比[最大樹數%d]' % int(tree))

    plt.subplot(2, 1, 2)
    plt.plot(list(range(len(shown_pred))),
             np.array(shown_true) - np.array(shown_pred),
             'k--', marker='s', label='真實-預測', lw=2)
    plt.legend()
    plt.title('預測和真實值相對誤差')
    plt.savefig(r'C:\Users\GWT9\Desktop\duibi.jpg')
    return '預測真實對比完畢'
# 主函數
示例10: test_regression_toy
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def test_regression_toy():
    """Regression on a toy dataset reproduces the expected targets."""
    reg = AdaBoostRegressor(random_state=0)
    reg.fit(X, y_regr)
    assert_array_equal(reg.predict(T), y_t_regr)
示例11: test_boston
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def test_boston():
    """Consistency checks on the Boston house-prices dataset."""
    model = AdaBoostRegressor(random_state=0)
    model.fit(boston.data, boston.target)
    # The in-sample score should be high ...
    assert model.score(boston.data, boston.target) > 0.85
    # ... boosting should have kept more than one estimator ...
    assert len(model.estimators_) > 1
    # ... each seeded with a distinct random state (see issue #7408).
    seeds = set(est.random_state for est in model.estimators_)
    assert_equal(len(seeds), len(model.estimators_))
示例12: test_pickle
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def test_pickle():
    """Both AdaBoost estimators round-trip through pickle without changing
    their score."""
    import pickle

    def roundtrip(model, data, target):
        # Fit, serialize, deserialize, then compare type and score.
        model.fit(data, target)
        before = model.score(data, target)
        clone = pickle.loads(pickle.dumps(model))
        assert_equal(type(clone), model.__class__)
        assert_equal(before, clone.score(data, target))

    # Adaboost classifier, both boosting algorithms.
    for alg in ['SAMME', 'SAMME.R']:
        roundtrip(AdaBoostClassifier(algorithm=alg), iris.data, iris.target)
    # Adaboost regressor.
    roundtrip(AdaBoostRegressor(random_state=0), boston.data, boston.target)
示例13: test_sample_weight_missing
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def test_sample_weight_missing():
    """Base estimators lacking sample_weight support raise ValueError."""
    from sklearn.cluster import KMeans

    # KMeans cannot accept per-sample weights, so both ensembles must fail.
    clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
    assert_raises(ValueError, clf.fit, X, y_regr)

    reg = AdaBoostRegressor(KMeans())
    assert_raises(ValueError, reg.fit, X, y_regr)
示例14: setClf
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def setClf(self):
    """Install a default-configured AdaBoostRegressor as this model's
    estimator."""
    # min_samples_split = 3
    self.clf = AdaBoostRegressor()
示例15: __init__
# 需要導入模塊: from sklearn import ensemble [as 別名]
# 或者: from sklearn.ensemble import AdaBoostRegressor [as 別名]
def __init__(self, options):
    """Build the wrapped AdaBoostRegressor from user-supplied options.

    Recognized ``params`` keys: ``loss`` and ``max_features`` (strings),
    ``learning_rate`` (float), ``n_estimators`` (int) — typed via
    ``convert_params`` (defined elsewhere) before being forwarded.
    """
    self.handle_options(options)
    raw_params = options.get('params', {})
    converted = convert_params(
        raw_params,
        strs=['loss', 'max_features'],
        floats=['learning_rate'],
        ints=['n_estimators'],
    )
    self.estimator = _AdaBoostRegressor(**converted)