This article collects typical usage examples of the score method of Python's sklearn.ensemble.BaggingRegressor. If you have been wondering what exactly BaggingRegressor.score does and how to use it, the curated example code below may help. You can also read further about the class this method belongs to, sklearn.ensemble.BaggingRegressor.
Six code examples of BaggingRegressor.score are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
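Before turning to the collected examples, here is a minimal, self-contained sketch of how BaggingRegressor.score is typically used. The synthetic dataset and parameter values are illustrative assumptions, not taken from any of the examples below:

from sklearn.datasets import make_regression
from sklearn.ensemble import BaggingRegressor
from sklearn.model_selection import train_test_split

# Synthetic regression data; any numeric X, y would work the same way.
X, y = make_regression(n_samples=500, n_features=10, noise=10.0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Bag 50 copies of the default base estimator (a decision tree).
reg = BaggingRegressor(n_estimators=50, random_state=0).fit(X_train, y_train)

# score() returns the coefficient of determination (R^2) on the given data.
print(reg.score(X_test, y_test))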
Example 1: test_bootstrap_samples
# Required import: from sklearn.ensemble import BaggingRegressor [as alias]
# Or: from sklearn.ensemble.BaggingRegressor import score [as alias]
def test_bootstrap_samples():
    """Test that bootstrapping samples generates non-perfect base estimators."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    base_estimator = DecisionTreeRegressor().fit(X_train, y_train)

    # without bootstrap, all trees are perfect on the training set
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=False,
                                random_state=rng).fit(X_train, y_train)

    assert_equal(base_estimator.score(X_train, y_train),
                 ensemble.score(X_train, y_train))

    # with bootstrap, trees are no longer perfect on the training set
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=True,
                                random_state=rng).fit(X_train, y_train)

    assert_greater(base_estimator.score(X_train, y_train),
                   ensemble.score(X_train, y_train))
Example 2: test_oob_score_regression
# Required import: from sklearn.ensemble import BaggingRegressor [as alias]
# Or: from sklearn.ensemble.BaggingRegressor import score [as alias]
def test_oob_score_regression():
    # Check that oob prediction is a good estimation of the generalization
    # error.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                           n_estimators=50,
                           bootstrap=True,
                           oob_score=True,
                           random_state=rng).fit(X_train, y_train)

    test_score = clf.score(X_test, y_test)

    assert_less(abs(test_score - clf.oob_score_), 0.1)

    # Test with few estimators
    assert_warns(UserWarning,
                 BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                  n_estimators=1,
                                  bootstrap=True,
                                  oob_score=True,
                                  random_state=rng).fit,
                 X_train,
                 y_train)
Example 3: test_bootstrap_samples
# Required import: from sklearn.ensemble import BaggingRegressor [as alias]
# Or: from sklearn.ensemble.BaggingRegressor import score [as alias]
def test_bootstrap_samples():
    # Test that bootstrapping samples generates non-perfect base estimators.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    base_estimator = DecisionTreeRegressor().fit(X_train, y_train)

    # without bootstrap, all trees are perfect on the training set
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=False,
                                random_state=rng).fit(X_train, y_train)

    assert_equal(base_estimator.score(X_train, y_train),
                 ensemble.score(X_train, y_train))

    # with bootstrap, trees are no longer perfect on the training set
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=True,
                                random_state=rng).fit(X_train, y_train)

    assert_greater(base_estimator.score(X_train, y_train),
                   ensemble.score(X_train, y_train))

    # check that each sampling corresponds to a complete bootstrap resample:
    # the size of each bootstrap sample should equal the size of the input
    # data, but the data should differ (checked via a hash of the data).
    ensemble = BaggingRegressor(base_estimator=DummySizeEstimator(),
                                bootstrap=True).fit(X_train, y_train)
    training_hash = []
    for estimator in ensemble.estimators_:
        assert estimator.training_size_ == X_train.shape[0]
        training_hash.append(estimator.training_hash_)
    assert len(set(training_hash)) == len(training_hash)
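Note that DummySizeEstimator is a private helper from scikit-learn's own test suite rather than a public class. A hypothetical sketch of such a helper, assuming it only needs to record the size and a hash of the data it was fitted on, might look like this:

import joblib
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin

class DummySizeEstimator(BaseEstimator, RegressorMixin):
    """Hypothetical stand-in that records what it was trained on."""

    def fit(self, X, y):
        # Remember how many samples were seen and a fingerprint of their values.
        self.training_size_ = X.shape[0]
        self.training_hash_ = joblib.hash(X)
        return self

    def predict(self, X):
        # The predictions themselves do not matter for the bootstrap check above.
        return np.ones(X.shape[0])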
Example 4: RandomForestRegressor
# Required import: from sklearn.ensemble import BaggingRegressor [as alias]
# Or: from sklearn.ensemble.BaggingRegressor import score [as alias]
print('******************************************')
print(name)
print('******************************************')

if name == 'Boston' or name == 'Diabetes':  # Regression problem
    rfr = RandomForestRegressor(**params)
    rfr.fit(X, y)
    print('Score RandomForestRegressor = %s' % (rfr.score(X, y)))
    scores_rfr = cross_val_score(rfr, X, y, cv=5)
    print('Cross Val Score RandomForestRegressor = %s' % (np.mean(scores_rfr)))
    br = BaggingRegressor(base_estimator=DecisionTreeRegressor(max_depth=max_depth),
                          n_estimators=n_estimators)
    br.fit(X, y)
    print('Score BaggingRegressor = %s' % (br.score(X, y)))
    scores_br = cross_val_score(br, X, y, cv=5)
    print('Cross Val Scores of BR = %s' % (np.mean(scores_br)))

if name == 'Iris' or name == 'Digits':  # Classification problem
    rfc = RandomForestClassifier(**params)
    rfc.fit(X, y)
    print('Score RandomForestClassifier = %s' % (rfc.score(X, y)))
    scores_rfc = cross_val_score(rfc, X, y, cv=5)
    print('Cross Val Scores of RandomForestClassifier = %s' % (np.mean(scores_rfc)))
    bc = BaggingClassifier(base_estimator=DecisionTreeClassifier(max_depth=max_depth),
                           n_estimators=n_estimators)
    bc.fit(X, y)
    print('Score BaggingClassifier = %s' % (bc.score(X, y)))
    scores_bc = cross_val_score(bc, X, y, cv=5)
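This example is a fragment of a larger script that loops over several scikit-learn toy datasets; name, X, y, params, max_depth and n_estimators are defined earlier in that script. A hypothetical sketch of how such a loop could be set up (the parameter values are assumptions, and the Boston dataset is omitted because load_boston has been removed from recent scikit-learn releases):

import numpy as np
from sklearn.datasets import load_diabetes, load_digits, load_iris
from sklearn.ensemble import (BaggingClassifier, BaggingRegressor,
                              RandomForestClassifier, RandomForestRegressor)
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor

# Assumed shared hyperparameters for both ensembles.
max_depth = None
n_estimators = 50
params = {'n_estimators': n_estimators, 'max_depth': max_depth}

datasets = {'Diabetes': load_diabetes(), 'Iris': load_iris(), 'Digits': load_digits()}

for name, dataset in datasets.items():
    X, y = dataset.data, dataset.target
    # The body of Example 4 (the regression / classification branches) runs here.
    print('%s: %d samples, %d features' % (name, X.shape[0], X.shape[1]))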
Example 5: bagging
# Required import: from sklearn.ensemble import BaggingRegressor [as alias]
# Or: from sklearn.ensemble.BaggingRegressor import score [as alias]
def bagging(df1, features, pred_var, df2):
    # Fit on df1 and report the R^2 score on the held-out frame df2.
    lr = BaggingRegressor()
    lr.fit(df1[features], df1[pred_var])
    print('BaggingRegressor Score: ', lr.score(df2[features], df2[pred_var]))
Example 6: RandomForestRegressor
# Required import: from sklearn.ensemble import BaggingRegressor [as alias]
# Or: from sklearn.ensemble.BaggingRegressor import score [as alias]
print('******************************************')

if name == 'Boston':  # Regression problem
    rfr = RandomForestRegressor(**params)
    rfr.fit(X, y)
    scores_rfr = cross_val_score(rfr, X, y, cv=5)
    br = BaggingRegressor(base_estimator=DecisionTreeRegressor(max_depth=max_depth),
                          n_estimators=n_estimators)
    br.fit(X, y)
    scores_br = cross_val_score(br, X, y, cv=5)
    boston[i, 1] = rfr.score(X, y)
    boston[i, 2] = np.mean(scores_rfr)
    boston[i, 3] = np.std(scores_rfr)
    boston[i, 4] = br.score(X, y)
    boston[i, 5] = np.mean(scores_br)
    boston[i, 6] = np.std(scores_br)
    print('Score RandomForestRegressor = %s' % (boston[i, 1]))
    print('Cross Val : mean = %s' % (boston[i, 2]))
    print('Cross Val : std = %s' % (boston[i, 3]))
    print('Score BaggingRegressor = %s' % (boston[i, 4]))
    print('Cross Val : mean = %s' % (boston[i, 5]))
    print('Cross Val : std = %s' % (boston[i, 6]))

if name == 'Diabetes':  # Regression problem
    rfr = RandomForestRegressor(**params)
    rfr.fit(X, y)
    scores_rfr = cross_val_score(rfr, X, y, cv=5)