This article collects typical usage examples of the Python method sklearn.ensemble.GradientBoostingRegressor. If you have been wondering what ensemble.GradientBoostingRegressor does, how to use it, or want to see it in working code, the curated examples below may help. You can also explore further usage examples from its containing module, sklearn.ensemble.
Below are 15 code examples of ensemble.GradientBoostingRegressor, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: ensure_many_models
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def ensure_many_models(self):
    from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
    from sklearn.neural_network import MLPRegressor
    from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.svm import SVR, LinearSVR

    import warnings
    from sklearn.exceptions import ConvergenceWarning
    warnings.filterwarnings('ignore', category=ConvergenceWarning)

    for learner in [GradientBoostingRegressor, RandomForestRegressor, MLPRegressor,
                    ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor,
                    KNeighborsRegressor, SVR, LinearSVR]:
        learner = learner()
        learner_name = str(learner).split("(", maxsplit=1)[0]
        with self.subTest("Test fit using {learner}".format(learner=learner_name)):
            model = self.estimator.__class__(learner)
            model.fit(self.data_lin["X"], self.data_lin["a"], self.data_lin["y"])
            self.assertTrue(True)  # Fit did not crash
Example 2: build_ensemble
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def build_ensemble(**kwargs):
    """Generate ensemble."""
    ens = SuperLearner(**kwargs)
    prep = {'Standard Scaling': [StandardScaler()],
            'Min Max Scaling': [MinMaxScaler()],
            'No Preprocessing': []}

    est = {'Standard Scaling':
           [ElasticNet(), Lasso(), KNeighborsRegressor()],
           'Min Max Scaling':
           [SVR()],
           'No Preprocessing':
           [RandomForestRegressor(random_state=SEED),
            GradientBoostingRegressor()]}

    ens.add(est, prep)
    ens.add(GradientBoostingRegressor(), meta=True)
    return ens
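For context, here is a minimal usage sketch of build_ensemble. It assumes SuperLearner comes from the ML-Ensemble (mlens) package and that SEED is a module-level constant; the synthetic data and the constructor keywords shown are illustrative assumptions, not part of the original example.

# Minimal usage sketch (assumed context, not from the original source).
from mlens.ensemble import SuperLearner  # assumed origin of SuperLearner
from sklearn.datasets import make_regression

SEED = 42  # assumed module-level constant
X, y = make_regression(n_samples=200, n_features=10, random_state=SEED)

ens = build_ensemble(folds=3, random_state=SEED)  # kwargs are forwarded to SuperLearner
ens.fit(X, y)
print(ens.predict(X)[:5])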
Example 3: test_partial_dependence_sample_weight
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def test_partial_dependence_sample_weight():
    # Test near-perfect correlation between partial dependence and the
    # diagonal when sample weights emphasize y = x predictions.
    N = 1000
    rng = np.random.RandomState(123456)
    mask = rng.randint(2, size=N, dtype=bool)

    x = rng.rand(N)
    # set y = x on mask and y = -x outside
    y = x.copy()
    y[~mask] = -y[~mask]
    X = np.c_[mask, x]
    # sample weights to emphasize data points where y = x
    sample_weight = np.ones(N)
    sample_weight[mask] = 1000.

    clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
    clf.fit(X, y, sample_weight=sample_weight)

    grid = np.arange(0, 1, 0.01)
    pdp = partial_dependence(clf, [1], grid=grid)

    assert np.corrcoef(np.ravel(pdp[0]), grid)[0, 1] > 0.99
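Note that this test exercises the old sklearn.ensemble.partial_dependence function, which was deprecated in scikit-learn 0.21 and later removed. A rough modern equivalent, as a sketch, goes through sklearn.inspection.partial_dependence, which takes the training data instead of an explicit grid:

# Sketch of the modern API; the exact result keys vary by scikit-learn version.
from sklearn.inspection import partial_dependence

pd_result = partial_dependence(clf, X, features=[1], grid_resolution=100)
averaged = pd_result["average"]  # averaged predictions, shape (1, n_grid_points)
# The grid itself is under "grid_values" in recent releases ("values" in older ones).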
Example 4: test_regressor_parameter_checks
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def test_regressor_parameter_checks():
    # Check input parameter validation for GradientBoostingRegressor
    assert_raise_message(ValueError, "alpha must be in (0.0, 1.0) but was 1.2",
                         GradientBoostingRegressor(loss='huber', alpha=1.2)
                         .fit, X, y)
    assert_raise_message(ValueError, "alpha must be in (0.0, 1.0) but was 1.2",
                         GradientBoostingRegressor(loss='quantile', alpha=1.2)
                         .fit, X, y)
    assert_raise_message(ValueError, "Invalid value for max_features: "
                         "'invalid'. Allowed string values are 'auto', 'sqrt'"
                         " or 'log2'.",
                         GradientBoostingRegressor(max_features='invalid').fit,
                         X, y)
    assert_raise_message(ValueError, "n_iter_no_change should either be None"
                         " or an integer. 'invalid' was passed",
                         GradientBoostingRegressor(n_iter_no_change='invalid')
                         .fit, X, y)
    allowed_presort = ('auto', True, False)
    assert_raise_message(ValueError,
                         "'presort' should be in {}. "
                         "Got 'invalid' instead.".format(allowed_presort),
                         GradientBoostingRegressor(presort='invalid')
                         .fit, X, y)
Example 5: test_check_inputs_predict
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def test_check_inputs_predict():
    # X has wrong shape
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)

    x = np.array([1.0, 2.0])[:, np.newaxis]
    assert_raises(ValueError, clf.predict, x)

    x = np.array([[]])
    assert_raises(ValueError, clf.predict, x)

    x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
    assert_raises(ValueError, clf.predict, x)

    clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
    clf.fit(X, rng.rand(len(X)))

    x = np.array([1.0, 2.0])[:, np.newaxis]
    assert_raises(ValueError, clf.predict, x)

    x = np.array([[]])
    assert_raises(ValueError, clf.predict, x)

    x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
    assert_raises(ValueError, clf.predict, x)
Example 6: test_staged_predict
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def test_staged_predict():
    # Test whether the staged predictions eventually give
    # the same result as ``predict``.
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=1, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test = X[200:]
    clf = GradientBoostingRegressor()
    # test that ValueError is raised if not fitted
    assert_raises(ValueError, lambda X: np.fromiter(
        clf.staged_predict(X), dtype=np.float64), X_test)

    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)

    # test if prediction for last stage equals ``predict``
    for y in clf.staged_predict(X_test):
        assert_equal(y.shape, y_pred.shape)

    assert_array_almost_equal(y_pred, y)
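As an aside, staged_predict is also the standard way to track held-out error as boosting progresses. A minimal sketch, assuming a held-out y_test (the test above keeps only X_test):

# Sketch: per-stage test error via staged_predict (y_test is assumed here).
import numpy as np
from sklearn.metrics import mean_squared_error

test_errors = [mean_squared_error(y_test, y_stage)
               for y_stage in clf.staged_predict(X_test)]
best_n_stages = int(np.argmin(test_errors)) + 1  # stage with the lowest test MSE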
Example 7: test_warm_start
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def test_warm_start(Cls):
    # Test if warm start equals fit.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    est = Cls(n_estimators=200, max_depth=1)
    est.fit(X, y)

    est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
    est_ws.fit(X, y)
    est_ws.set_params(n_estimators=200)
    est_ws.fit(X, y)

    if Cls is GradientBoostingRegressor:
        assert_array_almost_equal(est_ws.predict(X), est.predict(X))
    else:
        # Random state is preserved and hence predict_proba must also be
        # the same
        assert_array_equal(est_ws.predict(X), est.predict(X))
        assert_array_almost_equal(est_ws.predict_proba(X),
                                  est.predict_proba(X))
Example 8: test_gradient_boosting_with_init
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def test_gradient_boosting_with_init(gb, dataset_maker, init_estimator):
    # Check that GradientBoostingRegressor works when init is a sklearn
    # estimator.
    # Check that an error is raised if trying to fit with sample weight but
    # the initial estimator does not support sample weights.
    X, y = dataset_maker()
    sample_weight = np.random.RandomState(42).rand(100)

    # init supports sample weights
    init_est = init_estimator()
    gb(init=init_est).fit(X, y, sample_weight=sample_weight)

    # init does not support sample weights
    init_est = _NoSampleWeightWrapper(init_estimator())
    gb(init=init_est).fit(X, y)  # ok, no sample weights
    with pytest.raises(ValueError,
                       match="estimator.*does not support sample weights"):
        gb(init=init_est).fit(X, y, sample_weight=sample_weight)
Example 9: test_multi_target_regression
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def test_multi_target_regression():
    X, y = datasets.make_regression(n_targets=3)
    X_train, y_train = X[:50], y[:50]
    X_test, y_test = X[50:], y[50:]

    references = np.zeros_like(y_test)
    for n in range(3):
        rgr = GradientBoostingRegressor(random_state=0)
        rgr.fit(X_train, y_train[:, n])
        references[:, n] = rgr.predict(X_test)

    rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    rgr.fit(X_train, y_train)
    y_pred = rgr.predict(X_test)

    assert_almost_equal(references, y_pred)
Example 10: test_multi_target_sample_weights
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def test_multi_target_sample_weights():
    # weighted regressor
    Xw = [[1, 2, 3], [4, 5, 6]]
    yw = [[3.141, 2.718], [2.718, 3.141]]
    w = [2., 1.]
    rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    rgr_w.fit(Xw, yw, w)

    # unweighted, but with repeated samples
    X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
    y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]]
    rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    rgr.fit(X, y)

    X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
    assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test))
Example 11: __init__
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def __init__(self, q1=.16, q2=.84, **params):
    """
    Gradient boosted trees as a surrogate model for Bayesian Optimization.

    Uses quantile regression for an estimate of the 'posterior' variance.
    In practice, the std is computed as (`q2` - `q1`) / 2.
    Relies on `sklearn.ensemble.GradientBoostingRegressor`.

    Parameters
    ----------
    q1 : float
        First quantile.
    q2 : float
        Second quantile.
    params : dict
        Extra parameters to pass to `GradientBoostingRegressor`.
    """
    self.params = params
    self.q1 = q1
    self.q2 = q2
    self.eps = 1e-1
Example 12: fit
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def fit(self, X, y):
    """
    Fit a GBM model to data `X` and targets `y`.

    Parameters
    ----------
    X : array-like
        Input values.
    y : array-like
        Target values.
    """
    self.X = X
    self.y = y
    self.n = self.X.shape[0]

    self.modq1 = GradientBoostingRegressor(loss='quantile', alpha=self.q1, **self.params)
    self.modq2 = GradientBoostingRegressor(loss='quantile', alpha=self.q2, **self.params)
    self.mod = GradientBoostingRegressor(loss='ls', **self.params)

    self.modq1.fit(self.X, self.y)
    self.modq2.fit(self.X, self.y)
    self.mod.fit(self.X, self.y)
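The original snippet stops before the prediction step. Below is a minimal sketch of how such a surrogate could expose its mean and std, following the convention stated in the docstring of Example 11 (std = (q2 - q1) / 2) and assuming numpy is imported as np; the method name and return convention are assumptions, not the original author's code.

def predict(self, X):
    # Sketch (assumed): mean from the squared-error model,
    # 'posterior' std as half the spread between the two quantile models.
    mean = self.mod.predict(X)
    std = (self.modq2.predict(X) - self.modq1.predict(X)) / 2
    return mean, np.clip(std, self.eps, None)  # self.eps plausibly floors the std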
Example 13: test_boston_OHE_plus_trees
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def test_boston_OHE_plus_trees(self):
    data = load_boston()

    pl = Pipeline(
        [
            ("OHE", OneHotEncoder(categorical_features=[8], sparse=False)),
            ("Trees", GradientBoostingRegressor(random_state=1)),
        ]
    )

    pl.fit(data.data, data.target)

    # Convert the model
    spec = convert(pl, data.feature_names, "target")

    if _is_macos() and _macos_version() >= (10, 13):
        # Get predictions
        df = pd.DataFrame(data.data, columns=data.feature_names)
        df["prediction"] = pl.predict(data.data)

        # Evaluate it
        result = evaluate_regressor(spec, df, "target", verbose=False)

        assert result["max_error"] < 0.0001
Example 14: train_model
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def train_model(self, train_file_path, model_path):
    print("==> Load the data ...")
    X_train, Y_train = self.load_file(train_file_path)
    print(train_file_path, shape(X_train))

    print("==> Train the model ...")
    min_max_scaler = preprocessing.MaxAbsScaler()
    X_train_minmax = min_max_scaler.fit_transform(X_train)
    clf = GradientBoostingRegressor(n_estimators=self.n_estimators)
    clf.fit(X_train_minmax.toarray(), Y_train)

    print("==> Save the model ...")
    pickle.dump(clf, open(model_path, 'wb'))
    scaler_path = model_path.replace('.pkl', '.scaler.pkl')
    pickle.dump(min_max_scaler, open(scaler_path, 'wb'))
    return clf
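A matching load-and-predict sketch for the artifacts saved above; the helper name is hypothetical, and the .toarray() call mirrors the sparse-feature assumption made during training:

# Hypothetical counterpart: restore the pickled scaler and model, then predict.
import pickle

def load_and_predict(model_path, X):
    scaler_path = model_path.replace('.pkl', '.scaler.pkl')
    with open(model_path, 'rb') as f:
        clf = pickle.load(f)
    with open(scaler_path, 'rb') as f:
        scaler = pickle.load(f)
    X_minmax = scaler.transform(X)  # reuse the MaxAbsScaler fitted at train time
    return clf.predict(X_minmax.toarray())  # assumes sparse input, as in training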
Example 15: test_same_prediction
# Required import: from sklearn import ensemble [as alias]
# Or: from sklearn.ensemble import GradientBoostingRegressor [as alias]
def test_same_prediction(self):
    from sklearn.ensemble import GradientBoostingRegressor
    params = {'n_estimators': 1, 'max_depth': 2, 'min_samples_split': 2,
              'learning_rate': 0.8, 'loss': 'ls'}
    sklearn_model = GradientBoostingRegressor(**params)
    sklearn_model.fit(self.data.X.values, self.data.y.values)
    sklearn_tree = sklearn_model.estimators_[0][0].tree_

    bartpy_tree = Tree([LeafNode(Split(self.data))])
    map_sklearn_tree_into_bartpy(bartpy_tree, sklearn_tree)

    sklearn_predictions = sklearn_tree.predict(self.data.X.values.astype(np.float32))
    sklearn_predictions = [round(x, 2) for x in sklearn_predictions.reshape(-1)]

    bartpy_tree.cache_up_to_date = False
    bartpy_tree_predictions = bartpy_tree.predict(self.data.X.values)
    bartpy_tree_predictions = [round(x, 2) for x in bartpy_tree_predictions]

    self.assertListEqual(sklearn_predictions, bartpy_tree_predictions)