本文整理汇总了Python中xgboost.XGBRegressor方法的典型用法代码示例。如果您正苦于以下问题:Python xgboost.XGBRegressor方法的具体用法?Python xgboost.XGBRegressor怎么用?Python xgboost.XGBRegressor使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类xgboost
的用法示例。
在下文中一共展示了xgboost.XGBRegressor方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: Train
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def Train(data, modelcount, censhu, yanzhgdata):
    """Fit an XGBoost regressor and report train/validation MSE.

    The last column of ``data`` and ``yanzhgdata`` is the target; all
    preceding columns are features. ``modelcount`` is the number of trees
    and ``censhu`` the max depth. Returns ``(train_mse, validation_mse)``.
    """
    train_X, train_y = data[:, :-1], data[:, -1]
    valid_X, valid_y = yanzhgdata[:, :-1], yanzhgdata[:, -1]
    model = xgb.XGBRegressor(
        max_depth=censhu,
        learning_rate=0.1,
        n_estimators=modelcount,
        silent=True,
        objective='reg:gamma',
    )
    model.fit(train_X, train_y)
    # MSE on the data the model was fit on
    train_mse = mse(train_y, model.predict(train_X))
    # MSE on the held-out validation data
    add_mse = mse(valid_y, model.predict(valid_X))
    print(train_mse, add_mse)
    return train_mse, add_mse
# 最终确定组合的函数
示例2: test_xgb_regressor
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def test_xgb_regressor(self):
    """Round-trip an XGBRegressor through Elasticsearch and compare predictions."""
    # Fit a regressor on synthetic data
    X, y = datasets.make_regression(n_features=5)
    regressor = XGBRegressor()
    regressor.fit(X, y)
    # Local predictions to compare against
    test_data = [[0.1, 0.2, 0.3, -0.5, 1.0], [1.6, 2.1, -10, 50, -1.0]]
    expected = regressor.predict(np.asarray(test_data))
    # Serialise the model into Elasticsearch
    feature_names = ["f0", "f1", "f2", "f3", "f4"]
    model_id = "test_xgb_regressor"
    es_model = ImportedMLModel(
        ES_TEST_CLIENT, model_id, regressor, feature_names, overwrite=True
    )
    # The ES-side predictions should match the local ones to 2 decimals
    es_results = es_model.predict(test_data)
    np.testing.assert_almost_equal(expected, es_results, decimal=2)
    # Clean up the serialised model
    es_model.delete_model()
示例3: _dispatch_gbdt_class
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def _dispatch_gbdt_class(algorithm_type: str, type_of_target: str):
    """Resolve the GBDT estimator class for the given library and task.

    ``algorithm_type`` is one of ``'lgbm'``, ``'cat'`` or ``'xgb'``;
    ``type_of_target == 'continuous'`` selects the regressor, anything
    else the classifier. Each library is imported lazily after its
    availability check.
    """
    is_regression = type_of_target == 'continuous'
    if algorithm_type == 'lgbm':
        requires_lightgbm()
        from lightgbm import LGBMClassifier, LGBMRegressor
        pair = (LGBMClassifier, LGBMRegressor)
    elif algorithm_type == 'cat':
        requires_catboost()
        from catboost import CatBoostClassifier, CatBoostRegressor
        pair = (CatBoostClassifier, CatBoostRegressor)
    else:
        requires_xgboost()
        assert algorithm_type == 'xgb'
        from xgboost import XGBClassifier, XGBRegressor
        pair = (XGBClassifier, XGBRegressor)
    return pair[1] if is_regression else pair[0]
示例4: _train_convert_evaluate_assert
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def _train_convert_evaluate_assert(self, bt_params=None, allowed_error=None, **params):
    """
    Train an XGBoost regressor, convert it to Core ML, and (on macOS >= 10.13)
    evaluate the converted spec against the native model's predictions.

    ``bt_params`` / ``allowed_error`` are forwarded to ``self._check_metrics``
    and default to empty dicts. ``**params`` are keyword arguments for
    ``xgboost.XGBRegressor``.
    """
    # Fixed: mutable default arguments ({}) are shared across calls; use
    # None sentinels instead (backward-compatible).
    if bt_params is None:
        bt_params = {}
    if allowed_error is None:
        allowed_error = {}
    # Train a model
    xgb_model = xgboost.XGBRegressor(**params)
    xgb_model.fit(self.X, self.target)
    # Convert the model (feature_names can't be given because of XGBoost)
    spec = xgb_converter.convert(
        xgb_model, self.feature_names, self.output_name, force_32bit_float=False
    )
    if _is_macos() and _macos_version() >= (10, 13):
        # Get predictions from the native model
        df = pd.DataFrame(self.X, columns=self.feature_names)
        df["prediction"] = xgb_model.predict(self.X)
        # Evaluate the converted spec against them
        metrics = evaluate_regressor(spec, df, target="target", verbose=False)
        self._check_metrics(metrics, bt_params, allowed_error)
示例5: opt_pro
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def opt_pro(optimization_protocol):
    """Build, run and return an optimization protocol over an XGBRegressor space."""
    opt = optimization_protocol(iterations=3, random_state=32, n_initial_points=1)
    # Hyperparameter search space for model construction
    init_params = dict(
        max_depth=Integer(2, 10),
        n_estimators=Integer(50, 300),
        learning_rate=Real(0.1, 0.9),
        subsample=0.5,
        booster=Categorical(["gbtree", "gblinear"]),
    )
    # Search space for fit-time extras
    extra_params = dict(fit=dict(eval_metric=Categorical(["rmse", "mae"])))
    engineer = FeatureEngineer([Categorical([nothing_transform], optional=True)])
    opt.forge_experiment(
        model_initializer=XGBRegressor,
        model_init_params=init_params,
        model_extra_params=extra_params,
        feature_engineer=engineer,
    )
    opt.go()
    return opt
##################################################
# Feature Engineering Steps
##################################################
示例6: test_optional_step_matching
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def test_optional_step_matching(env_boston, feature_engineer):
    """Verify that saved Experiments are matched by a Space whose Feature Engineering
    steps include `optional` `Categorical` entries that are not only in final position.
    See :func:`test_limited_optional_step_matching` for the simplified case where
    only the final `EngineerStep` is `optional`."""
    first_opt = DummyOptPro(iterations=20, random_state=32)
    first_opt.forge_experiment(XGBRegressor, feature_engineer=feature_engineer)
    first_opt.go()
    second_opt = ExtraTreesOptPro(iterations=20, random_state=32)
    second_opt.forge_experiment(XGBRegressor, feature_engineer=feature_engineer)
    second_opt.get_ready()
    # Every Experiment run by the first OptPro should be matched by the second
    assert len(second_opt.similar_experiments) == first_opt.successful_iterations
示例7: test_optional_step_matching_by_exp
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def test_optional_step_matching_by_exp(env_boston, es_0, es_1, es_2):
    """Verify that an OptPro whose dimensions are all-`optional` `EngineerStep`s
    correctly matches the result of a previously-executed Experiment"""
    # Drop unused (None) steps from the Experiment's feature engineer
    steps = [step for step in (es_0, es_1, es_2) if step is not None]
    exp_0 = CVExperiment(XGBRegressor, feature_engineer=steps)
    opt = ExtraTreesOptPro(iterations=1, random_state=32)
    opt.forge_experiment(
        XGBRegressor,
        feature_engineer=[
            Categorical([es_a], optional=True),
            Categorical([es_b, es_c], optional=True),
            Categorical([es_d, es_e], optional=True),
        ],
    )
    opt.get_ready()
    # The single prior Experiment should be recognised as similar
    assert len(opt.similar_experiments) == 1
示例8: test_predict_single_feature_vector
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def test_predict_single_feature_vector(self):
    """Check prediction on a single one-feature vector round-tripped through ES."""
    # Fit a one-feature regressor on synthetic data
    X, y = datasets.make_regression(n_features=1)
    regressor = XGBRegressor()
    regressor.fit(X, y)
    # Local prediction to compare against
    test_data = [[0.1]]
    expected = regressor.predict(np.asarray(test_data))
    # Serialise the model into Elasticsearch
    feature_names = ["f0"]
    model_id = "test_xgb_regressor"
    es_model = ImportedMLModel(
        ES_TEST_CLIENT, model_id, regressor, feature_names, overwrite=True
    )
    # Predict with a bare feature vector rather than a list of vectors
    es_results = es_model.predict(test_data[0])
    np.testing.assert_almost_equal(expected, es_results, decimal=2)
    # Clean up the serialised model
    es_model.delete_model()
示例9: test_tree_ensemble_regressor_xgboost
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def test_tree_ensemble_regressor_xgboost(self):
    """Train an XGBRegressor, convert it via Core ML to ONNX, and dump for comparison."""
    this = os.path.dirname(__file__)
    data_train = pandas.read_csv(os.path.join(this, "xgboost.model.xgb.n4.d3.train.txt"), header=None)
    # Column 0 is the target; the remaining columns are features.
    X = data_train.iloc[:, 1:].values
    y = data_train.iloc[:, 0].values
    # Fixed: 'n_estimator' was a typo that got silently ignored, so the model
    # trained with the default number of trees instead of the 4 implied by the
    # "n4.d3" training-file name.
    params = dict(n_estimators=4, max_depth=3)
    model = XGBRegressor(**params).fit(X, y)
    # Work around https://github.com/apple/coremltools/issues/51.
    model.booster = model.get_booster
    model_coreml = convert_xgb_to_coreml(model)
    model_onnx = convert_cml(model_coreml)
    assert model_onnx is not None
    if sys.version_info[0] >= 3:
        # python 2.7 returns TypeError: can't pickle instancemethod objects
        dump_data_and_model(X.astype(numpy.float32), model, model_onnx,
                            basename="CmlXGBoostRegressor-OneOff-Reshape",
                            allow_failure=True)
示例10: test_xgb_regressor
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def test_xgb_regressor(self):
    """Convert a fitted XGBRegressor to ONNX and dump data/model for comparison."""
    dataset = load_diabetes()
    # 50/50 split; the held-out targets are unused
    x_train, x_test, y_train, _ = train_test_split(
        dataset.data, dataset.target, test_size=0.5, random_state=42)
    xgb = XGBRegressor()
    xgb.fit(x_train, y_train)
    # Convert with a fully dynamic input shape
    conv_model = convert_xgboost(
        xgb, initial_types=[('input', FloatTensorType(shape=['None', 'None']))])
    self.assertTrue(conv_model is not None)
    dump_data_and_model(
        x_test.astype("float32"),
        xgb,
        conv_model,
        basename="SklearnXGBRegressor-Dec3",
        allow_failure="StrictVersion("
                      "onnx.__version__)"
                      "< StrictVersion('1.3.0')",
    )
示例11: fit
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def fit(self, X, y):
    """Store the training data and fit the three underlying regressors
    (XGBoost, random forest, SVM) with their pre-tuned parameter sets."""
    self.X = X
    self.y = y
    # Instantiate each model from its optimised parameter set
    self.opt_XGBoost_reg = xgb.XGBRegressor(**self.opt_xgb_params)
    self.opt_forest_reg = RandomForestRegressor(**self.opt_rf_params)
    self.opt_svm_reg = SVR(**self.opt_svm_params)
    # Fit every model on the stored data
    for regressor in (self.opt_XGBoost_reg, self.opt_forest_reg, self.opt_svm_reg):
        regressor.fit(self.X, self.y)
示例12: fit
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def fit(self, dataset, **kwargs):
    """
    Fit the wrapped XGBoost model to ``dataset``.

    Runs a hyperparameter search, then estimates the best number of boosting
    rounds via early stopping on an 80/20 split, and finally refits on the
    full data with that round count scaled up by 1.25 (= 1/0.8).

    Raises
    ------
    ValueError
        If the wrapped model is neither an XGBClassifier nor an XGBRegressor.
    """
    X = dataset.X
    y = np.squeeze(dataset.y)
    w = np.squeeze(dataset.w)  # sample weights; currently unused below
    seed = self.model_instance.random_state
    import xgboost as xgb
    if isinstance(self.model_instance, xgb.XGBClassifier):
        xgb_metric = "auc"
        sklearn_metric = "roc_auc"
        stratify = y
    elif isinstance(self.model_instance, xgb.XGBRegressor):
        xgb_metric = "mae"
        sklearn_metric = "neg_mean_absolute_error"
        stratify = None
    else:
        # Fixed: previously fell through with xgb_metric/stratify unbound and
        # crashed later with a NameError; fail fast with a clear message.
        raise ValueError(
            "model_instance must be an XGBClassifier or XGBRegressor, got %s"
            % type(self.model_instance))
    best_param = self._search_param(sklearn_metric, X, y)
    # update model with best param
    self.model_instance = self.model_class(**best_param)
    # Find optimal n_estimators based on original learning_rate
    # and early_stopping_rounds
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=seed, stratify=stratify)
    self.model_instance.fit(
        X_train,
        y_train,
        early_stopping_rounds=self.early_stopping_rounds,
        eval_metric=xgb_metric,
        eval_set=[(X_train, y_train), (X_test, y_test)],
        verbose=self.verbose)
    # Since test size is 20%, when retraining on the whole data, expect
    # n_estimators to increase ~1/0.8 = 1.25 times.
    estimated_best_round = np.round(self.model_instance.best_ntree_limit * 1.25)
    self.model_instance.n_estimators = np.int64(estimated_best_round)
    self.model_instance.fit(X, y, eval_metric=xgb_metric, verbose=self.verbose)
示例13: test_xgboost_regression
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def test_xgboost_regression(self):
    """Fit an XGBoostModel on a diabetes train split and check held-out MAE."""
    import xgboost
    np.random.seed(123)
    dataset = sklearn.datasets.load_diabetes()
    X, y = dataset.data, dataset.target
    # 70/30 train/test split by index
    n_train = int(.7 * len(X))
    train_dataset = dc.data.NumpyDataset(X[:n_train], y[:n_train])
    test_dataset = dc.data.NumpyDataset(X[n_train:], y[n_train:])
    regression_metric = dc.metrics.Metric(dc.metrics.mae_score)
    # Set early stopping rounds = n_estimators so that esr won't kick in
    esr = {'early_stopping_rounds': 50}
    xgb_model = xgboost.XGBRegressor(n_estimators=50, random_state=123)
    model = dc.models.XGBoostModel(xgb_model, verbose=False, **esr)
    # Fit trained model
    model.fit(train_dataset)
    model.save()
    # Evaluate on the held-out split
    scores = model.evaluate(test_dataset, [regression_metric])
    assert scores[regression_metric.name] < 55
示例14: test_xgboost_multitask_regression
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def test_xgboost_multitask_regression(self):
    """Fit singletask-to-multitask XGBoost regressors on duplicated diabetes targets."""
    import xgboost
    np.random.seed(123)
    n_tasks = 4
    tasks = range(n_tasks)
    dataset = sklearn.datasets.load_diabetes()
    X, y = dataset.data, dataset.target
    # Duplicate the single target column across all tasks
    y = np.hstack([np.reshape(y, (len(y), 1))] * n_tasks)
    # 70/30 train/test split by index
    n_train = int(.7 * len(X))
    train_dataset = dc.data.DiskDataset.from_numpy(X[:n_train], y[:n_train])
    test_dataset = dc.data.DiskDataset.from_numpy(X[n_train:], y[n_train:])
    regression_metric = dc.metrics.Metric(dc.metrics.mae_score)
    esr = {'early_stopping_rounds': 50}

    def model_builder(model_dir):
        # One fresh XGBoost regressor per task
        xgb_model = xgboost.XGBRegressor(n_estimators=50, seed=123)
        return dc.models.XGBoostModel(xgb_model, model_dir, verbose=False, **esr)

    model = dc.models.SingletaskToMultitask(tasks, model_builder)
    # Fit trained model
    model.fit(train_dataset)
    model.save()
    # Every per-task score on the held-out split should be small
    scores = model.evaluate(test_dataset, [regression_metric])
    for score in scores[regression_metric.name]:
        assert score < 50
示例15: load_spark_model
# 需要导入模块: import xgboost [as 别名]
# 或者: from xgboost import XGBRegressor [as 别名]
def load_spark_model(model_path, metadata_path):
    """Rebuild a scikit-learn-style XGBoost estimator from a Spark XGBoost4J export.

    model_path: path to the serialised booster file.
    metadata_path: path to the JSON metadata written alongside the model.
    Returns an xgb.XGBClassifier or xgb.XGBRegressor with its booster attached.
    Raises ValueError for non-str paths or an unsupported model class.
    """
    # Fixed: the original guard checked `model_path` twice and never validated
    # `metadata_path`. Validate both before doing any work or imports.
    if not isinstance(model_path, str) or not isinstance(metadata_path, str):
        raise ValueError("model and metadata paths must be str, not {0} and {1}".format(type(model_path), type(metadata_path)))
    import xgboost as xgb
    import json
    import numpy as np
    with open(metadata_path) as f:
        metadata = json.loads(f.read().strip())
    xgb_class = metadata.get("class")
    if xgb_class == "ml.dmlc.xgboost4j.scala.spark.XGBoostClassificationModel":
        clf = xgb.XGBClassifier()
        setattr(clf, "base_score", metadata["paramMap"]["baseScore"])
    elif xgb_class == "ml.dmlc.xgboost4j.scala.spark.XGBoostRegressionModel":
        clf = xgb.XGBRegressor()
    else:
        raise ValueError("Unsupported model.")
    setattr(clf, "objective", metadata["paramMap"]["objective"])
    # Spark serialises a missing-value marker as text; map the textual NaN
    # spellings back to np.nan.
    setattr(clf, "missing",
            np.nan if metadata["paramMap"]["missing"] in ["NaN", "nan", "null", "None"] else metadata["paramMap"][
                "missing"])
    setattr(clf, "booster", metadata["paramMap"].get("booster", "gbtree"))
    setattr(clf, "n_estimators", metadata["paramMap"].get("numRound", 1))
    # Attach the raw booster loaded from disk
    booster = xgb.Booster()
    booster.load_model(model_path)
    clf._Booster = booster
    return clf