本文整理汇总了Python中sklearn.model_selection.GridSearchCV.set_params方法的典型用法代码示例。如果您正苦于以下问题:Python GridSearchCV.set_params方法的具体用法?Python GridSearchCV.set_params怎么用?Python GridSearchCV.set_params使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.model_selection.GridSearchCV
的用法示例。
在下文中一共展示了GridSearchCV.set_params方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_set_params_updates_valid_params
# Required import: from sklearn.model_selection import GridSearchCV [as alias]
# Or: from sklearn.model_selection.GridSearchCV import set_params [as alias]
def test_set_params_updates_valid_params():
    """Verify that ``set_params`` validates nested params against the *new* estimator.

    When the estimator is swapped to ``SVC`` in the same ``set_params`` call,
    ``estimator__C`` must be applied to the SVC (which has a ``C`` parameter),
    not to the original ``DecisionTreeClassifier`` (which does not).
    """
    search = GridSearchCV(DecisionTreeClassifier(), {})
    search.set_params(estimator=SVC(), estimator__C=42.0)
    assert search.estimator.C == 42.0
示例2: train_regression
# Required import: from sklearn.model_selection import GridSearchCV [as alias]
# Or: from sklearn.model_selection.GridSearchCV import set_params [as alias]
def train_regression(self, X, y, model_path, best_model_path, regression_method):
    """Train a regression model of the kind named by ``regression_method``.

    For the sklearn-based choices, a ``GridSearchCV`` (5-fold CV, scored by
    negative mean squared error) is fit and only its ``best_estimator_`` is
    kept as ``model``.  For ``"SSGPR"`` a project-local ``SSGP`` model is fit
    directly with randomized hyperparameters.

    :param X: training inputs (passed straight to ``fit``; assumed 2-D
        array-like — TODO confirm against callers).
    :param y: training targets; flattened with ``ravel()`` for the sklearn
        estimators.
    :param model_path: filesystem path whose parent directory is created if
        missing; presumably where the trained model is saved in the omitted
        tail of this function — verify.
    :param best_model_path: unused in the visible portion; presumably used in
        the omitted tail — verify.
    :param regression_method: one of "SSGPR", "KernelRidge",
        "DecisionTreeRegressor", "AdaBoostDecisionTreeRegressor",
        "GradientBoostingRegressor", "RandomForestRegressor".
    """
    # NOTE(review): ``glob`` is not used in the visible code; it may be
    # needed by the omitted remainder of this function — confirm.
    import glob
    from sklearn.model_selection import GridSearchCV
    # Create the directory that will hold the model file, if necessary.
    model_dir = '/'.join(model_path.split('/')[:-1])
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    if(regression_method == "SSGPR"):
        # Sparse Spectrum GP: randomly decide whether frequencies are noisy.
        freq_noisy = random.choice([True, False])
        # NOTE(review): ``n`` is not defined in the visible scope of this
        # method — presumably the number of training samples set elsewhere
        # (e.g. an attribute or enclosing scope); confirm, otherwise this
        # raises NameError. ``SSGP`` is a project-local class.
        model = SSGP(int(np.random.randint(n/15)+n/20), freq_noisy)
        print("TRY NEW MODEL [SSGP %d (%s)]......" % (model.m,
              ("NOISY" if freq_noisy else "NOT_NOISY")))
        # Copies guard the caller's arrays against in-place mutation by SSGP.
        model.fit(X.copy(), y.copy())
    elif(regression_method == "KernelRidge"):
        from sklearn.kernel_ridge import KernelRidge
        # Grid-search regularization strength and RBF width.
        model = GridSearchCV(
            KernelRidge(kernel='rbf', gamma=0.1),
            cv=5,
            n_jobs=2,
            verbose=True,
            scoring='neg_mean_squared_error',
            param_grid={
                "alpha": [1, 1e-1, 1e-2, 1e-3],
                "gamma": np.logspace(-3, 3, 10)
            }
        )
        model.fit(X, y.ravel())
        # Keep only the refit best estimator, not the search wrapper.
        model = model.best_estimator_
    elif(regression_method == "DecisionTreeRegressor"):
        from sklearn.tree import DecisionTreeRegressor
        # NOTE(review): criterion names 'mse'/'mae' were renamed to
        # 'squared_error'/'absolute_error' in scikit-learn 1.0+ — confirm
        # the pinned sklearn version supports these strings.
        model = GridSearchCV(
            DecisionTreeRegressor(max_depth=10),
            cv=5,
            verbose=True,
            scoring='neg_mean_squared_error',
            param_grid={
                "min_samples_split": list(range(2, 16)),
                'criterion':['mse', 'mae']
            }
        )
        model.fit(X, y.ravel())
        model = model.best_estimator_
    elif(regression_method == "AdaBoostDecisionTreeRegressor"):
        from sklearn.ensemble import AdaBoostRegressor
        from sklearn.tree import DecisionTreeRegressor
        # Stage 1: tune a standalone decision tree as the base learner.
        boost_model = GridSearchCV(
            DecisionTreeRegressor(max_depth=10),
            cv=5,
            n_jobs=2,
            verbose=True,
            scoring='neg_mean_squared_error',
            param_grid={
                "min_samples_split": list(range(2, 16)),
                'criterion':['mse', 'mae']
            }
        )
        boost_model.fit(X, y.ravel())
        boost_model = boost_model.best_estimator_
        # Shrink the tree before boosting: weaker base learners typically
        # suit AdaBoost better than the depth-10 tree tuned above.
        boost_model.set_params(max_depth=5)
        # Stage 2: tune the AdaBoost ensemble built on that base tree.
        model = GridSearchCV(
            AdaBoostRegressor(boost_model),
            cv=5,
            n_jobs=2,
            verbose=True,
            scoring='neg_mean_squared_error',
            param_grid={
                "n_estimators": list(range(10, 31, 10)),
                "loss": ["linear", "square", "exponential"]
            }
        )
        model.fit(X, y.ravel())
        model = model.best_estimator_
    elif(regression_method == "GradientBoostingRegressor"):
        from sklearn.ensemble import GradientBoostingRegressor
        # NOTE(review): loss names 'ls'/'lad' were renamed to
        # 'squared_error'/'absolute_error' in scikit-learn 1.0+ — confirm.
        model = GridSearchCV(
            GradientBoostingRegressor(),
            cv=5,
            n_jobs=2,
            verbose=True,
            scoring='neg_mean_squared_error',
            param_grid={
                "min_samples_split": list(range(2, 16)),
                'loss':['ls', 'lad', 'huber', 'quantile'],
                "n_estimators": list(range(100, 301, 50)),
            }
        )
        model.fit(X, y.ravel())
        model = model.best_estimator_
    elif(regression_method == "RandomForestRegressor"):
        from sklearn.ensemble import RandomForestRegressor
        model = GridSearchCV(
            RandomForestRegressor(max_depth=6),
            cv=5,
            n_jobs=2,
            verbose=True,
            scoring='neg_mean_squared_error',
            param_grid={
                'criterion':['mse', 'mae'],
                "n_estimators": list(range(5, 26, 5)),
            }
        )
#.........这里部分代码省略.........