This page collects typical usage examples of Python's sklearn.model_selection.GridSearchCV. If you have been wondering what model_selection.GridSearchCV does and how to use it in practice, the curated code examples below should help. You can also explore the rest of the sklearn.model_selection module for related utilities.
The 15 code examples below all demonstrate model_selection.GridSearchCV and are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
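All of the examples share the same basic pattern: build an estimator, define a parameter grid, wrap both in GridSearchCV, and call fit. Here is a minimal, self-contained sketch of that pattern (the iris dataset and the parameter values are chosen purely for illustration):

from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Every key in param_grid must be a constructor parameter of the estimator.
param_grid = {'C': [0.1, 1, 10], 'gamma': ['scale', 0.01, 0.001]}
grid = GridSearchCV(SVC(), param_grid, cv=5)
grid.fit(X_train, y_train)

print(grid.best_params_, grid.best_score_)
# With refit=True (the default) the best estimator is refit on the full
# training set, so the grid object can be used for scoring directly.
print(grid.score(X_test, y_test))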
Example 1: search_cv

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV, cross_val_score

def search_cv(x_train, y_train, x_test, y_test, model=GradientBoostingClassifier(n_estimators=30)):
    # Grid search for the best parameters.
    # NOTE: this grid holds SVC parameters (kernel, C, gamma), so it only
    # works when an SVC-like estimator is passed in as `model`.
    parameters = {'kernel': ('linear', 'rbf'), 'C': [1, 2, 4], 'gamma': [0.125, 0.25, 0.5, 1, 2, 4]}
    clf = GridSearchCV(model, param_grid=parameters)
    grid_search = clf.fit(x_train, y_train)
    # Score the results
    print("Best score: %0.3f" % grid_search.best_score_)
    print(grid_search.best_estimator_)
    # Best params
    print('best params:', clf.best_params_)
    print('-----grid search end------------')
    print('on all train set')
    scores = cross_val_score(grid_search.best_estimator_, x_train, y_train, cv=3, scoring='accuracy')
    print(scores.mean(), scores)
    print('on test set')
    scores = cross_val_score(grid_search.best_estimator_, x_test, y_test, cv=3, scoring='accuracy')
    print(scores.mean(), scores)
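One caveat in this example: calling cross_val_score on the test set refits the model on folds of the test data, so it is not a clean held-out evaluation. A more conventional check (a sketch reusing the names from the example above) scores the already-refit best estimator on the untouched test set:

# best_estimator_ was refit on the full training set by GridSearchCV
# (refit=True is the default), so no further fitting is needed here.
print('test accuracy:', grid_search.best_estimator_.score(x_test, y_test))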
Example 2: __tune_parameters

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

def __tune_parameters(self):
    for i in range(len(self.algorithms)):
        if self.verbose:
            print(' %s' % self.algorithms[i].name)
        estimator = self.algorithms[i].estimator
        parameters = self.algorithms[i].parameters
        # The iid parameter was removed in scikit-learn 0.24; drop it on newer versions.
        clf = GridSearchCV(
            estimator, parameters, cv=self.cv, scoring=self.scoring,
            iid=False, n_jobs=self.n_jobs)
        clf.fit(self.data.X, self.data.y)
        grid_scores = []
        for j in range(len(clf.cv_results_['mean_test_score'])):
            grid_scores.append((clf.cv_results_['params'][j],
                                clf.cv_results_['mean_test_score'][j],
                                clf.cv_results_['std_test_score'][j]))
        self.algorithms[i].estimator = clf.best_estimator_
        self.algorithms[i].best_score = clf.best_score_
        self.algorithms[i].best_params = clf.best_params_
        self.algorithms[i].grid_scores = grid_scores
    self.__search_best_algorithm()
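The grid_scores list built above is just a flattened view of cv_results_. If pandas is available, the same information can be inspected more conveniently (a sketch; clf refers to the fitted GridSearchCV in the loop above):

import pandas as pd

# cv_results_ is a dict of arrays with one entry per parameter combination.
results = pd.DataFrame(clf.cv_results_)
print(results[['params', 'mean_test_score', 'std_test_score']]
      .sort_values('mean_test_score', ascending=False))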
Example 3: compute_accuracy_svc

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

def compute_accuracy_svc(
    data_train,
    labels_train,
    data_test,
    labels_test,
    param_grid=None,
    verbose=0,
    max_iter=-1,
):
    if param_grid is None:
        param_grid = [
            {"C": [1, 10, 100, 1000], "kernel": ["linear"]},
            {"C": [1, 10, 100, 1000], "gamma": [0.001, 0.0001], "kernel": ["rbf"]},
        ]
    svc = SVC(max_iter=max_iter)  # max_iter=-1 means no iteration limit
    clf = GridSearchCV(svc, param_grid, verbose=verbose, cv=3)
    # compute_accuracy_classifier is a helper defined elsewhere in the project.
    return compute_accuracy_classifier(
        clf, data_train, labels_train, data_test, labels_test
    )
Example 4: get_full_rbf_svm_clf

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit
from sklearn.svm import SVC

def get_full_rbf_svm_clf(train_x, train_y, c_range=None, gamma_range=None):
    param_grid = dict(gamma=gamma_range, C=c_range)
    cv = StratifiedShuffleSplit(n_splits=2, test_size=0.2, random_state=42)
    # n_jobs=14 is tuned to the original author's machine; adjust to your CPU count.
    grid = GridSearchCV(SVC(cache_size=1024), param_grid=param_grid, cv=cv, n_jobs=14, verbose=10)
    grid.fit(train_x, train_y)

    print("The best parameters are %s with a score of %0.2f" % (grid.best_params_, grid.best_score_))
    scores = grid.cv_results_['mean_test_score'].reshape(len(c_range), len(gamma_range))
    print("Scores:")
    print(scores)
    print("c_range:", c_range)
    print("gamma_range:", gamma_range)

    c_best = grid.best_params_['C']
    gamma_best = grid.best_params_['gamma']
    # Note: the returned classifier carries the best parameters but is not yet fitted.
    clf = SVC(C=c_best, gamma=gamma_best, verbose=True)
    return clf
#----------------
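Since the function above returns an unfitted SVC, the caller must fit it before predicting. A sketch of typical usage (train_x, train_y, and test_x are hypothetical arrays):

clf = get_full_rbf_svm_clf(train_x, train_y,
                           c_range=[1, 10, 100, 1000],
                           gamma_range=[0.01, 0.001, 0.0001])
clf.fit(train_x, train_y)          # refit on the full training set
predictions = clf.predict(test_x)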
Example 5: fit

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

def fit(self, X, y=None, groups=None):
    """Run fit with all sets of parameters.

    Parameters
    ----------
    X : array-like, shape=(n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape=(n_samples,) or (n_samples, n_output), optional (default=None)
        Target relative to X for classification or regression;
        None for unsupervised learning.
    groups : array-like, shape=(n_samples,), optional (default=None)
        Group labels for the samples used while splitting the dataset into
        train/test set.
    """
    # This method belongs to a project-specific GridSearchCV subclass;
    # _as_numpy converts y to a plain numpy array before delegating.
    return super(GridSearchCV, self).fit(X, _as_numpy(y), groups)
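The method above is an excerpt from a subclass that wraps scikit-learn's GridSearchCV. A minimal sketch of what such a wrapper might look like (the _as_numpy helper shown here is an assumption modeled on the excerpt, not the project's actual implementation):

import numpy as np
from sklearn import model_selection

def _as_numpy(y):
    # Coerce framework-specific label containers into a plain numpy array.
    return None if y is None else np.asarray(y)

class GridSearchCV(model_selection.GridSearchCV):
    def fit(self, X, y=None, groups=None):
        # Normalize y, then delegate to the scikit-learn implementation.
        return super(GridSearchCV, self).fit(X, _as_numpy(y), groups)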
Example 6: paramTuning

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

def paramTuning(features_train, labels_train, nfolds):
    # Use the training data and define the number of folds.
    # Determine the range of C values to search.
    Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]
    # Determine the range of gamma values to search.
    gammas = [1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 0.001, 0.01, 0.1, 1, 10, 100, 1000]
    # Build the parameter dictionary.
    param_grid = {'C': Cs, 'gamma': gammas}
    # Run the grid search over all parameter combinations defined above.
    grid_search = GridSearchCV(SVC(kernel='poly'), param_grid, cv=nfolds)
    # Fit the training data.
    grid_search.fit(features_train, labels_train)
    # Show the best pair of parameters.
    print(grid_search.best_params_)

Author: gionanide, Project: Speech_Signal_Processing_and_Classification, Lines: 21, Source: kpca_lda_knn_equalizeClasses.py
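Hand-typed exponential ranges like the Cs and gammas lists in these paramTuning examples are easy to get wrong (the original source even listed 1e-8 twice). numpy.logspace builds the same kind of grid programmatically; a sketch:

import numpy as np

Cs = np.logspace(-3, 4, 8)       # 0.001, 0.01, ..., 10000
gammas = np.logspace(-8, 3, 12)  # 1e-8, 1e-7, ..., 1000
param_grid = {'C': Cs, 'gamma': gammas}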
Example 7: paramTuning

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

def paramTuning(features_train, labels_train, nfolds):
    # Use the training data and define the number of folds.
    # Determine the range of C values to search.
    Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]
    # Determine the range of gamma values to search.
    gammas = [1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 0.001, 0.01, 0.1, 1, 10, 100]
    # Build the parameter dictionary.
    param_grid = {'C': Cs, 'gamma': gammas}
    # Run the grid search over all parameter combinations defined above.
    grid_search = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=nfolds)
    # Fit the training data.
    grid_search.fit(features_train, labels_train)
    # Show the best pair of parameters.
    print(grid_search.best_params_)

Author: gionanide, Project: Speech_Signal_Processing_and_Classification, Lines: 21, Source: kpca_lda_knn_multiclass.py
Example 8: paramTuning

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

def paramTuning(features_train, labels_train, nfolds):
    # Use the training data and define the number of folds.
    # Determine the range of C values to search.
    Cs = [1, 10, 100, 1000, 10000]
    # Determine the range of gamma values to search.
    gammas = [1e-8, 1e-7, 1e-6, 1e-5]
    # Build the parameter dictionary.
    param_grid = {'C': Cs, 'gamma': gammas}
    # Run the grid search over all parameter combinations defined above.
    grid_search = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=nfolds)
    # Fit the training data.
    grid_search.fit(features_train, labels_train)
    # Return the best pair of parameters.
    return grid_search.best_params_

Author: gionanide, Project: Speech_Signal_Processing_and_Classification, Lines: 21, Source: svm_keeping_supportVectors.py
Example 9: paramTuning

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

def paramTuning(features_train, labels_train, nfolds):
    # Use the training data and define the number of folds.
    # Determine the range of C values to search.
    Cs = [1, 10, 100, 1000, 10000]
    # Determine the range of gamma values to search.
    gammas = [1e-8, 1e-7, 1e-6, 1e-5]
    # Build the parameter dictionary.
    param_grid = {'C': Cs, 'gamma': gammas}
    # Run the grid search over all parameter combinations defined above.
    grid_search = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=nfolds)
    # Fit the training data.
    grid_search.fit(features_train, labels_train)
    # Show the best pair of parameters.
    print(grid_search.best_params_)
Example 10: paramTuning

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

def paramTuning(features_train, labels_train, nfolds):
    # Use the training data and define the number of folds.
    # Determine the range of C values to search.
    Cs = [1000, 10000, 1000000]
    # Determine the range of gamma values to search.
    gammas = [1e-8, 1e-7, 1e-6, 1e-5]
    # Build the parameter dictionary.
    param_grid = {'C': Cs, 'gamma': gammas}
    # Run the grid search over all parameter combinations defined above.
    grid_search = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=nfolds)
    # Fit the training data.
    grid_search.fit(features_train, labels_train)
    # Return the best pair of parameters.
    return grid_search.best_params_

Author: gionanide, Project: Speech_Signal_Processing_and_Classification, Lines: 21, Source: svm_balancedSampleNumber_greedySearch.py
Example 11: test_gridsearch

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

def test_gridsearch():
    # Check that base trees can be grid-searched.
    # (iris and boston are dataset fixtures defined in the test module; the
    # boston dataset was removed from scikit-learn in 1.2, so this test
    # targets older releases.)
    # AdaBoost classification
    boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
    parameters = {'n_estimators': (1, 2),
                  'base_estimator__max_depth': (1, 2),
                  'algorithm': ('SAMME', 'SAMME.R')}
    clf = GridSearchCV(boost, parameters)
    clf.fit(iris.data, iris.target)

    # AdaBoost regression
    boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
                              random_state=0)
    parameters = {'n_estimators': (1, 2),
                  'base_estimator__max_depth': (1, 2)}
    clf = GridSearchCV(boost, parameters)
    clf.fit(boston.data, boston.target)
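The base_estimator__max_depth keys above use scikit-learn's double-underscore convention for addressing parameters of nested estimators. The full set of legal names for any estimator can be listed with get_params; a short sketch, using the same base_estimator spelling as the test above (renamed to estimator in scikit-learn 1.2):

from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier

boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
# Every key returned here is a valid param_grid key for GridSearchCV on `boost`,
# e.g. 'algorithm', 'base_estimator__max_depth', 'n_estimators', ...
print(sorted(boost.get_params().keys()))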
Example 12: test_check_scoring_gridsearchcv

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

def test_check_scoring_gridsearchcv():
    # Test that check_scoring works on GridSearchCV and pipeline.
    # Slightly redundant non-regression test.
    grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
    scorer = check_scoring(grid, "f1")
    assert isinstance(scorer, _PredictScorer)

    pipe = make_pipeline(LinearSVC())
    scorer = check_scoring(pipe, "f1")
    assert isinstance(scorer, _PredictScorer)

    # Check that cross_val_score definitely calls the scorer and doesn't
    # make any assumptions about the estimator apart from it having a fit
    # method. (EstimatorWithFit and DummyScorer are test-module fixtures.)
    scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
                             scoring=DummyScorer())
    assert_array_equal(scores, 1)
Example 13: test_imputation_pipeline_grid_search

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

def test_imputation_pipeline_grid_search():
    # Test imputation within a pipeline + grid search.
    X = sparse_random_matrix(100, 100, density=0.10)
    missing_values = X.data[0]

    pipeline = Pipeline([('imputer',
                          SimpleImputer(missing_values=missing_values)),
                         ('tree',
                          tree.DecisionTreeRegressor(random_state=0))])

    # Pipeline step parameters are addressed as <step_name>__<param_name>.
    parameters = {
        'imputer__strategy': ["mean", "median", "most_frequent"]
    }

    Y = sparse_random_matrix(100, 1, density=0.10).toarray()
    gs = GridSearchCV(pipeline, parameters)
    gs.fit(X, Y)
Example 14: test_set_params_passes_all_parameters

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

def test_set_params_passes_all_parameters():
    # Make sure all parameters are passed together to set_params
    # of the nested estimator. Regression test for #9944.
    class TestDecisionTree(DecisionTreeClassifier):
        def set_params(self, **kwargs):
            super().set_params(**kwargs)
            # expected_kwargs is in test scope
            assert kwargs == expected_kwargs
            return self

    expected_kwargs = {'max_depth': 5, 'min_samples_leaf': 2}
    for est in [Pipeline([('estimator', TestDecisionTree())]),
                GridSearchCV(TestDecisionTree(), {})]:
        est.set_params(estimator__max_depth=5,
                       estimator__min_samples_leaf=2)
Example 15: test_ridgecv_sample_weight

# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import GridSearchCV [as alias]

def test_ridgecv_sample_weight():
    rng = np.random.RandomState(0)
    alphas = (0.1, 1.0, 10.0)

    # There are different algorithms for n_samples > n_features
    # and the opposite, so test them both.
    for n_samples, n_features in ((6, 5), (5, 10)):
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        sample_weight = 1.0 + rng.rand(n_samples)

        cv = KFold(5)
        ridgecv = RidgeCV(alphas=alphas, cv=cv)
        ridgecv.fit(X, y, sample_weight=sample_weight)

        # Check using GridSearchCV directly; fit params such as
        # sample_weight are forwarded to the underlying estimator's fit.
        parameters = {'alpha': alphas}
        gs = GridSearchCV(Ridge(), parameters, cv=cv)
        gs.fit(X, y, sample_weight=sample_weight)

        assert ridgecv.alpha_ == gs.best_estimator_.alpha
        assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)