本文整理汇总了Python中sklearn.pipeline.Pipeline._pre_transform方法的典型用法代码示例。如果您正苦于以下问题:Python Pipeline._pre_transform方法的具体用法?Python Pipeline._pre_transform怎么用?Python Pipeline._pre_transform使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.pipeline.Pipeline
的用法示例。
在下文中一共展示了Pipeline._pre_transform方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: middle_transformations
# 需要导入模块: from sklearn.pipeline import Pipeline [as 别名]
# 或者: from sklearn.pipeline.Pipeline import _pre_transform [as 别名]
def middle_transformations(self, est, X, y):
    """Run X through the middle steps of *est* (all but the first and last).

    A throwaway ``DummyClassifier`` is appended as the final step so that
    ``Pipeline._pre_transform`` applies every listed transformer; only the
    transformed data is returned.  If *est* has two or fewer steps there is
    no middle to apply, and X is returned unchanged.
    """
    if len(est.steps) <= 2:
        return X
    middle_steps = list(est.steps[1:-1]) + [("dummy", DummyClassifier())]
    transformed, _unused_fit_params = Pipeline(middle_steps)._pre_transform(X, y)
    return transformed
示例2: BasePipeline
# 需要导入模块: from sklearn.pipeline import Pipeline [as 别名]
# 或者: from sklearn.pipeline.Pipeline import _pre_transform [as 别名]
# ......... part of the code omitted here (the enclosing method's `def`
# line and earlier body are not shown in this scraped example) .........
# Append the previously configured preprocessor as a pipeline step.
steps.append((preproc_name, preprocessor_object))
# Extract Estimator Hyperparameters from the configuration object
estimator_name = self._get_pipeline()[-1][0]
estimator_object = self._get_pipeline()[-1][1]
estimator_parameters = {}
# Keep only configuration entries belonging to the chosen estimator;
# keys are colon-separated, with the bare parameter name last
# (presumably "<estimator_name>:<param_name>" — confirm against caller).
for instantiated_hyperparameter in self.configuration:
    if not instantiated_hyperparameter.startswith(estimator_name):
        continue
    # None means "parameter not set"; skip it rather than pass None along.
    if self.configuration[instantiated_hyperparameter] is None:
        continue
    name_ = instantiated_hyperparameter.split(":")[-1]
    estimator_parameters[name_] = self.configuration[
        instantiated_hyperparameter]
# Merge in method-specific defaults, then instantiate the estimator class.
estimator_parameters.update(init_params_per_method[estimator_name])
estimator_object = estimator_object(random_state=self.random_state,
                                    **estimator_parameters)
# Ducktyping: a wrapper exposing get_components() holds the real
# estimator in its `choice` attribute — unwrap it.
if hasattr(estimator_object, 'get_components'):
    estimator_object = estimator_object.choice
steps.append((estimator_name, estimator_object))
self.pipeline_ = Pipeline(steps)
# Normalize fit_params: Pipeline routes params via "step__param" keys,
# so translate the configuration-style ":" separator to "__".
if fit_params is None or not isinstance(fit_params, dict):
    fit_params = dict()
else:
    fit_params = {key.replace(":", "__"): value for key, value in
                  fit_params.items()}
# Apply all transformers (everything up to the final estimator) to X.
X, fit_params = self.pipeline_._pre_transform(X, y, **fit_params)
return X, fit_params
def fit_estimator(self, X, y, fit_params=None):
    """Fit only the final estimator of the already-built pipeline.

    Requires ``self.pipeline_`` to exist (``check_is_fitted`` raises
    otherwise).  Returns ``self`` so calls can be chained.
    """
    check_is_fitted(self, 'pipeline_')
    params = {} if fit_params is None else fit_params
    final_estimator = self.pipeline_.steps[-1][-1]
    final_estimator.fit(X, y, **params)
    return self
def iterative_fit(self, X, y, fit_params=None, n_iter=1):
    """Run ``n_iter`` further training iterations on the final estimator.

    Requires ``self.pipeline_`` to exist (``check_is_fitted`` raises
    otherwise).  Delegates to the final step's ``iterative_fit``.
    """
    check_is_fitted(self, 'pipeline_')
    params = {} if fit_params is None else fit_params
    final_estimator = self.pipeline_.steps[-1][-1]
    final_estimator.iterative_fit(X, y, n_iter=n_iter, **params)
def estimator_supports_iterative_fit(self):
    """Return True if the pipeline's final step exposes ``iterative_fit``."""
    final_step = self.pipeline_.steps[-1][-1]
    return hasattr(final_step, 'iterative_fit')
def configuration_fully_fitted(self):
    """Delegate the fitted-state query to the pipeline's final step.

    Requires ``self.pipeline_`` to exist (``check_is_fitted`` raises
    otherwise).
    """
    check_is_fitted(self, 'pipeline_')
    final_step = self.pipeline_.steps[-1][-1]
    return final_step.configuration_fully_fitted()
def predict(self, X, batch_size=None):
"""Predict the classes using the selected model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
batch_size: int or None, defaults to None
batch_size controls whether the pipeline will be
示例3: EncodingModel
# 需要导入模块: from sklearn.pipeline import Pipeline [as 别名]
# 或者: from sklearn.pipeline.Pipeline import _pre_transform [as 别名]
# ......... part of the code omitted here (the enclosing method's `def`
# line and earlier body are not shown in this scraped example) .........
self.sfreq = sfreq
# Delay X
X, y, labels, names = _build_design_matrix(X, y, sfreq, self.times,
                                           self.delays, self.tmin,
                                           self.tmax, feat_names)
self.feat_names = np.array(names)
cv = _check_cv(X, labels, cv, cv_params)
# Define names for input variables to keep track of time delays:
# one (feature, delay) pair per column of the delayed design matrix.
X_names = [(feat, delay)
           for delay in self.delays for feat in self.feat_names]
self.coef_names = np.array(X_names)
# Build model instance — wrap a bare estimator in a one-step Pipeline
# so _pre_transform / steps[-1] work uniformly below.
if not isinstance(self.est, Pipeline):
    self.est = Pipeline([('est', self.est)])
# Create model metadata that we'll add to the obj later
model_data = dict(coefs_all_=[], scores_=[])
if isinstance(self.est.steps[-1][-1], GridSearchCV):
    model_data.update(dict(best_estimators_=[], best_params_=[]))
# Fit the model and collect model results
if verbose is True:
    cv = tqdm(cv)
for i, (tr, tt) in enumerate(cv):
    # X is (features, samples) here; transpose to sklearn's (samples, features).
    X_tr = X[:, tr].T
    X_tt = X[:, tt].T
    y_tr = y[tr]
    y_tt = y[tt]
    if self.preproc_y:
        # NOTE(review): the comprehension reuses `i` as its variable; under
        # Python 2 (see iteritems below) this clobbers the enumerate index.
        # Harmless only because `i` is otherwise unused — worth renaming.
        y_tr, y_tt = [self.est._pre_transform(i)[0] for i in [y_tr, y_tt]]
    self.est.fit(X_tr, y_tr)
    mod = deepcopy(self.est.steps[-1][-1])
    if isinstance(mod, GridSearchCV):
        # If it's a GridSearch, then add a "best_params" object
        # Assume hyperparameter search
        if mod.refit:
            model_data['best_estimators_'].append(mod.best_estimator_)
            model_data['coefs_all_'].append(mod.best_estimator_.coef_)
            model_data['best_params_'].append(mod.best_params_)
    else:
        model_data['coefs_all_'].append(mod.coef_)
    # Fit model + make predictions
    scr = self.scorer(y_tt, self.est.predict(X_tt))
    model_data['scores_'].append(scr)
# Expose collected results as attributes (scores_, coefs_all_, ...).
# NOTE(review): dict.iteritems() is Python 2 only — use .items() on Python 3.
for key, val in model_data.iteritems():
    setattr(self, key, np.array(val))
# Final coefficients are the mean over CV folds.
self.coefs_ = np.mean(self.coefs_all_, axis=0)
self.cv = cv
def predict(self, X):
    """Generate predictions using a fit receptive field model.

    This uses the `coef_` attribute for predictions: the raw input is
    expanded into its time-delayed design matrix, passed through the
    pipeline's transformers, and projected onto the averaged
    coefficients stored in ``coefs_``.
    """
    lagged = delay_timeseries(X, self.sfreq, self.delays)
    design = self.est._pre_transform(lagged.T)[0]
    return np.dot(design, self.coefs_)