
Python Pipeline._pre_transform Method Code Examples

This article collects typical usage examples of the sklearn.pipeline.Pipeline._pre_transform method in Python. If you are unsure what Pipeline._pre_transform does, how to call it, or what real-world usage looks like, the selected code examples below may help. You can also explore further usage examples of sklearn.pipeline.Pipeline, the class this method belongs to.


The following presents 3 code examples of the Pipeline._pre_transform method, sorted by popularity by default.
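Before the examples, here is a minimal sketch of how _pre_transform is typically invoked. Note that _pre_transform is a private method of older scikit-learn releases; the pipeline steps and data below are illustrative assumptions, not taken from the examples that follow.

# Minimal sketch (assumes an old scikit-learn release that still defines
# Pipeline._pre_transform; the steps and data here are illustrative only).
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
X = rng.rand(20, 3)
y = rng.randint(0, 2, size=20)

pipe = Pipeline([("scale", StandardScaler()),
                 ("clf", LogisticRegression())])

# _pre_transform fit-transforms X through every step except the final
# estimator and returns the transformed data plus the fit_params routed
# to that final step.
Xt, fit_params = pipe._pre_transform(X, y)
print(Xt.shape)   # (20, 3) after scaling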

Example 1: middle_transformations

# Required import: from sklearn.pipeline import Pipeline [as alias]
# Or: from sklearn.pipeline.Pipeline import _pre_transform [as alias]
def middle_transformations(self, est, X, y):
    # Rebuild the pipeline's middle steps (everything between the first
    # transformer and the final estimator), append a DummyClassifier
    # (from sklearn.dummy) as a placeholder final step, and return X
    # after those middle transforms via _pre_transform.
    if len(est.steps) > 2:
        tmp = Pipeline([(name, obj) for name, obj in est.steps[1:-1]] +
                       [("dummy", DummyClassifier())])
        transformed_data, fit_params = tmp._pre_transform(X, y)
        return transformed_data
    else:
        return X
Developer ID: alfiya400, Project: kaggle-CrowdFlowerRelevance, Lines of code: 9, Source file: model.py
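As a rough, self-contained usage sketch of the helper above: the wrapper class, step names, and data are hypothetical, and an old scikit-learn release that still has Pipeline._pre_transform is assumed.

# Hypothetical call site for middle_transformations; names and data are
# made up for illustration.
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.dummy import DummyClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import LogisticRegression


class ModelInspector(object):
    # Same logic as middle_transformations in Example 1.
    def middle_transformations(self, est, X, y):
        if len(est.steps) > 2:
            tmp = Pipeline(list(est.steps[1:-1]) +
                           [("dummy", DummyClassifier())])
            transformed_data, fit_params = tmp._pre_transform(X, y)
            return transformed_data
        else:
            return X


est = Pipeline([("scale", StandardScaler()),
                ("select", SelectKBest(f_classif, k=2)),
                ("clf", LogisticRegression())])
rng = np.random.RandomState(0)
X = rng.rand(30, 5)
y = rng.randint(0, 2, size=30)

# Only the middle step (SelectKBest) is applied; the first transformer
# and the final estimator are skipped.
X_mid = ModelInspector().middle_transformations(est, X, y)
print(X_mid.shape)   # (30, 2)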

Example 2: BasePipeline

# Required import: from sklearn.pipeline import Pipeline [as alias]
# Or: from sklearn.pipeline.Pipeline import _pre_transform [as alias]

#......... part of the code is omitted here .........

            steps.append((preproc_name, preprocessor_object))

        # Extract Estimator Hyperparameters from the configuration object
        estimator_name = self._get_pipeline()[-1][0]
        estimator_object = self._get_pipeline()[-1][1]
        estimator_parameters = {}
        for instantiated_hyperparameter in self.configuration:
            if not instantiated_hyperparameter.startswith(estimator_name):
                continue
            if self.configuration[instantiated_hyperparameter] is None:
                continue

            name_ = instantiated_hyperparameter.split(":")[-1]
            estimator_parameters[name_] = self.configuration[
                instantiated_hyperparameter]

        estimator_parameters.update(init_params_per_method[estimator_name])
        estimator_object = estimator_object(random_state=self.random_state,
                                            **estimator_parameters)

        # Ducktyping...
        if hasattr(estimator_object, 'get_components'):
            estimator_object = estimator_object.choice

        steps.append((estimator_name, estimator_object))

        self.pipeline_ = Pipeline(steps)
        if fit_params is None or not isinstance(fit_params, dict):
            fit_params = dict()
        else:
            fit_params = {key.replace(":", "__"): value for key, value in
                          fit_params.items()}
        X, fit_params = self.pipeline_._pre_transform(X, y, **fit_params)
        return X, fit_params

    def fit_estimator(self, X, y, fit_params=None):
        check_is_fitted(self, 'pipeline_')
        if fit_params is None:
            fit_params = {}
        self.pipeline_.steps[-1][-1].fit(X, y, **fit_params)
        return self

    def iterative_fit(self, X, y, fit_params=None, n_iter=1):
        check_is_fitted(self, 'pipeline_')
        if fit_params is None:
            fit_params = {}
        self.pipeline_.steps[-1][-1].iterative_fit(X, y, n_iter=n_iter,
                                                   **fit_params)

    def estimator_supports_iterative_fit(self):
        return hasattr(self.pipeline_.steps[-1][-1], 'iterative_fit')

    def configuration_fully_fitted(self):
        check_is_fitted(self, 'pipeline_')
        return self.pipeline_.steps[-1][-1].configuration_fully_fitted()

    def predict(self, X, batch_size=None):
        """Predict the classes using the selected model.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)

        batch_size: int or None, defaults to None
            batch_size controls whether the pipeline will be
Developer ID: stokasto, Project: auto-sklearn, Lines of code: 70, Source file: base.py
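One detail worth calling out in the snippet above is the fit_params key rewriting performed just before _pre_transform is called: configuration keys of the form "component:parameter" are converted into scikit-learn's "step__parameter" routing form. A small illustration of that rewrite follows; the key and value are made up.

# Illustration of the key rewriting above; the key/value pair is hypothetical.
fit_params = {"classifier:sample_weight": [1.0, 0.5, 2.0]}
fit_params = {key.replace(":", "__"): value
              for key, value in fit_params.items()}
print(fit_params)   # {'classifier__sample_weight': [1.0, 0.5, 2.0]}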

Example 3: EncodingModel

# Required import: from sklearn.pipeline import Pipeline [as alias]
# Or: from sklearn.pipeline.Pipeline import _pre_transform [as alias]

#......... part of the code is omitted here .........
        self.sfreq = sfreq

        # Delay X
        X, y, labels, names = _build_design_matrix(X, y, sfreq, self.times,
                                                   self.delays, self.tmin,
                                                   self.tmax, feat_names)
        self.feat_names = np.array(names)
        cv = _check_cv(X, labels, cv, cv_params)

        # Define names for input variables to keep track of time delays
        X_names = [(feat, delay)
                   for delay in self.delays for feat in self.feat_names]
        self.coef_names = np.array(X_names)

        # Build model instance
        if not isinstance(self.est, Pipeline):
            self.est = Pipeline([('est', self.est)])

        # Create model metadata that we'll add to the obj later
        model_data = dict(coefs_all_=[], scores_=[])
        if isinstance(self.est.steps[-1][-1], GridSearchCV):
            model_data.update(dict(best_estimators_=[], best_params_=[]))

        # Fit the model and collect model results
        if verbose is True:
            cv = tqdm(cv)
        for i, (tr, tt) in enumerate(cv):
            X_tr = X[:, tr].T
            X_tt = X[:, tt].T
            y_tr = y[tr]
            y_tt = y[tt]

            if self.preproc_y:
                y_tr, y_tt = [self.est._pre_transform(i)[0] for i in [y_tr, y_tt]]
            self.est.fit(X_tr, y_tr)

            mod = deepcopy(self.est.steps[-1][-1])
            if isinstance(mod, GridSearchCV):
                # If it's a GridSearch, then add a "best_params" object
                # Assume hyperparameter search
                if mod.refit:
                    model_data['best_estimators_'].append(mod.best_estimator_)
                    model_data['coefs_all_'].append(mod.best_estimator_.coef_)
                model_data['best_params_'].append(mod.best_params_)
            else:
                model_data['coefs_all_'].append(mod.coef_)

            # Fit model + make predictions
            scr = self.scorer(y_tt, self.est.predict(X_tt))
            model_data['scores_'].append(scr)

        for key, val in model_data.iteritems():
            setattr(self, key, np.array(val))
        self.coefs_ = np.mean(self.coefs_all_, axis=0)
        self.cv = cv

    def predict(self, X):
        """Generate predictions using a fit receptive field model.

        This uses the `coef_` attribute for predictions.
        """
        X_lag = delay_timeseries(X, self.sfreq, self.delays)

        Xt = self.est._pre_transform(X_lag.T)[0]
        return np.dot(Xt, self.coefs_)
Developer ID: kingjr, Project: ecogtools, Lines of code: 69, Source file: strf.py
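To make the final prediction step concrete, here is a small sketch of what self.est._pre_transform(X_lag.T)[0] followed by the dot product does. The pipeline, shapes, and data are illustrative assumptions, and an old scikit-learn release with Pipeline._pre_transform is assumed.

# Sketch of the prediction step above. _pre_transform returns a
# (transformed_X, fit_params) pair, so [0] keeps only the transformed
# design matrix, which is then projected onto the averaged coefficients.
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X_lag = rng.rand(4, 100)             # lagged design matrix, (n_features, n_times)
coefs = rng.rand(4)                  # averaged coef_ across CV folds

est = Pipeline([("scale", StandardScaler()), ("est", Ridge())])
Xt = est._pre_transform(X_lag.T)[0]  # fit-transforms the non-final steps
pred = np.dot(Xt, coefs)             # predicted response, shape (n_times,)
print(pred.shape)                    # (100,)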


Note: The sklearn.pipeline.Pipeline._pre_transform method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and the copyright of the source code belongs to the original authors. Please refer to the corresponding project's license for distribution and use; do not reproduce without permission.