

Python preprocessing.PolynomialFeatures Method Code Examples

This article collects and summarizes typical usage examples of the sklearn.preprocessing.PolynomialFeatures method in Python. If you are wondering what exactly preprocessing.PolynomialFeatures does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing module, sklearn.preprocessing.


The following presents 15 code examples of the preprocessing.PolynomialFeatures method, sorted by popularity by default.
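Before turning to the collected examples, here is a minimal self-contained usage sketch, assuming only NumPy and scikit-learn are installed; it is not taken from any of the projects below and only illustrates what the transformer produces on its own:

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

# Two samples with two input columns [a, b].
X = np.array([[2.0, 3.0],
              [0.5, 1.5]])

# degree=2 without the bias column expands [a, b] into [a, b, a^2, a*b, b^2].
poly = PolynomialFeatures(degree=2, include_bias=False)
X_poly = poly.fit_transform(X)

print(X_poly.shape)  # (2, 5)
print(X_poly[0])     # [2. 3. 4. 6. 9.]

The pipelines in Examples 9, 10 and 12 below feed exactly this kind of expanded feature matrix into a linear model.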

Example 1: fit

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def fit(self, x, y=None):
        if y is not None:
            xdot = y
        else:
            xdot = self.derivative.transform(x)

        if self.operators is not None:
            feature_transformer = SymbolicFeatures(
                exponents=np.linspace(1, self.degree, self.degree), operators=self.operators
            )
        else:
            feature_transformer = PolynomialFeatures(degree=self.degree, include_bias=False)

        steps = [
            ("features", feature_transformer),
            ("model", STRidge(alpha=self.alpha, threshold=self.threshold, **self.kw)),
        ]
        self.model = MultiOutputRegressor(Pipeline(steps), n_jobs=self.n_jobs)
        self.model.fit(x, xdot)

        self.n_input_features_ = self.model.estimators_[0].steps[0][1].n_input_features_
        self.n_output_features_ = self.model.estimators_[0].steps[0][1].n_output_features_
        return self 
Author: Ohjeah, Project: sparsereg, Lines: 25, Source file: sindy.py

Example 2: test_transformed_shape

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def test_transformed_shape(self):
        # checks if the transformed objects have the correct columns
        a = dpp.PolynomialFeatures()
        a.fit(X)
        n_cols = len(a.get_feature_names())
        # dask array
        assert a.transform(X).shape[1] == n_cols
        # numpy array
        assert a.transform(X.compute()).shape[1] == n_cols
        # dask dataframe
        assert a.transform(df).shape[1] == n_cols
        # pandas dataframe
        assert a.transform(df.compute()).shape[1] == n_cols
        X_nan_rows = df.values
        df_none_divisions = X_nan_rows.to_dask_dataframe(columns=df.columns)
        # dask array with nan rows
        assert a.transform(X_nan_rows).shape[1] == n_cols
        # dask data frame with nan rows
        assert a.transform(df_none_divisions).shape[1] == n_cols 
Author: dask, Project: dask-ml, Lines: 21, Source file: test_data.py

Example 3: test_model_polynomial_features_float_degree_2

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def test_model_polynomial_features_float_degree_2(self):
        X = np.array([[1.2, 3.2, 1.3, -5.6], [4.3, -3.2, 5.7, 1.0],
                      [0, 3.2, 4.7, -8.9]])
        model = PolynomialFeatures(degree=2).fit(X)
        model_onnx = convert_sklearn(
            model,
            "scikit-learn polynomial features",
            [("input", FloatTensorType([None, X.shape[1]]))],
        )
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            X.astype(np.float32),
            model,
            model_onnx,
            basename="SklearnPolynomialFeaturesFloatDegree2",
            allow_failure="StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')",
        ) 
Author: onnx, Project: sklearn-onnx, Lines: 20, Source file: test_sklearn_polynomial_features_converter.py

Example 4: test_model_polynomial_features_int_degree_2

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def test_model_polynomial_features_int_degree_2(self):
        X = np.array([
            [1, 3, 4, 0],
            [2, 3, 4, 1],
            [1, -4, 3, 7],
            [3, 10, -9, 5],
            [1, 0, 10, 5],
        ])
        model = PolynomialFeatures(degree=2).fit(X)
        model_onnx = convert_sklearn(
            model,
            "scikit-learn polynomial features",
            [("input", Int64TensorType([None, X.shape[1]]))],
        )
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            X.astype(np.int64),
            model,
            model_onnx,
            basename="SklearnPolynomialFeaturesIntDegree2",
            allow_failure="StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')",
        ) 
Author: onnx, Project: sklearn-onnx, Lines: 25, Source file: test_sklearn_polynomial_features_converter.py

Example 5: test_model_polynomial_features_float_degree_3

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def test_model_polynomial_features_float_degree_3(self):
        X = np.array([[1.2, 3.2, 1.2], [4.3, 3.2, 4.5], [3.2, 4.7, 1.1]])
        model = PolynomialFeatures(degree=3).fit(X)
        model_onnx = convert_sklearn(
            model,
            "scikit-learn polynomial features",
            [("input", FloatTensorType([None, X.shape[1]]))],
        )
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            X.astype(np.float32),
            model,
            model_onnx,
            basename="SklearnPolynomialFeaturesFloatDegree3",
            allow_failure="StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')",
        ) 
Author: onnx, Project: sklearn-onnx, Lines: 19, Source file: test_sklearn_polynomial_features_converter.py

Example 6: test_model_polynomial_features_int_degree_3

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def test_model_polynomial_features_int_degree_3(self):
        X = np.array([
            [1, 3, 33],
            [4, 1, -11],
            [3, 7, -3],
            [3, 5, 4],
            [1, 0, 3],
            [5, 4, 9],
        ])
        model = PolynomialFeatures(degree=3).fit(X)
        model_onnx = convert_sklearn(
            model,
            "scikit-learn polynomial features",
            [("input", Int64TensorType([None, X.shape[1]]))],
        )
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            X.astype(np.int64),
            model,
            model_onnx,
            basename="SklearnPolynomialFeaturesIntDegree3",
            allow_failure="StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')",
        ) 
Author: onnx, Project: sklearn-onnx, Lines: 26, Source file: test_sklearn_polynomial_features_converter.py

Example 7: test_model_polynomial_features_float_degree_4

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def test_model_polynomial_features_float_degree_4(self):
        X = np.array([[1.2, 3.2, 3.1, 1.3], [4.3, 3.2, 0.5, 1.3],
                      [3.2, 4.7, 5.4, 7.1]])
        model = PolynomialFeatures(degree=4).fit(X)
        model_onnx = convert_sklearn(
            model,
            "scikit-learn polynomial features",
            [("input", FloatTensorType([None, X.shape[1]]))],
        )
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            X.astype(np.float32),
            model,
            model_onnx,
            basename="SklearnPolynomialFeaturesFloatDegree4-Dec4",
            allow_failure="StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')",
        ) 
Author: onnx, Project: sklearn-onnx, Lines: 20, Source file: test_sklearn_polynomial_features_converter.py

Example 8: test_objectmapper

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def test_objectmapper(self):
        df = pdml.ModelFrame([])
        self.assertIs(df.preprocessing.Binarizer, pp.Binarizer)
        self.assertIs(df.preprocessing.FunctionTransformer,
                      pp.FunctionTransformer)
        self.assertIs(df.preprocessing.Imputer, pp.Imputer)
        self.assertIs(df.preprocessing.KernelCenterer, pp.KernelCenterer)
        self.assertIs(df.preprocessing.LabelBinarizer, pp.LabelBinarizer)
        self.assertIs(df.preprocessing.LabelEncoder, pp.LabelEncoder)
        self.assertIs(df.preprocessing.MultiLabelBinarizer, pp.MultiLabelBinarizer)
        self.assertIs(df.preprocessing.MaxAbsScaler, pp.MaxAbsScaler)
        self.assertIs(df.preprocessing.MinMaxScaler, pp.MinMaxScaler)
        self.assertIs(df.preprocessing.Normalizer, pp.Normalizer)
        self.assertIs(df.preprocessing.OneHotEncoder, pp.OneHotEncoder)
        self.assertIs(df.preprocessing.PolynomialFeatures, pp.PolynomialFeatures)
        self.assertIs(df.preprocessing.RobustScaler, pp.RobustScaler)
        self.assertIs(df.preprocessing.StandardScaler, pp.StandardScaler) 
Author: pandas-ml, Project: pandas-ml, Lines: 19, Source file: test_preprocessing.py

Example 9: polynomial_regression

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def polynomial_regression(self, assign=True, degree=2, **kwargs):
        """
        有監督學習回歸器,使用:
            make_pipeline(PolynomialFeatures(degree), LinearRegression(**kwargs))

        :param assign: 是否保存實例後的LinearRegression對象,默認True,self.reg = reg
        :param degree: 多項式擬合參數,默認2
        :param kwargs: 由make_pipeline(PolynomialFeatures(degree), LinearRegression(**kwargs))
                       即關鍵字參數**kwargs全部傳遞給LinearRegression做為構造參數

        :return: 實例化的回歸對象
        """
        reg = make_pipeline(PolynomialFeatures(degree), LinearRegression(**kwargs))
        if assign:
            self.reg = reg
        return reg 
Author: bbfamily, Project: abu, Lines: 18, Source file: ABuMLCreater.py

Example 10: sample_1031_3

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def sample_1031_3():
    """
    10.3.1_3 The third little pig ("豬老三") uses regression to predict the stock price: PolynomialFeatures
    :return:
    """
    train_x, train_y_regress, train_y_classification, pig_three_feature, \
    test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = sample_1031_1()

    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import PolynomialFeatures
    from sklearn.linear_model import LinearRegression

    # Wrap PolynomialFeatures(degree=3) + LinearRegression in a pipeline
    estimator = make_pipeline(PolynomialFeatures(degree=3),
                              LinearRegression())
    # Keep using regress_process; the only difference is the estimator
    regress_process(estimator, train_x, train_y_regress, test_x,
                    test_y_regress)
    plt.show() 
Author: bbfamily, Project: abu, Lines: 21, Source file: c10.py

Example 11: evaluate_timestamp

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def evaluate_timestamp(self, timestamp):
        """
        Gets datetime object and calculates as a prediction or as an
        interpolation

        - timestamp: datetime object (date_1/date_2 in `calculate`)
        > Returns float of prediction or interpolation
        """

        if (
            datetime.date(1993, 1, 15) > timestamp.date()
            or datetime.date(2019, 2, 7) < timestamp.date()
        ):
            # Perform some data preparation before being
            # able to pass it to the model
            return self.poly_model.predict(
                PolynomialFeatures(degree=3).fit_transform(
                    np.array([timestamp.timestamp()]).reshape(1, -1)
                )
            )[0][0]

        return self.model(timestamp.timestamp()) 
Author: python-discord, Project: code-jam-5, Lines: 24, Source file: difference_calc.py

Example 12: poly_inter

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def poly_inter(self, data):
        # define x values for data points
        X = np.linspace(0, data.shape[0] - 1, data.shape[0])[:, np.newaxis]
        
        # define pipeline and fit model
        model = make_pipeline(PolynomialFeatures(self.degree), Ridge())
        model.fit(X, data)

        if self.plot: plot_poly(X, model.predict(X), data)
        
        # predict next interpolated value
        last = model.predict(np.array([[data.shape[0] - 1]]))
        pred = model.predict(np.array([[data.shape[0]]]))

        # return slope of last point
        return pred[0]/last[0] 
Author: SC4RECOIN, Project: LSTM-Crypto-Price-Prediction, Lines: 18, Source file: poly_interpolation.py

Example 13: feature_transform

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def feature_transform(X, mode='polynomial', degree=1):

        poly = PolynomialFeatures(degree)
        process_X = poly.fit_transform(X)

        if mode == 'legendre':
            lege = legendre(degree)
            process_X = lege(process_X)

        return process_X 
Author: fukuball, Project: fuku-ml, Lines: 12, Source file: Utility.py

Example 14: polyfeatures

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def polyfeatures(X):
    poly = PolynomialFeatures(degree=2, include_bias=False, interaction_only=False)
    X_poly = poly.fit_transform(X)
    X = pd.DataFrame(X_poly, columns=poly.get_feature_names())
    return X 
Author: DominickZhang, Project: KDDCup2019_admin, Lines: 7, Source file: preprocess.py

Example 15: learn_on_k_best

# Required import: from sklearn import preprocessing [as alias]
# Or: from sklearn.preprocessing import PolynomialFeatures [as alias]
def learn_on_k_best(archive: utils.Archive[utils.MultiValue], k: int) -> ArrayLike:
    """Approximate optimum learnt from the k best.

    Parameters
    ----------
    archive: utils.Archive[utils.Value]
    """
    items = list(archive.items_as_arrays())
    dimension = len(items[0][0])

    # Select the k best.
    first_k_individuals = [x for x in sorted(items, key=lambda indiv: archive[indiv[0]].get_estimation("pessimistic"))[:k]]
    assert len(first_k_individuals) == k

    # Recenter the best.
    middle = np.array(sum(p[0] for p in first_k_individuals) / k)
    normalization = 1e-15 + np.sqrt(np.sum((first_k_individuals[-1][0] - first_k_individuals[0][0])**2))
    y = [archive[c[0]].get_estimation("pessimistic") for c in first_k_individuals]
    X = np.asarray([(c[0] - middle) / normalization for c in first_k_individuals])

    # We need SKLearn.
    from sklearn.linear_model import LinearRegression
    from sklearn.preprocessing import PolynomialFeatures
    polynomial_features = PolynomialFeatures(degree=2)
    X2 = polynomial_features.fit_transform(X)

    # Fit a linear model.
    model = LinearRegression()
    model.fit(X2, y)

    # Find the minimum of the quadratic model.
    optimizer = OnePlusOne(parametrization=dimension, budget=dimension * dimension + dimension + 500)
    try:
        optimizer.minimize(lambda x: float(model.predict(polynomial_features.fit_transform(np.asarray([x])))))
    except ValueError:
        raise InfiniteMetaModelOptimum("Infinite meta-model optimum in learn_on_k_best.")

    minimum = optimizer.provide_recommendation().value
    if np.sum(minimum**2) > 1.:
        raise InfiniteMetaModelOptimum("huge meta-model optimum in learn_on_k_best.")
    return middle + normalization * minimum 
Author: facebookresearch, Project: nevergrad, Lines: 43, Source file: optimizerlib.py


Note: The sklearn.preprocessing.PolynomialFeatures method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.