

Python linear_model.Ridge Method Code Examples

This article collects typical usage examples of the Python method sklearn.linear_model.Ridge. If you are asking what exactly linear_model.Ridge does and how it is used in practice, the curated code examples below should help. You can also explore further usage examples from the enclosing module, sklearn.linear_model.


The following presents 15 code examples of linear_model.Ridge, drawn from open-source projects and sorted by popularity by default.
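Before the collected examples, here is a minimal, self-contained sketch of the basic Ridge workflow (not taken from any project below; the data and the alpha value are purely illustrative):

import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
y = X @ np.array([1.5, 0.0, -2.0, 0.7, 0.0]) + 0.1 * rng.randn(100)

# alpha controls the strength of the L2 penalty on the coefficients
model = Ridge(alpha=1.0)
model.fit(X, y)
print(model.coef_, model.intercept_)
print(model.score(X, y))  # R^2 on the training data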

Example 1: test_cross_val_score_with_score_func_regression

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def test_cross_val_score_with_score_func_regression():
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()

    # Default score of the Ridge regression estimator
    scores = cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)

    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score
    r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)

    # Mean squared error; this is a loss function, so "scores" are negative
    neg_mse_scores = cross_val_score(reg, X, y, cv=5,
                                     scoring="neg_mean_squared_error")
    expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)

    # Explained variance
    scoring = make_scorer(explained_variance_score)
    ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 26, Source: test_validation.py

Example 2: test_classes__property

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def test_classes__property():
    # Test that classes_ property matches best_estimator_.classes_
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    Cs = [.1, 1, 10]

    grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
    grid_search.fit(X, y)
    assert_array_equal(grid_search.best_estimator_.classes_,
                       grid_search.classes_)

    # Test that regressors do not have a classes_ attribute
    grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
    grid_search.fit(X, y)
    assert not hasattr(grid_search, 'classes_')

    # Test that the grid searcher has no classes_ attribute before it's fit
    grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
    assert not hasattr(grid_search, 'classes_')

    # Test that the grid searcher has no classes_ attribute without a refit
    grid_search = GridSearchCV(LinearSVC(random_state=0),
                               {'C': Cs}, refit=False)
    grid_search.fit(X, y)
    assert not hasattr(grid_search, 'classes_') 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 27, Source: test_search.py

Example 3: test_empty_cv_iterator_error

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def test_empty_cv_iterator_error():
    # Use global X, y

    # create cv
    cv = KFold(n_splits=3).split(X)

    # pop all of it, this should cause the expected ValueError
    [u for u in cv]
    # cv is empty now

    train_size = 100
    ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]},
                               cv=cv, n_jobs=-1)

    # assert that this raises an error
    with pytest.raises(ValueError,
                       match='No fits were performed. '
                             'Was the CV iterator empty\\? '
                             'Were there no candidates\\?'):
        ridge.fit(X[:train_size], y[:train_size]) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 22, Source: test_search.py

Example 4: test_random_search_bad_cv

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def test_random_search_bad_cv():
    # Use global X, y

    class BrokenKFold(KFold):
        def get_n_splits(self, *args, **kw):
            return 1

    # create bad cv
    cv = BrokenKFold(n_splits=3)

    train_size = 100
    ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]},
                               cv=cv, n_jobs=-1)

    # assert that this raises an error
    with pytest.raises(ValueError,
                       match='cv.split and cv.get_n_splits returned '
                             'inconsistent results. Expected \\d+ '
                             'splits, got \\d+'):
        ridge.fit(X[:train_size], y[:train_size]) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 22, Source: test_search.py

Example 5: _tested_estimators

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def _tested_estimators():
    for name, Estimator in all_estimators():
        if issubclass(Estimator, BiclusterMixin):
            continue
        if name.startswith("_"):
            continue
        # FIXME _skip_test should be used here (if we could)

        required_parameters = getattr(Estimator, "_required_parameters", [])
        if len(required_parameters):
            if required_parameters in (["estimator"], ["base_estimator"]):
                if issubclass(Estimator, RegressorMixin):
                    estimator = Estimator(Ridge())
                else:
                    estimator = Estimator(LinearDiscriminantAnalysis())
            else:
                warnings.warn("Can't instantiate estimator {} which requires "
                              "parameters {}".format(name,
                                                     required_parameters),
                              SkipTestWarning)
                continue
        else:
            estimator = Estimator()
        yield name, estimator 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 26, Source: test_common.py

Example 6: test_base_chain_random_order

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def test_base_chain_random_order():
    # Fit base chain with random order
    X, Y = generate_multilabel_dataset_with_correlations()
    for chain in [ClassifierChain(LogisticRegression()),
                  RegressorChain(Ridge())]:
        chain_random = clone(chain).set_params(order='random', random_state=42)
        chain_random.fit(X, Y)
        chain_fixed = clone(chain).set_params(order=chain_random.order_)
        chain_fixed.fit(X, Y)
        assert_array_equal(chain_fixed.order_, chain_random.order_)
        assert_not_equal(list(chain_random.order_), list(range(4)))
        assert_equal(len(chain_random.order_), 4)
        assert_equal(len(set(chain_random.order_)), 4)
        # Randomly ordered chain should behave identically to a fixed order
        # chain with the same order.
        for est1, est2 in zip(chain_random.estimators_,
                              chain_fixed.estimators_):
            assert_array_almost_equal(est1.coef_, est2.coef_) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 20, Source: test_multioutput.py

Example 7: test_base_chain_crossval_fit_and_predict

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def test_base_chain_crossval_fit_and_predict():
    # Fit chain with cross_val_predict and verify predict
    # performance
    X, Y = generate_multilabel_dataset_with_correlations()

    for chain in [ClassifierChain(LogisticRegression()),
                  RegressorChain(Ridge())]:
        chain.fit(X, Y)
        chain_cv = clone(chain).set_params(cv=3)
        chain_cv.fit(X, Y)
        Y_pred_cv = chain_cv.predict(X)
        Y_pred = chain.predict(X)

        assert Y_pred_cv.shape == Y_pred.shape
        assert not np.all(Y_pred == Y_pred_cv)
        if isinstance(chain, ClassifierChain):
            assert jaccard_score(Y, Y_pred_cv, average='samples') > .4
        else:
            assert mean_squared_error(Y, Y_pred_cv) < .25 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 21, Source: test_multioutput.py

Example 8: solveSingle

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def solveSingle(self, inputDF, outputDict, rho, beta_target):
        I, J, V, Y = [], [], [], []
        fd = {}  # maps feature names to consecutive integers, starting at 0
        for i, (id, x) in enumerate(inputDF.items()):
            l = outputDict.get(id)
            for k, v in x.items():
                I.append(i)
                J.append(k)
                V.append(v)
                upd(fd, k)  # project helper defined elsewhere; presumably assigns k the next integer id if unseen
            Y.append(l)
        J = [fd[k] for k in J]  # list comprehension: under Python 3, map() would return a lazy iterator
        X = sparse.coo_matrix((V, (I, J)), shape=(I[-1] + 1, len(fd)))
        fd_reverse = [k for k, v in sorted(fd.items(), key=lambda t: t[1])]
        # y_new = y - X . beta_target
        # converts the proximal least-squares problem into a plain ridge regression
        ZmUl = np.array([beta_target.get(k, 0) for k in fd_reverse])
        y_new = np.array(Y) - X * ZmUl
        ridge = Ridge(alpha=rho, fit_intercept=False)
        ret = ridge.fit(X, y_new)
        # shift the solved coefficients back by beta_target, keyed by feature name
        return dict(zip(fd_reverse, (ret.coef_ + ZmUl).tolist())) 
Developer: yahoo, Project: SparkADMM, Lines: 27, Source: SparseLinearRegressionSolver.py
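The comment "converts the proximal least-squares problem into a plain ridge regression" compresses a short derivation. Using the snippet's own names (β_target is the vector ZmUl, ρ is the Ridge alpha), the problem being solved is

    min_β ‖y − Xβ‖² + ρ‖β − β_target‖²

Substituting δ = β − β_target gives

    min_δ ‖(y − X β_target) − Xδ‖² + ρ‖δ‖²

which is an ordinary ridge regression of y_new = y − X·β_target on X. The solver returns δ̂, so the final coefficients are δ̂ + β_target — exactly the `ret.coef_ + ZmUl` in the return statement.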

Example 9: learn_model

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def learn_model(self, x, y, clf, lam=None):
        if lam is None and self.initlam != -1:  # hack for first training
            lam = self.initlam
        if clf is None:
            if lam is None:
                # pick the regularization strength by cross-validation first
                clf = linear_model.LassoCV(max_iter=10000)
                clf.fit(x, y)
                lam = clf.alpha_
            clf = linear_model.Lasso(alpha=lam, max_iter=10000, warm_start=True)
        clf.fit(x, y)
        return clf, lam


############################################################################################
# Implements GD Poisoning for Ridge Linear Regression
############################################################################################ 
Developer: jagielski, Project: manip-ml, Lines: 20, Source: gd_poisoners.py

Example 10: get_model

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def get_model(PARAMS):
    '''Get model according to parameters'''
    model_dict = {
        'LinearRegression': LinearRegression(),
        'Ridge': Ridge(),
        'Lars': Lars(),
        'ARDRegression': ARDRegression()
    }
    if not model_dict.get(PARAMS['model_name']):
        LOG.exception('Not supported model!')
        exit(1)

    model = model_dict[PARAMS['model_name']]
    # Note: the `normalize` parameter was deprecated in scikit-learn 1.0 and
    # removed in 1.2; on newer versions, scale features in a Pipeline instead.
    model.normalize = bool(PARAMS['normalize'])

    return model 
Developer: microsoft, Project: nni, Lines: 19, Source: main.py

Example 11: __init__

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def __init__(self,
                 probabilistic_estimator,
                 stepsize=0.01,
                 verbose=0,
                 fit_intercept=False,
                 sparse_output=True,
                 **ridge_params
                 ):
        """
        Arguments:
            probabilistic_estimator -- Estimator capable of predict_proba

        Keyword Arguments:
            average -- averaging method for f1 score
            stepsize -- stepsize for the exhaustive search of optimal threshold
            fit_intercept -- fit intercept in Ridge regression
            sparse_output -- Predict returns csr in favor of ndarray
            **ridge_params -- Passed down to Ridge regression
        """
        self.model = probabilistic_estimator
        self.verbose = verbose
        self.ridge = Ridge(fit_intercept=fit_intercept, **ridge_params)
        self.stepsize = stepsize
        self.sparse_output = sparse_output 
Developer: quadflor, Project: Quadflor, Lines: 26, Source: neural_net.py

Example 12: test_do_not_validate

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def test_do_not_validate(env_boston):
    exp = CVExperiment(
        model_initializer=Ridge,
        model_init_params={},
        feature_engineer=FeatureEngineer([standard_scale], do_validate=False),
    )

    for step in exp.feature_engineer.steps:
        assert step.original_hashes == {}
        assert step.updated_hashes == {}


##################################################
# `FeatureEngineer.inverse_transform` TypeError Tests
##################################################
# noinspection PyUnusedLocal 
Developer: HunterMcGushion, Project: hyperparameter_hunter, Lines: 18, Source: test_support.py

Example 13: test_feature_engineer_list_experiment_inequality

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def test_feature_engineer_list_experiment_inequality(env_boston, steps_0, steps_1):
    """Test that the `feature_engineer` attribute constructed by
    :class:`~hyperparameter_hunter.experiments.CVExperiment` is NOT the same when given a list as
    input vs. a :class:`~hyperparameter_hunter.feature_engineering.FeatureEngineer` when the two are
    actually different. This is an insanity test to make sure that the related test in this module,
    :func:`test_feature_engineer_list_experiment_equality`, is not simply equating everything"""
    exp_0 = CVExperiment(Ridge, feature_engineer=steps_0)
    exp_1 = CVExperiment(Ridge, feature_engineer=FeatureEngineer(steps_1))
    assert exp_0.feature_engineer != exp_1.feature_engineer

    # Repeat above, but switch which steps are wrapped in `FeatureEngineer`
    exp_2 = CVExperiment(Ridge, feature_engineer=steps_1)
    exp_3 = CVExperiment(Ridge, feature_engineer=FeatureEngineer(steps_0))
    assert exp_2.feature_engineer != exp_3.feature_engineer


##################################################
# OptPros: `FeatureEngineer` as List
##################################################
#################### Equality #################### 
Developer: HunterMcGushion, Project: hyperparameter_hunter, Lines: 22, Source: test_support.py

Example 14: test_shape

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def test_shape(random_xy_dataset_regr):
    X, y = random_xy_dataset_regr
    m = X.shape[0]
    pipeline = Pipeline(
        [
            (
                "ml_features",
                FeatureUnion(
                    [
                        ("model_1", EstimatorTransformer(LinearRegression())),
                        ("model_2", EstimatorTransformer(Ridge())),
                    ]
                ),
            )
        ]
    )

    assert pipeline.fit(X, y).transform(X).shape == (m, 2) 
Developer: koaning, Project: scikit-lego, Lines: 20, Source: test_estimatortransformer.py
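For context: each EstimatorTransformer above wraps a regressor so that transform() returns the fitted model's predictions as a feature column, which is what lets the FeatureUnion stack the two models' outputs into an (m, 2) matrix. A minimal sketch of that behaviour in isolation (assuming scikit-lego is installed and exposes sklego.meta.EstimatorTransformer):

import numpy as np
from sklearn.linear_model import Ridge
from sklego.meta import EstimatorTransformer

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(50)

et = EstimatorTransformer(Ridge())
features = et.fit(X, y).transform(X)
print(features.shape)  # expected: (50, 1) -- one prediction column per wrapped model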

Example 15: iter_explain

# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Ridge [as alias]
def iter_explain(self, instances_df, nh_size):

        [Xs, Ys, isSparse] = self.preprocessor.generate_samples(nh_size)
        [Xe, Ye, isSparse] = self.preprocessor.preprocess(instances_df)

        sample_weights = self.compute_sample_weights_to_instance(Xe, Xs)
        classes = self.preprocessor.get_classes()
        predictor_features = self.preprocessor.get_predictor_features()
        coefs_cols = ['coef_{}'.format(c) for c in classes]
        predictor_features_df = pd.DataFrame(predictor_features, columns=['feature'])
        samples_cols = ['sample_{}'.format(s) for s in range(nh_size)]

        for row_idx, [to_exp, to_proba, w] in enumerate(zip(Xe, Ye, sample_weights)):  # zip: Py2's izip is gone in Python 3
            Xs[0,:] = to_exp
            Ys[0,:] = to_proba
            model_regressor = Ridge(alpha=self.ridge_alpha, fit_intercept=True, random_state=self.random_state)
            #TODO: compare with train explanation learning
            model_regressor.fit(Xs,Ys, sample_weight=w)
            local_r2_score = model_regressor.score(Xs, Ys, sample_weight=None)
            intercept_np = model_regressor.intercept_
            model_coefs = model_regressor.coef_
            kernel_distance_avg = np.mean(w)
            kernel_distance_std = np.std(w)

            coefs_df = pd.DataFrame(model_coefs.T, columns=coefs_cols)
            explanation_df = pd.concat((predictor_features_df,coefs_df), axis=1)
            #TODO: optimize this
            explanation_df.insert(0, '_exp_id', row_idx)

            instance_df = pd.DataFrame(to_exp.reshape(-1, len(to_exp)), columns=predictor_features)
            instance_df['r2_score'] = local_r2_score
            instance_df['kernel_distance_avg'] = kernel_distance_avg
            instance_df['kernel_distance_std'] = kernel_distance_std
            #TODO: optimize this
            instance_df.insert(0, '_exp_id', row_idx)

            #FIXME: used only for debugging 
            #weights_df = pd.DataFrame(w.reshape(-1, len(w)), columns=samples_cols)
            #weights_df.insert(0, '_exp_id', row_idx)

            yield explanation_df, instance_df 
Developer: dataiku, Project: dataiku-contrib, Lines: 43, Source: explanation.py
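iter_explain implements a LIME-style local surrogate: for each instance, it fits a sample-weighted Ridge on neighbourhood samples and reads the coefficients off as local feature effects. A stripped-down sketch of that core idea (the data and the kernel below are illustrative stand-ins, not the project's actual preprocessing):

import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
Xs = rng.randn(200, 4)                           # perturbed neighbourhood samples
Ys = 1.0 / (1.0 + np.exp(-(Xs @ np.array([2.0, -1.0, 0.0, 0.0]))))  # black-box probabilities
x0 = Xs[0]                                       # instance being explained

# kernel weights: samples closer to x0 get more influence on the local fit
w = np.exp(-np.linalg.norm(Xs - x0, axis=1) ** 2)

local = Ridge(alpha=1.0)
local.fit(Xs, Ys, sample_weight=w)
print(local.coef_)                               # local linear effects around x0
print(local.score(Xs, Ys, sample_weight=w))      # weighted R^2: local fit quality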


Note: the sklearn.linear_model.Ridge examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and redistribution or use should follow each project's License. Please do not reproduce this compilation without permission.