

Python Ridge.fit Method Code Examples

This article collects and summarizes typical usage examples of the Python method sklearn.linear_model.ridge.Ridge.fit. If you are unsure exactly how to use Python's Ridge.fit, what it does, or what real calls to it look like, the curated code examples below may help. You can also explore further usage examples for the class it belongs to, sklearn.linear_model.ridge.Ridge.


The following presents 15 code examples of the Ridge.fit method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
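Before diving into the collected test snippets, here is a minimal sketch of the basic Ridge.fit workflow. The data is synthetic and chosen purely for illustration, and the sketch assumes the public import path sklearn.linear_model.Ridge, since the private sklearn.linear_model.ridge path used in the older snippets below has been removed in recent scikit-learn releases:

import numpy as np
from sklearn.linear_model import Ridge  # public path; the private .ridge module is gone in recent releases

# Synthetic regression data, purely for illustration
rng = np.random.RandomState(0)
X = rng.randn(20, 5)
y = X.dot(rng.randn(5)) + 0.1 * rng.randn(20)

ridge = Ridge(alpha=1.0)               # alpha controls the strength of the L2 penalty
ridge.fit(X, y)                        # fit returns the estimator itself, enabling chaining
print(ridge.coef_, ridge.intercept_)   # learned weights and intercept
print(ridge.score(X, y))               # R^2 on the training data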

Example 1: test_dtype_match

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def test_dtype_match():
    rng = np.random.RandomState(0)
    alpha = 1.0

    n_samples, n_features = 6, 5
    X_64 = rng.randn(n_samples, n_features)
    y_64 = rng.randn(n_samples)
    X_32 = X_64.astype(np.float32)
    y_32 = y_64.astype(np.float32)

    solvers = ["svd", "sparse_cg", "cholesky", "lsqr"]
    for solver in solvers:

        # Check type consistency 32bits
        ridge_32 = Ridge(alpha=alpha, solver=solver)
        ridge_32.fit(X_32, y_32)
        coef_32 = ridge_32.coef_

        # Check type consistency 64 bits
        ridge_64 = Ridge(alpha=alpha, solver=solver)
        ridge_64.fit(X_64, y_64)
        coef_64 = ridge_64.coef_

        # Do the actual checks at once for easier debug
        assert coef_32.dtype == X_32.dtype
        assert coef_64.dtype == X_64.dtype
        assert ridge_32.predict(X_32).dtype == X_32.dtype
        assert ridge_64.predict(X_64).dtype == X_64.dtype
        assert_almost_equal(ridge_32.coef_, ridge_64.coef_, decimal=5)
Developer: Moler1995, Project: scikit-learn, Lines of code: 31, Source: test_ridge.py

Example 2: test_dtype_match_cholesky

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def test_dtype_match_cholesky():
    # Test different alphas in cholesky solver to ensure full coverage.
    # This test is separated from test_dtype_match for clarity.
    rng = np.random.RandomState(0)
    alpha = (1.0, 0.5)

    n_samples, n_features, n_target = 6, 7, 2
    X_64 = rng.randn(n_samples, n_features)
    y_64 = rng.randn(n_samples, n_target)
    X_32 = X_64.astype(np.float32)
    y_32 = y_64.astype(np.float32)

    # Check type consistency 32bits
    ridge_32 = Ridge(alpha=alpha, solver='cholesky')
    ridge_32.fit(X_32, y_32)
    coef_32 = ridge_32.coef_

    # Check type consistency 64 bits
    ridge_64 = Ridge(alpha=alpha, solver='cholesky')
    ridge_64.fit(X_64, y_64)
    coef_64 = ridge_64.coef_

    # Do all the checks at once, as this is easier to debug
    assert coef_32.dtype == X_32.dtype
    assert coef_64.dtype == X_64.dtype
    assert ridge_32.predict(X_32).dtype == X_32.dtype
    assert ridge_64.predict(X_64).dtype == X_64.dtype
    assert_almost_equal(ridge_32.coef_, ridge_64.coef_, decimal=5)
Developer: Moler1995, Project: scikit-learn, Lines of code: 30, Source: test_ridge.py

Example 3: test_raises_value_error_if_sample_weights_greater_than_1d

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def test_raises_value_error_if_sample_weights_greater_than_1d():
    # Sample weights must be either scalar or 1D

    n_sampless = [2, 3]
    n_featuress = [3, 2]

    rng = np.random.RandomState(42)

    for n_samples, n_features in zip(n_sampless, n_featuress):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        sample_weights_OK = rng.randn(n_samples) ** 2 + 1
        sample_weights_OK_1 = 1.0
        sample_weights_OK_2 = 2.0
        sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
        sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]

        ridge = Ridge(alpha=1)

        # make sure the "OK" sample weights actually work
        ridge.fit(X, y, sample_weights_OK)
        ridge.fit(X, y, sample_weights_OK_1)
        ridge.fit(X, y, sample_weights_OK_2)

        def fit_ridge_not_ok():
            ridge.fit(X, y, sample_weights_not_OK)

        def fit_ridge_not_ok_2():
            ridge.fit(X, y, sample_weights_not_OK_2)

        assert_raise_message(ValueError, "Sample weights must be 1D array or scalar", fit_ridge_not_ok)

        assert_raise_message(ValueError, "Sample weights must be 1D array or scalar", fit_ridge_not_ok_2)
Developer: honorLX, Project: scikit-learn, Lines of code: 35, Source: test_ridge.py

Example 4: test_dtype_match

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def test_dtype_match(solver):
    rng = np.random.RandomState(0)
    alpha = 1.0

    n_samples, n_features = 6, 5
    X_64 = rng.randn(n_samples, n_features)
    y_64 = rng.randn(n_samples)
    X_32 = X_64.astype(np.float32)
    y_32 = y_64.astype(np.float32)

    # Check type consistency 32bits
    ridge_32 = Ridge(alpha=alpha, solver=solver, max_iter=500, tol=1e-10,)
    ridge_32.fit(X_32, y_32)
    coef_32 = ridge_32.coef_

    # Check type consistency 64 bits
    ridge_64 = Ridge(alpha=alpha, solver=solver, max_iter=500, tol=1e-10,)
    ridge_64.fit(X_64, y_64)
    coef_64 = ridge_64.coef_

    # Do the actual checks at once for easier debug
    assert coef_32.dtype == X_32.dtype
    assert coef_64.dtype == X_64.dtype
    assert ridge_32.predict(X_32).dtype == X_32.dtype
    assert ridge_64.predict(X_64).dtype == X_64.dtype
    assert_allclose(ridge_32.coef_, ridge_64.coef_, rtol=1e-4)
Developer: manhhomienbienthuy, Project: scikit-learn, Lines of code: 28, Source: test_ridge.py

Example 5: test_sparse_design_with_sample_weights

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def test_sparse_design_with_sample_weights():
    # Sample weights must work with sparse matrices

    n_sampless = [2, 3]
    n_featuress = [3, 2]

    rng = np.random.RandomState(42)

    sparse_matrix_converters = [sp.coo_matrix,
                                sp.csr_matrix,
                                sp.csc_matrix,
                                sp.lil_matrix,
                                sp.dok_matrix
                                ]

    sparse_ridge = Ridge(alpha=1., fit_intercept=False)
    dense_ridge = Ridge(alpha=1., fit_intercept=False)

    for n_samples, n_features in zip(n_sampless, n_featuress):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        sample_weights = rng.randn(n_samples) ** 2 + 1
        for sparse_converter in sparse_matrix_converters:
            X_sparse = sparse_converter(X)
            sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
            dense_ridge.fit(X, y, sample_weight=sample_weights)

            assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
                                      decimal=6)
Developer: BobChew, Project: scikit-learn, Lines of code: 31, Source: test_ridge.py

Example 6: _test_ridge_loo

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def _test_ridge_loo(filter_):
    # test that it can work with both dense and sparse matrices
    n_samples = X_diabetes.shape[0]

    ret = []

    ridge_gcv = _RidgeGCV(fit_intercept=False)
    ridge = Ridge(fit_intercept=False)

    # generalized cross-validation (efficient leave-one-out)
    K, v, Q = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
    errors, c = ridge_gcv._errors(v, Q, y_diabetes, 1.0)
    values, c = ridge_gcv._values(K, v, Q, y_diabetes, 1.0)

    # brute-force leave-one-out: remove one example at a time
    errors2 = []
    values2 = []
    for i in range(n_samples):
        sel = np.arange(n_samples) != i
        X_new = X_diabetes[sel]
        y_new = y_diabetes[sel]
        ridge.fit(X_new, y_new)
        value = ridge.predict([X_diabetes[i]])[0]
        error = (y_diabetes[i] - value) ** 2
        errors2.append(error)
        values2.append(value)

    # check that efficient and brute-force LOO give same results
    assert_almost_equal(errors, errors2)
    assert_almost_equal(values, values2)

    # check best alpha
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
    best_alpha = ridge_gcv.best_alpha
    ret.append(best_alpha)

    # check that we get same best alpha with custom loss_func
    ridge_gcv2 = _RidgeGCV(fit_intercept=False, loss_func=mean_squared_error)
    ridge_gcv2.fit(filter_(X_diabetes), y_diabetes)
    assert_equal(ridge_gcv2.best_alpha, best_alpha)

    # check that we get same best alpha with sample weights
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
                  sample_weight=np.ones(n_samples))
    assert_equal(ridge_gcv.best_alpha, best_alpha)

    # simulate several responses
    Y = np.vstack((y_diabetes, y_diabetes)).T

    ridge_gcv.fit(filter_(X_diabetes), Y)
    Y_pred = ridge_gcv.predict(filter_(X_diabetes))
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
    y_pred = ridge_gcv.predict(filter_(X_diabetes))

    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
                              Y_pred, decimal=5)

    return ret
Developer: cdegroc, Project: scikit-learn, Lines of code: 60, Source: test_ridge.py

Example 7: _test_tolerance

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def _test_tolerance(filter_):
    ridge = Ridge(tol=1e-5)
    ridge.fit(filter_(X_diabetes), y_diabetes)
    score = ridge.score(filter_(X_diabetes), y_diabetes)

    ridge2 = Ridge(tol=1e-3)
    ridge2.fit(filter_(X_diabetes), y_diabetes)
    score2 = ridge2.score(filter_(X_diabetes), y_diabetes)

    assert_true(score >= score2)
Developer: BobChew, Project: scikit-learn, Lines of code: 12, Source: test_ridge.py

Example 8: _test_tolerance

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def _test_tolerance(filter_):
    ridge = Ridge(tol=1e-5, fit_intercept=False)
    ridge.fit(filter_(X_diabetes), y_diabetes)
    score = ridge.score(filter_(X_diabetes), y_diabetes)

    ridge2 = Ridge(tol=1e-3, fit_intercept=False)
    ridge2.fit(filter_(X_diabetes), y_diabetes)
    score2 = ridge2.score(filter_(X_diabetes), y_diabetes)

    assert score >= score2
Developer: manhhomienbienthuy, Project: scikit-learn, Lines of code: 12, Source: test_ridge.py

Example 9: _test_multi_ridge_diabetes

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def _test_multi_ridge_diabetes(filter_):
    # simulate several responses
    Y = np.vstack((y_diabetes, y_diabetes)).T
    n_features = X_diabetes.shape[1]

    ridge = Ridge(fit_intercept=False)
    ridge.fit(filter_(X_diabetes), Y)
    assert_equal(ridge.coef_.shape, (2, n_features))
    Y_pred = ridge.predict(filter_(X_diabetes))
    ridge.fit(filter_(X_diabetes), y_diabetes)
    y_pred = ridge.predict(filter_(X_diabetes))
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
Developer: honorLX, Project: scikit-learn, Lines of code: 14, Source: test_ridge.py

Example 10: test_ridge_singular

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def test_ridge_singular():
    # test on a singular matrix
    rng = np.random.RandomState(0)
    n_samples, n_features = 6, 6
    y = rng.randn(n_samples // 2)
    y = np.concatenate((y, y))
    X = rng.randn(n_samples // 2, n_features)
    X = np.concatenate((X, X), axis=0)

    ridge = Ridge(alpha=0)
    ridge.fit(X, y)
    assert_greater(ridge.score(X, y), 0.9)
Developer: BobChew, Project: scikit-learn, Lines of code: 14, Source: test_ridge.py

Example 11: test_ridge

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def test_ridge():
    """Ridge regression convergence test using score

    TODO: for this test to be robust, we should use a dataset instead
    of np.random.
    """
    alpha = 1.0

    for solver in ("sparse_cg", "dense_cholesky", "lsqr"):
        # With more samples than features
        n_samples, n_features = 6, 5
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)

        ridge = Ridge(alpha=alpha, solver=solver)
        ridge.fit(X, y)
        assert_equal(ridge.coef_.shape, (X.shape[1], ))
        assert_greater(ridge.score(X, y), 0.47)

        ridge.fit(X, y, sample_weight=np.ones(n_samples))
        assert_greater(ridge.score(X, y), 0.47)

        # With more features than samples
        n_samples, n_features = 5, 10
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        ridge = Ridge(alpha=alpha, solver=solver)
        ridge.fit(X, y)
        assert_greater(ridge.score(X, y), .9)

        ridge.fit(X, y, sample_weight=np.ones(n_samples))
        assert_greater(ridge.score(X, y), 0.9)
Developer: Jetafull, Project: scikit-learn, Lines of code: 34, Source: test_ridge.py

Example 12: test_ridge

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def test_ridge():
    """Ridge regression convergence test using score

    TODO: for this test to be robust, we should use a dataset instead
    of np.random.
    """
    alpha = 1.0

    # With more samples than features
    n_samples, n_features = 6, 5
    y = np.random.randn(n_samples)
    X = np.random.randn(n_samples, n_features)

    ridge = Ridge(alpha=alpha)
    ridge.fit(X, y)
    assert_equal(ridge.coef_.shape, (X.shape[1],))
    assert ridge.score(X, y) > 0.5

    ridge.fit(X, y, sample_weight=np.ones(n_samples))
    assert ridge.score(X, y) > 0.5

    # With more features than samples
    n_samples, n_features = 5, 10
    y = np.random.randn(n_samples)
    X = np.random.randn(n_samples, n_features)
    ridge = Ridge(alpha=alpha)
    ridge.fit(X, y)
    assert ridge.score(X, y) > 0.9

    ridge.fit(X, y, sample_weight=np.ones(n_samples))
    assert ridge.score(X, y) > 0.9
Developer: nitikachandrakar, Project: scikit-learn, Lines of code: 33, Source: test_ridge.py

Example 13: test_ridge

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def test_ridge():
    # Ridge regression convergence test using score
    # TODO: for this test to be robust, we should use a dataset instead
    # of np.random.
    rng = np.random.RandomState(0)
    alpha = 1.0

    for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
        # With more samples than features
        n_samples, n_features = 6, 5
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)

        ridge = Ridge(alpha=alpha, solver=solver)
        ridge.fit(X, y)
        assert_equal(ridge.coef_.shape, (X.shape[1], ))
        assert_greater(ridge.score(X, y), 0.47)

        if solver == "cholesky":
            # Currently the only solver to support sample_weight.
            ridge.fit(X, y, sample_weight=np.ones(n_samples))
            assert_greater(ridge.score(X, y), 0.47)

        # With more features than samples
        n_samples, n_features = 5, 10
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        ridge = Ridge(alpha=alpha, solver=solver)
        ridge.fit(X, y)
        assert_greater(ridge.score(X, y), .9)

        if solver == "cholesky":
            # Currently the only solver to support sample_weight.
            ridge.fit(X, y, sample_weight=np.ones(n_samples))
            assert_greater(ridge.score(X, y), 0.9)
Developer: BobChew, Project: scikit-learn, Lines of code: 37, Source: test_ridge.py

Example 14: test_ridge_sample_weights

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def test_ridge_sample_weights():
    rng = np.random.RandomState(0)

    for solver in ("cholesky", ):
        for n_samples, n_features in ((6, 5), (5, 10)):
            for alpha in (1.0, 1e-2):
                y = rng.randn(n_samples)
                X = rng.randn(n_samples, n_features)
                sample_weight = 1 + rng.rand(n_samples)

                coefs = ridge_regression(X, y,
                                         alpha=alpha,
                                         sample_weight=sample_weight,
                                         solver=solver)
                # Sample weight can be implemented via a simple rescaling
                # for the square loss.
                coefs2 = ridge_regression(
                    X * np.sqrt(sample_weight)[:, np.newaxis],
                    y * np.sqrt(sample_weight),
                    alpha=alpha, solver=solver)
                assert_array_almost_equal(coefs, coefs2)

                # Test for fit_intercept = True
                est = Ridge(alpha=alpha, solver=solver)
                est.fit(X, y, sample_weight=sample_weight)

                # Check using Newton's Method
                # Quadratic function should be solved in a single step.
                # Initialize
                sample_weight = np.sqrt(sample_weight)
                X_weighted = sample_weight[:, np.newaxis] * (
                    np.column_stack((np.ones(n_samples), X)))
                y_weighted = y * sample_weight

                # Gradient is (X*coef-y)*X + alpha*coef_[1:]
                # Remove coef since it is initialized to zero.
                grad = -np.dot(y_weighted, X_weighted)

                # Hessian is (X.T*X) + alpha*I except that the first
                # diagonal element should be zero, since there is no
                # penalization of intercept.
                diag = alpha * np.ones(n_features + 1)
                diag[0] = 0.
                hess = np.dot(X_weighted.T, X_weighted)
                hess.flat[::n_features + 2] += diag
                coef_ = - np.dot(linalg.inv(hess), grad)
                assert_almost_equal(coef_[0], est.intercept_)
                assert_array_almost_equal(coef_[1:], est.coef_)
Developer: BobChew, Project: scikit-learn, Lines of code: 50, Source: test_ridge.py
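A side note on Example 14's comment that sample weights reduce to a simple rescaling for the squared loss: with fit_intercept=False, fitting on rows scaled by sqrt(sample_weight) yields the same coefficients as passing sample_weight directly. Below is a minimal sketch verifying this, using synthetic data and assuming the public sklearn.linear_model import path:

import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X = rng.randn(8, 3)
y = rng.randn(8)
w = 1 + rng.rand(8)

# Weighted fit (no intercept, so the rescaling equivalence is exact)
weighted = Ridge(alpha=1.0, fit_intercept=False).fit(X, y, sample_weight=w)

# Unweighted fit on sqrt(w)-rescaled data
sw = np.sqrt(w)
rescaled = Ridge(alpha=1.0, fit_intercept=False).fit(X * sw[:, np.newaxis], y * sw)

np.testing.assert_allclose(weighted.coef_, rescaled.coef_, rtol=1e-5)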

Example 15: test_fit_simple_backupsklearn

# Required import: from sklearn.linear_model.ridge import Ridge [as alias]
# Or: from sklearn.linear_model.ridge.Ridge import fit [as alias]
def test_fit_simple_backupsklearn():
    df = pd.read_csv("./open_data/simple.txt", delim_whitespace=True)
    X = np.array(df.iloc[:, :df.shape[1] - 1], dtype='float32', order='C')
    y = np.array(df.iloc[:, df.shape[1] - 1], dtype='float32', order='C')
    Solver = h2o4gpu.Ridge

    enet = Solver(glm_stop_early=False)
    print("h2o4gpu fit()")
    enet.fit(X, y)
    print("h2o4gpu predict()")
    print(enet.predict(X))
    print("h2o4gpu score()")
    print(enet.score(X,y))

    enet_wrapper = Solver(normalize=True, random_state=1234)
    print("h2o4gpu scikit wrapper fit()")
    enet_wrapper.fit(X, y)
    print("h2o4gpu scikit wrapper predict()")
    print(enet_wrapper.predict(X))
    print("h2o4gpu scikit wrapper score()")
    print(enet_wrapper.score(X, y))

    from sklearn.linear_model.ridge import Ridge
    enet_sk = Ridge(normalize=True, random_state=1234)
    print("Scikit fit()")
    enet_sk.fit(X, y)
    print("Scikit predict()")
    print(enet_sk.predict(X))
    print("Scikit score()")
    print(enet_sk.score(X, y))

    enet_sk_coef = csr_matrix(enet_sk.coef_, dtype=np.float32).toarray()

    print(enet_sk.coef_)

    print(enet_sk_coef)

    print(enet_wrapper.coef_)

    print(enet_sk.intercept_)
    print(enet_wrapper.intercept_)

    print(enet_sk.n_iter_)
    print(enet_wrapper.n_iter_)

    print("Coeffs, intercept, and n_iters should match")
    assert np.allclose(enet_wrapper.coef_, enet_sk_coef)
    assert np.allclose(enet_wrapper.intercept_, enet_sk.intercept_)
Developer: wamsiv, Project: h2o4gpu, Lines of code: 50, Source: test_ridge_sklearn_wrapper.py


Note: The sklearn.linear_model.ridge.Ridge.fit examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.