

Python LinearRegression.fit Method Code Examples

This article collects typical usage examples of the Python method sklearn.linear_model.base.LinearRegression.fit. If you are wondering what exactly LinearRegression.fit does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples of the class it belongs to, sklearn.linear_model.base.LinearRegression.


The following 15 code examples of LinearRegression.fit are presented, sorted by popularity by default.
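
Before the collected examples, here is a minimal usage sketch of the fit method on synthetic data. Note that sklearn.linear_model.base is an internal module (renamed to sklearn.linear_model._base in scikit-learn 0.22); in current releases LinearRegression is imported from the public package sklearn.linear_model, which is what this sketch assumes. The data is made up purely for illustration.

import numpy as np
from sklearn.linear_model import LinearRegression  # public import path

# Synthetic data: y = 2*x + 1 plus a little noise (illustration only)
rng = np.random.RandomState(0)
X = rng.rand(20, 1)                       # shape (n_samples, n_features)
y = 2.0 * X.ravel() + 1.0 + 0.01 * rng.randn(20)

reg = LinearRegression()                  # fit_intercept=True by default
reg.fit(X, y)                             # fit returns the estimator itself
print(reg.coef_, reg.intercept_)          # approximately [2.0] and 1.0
print(reg.predict([[0.5]]))               # approximately [2.0]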

Example 1: PredictLoss

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
class PredictLoss(BaseLR):
    def __init__(self, hist=30, posmax=15, lr=0.2):
        from sklearn.linear_model.base import LinearRegression
        from collections import deque
        self.hist = hist
        self.track = deque(maxlen=self.hist)
        self.regr = LinearRegression()
        self.poscases = 0
        self.posmax = posmax
        self.lr = lr

    def __call__(self, env):
        # self.track is expected to be filled with recent evaluation losses
        # elsewhere in the original callback; this excerpt only shows the
        # learning-rate adjustment logic.
        if len(self.track) > 5:
            y = np.array(self.track)
            # fit a linear trend over the tracked losses, using the iteration index as the single feature
            x = np.arange(len(y)).reshape(-1, 1)
            self.regr.fit(x, y)
            coef_ = self.regr.coef_[0]
            preds = self.regr.predict(x)
            fst = preds[0]
            lst = preds[-1]
            e = np.sqrt(((y - preds)**2).mean())
            if coef_ > 0:
                self.poscases += 1
                if self.poscases >= self.posmax:
                    raise EarlyStopException
            else:
                self.poscases -= 1
                if self.poscases < 0:
                    self.poscases = 0
            diff = np.abs(fst - lst)
            coef = np.clip(diff/e, 1e-6, 1)
            lr = self.lr*coef
            print(lr, e, diff, coef_, coef, file=open('log.txt', 'a'))
            env.model.set_param("learning_rate", lr)
Developer: kiselev1189, Project: xgb_dynamic_lr, Lines of code: 36, Source file: lr_callbacks.py
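
A hedged usage sketch for the callback above. It assumes an older xgboost release (pre-1.3) in which objects passed via callbacks= are plain callables invoked with a CallbackEnv after each boosting round, and it assumes that the part of the original callback that appends each round's evaluation loss to self.track is simply not shown in this excerpt. The data is synthetic and purely illustrative.

import numpy as np
import xgboost as xgb

# Illustrative data only
rng = np.random.RandomState(0)
X = rng.randn(200, 5)
y = X.dot(rng.randn(5)) + 0.1 * rng.randn(200)
dtrain = xgb.DMatrix(X, label=y)

bst = xgb.train(
    {"objective": "reg:squarederror", "learning_rate": 0.2},
    dtrain,
    num_boost_round=200,
    callbacks=[PredictLoss(hist=30, posmax=15, lr=0.2)],  # adjusts learning_rate dynamically
)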

Example 2: test_linear_regression_sparse

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
def test_linear_regression_sparse(random_state=0):
    "Test that linear regression also works with sparse data"
    random_state = check_random_state(random_state)
    n = 100
    X = sparse.eye(n, n)
    beta = random_state.rand(n)
    y = X * beta[:, np.newaxis]

    ols = LinearRegression()
    ols.fit(X, y.ravel())
    assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
    assert_array_almost_equal(ols.residues_, 0)
Developer: 1oscar, Project: scikit-learn, Lines of code: 14, Source file: test_base.py

Example 3: test_linear_regression_multiple_outcome

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
def test_linear_regression_multiple_outcome(random_state=0):
    "Test multiple-outcome linear regressions"
    X, y = make_regression(random_state=random_state)

    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    clf = LinearRegression(fit_intercept=True)
    clf.fit((X), Y)
    assert_equal(clf.coef_.shape, (2, n_features))
    Y_pred = clf.predict(X)
    clf.fit(X, y)
    y_pred = clf.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
Developer: Kappie, Project: support_vector_machine, Lines of code: 16, Source file: test_base.py

Example 4: test_linear_regression_sparse_multiple_outcome

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
def test_linear_regression_sparse_multiple_outcome(random_state=0):
    "Test multiple-outcome linear regressions with sparse data"
    random_state = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=random_state)
    X = sparse.coo_matrix(X)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    ols = LinearRegression()
    ols.fit(X, Y)
    assert_equal(ols.coef_.shape, (2, n_features))
    Y_pred = ols.predict(X)
    ols.fit(X, y.ravel())
    y_pred = ols.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
Developer: Kappie, Project: support_vector_machine, Lines of code: 17, Source file: test_base.py

Example 5: test_linear_regression_sparse_equal_dense

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
@pytest.mark.parametrize('normalize', [True, False])       # normalize and fit_intercept are
@pytest.mark.parametrize('fit_intercept', [True, False])   # pytest-parametrized in the test module
def test_linear_regression_sparse_equal_dense(normalize, fit_intercept):
    # Test that linear regression agrees between sparse and dense
    rng = check_random_state(0)
    n_samples = 200
    n_features = 2
    X = rng.randn(n_samples, n_features)
    X[X < 0.1] = 0.
    Xcsr = sparse.csr_matrix(X)
    y = rng.rand(n_samples)
    params = dict(normalize=normalize, fit_intercept=fit_intercept)
    clf_dense = LinearRegression(**params)
    clf_sparse = LinearRegression(**params)
    clf_dense.fit(X, y)
    clf_sparse.fit(Xcsr, y)
    assert clf_dense.intercept_ == pytest.approx(clf_sparse.intercept_)
    assert_allclose(clf_dense.coef_, clf_sparse.coef_)
Developer: allefpablo, Project: scikit-learn, Lines of code: 18, Source file: test_base.py

Example 6: test_ridge_vs_lstsq

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
def test_ridge_vs_lstsq():
    """On alpha=0., Ridge and OLS yield the same solution."""

    # we need more samples than features
    n_samples, n_features = 5, 4
    y = rng.randn(n_samples)
    X = rng.randn(n_samples, n_features)

    ridge = Ridge(alpha=0., fit_intercept=False)
    ols = LinearRegression(fit_intercept=False)

    ridge.fit(X, y)
    ols.fit(X, y)
    assert_almost_equal(ridge.coef_, ols.coef_)

    ridge.fit(X, y)
    ols.fit(X, y)
    assert_almost_equal(ridge.coef_, ols.coef_)
Developer: Jetafull, Project: scikit-learn, Lines of code: 20, Source file: test_ridge.py

Example 7: test_raises_value_error_if_sample_weights_greater_than_1d

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
def test_raises_value_error_if_sample_weights_greater_than_1d():
    # Sample weights must be either scalar or 1D

    n_sampless = [2, 3]
    n_featuress = [3, 2]

    for n_samples, n_features in zip(n_sampless, n_featuress):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        sample_weights_OK = rng.randn(n_samples) ** 2 + 1
        sample_weights_OK_1 = 1.
        sample_weights_OK_2 = 2.

        reg = LinearRegression()

        # make sure the "OK" sample weights actually work
        reg.fit(X, y, sample_weights_OK)
        reg.fit(X, y, sample_weights_OK_1)
        reg.fit(X, y, sample_weights_OK_2)
Developer: allefpablo, Project: scikit-learn, Lines of code: 21, Source file: test_base.py

Example 8: test_linear_regression_n_jobs

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
def test_linear_regression_n_jobs():
    """
    Test for the n_jobs parameter on the fit method and the constructor
    """
    X = [[1], [2]]
    Y = [1, 2]
    clf = LinearRegression()
    clf_fit = clf.fit(X, Y, 4)
    assert_equal(clf_fit.n_jobs, clf.n_jobs)
    assert_equal(clf.n_jobs, 1)
Developer: 0x0all, Project: scikit-learn, Lines of code: 12, Source file: test_base.py

Example 9: test_linear_regression_sample_weights

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
def test_linear_regression_sample_weights():
    # TODO: loop over sparse data as well

    rng = np.random.RandomState(0)

    # It would not work with under-determined systems
    for n_samples, n_features in ((6, 5), ):

        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        sample_weight = 1.0 + rng.rand(n_samples)

        for intercept in (True, False):

            # LinearRegression with explicit sample_weight
            reg = LinearRegression(fit_intercept=intercept)
            reg.fit(X, y, sample_weight=sample_weight)
            coefs1 = reg.coef_
            inter1 = reg.intercept_

            assert_equal(reg.coef_.shape, (X.shape[1], ))  # sanity checks
            assert_greater(reg.score(X, y), 0.5)

            # Closed form of the weighted least square
            # theta = (X^T W X)^(-1) * X^T W y
            W = np.diag(sample_weight)
            if intercept is False:
                X_aug = X
            else:
                dummy_column = np.ones(shape=(n_samples, 1))
                X_aug = np.concatenate((dummy_column, X), axis=1)

            coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
                                  X_aug.T.dot(W).dot(y))

            if intercept is False:
                assert_array_almost_equal(coefs1, coefs2)
            else:
                assert_array_almost_equal(coefs1, coefs2[1:])
                assert_almost_equal(inter1, coefs2[0])
Developer: AlexanderFabisch, Project: scikit-learn, Lines of code: 42, Source file: test_base.py
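
For reference, the closed form checked by the assertions above is the weighted least-squares estimator already sketched in the code comment:

\[
\hat{\theta} = \left(X_{\mathrm{aug}}^{\top} W X_{\mathrm{aug}}\right)^{-1} X_{\mathrm{aug}}^{\top} W y,
\qquad W = \operatorname{diag}(\mathrm{sample\_weight}),
\]

where X_aug = X when fit_intercept=False, and X_aug has a column of ones prepended when fit_intercept=True, so the first component of the solution is the intercept and the remaining components are the ones compared against reg.coef_.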

Example 10: test_linear_regression

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
def test_linear_regression():
    # Test LinearRegression on a simple dataset.
    # a simple dataset
    X = [[1], [2]]
    Y = [1, 2]

    clf = LinearRegression()
    clf.fit(X, Y)

    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.predict(X), [1, 2])

    # test it also for degenerate input
    X = [[1]]
    Y = [0]

    clf = LinearRegression()
    clf.fit(X, Y)
    assert_array_almost_equal(clf.coef_, [0])
    assert_array_almost_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.predict(X), [0])
Developer: Kappie, Project: support_vector_machine, Lines of code: 24, Source file: test_base.py

Example 11: test_linear_regression_sample_weights

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
def test_linear_regression_sample_weights():
    rng = np.random.RandomState(0)

    for n_samples, n_features in ((6, 5), (5, 10)):
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        sample_weight = 1.0 + rng.rand(n_samples)

        clf = LinearRegression()
        clf.fit(X, y, sample_weight)
        coefs1 = clf.coef_

        assert_equal(clf.coef_.shape, (X.shape[1], ))
        assert_greater(clf.score(X, y), 0.9)
        assert_array_almost_equal(clf.predict(X), y)

        # Sample weight can be implemented via a simple rescaling
        # for the square loss.
        scaled_y = y * np.sqrt(sample_weight)
        scaled_X = X * np.sqrt(sample_weight)[:, np.newaxis]
        clf.fit(scaled_X, scaled_y)  # fit on the rescaled data to compare against the weighted fit
        coefs2 = clf.coef_

        assert_array_almost_equal(coefs1, coefs2)
Developer: AtonLerin, Project: maya_python_packages, Lines of code: 26, Source file: test_base.py
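
The rescaling trick mentioned in the comment relies on the following identity for the squared loss (written for a model whose intercept, if any, is folded into X as a constant column):

\[
\sum_{i=1}^{n} w_i \left(y_i - x_i^{\top}\theta\right)^2
= \sum_{i=1}^{n} \left(\sqrt{w_i}\, y_i - \left(\sqrt{w_i}\, x_i\right)^{\top}\theta\right)^2 ,
\]

so minimizing the weighted objective over theta is the same ordinary least-squares problem as fitting on the row-rescaled data scaled_X and scaled_y.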

Example 12: StackedRegression

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
class StackedRegression(LinearModel, RegressorMixin):
    def __init__(self, weights=None, cv_train_size=None):
        estimators = []
        estimators.append(KNeighborsRegressor(n_neighbors=3))
        estimators.append(DecisionTreeRegressor())
        estimators.append(BayesianRidge())
        # estimators.append(BayesianRidge())
        self.estimators = estimators
        self.stacker = LinearRegression()
        self.weights = weights if weights is not None else {}
        self.cv_train_size = cv_train_size if cv_train_size is not None else 0.7
        self._is_fitted = False

    def fit_stack(self, X, y):
        print('fitting')
        print(X.shape)
        n_train = int(X.shape[0] * self.cv_train_size)
        for estimator in self.estimators:
            estimator.fit(X[:n_train, :], y[:n_train])
        predictions = np.concatenate([np.matrix(estimator.predict(X[n_train:, :])).transpose()
                                      for estimator in self.estimators], axis=1)
        self.stacker.fit(predictions, y[n_train:])
        self._is_fitted = True
        print('fitted')
        print(self.stacker.residues_)

    def fit(self, X, y):
        if not self._is_fitted:
            raise NotFittedError('StackedRegression must call fit_stack before fit.')
        for estimator in self.estimators:
            estimator.fit(X, y)

    def predict(self, X):
        predictions = np.concatenate([np.matrix(estimator.predict(X)).transpose()
                                      for estimator in self.estimators], axis=1)
        return self.stacker.predict(predictions)
Developer: AlexandreGuinaudeau, Project: AxaDataChallenge, Lines of code: 38, Source file: stacked_regressor.py
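
A short, hypothetical usage sketch of the StackedRegression class above. X and y stand for any regression dataset; the base-estimator classes (KNeighborsRegressor, DecisionTreeRegressor, BayesianRidge), LinearModel, RegressorMixin and NotFittedError are imported in the original module but omitted from this excerpt, and the residues_ attribute printed in fit_stack only exists in older scikit-learn releases.

import numpy as np

# Illustrative data only
rng = np.random.RandomState(0)
X = rng.randn(300, 4)
y = X.dot(np.array([1.0, -2.0, 0.5, 0.0])) + 0.1 * rng.randn(300)

stack = StackedRegression(cv_train_size=0.7)
stack.fit_stack(X, y)      # base estimators fit on a 70% split, stacker fit on held-out predictions
stack.fit(X, y)            # then refit the base estimators on all of the data
y_pred = stack.predict(X)  # the LinearRegression stacker combines the base predictions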

Example 13: list

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
# Python 2 code (raw_input, list-returning map); the class header was cut off by the
# excerpt and is reconstructed here from the attribute and class names used below.
class inp_reader(object):
    inp_features = list()
    inp_prices = list()
    features = list()
    def get_inp_features(self): 
        return self.inp_features
    def get_inp_prices(self): 
        return self.inp_prices
    def get_features(self): 
        return self.features
    
    def read(self):
        F, N = map(int, raw_input().split(' '))              
        for _ in range(N):
            inp_f = map(float, raw_input().strip().split())
            self.inp_features.append(inp_f[:F:])
            self.inp_prices.append(inp_f[F::])
        questions = int(raw_input())        
        for _ in range(questions):
            self.features.append(map(float, raw_input().split()))
        
reader = inp_reader()
reader.read()
inp_features = reader.get_inp_features()
inp_prices = reader.get_inp_prices()
features = reader.get_features()
 
model = LinearRegression()

model.fit(inp_features, inp_prices)
prices=model.predict(features)
for el in prices:
    print (el[0])
Developer: leoldn, Project: hackerrank, Lines of code: 33, Source file: solution.py

Example 14: zip

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
### draw the scatterplot, with color-coded training and testing points
import matplotlib.pyplot as plt
for feature, target in zip(feature_test, target_test):
    plt.scatter( feature, target, color=test_color ) 
for feature, target in zip(feature_train, target_train):
    plt.scatter( feature, target, color=train_color ) 

### labels for the legend
plt.scatter(feature_test[0], target_test[0], color=test_color, label="test")
plt.scatter(feature_test[0], target_test[0], color=train_color, label="train")


from sklearn.linear_model.base import LinearRegression

reg = LinearRegression()
reg.fit(feature_train, target_train)
print("Slope %s" % reg.coef_)
print("Intercept %s" % reg.intercept_)

print("Score = ", reg.score(feature_test, target_test))
### draw the regression line, once it's coded
try:
    plt.plot( feature_test, reg.predict(feature_test) )
except NameError:
    pass
reg.fit(feature_test, target_test)
plt.plot(feature_train, reg.predict(feature_train), color="b")
plt.xlabel(features_list[1])
plt.ylabel(features_list[0])
plt.legend()
plt.show()
Developer: atdi, Project: ud120-projects, Lines of code: 33, Source file: finance_regression.py

Example 15: train_test_split

# Required import: from sklearn.linear_model.base import LinearRegression [as alias]
# Or: from sklearn.linear_model.base.LinearRegression import fit [as alias]
### ages and net_worths need to be reshaped into 2D numpy arrays
### second argument of reshape command is a tuple of integers: (n_rows, n_columns)
### by convention, n_rows is the number of data points
### and n_columns is the number of features
ages       = numpy.reshape( numpy.array(ages), (len(ages), 1))
net_worths = numpy.reshape( numpy.array(net_worths), (len(net_worths), 1))
from sklearn.cross_validation import train_test_split
ages_train, ages_test, net_worths_train, net_worths_test = train_test_split(ages, net_worths, test_size=0.1, random_state=42)

### fill in a regression here!  Name the regression object reg so that
### the plotting code below works, and you can see what your regression looks like
from sklearn.linear_model.base import LinearRegression

reg = LinearRegression()
reg.fit(ages_train, net_worths_train)
print("Slope %s" % reg.coef_)
print("Intercept %s" % reg.intercept_)

print("Score = ", reg.score(ages_test, net_worths_test))






try:
    plt.plot(ages, reg.predict(ages), color="blue")
except NameError:
    pass
plt.scatter(ages, net_worths)
Developer: atdi, Project: ud120-projects, Lines of code: 32, Source file: outlier_removal_regression.py


Note: The sklearn.linear_model.base.LinearRegression.fit examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding projects' licenses. Please do not reproduce this article without permission.