This article collects typical usage examples of the LinearRegression class from Python's sklearn.linear_model.base module. If you have been wondering what the LinearRegression class is for, how to use it, or where to find working examples, the curated class code examples below should help.
The following shows 13 code examples of the LinearRegression class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
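Before the examples, here is a minimal, self-contained sketch of fitting the class on toy data. Note that sklearn.linear_model.base is a private module (removed in modern scikit-learn releases); the stable public import path is sklearn.linear_model:

import numpy as np
from sklearn.linear_model import LinearRegression

X = np.array([[1.0], [2.0], [3.0]])
y = np.array([2.0, 4.0, 6.0])

model = LinearRegression().fit(X, y)
print(model.coef_, model.intercept_)  # approximately [2.] and 0.
print(model.predict([[4.0]]))         # approximately [8.]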
Example 1: PredictLoss
import numpy as np

# `BaseLR` is a base class from the surrounding project; `EarlyStopException`
# comes from xgboost's old callback machinery (xgboost.core.EarlyStopException).
class PredictLoss(BaseLR):
def __init__(self, hist=30, posmax=15, lr=0.2):
from sklearn.linear_model.base import LinearRegression
from collections import deque
self.hist = hist
self.track = deque(maxlen=self.hist)
self.regr = LinearRegression()
self.poscases = 0
self.posmax = posmax
self.lr = lr
    def __call__(self, env):
        # (assumption) record the latest evaluation loss each round; the
        # excerpt never shows `track` being filled, so this line is a
        # reconstruction based on xgboost's old CallbackEnv fields:
        self.track.append(env.evaluation_result_list[-1][1])
        if len(self.track) > 5:
            y = np.array(self.track)
            # regress the tracked losses on their time index (the original
            # `range(len(y.shape))` was a bug: it yields a single point)
            x = np.arange(len(y)).reshape(-1, 1)
            self.regr.fit(x, y)
coef_ = self.regr.coef_[0]
preds = self.regr.predict(x)
fst = preds[0]
lst = preds[-1]
e = np.sqrt(((y - preds)**2).mean())
if coef_ > 0:
self.poscases += 1
if self.poscases >= self.posmax:
raise EarlyStopException
else:
self.poscases -= 1
if self.poscases < 0:
self.poscases = 0
diff = np.abs(fst - lst)
coef = np.clip(diff/e, 1e-6, 1)
lr = self.lr*coef
            with open('log.txt', 'a') as f:  # avoid leaking a file handle each call
                print(lr, e, diff, coef_, coef, file=f)
env.model.set_param("learning_rate", lr)
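Example 1 is a learning-rate / early-stop callback for xgboost's old (pre-1.3) callback API, where a callback is any callable taking a CallbackEnv. A hedged usage sketch follows; X_train and y_train are assumed placeholders, not part of the original source:

import xgboost as xgb

dtrain = xgb.DMatrix(X_train, label=y_train)  # X_train, y_train assumed given
bst = xgb.train(
    {"objective": "reg:squarederror", "learning_rate": 0.2},
    dtrain,
    num_boost_round=500,
    evals=[(dtrain, "train")],
    callbacks=[PredictLoss(hist=30, posmax=15, lr=0.2)],
)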
Example 2: test_linear_regression_n_jobs
def test_linear_regression_n_jobs():
"""
Test for the n_jobs parameter on the fit method and the constructor
"""
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf_fit = clf.fit(X, Y, 4)
assert_equal(clf_fit.n_jobs, clf.n_jobs)
assert_equal(clf.n_jobs, 1)
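In old scikit-learn, fit() accepted n_jobs as an extra (here positional) argument, which is what the third argument 4 above exercises. In current releases n_jobs is a constructor-only parameter; the modern equivalent is roughly:

from sklearn.linear_model import LinearRegression

reg = LinearRegression(n_jobs=4)  # n_jobs is now set in the constructor
reg.fit([[1], [2]], [1, 2])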
Example 3: test_linear_regression_sparse
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
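Because X is the identity, the model predicts coef_[i] + intercept_ for sample i, so coef_ + intercept_ must recover beta exactly. The same check with a dense design, as a standalone sketch independent of the test harness:

import numpy as np
from sklearn.linear_model import LinearRegression

rs = np.random.RandomState(0)
n = 100
beta = rs.rand(n)
ols = LinearRegression().fit(np.eye(n), beta)  # identity design matrix
np.testing.assert_allclose(beta, ols.coef_ + ols.intercept_, atol=1e-10)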
Example 4: test_ridge_vs_lstsq
def test_ridge_vs_lstsq():
"""On alpha=0., Ridge and OLS yield the same solution."""
# we need more samples than features
n_samples, n_features = 5, 4
    y = rng.randn(n_samples)  # `rng` is a module-level RandomState in the test file
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
    # refitting on the same data must reproduce the same solution
    ridge.fit(X, y)
    ols.fit(X, y)
    assert_almost_equal(ridge.coef_, ols.coef_)
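With alpha=0 the ridge penalty vanishes, so both estimators minimize the same squared loss; for a full-rank overdetermined system the solution is unique and also matches numpy's least-squares solver. A standalone sketch of the same check:

import numpy as np
from sklearn.linear_model import LinearRegression, Ridge

rs = np.random.RandomState(0)
X, y = rs.randn(5, 4), rs.randn(5)

w_lstsq, *_ = np.linalg.lstsq(X, y, rcond=None)
w_ols = LinearRegression(fit_intercept=False).fit(X, y).coef_
w_ridge = Ridge(alpha=0.0, fit_intercept=False).fit(X, y).coef_

np.testing.assert_allclose(w_ols, w_lstsq)
np.testing.assert_allclose(w_ridge, w_lstsq, rtol=1e-5)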
Example 5: test_raises_value_error_if_sample_weights_greater_than_1d
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
        reg = LinearRegression()
        # make sure the "OK" sample weights actually work
        reg.fit(X, y, sample_weights_OK)
        reg.fit(X, y, sample_weights_OK_1)
        reg.fit(X, y, sample_weights_OK_2)
        # the excerpt stops here; the full test goes on to check that 2-D
        # sample weights are rejected, along these lines:
        sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
        assert_raises(ValueError, reg.fit, X, y, sample_weights_not_OK)
Example 6: test_linear_regression_sample_weights
def test_linear_regression_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
# It would not work with under-determined systems
for n_samples, n_features in ((6, 5), ):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for intercept in (True, False):
# LinearRegression with explicit sample_weight
reg = LinearRegression(fit_intercept=intercept)
reg.fit(X, y, sample_weight=sample_weight)
coefs1 = reg.coef_
inter1 = reg.intercept_
assert_equal(reg.coef_.shape, (X.shape[1], )) # sanity checks
assert_greater(reg.score(X, y), 0.5)
# Closed form of the weighted least square
# theta = (X^T W X)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs1, coefs2)
else:
assert_array_almost_equal(coefs1, coefs2[1:])
assert_almost_equal(inter1, coefs2[0])
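The closed form above is equivalent to ordinary least squares on data whose rows are rescaled by sqrt(sample_weight); a standalone sketch of that identity, independent of the test harness:

import numpy as np

rs = np.random.RandomState(0)
X, y = rs.randn(6, 5), rs.randn(6)
w = 1.0 + rs.rand(6)

# weighted normal equations: theta = (X^T W X)^(-1) X^T W y
theta_wls = np.linalg.solve(X.T @ np.diag(w) @ X, X.T @ np.diag(w) @ y)

# same solution from OLS on sqrt(w)-scaled rows
sw = np.sqrt(w)
theta_scaled, *_ = np.linalg.lstsq(X * sw[:, None], y * sw, rcond=None)

np.testing.assert_allclose(theta_wls, theta_scaled)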
Example 7: StackedRegression
class StackedRegression(LinearModel, RegressorMixin):
def __init__(self, weights=None, cv_train_size=None):
estimators = []
estimators.append(KNeighborsRegressor(n_neighbors=3))
estimators.append(DecisionTreeRegressor())
estimators.append(BayesianRidge())
# estimators.append(BayesianRidge())
self.estimators = estimators
self.stacker = LinearRegression()
self.weights = weights if weights is not None else {}
self.cv_train_size = cv_train_size if cv_train_size is not None else 0.7
self._is_fitted = False
def fit_stack(self, X, y):
print('fitting')
print(X.shape)
n_train = int(X.shape[0] * self.cv_train_size)
for estimator in self.estimators:
estimator.fit(X[:n_train, :], y[:n_train])
        predictions = np.column_stack(
            [estimator.predict(X[n_train:, :]) for estimator in self.estimators])
self.stacker.fit(predictions, y[n_train:])
self._is_fitted = True
print('fitted')
        print(self.stacker.residues_)  # residues_ was deprecated/removed in later scikit-learn
def fit(self, X, y):
if not self._is_fitted:
raise NotFittedError('StackedRegression must call fit_stack before fit.')
for estimator in self.estimators:
estimator.fit(X, y)
def predict(self, X):
        predictions = np.column_stack(
            [estimator.predict(X) for estimator in self.estimators])
return self.stacker.predict(predictions)
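A hedged usage sketch of the stacked model above (X_train, y_train, and X_test are assumed placeholders): fit_stack must run first so the stacker learns its blending weights on held-out base-model predictions, after which fit refits the base estimators on the full data:

reg = StackedRegression()
reg.fit_stack(X_train, y_train)  # fits base estimators + stacking weights
reg.fit(X_train, y_train)        # refits base estimators on all of the data
y_hat = reg.predict(X_test)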
Example 8: test_linear_regression_multiple_outcome
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
    clf.fit(X, Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
Example 9: test_linear_regression_sparse_multiple_outcome
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
Example 10: test_linear_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
Example 11: test_linear_regression_sparse_equal_dense
# (normalize and fit_intercept are supplied by pytest parametrization)
def test_linear_regression_sparse_equal_dense(normalize, fit_intercept):
# Test that linear regression agrees between sparse and dense
rng = check_random_state(0)
n_samples = 200
n_features = 2
X = rng.randn(n_samples, n_features)
X[X < 0.1] = 0.
Xcsr = sparse.csr_matrix(X)
y = rng.rand(n_samples)
params = dict(normalize=normalize, fit_intercept=fit_intercept)
clf_dense = LinearRegression(**params)
clf_sparse = LinearRegression(**params)
clf_dense.fit(X, y)
clf_sparse.fit(Xcsr, y)
assert clf_dense.intercept_ == pytest.approx(clf_sparse.intercept_)
assert_allclose(clf_dense.coef_, clf_sparse.coef_)
Example 12: test_linear_regression_sample_weights
def test_linear_regression_sample_weights():
rng = np.random.RandomState(0)
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
clf = LinearRegression()
clf.fit(X, y, sample_weight)
coefs1 = clf.coef_
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_greater(clf.score(X, y), 0.9)
assert_array_almost_equal(clf.predict(X), y)
        # Sample weight can be implemented via a simple rescaling of X and y
        # by sqrt(sample_weight) for the square loss.
        # Note: the scaled variables below are never used; the refit on the
        # raw data still matches coefs1 because these systems are fit exactly,
        # which makes the weighting irrelevant.
        scaled_y = y * np.sqrt(sample_weight)
        scaled_X = X * np.sqrt(sample_weight)[:, np.newaxis]
        clf.fit(X, y)
        coefs2 = clf.coef_
assert_array_almost_equal(coefs1, coefs2)
Example 13: fit
def fit(self, X1, y1, X2, y2):
"""Fit estimator using RANSAC algorithm.
Namely, the fit is done into two main steps:
- pre-fitting: quickly select n_prefits configurations which seems
suitable given topological constraints.
- finding best fit: select the pre-fit with the maximum number of inliers
as the best fit.
Inputs:
X1, y1: Left lane points (supposedly)
X2, y2: Right lane points (supposedly)
"""
check_consistent_length(X1, y1)
check_consistent_length(X2, y2)
# Assume linear model by default
min_samples = X1.shape[1] + 1
if min_samples > X1.shape[0] or min_samples > X2.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X1-2.shape[0]``.")
# Check additional parameters...
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
        if self.residual_threshold is None:
            # MAD of the targets; the original referenced an undefined `y`,
            # so the two lanes' targets are pooled here
            y_all = np.concatenate([y1, y2])
            residual_threshold = np.median(np.abs(y_all - np.median(y_all)))
else:
residual_threshold = self.residual_threshold
# random_state = check_random_state(self.random_state)
# === Pre-fit with small subsets (4 points) === #
# Allows to quickly pre-select some good configurations.
w1_prefits, w2_prefits = lanes_ransac_prefit(X1, y1, X2, y2,
self.n_prefits,
self.max_trials,
self.is_valid_diffs,
self.is_valid_bounds)
# === Select best pre-fit, using the full dataset === #
post_fit = 0
(w1,
w2,
inlier_mask1,
inlier_mask2) = lanes_ransac_select_best(X1, y1, X2, y2,
w1_prefits, w2_prefits,
residual_threshold,
post_fit)
self.w1_ = w1
self.w2_ = w2
# Set regression parameters.
base_estimator1 = LinearRegression(fit_intercept=False)
base_estimator1.coef_ = w1
base_estimator1.intercept_ = 0.0
base_estimator2 = LinearRegression(fit_intercept=False)
base_estimator2.coef_ = w2
base_estimator2.intercept_ = 0.0
# Save final model parameters.
self.estimator1_ = base_estimator1
self.estimator2_ = base_estimator2
self.inlier_mask1_ = inlier_mask1
self.inlier_mask2_ = inlier_mask2
# # Estimate final model using all inliers
# # base_estimator1.fit(X1_inlier_best, y1_inlier_best)
# # base_estimator2.fit(X2_inlier_best, y2_inlier_best)
return self
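Note the trick at the end of Example 13: LinearRegression is used purely as a prediction container, with coef_ and intercept_ assigned by hand instead of calling fit. A minimal standalone sketch of the same idea (this works in the scikit-learn generations these snippets target; newer releases may validate fitted state differently):

import numpy as np
from sklearn.linear_model import LinearRegression

lr = LinearRegression(fit_intercept=False)
lr.coef_ = np.array([2.0, -1.0])  # model: y = 2*x0 - x1, never fitted
lr.intercept_ = 0.0
print(lr.predict(np.array([[1.0, 3.0]])))  # [-1.]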