This article collects typical usage examples of Python's sklearn.linear_model.Lasso. If you are wondering what linear_model.Lasso is for, how to call it, or what real code using it looks like, the curated samples below should help. You can also consult the containing module, sklearn.linear_model, for related usage examples.
Below are 15 code examples of linear_model.Lasso, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
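Before diving into the examples, here is a minimal sketch of the pattern they all build on: construct a Lasso with a regularization strength alpha, fit it, and inspect the (sparse) coefficients. The data below is synthetic and purely illustrative.

import numpy as np
from sklearn.linear_model import Lasso

# synthetic data: 100 samples, 10 features, only 2 of them informative
rng = np.random.RandomState(0)
X = rng.randn(100, 10)
y = 3.0 * X[:, 0] - 2.0 * X[:, 1] + 0.1 * rng.randn(100)

model = Lasso(alpha=0.1)  # alpha controls the strength of the L1 penalty
model.fit(X, y)
print(model.coef_)        # most coefficients are shrunk to exactly 0
print(model.predict(X[:5]))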
Example 1: test_with_complementary_pairs_bootstrap
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def test_with_complementary_pairs_bootstrap():
    n, p, k = 500, 1000, 5
    X, y, important_betas = _generate_dummy_regression_data(n=n, k=k)
    base_estimator = Pipeline([
        ('scaler', StandardScaler()),
        ('model', Lasso())
    ])
    lambdas_grid = np.logspace(-1, 1, num=10)
    selector = StabilitySelection(base_estimator=base_estimator,
                                  lambda_name='model__alpha',
                                  lambda_grid=lambdas_grid,
                                  bootstrap_func='complementary_pairs')
    selector.fit(X, y)

    chosen_betas = selector.get_support(indices=True)
    assert_almost_equal(important_betas, chosen_betas)
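This test (like several below) calls a helper, _generate_dummy_regression_data, that is not reproduced on this page. The sketch below is an assumed reconstruction based on how the tests use its return values, not the original helper: it draws an n x p Gaussian design matrix in which only k randomly chosen coefficients are non-zero, and returns the sorted indices of those informative features.

import numpy as np

def _generate_dummy_regression_data(n=1000, p=1000, k=5, random_state=123):
    # hypothetical helper: n samples, p features, k informative coefficients
    rng = np.random.RandomState(random_state)
    X = rng.normal(size=(n, p))
    betas = np.zeros(p)
    important_betas = np.sort(rng.choice(np.arange(p), size=k, replace=False))
    betas[important_betas] = rng.uniform(1, 2, size=k)
    y = X.dot(betas)
    return X, y, important_betas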
Example 2: test_stability_selection_regression
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def test_stability_selection_regression():
    n, p, k = 500, 1000, 5
    X, y, important_betas = _generate_dummy_regression_data(n=n, k=k)
    base_estimator = Pipeline([
        ('scaler', StandardScaler()),
        ('model', Lasso())
    ])
    lambdas_grid = np.logspace(-1, 1, num=10)
    selector = StabilitySelection(base_estimator=base_estimator,
                                  lambda_name='model__alpha',
                                  lambda_grid=lambdas_grid)
    selector.fit(X, y)

    chosen_betas = selector.get_support(indices=True)
    assert_almost_equal(important_betas, chosen_betas)
Example 3: test_different_shape
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def test_different_shape():
    n, p, k = 100, 200, 5
    X, y, important_betas = _generate_dummy_regression_data(n=n, k=k)
    base_estimator = Pipeline([
        ('scaler', StandardScaler()),
        ('model', Lasso())
    ])
    lambdas_grid = np.logspace(-1, 1, num=10)
    selector = StabilitySelection(base_estimator=base_estimator,
                                  lambda_name='model__alpha',
                                  lambda_grid=lambdas_grid)
    selector.fit(X, y)
    selector.transform(X[:, :-2])
Example 4: test_no_features
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def test_no_features():
    n, p, k = 100, 200, 0
    X, y, important_betas = _generate_dummy_regression_data(n=n, k=k)
    base_estimator = Pipeline([
        ('scaler', StandardScaler()),
        ('model', Lasso())
    ])
    lambdas_grid = np.logspace(-1, 1, num=10)
    selector = StabilitySelection(base_estimator=base_estimator,
                                  lambda_name='model__alpha',
                                  lambda_grid=lambdas_grid)
    selector.fit(X, y)

    assert_almost_equal(selector.transform(X),
                        np.empty(0).reshape((X.shape[0], 0)))
Example 5: test_stability_plot
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def test_stability_plot():
    n, p, k = 500, 200, 5
    X, y, important_betas = _generate_dummy_regression_data(n=n, k=k)
    base_estimator = Pipeline([
        ('scaler', StandardScaler()),
        ('model', Lasso())
    ])
    lambdas_grid = np.logspace(-1, 1, num=10)
    selector = StabilitySelection(base_estimator=base_estimator,
                                  lambda_name='model__alpha',
                                  lambda_grid=lambdas_grid)
    selector.fit(X, y)

    plot_stability_path(selector, threshold_highlight=0.5)
Example 6: build_ensemble
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def build_ensemble(**kwargs):
    """Generate ensemble."""
    ens = SuperLearner(**kwargs)
    prep = {'Standard Scaling': [StandardScaler()],
            'Min Max Scaling': [MinMaxScaler()],
            'No Preprocessing': []}
    est = {'Standard Scaling':
               [ElasticNet(), Lasso(), KNeighborsRegressor()],
           'Min Max Scaling':
               [SVR()],
           'No Preprocessing':
               [RandomForestRegressor(random_state=SEED),
                GradientBoostingRegressor()]}

    ens.add(est, prep)
    ens.add(GradientBoostingRegressor(), meta=True)
    return ens
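Assuming the mlens SuperLearner API used above (keyword arguments such as folds and random_state are forwarded straight to the SuperLearner constructor), the ensemble might be trained like this; the data set here is a placeholder:

from sklearn.datasets import make_regression

SEED = 42
X, y = make_regression(n_samples=200, n_features=10, random_state=SEED)

ens = build_ensemble(folds=2, random_state=SEED)
ens.fit(X, y)
preds = ens.predict(X)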
Example 7: test_transform_target_regressor_error
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def test_transform_target_regressor_error():
    X, y = friedman
    # provide a transformer and functions at the same time
    regr = TransformedTargetRegressor(regressor=LinearRegression(),
                                      transformer=StandardScaler(),
                                      func=np.exp, inverse_func=np.log)
    assert_raises_regex(ValueError, "'transformer' and functions"
                        " 'func'/'inverse_func' cannot both be set.",
                        regr.fit, X, y)
    # fit with sample_weight with a regressor which does not support it
    sample_weight = np.ones((y.shape[0],))
    regr = TransformedTargetRegressor(regressor=Lasso(),
                                      transformer=StandardScaler())
    assert_raises_regex(TypeError, r"fit\(\) got an unexpected keyword "
                        "argument 'sample_weight'", regr.fit, X, y,
                        sample_weight=sample_weight)
    # func is given but inverse_func is not
    regr = TransformedTargetRegressor(func=np.exp)
    assert_raises_regex(ValueError, "When 'func' is provided, 'inverse_func'"
                        " must also be provided", regr.fit, X, y)
Example 8: test_multi_target_regression_partial_fit
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def test_multi_target_regression_partial_fit():
    X, y = datasets.make_regression(n_targets=3)
    X_train, y_train = X[:50], y[:50]
    X_test, y_test = X[50:], y[50:]

    references = np.zeros_like(y_test)
    half_index = 25
    for n in range(3):
        sgr = SGDRegressor(random_state=0, max_iter=5)
        sgr.partial_fit(X_train[:half_index], y_train[:half_index, n])
        sgr.partial_fit(X_train[half_index:], y_train[half_index:, n])
        references[:, n] = sgr.predict(X_test)

    sgr = MultiOutputRegressor(SGDRegressor(random_state=0, max_iter=5))
    sgr.partial_fit(X_train[:half_index], y_train[:half_index])
    sgr.partial_fit(X_train[half_index:], y_train[half_index:])

    y_pred = sgr.predict(X_test)
    assert_almost_equal(references, y_pred)
    assert not hasattr(MultiOutputRegressor(Lasso), 'partial_fit')
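The final assertion relies on MultiOutputRegressor exposing partial_fit only when the wrapped estimator itself supports it. In scikit-learn versions where this conditional behavior applies, a quick check looks like this:

from sklearn.linear_model import Lasso, SGDRegressor
from sklearn.multioutput import MultiOutputRegressor

print(hasattr(MultiOutputRegressor(SGDRegressor()), 'partial_fit'))  # True
print(hasattr(MultiOutputRegressor(Lasso()), 'partial_fit'))         # False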
Example 9: test_rank_deficient_design
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def test_rank_deficient_design():
    # consistency test that checks that LARS Lasso is handling rank
    # deficient input data (with rank < n_features) in the same way
    # as coordinate descent Lasso
    y = [5, 0, 5]
    for X in ([[5, 0],
               [0, 5],
               [10, 10]],
              [[10, 10, 0],
               [1e-32, 0, 0],
               [0, 0, 1]]):
        # To be able to use the coefs to compute the objective function,
        # we need to turn off normalization
        lars = linear_model.LassoLars(.1, normalize=False)
        coef_lars_ = lars.fit(X, y).coef_
        obj_lars = (1. / (2. * 3.)
                    * linalg.norm(y - np.dot(X, coef_lars_)) ** 2
                    + .1 * linalg.norm(coef_lars_, 1))
        coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
        coef_cd_ = coord_descent.fit(X, y).coef_
        obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
                  + .1 * linalg.norm(coef_cd_, 1))
        assert_less(obj_lars, obj_cd * (1. + 1e-8))
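Both objective values above instantiate the coordinate-descent Lasso loss, (1 / (2 * n_samples)) * ||y - Xw||_2^2 + alpha * ||w||_1. Factoring it into a helper makes the comparison explicit; a small sketch:

import numpy as np
from scipy import linalg

def lasso_objective(X, y, coef, alpha):
    # (1 / (2 * n_samples)) * ||y - X w||_2^2 + alpha * ||w||_1
    n_samples = len(y)
    residual = y - np.dot(X, coef)
    return (linalg.norm(residual) ** 2 / (2.0 * n_samples)
            + alpha * linalg.norm(coef, 1))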
Example 10: test_lasso_lars_vs_lasso_cd_early_stopping
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def test_lasso_lars_vs_lasso_cd_early_stopping():
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results when early stopping is used.
    # (test: before, in the middle, and in the last part of the path)
    alphas_min = [10, 0.9, 1e-4]
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)

    # same test, with normalization
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
                                      tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)
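lars_path returns the grid of alphas visited along the path, the indices of the active features, and the coefficient path itself; the test above keeps only the first and last of these. A self-contained sketch of the call:

import numpy as np
from sklearn import linear_model

rng = np.random.RandomState(0)
X = rng.randn(50, 10)
y = X[:, 0] - 2 * X[:, 1] + 0.01 * rng.randn(50)

alphas, active, coef_path = linear_model.lars_path(X, y, method='lasso')
# coef_path has shape (n_features, n_alphas); coef_path[:, -1] is the
# solution at the last (smallest) alpha reached on the path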
Example 11: lasso
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def lasso(df, dependent_variable, independent_variables, interaction_terms=[], model_limit=5):
    considered_independent_variables_per_model, patsy_models = \
        construct_models(df, dependent_variable, independent_variables,
                         interaction_terms, table_layout=MCT.ALL_VARIABLES.value)
    y, X = dmatrices(patsy_models[0], df, return_type='dataframe')

    clf = linear_model.Lasso(alpha=1.0, normalize=True)
    clf.fit(X, y)
    fit_coef = clf.coef_

    # mean of each column (feature); axis=0 averages over the rows
    column_means = np.apply_along_axis(np.mean, 0, X)

    # keep a variable if its coefficient magnitude is at least its column mean
    selected_variables = [
        independent_variable
        for (i, independent_variable) in enumerate(independent_variables)
        if abs(fit_coef[i]) >= column_means[i]
    ]
    return selected_variables
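Assuming a pandas DataFrame containing the relevant columns, the selector above might be invoked as follows. Note that construct_models, dmatrices, and the MCT enum come from the surrounding project and are not shown on this page, so this call is a sketch rather than a verified example:

import pandas as pd

df = pd.DataFrame({
    'sales': [10, 12, 9, 15, 14, 8],
    'price': [1.0, 0.9, 1.2, 0.8, 0.85, 1.3],
    'promo': [0, 1, 0, 1, 1, 0],
})
selected = lasso(df, dependent_variable='sales',
                 independent_variables=['price', 'promo'])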
Example 12: getModels
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def getModels():
    result = [
        # regressors
        "LinearRegression",
        "BayesianRidge",
        "ARDRegression",
        "ElasticNet",
        "HuberRegressor",
        "Lasso",
        "LassoLars",
        "Ridge",
        "SGDRegressor",
        "SVR",
        # classifiers
        "MLPClassifier",
        "KNeighborsClassifier",
        "SVC",
        "GaussianProcessClassifier",
        "DecisionTreeClassifier",
        "RandomForestClassifier",
        "AdaBoostClassifier",
        "GaussianNB",
        "LogisticRegression",
        "QuadraticDiscriminantAnalysis",
    ]
    return result
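getModels only returns names; resolving a name to an estimator instance presumably happens elsewhere in the project. One hypothetical way to do the lookup (this registry is an assumption, not part of the original code):

from sklearn import linear_model, neighbors, svm

# hypothetical name-to-constructor registry for a few of the models
REGISTRY = {
    'LinearRegression': linear_model.LinearRegression,
    'Lasso': linear_model.Lasso,
    'Ridge': linear_model.Ridge,
    'SVR': svm.SVR,
    'KNeighborsClassifier': neighbors.KNeighborsClassifier,
}

def make_model(name, **params):
    return REGISTRY[name](**params)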
Example 13: comp_attack_vld
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def comp_attack_vld(self, clf, wxc, bxc, wyc, byc, otherargs):
    n = self.vldx.shape[0]

    # residuals of the current model on the validation set
    res = (clf.predict(self.vldx) - self.vldy)

    # project validation points onto the precomputed gradient terms
    gradx = np.dot(self.vldx, wxc) + bxc
    grady = np.dot(self.vldx, wyc.T) + byc

    # average the residual-weighted gradients over the validation set
    attackx = np.dot(res, gradx) / n
    attacky = np.dot(res, grady) / n

    return attackx, attacky
############################################################################################
# Implements GD Poisoning for Lasso Linear Regression
############################################################################################
Example 14: learn_model
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def learn_model(self, x, y, clf, lam=None):
    if lam is None and self.initlam != -1:  # hack for first training
        lam = self.initlam
    if clf is None:
        if lam is None:
            clf = linear_model.LassoCV(max_iter=10000)
            clf.fit(x, y)
            lam = clf.alpha_
        clf = linear_model.Lasso(alpha=lam,
                                 max_iter=10000,
                                 warm_start=True)
    clf.fit(x, y)
    return clf, lam
############################################################################################
# Implements GD Poisoning for Ridge Linear Regression
############################################################################################
Example 15: lasso_correlation_matrix
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import Lasso [as alias]
def lasso_correlation_matrix(vec1, vec2, random_state=None):
    """Computes correlation matrix of two representations using Lasso Regression.

    Args:
        vec1: 2d array of representations with axis 0 the batch dimension and
            axis 1 the representation dimension.
        vec2: 2d array of representations with axis 0 the batch dimension and
            axis 1 the representation dimension.
        random_state: int used to seed an RNG used for model training.

    Returns:
        A 2d array containing the correlations between all pairwise
        combinations of elements of both representations. Elements of vec1
        correspond to axis 0 and elements of vec2 correspond to axis 1.
    """
    assert vec1.shape == vec2.shape
    model = linear_model.Lasso(random_state=random_state, alpha=0.1)
    model.fit(vec1, vec2)
    return np.transpose(np.absolute(model.coef_))
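A short usage sketch with synthetic representations; with two (64, 10) inputs, the returned matrix is (10, 10) because Lasso fits one coefficient row per output dimension:

import numpy as np

rng = np.random.RandomState(0)
codes = rng.randn(64, 10)                # batch of 64, 10-dim representation
noisy = codes + 0.1 * rng.randn(64, 10)  # a second, correlated representation

corr = lasso_correlation_matrix(codes, noisy, random_state=0)
print(corr.shape)  # (10, 10)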