This article collects typical usage examples of the ElasticNetCV.fit method from Python's sklearn.linear_model.coordinate_descent module. If you have been wondering how to use Python's ElasticNetCV.fit, how to call it, or what working examples look like, the curated code samples below may help. You can also explore further usage examples of the class this method belongs to, sklearn.linear_model.coordinate_descent.ElasticNetCV.
The following presents 15 code examples of the ElasticNetCV.fit method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
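Before the curated examples, here is a minimal, self-contained sketch of a typical ElasticNetCV.fit call. It uses the modern public import path and synthetic data from make_regression; the dataset shape, l1_ratio grid, and cv value are illustrative assumptions rather than values taken from the examples below.

# Minimal sketch of ElasticNetCV.fit (illustrative parameters, synthetic data).
from sklearn.datasets import make_regression
from sklearn.linear_model import ElasticNetCV  # public path; the examples below use the older private module path

X, y = make_regression(n_samples=200, n_features=20, noise=1.0, random_state=0)
model = ElasticNetCV(l1_ratio=[0.1, 0.5, 0.9], cv=5, random_state=0)
model.fit(X, y)                        # cross-validates over the alpha grid and the l1_ratio values
print(model.alpha_, model.l1_ratio_)   # selected regularization strength and L1/L2 mix
print(model.score(X, y))               # R^2 on the training data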
Example 1: runPrintResults
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def runPrintResults(X, y, alpha, name):
    print(name + ":\n=========")
    if (alpha is not None):
        X_new = np.divide(X, alpha)
    else:
        X_new = X
    enetCV = ElasticNetCV(l1_ratio=0.8, fit_intercept=False)  # cv=nCV, max_iter=5000
    # enetCV = LassoCV(fit_intercept=False)  # cv=nCV, max_iter=5000
    enetCV.fit(X_new, y)
    y_pred_enet = enetCV.predict(X_new)
    r2_score_enet = r2_score(y, y_pred_enet)
    print("R2= ", r2_score_enet)
    if (alpha is not None):
        enetCV_coef = np.divide(enetCV.coef_, alpha)
    else:
        enetCV_coef = enetCV.coef_
    print("Best Alpha: {}".format(enetCV.alpha_))
    # print("coefs_: {}".format(enetCV.coef_))
    print("coefs_/alpha: {}".format(enetCV_coef))
    return enetCV.alpha_, enetCV_coef
Example 2: test_enet_path
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def test_enet_path():
    # We use a large number of samples and of informative features so that
    # the l1_ratio selected is more toward ridge than lasso
    X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100, n_informative_features=100)
    max_iter = 150
    with warnings.catch_warnings():
        # Here we have a small number of iterations, and thus the
        # ElasticNet might not converge. This is to speed up tests
        warnings.simplefilter("ignore", UserWarning)
        clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter)
        clf.fit(X, y)
        # Well-conditioned settings, we should have selected our
        # smallest penalty
        assert_almost_equal(clf.alpha_, min(clf.alphas_))
        # Non-sparse ground truth: we should have selected an elastic-net
        # that is closer to ridge than to lasso
        assert_equal(clf.l1_ratio_, min(clf.l1_ratio))

        clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter, precompute=True)
        clf.fit(X, y)
        # Well-conditioned settings, we should have selected our
        # smallest penalty
        assert_almost_equal(clf.alpha_, min(clf.alphas_))
        # Non-sparse ground truth: we should have selected an elastic-net
        # that is closer to ridge than to lasso
        assert_equal(clf.l1_ratio_, min(clf.l1_ratio))

    # We are in well-conditioned settings with low noise: we should
    # have a good test-set performance
    assert_greater(clf.score(X_test, y_test), 0.99)
Example 3: test_enet_path
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def test_enet_path():
    # build an ill-posed linear regression problem with many noisy features and
    # comparatively few samples
    n_samples, n_features, max_iter = 50, 200, 50
    random_state = np.random.RandomState(0)
    w = random_state.randn(n_features)
    w[10:] = 0.0  # only the top 10 features are impacting the model
    X = random_state.randn(n_samples, n_features)
    y = np.dot(X, w)

    clf = ElasticNetCV(n_alphas=10, eps=1e-3, rho=0.95, cv=5,
                       max_iter=max_iter)
    clf.fit(X, y)
    assert_almost_equal(clf.alpha, 0.002, 2)

    clf = ElasticNetCV(n_alphas=10, eps=1e-3, rho=0.95, cv=5,
                       max_iter=max_iter, precompute=True)
    clf.fit(X, y)
    assert_almost_equal(clf.alpha, 0.002, 2)

    # test set
    X_test = random_state.randn(n_samples, n_features)
    y_test = np.dot(X_test, w)
    assert clf.score(X_test, y_test) > 0.99
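This example targets an old scikit-learn release: the `rho` parameter was later renamed to `l1_ratio`, and the selected penalty is exposed as the `alpha_` attribute in current versions. A hedged sketch of the equivalent call against the modern public API, reusing X, y, and max_iter from the example above, might look like this:

# Illustrative modern-API equivalent of the fit above (not part of the original example).
from sklearn.linear_model import ElasticNetCV

clf = ElasticNetCV(n_alphas=10, eps=1e-3, l1_ratio=0.95, cv=5, max_iter=max_iter)
clf.fit(X, y)
print(clf.alpha_)  # selected regularization strength (was clf.alpha with the old rho-based API)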
Example 4: test_path_parameters
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def test_path_parameters():
    X, y, _, _ = build_dataset()
    max_iter = 50
    clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter, l1_ratio=0.5)
    clf.fit(X, y)  # new params
    assert_almost_equal(0.5, clf.l1_ratio)
    assert_equal(50, clf.n_alphas)
    assert_equal(50, len(clf.alphas_))
Example 5: test_path_parameters
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def test_path_parameters():
    X, y = make_sparse_data()
    max_iter = 50
    n_alphas = 10
    clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
                       l1_ratio=0.5, fit_intercept=False)
    clf.fit(X, y)  # new params
    assert_almost_equal(0.5, clf.l1_ratio)
    assert_equal(n_alphas, clf.n_alphas)
    assert_equal(n_alphas, len(clf.alphas_))
Example 6: test_enet_path
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def test_enet_path():
    # We use a large number of samples and of informative features so that
    # the l1_ratio selected is more toward ridge than lasso
    X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
                                         n_informative_features=100)
    max_iter = 150

    # Here we have a small number of iterations, and thus the
    # ElasticNet might not converge. This is to speed up tests
    clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
                       l1_ratio=[0.5, 0.7], cv=3,
                       max_iter=max_iter)
    ignore_warnings(clf.fit)(X, y)
    # Well-conditioned settings, we should have selected our
    # smallest penalty
    assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
    # that is closer to ridge than to lasso
    assert_equal(clf.l1_ratio_, min(clf.l1_ratio))

    clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
                       l1_ratio=[0.5, 0.7], cv=3,
                       max_iter=max_iter, precompute=True)
    ignore_warnings(clf.fit)(X, y)
    # Well-conditioned settings, we should have selected our
    # smallest penalty
    assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
    # that is closer to ridge than to lasso
    assert_equal(clf.l1_ratio_, min(clf.l1_ratio))

    # We are in well-conditioned settings with low noise: we should
    # have a good test-set performance
    assert_greater(clf.score(X_test, y_test), 0.99)

    # Multi-output/target case
    X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
    clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
                                cv=3, max_iter=max_iter)
    ignore_warnings(clf.fit)(X, y)
    # We are in well-conditioned settings with low noise: we should
    # have a good test-set performance
    assert_greater(clf.score(X_test, y_test), 0.99)
    assert_equal(clf.coef_.shape, (3, 10))

    # Mono-output should have same cross-validated alpha_ and l1_ratio_
    # in both cases.
    X, y, _, _ = build_dataset(n_features=10)
    clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf1.fit(X, y)
    clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf2.fit(X, y[:, np.newaxis])
    assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
    assert_almost_equal(clf1.alpha_, clf2.alpha_)
Example 7: test_1d_multioutput_enet_and_multitask_enet_cv
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def test_1d_multioutput_enet_and_multitask_enet_cv():
    X, y, _, _ = build_dataset(n_features=10)
    y = y[:, np.newaxis]
    clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf.fit(X, y[:, 0])
    clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf1.fit(X, y)
    assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
    assert_almost_equal(clf.alpha_, clf1.alpha_)
    assert_almost_equal(clf.coef_, clf1.coef_[0])
    assert_almost_equal(clf.intercept_, clf1.intercept_[0])
Example 8: test_path_parameters
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def test_path_parameters():
    X, y = make_sparse_data()
    max_iter = 50
    n_alphas = 10
    clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
                       l1_ratio=0.5, fit_intercept=False)
    clf.fit(X, y)  # new params
    assert_almost_equal(0.5, clf.l1_ratio)
    assert_equal(n_alphas, clf.n_alphas)
    assert_equal(n_alphas, len(clf.alphas_))
    sparse_mse_path = clf.mse_path_
    clf.fit(X.toarray(), y)  # compare with dense data
    assert_almost_equal(clf.mse_path_, sparse_mse_path)
Example 9: test_enet_cv_positive_constraint
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def test_enet_cv_positive_constraint():
    X, y, X_test, y_test = build_dataset()
    max_iter = 500

    # Ensure the unconstrained fit has a negative coefficient
    enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, n_jobs=1)
    enetcv_unconstrained.fit(X, y)
    assert_true(min(enetcv_unconstrained.coef_) < 0)

    # On same data, constrained fit has non-negative coefficients
    enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2, positive=True, n_jobs=1)
    enetcv_constrained.fit(X, y)
    assert_true(min(enetcv_constrained.coef_) >= 0)
Example 10: test_enet_l1_ratio
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def test_enet_l1_ratio():
    # Test that an error message is raised if an estimator that
    # uses _alpha_grid is called with l1_ratio=0
    msg = ("Automatic alpha grid generation is not supported for l1_ratio=0. "
           "Please supply a grid by providing your estimator with the "
           "appropriate `alphas=` argument.")
    X = np.array([[1, 2, 4, 5, 8], [3, 5, 7, 7, 8]]).T
    y = np.array([12, 10, 11, 21, 5])
    assert_raise_message(ValueError, msg, ElasticNetCV(
        l1_ratio=0, random_state=42).fit, X, y)
    assert_raise_message(ValueError, msg, MultiTaskElasticNetCV(
        l1_ratio=0, random_state=42).fit, X, y[:, None])

    # Test that l1_ratio=0 is allowed if we supply a grid manually
    alphas = [0.1, 10]
    estkwds = {'alphas': alphas, 'random_state': 42}
    est_desired = ElasticNetCV(l1_ratio=0.00001, **estkwds)
    est = ElasticNetCV(l1_ratio=0, **estkwds)
    with ignore_warnings():
        est_desired.fit(X, y)
        est.fit(X, y)
    assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5)

    est_desired = MultiTaskElasticNetCV(l1_ratio=0.00001, **estkwds)
    est = MultiTaskElasticNetCV(l1_ratio=0, **estkwds)
    with ignore_warnings():
        est.fit(X, y[:, None])
        est_desired.fit(X, y[:, None])
    assert_array_almost_equal(est.coef_, est_desired.coef_, decimal=5)
Example 11: test_enet_path
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def test_enet_path():
    X, y, X_test, y_test = build_dataset()
    max_iter = 50
    clf = ElasticNetCV(n_alphas=10, eps=1e-3, rho=0.95, cv=5,
                       max_iter=max_iter)
    clf.fit(X, y)
    assert_almost_equal(clf.alpha, 0.002, 2)

    clf = ElasticNetCV(n_alphas=10, eps=1e-3, rho=0.95, cv=5,
                       max_iter=max_iter, precompute=True)
    clf.fit(X, y)
    assert_almost_equal(clf.alpha, 0.002, 2)

    # test set
    assert clf.score(X_test, y_test) > 0.99
Example 12: test_path_parameters
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def test_path_parameters():
    # build an ill-posed linear regression problem with many noisy features and
    # comparatively few samples
    n_samples, n_features, max_iter = 50, 200, 50
    random_state = np.random.RandomState(0)
    w = random_state.randn(n_features)
    w[10:] = 0.0  # only the top 10 features are impacting the model
    X = random_state.randn(n_samples, n_features)
    y = np.dot(X, w)

    clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
                       rho=0.5)
    clf.fit(X, y)  # new params
    assert_almost_equal(0.5, clf.rho)
    assert_equal(50, clf.n_alphas)
    assert_equal(50, len(clf.alphas))
Example 13: train_all
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def train_all(self):
    # Note: this example is Python 2 code (izip/imap, csv reader .next(), print statement).
    positions = ['PG.csv', 'SG.csv', 'SF.csv', 'PF.csv', 'C.csv']
    with open(self.models_file_path, 'w') as model_file:
        model_file_writer = csv.writer(model_file)
        for (first, filename) in izip(chain((True,), repeat(False)), positions):
            with open(os.path.join(self.cleaned_data_directory_path, filename),
                      'r') as cleaned_data:
                cleaned_data_reader = csv.reader(cleaned_data)
                cleaned_data_headers = cleaned_data_reader.next()
                lines = [map(float, line[:-1]) + line[-1:] for line in cleaned_data_reader
                         if len(line) >= 2]
                # convert lines to numpy array
                num_data = len(lines)
                num_features = len(lines[0]) - 2
                X = np.zeros((num_data, num_features))
                Y = np.zeros((num_data))
                for (i, data) in enumerate(lines):
                    for (ii, feature) in enumerate(data[:-2]):
                        X[i][ii] = feature
                    Y[i] = lines[i][-2]  # last one is name
                # create an instance of elasticnet
                net = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
                                   l1_ratio=[0.5, 0.7, 1], cv=3, normalize=True)
                # create a model based on our data
                net.fit(X, Y)
                if first:
                    model_file_writer.writerow(cleaned_data_headers[:-2])
                model_file_writer.writerow(net.coef_)
                with open(os.path.join(
                        self.residual_data_path,
                        '_'.join(('resid', filename))), 'w') as resid_file:
                    resid_file_writer = csv.writer(resid_file)
                    # get the residuals
                    resid = X.dot(net.coef_) - Y
                    for (name, row) in izip(imap(lambda l: l[-1], lines), resid):
                        resid_file_writer.writerow((name, row))
                    print sum(resid)
Example 14: test_enet_path
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def test_enet_path():
    X, y, X_test, y_test = build_dataset()
    max_iter = 150
    with warnings.catch_warnings():
        # Here we have a small number of iterations, and thus the
        # ElasticNet might not converge. This is to speed up tests
        warnings.simplefilter("ignore", UserWarning)
        clf = ElasticNetCV(n_alphas=5, eps=2e-3, rho=[0.9, 0.95], cv=3, max_iter=max_iter)
        clf.fit(X, y)
        assert_almost_equal(clf.alpha, 0.002, 2)
        assert_equal(clf.rho_, 0.95)

        clf = ElasticNetCV(n_alphas=5, eps=2e-3, rho=[0.9, 0.95], cv=3, max_iter=max_iter, precompute=True)
        clf.fit(X, y)
        assert_almost_equal(clf.alpha, 0.002, 2)
        assert_equal(clf.rho_, 0.95)

    # test set
    assert_greater(clf.score(X_test, y_test), 0.99)
Example 15: test_sparse_input_dtype_enet_and_lassocv
# Required import: from sklearn.linear_model.coordinate_descent import ElasticNetCV [as alias]
# Or: from sklearn.linear_model.coordinate_descent.ElasticNetCV import fit [as alias]
def test_sparse_input_dtype_enet_and_lassocv():
    X, y, _, _ = build_dataset(n_features=10)
    clf = ElasticNetCV(n_alphas=5)
    clf.fit(sparse.csr_matrix(X), y)
    clf1 = ElasticNetCV(n_alphas=5)
    clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
    assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
    assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)

    clf = LassoCV(n_alphas=5)
    clf.fit(sparse.csr_matrix(X), y)
    clf1 = LassoCV(n_alphas=5)
    clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
    assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
    assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)