本文整理汇总了Python中scikits.learn.linear_model.lars_path函数的典型用法代码示例。如果您正苦于以下问题:Python lars_path函数的具体用法?Python lars_path怎么用?Python lars_path使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了lars_path函数的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_singular_matrix
def test_singular_matrix():
    """Check that lars_path handles a rank-deficient (singular) design.

    Both rows of the design are identical, so the path must activate a
    single feature and leave the redundant one at zero.
    """
    design = np.array([[1., 1.], [1., 1.]])
    target = np.array([1, 1])
    _, _, path = linear_model.lars_path(design, target)
    expected = [[0, 0], [1, 0], [1, 0]]
    assert_array_almost_equal(path.T, expected)
示例2: test_lasso_gives_lstsq_solution
def test_lasso_gives_lstsq_solution():
    """Lars Lasso must terminate at the ordinary least-squares solution.

    At the end of the regularization path the penalty vanishes, so the
    final coefficient vector should match ``np.linalg.lstsq``.
    """
    _, _, path = linear_model.lars_path(X, y, method="lasso")
    # lstsq returns (solution, residuals, rank, singular values); keep the solution
    lstsq_coef = np.linalg.lstsq(X, y)[0]
    # last column of the path = unregularized (alpha -> 0) estimate
    assert_array_almost_equal(lstsq_coef, path[:, -1])
示例3: test_lasso_lars_vs_lasso_cd
def test_lasso_lars_vs_lasso_cd(verbose=False):
    """
    Check that the LARS and coordinate-descent Lasso solvers agree.

    First compares the full lars_path against a Lasso refit at each alpha
    of the path, then repeats the comparison through the estimator API on
    a grid of alphas.
    """
    alphas, _, path = linear_model.lars_path(X, y, method='lasso')
    cd_model = linear_model.Lasso(fit_intercept=False)
    for lars_coef, alpha in zip(path.T, alphas):
        cd_model.alpha = alpha
        cd_model.fit(X, y, tol=1e-8)
        gap = np.linalg.norm(lars_coef - cd_model.coef_)
        assert gap < 0.01
    # similar test, with the classifiers
    for alpha in np.linspace(1e-2, 1 - 1e-2):
        lars_clf = linear_model.LassoLARS(alpha=alpha).fit(X, y)
        cd_clf = linear_model.Lasso(alpha=alpha).fit(X, y, tol=1e-8)
        gap = np.linalg.norm(lars_clf.coef_ - cd_clf.coef_)
        assert gap < 1e-3
示例4: compute_bench
def compute_bench(samples_range, features_range):
    """Time lars_path and lasso_path, with and without a precomputed Gram
    matrix, on every (n_samples, n_features) pair of the two ranges.

    NOTE: Python 2 syntax (print statements); legacy scikits.learn API.

    Parameters
    ----------
    samples_range : iterable of int
    features_range : iterable of int

    Returns
    -------
    dict mapping a benchmark label to a list of elapsed times in seconds,
    one entry per grid point, in iteration order.
    """
    it = 0
    # timings grouped by benchmark label; each value is a growing list
    results = defaultdict(lambda: [])
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print '===================='
            print 'Iteration %03d of %03d' % (it, max_it)
            print '===================='
            dataset_kwargs = {
                'n_train_samples': n_samples,
                'n_test_samples': 2,
                'n_features': n_features,
                'n_informative': n_features / 10,  # Python 2 integer division
                'effective_rank': min(n_samples, n_features) / 10,
                #'effective_rank': None,
                'bias': 0.0,
            }
            print "n_samples: %d" % n_samples
            print "n_features: %d" % n_features
            X, y, _, _, _ = make_regression_dataset(**dataset_kwargs)
            # collect garbage before each timing so GC pauses do not
            # pollute the measurement
            gc.collect()
            print "benching lars_path (with Gram):",
            sys.stdout.flush()
            tstart = time()
            G = np.dot(X.T, X)  # precomputed Gram matrix
            Xy = np.dot(X.T, y)
            # NOTE(review): Gram/Xy are computed inside the timed region,
            # so this figure includes the precomputation cost
            lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
            delta = time() - tstart
            print "%0.3fs" % delta
            results['lars_path (with Gram)'].append(delta)
            gc.collect()
            print "benching lars_path (without Gram):",
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, method='lasso')
            delta = time() - tstart
            print "%0.3fs" % delta
            results['lars_path (without Gram)'].append(delta)
            gc.collect()
            print "benching lasso_path (with Gram):",
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=True)
            delta = time() - tstart
            print "%0.3fs" % delta
            results['lasso_path (with Gram)'].append(delta)
            gc.collect()
            print "benching lasso_path (without Gram):",
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=False)
            delta = time() - tstart
            print "%0.3fs" % delta
            results['lasso_path (without Gram)'].append(delta)
    return results
示例5: compute_bench
def compute_bench(samples_range, features_range):
    """Compare LARS against Orthogonal Matching Pursuit (OMP), each with
    and without a precomputed Gram matrix, on sparse-coded signals.

    NOTE: Python 2 syntax (print statements); legacy scikits.learn API.

    Parameters
    ----------
    samples_range : sequence of int
    features_range : sequence of int

    Returns
    -------
    dict with two entries ('w/ Gram' and 'w/o Gram'), each a
    (len(features_range), len(samples_range)) array of
    time(LARS) / time(OMP) ratios.
    """
    it = 0
    results = dict()
    # one timing cell per (feature, sample) grid point
    lars = np.empty((len(features_range), len(samples_range)))
    lars_gram = lars.copy()
    omp = lars.copy()
    omp_gram = lars.copy()
    max_it = len(samples_range) * len(features_range)
    for i_s, n_samples in enumerate(samples_range):
        for i_f, n_features in enumerate(features_range):
            it += 1
            n_informative = n_features / 10  # Python 2 integer division
            print '===================='
            print 'Iteration %03d of %03d' % (it, max_it)
            print '===================='
            # dataset_kwargs = {
            #     'n_train_samples': n_samples,
            #     'n_test_samples': 2,
            #     'n_features': n_features,
            #     'n_informative': n_informative,
            #     'effective_rank': min(n_samples, n_features) / 10,
            #     #'effective_rank': None,
            #     'bias': 0.0,
            # }
            # NOTE(review): n_components/n_features below appear transposed
            # relative to the outer loop names — verify against the
            # make_sparse_coded_signal signature
            dataset_kwargs = {
                'n_samples': 1,
                'n_components': n_features,
                'n_features': n_samples,
                'n_nonzero_coefs': n_informative,
                'random_state': 0
            }
            print "n_samples: %d" % n_samples
            print "n_features: %d" % n_features
            y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
            # column-major layout; presumably what the solvers prefer — TODO confirm
            X = np.asfortranarray(X)
            # collect garbage before each timing so GC pauses do not
            # pollute the measurement
            gc.collect()
            print "benching lars_path (with Gram):",
            sys.stdout.flush()
            tstart = time()
            G = np.dot(X.T, X)  # precomputed Gram matrix
            Xy = np.dot(X.T, y)
            # NOTE(review): Gram/Xy are computed inside the timed region,
            # so this figure includes the precomputation cost
            lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
            delta = time() - tstart
            print "%0.3fs" % delta
            lars_gram[i_f, i_s] = delta
            gc.collect()
            print "benching lars_path (without Gram):",
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, Gram=None, max_iter=n_informative)
            delta = time() - tstart
            print "%0.3fs" % delta
            lars[i_f, i_s] = delta
            gc.collect()
            print "benching orthogonal_mp (with Gram):",
            sys.stdout.flush()
            tstart = time()
            orthogonal_mp(X, y, precompute_gram=True,
                          n_nonzero_coefs=n_informative)
            delta = time() - tstart
            print "%0.3fs" % delta
            omp_gram[i_f, i_s] = delta
            gc.collect()
            print "benching orthogonal_mp (without Gram):",
            sys.stdout.flush()
            tstart = time()
            orthogonal_mp(X, y, precompute_gram=False,
                          n_nonzero_coefs=n_informative)
            delta = time() - tstart
            print "%0.3fs" % delta
            omp[i_f, i_s] = delta
    # element-wise ratios: > 1 means LARS was slower than OMP
    results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
    results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
    return results
示例6: load_diabetes
# Model selection by information criteria (AIC/BIC) along the Lasso path
# on the diabetes data.  NOTE(review): this excerpt is truncated — the
# for-loop at the bottom is cut off before df[k] is assigned.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
# add garbage features
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 10)]
n_samples = X.shape[0]
# Standardize the data to avoid intercept problems
y -= np.mean(y)
X -= np.mean(X, axis=0)
X /= np.std(X, axis=0)
print "Computing regularization path using the LARS ..."
alphas, features, coefs = lars_path(X, y, method='lasso', verbose=True)
###############################################################################
# BIC and AIC
K_aic = 2  # AIC penalty weight
K_bic = log(n_samples)  # BIC penalty weight
R = y[:, np.newaxis] - np.dot(X, coefs)  # residuals, one column per path step
# residual sum of squares per path step (labelled "mse" in the original)
mse = np.sum(R ** 2, axis=0)
df = np.zeros(coefs.shape[1], dtype=np.int)  # Degrees of freedom
for k, coef in enumerate(coefs.T):
    mask = coef != 0  # active (nonzero) features at step k
    if not np.any(mask):
        continue  # all-zero model: leave df[k] at 0
    Xc = X[:, mask]
示例7: Regression
# Plot the Lasso regularization path (LARS algorithm) for the diabetes data.
# NOTE(review): `np`, `pl` (pylab/pyplot) and `datetime` are used below but
# imported outside this excerpt — confirm against the full script.
from scikits.learn import linear_model
from scikits.learn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X[:,6] *= -1 # To reproduce wikipedia LAR page
################################################################################
# Compute path functions
print "Computing regularization path using the LARS ..."
start = datetime.now()
_, _, coefs_ = linear_model.lars_path(X, y, max_features=10, method="lasso")
print "This took ", datetime.now() - start
###############################################################################
# Display path
# x axis: L1 norm of the coefficients at each step, scaled to [0, 1]
xx = np.sum(np.abs(coefs_), axis=0)
xx /= xx[-1]
pl.plot(xx, coefs_.T)
ymin, ymax = pl.ylim()
# dashed vertical line at each step of the path
pl.vlines(xx, ymin, ymax, linestyle='dashed')
pl.xlabel('|coef| / max|coef|')
pl.ylabel('Coefficients')
pl.title('Least Angle Regression (LAR) Path')
pl.axis('tight')
pl.show()