This article collects typical usage examples of the Python method sklearn.decomposition.SparsePCA.fit. If you are unsure what SparsePCA.fit does, how to call it, or where it is used, the hand-picked code examples below should help. You can also read further about the enclosing class, sklearn.decomposition.SparsePCA.
The following shows 15 code examples of SparsePCA.fit, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
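Before the collected examples, here is a minimal self-contained sketch of the usual fit/transform call sequence for SparsePCA; the random data and parameter values are illustrative assumptions and are not taken from the examples below.

import numpy as np
from sklearn.decomposition import SparsePCA

# illustrative data: 50 samples with 30 features
rng = np.random.RandomState(0)
X = rng.randn(50, 30)

# fit a sparse PCA model, then project the data onto the learned components
spca = SparsePCA(n_components=5, alpha=1.0, random_state=0)
spca.fit(X)                     # learns sparse components_
X_proj = spca.transform(X)      # shape (50, 5)
print(spca.components_.shape)   # (5, 30); many entries are exactly zero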
Example 1: test_fit_transform
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
def test_fit_transform():
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
                          random_state=0)
    spca_lars.fit(Y)
    U1 = spca_lars.transform(Y)
    # Test multiple CPUs
    if sys.platform == 'win32':  # fake parallelism for win32
        import sklearn.externals.joblib.parallel as joblib_par
        _mp = joblib_par.multiprocessing
        joblib_par.multiprocessing = None
        try:
            spca = SparsePCA(n_components=3, n_jobs=2, random_state=0,
                             alpha=alpha).fit(Y)
            U2 = spca.transform(Y)
        finally:
            joblib_par.multiprocessing = _mp
    else:  # we can efficiently use parallelism
        spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
                         random_state=0).fit(Y)
        U2 = spca.transform(Y)
    assert_true(not np.all(spca_lars.components_ == 0))
    assert_array_almost_equal(U1, U2)
    # Test that CD gives similar results
    spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
                           alpha=alpha)
    spca_lasso.fit(Y)
    assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
Example 2: test_initialization
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
def test_initialization():
    rng = np.random.RandomState(0)
    U_init = rng.randn(5, 3)
    V_init = rng.randn(3, 4)
    model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init,
                      max_iter=0, random_state=rng)
    model.fit(rng.randn(5, 4))
    assert_array_equal(model.components_, V_init)
Example 3: sccodedirect
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
def sccodedirect():
    "Obtain the RPCA result without glasses"
    nglassmodel = np.load('nglassline.npy').astype('f')
    from sklearn.decomposition import SparsePCA
    learning = SparsePCA(500, verbose=True)
    learning.fit(nglassmodel)
    import cPickle
    cPickle.dump(learning, file('sparsepcadirect', 'wb'), -1)
Example 4: sparse_pca
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
def sparse_pca(self):
    """
    Runs sparse PCA on the view and returns the projected view and the
    principal components.
    """
    model = SparsePCA(n_components=param['components'],
                      alpha=param['sparse_pca_alpha'])
    model.fit(self.view)
    return model.transform(self.view), model.components_
Example 5: do_sparse_pca
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
def do_sparse_pca(sparse_matrix):
    # from scikit-learn: http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.SparsePCA.html#sklearn.decomposition.SparsePCA
    dense_matrix = sparse_matrix.tobsr().toarray()
    # instantiate the SPCA with some parameters
    spca = SparsePCA(n_components=6, alpha=0.01, ridge_alpha=0.01, max_iter=1000,
                     tol=1e-08, method='lars', n_jobs=1, U_init=None, V_init=None,
                     verbose=False, random_state=None)
    # train the SPCA with our matrix
    spca.fit(dense_matrix)
    # return the components
    return spca.components_
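A minimal usage sketch for do_sparse_pca, assuming a SciPy sparse matrix as input; the matrix shape and density below are purely illustrative.

import numpy as np
from scipy import sparse

# illustrative sparse input: 50 samples, 20 features, 10% non-zero entries
rng = np.random.RandomState(0)
X_sparse = sparse.random(50, 20, density=0.1, format='csr', random_state=rng)
components = do_sparse_pca(X_sparse)
print(components.shape)  # (6, 20): one sparse component per row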
Example 6: test_initialization
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
def test_initialization(norm_comp):
    rng = np.random.RandomState(0)
    U_init = rng.randn(5, 3)
    V_init = rng.randn(3, 4)
    model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
                      random_state=rng, normalize_components=norm_comp)
    model.fit(rng.randn(5, 4))
    if norm_comp:
        assert_allclose(model.components_,
                        V_init / np.linalg.norm(V_init, axis=1)[:, None])
    else:
        assert_allclose(model.components_, V_init)
Example 7: test_fit_transform
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
def test_fit_transform():
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
                          random_state=0)
    spca_lars.fit(Y)
    # Test that CD gives similar results
    spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
                           alpha=alpha)
    spca_lasso.fit(Y)
    assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
Example 8: test_fit_transform_parallel
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
def test_fit_transform_parallel():
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
                          random_state=0)
    spca_lars.fit(Y)
    U1 = spca_lars.transform(Y)
    # Test multiple CPUs
    spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
                     random_state=0).fit(Y)
    U2 = spca.transform(Y)
    assert_true(not np.all(spca_lars.components_ == 0))
    assert_array_almost_equal(U1, U2)
Example 9: test_pca_vs_spca
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
def test_pca_vs_spca():
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng)
    Z, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)
    spca = SparsePCA(alpha=0, ridge_alpha=0, n_components=2,
                     normalize_components=True)
    pca = PCA(n_components=2)
    pca.fit(Y)
    spca.fit(Y)
    results_test_pca = pca.transform(Z)
    results_test_spca = spca.transform(Z)
    assert_allclose(np.abs(spca.components_.dot(pca.components_.T)),
                    np.eye(2), atol=1e-5)
    results_test_pca *= np.sign(results_test_pca[0, :])
    results_test_spca *= np.sign(results_test_spca[0, :])
    assert_allclose(results_test_pca, results_test_spca)
Example 10: spca
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
def spca(data, num_components=None, alpha=1):
    # creates a matrix with sparse principal component analysis
    # build matrix with all data (skip samples that contain NaNs)
    data = [d.flatten() for d in data if not isnan(d).any()]
    datamatrix = row_stack(data)
    # center data
    cdata = datamatrix - mean(datamatrix, axis=0)
    if num_components is None:
        num_components = cdata.shape[0]
    # do spca on matrix
    spca = SparsePCA(n_components=num_components, alpha=alpha)
    spca.fit(cdata)
    # normalize components
    components = spca.components_.T
    for r in xrange(0, components.shape[1]):
        compnorm = numpy.apply_along_axis(numpy.linalg.norm, 0, components[:, r])
        if not compnorm == 0:
            components[:, r] /= compnorm
    components = components.T
    # calc adjusted explained variance from "Sparse Principal Component Analysis"
    # by Zou, Hastie, Tibshirani
    spca.components_ = components
    #nuz = spca.transform(cdata).T
    nuz = ridge_regression(spca.components_.T, cdata.T, 0.01,
                           solver='dense_cholesky').T
    #nuz = dot(components, cdata.T)
    q, r = qr(nuz.T)
    cumulative_var = []
    for i in range(1, num_components + 1):
        cumulative_var.append(trace(r[0:i, ] * r[0:i, ]))
    explained_var = [math.sqrt(cumulative_var[0])]
    for i in range(1, num_components):
        explained_var.append(math.sqrt(cumulative_var[i]) - math.sqrt(cumulative_var[i - 1]))
    order = numpy.argsort(explained_var)[::-1]
    components = numpy.take(components, order, axis=0)
    evars = numpy.take(explained_var, order).tolist()
    #evars = numpy.take(explained_var,order)
    #order2 = [0,1,2,4,5,7,12,19]
    #components = numpy.take(components,order2,axis=0)
    #evars = numpy.take(evars,order2).tolist()
    return components, evars
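A brief hypothetical usage sketch for the spca helper above, assuming the Python 2 / older scikit-learn environment the function was written for and a list of equally sized NumPy arrays as input; the patch size and parameter values are invented for illustration.

import numpy as np

# ten 8x8 patches; spca() flattens each one into a row of the data matrix
patches = [np.random.rand(8, 8) for _ in range(10)]
components, evars = spca(patches, num_components=3, alpha=1)
print(components.shape)  # (3, 64): one flattened sparse component per row
print(evars)             # adjusted explained variances, sorted in descending order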
Example 11: test_fit_transform_tall
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
def test_fit_transform_tall():
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng)  # tall array
    spca_lars = SparsePCA(n_components=3, method="lars", random_state=rng)
    U1 = spca_lars.fit_transform(Y)
    spca_lasso = SparsePCA(n_components=3, method="cd", random_state=rng)
    U2 = spca_lasso.fit(Y).transform(Y)
    assert_array_almost_equal(U1, U2)
Example 12: test_fit_transform
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
def test_fit_transform():
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
                          random_state=0)
    spca_lars.fit(Y)
    # Test that CD gives similar results
    spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
                           alpha=alpha)
    spca_lasso.fit(Y)
    assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
    # Test that deprecated ridge_alpha parameter throws warning
    warning_msg = "The ridge_alpha parameter on transform()"
    assert_warns_message(DeprecationWarning, warning_msg, spca_lars.transform,
                         Y, ridge_alpha=0.01)
    assert_warns_message(DeprecationWarning, warning_msg, spca_lars.transform,
                         Y, ridge_alpha=None)
Example 13: SPCA
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
class SPCA(object):
    """
    Wrapper for the sklearn package. Performs sparse PCA.

    SPCA has 5 methods:
       - fit(waveforms)
         update the class instance with the sparse PCA fit
       - fit_transform()
         do what fit() does, but additionally return the projection onto the SPCA space
       - inverse_transform(A)
         invert the decomposition; returns waveforms for an input A, using Z
       - get_basis()
         returns the basis vectors Z^\dagger
       - get_params()
         returns metadata used for the fits
    """
    def __init__(self, num_components=10,
                 catalog_name='unknown',
                 alpha=0.1,
                 ridge_alpha=0.01,
                 max_iter=2000,
                 tol=1e-9,
                 n_jobs=1,
                 random_state=None):

        self._decomposition = 'Sparse PCA'
        self._num_components = num_components
        self._catalog_name = catalog_name
        self._alpha = alpha
        self._ridge_alpha = ridge_alpha
        self._n_jobs = n_jobs
        self._max_iter = max_iter
        self._tol = tol
        self._random_state = random_state

        self._SPCA = SparsePCA(n_components=self._num_components,
                               alpha=self._alpha,
                               ridge_alpha=self._ridge_alpha,
                               n_jobs=self._n_jobs,
                               max_iter=self._max_iter,
                               tol=self._tol,
                               random_state=self._random_state)

    def fit(self, waveforms):
        # TODO make sure there are more columns than rows (transpose if not)
        # normalize waveforms
        self._waveforms = waveforms
        self._SPCA.fit(self._waveforms)

    def fit_transform(self, waveforms):
        # TODO make sure there are more columns than rows (transpose if not)
        # normalize waveforms
        self._waveforms = waveforms
        self._A = self._SPCA.fit_transform(self._waveforms)
        return self._A

    def inverse_transform(self, A):
        # convert basis back to waveforms using the fit
        new_waveforms = self._SPCA.inverse_transform(A)
        return new_waveforms

    def get_params(self):
        # TODO know what catalog was used! (include waveform metadata)
        params = self._SPCA.get_params()
        params['num_components'] = params.pop('n_components')
        params['Decomposition'] = self._decomposition
        return params

    def get_basis(self):
        """Return the SPCA basis vectors (Z^\dagger)."""
        Zt = self._SPCA.components_
        return Zt
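A short hypothetical usage sketch for this wrapper, assuming the waveforms are stored row-wise in a NumPy array; the shapes and parameter values below are invented for illustration.

import numpy as np

# illustrative input: 100 waveforms with 256 samples each
waveforms = np.random.randn(100, 256)

model = SPCA(num_components=10, catalog_name='demo', alpha=0.1)
A = model.fit_transform(waveforms)            # projection coefficients, shape (100, 10)
Zt = model.get_basis()                        # sparse basis vectors Z^\dagger, shape (10, 256)
print(model.get_params()['num_components'])   # 10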
Example 14: fit
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
def fit(self, dif_df):
    factorization = SparsePCA(n_components=self.n_components, alpha=0.03)
    X = dif_df.values[1:]
    self.ticker_symbols_used = dif_df.columns.values
    factorization.fit(X)
    self.factorization = factorization
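A hypothetical call sequence for this fit method, assuming dif_df is a pandas DataFrame of per-ticker price differences; the holder class, ticker symbols, and shapes below are invented for illustration.

import numpy as np
import pandas as pd

class PriceFactorModel(object):     # hypothetical holder for the fit method above
    def __init__(self, n_components=2):
        self.n_components = n_components

dif_df = pd.DataFrame(np.random.randn(250, 4),
                      columns=['AAPL', 'MSFT', 'GOOG', 'AMZN'])
model = PriceFactorModel(n_components=2)
fit(model, dif_df)                  # call the method above as a plain function
print(model.ticker_symbols_used)              # ['AAPL' 'MSFT' 'GOOG' 'AMZN']
print(model.factorization.components_.shape)  # (2, 4)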
Example 15: list
# Required module import: from sklearn.decomposition import SparsePCA [as alias]
# Or: from sklearn.decomposition.SparsePCA import fit [as alias]
N = 500
P = 10
MU = [0] * P
T = 1 # spike level
K = 2 # sparsity level
V = list(range(1,K+1)) + [0]*(P-K)
V = V / np.linalg.norm(V)
SIG = np.identity(P) + T * np.matrix(V).transpose() * np.matrix(V)
X = np.matrix(np.random.multivariate_normal(MU,SIG,N))
#####
# using scikit-learn method for Sparse PCA (like an l1-regularized dictionary learning problem)
from sklearn.decomposition import SparsePCA
spca = SparsePCA(n_components=1, alpha=5)
spca.fit(X)
from sklearn.decomposition import PCA
pca = PCA(n_components=1)
pca.fit(X)
print('Classical 1st principal component:', pca.components_)
print('Sparse 1st principal component:', spca.components_)
#####
# TODO: SDP implementation a la El Ghaoui, Bach, D'Aspremont
import cvxopt
# TWO CONSTRAINTS
# trace = 1 (multiply with identity)
# l1 norm <= k (multiply with all 1s matrix)
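The TODO above refers to the semidefinite relaxation of sparse PCA by d'Aspremont, El Ghaoui, Jordan, and Lanckriet (DSPCA), which uses exactly the two constraints listed in the comments. The sketch below is a rough illustration of that relaxation using cvxpy rather than cvxopt; the cardinality bound k, the default solver, and the eigenvector rounding step are assumptions on top of the original snippet.

import cvxpy as cp
import numpy as np

def dspca_sdp(Sigma, k):
    # Semidefinite relaxation of sparse PCA:
    #   maximize   Tr(Sigma X)
    #   subject to Tr(X) == 1          (trace constraint, via the identity matrix)
    #              sum_ij |X_ij| <= k  (l1 constraint, via the all-ones matrix)
    #              X positive semidefinite
    p = Sigma.shape[0]
    X = cp.Variable((p, p), PSD=True)
    problem = cp.Problem(cp.Maximize(cp.trace(Sigma @ X)),
                         [cp.trace(X) == 1,
                          cp.sum(cp.abs(X)) <= k])
    problem.solve()
    # rounding: the leading eigenvector of the optimal X approximates
    # the sparse first principal component
    eigvals, eigvecs = np.linalg.eigh(X.value)
    return eigvecs[:, -1]

# e.g. applied to the sample covariance of the X generated above:
# v_sparse = dspca_sdp(np.cov(np.asarray(X), rowvar=False), k=K)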