This article collects typical usage examples of the Python method sklearn.utils.extmath.svd_flip. If you are unsure what extmath.svd_flip does or how to use it, the curated examples below may help. You can also explore the other utilities in the sklearn.utils.extmath module.
The sections below show 7 code examples of extmath.svd_flip, sorted by popularity by default.
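Before diving into the examples, here is a minimal sketch (written for this article, not taken from any project below) of what svd_flip does: the sign of each singular-vector pair returned by an SVD is arbitrary, and svd_flip flips the signs to a deterministic convention without changing the reconstruction.

import numpy as np
from scipy import linalg
from sklearn.utils.extmath import svd_flip

X = np.random.RandomState(0).randn(6, 4)
U, S, Vt = linalg.svd(X, full_matrices=False)
# With u_based_decision=True (the default), each column of U is flipped so
# its largest-magnitude entry is positive; the matching row of Vt is
# flipped together with it, so U @ diag(S) @ Vt still reconstructs X.
U_fixed, Vt_fixed = svd_flip(U, Vt)
np.testing.assert_allclose((U_fixed * S) @ Vt_fixed, X, atol=1e-10)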
Example 1: test_svd_flip
# Required import: from sklearn.utils import extmath
# or: from sklearn.utils.extmath import svd_flip
# (this test also uses numpy, scipy.linalg and an assert_almost_equal helper)
def test_svd_flip():
    # Check that svd_flip works in both situations, and reconstructs input.
    rs = np.random.RandomState(1999)
    n_samples = 20
    n_features = 10
    X = rs.randn(n_samples, n_features)
    # Check matrix reconstruction
    U, S, V = linalg.svd(X, full_matrices=False)
    U1, V1 = svd_flip(U, V, u_based_decision=False)
    assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
    # Check transposed matrix reconstruction
    XT = X.T
    U, S, V = linalg.svd(XT, full_matrices=False)
    U2, V2 = svd_flip(U, V, u_based_decision=True)
    assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
    # Check that different flip methods are equivalent under reconstruction
    U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
    assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
    U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
    assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
Example 2: _my_svd
# Required import: from sklearn.utils import extmath
# or: from sklearn.utils.extmath import svd_flip
# (also uses randomized_svd from sklearn.utils.extmath and svds from scipy.sparse.linalg)
def _my_svd(M, k, algorithm):
    if algorithm == 'randomized':
        (U, S, V) = randomized_svd(
            M, n_components=min(k, M.shape[1] - 1), n_oversamples=20)
    elif algorithm == 'arpack':
        (U, S, V) = svds(M, k=min(k, min(M.shape) - 1))
        # svds returns singular values in ascending order; reverse them and
        # reorder the singular vectors to match before fixing the signs.
        S = S[::-1]
        U, V = svd_flip(U[:, ::-1], V[::-1])
    else:
        raise ValueError("unknown algorithm")
    return (U, S, V)
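A hypothetical call to _my_svd above (it assumes randomized_svd, svds, and svd_flip are imported as the header comments suggest):

import numpy as np

M = np.random.RandomState(0).randn(50, 8)
U, S, V = _my_svd(M, k=5, algorithm='arpack')
# The reversal plus svd_flip inside _my_svd yields a descending,
# sign-deterministic result.
assert np.all(np.diff(S) <= 0)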
Example 3: _svd_flip_copy
# Required import: from sklearn.utils import extmath
# or: from sklearn.utils.extmath import svd_flip
def _svd_flip_copy(x, y, u_based_decision=True):
    # svd_flip may modify its inputs in place; if the arrays are locked
    # (read-only), copy them first and flip the copies instead.
    # This happens with very large arrays (> 1TB).
    # GH: issue 592
    try:
        return skm.svd_flip(x, y, u_based_decision=u_based_decision)
    except ValueError:
        return skm.svd_flip(x.copy(), y.copy(), u_based_decision=u_based_decision)
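The ValueError branch exists because svd_flip multiplies its inputs in place in some scikit-learn versions, which fails on read-only buffers. A hedged sketch of that failure mode (assuming skm is sklearn.utils.extmath, as in the header comments):

import numpy as np

u = np.random.RandomState(0).randn(10, 3)
v = np.random.RandomState(1).randn(3, 5)
u.setflags(write=False)  # simulate a locked (read-only) buffer
u2, v2 = _svd_flip_copy(u, v)  # the except branch copies, then flips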
Example 4: svd_flip
# Required import: from sklearn.utils import extmath
# or: from sklearn.utils.extmath import svd_flip
# (also uses dask: from dask import delayed; import dask.array as da)
def svd_flip(u, v):
    # Defer the sign flip to a delayed task, then wrap the two delayed
    # results back into dask arrays with the original shapes and dtypes.
    u2, v2 = delayed(_svd_flip_copy, nout=2)(u, v)
    u = da.from_delayed(u2, shape=u.shape, dtype=u.dtype)
    v = da.from_delayed(v2, shape=v.shape, dtype=v.dtype)
    return u, v
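Hypothetical usage of the dask wrapper above (assumes dask is installed and that delayed, da, skm, and _svd_flip_copy are in scope as in the snippets):

import numpy as np
import dask.array as da

u = da.from_array(np.random.RandomState(0).randn(100, 4), chunks=(50, 4))
v = da.from_array(np.random.RandomState(1).randn(4, 20), chunks=(4, 20))
u2, v2 = svd_flip(u, v)      # lazy dask arrays, nothing computed yet
u2, v2 = da.compute(u2, v2)  # materialize as NumPy arrays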
Example 5: compute_svd
# Required import: from sklearn.utils import extmath
# or: from sklearn.utils.extmath import svd_flip
def compute_svd(X, n_components, n_iter, random_state, engine):
    """Computes a truncated SVD with n_components components."""
    # Determine which SVD engine to use
    if engine == 'auto':
        engine = 'sklearn'
    # Compute the SVD
    if engine == 'fbpca':
        if FBPCA_INSTALLED:
            U, s, V = fbpca.pca(X, k=n_components, n_iter=n_iter)
        else:
            raise ValueError('fbpca is not installed; please install it if you want to use it')
    elif engine == 'sklearn':
        U, s, V = extmath.randomized_svd(
            X,
            n_components=n_components,
            n_iter=n_iter,
            random_state=random_state
        )
    else:
        raise ValueError("engine has to be one of ('auto', 'fbpca', 'sklearn')")
    # Fix the signs so the decomposition is deterministic across engines
    U, V = extmath.svd_flip(U, V)
    return U, s, V
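A hypothetical call to compute_svd above; with engine='auto' it falls back to scikit-learn's randomized_svd, so fbpca is not required (the module-level imports from the header comments are assumed to be in place):

import numpy as np

X = np.random.RandomState(0).randn(200, 30)
U, s, V = compute_svd(X, n_components=5, n_iter=4, random_state=42, engine='auto')
print(U.shape, s.shape, V.shape)  # (200, 5) (5,) (5, 30)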
Example 6: _pca_with_sparse
# Required import: from sklearn.utils import extmath
# or: from sklearn.utils.extmath import svd_flip
def _pca_with_sparse(X, npcs, solver='arpack', mu=None, random_state=None):
    random_state = check_random_state(random_state)
    np.random.set_state(random_state.get_state())
    random_init = np.random.rand(np.min(X.shape))
    X = check_array(X, accept_sparse=['csr', 'csc'])

    # Implicitly center the sparse matrix: build a LinearOperator that
    # behaves like (X - mu) without ever densifying X.
    if mu is None:
        mu = X.mean(0).A.flatten()[None, :]
    mdot = mu.dot
    mmat = mdot
    mhdot = mu.T.dot
    mhmat = mu.T.dot
    Xdot = X.dot
    Xmat = Xdot
    XHdot = X.T.conj().dot
    XHmat = XHdot
    ones = np.ones(X.shape[0])[None, :].dot

    def matvec(x):
        return Xdot(x) - mdot(x)

    def matmat(x):
        return Xmat(x) - mmat(x)

    def rmatvec(x):
        return XHdot(x) - mhdot(ones(x))

    def rmatmat(x):
        return XHmat(x) - mhmat(ones(x))

    XL = LinearOperator(
        matvec=matvec,
        dtype=X.dtype,
        matmat=matmat,
        shape=X.shape,
        rmatvec=rmatvec,
        rmatmat=rmatmat,
    )

    u, s, v = svds(XL, solver=solver, k=npcs, v0=random_init)
    u, v = svd_flip(u, v)
    # svds returns components in ascending order of singular value;
    # sort them into descending order.
    idx = np.argsort(-s)
    v = v[idx, :]
    X_pca = (u * s)[:, idx]
    ev = s[idx] ** 2 / (X.shape[0] - 1)
    total_var = _get_mean_var(X)[1].sum()
    ev_ratio = ev / total_var

    output = {
        'X_pca': X_pca,
        'variance': ev,
        'variance_ratio': ev_ratio,
        'components': v,
    }
    return output
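Hypothetical usage of _pca_with_sparse above. Note that _get_mean_var is an internal helper of the source package, so this only runs with that module's imports (check_random_state, check_array, LinearOperator, svds, svd_flip) in place:

import numpy as np
import scipy.sparse as sp

X = sp.random(500, 40, density=0.1, format='csr', random_state=0)
out = _pca_with_sparse(X, npcs=10, random_state=0)
print(out['X_pca'].shape, out['components'].shape)  # (500, 10) (10, 40)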
Example 7: fit
# Required import: from sklearn.utils import extmath
# or: from sklearn.utils.extmath import svd_flip
def fit(self):
    """Fit the model by computing the full SVD of m.

    SVD factors the matrix m as u * np.diag(s) * v, where u and v are
    unitary and s is a 1-d array of m's singular values. Note that the SVD
    is commonly written as a = U S V.H; the v returned by np.linalg.svd is
    actually V.H (the Hermitian transpose). We therefore store it as vt
    and recover the actual v by transposing.

    The decomposition uses np.linalg.svd with full_matrices=False, so for
    m with shape (M, N), the shape of:
    - u is (M, K)
    - v is (K, N)
    where K = min(M, N).

    Inertia is the percentage of explained variance.

    Returns
    -------
    self, to enable method chaining
    """
    self.n_samples, self.n_features = self.ms.shape
    self.u, self.s, self.vt = np.linalg.svd(self.ms, full_matrices=False)
    self.v = self.vt.T
    # sklearn's svd_flip guarantees deterministic signs for the left and
    # right singular vectors (U and V) by requiring that, for each
    # component, the coefficient with the largest absolute value is
    # positive. This class defaults u_based_decision to False rather than
    # sklearn's default of True, basing the choice on V rather than U so
    # that the resulting components and loadings have large positive
    # coefficients. Note: svd_flip expects the factors as returned by
    # np.linalg.svd, so we pass vt (V.H), not v.
    self.u, self.vt = svd_flip(
        self.u, self.vt, u_based_decision=self.u_based_decision
    )
    self.v = self.vt.T
    # Keep only the components whose eigenvalue exceeds the threshold;
    # *keep* is the number of components retained.
    self.eigenvalues = self.s ** 2 / self.n_samples
    self.keep = np.count_nonzero(self.eigenvalues > self.threshold)
    self.inertia = (self.eigenvalues / self.eigenvalues.sum())[: self.keep]
    self.cumulative_inertia = self.inertia.cumsum()[: self.keep]
    self.eigenvalues = self.eigenvalues[: self.keep]
    return self
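The eigenvalue and inertia bookkeeping in fit() can be reproduced standalone. Here is a minimal self-contained sketch with illustrative names (ms for the centered matrix, matching the snippet's convention):

import numpy as np
from sklearn.utils.extmath import svd_flip

ms = np.random.RandomState(0).randn(100, 6)
ms -= ms.mean(axis=0)  # center the data first
u, s, vt = np.linalg.svd(ms, full_matrices=False)
u, vt = svd_flip(u, vt, u_based_decision=False)
eigenvalues = s ** 2 / ms.shape[0]
inertia = eigenvalues / eigenvalues.sum()  # fraction of explained variance
print(inertia.cumsum())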