本文整理汇总了Python中scipy.sparse.linalg.svds方法的典型用法代码示例。如果您正苦于以下问题:Python linalg.svds方法的具体用法?Python linalg.svds怎么用?Python linalg.svds使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类scipy.sparse.linalg
的用法示例。
在下文中一共展示了linalg.svds方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: learn_embedding
# Requires: from scipy.sparse import linalg (used as lg)
def learn_embedding(self):
    """Learn a node embedding by factorizing S = M_g^{-1} M_l.

    With M_g fixed to the identity and M_l = A*A (second-order
    proximity), S reduces to the squared adjacency matrix. The top
    d/2 singular triplets of S yield two half-embeddings (source and
    target roles) that are concatenated into ``self._X``.
    """
    adj = nx.to_numpy_matrix(self.g.G)
    n = self.g.G.number_of_nodes()
    # A beta-weighted Katz variant (M_g = I - beta*A, M_l = beta*A)
    # was used in an earlier revision; this version uses I and A^2.
    global_mat = np.eye(n)
    local_mat = np.dot(adj, adj)
    sim = np.dot(np.linalg.inv(global_mat), local_mat)
    # sing holds the top-k singular values (sigma_k)
    left, sing, right_t = lg.svds(sim, k=self._d // 2)
    root_sigma = np.diagflat(np.sqrt(sing))
    src_emb = np.dot(left, root_sigma)
    dst_emb = np.dot(right_t.T, root_sigma)
    self._X = np.concatenate((src_emb, dst_emb), axis=1)
示例2: learn_embedding
# Requires: from scipy.sparse import linalg (used as lg)
def learn_embedding(self):
    """Laplacian-eigenmap style embedding from I - D^{-1}A.

    Returns
    -------
    (X, elapsed) : the node embedding (d columns, the smallest
        non-trivial singular vectors) and the wall-clock seconds taken.
    """
    graph = self.g.G.to_undirected()
    t1 = time()
    A = nx.to_scipy_sparse_matrix(graph)
    # Row-normalize in place: A becomes the random-walk transition matrix.
    normalize(A, norm='l1', axis=1, copy=False)
    I_n = sp.eye(graph.number_of_nodes())
    I_min_A = I_n - A  # random-walk normalized Laplacian
    # (Removed a stray debug print that dumped the whole I - A matrix.)
    # 'SM' requests the d+1 smallest singular values; the very smallest
    # corresponds to the trivial constant vector and is dropped below.
    u, s, vt = lg.svds(I_min_A, k=self._d + 1, which='SM')
    t2 = time()
    self._X = vt.T
    self._X = self._X[:, 1:]
    return self._X, (t2 - t1)
示例3: add_data
# Requires: from scipy.sparse import linalg
def add_data(self, g, h, trn_graph, trn_x_index, trn_y_index, tst_graph,
             tst_x_index, tst_y_index, k=500, pos_up_ratio=5.0):
    """Register the bipartite train/test graphs and derive SVD features.

    Parameters
    ----------
    g, h : square affinity matrices over the two node sets (ng x ng, nh x nh).
    trn_graph, tst_graph : sparse ng x nh bipartite matrices whose data
        entries are the corresponding instances.
    trn_x_index, trn_y_index, tst_x_index, tst_y_index : paired row/column
        indices of the training / test cells.
    k : rank of the truncated SVD used to build the node features.
    pos_up_ratio : up-sampling ratio for positive cells during training.
    """
    self.g = g  # ng * ng
    self.h = h  # nh * nh
    self.trn_graph = trn_graph  # ng * nh (data are the corresponding instances)
    self.tst_graph = tst_graph  # ng * nh (data are the corresponding instances)
    self.ng = g.shape[0]
    self.nh = h.shape[0]
    self.sym_g = self.gen_sym_graph(self.g)
    self.sym_h = self.gen_sym_graph(self.h)
    # Rank-k factorization; scaling both sides by sqrt(s) makes
    # gX.dot(hX.T) approximate trn_graph.
    U, s, Vh = svdp(self.trn_graph, k=k)
    self.gX = U * np.sqrt(s)
    self.hX = Vh.T * np.sqrt(s)
    self.pos_trn_x_index, self.pos_trn_y_index = self.trn_graph.nonzero()
    self.trn_x_index, self.trn_y_index = trn_x_index, trn_y_index
    self.tst_x_index, self.tst_y_index = tst_x_index, tst_y_index
    self.pos_up_ratio = pos_up_ratio
    # Converted from Python-2 print statements for consistency with the
    # rest of the (Python 3) code base.
    print('bipartite shape:', trn_graph.shape)
    print('pos_num:', len(self.pos_trn_x_index))
    print('total training:', len(self.trn_x_index))
    print('pos_up_ratio:', self.pos_up_ratio)
示例4: learn_embedding
# Requires: from scipy.sparse import linalg (used as lg)
def learn_embedding(self, graph=None, edge_f=None,
                    is_weighted=False, no_python=False):
    """Embed via the d smallest singular vectors of I - D^{-1}A.

    Either ``graph`` (a networkx graph) or ``edge_f`` (an edge-list
    path) must be supplied. Returns (X, elapsed_seconds). Falls back
    to random factors when the ARPACK solver fails.
    """
    if not graph and not edge_f:
        raise Exception('graph/edge_f needed')
    if not graph:
        graph = graph_util.loadGraphFromEdgeListTxt(edge_f)
    graph = graph.to_undirected()
    t1 = time()
    A = nx.to_scipy_sparse_matrix(graph)
    normalize(A, norm='l1', axis=1, copy=False)  # random-walk normalization
    I_n = sp.eye(graph.number_of_nodes())
    I_min_A = I_n - A
    try:
        u, s, vt = lg.svds(I_min_A, k=self._d + 1, which='SM')
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit. The random fallback keeps the
        # caller alive when ARPACK does not converge; `s` is now 1-D to
        # match the shape svds would have returned.
        u = np.random.randn(A.shape[0], self._d + 1)
        s = np.random.randn(self._d + 1)
        vt = np.random.randn(self._d + 1, A.shape[0])
    t2 = time()
    self._X = vt.T
    self._X = self._X[:, 1:]  # drop the trivial smallest vector
    return self._X, (t2 - t1)
示例5: post_proC
# Requires: from scipy.sparse import linalg
def post_proC(C, K, d, alpha):
    """Post-process a self-expressive coefficient matrix into cluster labels.

    Parameters
    ----------
    C : coefficient matrix; K : number of clusters;
    d : dimension of each subspace; alpha : affinity sharpening exponent.

    Returns
    -------
    (grp, L) : 1-based cluster labels and the affinity matrix used.
    """
    C = 0.5 * (C + C.T)  # symmetrize
    r = min(d * K + 1, C.shape[0] - 1)
    # Fixed v0 makes the ARPACK iteration deterministic.
    U, S, _ = svds(C, r, v0=np.ones(C.shape[0]))
    U = U[:, ::-1]  # svds returns ascending order; flip to descending
    S = np.sqrt(S[::-1])
    S = np.diag(S)
    U = U.dot(S)
    U = normalize(U, norm='l2', axis=1)
    Z = U.dot(U.T)
    Z = Z * (Z > 0)  # keep only positive affinities
    L = np.abs(Z ** alpha)
    L = L / L.max()
    L = 0.5 * (L + L.T)
    spectral = cluster.SpectralClustering(n_clusters=K, eigen_solver='arpack',
                                          affinity='precomputed',
                                          assign_labels='discretize',
                                          random_state=66)
    # fit_predict fits the estimator itself; the original called fit()
    # first and then fit_predict(), clustering the same matrix twice.
    grp = spectral.fit_predict(L) + 1
    return grp, L
示例6: svddenseblock
# Requires: from scipy.sparse import linalg (used as slin)
def svddenseblock(m, rbd='avg'):
    """Locate a dense block of a sparse matrix via its leading singular pair.

    Parameters
    ----------
    m : sparse matrix (converted to floating point internally).
    rbd : a float threshold applied to sqrt(s1)*u1 / sqrt(s1)*v1, or the
        string 'avg' to threshold u1/v1 at 1/sqrt(dimension).

    Returns
    -------
    (rows, cols) : 0/1 integer indicator arrays marking block membership.
    """
    m = m.asfptype()
    left, sing, right_t = slin.svds(m, k=1, which='LM')
    row_vec = left[:, 0]
    col_vec = right_t[0, :]
    top_sv = sing[0]
    # The singular pair's sign is arbitrary: orient each vector so its
    # dominant entries are positive.
    if abs(max(row_vec)) < abs(min(row_vec)):
        row_vec = -1 * row_vec
    if abs(max(col_vec)) < abs(min(col_vec)):
        col_vec = -1 * col_vec
    scale = math.sqrt(top_sv)
    if type(rbd) is float:
        rows = ((scale * row_vec) >= rbd).astype(int)
        cols = ((scale * col_vec) >= rbd).astype(int)
    elif rbd == 'avg':
        nrow, ncol = m.shape
        rows = (row_vec >= 1.0 / math.sqrt(nrow)).astype(int)
        cols = (col_vec >= 1.0 / math.sqrt(ncol)).astype(int)
    return rows, cols
示例7: build
# Requires: from scipy.sparse import linalg
def build(self, operator=None, return_factors='vh'):
    """Compute a rank-``self.rank`` truncated SVD of the training matrix
    (or of ``operator`` when given) and store the factors.

    svds yields singular values in ascending order, so every returned
    factor is flipped into descending order before being stored; factors
    not requested via ``return_factors`` come back as None and are
    stored as-is.
    """
    if operator is None:
        svd_matrix = self.get_training_matrix(dtype=np.float64)
    else:
        svd_matrix = operator
    with track_time(self.training_time, verbose=self.verbose, model=self.method):
        user_factors, sigma, item_factors = svds(
            svd_matrix, k=self.rank, return_singular_vectors=return_factors)
    # Flip each factor that was actually computed into descending order.
    if user_factors is not None:
        user_factors = np.ascontiguousarray(user_factors[:, ::-1])
    if item_factors is not None:
        item_factors = np.ascontiguousarray(item_factors[::-1, :]).T
    if sigma is not None:
        sigma = np.ascontiguousarray(sigma[::-1])
    self.factors[self.data.fields.userid] = user_factors
    self.factors[self.data.fields.itemid] = item_factors
    self.factors['singular_values'] = sigma
示例8: learn_embedding
# Requires: from scipy.sparse import linalg (used as lg)
def learn_embedding(self, graph=None, edge_f=None,
                    is_weighted=False, no_python=False):
    """Embed nodes via the d smallest singular vectors of I - D^{-1}A.

    Either ``graph`` (networkx) or ``edge_f`` (edge-list path) must be
    given. Returns (X.real, elapsed_seconds).
    """
    if not graph and not edge_f:
        raise Exception('graph/edge_f needed')
    if not graph:
        graph = graph_util.loadGraphFromEdgeListTxt(edge_f)
    undirected = graph.to_undirected()
    start = time()
    adj = nx.to_scipy_sparse_matrix(undirected)
    # In-place row normalization: adj becomes the transition matrix.
    normalize(adj, norm='l1', axis=1, copy=False)
    laplacian = sp.eye(len(undirected.nodes)) - adj
    _, _, vt = lg.svds(laplacian, k=self._d + 1, which='SM')
    end = time()
    # Drop the trivial (smallest) vector, keep the next d.
    self._X = vt.T[:, 1:]
    return self._X.real, (end - start)
示例9: SVD_embedding
# Requires: from scipy.sparse import linalg
def SVD_embedding(G, output_filename, size=100):
    """Embed graph G by truncated SVD of its adjacency matrix and write
    the vectors to ``output_filename`` in word2vec text format.

    Each node's vector is W + C, the sum of its source-role and
    context-role factors, both scaled by sqrt(Sigma).
    """
    node_list = list(G.nodes())
    adjacency_matrix = nx.adjacency_matrix(G, node_list)
    adjacency_matrix = adjacency_matrix.astype(float)
    U, Sigma, VT = svds(adjacency_matrix, k=size)
    Sigma = np.diag(Sigma)
    W = np.matmul(U, np.sqrt(Sigma))
    C = np.matmul(VT.T, np.sqrt(Sigma))
    embeddings = W + C
    vectors = {}
    # `idx` instead of the original `id`, which shadowed the builtin.
    for idx, node in enumerate(node_list):
        vectors[node] = list(np.array(embeddings[idx]))
    # The with-block guarantees the handle is closed even if a write
    # fails (the original open()/close() pair leaked it on error).
    with open(output_filename, 'w') as fout:
        fout.write("{} {}\n".format(len(vectors), size))
        for node, vec in vectors.items():
            fout.write("{} {}\n".format(node,
                                        ' '.join([str(x) for x in vec])))
    return
示例10: fix_scipy_svds
# Requires: from scipy.sparse import linalg
def fix_scipy_svds(scipy_svds):
    r"""Reorder an svds result into decreasing singular-value order.

    scipy.sparse.linalg.svds returns singular values in increasing
    order; this flips that order and also transposes Vh into V.

    Parameters
    ----------
    scipy_svds: the (U, D, Vh) tuple returned by scipy.sparse.linalg.svds

    Returns
    -------
    U, D, V
        ordered in decreasing singular values
    """
    U, D, Vh = scipy_svds
    order = np.argsort(-D)
    return U[:, order], D[order], Vh.T[:, order]
示例11: _svd
# Requires: from scipy.sparse import linalg
def _svd(self, array, n_components, n_discard):
    """Returns first `n_components` left and right singular
    vectors u and v, discarding the first `n_discard`.

    Backend is chosen by ``self.svd_method`` ('randomized' or
    'arpack'). The arpack path falls back to dense eigsh on
    A.T*A / A*A.T when svds produces NaNs.
    NOTE(review): if ``self.svd_method`` is neither value, u/vt are
    never assigned and the final lines raise NameError — presumably
    validated upstream; confirm.
    """
    if self.svd_method == 'randomized':
        kwargs = {}
        if self.n_svd_vecs is not None:
            # n_svd_vecs maps onto the randomized solver's oversampling
            kwargs['n_oversamples'] = self.n_svd_vecs
        u, _, vt = randomized_svd(array, n_components,
                                  random_state=self.random_state,
                                  **kwargs)
    elif self.svd_method == 'arpack':
        u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
        if np.any(np.isnan(vt)):
            # some eigenvalues of A * A.T are negative, causing
            # sqrt() to be np.nan. This causes some vectors in vt
            # to be np.nan; recompute them from A.T * A directly.
            A = safe_sparse_dot(array.T, array)
            random_state = check_random_state(self.random_state)
            # initialize with [-1,1] as in ARPACK
            v0 = random_state.uniform(-1, 1, A.shape[0])
            _, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
            vt = v.T
        if np.any(np.isnan(u)):
            # Same NaN fallback for the left singular vectors.
            A = safe_sparse_dot(array, array.T)
            random_state = check_random_state(self.random_state)
            # initialize with [-1,1] as in ARPACK
            v0 = random_state.uniform(-1, 1, A.shape[0])
            _, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
    assert_all_finite(u)
    assert_all_finite(vt)
    # Discard the leading vectors (e.g. the trivial constant direction).
    u = u[:, n_discard:]
    vt = vt[n_discard:]
    return u, vt.T
示例12: coil_compress
# Requires: from scipy.sparse import linalg
def coil_compress(kspace, out_coils):
    """Compress multi-coil k-space down to ``out_coils`` virtual coils via SVD.

    ``kspace`` is a real tensor of shape (coils, ..., 2) whose last axis
    holds real/imaginary parts; the output keeps that layout with the
    coil axis reduced. Inputs with at most ``out_coils`` coils are
    returned untouched.
    """
    if kspace.shape[0] <= out_coils:
        return kspace
    # Re-assemble complex data and flatten all non-coil axes.
    arr = kspace.numpy()
    cplx = arr[..., 0] + 1j * arr[..., 1]
    start_shape = tuple(cplx.shape)
    in_coils = start_shape[0]
    flat = cplx.reshape(in_coils, -1)
    try:
        if in_coils == 5:
            # assumes 5 coils is too few for the truncated solver — full SVD
            basis, _, _ = svd(flat, full_matrices=False)
        else:
            basis, _, _ = svds(flat, k=out_coils)
    except Exception as e:
        print("SVD failed: ", flat.shape)
        traceback.print_exc(file=sys.stdout)
        raise e
    # Project onto the leading left singular vectors.
    projector = np.transpose(np.conj(basis[:, :out_coils]))
    compressed = projector @ flat
    compressed = np.reshape(compressed, (out_coils, ) + start_shape[1:])
    return torch.stack((torch.Tensor(np.real(compressed)),
                        torch.Tensor(np.imag(compressed))), dim=-1)
示例13: _my_svd
# Requires: from scipy.sparse import linalg
def _my_svd(M, k, algorithm):
    """Truncated SVD of M using the requested backend.

    ``algorithm`` is 'randomized' (sklearn randomized_svd) or 'arpack'
    (scipy svds); any other value raises ValueError. Returns (U, S, V)
    with singular values in descending order.
    """
    if algorithm == 'randomized':
        return randomized_svd(
            M, n_components=min(k, M.shape[1]-1), n_oversamples=20)
    if algorithm == 'arpack':
        U, S, V = svds(M, k=min(k, min(M.shape)-1))
        # svds returns ascending order: flip, then fix the sign
        # convention so results are deterministic.
        S = S[::-1]
        U, V = svd_flip(U[:, ::-1], V[::-1])
        return (U, S, V)
    raise ValueError("unknown algorithm")
示例14: __init__
# Requires: from scipy.sparse import linalg
def __init__(self, DF, cols=None, ncols=None, benzecri=True, TOL=1e-4,
             sparse=False, approximate=False):
    """Correspondence-analysis decomposition of the table in DF.

    Builds the correspondence matrix, forms the standardized residuals,
    and keeps the SVD factors (P, s, Q) plus the inertia spectrum.
    `benzecri=True` applies the Benzecri eigenvalue correction.
    NOTE(review): `approximate` is accepted but never read in this
    block — presumably consumed elsewhere; confirm.
    """
    X, self.K, self.J = process_df(DF, cols, ncols)
    S = X.sum().sum()  # grand total of the table
    Z = X / S  # correspondence matrix
    self.r = Z.sum(axis=1)  # row masses
    self.c = Z.sum()  # column masses
    self.cor = benzecri
    # eps guards against division by zero for empty rows/columns
    eps = finfo(float).eps
    self.D_r = (diags if sparse else diag)(1/(eps + sqrt(self.r)))
    self.D_c = diag(1/(eps + sqrt(self.c)))  # can't use diags here
    Z_c = Z - outer(self.r, self.c)  # standardized residuals matrix
    product = self.D_r.dot(Z_c).dot(self.D_c)
    if sparse:
        P, s, Q = svds(product, min(product.shape)-1 if ncols is None else ncols)
        # svds and svd use complementary orders
        self.P = P.T[::-1].T
        self.Q = Q[::-1]
        self.s = s[::-1]
        self._numitems = min(product.shape)-1
    else:
        self._numitems = len(DF)
        self.P, self.s, self.Q = svd(product)
    self.E = None
    # Benzecri-corrected eigenvalues, else plain squared singular values
    E = self._benzecri() if self.cor else self.s**2
    self.inertia = sum(E)
    # argmax finds the first eigenvalue below TOL; it returns 0 when
    # none qualifies, hence the full-length fallback below
    self.rank = argmax(E < TOL)
    if not self.rank: self.rank = len(E)
    self.L = E[:self.rank]
示例15: _svd
# 需要导入模块: from scipy.sparse import linalg [as 别名]
# 或者: from scipy.sparse.linalg import svds [as 别名]
def _svd(cls, matrix, num_concepts=5):
"""
Perform singular value decomposition for dimensionality reduction of the input matrix.
"""
u, s, v = svds(matrix, k=num_concepts)
return u, s, v