This article collects typical usage examples of the Python method scipy.sparse.linalg.eigs. If you are wondering what linalg.eigs does or how to call it, the curated code samples below may help. You can also read more about the containing module, scipy.sparse.linalg.
The following presents 15 code examples of linalg.eigs, ordered by popularity.
Example 1: scaled_laplacian
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def scaled_laplacian(W):
    '''
    Compute the scaled graph Laplacian, 2 * L_sym / lambda_max - I,
    where L_sym is the symmetrically normalized Laplacian of W.

    Parameters
    ----------
    W: np.ndarray, adjacency matrix,
       shape is (num_of_vertices, num_of_vertices)

    Returns
    ----------
    np.ndarray, shape is (num_of_vertices, num_of_vertices)
    '''
    num_of_vertices = W.shape[0]
    d = np.sum(W, axis=1)
    L = np.diag(d) - W
    # symmetric normalization: L[i, j] / sqrt(d_i * d_j)
    for i in range(num_of_vertices):
        for j in range(num_of_vertices):
            if (d[i] > 0) and (d[j] > 0):
                L[i, j] = L[i, j] / np.sqrt(d[i] * d[j])
    # lambda_max (the largest eigenvalue of L) is approximately 2.0
    lambda_max = eigs(L, k=1, which='LR')[0][0].real
    return 2 * L / lambda_max - np.identity(num_of_vertices)
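A minimal usage sketch (not from the original repository) showing the same eigs call on an assumed 6-node ring-graph adjacency matrix:

import numpy as np
from scipy.sparse.linalg import eigs

# assumed toy input: adjacency matrix of a 6-node ring graph
n = 6
W = np.zeros((n, n))
for i in range(n):
    W[i, (i + 1) % n] = W[(i + 1) % n, i] = 1.0

d = W.sum(axis=1)
L = (np.diag(d) - W) / np.sqrt(np.outer(d, d))    # symmetric normalization (all degrees > 0)
lambda_max = eigs(L, k=1, which='LR')[0][0].real  # largest-real-part eigenvalue via ARPACK
L_scaled = 2 * L / lambda_max - np.identity(n)
print(round(lambda_max, 3), L_scaled.shape)       # ~2.0, (6, 6)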
Example 2: check_stability
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def check_stability(self, verbose=False):
    """
    Check that the weight matrix is stable.

    :return: True if the largest real eigenvalue of W_effective is below 1
    """
    if self.K < 100:
        eigs = np.linalg.eigvals(self.weight_model.W_effective)
        maxeig = np.amax(np.real(eigs))
    else:
        from scipy.sparse.linalg import eigs
        # k=1 returns the eigenvalue of largest magnitude; compare its real part
        maxeig = eigs(self.weight_model.W_effective, k=1)[0][0].real

    if verbose:
        print("Max eigenvalue: ", maxeig)

    return maxeig < 1.0
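For context, a standalone sketch (random nonnegative matrix, assumed data) comparing the dense and ARPACK branches used above:

import numpy as np
from scipy.sparse.linalg import eigs

rng = np.random.default_rng(0)
W_effective = 0.005 * rng.random((150, 150))        # assumed small nonnegative weight matrix
dense_max = np.amax(np.real(np.linalg.eigvals(W_effective)))
arpack_max = eigs(W_effective, k=1, which='LM')[0][0].real
# for a nonnegative matrix the Perron eigenvalue is real and has largest magnitude,
# so both estimates agree; here the matrix is stable
print(dense_max, arpack_max, arpack_max < 1.0)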
Example 3: learn_embedding
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def learn_embedding(self, graph=None, edge_f=None,
                    is_weighted=False, no_python=False):
    if not graph and not edge_f:
        raise Exception('graph/edge_f needed')
    if not graph:
        graph = graph_util.loadGraphFromEdgeListTxt(edge_f)
    graph = graph.to_undirected()
    t1 = time()
    L_sym = nx.normalized_laplacian_matrix(graph)
    try:
        w, v = lg.eigs(L_sym, k=self._d + 1, which='SM')
        t2 = time()
        self._X = v[:, 1:]
        p_d_p_t = np.dot(v, np.dot(np.diag(w), v.T))
        eig_err = np.linalg.norm(p_d_p_t - L_sym)
        print('Laplacian matrix recon. error (low rank): %f' % eig_err)
        return self._X, (t2 - t1)
    except Exception:
        print('Eigendecomposition did not converge. Assigning random embedding')
        self._X = np.random.randn(L_sym.shape[0], self._d)
        t2 = time()
        return self._X, (t2 - t1)
Example 4: getLeadingEigenvector
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def getLeadingEigenvector(A, normalized=True, lanczosVecs=15, maxiter=1000):
    """Compute the normalized leading eigenvector of a given matrix A.

    @param A: sparse matrix for which the leading eigenvector will be computed
    @param normalized: whether or not to normalize. Default is C{True}
    @param lanczosVecs: number of Lanczos vectors to be used in the approximate
        calculation of eigenvectors and eigenvalues. This maps to the ncv parameter
        of scipy's underlying function eigs.
    @param maxiter: scaling factor for the number of iterations to be used in the
        approximate calculation of eigenvectors and eigenvalues. The number of iterations
        passed to scipy's underlying eigs function will be n*maxiter where n is the
        number of rows/columns of the Laplacian matrix.
    """
    if not _sparse.issparse(A):
        raise TypeError("A must be a sparse matrix")

    # NOTE: ncv sets additional auxiliary eigenvectors that are computed
    # NOTE: in order to be more confident to find the one with the largest
    # NOTE: magnitude, see https://github.com/scipy/scipy/issues/4987
    w, pi = _sla.eigs(A, k=1, which="LM", ncv=lanczosVecs, maxiter=maxiter)
    pi = pi.reshape(pi.size, )
    if normalized:
        pi /= sum(pi)
    return pi
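A self-contained sketch (assumed toy 4-state transition matrix; the ncv argument is omitted here since it may not exceed the matrix dimension) of the same leading-eigenvector call:

import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as sla

# assumed row-stochastic transition matrix of a 4-state chain
T = sparse.csr_matrix(np.array([[0.0, 0.5, 0.5, 0.0],
                                [0.3, 0.0, 0.3, 0.4],
                                [0.5, 0.5, 0.0, 0.0],
                                [0.0, 0.2, 0.8, 0.0]]))

# the stationary distribution is the leading left eigenvector, i.e. the leading eigenvector of T^T
w, pi = sla.eigs(T.T, k=1, which="LM")
pi = np.real(pi.reshape(pi.size, ))
pi /= pi.sum()
print(w[0].real, pi)   # eigenvalue ~1.0, probabilities summing to 1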
Example 5: get_pagerank_with_teleportation_from_transition_matrix
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def get_pagerank_with_teleportation_from_transition_matrix(rw_transition, rw_transition_t, rho):
    number_of_nodes = rw_transition.shape[0]

    # Set up the random walk with teleportation operator.
    non_teleportation = 1 - rho
    mv = lambda l, v: non_teleportation * l.dot(v) + (rho / number_of_nodes) * np.ones_like(v)
    teleport = lambda vec: mv(rw_transition_t, vec)
    rw_transition_operator = spla.LinearOperator(rw_transition.shape, matvec=teleport, dtype=np.float64)

    # Calculate the stationary distribution as the leading eigenvector.
    try:
        eigenvalue, stationary_distribution = spla.eigs(rw_transition_operator,
                                                        k=1,
                                                        which='LM',
                                                        return_eigenvectors=True)
    except spla.ArpackNoConvergence as e:
        print("ARPACK has not converged.")
        eigenvalue = e.eigenvalues
        stationary_distribution = e.eigenvectors

    stationary_distribution = stationary_distribution.flatten().real
    stationary_distribution /= stationary_distribution.sum()

    return stationary_distribution
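For illustration, a standalone sketch (assumed 5-node directed cycle) using a strictly linear Google-matrix action, i.e. with an explicit v.sum() factor in the teleportation term, which differs slightly from the affine form above:

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

# assumed directed 5-node cycle; P[i, j] = probability of stepping from i to j
n, rho = 5, 0.15
P = sp.csr_matrix((np.ones(n), (np.arange(n), (np.arange(n) + 1) % n)), shape=(n, n))

# G^T v = (1 - rho) * P^T v + (rho / n) * sum(v) * 1
matvec = lambda v: (1 - rho) * P.T.dot(v) + (rho / n) * v.sum() * np.ones_like(v)
G_t = spla.LinearOperator((n, n), matvec=matvec, dtype=np.float64)

val, vec = spla.eigs(G_t, k=1, which='LM')
pagerank = vec.flatten().real
pagerank /= pagerank.sum()
print(val[0].real, pagerank)   # eigenvalue ~1, uniform PageRank ~0.2 per node for the cycle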
Example 6: test_power_method
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def test_power_method(self):
    """test for inverse power iteration method"""
    # solve eigenvalue problem in TT format
    evp.power_method(self.operator_tt, self.initial_tt, operator_gevp=self.operator_gevp)
    eigenvalue_tt, eigenvector_tt = evp.power_method(self.operator_tt, self.initial_tt)

    # solve eigenvalue problem in matrix format
    eigenvalue_mat, eigenvector_mat = splin.eigs(self.operator_mat, k=1)

    # compute relative error between exact and approximate eigenvalues
    rel_err_val = np.abs(eigenvalue_mat - eigenvalue_tt) / np.abs(eigenvalue_mat)

    # compute relative error between exact and approximate eigenvectors
    norm_1 = np.linalg.norm(eigenvector_mat + eigenvector_tt.matricize()[:, None])
    norm_2 = np.linalg.norm(eigenvector_mat - eigenvector_tt.matricize()[:, None])
    rel_err_vec = np.amin([norm_1, norm_2]) / np.linalg.norm(eigenvector_mat)

    # check if relative errors are smaller than tolerance
    self.assertLess(rel_err_val, self.tol_eigval)
    self.assertLess(rel_err_vec, self.tol_eigvec)
Example 7: learn_embedding
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def learn_embedding(self, graph=None, edge_f=None,
                    is_weighted=False, no_python=False):
    if not graph and not edge_f:
        raise Exception('graph/edge_f needed')
    if not graph:
        graph = graph_util.loadGraphFromEdgeListTxt(edge_f)
    graph = graph.to_undirected()
    t1 = time()
    L_sym = nx.normalized_laplacian_matrix(graph)

    w, v = lg.eigs(L_sym, k=self._d + 1, which='SM')
    idx = np.argsort(w)  # sort eigenvalues
    w = w[idx]
    v = v[:, idx]
    t2 = time()

    self._X = v[:, 1:]

    p_d_p_t = np.dot(v, np.dot(np.diag(w), v.T))
    eig_err = np.linalg.norm(p_d_p_t - L_sym)
    print('Laplacian matrix recon. error (low rank): %f' % eig_err)
    return self._X.real, (t2 - t1)
Example 8: _method_2
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def _method_2(data, num_pcs=None):
    """Compute OPCA when num_observations <= num_dimensions."""
    data = np.nan_to_num(data - nanmean(data, axis=0))
    T = data.shape[0]
    tmp = np.dot(data, data.T)
    corr_offset = np.zeros(tmp.shape)
    corr_offset[1:] = tmp[:-1]
    corr_offset[:-1] += tmp[1:]
    if num_pcs is None:
        eivals, eivects = eig(corr_offset)
    else:
        eivals, eivects = eigs(corr_offset, num_pcs, which='LR')
    eivals = np.real(eivals)
    eivects = np.real(eivects)
    idx = np.argsort(-eivals)  # sort the eigenvectors and eigenvalues
    eivals = old_div(eivals[idx], (2. * (T - 1)))
    eivects = eivects[:, idx]
    transformed_eivects = np.dot(data.T, eivects)
    for i in range(transformed_eivects.shape[1]):  # normalize the eigenvectors
        transformed_eivects[:, i] /= np.linalg.norm(transformed_eivects[:, i])
    return eivals, transformed_eivects, np.dot(data, transformed_eivects)
Example 9: ldos0d_wf
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def ldos0d_wf(h, e=0.0, delta=0.01, num_wf=10, robust=False, tol=0):
    """Calculate the local density of states of a Hamiltonian and
    write it to a file, using ARPACK"""
    if h.dimensionality == 0:  # only for 0d
        intra = csc_matrix(h.intra)  # matrix
    else:
        raise NotImplementedError('only implemented for 0d Hamiltonians')
    if robust:  # go to the imaginary axis for stability
        eig, eigvec = slg.eigs(intra, k=int(num_wf), which="LM",
                               sigma=e + 1j * delta, tol=tol)
        eig = eig.real  # real part only
    else:  # Hermitian Hamiltonian
        eig, eigvec = slg.eigsh(intra, k=int(num_wf), which="LM", sigma=e, tol=tol)
    d = np.array([0.0 for i in range(intra.shape[0])])  # initialize
    for (v, ie) in zip(eigvec.transpose(), eig):  # loop over wavefunctions
        v2 = (np.conjugate(v) * v).real  # square of the wavefunction
        fac = delta / ((e - ie)**2 + delta**2)  # Lorentzian factor approximating a delta
        d += fac * v2  # add contribution
    # d /= num_wf  # normalize
    d /= np.pi  # normalize
    d = spatial_dos(h, d)  # resum if necessary
    g = h.geometry  # store geometry
    write_ldos(g.x, g.y, d, z=g.z)  # write to file
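A minimal shift-invert sketch (assumed random sparse Hermitian matrix rather than a real Hamiltonian) showing the sigma argument used above to target eigenvalues near the energy e:

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as slg

rng = np.random.default_rng(1)
A = (sp.random(400, 400, density=0.02, random_state=rng)
     + 1j * sp.random(400, 400, density=0.02, random_state=rng))
H = 0.5 * (A + A.conj().T)                 # assumed sparse Hermitian "Hamiltonian"
e, delta = 0.0, 0.05

# shift-invert returns the eigenpairs whose eigenvalues lie closest to sigma = e + i*delta
eig, eigvec = slg.eigs(H.tocsc(), k=6, which="LM", sigma=e + 1j * delta)
print(np.sort(eig.real))                   # a handful of eigenvalues nearest e = 0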
Example 10: ldos_arpack
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def ldos_arpack(intra, num_wf=10, robust=False, tol=0, e=0.0, delta=0.01):
    """Use ARPACK to calculate the local density of states at a certain energy"""
    if robust:  # go to the imaginary axis for stability
        eig, eigvec = slg.eigs(intra, k=int(num_wf), which="LM",
                               sigma=e + 1j * delta, tol=tol)
        eig = eig.real  # real part only
    else:  # Hermitian Hamiltonian
        eig, eigvec = slg.eigsh(intra, k=int(num_wf), which="LM", sigma=e, tol=tol)
    d = np.array([0.0 for i in range(intra.shape[0])])  # initialize
    for (v, ie) in zip(eigvec.transpose(), eig):  # loop over wavefunctions
        v2 = (np.conjugate(v) * v).real  # square of the wavefunction
        fac = delta / ((e - ie)**2 + delta**2)  # Lorentzian factor approximating a delta
        d += fac * v2  # add contribution
    # d /= num_wf  # normalize
    d /= np.pi  # normalize
    return d
Example 11: estimate
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def estimate(self, preference_matrix):
    super()._check_matrix(preference_matrix)
    width = preference_matrix.shape[0]
    _, vectors = eigs(preference_matrix, k=(width - 2), sigma=width, which='LM', v0=np.ones(width))
    # keep the first returned eigenvector that is not fully complex-valued (the real principal eigenvector)
    real_vector = np.real([vec for vec in np.transpose(vectors) if not np.all(np.imag(vec))][:1])
    sum_vector = np.sum(real_vector)
    self._evaluate_consistency(preference_matrix)
    return np.around(real_vector, decimals=3)[0] / sum_vector
Example 12: getSlowDownFactor
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def getSlowDownFactor(self, k=2, lanczosVecs=15, maxiter=1000):
    """
    Returns a factor S that indicates how much slower (S>1) or faster (S<1)
    a diffusion process evolves in a k-order model of the path statistics
    compared to what is expected based on a first-order model. This value captures
    the effect of order correlations of length k on a diffusion process which evolves
    based on the observed paths.
    """
    assert k > 1, 'Slow-down factor can only be calculated for orders larger than one'

    # NOTE to myself: most of the time goes into construction of the 2nd-order
    # NOTE null graph, then into the 2nd-order null transition matrix
    gk = HigherOrderNetwork(self, k=k)
    gkn = HigherOrderNetwork(self, k=k, nullModel=True)

    Log.add('Calculating slow down factor ... ', Severity.INFO)

    # Build transition matrices
    Tk = gk.getTransitionMatrix()
    Tkn = gkn.getTransitionMatrix()

    # Compute the two largest eigenvalues of each transition matrix
    # NOTE: ncv sets additional auxiliary eigenvectors that are computed
    # NOTE: in order to be more confident to find the one with the largest
    # NOTE: magnitude, see https://github.com/scipy/scipy/issues/4987
    w2 = _sla.eigs(Tk, which="LM", k=2, ncv=lanczosVecs, return_eigenvectors=False, maxiter=maxiter)
    evals2_sorted = _np.sort(-_np.absolute(w2))

    w2n = _sla.eigs(Tkn, which="LM", k=2, ncv=lanczosVecs, return_eigenvectors=False, maxiter=maxiter)
    evals2n_sorted = _np.sort(-_np.absolute(w2n))

    Log.add('finished.', Severity.INFO)

    return _np.log(_np.abs(evals2n_sorted[1])) / _np.log(_np.abs(evals2_sorted[1]))
Example 13: getEigenValueGap
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def getEigenValueGap(self, includeSubPaths=True, lanczosVecs=15, maxiter=20):
    """
    Returns the eigenvalue gap of the transition matrix.

    @param includeSubPaths: whether or not to include subpath statistics in the
        calculation of transition probabilities.
    """
    # NOTE to myself: most of the time goes into construction of the 2nd-order
    # NOTE null graph, then into the 2nd-order null transition matrix

    Log.add('Calculating eigenvalue gap ... ', Severity.INFO)

    # Build the transition matrix
    T = self.getTransitionMatrix(includeSubPaths)

    # Compute the two largest eigenvalues
    # NOTE: ncv sets additional auxiliary eigenvectors that are computed
    # NOTE: in order to be more confident to actually find the one with the largest
    # NOTE: magnitude, see https://github.com/scipy/scipy/issues/4987
    w2 = _sla.eigs(T, which="LM", k=2, ncv=lanczosVecs, return_eigenvectors=False, maxiter=maxiter)
    evals2_sorted = _np.sort(-_np.absolute(w2))

    Log.add('finished.', Severity.INFO)

    return _np.abs(evals2_sorted[1])
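A standalone sketch (assumed random walk on a 30-node path graph with self-loops) of the same k=2 call used to read off the second-largest eigenvalue magnitude:

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as sla

# assumed adjacency (path graph with self-loops) and its random-walk transition matrix
n = 30
A = sp.diags([np.ones(n - 1), np.ones(n), np.ones(n - 1)], [-1, 0, 1], format='csr')
T = sp.diags(1.0 / np.asarray(A.sum(axis=1)).ravel()).dot(A)   # row-normalize

w2 = sla.eigs(T, which="LM", k=2, ncv=15, return_eigenvectors=False, maxiter=20 * n)
evals_sorted = np.sort(-np.absolute(w2))
print(np.abs(evals_sorted[0]), np.abs(evals_sorted[1]))   # ~1.0 and |lambda_2|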
Example 14: est_CompGraph_norm
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def est_CompGraph_norm(K, tol=1e-3, try_fast_norm=True):
    """Estimate the operator norm L = ||K||.

    Parameters
    ----------
    tol : float
        Accuracy of the estimate if not trying for an upper bound.
    try_fast_norm : bool
        Whether to try for a fast upper bound.

    Returns
    -------
    float
        Estimate of ||K||.
    """
    if try_fast_norm:
        output_mags = [NotImplemented]
        K.norm_bound(output_mags)
        if NotImplemented not in output_mags:
            return output_mags[0]

    input_data = np.zeros(K.input_size)
    output_data = np.zeros(K.output_size)

    def KtK(x):
        K.forward(x, output_data)
        K.adjoint(output_data, input_data)
        return input_data

    # Define the linear operator K^T K and take the square root of its largest eigenvalue
    A = LinearOperator((K.input_size, K.input_size),
                       KtK, KtK)
    Knorm = np.sqrt(eigs(A, k=1, M=None, sigma=None, which='LM', tol=tol)[0][0].real)
    return float(Knorm)
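A self-contained sketch (assumed dense random matrix, not the ProxImaL CompGraph API) of the same norm-estimation idea, sqrt of the largest eigenvalue of K^T K applied matrix-free:

import numpy as np
from scipy.sparse.linalg import LinearOperator, eigs

rng = np.random.default_rng(0)
M = rng.standard_normal((300, 120))               # assumed arbitrary linear map

# ||M||_2 = sqrt(lambda_max(M^T M)); apply M^T M without forming it explicitly
AtA = LinearOperator((120, 120), matvec=lambda x: M.T @ (M @ x), dtype=np.float64)
norm_est = np.sqrt(eigs(AtA, k=1, which='LM', tol=1e-3)[0][0].real)

print(norm_est, np.linalg.norm(M, 2))             # the two estimates should agree closely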
Example 15: init_matrices
# Required module: from scipy.sparse import linalg [as alias]
# or: from scipy.sparse.linalg import eigs [as alias]
def init_matrices(self):
    self.W_in = (2.0 * np.random.rand(self.N_x, self.num_dim) - 1.0) / (2.0 * self.scaleW_in)

    converged = False
    i = 0
    # repeat in case ARPACK fails to converge on the eigenvalue computation
    while not converged:
        i += 1
        # generate sparse, uniformly distributed weights
        self.W = sparse.rand(self.N_x, self.N_x, density=self.connect).todense()
        # ensure that the non-zero values are uniformly distributed around zero
        self.W[np.where(self.W > 0)] -= 0.5
        try:
            # get the eigenvalue of largest magnitude
            eig, _ = slinalg.eigs(self.W, k=1, which='LM')
            converged = True
        except Exception:
            print('not converged ', i)
            continue

    # adjust the spectral radius to rho
    self.W /= np.abs(eig) / self.rho
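A condensed standalone sketch (assumed reservoir size, sparsity, and target radius) of the same spectral-radius rescaling step on a sparse random matrix:

import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as slinalg

N_x, connect, rho = 500, 0.05, 0.9                 # assumed hyperparameters
W = sparse.random(N_x, N_x, density=connect, format='csr')
W.data -= 0.5                                      # center the non-zero entries around zero

eig = slinalg.eigs(W, k=1, which='LM', return_eigenvectors=False)
W = W * (rho / np.abs(eig[0]))                     # spectral radius of W is now ~rho
print(np.abs(slinalg.eigs(W, k=1, which='LM', return_eigenvectors=False)[0]))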