This article collects typical usage examples of the Python method sandbox.util.Util.Util.indEig. If you are unsure what Util.indEig does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also browse further usage examples of the containing class, sandbox.util.Util.Util.
Nine code examples of the Util.indEig method are shown below, ordered by popularity by default.
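Judging only from the calls in the examples below, Util.indEig appears to take a 1-d array of eigenvalues, a matrix whose columns are the matching eigenvectors, and an index array, and to return the selected eigenvalues and eigenvector columns in that order. This is an inference from usage, not the library's source; a minimal numpy-only stand-in that matches the call pattern might look like this:

import numpy

def indEigSketch(lmbda, Q, inds):
    #Hypothetical stand-in for Util.indEig: pick the eigenvalues at inds and
    #the matching eigenvector columns, in the order given by inds.
    return lmbda[inds], Q[:, inds]

#Select the 3 largest-magnitude eigenpairs of a small symmetric matrix
A = numpy.random.randn(6, 6)
A = A + A.T
lmbda, Q = numpy.linalg.eigh(A)
inds = numpy.flipud(numpy.argsort(numpy.abs(lmbda)))[0:3]
lmbda3, Q3 = indEigSketch(lmbda, Q, inds)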
Example 1: testIncrementEigenSystem
# Required import: from sandbox.util.Util import Util [as alias]
# Or: from sandbox.util.Util.Util import indEig [as alias]
def testIncrementEigenSystem(self):
    print("< testIncrementEigenSystem >")
    numVertices = 10
    graph = SparseGraph(GeneralVertexList(numVertices))

    p = 0.4
    generator = ErdosRenyiGenerator(p)
    graph = generator.generate(graph)

    W = graph.getWeightMatrix()
    L = graph.laplacianMatrix()
    degrees = graph.outDegreeSequence()
    D = numpy.diag(degrees)

    lmbda1, Q1 = scipy.linalg.eig(L, D)
    lmbda1 = lmbda1.real
    #D-normalise the eigenvector columns
    Q1 = Q1.dot(numpy.diag(numpy.diag(Q1.T.dot(D).dot(Q1))**-0.5))

    tol = 10**-6
    k = 3
    inds = numpy.argsort(lmbda1)[0:k]
    lmbda1, Q1 = Util.indEig(lmbda1, Q1, inds)

    #Similarity change vector
    w = graph.getEdge(5, 7)
    deltaW = 0.5

    k = 3
    clusterer = NingSpectralClustering(k)
    lmbda2Approx, Q2Approx = clusterer.incrementEigenSystem(lmbda1, Q1, scipy.sparse.csr_matrix(W), 5, 7, deltaW)

    #Compute real eigenvectors then compare against these
    Lhat = L.copy()
    Lhat[5, 5] += deltaW
    Lhat[7, 7] += deltaW
    Lhat[5, 7] -= deltaW
    Lhat[7, 5] -= deltaW

    Dhat = numpy.diag(numpy.diag(Lhat))
    lmbda2, Q2 = scipy.linalg.eig(Lhat, Dhat)
    lmbda2, Q2 = Util.indEig(lmbda2, Q2, inds)

    #Normalise the eigenvector columns before comparing
    Q2Approx = Q2Approx.dot(numpy.diag(numpy.diag(Q2Approx.T.dot(Q2Approx))**-0.5))
    Q2 = Q2.dot(numpy.diag(numpy.sum(Q2**2, 0)**-0.5))
    Q1 = Q1.dot(numpy.diag(numpy.sum(Q1**2, 0)**-0.5))

    #Errors in the eigenvalues
    logging.debug("Eigenvalue Errors")
    logging.debug(numpy.linalg.norm(lmbda2 - lmbda2Approx))
    logging.debug(numpy.linalg.norm(lmbda2 - lmbda1))

    #Compute error according to the paper
    error = numpy.sum(1 - numpy.diag(Q2.T.dot(Q2Approx))**2)
    error2 = numpy.sum(1 - numpy.diag(Q2.T.dot(Q1))**2)
    logging.debug("Eigenvector Errors")
    logging.debug(error)
    logging.debug(error2)
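The selection pattern used in this test — solve the generalised problem L v = lambda D v and keep the k smallest eigenpairs — can be reproduced with scipy alone. A small self-contained sketch, with the indEig step written inline since the sandbox package may not be available:

import numpy
import scipy.linalg

#A tiny graph Laplacian and its degree matrix
L = numpy.array([[2.0, -1.0, -1.0],
                 [-1.0, 2.0, -1.0],
                 [-1.0, -1.0, 2.0]])
D = numpy.diag(numpy.diag(L))

lmbda, Q = scipy.linalg.eig(L, D)       #generalised eigenproblem L v = lambda D v
lmbda = lmbda.real
k = 2
inds = numpy.argsort(lmbda)[0:k]        #indices of the k smallest eigenvalues
lmbdaK, QK = lmbda[inds], Q[:, inds]    #equivalent to Util.indEig(lmbda, Q, inds)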
Example 2: eigenAdd
# Required import: from sandbox.util.Util import Util [as alias]
# Or: from sandbox.util.Util.Util import indEig [as alias]
def eigenAdd(omega, Q, Y, k):
    """
    Perform an eigen update of the form A^*A + Y^*Y in which Y is a low-rank matrix
    and A^*A = Q Omega Q^*. We use the rank-k approximation of A^*A, Q_k Omega_k Q_k^*,
    and then approximate [A^*A_k + Y^*Y]_k.
    """
    #logging.debug("< eigenAdd >")
    Parameter.checkInt(k, 0, omega.shape[0])
    #if not numpy.isrealobj(omega) or not numpy.isrealobj(Q):
    #    raise ValueError("Eigenvalues and eigenvectors must be real")
    if omega.ndim != 1:
        raise ValueError("omega must be 1-d array")
    if omega.shape[0] != Q.shape[1]:
        raise ValueError("Must have same number of eigenvalues and eigenvectors")

    if __debug__:
        Parameter.checkOrthogonal(Q, tol=EigenUpdater.tol, softCheck=True, arrayInfo="input Q in eigenAdd()")

    #Taking the abs of the eigenvalues is correct
    inds = numpy.flipud(numpy.argsort(numpy.abs(omega)))

    omega, Q = Util.indEig(omega, Q, inds[numpy.abs(omega)>EigenUpdater.tol])
    Omega = numpy.diag(omega)

    YY = Y.conj().T.dot(Y)
    QQ = Q.dot(Q.conj().T)
    Ybar = Y - Y.dot(QQ)

    Pbar, sigmaBar, Qbar = numpy.linalg.svd(Ybar, full_matrices=False)
    inds = numpy.flipud(numpy.argsort(numpy.abs(sigmaBar)))
    inds = inds[numpy.abs(sigmaBar)>EigenUpdater.tol]
    Pbar, sigmaBar, Qbar = Util.indSvd(Pbar, sigmaBar, Qbar, inds)

    SigmaBar = numpy.diag(sigmaBar)
    Qbar = Ybar.T.dot(Pbar)
    Qbar = Qbar.dot(numpy.diag(numpy.diag(Qbar.T.dot(Qbar))**-0.5))

    r = sigmaBar.shape[0]

    YQ = Y.dot(Q)
    Zeros = numpy.zeros((r, omega.shape[0]))
    D = numpy.c_[Q, Qbar]

    YYQQ = YY.dot(QQ)
    Z = D.conj().T.dot(YYQQ + YYQQ.conj().T).dot(D)
    F = numpy.c_[numpy.r_[Omega - YQ.conj().T.dot(YQ), Zeros], numpy.r_[Zeros.T, SigmaBar.conj().dot(SigmaBar)]]
    F = F + Z

    pi, H = scipy.linalg.eigh(F)
    inds = numpy.flipud(numpy.argsort(numpy.abs(pi)))
    H = H[:, inds[0:k]]
    pi = pi[inds[0:k]]

    V = D.dot(H)
    #logging.debug("</ eigenAdd >")
    return pi, V
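For small matrices the quantity eigenAdd approximates can be computed exactly, which is handy when checking the update. The sketch below (pure numpy, with the index selection written inline) computes the top-k eigenpairs of A^*A + Y^*Y that a call such as eigenAdd(omega, Q, Y, k) is meant to approximate:

import numpy

numpy.random.seed(21)
A = numpy.random.randn(8, 5)
Y = numpy.random.randn(2, 5)                      #low-rank update: Y^*Y has rank 2
k = 3

omega, Q = numpy.linalg.eigh(A.T.dot(A))          #A^*A = Q diag(omega) Q^*
piExact, VExact = numpy.linalg.eigh(A.T.dot(A) + Y.T.dot(Y))
inds = numpy.flipud(numpy.argsort(numpy.abs(piExact)))[0:k]
piExact, VExact = piExact[inds], VExact[:, inds]  #target that eigenAdd(omega, Q, Y, k) approximates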
Example 3: eigenRemove
# Required import: from sandbox.util.Util import Util [as alias]
# Or: from sandbox.util.Util.Util import indEig [as alias]
def eigenRemove(omega, Q, n, k, debug=False):
    """
    Remove a set of rows and columns from a matrix whose eigen-decomposition
    is Q diag(omega) Q^T. Keep the first n rows/cols, i.e. the rows/cols from
    n to the end are removed, and k is the number of eigenvectors/values
    to return for the new matrix. We could generalise this to delete a given
    list of rows/cols.
    """
    #logging.debug("< eigenRemove >")
    Parameter.checkClass(omega, numpy.ndarray)
    Parameter.checkClass(Q, numpy.ndarray)
    Parameter.checkInt(k, 0, float('inf'))
    Parameter.checkInt(n, 0, Q.shape[0])
    if omega.ndim != 1:
        raise ValueError("omega must be 1-d array")
    if omega.shape[0] != Q.shape[1]:
        raise ValueError("Must have same number of eigenvalues and eigenvectors")

    if __debug__:
        Parameter.checkOrthogonal(Q, tol=EigenUpdater.tol, softCheck=True, arrayInfo="input Q in eigenRemove()")

    inds = numpy.flipud(numpy.argsort(numpy.abs(omega)))
    inds = inds[omega[inds]>EigenUpdater.tol]

    omega, Q = Util.indEig(omega, Q, inds[0:k])
    AB = (Q[0:n, :]*omega).dot(Q[n:, :].T)
    BB = (Q[n:, :]*omega).dot(Q[n:, :].T)

    p = BB.shape[0]
    Y1 = numpy.r_[numpy.zeros((n, p)), numpy.eye(p)]
    Y2 = -numpy.r_[AB, 0.5*BB]
    pi, V = EigenUpdater.eigenAdd2(omega, Q, Y1, Y2, k)

    #Check the last rows are zero
    if numpy.linalg.norm(V[n:, :]) >= EigenUpdater.tol:
        logging.warn("numpy.linalg.norm(V[n:, :])= %s" % str(numpy.linalg.norm(V[n:, :])))

    #logging.debug("</ eigenRemove >")
    if not debug:
        return pi, V[0:n, :]
    else:
        C = (Q*omega).dot(Q.T)
        K = C + Y1.dot(Y2.T) + Y2.dot(Y1.T)
        assert numpy.linalg.norm(BB - C[n:, n:]) <= EigenUpdater.tol
        assert numpy.linalg.norm(AB - C[0:n, n:]) <= EigenUpdater.tol, "%s \n %s" % (AB, C[0:n, n:])
        return pi, V[0:n, :], K, Y1, Y2, omega
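Similarly, the exact target of eigenRemove — the leading eigenpairs of the top-left n x n block once the trailing rows and columns are dropped — is easy to compute directly on a small matrix, which is how one might sanity-check the update. A pure-numpy sketch:

import numpy

numpy.random.seed(21)
C = numpy.random.randn(7, 7)
C = C.dot(C.T)                                    #symmetric matrix with decomposition Q diag(omega) Q^T
omega, Q = numpy.linalg.eigh(C)
n, k = 5, 3

piExact, VExact = numpy.linalg.eigh(C[0:n, 0:n])  #keep only the first n rows/cols
inds = numpy.flipud(numpy.argsort(numpy.abs(piExact)))[0:k]
piExact, VExact = piExact[inds], VExact[:, inds]  #target that eigenRemove(omega, Q, n, k) approximates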
Example 4: testEigenAdd2
# Required import: from sandbox.util.Util import Util [as alias]
# Or: from sandbox.util.Util.Util import indEig [as alias]
def testEigenAdd2(self):
    tol = 10**-6

    for i in range(10):
        m = numpy.random.randint(5, 10)
        n = numpy.random.randint(5, 10)
        p = numpy.random.randint(5, 10)
        A = numpy.random.randn(m, n)
        Y1 = numpy.random.randn(n, p)
        Y2 = numpy.random.randn(n, p)

        AA = A.conj().T.dot(A)
        Y1Y2 = Y1.dot(Y2.conj().T)

        lastError = 100

        omega, Q = numpy.linalg.eigh(AA)
        self.assertTrue(numpy.linalg.norm(AA - (Q*omega).dot(Q.conj().T)) < tol)
        C = AA + Y1Y2 + Y1Y2.conj().T

        for k in range(1, 9):
            pi, V, D, DUD = EigenUpdater.eigenAdd2(omega, Q, Y1, Y2, k, debug=True)

            # V is "orthogonal"
            self.assertTrue(numpy.linalg.norm(V.conj().T.dot(V) - numpy.eye(V.shape[1])) < tol)

            # The approximation converges to the exact decomposition
            C_k = (V*pi).dot(V.conj().T)
            error = numpy.linalg.norm(C - C_k)
            if Util.rank(C) == k:
                self.assertTrue(error <= tol)
            lastError = error

            # DomegaD corresponds to AA_k
            omega_k, Q_k = Util.indEig(omega, Q, numpy.flipud(numpy.argsort(omega))[0:k])
            DomegakD = (D*numpy.c_[omega_k[numpy.newaxis, :], numpy.zeros((1, max(D.shape[1]-k, 0)))]).dot(D.conj().T)
            self.assertTrue(numpy.linalg.norm((Q_k*omega_k).dot(Q_k.conj().T) - DomegakD) < tol)

            # DUD is exactly decomposed
            self.assertTrue(numpy.linalg.norm(Y1Y2 + Y1Y2.conj().T - D.dot(DUD).dot(D.conj().T)) < tol)
Example 5:
# Required import: from sandbox.util.Util import Util [as alias]
# Or: from sandbox.util.Util.Util import indEig [as alias]
import numpy
from sandbox.util.Util import Util

#Fragment: graph, generator and AA (a normalised Laplacian built earlier) are defined
#earlier in the original script
p = 0.001
generator.setP(p)
graph = generator.generate(graph, requireEmpty=False)
AA2 = graph.normalisedLaplacianSym()

U = AA2 - AA
#print(U)

k = 45
lmbdaA, QA = numpy.linalg.eigh(AA)
lmbdaA, QA = Util.indEig(lmbdaA, QA, numpy.flipud(numpy.argsort(lmbdaA)))
lmbdaAk, QAk = Util.indEig(lmbdaA, QA, numpy.flipud(numpy.argsort(lmbdaA))[0:k])

lmbdaU, QU = numpy.linalg.eigh(U)
lmbdaU, QU = Util.indEig(lmbdaU, QU, numpy.flipud(numpy.argsort(lmbdaU)))

AAk = (QAk*lmbdaAk).dot(QAk.T)

lmbdaAU, QAU = numpy.linalg.eigh(AA + U)
lmbdaAU, QAU = Util.indEig(lmbdaAU, QAU, numpy.flipud(numpy.argsort(lmbdaAU)))
lmbdaAUk, QAUk = Util.indEig(lmbdaAU, QAU, numpy.flipud(numpy.argsort(lmbdaAU))[0:k])

lmbdaAkU, QAkU = numpy.linalg.eigh(AAk + U)
lmbdaAkU, QAkU = Util.indEig(lmbdaAkU, QAkU, numpy.flipud(numpy.argsort(lmbdaAkU)))
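The fragment stops before actually comparing the two updated spectra it sets up. A self-contained illustration of that comparison — how close the eigenvalues of a rank-k truncation plus a perturbation come to those of the full matrix plus the same perturbation — might look like the following, with random matrices standing in for the graph Laplacians above:

import numpy

numpy.random.seed(21)
n, k = 40, 10
AA = numpy.random.randn(n, n)
AA = AA.dot(AA.T)                                      #symmetric test matrix
U = 0.01*numpy.random.randn(n, n)
U = U + U.T                                            #small symmetric perturbation

lmbdaA, QA = numpy.linalg.eigh(AA)
inds = numpy.flipud(numpy.argsort(lmbdaA))[0:k]
AAk = (QA[:, inds]*lmbdaA[inds]).dot(QA[:, inds].T)    #rank-k truncation of AA

lmbdaAU = numpy.flipud(numpy.sort(numpy.linalg.eigvalsh(AA + U)))
lmbdaAkU = numpy.flipud(numpy.sort(numpy.linalg.eigvalsh(AAk + U)))
print(numpy.linalg.norm(lmbdaAU[0:k] - lmbdaAkU[0:k])) #error in the leading k eigenvalues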
Example 6: eigenAdd2
# Required import: from sandbox.util.Util import Util [as alias]
# Or: from sandbox.util.Util.Util import indEig [as alias]
def eigenAdd2(omega, Q, Y1, Y2, k, debug=False):
    """
    Compute an approximation of the eigendecomposition of A^*A + Y1 Y2^* + Y2 Y1^*
    in which Y1, Y2 are low-rank matrices, Y1^*Y2=0 and A^*A = Q Omega Q^*. We
    use the rank-k approximation of A^*A, Q_k Omega_k Q_k^*, and then find
    [A^*A_k + Y1 Y2^* + Y2 Y1^*]. If debug=False then pi, V are returned which
    respectively correspond to all the eigenvalues/eigenvectors of
    [A^*A_k + Y1 Y2^* + Y2 Y1^*].
    """
    #logging.debug("< eigenAdd2 >")
    Parameter.checkInt(k, 0, float('inf'))
    Parameter.checkClass(omega, numpy.ndarray)
    Parameter.checkClass(Q, numpy.ndarray)
    Parameter.checkClass(Y1, numpy.ndarray)
    Parameter.checkClass(Y2, numpy.ndarray)
    if not numpy.isrealobj(omega) or not numpy.isrealobj(Q):
        logging.warn("Eigenvalues or eigenvectors are not real")
    if not numpy.isrealobj(Y1) or not numpy.isrealobj(Y2):
        logging.warn("Y1 or Y2 are not real")
    if omega.ndim != 1:
        raise ValueError("omega must be 1-d array")
    if omega.shape[0] != Q.shape[1]:
        raise ValueError("Must have same number of eigenvalues and eigenvectors")
    if Q.shape[0] != Y1.shape[0]:
        raise ValueError("Q must have the same number of rows as Y1 rows")
    if Q.shape[0] != Y2.shape[0]:
        raise ValueError("Q must have the same number of rows as Y2 rows")
    if Y1.shape[1] != Y2.shape[1]:
        raise ValueError("Y1 must have the same number of columns as Y2 columns")

    if __debug__:
        Parameter.checkArray(omega, softCheck=True, arrayInfo="omega as input in eigenAdd2()")
        Parameter.checkArray(Q, softCheck=True, arrayInfo="Q as input in eigenAdd2()")
        Parameter.checkOrthogonal(Q, tol=EigenUpdater.tol, softCheck=True, arrayInfo="Q as input in eigenAdd2()")
        Parameter.checkArray(Y1, softCheck=True, arrayInfo="Y1 as input in eigenAdd2()")
        Parameter.checkArray(Y2, softCheck=True, arrayInfo="Y2 as input in eigenAdd2()")

    #Get the first k eigenvectors/values of A^*A
    omega, Q = Util.indEig(omega, Q, numpy.flipud(numpy.argsort(omega))[0:k])

    QY1 = Q.conj().T.dot(Y1)
    Y1bar = Y1 - Q.dot(QY1)

    P1bar, sigma1Bar, Q1bar = Util.safeSvd(Y1bar)
    inds = numpy.arange(sigma1Bar.shape[0])[numpy.abs(sigma1Bar)>EigenUpdater.tol]
    P1bar, sigma1Bar, Q1bar = Util.indSvd(P1bar, sigma1Bar, Q1bar, inds)
    # checks on the SVD decomposition of Y1bar
    if __debug__:
        Parameter.checkArray(QY1, softCheck=True, arrayInfo="QY1 in eigenAdd2()")
        Parameter.checkArray(Y1bar, softCheck=True, arrayInfo="Y1bar in eigenAdd2()")
        Parameter.checkArray(P1bar, softCheck=True, arrayInfo="P1bar in eigenAdd2()")
        if not Parameter.checkOrthogonal(P1bar, tol=EigenUpdater.tol, softCheck=True, arrayInfo="P1bar in eigenAdd2()", investigate=True):
            print("corresponding sigma: ", sigma1Bar)
        Parameter.checkArray(sigma1Bar, softCheck=True, arrayInfo="sigma1Bar in eigenAdd2()")
        Parameter.checkArray(Q1bar, softCheck=True, arrayInfo="Q1bar in eigenAdd2()")
        if not Parameter.checkOrthogonal(Q1bar, tol=EigenUpdater.tol, softCheck=True, arrayInfo="Q1bar in eigenAdd2()"):
            print("corresponding sigma: ", sigma1Bar)

    del Y1bar

    P1barY2 = P1bar.conj().T.dot(Y2)
    QY2 = Q.conj().T.dot(Y2)
    Y2bar = Y2 - Q.dot(QY2) - P1bar.dot(P1barY2)

    P2bar, sigma2Bar, Q2bar = Util.safeSvd(Y2bar)
    inds = numpy.arange(sigma2Bar.shape[0])[numpy.abs(sigma2Bar)>EigenUpdater.tol]
    P2bar, sigma2Bar, Q2bar = Util.indSvd(P2bar, sigma2Bar, Q2bar, inds)
    # checks on the SVD decomposition of Y2bar
    if __debug__:
        Parameter.checkArray(P1barY2, softCheck=True, arrayInfo="P1barY2 in eigenAdd2()")
        Parameter.checkArray(QY2, softCheck=True, arrayInfo="QY2 in eigenAdd2()")
        Parameter.checkArray(Y2bar, softCheck=True, arrayInfo="Y2bar in eigenAdd2()")
        Parameter.checkArray(P2bar, softCheck=True, arrayInfo="P2bar in eigenAdd2()")
        Parameter.checkOrthogonal(P2bar, tol=EigenUpdater.tol, softCheck=True, arrayInfo="P2bar in eigenAdd2()")
        Parameter.checkArray(sigma2Bar, softCheck=True, arrayInfo="sigma2Bar in eigenAdd2()")
        Parameter.checkArray(Q2bar, softCheck=True, arrayInfo="Q2bar in eigenAdd2()")
        Parameter.checkOrthogonal(Q2bar, tol=EigenUpdater.tol, softCheck=True, arrayInfo="Q2bar in eigenAdd2()")

    del Y2bar

    r = omega.shape[0]
    p = Y1.shape[1]
    p1 = sigma1Bar.shape[0]
    p2 = sigma2Bar.shape[0]

    D = numpy.c_[Q, P1bar, P2bar]
    del P1bar
    del P2bar

    # rem: A*s = A.dot(diag(s)) ; A*s[:,new] = diag(s).dot(A)
    DStarY1 = numpy.r_[QY1, sigma1Bar[:, numpy.newaxis] * Q1bar.conj().T, numpy.zeros((p2, p))]
    DStarY2 = numpy.r_[QY2, P1barY2, sigma2Bar[:, numpy.newaxis] * Q2bar.conj().T]

    DStarY1Y2StarD = DStarY1.dot(DStarY2.conj().T)
    del DStarY1
    del DStarY2

    r = omega.shape[0]
    F = numpy.zeros((r+p1+p2, r+p1+p2))
#......... the rest of this example is omitted here .........
Example 7: testIncrementEigenSystem2
# Required import: from sandbox.util.Util import Util [as alias]
# Or: from sandbox.util.Util.Util import indEig [as alias]
def testIncrementEigenSystem2(self):
    """
    We use the example from the paper to see if the error in the eigenvalues
    and eigenvectors decreases.
    """
    print("< testIncrementEigenSystem2 >")

    numVertices = 10
    graph = SparseGraph(GeneralVertexList(numVertices))

    graph.addEdge(0, 1, 0.7)
    graph.addEdge(1, 2, 0.4)
    graph.addEdge(2, 3, 0.3)
    graph.addEdge(1, 3, 0.1)
    graph.addEdge(0, 4, 0.5)
    graph.addEdge(3, 4, 0.4)
    graph.addEdge(4, 5, 0.8)
    graph.addEdge(3, 5, 0.3)
    graph.addEdge(6, 5, 0.4)
    graph.addEdge(5, 9, 0.5)
    graph.addEdge(6, 9, 0.3)
    graph.addEdge(6, 7, 0.1)
    graph.addEdge(6, 8, 0.6)
    graph.addEdge(7, 8, 0.7)
    graph.addEdge(9, 8, 0.7)

    W = graph.getWeightMatrix()
    L = graph.laplacianWeightMatrix()
    degrees = numpy.sum(W, 0)
    D = numpy.diag(degrees)
    k = 3

    lmbda1, Q1 = scipy.linalg.eig(L, D)
    inds = numpy.argsort(lmbda1)[0:k]
    lmbda1, Q1 = Util.indEig(lmbda1, Q1, inds)
    lmbda1 = lmbda1.real

    #Remove edge 0, 4
    #Note: r is left at zero in this test, so Lhat below equals L
    r = numpy.zeros(numVertices, numpy.complex)
    deltaW = -0.5

    clusterer = NingSpectralClustering(k)
    lmbda2Approx, Q2Approx = clusterer.incrementEigenSystem(lmbda1, Q1, scipy.sparse.csr_matrix(W), 0, 4, deltaW)

    #Compute real eigenvectors then compare against these
    Lhat = L + numpy.outer(r, r)
    Dhat = numpy.diag(numpy.diag(Lhat))
    lmbda2, Q2 = scipy.linalg.eig(Lhat, Dhat)
    lmbda2, Q2 = Util.indEig(lmbda2, Q2, inds)

    #Normalise the eigenvector columns before comparing
    Q2Approx = Q2Approx.dot(numpy.diag(numpy.diag(Q2Approx.T.dot(Q2Approx))**-0.5))
    Q2 = Q2.dot(numpy.diag(numpy.sum(Q2**2, 0)**-0.5))
    Q1 = Q1.dot(numpy.diag(numpy.sum(Q1**2, 0)**-0.5))

    #Compute error according to the paper
    #2 iterations works best - 3 seems to be worse!!!
    error2 = 1 - numpy.diag(Q2.T.dot(Q2Approx))**2
    errors2 = 1 - numpy.diag(Q2.T.dot(Q1))**2

    logging.debug("Eigenvector Errors")
    logging.debug(error2)
    logging.debug(errors2)
Example 8: testIncrementalEigenSystem3
# Required import: from sandbox.util.Util import Util [as alias]
# Or: from sandbox.util.Util.Util import indEig [as alias]
def testIncrementalEigenSystem3(self):
    """
    Test case where we add a vertex and need to increase the size of the eigenvectors.
    """
    print("< testIncrementEigenSystem3 >")

    numVertices = 8
    graph = SparseGraph(GeneralVertexList(numVertices))

    graph.addEdge(0, 1)
    graph.addEdge(0, 2)
    graph.addEdge(1, 2)
    graph.addEdge(3, 4)
    graph.addEdge(3, 5)
    graph.addEdge(4, 5)
    graph.addEdge(0, 3)
    graph.addEdge(1, 6)
    graph.addEdge(4, 7)

    subgraph = graph.subgraph(range(7))

    W1 = subgraph.getWeightMatrix()
    L1 = subgraph.laplacianWeightMatrix()
    degrees1 = numpy.sum(W1, 0)
    D1 = numpy.diag(degrees1)

    W2 = graph.getWeightMatrix()
    L2 = graph.laplacianWeightMatrix()
    degrees1 = numpy.sum(W2, 0)
    D2 = numpy.diag(degrees1)

    k = 3
    lmbda1, Q1 = scipy.linalg.eig(L1, D1)
    inds = numpy.argsort(lmbda1)[0:k]
    lmbda1, Q1 = Util.indEig(lmbda1, Q1, inds)
    lmbda1 = lmbda1.real

    #Pad the 7-vertex matrices with a zero row/column for the new vertex
    L1hat = numpy.r_[numpy.c_[L1, numpy.zeros(numVertices-1)], numpy.zeros((1, numVertices))]
    W1hat = numpy.r_[numpy.c_[W1, numpy.zeros(numVertices-1)], numpy.zeros((1, numVertices))]
    D1hat = numpy.r_[numpy.c_[D1, numpy.zeros(numVertices-1)], numpy.zeros((1, numVertices))]

    #Note: lmbda1 is overwritten here, so the call below receives the full graph's eigenvalues
    lmbda1, Q2 = scipy.linalg.eig(L2, D2)
    inds = numpy.argsort(lmbda1)[0:k]
    lmbda1, Q2 = Util.indEig(lmbda1, Q2, inds)
    lmbda1 = lmbda1.real

    Q1 = numpy.r_[Q1, numpy.ones((1, Q1.shape[1]))]

    #Increase size of eigenvector - not clear how to do this
    clusterer = NingSpectralClustering(k)
    lmbda2Approx, Q2Approx = clusterer.incrementEigenSystem(lmbda1, Q1, scipy.sparse.csr_matrix(W1hat), 4, 7, 1)

    Q2Approx = Q2Approx.dot(numpy.diag(numpy.diag(Q2Approx.T.dot(Q2Approx))**-0.5))
    Q2 = Q2.dot(numpy.diag(numpy.sum(Q2**2, 0)**-0.5))
    Q1 = Q1.dot(numpy.diag(numpy.sum(Q1**2, 0)**-0.5))

    #Setting the last value of the eigenvectors to zero seems to improve
    #over setting them to 1, but the last eigenvector has a huge error.
    errors1 = 1 - numpy.diag(Q2.T.dot(Q2Approx))**2
    errors2 = 1 - numpy.diag(Q2.T.dot(Q1))**2
    logging.debug("Eigenvector Errors for added vertex")
    logging.debug(errors1)
    logging.debug(errors2)
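The closing comment suggests that zero-padding the old eigenvectors for the new vertex works better than appending a row of ones. The alternative padding (a one-line variation on the snippet above, not what the test actually runs) would be:

#Zero-pad the subgraph eigenvectors for the new vertex instead of appending ones
Q1 = numpy.r_[Q1, numpy.zeros((1, Q1.shape[1]))]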
示例9: print
# 需要导入模块: from sandbox.util.Util import Util [as 别名]
# 或者: from sandbox.util.Util.Util import indEig [as 别名]
import time
import numpy
import scipy.sparse
import scipy.sparse.linalg
from sandbox.util.Util import Util

#Fragment: X (a sparse symmetric matrix) and the sizes n, k are defined earlier in the original script
s, V = scipy.sparse.linalg.eigsh(X, k)

#Now change X a bit
dX = scipy.sparse.rand(n, n, 0.0001)
dX = dX.dot(dX.T)
print(dX.getnnz())
X = X + dX

startTime = time.time()
s1, V1 = scipy.sparse.linalg.eigs(X, k, which="LM")
timeTaken = time.time() - startTime
print(timeTaken)

#This function gives different results to the others
#In fact the eigenvalues are very different
startTime = time.time()
s2, V2 = scipy.sparse.linalg.lobpcg(X, V, largest=True, maxiter=200, tol=10**-8, verbosityLevel=1)
timeTaken = time.time() - startTime
print(timeTaken)

#Now test with numpy
Xd = numpy.array(X.todense())
s3, V3 = numpy.linalg.eig(Xd)
inds = numpy.flipud(numpy.argsort(numpy.abs(s3)))
s3, V3 = Util.indEig(s3, V3, inds[0:k])

print(s1)
print(s2)
print(s3)
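Because eigs, lobpcg and numpy.linalg.eig return their eigenpairs in different orders, comparing the printed arrays element by element is misleading. A small follow-up (assuming the variables s1, s2 and s3 from the fragment above) that sorts each spectrum by magnitude before comparing:

#Align the orderings before comparing the three solvers
s1Sorted = numpy.flipud(numpy.sort(numpy.abs(s1)))
s2Sorted = numpy.flipud(numpy.sort(numpy.abs(s2)))
s3Sorted = numpy.flipud(numpy.sort(numpy.abs(s3)))
print(numpy.linalg.norm(s1Sorted - s2Sorted))
print(numpy.linalg.norm(s1Sorted - s3Sorted))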