This article collects typical usage examples of the scipy.linalg.eigh function in Python. If you have been wondering what eigh does, how to call it, or what real-world code using it looks like, the hand-picked examples below should help.
The 15 code examples of the eigh function shown below are drawn from open-source projects and are ordered by popularity.
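Before the examples, here is a minimal self-contained sketch of the two call patterns that recur below, the standard and the generalized Hermitian eigenproblem; the matrices are made up purely for illustration:

import numpy as np
from scipy.linalg import eigh

A = np.array([[6., 3., 1.],
              [3., 6., 1.],
              [1., 1., 7.]])          # symmetric (Hermitian) matrix
B = np.diag([1., 2., 3.])             # symmetric positive-definite matrix

w, v = eigh(A)                        # standard problem A v = w v, eigenvalues ascending
print(np.allclose(A @ v, v * w))      # True; eigenvectors are the *columns* of v

wg, vg = eigh(A, B)                   # generalized problem A v = w B v
print(np.allclose(A @ vg, B @ vg * wg))   # True

w_only = eigh(A, eigvals_only=True)   # eigenvalues only, as used in several examples below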
Example 1: eigenvectors
def eigenvectors(h, nk=10):
    import numpy as np
    import scipy.linalg as lg
    from scipy.sparse import csc_matrix as csc  # imported by the original module, unused in this snippet
    if h.dimensionality == 0:
        vv = lg.eigh(h.intra)
        vecs = [v for v in vv[1].transpose()]
        return vv[0], vecs
    elif h.dimensionality > 0:
        f = h.get_hk_gen()  # generator of the k-dependent Hamiltonian
        if h.dimensionality == 1:
            kp = np.linspace(0., 1.0, nk, endpoint=False)
        if h.dimensionality == 2:
            kp = []
            for k1 in np.linspace(0., 1.0, nk, endpoint=False):
                for k2 in np.linspace(0., 1.0, nk, endpoint=False):
                    kp.append([k1, k2])  # store the 2d k-point
        eigvecs = np.zeros((len(kp)*h.intra.shape[0], h.intra.shape[0]), dtype=complex)  # eigenvectors
        eigvals = np.zeros(len(kp)*h.intra.shape[0])  # eigenvalues
        iv = 0
        for k in kp:  # loop over k-points
            hk = f(k)  # k-dependent Hamiltonian
            vv = lg.eigh(hk)  # diagonalize the k Hamiltonian
            for (e, v) in zip(vv[0], vv[1].transpose()):
                eigvecs[iv] = v.copy()
                eigvals[iv] = e
                iv += 1
        # eigvals += vv[0].tolist() # store eigenvalues
        # vecs = [v for v in vv[1].transpose()]
        # eigvecs += vecs # store eigenvectors
        return eigvals, eigvecs
    else:
        raise ValueError('unsupported dimensionality')
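One detail worth noting in the example above: scipy.linalg.eigh returns the eigenvectors as the columns of its second output, which is why the snippet iterates over vv[1].transpose() to get one eigenvector per row. A tiny standalone illustration (the 2x2 matrix is invented, not part of the Hamiltonian class):

import numpy as np
from scipy.linalg import eigh

hk = np.array([[0.0, 1.0],
               [1.0, 0.0]])            # toy 2x2 Hermitian "Hamiltonian"
evals, evecs = eigh(hk)                # evals ascending: [-1., 1.]
for e, v in zip(evals, evecs.T):       # each v is the eigenvector belonging to e
    assert np.allclose(hk @ v, e * v)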
Example 2: set
def set(self, gse):
    '''
    Set the Lambda matrix, Q matrix and QT matrix of the block.

    Parameters
    ----------
    gse : number
        The groundstate energy.
    '''
    sign = self.controllers['sign']
    if self.method == 'S':
        lczs, Qs = self.controllers['lczs'], self.controllers['Qs']
        self.data['niters'] = np.zeros(Qs.shape[0], dtype=np.int32)
        self.data['Lambdas'] = np.zeros((Qs.shape[0], Qs.shape[2]), dtype=np.float64)
        self.data['Qs'] = np.zeros(Qs.shape, dtype=Qs.dtype)
        self.data['QTs'] = np.zeros((Qs.shape[0], Qs.shape[2]), dtype=Qs.dtype)
        for i, (lcz, Q) in enumerate(zip(lczs, Qs)):
            if lcz.niter > 0:
                E, V = sl.eigh(lcz.T, eigvals_only=False)
                self.data['niters'][i] = lcz.niter
                self.data['Lambdas'][i, 0:lcz.niter] = sign*(E-gse)
                self.data['Qs'][i, :, 0:lcz.niter] = Q[:, 0:lcz.niter].dot(V)
                self.data['QTs'][i, 0:lcz.niter] = lcz.P[0, 0]*V[0, :].conjugate()
    else:
        lanczos = self.controllers['lanczos']
        E, V = sl.eigh(lanczos.T, eigvals_only=False)
        self.data['Lambda'] = sign*(E-gse)
        self.data['Q'] = lanczos.P[:min(lanczos.nv0, lanczos.niter), :].T.conjugate().dot(V[:min(lanczos.nv0, lanczos.niter), :])
        self.data['QT'] = HM.dagger(self.data['Q'])
Example 3: jitEigh
def jitEigh(A, maxTries=10, warning=True):
    """
    Do an eigenvalue decomposition with jitter;
    works like jitChol.
    """
    # SP, linalg and logging come from module-level imports in the original
    # project (SP is its scipy/numpy alias, linalg is scipy.linalg)
    warning = True
    jitter = 0
    i = 0
    while True:
        if jitter == 0:
            jitter = abs(SP.trace(A))/A.shape[0]*1e-6
            S, U = linalg.eigh(A)
        else:
            if warning:
                # pdb.set_trace()
                # plt.figure()
                # plt.imshow(A, interpolation="nearest")
                # plt.colorbar()
                # plt.show()
                logging.error("Adding jitter of %f in jitEigh()." % jitter)
            S, U = linalg.eigh(A + jitter*SP.eye(A.shape[0]))
        if S.min() > 1E-10:
            return S, U
        if i < maxTries:
            jitter = jitter*10
            i += 1
        else:
            raise linalg.LinAlgError("Matrix non positive definite, jitter of " + str(jitter)
                                     + " added but failed after " + str(i) + " trials.")
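A quick, hypothetical usage sketch of jitEigh on a rank-deficient covariance matrix. It assumes the function above lives in a module where SP, linalg and logging resolve; numpy works as SP here because only trace and eye are used, although the original project aliases scipy.

import logging
import numpy as SP
from scipy import linalg

rng = SP.random.default_rng(0)
G = rng.standard_normal((20, 5))
K = G @ G.T                        # rank-5, positive semi-definite 20x20 matrix
S, U = jitEigh(K)                  # jitter is increased until all eigenvalues exceed 1e-10
print(S.min() > 1e-10)             # True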
Example 4: geneigh
def geneigh(A, B, tol=1e-12):
    """
    Solves the generalized eigenvalue problem also in the case where A and B share a common
    null-space. The eigenvalues corresponding to the null-space are given a NaN value.
    The null-space is defined with the tolerance tol.
    """
    # lg refers to numpy.linalg (scipy.linalg has no matrix_rank); eigh is scipy.linalg.eigh
    # first check whether there is a null-space issue
    if lg.matrix_rank(B, tol) == np.shape(B)[0]:
        return eigh(A, B)
    # first diagonalize the overlap matrix B
    Be, Bv = eigh(B)
    # rewrite the A matrix in the B-matrix eigenspace
    At = np.dot(np.conj(Bv.T), np.dot(A, Bv))
    Bt = np.diag(Be)
    # detect the shared null-space, given by the first n (near-)zero eigenvalues of B
    try:
        idx = next(i for i, v in enumerate(Be) if v > tol)
    except StopIteration:
        raise RuntimeError('geneigh: Rank of B < B.shape[0] but null-space could not be found!')
    # check that the B-matrix null-space is shared by A
    m = np.amax(abs(At[0:idx, :].flatten()))
    if m > tol:
        warnings.warn('Maximum non-diagonal element in A written in B null-space is bigger than the tolerance \'' + str(tol) + '\'.', UserWarning)
    # diagonalize the non-null-space part of the problem
    Et, Vt = eigh(At[idx:, idx:], Bt[idx:, idx:])
    # define Ut, the change of basis in the non-truncated space
    Ut = np.zeros(np.shape(A), A.dtype)
    Ut[0:idx, 0:idx] = np.eye(idx)
    Ut[idx:, idx:] = Vt
    U = np.dot(Bv, Ut)
    E = np.concatenate((float('NaN')*np.ones(idx), Et))
    return E, U
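For illustration, a hypothetical call on a pair of 3x3 matrices that share a one-dimensional null-space (this assumes geneigh is defined as above, with numpy as np, numpy.linalg as lg, scipy.linalg.eigh and warnings imported at module level):

import numpy as np

A = np.diag([2.0, 5.0, 0.0])   # Hermitian, vanishes on the shared null direction e3
B = np.diag([1.0, 1.0, 0.0])   # singular overlap matrix with the same null direction

E, U = geneigh(A, B)
print(E)                       # [nan  2.  5.]: NaN marks the common null-space,
                               # followed by the finite generalized eigenvalues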
Example 5: TBAEB
def TBAEB(engine, app):
    nmatrix = len(engine.generators['h'].table)
    if app.path is not None:
        key = list(app.path.mesh.keys())[0]
        result = zeros((app.path.rank[key], nmatrix+1))
        if len(app.path.mesh[key].shape) == 1:
            result[:, 0] = app.path.mesh[key]
        else:
            result[:, 0] = array(range(app.path.rank[key]))
        for i, parameter in enumerate(list(app.path.mesh[key])):
            result[i, 1:] = eigh(engine.matrix(**{key: parameter}), eigvals_only=True)
    else:
        result = zeros((2, nmatrix+1))
        result[:, 0] = array(range(2))
        result[0, 1:] = eigh(engine.matrix(), eigvals_only=True)
        result[1, 1:] = result[0, 1:]
    if app.save_data:
        savetxt(engine.dout+'/'+engine.name.full+'_EB.dat', result)
    if app.plot:
        plt.title(engine.name.full+'_EB')
        plt.plot(result[:, 0], result[:, 1:])
        if app.show:
            plt.show()
        else:
            plt.savefig(engine.dout+'/'+engine.name.full+'_EB.png')
        plt.close()
Example 6: _eval_cov_learner
def _eval_cov_learner(X, train_ix, test_ix, model_prec, model_cov,
                      cov_learner, ips_flag=True):
    X_train = X[train_ix, ...]
    alpha_max_ = alpha_max(X_train)
    if model_prec is None and model_cov is None:
        X_test = X[test_ix, ...]
    elif model_cov is None:
        eigvals, eigvecs = linalg.eigh(model_prec)
        X_test = np.diag(1. / np.sqrt(eigvals)).dot(eigvecs.T)
    else:
        eigvals, eigvecs = linalg.eigh(model_prec)
        X_test = np.diag(np.sqrt(eigvals)).dot(eigvecs.T)
    cov_learner_ = clone(cov_learner)
    cov_learner_.__setattr__('alpha', cov_learner_.alpha * alpha_max_)
    if not ips_flag:
        score = cov_learner_.fit(X_train).score(X_test)
    elif cov_learner.score_norm != "ell0":
        # dual split variable contains exact zeros!
        aux_prec = cov_learner_.fit(X_train).auxiliary_prec_
        mask = np.abs(aux_prec) > machine_eps(0.)
        ips = IPS(support=mask, score_norm=cov_learner_.score_norm)
        score = ips.fit(X_train).score(X_test)
    else:
        raise ValueError('ell0 scoring in CV_loop and IPS are incompatible')
    # make scores maximal at optimum
    if cov_learner_.score_norm not in {'loglikelihood', None}:
        score *= -1.
    return score
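The eigh-based branches above rely on a simple identity: if the precision matrix factors as P = V diag(lam) V^T, then X_test = diag(lam**-0.5) V^T satisfies X_test^T X_test = inv(P), i.e. it behaves like data whose (uncentered) Gram matrix equals the model covariance. A small standalone check of that identity, independent of the surrounding estimator framework:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
model_prec = A @ A.T + 4 * np.eye(4)                 # a positive-definite precision matrix

eigvals, eigvecs = linalg.eigh(model_prec)
X_test = np.diag(1. / np.sqrt(eigvals)).dot(eigvecs.T)
print(np.allclose(X_test.T @ X_test, np.linalg.inv(model_prec)))   # True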
Example 7: test_hermitian
def test_hermitian():
    np.random.seed(1234)
    sizes = [3, 10, 50]
    ks = [1, 3, 10, 50]
    gens = [True, False]
    for size, k, gen in itertools.product(sizes, ks, gens):
        if k > size:
            continue
        H = np.random.rand(size, size) + 1.j * np.random.rand(size, size)
        H = 10 * np.eye(size) + H + H.T.conj()
        X = np.random.rand(size, k)
        if not gen:
            B = np.eye(size)
            w, v = lobpcg(H, X, maxiter=5000)
            w0, v0 = eigh(H)
        else:
            B = np.random.rand(size, size) + 1.j * np.random.rand(size, size)
            B = 10 * np.eye(size) + B.dot(B.T.conj())
            w, v = lobpcg(H, X, B, maxiter=5000)
            w0, v0 = eigh(H, B)
        for wx, vx in zip(w, v.T):
            # Check eigenvector
            assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx) / np.linalg.norm(H.dot(vx)),
                            0, atol=5e-4, rtol=0)
            # Compare eigenvalues
            j = np.argmin(abs(w0 - wx))
            assert_allclose(wx, w0[j], rtol=1e-4)
Example 8: fitPairwiseModel
def fitPairwiseModel(Y, XX=None, S_XX=None, U_XX=None, verbose=False):
    N, P = Y.shape
    # initialize parameters
    RV = fitSingleTraitModel(Y, XX=XX, S_XX=S_XX, U_XX=U_XX, verbose=verbose)
    Cg = covariance.freeform(2)
    Cn = covariance.freeform(2)
    gp = gp2kronSum(mean(Y[:, 0:2]), Cg, Cn, XX=XX, S_XX=S_XX, U_XX=U_XX)
    conv2 = SP.ones((P, P), dtype=bool)
    rho_g = SP.ones((P, P))
    rho_n = SP.ones((P, P))
    for p1 in range(P):
        for p2 in range(p1):
            if verbose:
                print('.. fitting correlation (%d,%d)' % (p1, p2))
            gp.setY(Y[:, [p1, p2]])
            Cg_params0 = SP.array([SP.sqrt(RV['varST'][p1, 0]), 1e-6*SP.randn(), SP.sqrt(RV['varST'][p2, 0])])
            Cn_params0 = SP.array([SP.sqrt(RV['varST'][p1, 1]), 1e-6*SP.randn(), SP.sqrt(RV['varST'][p2, 1])])
            params0 = {'Cg': Cg_params0, 'Cn': Cn_params0}
            conv2[p1, p2], info = OPT.opt_hyper(gp, params0, factr=1e3)
            rho_g[p1, p2] = Cg.K()[0, 1]/SP.sqrt(Cg.K().diagonal().prod())
            rho_n[p1, p2] = Cn.K()[0, 1]/SP.sqrt(Cn.K().diagonal().prod())
            conv2[p2, p1] = conv2[p1, p2]; rho_g[p2, p1] = rho_g[p1, p2]; rho_n[p2, p1] = rho_n[p1, p2]
    RV['Cg0'] = rho_g*SP.dot(SP.sqrt(RV['varST'][:, 0:1]), SP.sqrt(RV['varST'][:, 0:1].T))
    RV['Cn0'] = rho_n*SP.dot(SP.sqrt(RV['varST'][:, 1:2]), SP.sqrt(RV['varST'][:, 1:2].T))
    RV['conv2'] = conv2
    # 3. regularize the covariance matrices
    offset_g = abs(SP.minimum(LA.eigh(RV['Cg0'])[0].min(), 0)) + 1e-4
    offset_n = abs(SP.minimum(LA.eigh(RV['Cn0'])[0].min(), 0)) + 1e-4
    RV['Cg0_reg'] = RV['Cg0'] + offset_g*SP.eye(P)
    RV['Cn0_reg'] = RV['Cn0'] + offset_n*SP.eye(P)
    RV['params0_Cg'] = LA.cholesky(RV['Cg0_reg'])[SP.tril_indices(P)]
    RV['params0_Cn'] = LA.cholesky(RV['Cn0_reg'])[SP.tril_indices(P)]
    return RV
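The final block uses eigh only to find the most negative eigenvalue and shift the estimated trait covariances by its magnitude plus a small margin, so that the subsequent Cholesky factorization succeeds. Stripped of the surrounding framework, that regularization step looks roughly like this (the matrix is invented for illustration):

import numpy as np
from scipy import linalg as LA

C = np.array([[1.00, 0.99, -0.80],
              [0.99, 1.00,  0.90],
              [-0.80, 0.90, 1.00]])              # symmetric but indefinite
offset = abs(min(LA.eigh(C)[0].min(), 0)) + 1e-4
C_reg = C + offset * np.eye(3)
R = LA.cholesky(C_reg)                           # now succeeds
print(LA.eigh(C_reg)[0].min() >= 1e-4 - 1e-12)   # smallest eigenvalue shifted just above zero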
Example 9: pca_whiten
def pca_whiten(x2d, n_comp, verbose=True):
    """Data whitening.

    *Input
     x2d     : 2d data matrix of observations by variables
     n_comp  : number of components to retain
    *Output
     x_white : whitened X
     white   : whitening matrix   (x_white = np.dot(white, X))
     dewhite : dewhitening matrix (X = np.dot(dewhite, x_white))
    """
    x2d_demean = x2d - x2d.mean(axis=1).reshape((-1, 1))
    samples, features = x2d_demean.shape
    M = min(samples, features)
    if samples > features:
        cov = dot(x2d_demean.T, x2d_demean) / (x2d.shape[0] - 1)
        w, v = eigh(cov, eigvals=(M - n_comp, M - 1))
        D, Di = diagsqrts(w)
        u = dot(dot(x2d_demean, v), Di)
        x_white = v.T
        white = dot(Di, u.T)
        dewhite = dot(u, D)
    else:
        cov = dot(x2d_demean, x2d_demean.T) / (x2d.shape[1] - 1)
        w, u = eigh(cov, eigvals=(M - n_comp, M - 1))
        D, Di = diagsqrts(w)
        white = dot(Di, u.T)
        x_white = dot(white, x2d_demean)
        dewhite = dot(u, D)
    return (x_white, white, dewhite)
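One caveat: the eigvals=(lo, hi) keyword used above has been deprecated in newer SciPy releases in favour of subset_by_index (since roughly SciPy 1.5, if memory serves). An equivalent call selecting the largest n_comp eigenpairs would look like this:

import numpy as np
from scipy.linalg import eigh

cov = np.cov(np.random.rand(5, 20))                     # any small symmetric matrix
M, n_comp = cov.shape[0], 3
w, v = eigh(cov, subset_by_index=[M - n_comp, M - 1])   # the n_comp largest eigenpairs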
Example 10: getEig1
def getEig1(self):
    '''
    Return the eigenvalue decomposition of the rotated second kernel (K1, or its
    low-rank factor G1), computing and caching it on first use.
    '''
    if self.eig1 is None:
        # compute eig1
        if self.K1 is not None:
            if self.K1rot is None:
                self.K1rot = rotSymm(self.K1, eig=self.eig0, exponent=-0.5, gamma=self.gamma0, delta=self.delta, forceSymm=False)
                self.K1rot = rotSymm(self.K1rot.T, eig=self.eig0, exponent=-0.5, gamma=self.gamma0, delta=self.delta, forceSymm=False)
            self.eig1 = LA.eigh(self.K1rot)
        elif self.G1 is not None:
            [N, k] = self.G1.shape
            if self.G1rot is None:
                self.G1rot = rotSymm(self.G1, eig=self.eig0, exponent=-0.5, gamma=self.gamma0, delta=self.delta, forceSymm=False)
            try:
                [U, S, V] = LA.svd(self.G1rot, full_matrices=False)
                self.eig1 = [S*S, U]
            except LA.LinAlgError:  # revert to an eigenvalue decomposition
                print("Got SVD exception, trying eigenvalue decomposition of the square of G. "
                      "Note that this is a little bit less accurate")
                [S_, V_] = LA.eigh(self.G1rot.T.dot(self.G1rot))
                S_nonz = (S_ > 0.0)
                S1 = S_[S_nonz]
                U1 = self.G1rot.dot(V_[:, S_nonz]/SP.sqrt(S1))
                self.eig1 = [S1, U1]
    return self.eig1
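The except branch above relies on the identity that if G = U S V^T, then G^T G has eigenvalues S**2 with eigenvectors V, and the left singular vectors can be recovered as U = G V / S. A standalone check of that fallback, outside the class (the names are illustrative):

import numpy as np
from scipy import linalg as LA

rng = np.random.default_rng(0)
G = rng.standard_normal((100, 4))

U, S, Vt = LA.svd(G, full_matrices=False)
S_, V_ = LA.eigh(G.T @ G)                      # eigenvalues in ascending order
keep = S_ > 0.0
U_rec = G @ (V_[:, keep] / np.sqrt(S_[keep]))  # recovered left singular vectors

print(np.allclose((U_rec * np.sqrt(S_[keep])) @ V_[:, keep].T, G))   # G is reconstructed
print(np.allclose(np.sort(S_[keep])[::-1], S**2))                    # eigenvalues = squared singular values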
Example 11: geneigh
def geneigh(A, B, tol=1e-12):
    """
    Solves the generalized eigenvalue problem also in the case where A and B share a common
    null-space. The eigenvalues corresponding to the null-space are given a NaN value.
    The null-space is defined with the tolerance tol.
    """
    # variant of Example 4 written against star-import style numpy names;
    # find() is presumably the pylab-style helper returning the indices of True entries
    # first check if there is a null-space issue
    if matrix_rank(B, tol) == shape(B)[0]:
        return eigh(A, B)
    # first diagonalize the overlap matrix B
    Be, Bv = eigh(B)
    # rewrite the A matrix in the B-matrix eigenspace
    At = dot(conj(Bv.T), dot(A, Bv))
    Bt = diag(Be)
    # detect the shared null-space, given by the first n null eigenvalues of B
    idx = find(Be > tol)
    idx = idx[0]
    # check that the B matrix null-space is shared by A
    m = amax(abs(At[0:idx, :].flatten()))
    if m > tol:
        warnings.warn('Maximum non-diagonal element in A written in B null-space is bigger than the tolerance \'' + str(tol) + '\'.', UserWarning)
    # diagonalize the non-null-space part of the problem
    Et, Vt = eigh(At[idx:, idx:], Bt[idx:, idx:])
    # define Ut, the change of basis in the non-truncated space
    Ut = zeros(shape(A), A.dtype)
    Ut[0:idx, 0:idx] = eye(idx)
    Ut[idx:, idx:] = Vt
    U = dot(Bv, Ut)
    E = append(float('NaN')*ones(idx), Et)
    return E, U
Example 12: _fit
def _fit(self, cov_a, cov_b):
    """Aux Function (modifies cov_a and cov_b in-place)."""
    cov_a /= np.trace(cov_a)
    cov_b /= np.trace(cov_b)
    # computes the eigen values
    lambda_, u = linalg.eigh(cov_a + cov_b)
    # sort them
    ind = np.argsort(lambda_)[::-1]
    lambda2_ = lambda_[ind]
    u = u[:, ind]
    p = np.dot(np.sqrt(linalg.pinv(np.diag(lambda2_))), u.T)

    # Compute the generalized eigen value problem
    w_a = np.dot(np.dot(p, cov_a), p.T)
    w_b = np.dot(np.dot(p, cov_b), p.T)
    # and solve it
    vals, vecs = linalg.eigh(w_a, w_b)
    # sort vectors by discriminative power using eigen values
    ind = np.argsort(np.maximum(vals, 1.0 / vals))[::-1]
    vecs = vecs[:, ind]
    # and project
    w = np.dot(vecs.T, p)

    self.filters_ = w
    self.patterns_ = linalg.pinv(w).T
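The second linalg.eigh(w_a, w_b) call above solves a generalized symmetric eigenproblem, and its eigenvectors simultaneously diagonalize both whitened covariances; that is the property the CSP-style filters exploit. A compact standalone illustration of what scipy returns for the generalized problem (the random matrices are illustrative only):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
A = rng.standard_normal((6, 6)); cov_a = A @ A.T
B = rng.standard_normal((6, 6)); cov_b = B @ B.T + np.eye(6)   # keep it well conditioned

vals, vecs = linalg.eigh(cov_a, cov_b)        # generalized problem cov_a v = w cov_b v
print(np.allclose(vecs.T @ cov_b @ vecs, np.eye(6), atol=1e-8))       # B-orthonormal eigenvectors
print(np.allclose(vecs.T @ cov_a @ vecs, np.diag(vals), atol=1e-8))   # both forms diagonalized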
Example 13: numpy_matrix_operator_with_arrays_and_products_factory
def numpy_matrix_operator_with_arrays_and_products_factory(dim_source, dim_range, count_source, count_range, seed,
                                                           source_id=None, range_id=None):
    from scipy.linalg import eigh
    op, _, U, V = numpy_matrix_operator_with_arrays_factory(dim_source, dim_range, count_source, count_range, seed,
                                                            source_id=source_id, range_id=range_id)
    if dim_source > 0:
        while True:
            sp = np.random.random((dim_source, dim_source))
            sp = sp.T.dot(sp)
            evals = eigh(sp, eigvals_only=True)
            if np.min(evals) > 1e-6:
                break
        sp = NumpyMatrixOperator(sp, source_id=source_id, range_id=source_id)
    else:
        sp = NumpyMatrixOperator(np.zeros((0, 0)), source_id=source_id, range_id=source_id)
    if dim_range > 0:
        while True:
            rp = np.random.random((dim_range, dim_range))
            rp = rp.T.dot(rp)
            evals = eigh(rp, eigvals_only=True)
            if np.min(evals) > 1e-6:
                break
        rp = NumpyMatrixOperator(rp, source_id=range_id, range_id=range_id)
    else:
        rp = NumpyMatrixOperator(np.zeros((0, 0)), source_id=range_id, range_id=range_id)
    return op, None, U, V, sp, rp
Example 14: simulate
def simulate(self, standardize=True):
    self._update_cache()
    RV = SP.zeros((self.N, self.P))
    # region
    Z = SP.randn(self.S, self.P)
    Sc, Uc = LA.eigh(self.Cr.K())
    Sc[Sc < 1e-9] = 0
    USh_c = Uc * Sc[SP.newaxis, :]**0.5
    RV += SP.dot(SP.dot(self.Xr, Z), USh_c.T)
    # background
    Z = SP.randn(self.N, self.P)
    USh_r = self.cache['Lr'].T * self.cache['Srstar'][SP.newaxis, :]**0.5
    Sc, Uc = LA.eigh(self.Cg.K())
    Sc[Sc < 1e-9] = 0
    USh_c = Uc * Sc[SP.newaxis, :]**0.5
    RV += SP.dot(SP.dot(USh_r, Z), USh_c.T)
    # noise
    Z = SP.randn(self.N, self.P)
    Sc, Uc = LA.eigh(self.Cn.K())
    Sc[Sc < 1e-9] = 0
    USh_c = Uc * Sc[SP.newaxis, :]**0.5
    RV += SP.dot(Z, USh_c.T)
    # standardize
    if standardize:
        RV -= RV.mean(0)
        RV /= RV.std(0)
    return RV
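Each of the three blocks above uses the same trick: eigendecompose a column covariance C = Uc diag(Sc) Uc^T, clip tiny negative eigenvalues, and multiply i.i.d. Gaussian noise by (Uc * Sc**0.5)^T so that the result has column covariance C. In isolation, with an invented 3x3 covariance:

import numpy as np
from scipy import linalg as LA

C = np.array([[2.0, 0.8, 0.3],
              [0.8, 1.5, 0.5],
              [0.3, 0.5, 1.0]])                 # target covariance

Sc, Uc = LA.eigh(C)
Sc[Sc < 1e-9] = 0
USh = Uc * Sc[np.newaxis, :] ** 0.5             # Uc * sqrt(Sc), column-wise scaling

rng = np.random.default_rng(0)
Z = rng.standard_normal((200000, 3))            # white noise, one sample per row
X = Z @ USh.T                                   # coloured noise
print(np.allclose(np.cov(X.T), C, atol=0.05))   # sample covariance is close to C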
Example 15: cca
def cca(X, Y, k, SMALL=1e-5):
    """Standard CCA.

    Views _X_ and _Y_ are per *row*.

    For an explanation of the algorithm,
    see Section 6.4 and 6.5 in
    Kernel Methods for Pattern Analysis.
    """
    n, dx = X.shape
    C = np.cov(X.T, Y.T)
    Cxy = C[:dx, dx:]
    Cxx = C[:dx, :dx] + SMALL * np.eye(dx)
    Cyy = C[dx:, dx:] + SMALL * np.eye(Y.shape[1])
    # Do not use la.sqrtm.
    # This can be done by hand...
    xeval, xevec = la.eigh(Cxx)
    yeval, yevec = la.eigh(Cyy)
    # ... because the inverses are then simple
    isqrtx = np.dot(xevec, (xevec/np.sqrt(xeval)).T)
    isqrty = np.dot(yevec, (yevec/np.sqrt(yeval)).T)
    tmp = np.dot(isqrtx, Cxy)
    tmp = np.dot(tmp, isqrty)
    [U, S, V] = la.svd(tmp, full_matrices=False)
    ccX = np.dot(isqrtx, U[:, :k])
    ccY = np.dot(isqrty, V[:k].T)
    return ccX, ccY
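Finally, a hypothetical usage sketch of the cca helper above on two random views that share a two-dimensional latent signal (it assumes the module-level imports numpy as np and scipy.linalg as la used by the function):

import numpy as np

rng = np.random.default_rng(0)
latent = rng.standard_normal((500, 2))
X = latent @ rng.standard_normal((2, 6)) + 0.1 * rng.standard_normal((500, 6))
Y = latent @ rng.standard_normal((2, 4)) + 0.1 * rng.standard_normal((500, 4))

ccX, ccY = cca(X, Y, k=2)
corr = np.corrcoef((X @ ccX)[:, 0], (Y @ ccY)[:, 0])[0, 1]
print(round(corr, 3))    # close to 1, since the two views share most of their structure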