

Python scipy.diag Function Code Examples

This article collects typical usage examples of the scipy.diag function in Python. If you are wondering what exactly scipy.diag does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.


Below are 15 code examples of the diag function, sorted by popularity by default.
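
Before the examples, a quick reminder of what diag does. The sketch below uses numpy.diag, which scipy.diag historically re-exported (newer SciPy releases deprecate the alias in favour of numpy.diag). Given a 1-D array it builds a square matrix with that array on the k-th diagonal; given a 2-D array it extracts the k-th diagonal:

import numpy as np

v = np.array([1.0, 2.0, 3.0])
print(np.diag(v))        # 3x3 matrix with v on the main diagonal
print(np.diag(v, 1))     # 4x4 matrix with v on the first superdiagonal

M = np.arange(9).reshape(3, 3)
print(np.diag(M))        # main diagonal of M: [0 4 8]
print(np.diag(M, -1))    # first subdiagonal: [3 7]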

Example 1: orlancz

def orlancz(A, v0, k):
    """
    full orthogonalized Lanczos algorithm (Krylov approximation of a matrix)
        input: 
            A: matrix to approximate
            v0: initial vector (should be in matrix form) 
            k: number of Krylov steps 
        output: 
            V: matrix (large, N*k) containing the orthogonal vectors
            H: matrix (small, k*k) containing the Krylov approximation of A
            
    Author: Vasile Gradinaru, 21.10.2008 (Zuerich)
    """
    print 'FULL ORTHOGONAL LANCZOS METHOD !'
    from numpy import finfo, sqrt
    reps = 10*sqrt(finfo(float).eps)
    V = mat( v0.copy() / norm(v0) )
    alpha =  zeros(k)  
    beta =  zeros(k+1)  
    for m in xrange(k):
        #vt = A * V[ :, m]
        vt = multMv( A , V[ :, m])
        if m > 0: vt -= beta[m] * V[:, m-1]
        alpha[m] = (V[:, m].H * vt )[0, 0]
        vt -= alpha[m] * V[:, m]
        beta[m+1] = norm(vt)
        # reorthogonalization
        h1 = multMv(V.H, vt)
        vt -= multMv(V, h1)
        if norm(h1) > reps: vt -= multMv(V, (multMv(V.H,vt)))
        #
        V =  hstack( (V, vt.copy() / beta[m+1] ) ) 
    rbeta = beta[1:-1]    
    H = diag(alpha) + diag(rbeta, 1) + diag(rbeta, -1)
    return V,  H
Developer: raoulbq, Project: Arnoldi_PyCpp, Lines of code: 35, Source file: pyarnoldi.py

Example 2: lanczos

def lanczos(A, v0, k):
    """
    Lanczos algorithm (Krylov approximation of a matrix)
        input: 
            A: matrix to approximate
            v0: initial vector (should be in matrix form) 
            k: number of Krylov steps 
        output: 
            V: matrix (large, N*k) containing the orthogonal vectors
            H: matrix (small, k*k) containing the Krylov approximation of A
            
    Author: Vasile Gradinaru, 14.12.2007 (Rennes)
    """
    print 'LANCZOS METHOD !'
    V = mat( v0.copy() / norm(v0) )
    alpha =  zeros(k)  
    beta =  zeros(k+1)  
    for m in xrange(k):
        vt = multMv( A ,  V[ :, m])
        #vt = A * V[ :, m]
        if m > 0: vt -= beta[m] * V[:, m-1]
        alpha[m] = (V[:, m].H * vt )[0, 0]
        vt -= alpha[m] * V[:, m]
        beta[m+1] = norm(vt)
        V =  hstack( (V, vt.copy() / beta[m+1] ) ) 
    rbeta = beta[1:-1]    
    H = diag(alpha) + diag(rbeta, 1) + diag(rbeta, -1)
    return V,  H
Developer: raoulbq, Project: Arnoldi_PyCpp, Lines of code: 28, Source file: pyarnoldi.py
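
Both Lanczos variants above (Examples 1 and 2) assemble the tridiagonal Krylov matrix H from the recurrence coefficients with three diag calls. A minimal standalone sketch of that construction, using made-up alpha/beta values rather than anything computed by either project:

import numpy as np

alpha = np.array([2.0, 2.0, 2.0])   # hypothetical diagonal entries alpha[0..k-1]
rbeta = np.array([1.0, 1.0])        # hypothetical off-diagonal entries beta[1:-1]

# main diagonal plus first super- and subdiagonal gives the tridiagonal H
H = np.diag(alpha) + np.diag(rbeta, 1) + np.diag(rbeta, -1)
print(H)
# [[2. 1. 0.]
#  [1. 2. 1.]
#  [0. 1. 2.]]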

Example 3: LMLdebug

    def LMLdebug(self):
        """
        LML function for debug
        """
        assert self.N*self.P<5000, 'gp2kronSum:: N*P>=5000'

        y = SP.reshape(self.Y,(self.N*self.P), order='F') 
        V = SP.kron(SP.eye(self.P),self.F)

        XX = SP.dot(self.Xr,self.Xr.T)
        K  = SP.kron(self.Cr.K(),XX)
        K += SP.kron(self.Cn.K()+self.offset*SP.eye(self.P),SP.eye(self.N))

        # inverse of K
        cholK = LA.cholesky(K)
        Ki = LA.cho_solve((cholK,False),SP.eye(self.N*self.P))

        # Areml and inverse
        Areml = SP.dot(V.T,SP.dot(Ki,V))
        cholAreml = LA.cholesky(Areml)
        Areml_i = LA.cho_solve((cholAreml,False),SP.eye(self.K*self.P))

        # effect sizes and z
        b = SP.dot(Areml_i,SP.dot(V.T,SP.dot(Ki,y)))
        z = y-SP.dot(V,b)
        Kiz = SP.dot(Ki,z)

        # lml
        lml  = y.shape[0]*SP.log(2*SP.pi)
        lml += 2*SP.log(SP.diag(cholK)).sum()
        lml += 2*SP.log(SP.diag(cholAreml)).sum()
        lml += SP.dot(z,Kiz)
        lml *= 0.5

        return lml
Developer: PMBio, Project: mtSet, Lines of code: 35, Source file: gp2kronSumLR.py
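
The two 2*SP.log(SP.diag(...)).sum() terms above are the usual way of getting the log-determinant of a positive-definite matrix from the diagonal of its Cholesky factor. A small standalone check of that identity (the test matrix here is made up, not part of the mtSet code):

import numpy as np
from scipy import linalg as LA

rng = np.random.default_rng(0)
X = rng.standard_normal((5, 5))
K = X.dot(X.T) + 5 * np.eye(5)              # symmetric positive-definite test matrix

cholK = LA.cholesky(K)                      # upper-triangular factor, K = cholK.T @ cholK
logdet = 2 * np.log(np.diag(cholK)).sum()   # log|K| from the factor's diagonal

print(np.allclose(logdet, np.linalg.slogdet(K)[1]))   # True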

Example 4: __init__

    def __init__(self,data,cov_matrix=False,loc=None):
        """Parameters
        ----------
        data : array of data, shape=(number points,number dim)
               If cov_matrix is True then data is the covariance matrix (see below)

        Keywords
        --------
        cov_matrix : bool (optional)
                     If True data is treated as a covariance matrix with shape=(number dim, number dim)
        loc        : the mean of the data if a covariance matrix is given, shape=(number dim)
        """
        if cov_matrix:
            self.dim=data.shape[0]
            self.n=None
            self.data_t=None
            self.mu=loc
            self.evec,eval,V=sl.svd(data,full_matrices=False)
            self.sigma=sqrt(eval)
            self.Sigma=diag(1./self.sigma)
            self.B=dot(self.evec,self.Sigma)
            self.Binv=sl.inv(self.B)
        else:
            self.n,self.dim=data.shape #the shape of input data
            self.mu=data.mean(axis=0) #the mean of the data
            self.data_t=data-self.mu #remove the mean
            self.evec,eval,V=sl.svd(self.data_t.T,full_matrices=False) #get the eigenvectors (axes of the ellipsoid)
            data_p=dot(self.data_t,self.evec) #project the data onto the eigenvectors
            self.sigma=data_p.std(axis=0) #get the spread of the distribution (the axis ratios for the ellipsoid)
            self.Sigma=diag(1./self.sigma) #the eigenvalue matrix for the ellipsoid equation
            self.B=dot(self.evec,self.Sigma) #used in the ellipsoid equation
            self.Binv=sl.inv(self.B) #also useful to have around
Developer: cmp346, Project: densityplot, Lines of code: 32, Source file: error_ellipse.py
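
The diag(1./self.sigma) step above turns the per-axis spreads into a scaling matrix, so B maps the centred data into coordinates with unit spread along each principal axis. A self-contained sketch of the same idea on synthetic 2-D data (the covariance values are arbitrary test inputs, not from the project):

import numpy as np

rng = np.random.default_rng(0)
data = rng.multivariate_normal([0.0, 0.0], [[2.0, 1.2], [1.2, 1.0]], size=2000)

data_t = data - data.mean(axis=0)                          # remove the mean
evec, _, _ = np.linalg.svd(data_t.T, full_matrices=False)  # principal axes
sigma = data_t.dot(evec).std(axis=0)                       # spread along each axis
B = evec.dot(np.diag(1.0 / sigma))                         # scale each axis to unit spread

print(data_t.dot(B).std(axis=0))   # approximately [1. 1.]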

Example 5: newEpisode

    def newEpisode(self):
        if self.learning:
            params = ravel(self.explorationlayer.module.params)
            target = ravel(sum(self.history.getSequence(self.history.getNumSequences()-1)[2]) / 500)
        
            if target != 0.0:
                self.gp.addSample(params, target)
                if len(self.gp.trainx) > 20:
                    self.gp.trainx = self.gp.trainx[-20:, :]
                    self.gp.trainy = self.gp.trainy[-20:]
                    self.gp.noise = self.gp.noise[-20:]
                    
                self.gp._calculate()
                        
                # get new parameters where mean was highest
                max_cov = diag(self.gp.pred_cov).max()
                indices = where(diag(self.gp.pred_cov) == max_cov)[0]
                pick = indices[random.randint(len(indices))]
                new_param = self.gp.testx[pick]
            
                # check if that one exists already in gp training set
                if len(where(self.gp.trainx == new_param)[0]) > 0:
                    # add some normal noise to it
                    new_param += random.normal(0, 1, len(new_param))

                self.explorationlayer.module._setParameters(new_param)

            else:
                self.explorationlayer.drawRandomWeights()
        
        # don't call StateDependentAgent.newEpisode() because it randomizes the params
        LearningAgent.newEpisode(self)
Developer: HKou, Project: pybrain, Lines of code: 32, Source file: statedependentgp.py

Example 6: diag_Ctilde_o_Sr

    def diag_Ctilde_o_Sr(self, i):
        if i < self.Cg.getNumberParams():
            r = sp.kron(sp.diag(self.LcGradCgLc(i)), self.Sr())
        else:
            _i = i - self.Cg.getNumberParams()
            r = sp.kron(sp.diag(self.LcGradCnLc(_i)), sp.ones(self.dim_r))
        return r
Developer: jeffhsu3, Project: limix, Lines of code: 7, Source file: cov2kronSum.py

Example 7: _LMLgrad_lik

    def _LMLgrad_lik(self,hyperparams):
        """derivative of the likelihood parameters"""

        logtheta = hyperparams['covar']
        try:   
            KV = self.get_covariances(hyperparams)
        except linalg.LinAlgError:
            LG.error("exception caught (%s)" % (str(hyperparams)))
            return 1E6
	
        #loop through all dimensions
        #logdet term:
        Kd = 2*KV['Knoise']
        dldet = 0.5*(Kd*KV['Si']).sum(axis=0)
        #quadratic term
        y_roti = KV['y_roti']
        dlquad = -0.5 * (y_roti * Kd * y_roti).sum(axis=0)
        if VERBOSE:
            dldet_  = SP.zeros([self.d])
            dlquad_ = SP.zeros([self.d])
            for d in xrange(self.d):
                _K = KV['K'] + SP.diag(KV['Knoise'][:,d])
                _Ki = SP.linalg.inv(_K)
                dldet_[d] = 0.5* SP.dot(_Ki,SP.diag(Kd[:,d])).trace()
                dlquad_[d] = -0.5*SP.dot(self.y[:,d],SP.dot(_Ki,SP.dot(SP.diag(Kd[:,d]),SP.dot(_Ki,self.y[:,d]))))

            assert (SP.absolute(dldet-dldet_)<1E-3).all(), 'outch'
            assert (SP.absolute(dlquad-dlquad_)<1E-3).all(), 'outch'


        LMLgrad = dldet + dlquad
        RV = {'lik': LMLgrad}
    
        return RV
Developer: AngelBerihuete, Project: pygp, Lines of code: 34, Source file: gplvm_ard.py

Example 8: build_sample_nn

def build_sample_nn():
	means = [(-1,0),(2,4),(3,1)]
	cov = [diag([1,1]), diag([0.5,1.2]), diag([1.5,0.7])]
	alldata = ClassificationDataSet(2, 1, nb_classes=3)
	for n in xrange(400):
	      for klass in range(3):
	                input = multivariate_normal(means[klass],cov[klass])
	                alldata.addSample(input, [klass])

	tstdata_temp, trndata_temp = alldata.splitWithProportion(0.25)

	tstdata = ClassificationDataSet(2, 1, nb_classes=3)
	for n in xrange(0, tstdata_temp.getLength()):
	    tstdata.addSample( tstdata_temp.getSample(n)[0], tstdata_temp.getSample(n)[1] )

	trndata = ClassificationDataSet(2, 1, nb_classes=3)
	for n in xrange(0, trndata_temp.getLength()):
	    trndata.addSample( trndata_temp.getSample(n)[0], trndata_temp.getSample(n)[1] )

	trndata._convertToOneOfMany( )
	tstdata._convertToOneOfMany( )

	fnn = buildNetwork( trndata.indim, 5, trndata.outdim, outclass=SoftmaxLayer )


	trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)

	return trainer, fnn, tstdata
Developer: jamesfisk, Project: thesisc, Lines of code: 28, Source file: neural.py

Example 9: get_whitener

def get_whitener( A, k ):
    """Return the matrix W that whitens A, i.e. W^T A W = I. Assumes A
    is k-rank"""

    U, D, V = svdk(A, k)
    Ds = sqrt(D)
    Di = 1./Ds
    return U.dot(diag(Di)), U.dot(diag(Ds))
Developer: sidaw, Project: polymom, Lines of code: 8, Source file: MixtureModelSpectral.py
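
get_whitener relies on a project-specific truncated SVD helper (svdk); the role of the two diag calls can still be checked with numpy's full SVD on a small rank-k matrix. A hedged sketch, not the project's own test code:

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((4, 3))
A = X.dot(X.T)                        # symmetric PSD matrix of rank k = 3
k = 3

U, D, V = np.linalg.svd(A)
U, D = U[:, :k], D[:k]                # keep the k leading components, like svdk would
W = U.dot(np.diag(1.0 / np.sqrt(D)))  # whitener
Wc = U.dot(np.diag(np.sqrt(D)))       # its counterpart ("coloring" matrix)

print(np.allclose(W.T.dot(A).dot(W), np.eye(k)))   # True: W^T A W = I
print(np.allclose(Wc.dot(Wc.T), A))                # True: Wc rebuilds A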

Example 10: segmented

def segmented():
    
    radius = 5 
    sigmaI = 0.02 
    sigmaX = 3.0 
    height = img.shape[0]
    width = img.shape[1]
    flatImg = img.flatten()
    darkImg = flatImg.copy()    # separate copies, so zeroing one segment does not change the other
    brightImg = flatImg.copy()
    
    nodes = img.flatten()
    
    W = spar.lil_matrix((nodes.size, nodes.size),dtype=float)
    D = sp.zeros((1,nodes.size))
    
    for row in range(height):
        for col in range(width):				
            for k in range(row-radius,row+radius):
                for l in range(col-radius,col+radius):
                    try:
                        w = weight(row,col,k,l)
                        W[row*width+col,k*width+l] = w
                        D[0,row*width+col] += w		
                    except:
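                        # neighbour indices outside the matrix raise here and are skipped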
                        continue
                        
    D = spar.spdiags(D, 0, nodes.size, nodes.size)

    Q = D - W
     
    D1 = D.todense()
    Q1 = Q.todense()
    
    diags = sp.diag(D1)
    DminusHalf = sp.diag(diags**-0.5)
    
    
    segQ = sp.dot(sp.dot(DminusHalf, Q1),DminusHalf)
    vals, vecs = la.eig(segQ)
    
    vecind = sp.argsort(vals)[1]
    theVec = vecs[:, vecind]  # eigenvectors are the columns returned by la.eig

    for i in range(0,height**2):
        if theVec[i] < 0:
            darkImg[i] = 0.0
        else:
            brightImg[i] = 0.0
            
    
    darkImg = sp.reshape(darkImg, (height,height))
    brightImg = sp.reshape(brightImg, (height,height))
             
    
    
    
    return darkImg, flatImg, brightImg
Developer: snowdj, Project: byu_macro_boot_camp, Lines of code: 58, Source file: Lab16b.py
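
In segmented, sp.diag is used in both directions: first to pull the degree values off the dense degree matrix, then to build D^-1/2 for the normalized cut matrix. The same normalization on a toy affinity matrix (values chosen arbitrarily for illustration):

import numpy as np

W = np.array([[0.0, 0.8, 0.1],
              [0.8, 0.0, 0.2],
              [0.1, 0.2, 0.0]])           # toy symmetric affinity matrix

D = np.diag(W.sum(axis=1))                # degree matrix
Q = D - W                                 # unnormalized graph Laplacian

degrees = np.diag(D)                      # extract the degrees as a 1-D array
DminusHalf = np.diag(degrees ** -0.5)     # D^-1/2

segQ = DminusHalf.dot(Q).dot(DminusHalf)  # normalized Laplacian D^-1/2 (D - W) D^-1/2
print(segQ)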

Example 11: exact_moments

def exact_moments( A, w ):
    """Get the exact moments of a components distribution"""

    k = len(w)
    P = A.dot( diag( w ) ).dot( A.T )
    #T = sum( [ w[i] * tensorify( A.T[i], A.T[i], A.T[i] ) for i in xrange( k ) ] )
    T = lambda theta: A.dot( diag( w) ).dot( diag( A.T.dot( theta ) ) ).dot( A.T )

    return P, T    
Developer: arunchaganty, Project: spectral, Lines of code: 9, Source file: SphericalGaussians.py
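
The commented-out line in Example 11 hints at what diag(w) is doing here: A.dot(diag(w)).dot(A.T) is just a compact way of writing the weighted sum of outer products of A's columns. A quick numeric check with arbitrary test values:

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 3))      # columns play the role of component means
w = np.array([0.5, 0.3, 0.2])        # mixture weights

P = A.dot(np.diag(w)).dot(A.T)
P_sum = sum(w[i] * np.outer(A[:, i], A[:, i]) for i in range(3))

print(np.allclose(P, P_sum))         # True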

Example 12: MikotaPair

def MikotaPair(n):
    # Mikota pair acts as a nice test since the eigenvalues
    # are the squares of the integers n, n=1,2,...
    x = arange(1,n+1)
    B = diag(1./x)
    y = arange(n-1,0,-1)
    z = arange(2*n-1,0,-2)
    A = diag(z)-diag(y,-1)-diag(y,1)
    return A,B
Developer: 317070, Project: scipy, Lines of code: 9, Source file: test_lobpcg.py

Example 13: MikotaPair

def MikotaPair(n, dtype=np.dtype("d")):
    # Mikota pair acts as a nice test since the eigenvalues
    # are the squares of the integers n, n=1,2,...
    x = np.arange(1,n+1, dtype=dtype)
    B = diag(1./x)
    y = np.arange(n-1,0,-1, dtype=dtype)
    z = np.arange(2*n-1,0,-2, dtype=dtype)
    A = diag(z)-diag(y,-1)-diag(y,1)
    return A, B
Developer: primme, Project: primme, Lines of code: 9, Source file: tests.py
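
Examples 12 and 13 build the same banded Mikota test matrices with diag; the property mentioned in their comments (generalized eigenvalues equal to the squares 1, 4, 9, ...) is easy to verify with scipy.linalg.eigh, independent of the lobpcg test suites the functions come from:

import numpy as np
from scipy.linalg import eigh

n = 5
x = np.arange(1, n + 1)
B = np.diag(1.0 / x)                             # diagonal mass matrix
y = np.arange(n - 1, 0, -1)
z = np.arange(2 * n - 1, 0, -2)
A = np.diag(z) - np.diag(y, -1) - np.diag(y, 1)  # tridiagonal stiffness matrix

w = eigh(A, B, eigvals_only=True)                # generalized eigenvalues of A x = w B x
print(np.round(w))                               # [ 1.  4.  9. 16. 25.]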

Example 14: main

def main():
    means = [(-1,0),(2,4),(3,1)]
    cov = [diag([1,1]), diag([0.5,1.2]), diag([1.5,0.7])]
    alldata = ClassificationDataSet(2, 1, nb_classes=3)
    for n in xrange(400):
        for klass in range(3):
            input = multivariate_normal(means[klass],cov[klass])
            alldata.addSample(input, [klass])
    tstdata, trndata = alldata.splitWithProportion( 0.25 )
    trndata._convertToOneOfMany( )
    tstdata._convertToOneOfMany( )
    print "Number of training patterns: ", len(trndata)
    print "Input and output dimensions: ", trndata.indim, trndata.outdim
    print "First sample (input, target, class):"
    print trndata['input'][0], trndata['target'][0], trndata['class'][0]
    
    fnn = buildNetwork( trndata.indim, 5, trndata.outdim, outclass=SoftmaxLayer )
    trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)
    ticks = arange(-3.,6.,0.2)
    X, Y = meshgrid(ticks, ticks)
    # need column vectors in dataset, not arrays
    griddata = ClassificationDataSet(2,1, nb_classes=3)
    for i in xrange(X.size):
        griddata.addSample([X.ravel()[i],Y.ravel()[i]], [0])
    griddata._convertToOneOfMany()  # this is still needed to make the fnn feel comfy
    
    for i in range(20):
        trainer.trainEpochs(1)
    
        trnresult = percentError( trainer.testOnClassData(),
                                  trndata['class'] )
        tstresult = percentError( trainer.testOnClassData(
               dataset=tstdata ), tstdata['class'] )
    
        print "epoch: %4d" % trainer.totalepochs, \
              "  train error: %5.2f%%" % trnresult, \
              "  test error: %5.2f%%" % tstresult
        
        out = fnn.activateOnDataset(griddata)
        out = out.argmax(axis=1)  # the highest output activation gives the class
        out = out.reshape(X.shape)
        figure(1)
        ioff()  # interactive graphics off
        clf()   # clear the plot
        hold(True) # overplot on
        for c in [0,1,2]:
            here, _ = where(tstdata['class']==c)
            plot(tstdata['input'][here,0],tstdata['input'][here,1],'o')
        if out.max()!=out.min():  # safety check against flat field
            contourf(X, Y, out)   # plot the contour
        ion()   # interactive graphics on
        draw()  # update the plot
        
    ioff()
    show()
Developer: jac241, Project: Machine-Learning-Neural-Network, Lines of code: 55, Source file: nnexample.py

Example 15: exact_moments

def exact_moments( alphas, topics ):
    """Get the exact moments of a components distribution"""

    a0 = alphas.sum()
    O = topics

    P = 1/((a0 +1)*a0) * O.dot( diag( alphas ) ).dot( O.T )
    T = lambda theta: 2/((a0+2)*(a0 +1)*a0) * O.dot( diag( O.T.dot(
        theta ) ) ).dot( diag( alphas ) ).dot( O.T )

    return P, T    
Developer: arunchaganty, Project: spectral, Lines of code: 11, Source file: TwoSVD.py


Note: the scipy.diag examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or use should follow the corresponding project's License. Do not reproduce without permission.