

Python tensorflow.cholesky Function Code Examples

This article collects typical usage examples of the tensorflow.cholesky function in Python. If you are unsure what exactly the cholesky function does, or how to call it, the curated code examples below should help.


The following shows 15 code examples of the cholesky function, ordered roughly by popularity.
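Before turning to the examples, here is a minimal sketch of the op itself (a sketch of my own, assuming TensorFlow 1.x, where tf.cholesky is the short alias of tf.linalg.cholesky; TensorFlow 2.x keeps only tf.linalg.cholesky). It takes a symmetric positive-definite matrix, or a batch of them, and returns the lower-triangular factor L with A = L Lᵀ:

import numpy as np
import tensorflow as tf

A = np.array([[4., 2.],
              [2., 3.]])  # symmetric positive definite

L = tf.cholesky(A)  # lower triangular, with A = L @ L.T

with tf.Session() as sess:
    L_val = sess.run(L)
    print(L_val)                            # [[2. 0.], [1. 1.41421356]]
    print(np.allclose(L_val @ L_val.T, A))  # True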

Example 1: compute_upper_bound

    def compute_upper_bound(self):
        num_data = tf.cast(tf.shape(self.Y)[0], settings.float_type)

        Kdiag = self.kern.Kdiag(self.X)
        Kuu = self.feature.Kuu(self.kern, jitter=settings.numerics.jitter_level)
        Kuf = self.feature.Kuf(self.kern, self.X)

        L = tf.cholesky(Kuu)
        LB = tf.cholesky(Kuu + self.likelihood.variance ** -1.0 * tf.matmul(Kuf, Kuf, transpose_b=True))

        LinvKuf = tf.matrix_triangular_solve(L, Kuf, lower=True)
        # Using the Trace bound, from Titsias' presentation
        c = tf.reduce_sum(Kdiag) - tf.reduce_sum(LinvKuf ** 2.0)
        # Kff = self.kern.K(self.X)
        # Qff = tf.matmul(Kuf, LinvKuf, transpose_a=True)

        # Alternative bound on max eigenval:
        # c = tf.reduce_max(tf.reduce_sum(tf.abs(Kff - Qff), 0))
        corrected_noise = self.likelihood.variance + c

        const = -0.5 * num_data * tf.log(2 * np.pi * self.likelihood.variance)
        logdet = tf.reduce_sum(tf.log(tf.diag_part(L))) - tf.reduce_sum(tf.log(tf.diag_part(LB)))

        LC = tf.cholesky(Kuu + corrected_noise ** -1.0 * tf.matmul(Kuf, Kuf, transpose_b=True))
        v = tf.matrix_triangular_solve(LC, corrected_noise ** -1.0 * tf.matmul(Kuf, self.Y), lower=True)
        quad = -0.5 * corrected_noise ** -1.0 * tf.reduce_sum(self.Y ** 2.0) + 0.5 * tf.reduce_sum(v ** 2.0)

        return const + logdet + quad
Author: vincentadam87, Project: GPflow, Lines: 28, Source: sgpr.py

Example 2: build_predict

 def build_predict(self, Xnew, full_cov=False):
     """
     Compute the mean and variance of the latent function at some new points
     Xnew. For a derivation of the terms in here, see the associated SGPR
     notebook.
     """
     num_inducing = tf.shape(self.Z)[0]
     err = self.Y - self.mean_function(self.X)
     Kuf = self.kern.K(self.Z, self.X)
     Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
     Kus = self.kern.K(self.Z, Xnew)
     sigma = tf.sqrt(self.likelihood.variance)
     L = tf.cholesky(Kuu)
     A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
     B = tf.matmul(A, tf.transpose(A)) + eye(num_inducing)
     LB = tf.cholesky(B)
     Aerr = tf.matmul(A, err)
     c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
     tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
     tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
     mean = tf.matmul(tf.transpose(tmp2), c)
     if full_cov:
         var = self.kern.K(Xnew) + tf.matmul(tf.transpose(tmp2), tmp2)\
             - tf.matmul(tf.transpose(tmp1), tmp1)
         shape = tf.pack([1, 1, tf.shape(self.Y)[1]])
         var = tf.tile(tf.expand_dims(var, 2), shape)
     else:
         var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0)\
             - tf.reduce_sum(tf.square(tmp1), 0)
         shape = tf.pack([1, tf.shape(self.Y)[1]])
         var = tf.tile(tf.expand_dims(var, 1), shape)
     return mean + self.mean_function(Xnew), var
Author: gbohner, Project: GPflow, Lines: 32, Source: sgpr.py

Example 3: build_likelihood

    def build_likelihood(self):
        """
        Constuct a tensorflow function to compute the bound on the marginal
        likelihood. For a derivation of the terms in here, see the associated
        SGPR notebook. 
        """

        num_inducing = tf.shape(self.Z)[0]
        num_data = tf.shape(self.Y)[0]
        output_dim = tf.shape(self.Y)[1]

        err = self.Y - self.mean_function(self.X)
        Kdiag = self.kern.Kdiag(self.X)
        Kuf = self.kern.K(self.Z, self.X)
        Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
        L = tf.cholesky(Kuu)

        # Compute intermediate matrices
        A = tf.matrix_triangular_solve(L, Kuf, lower=True)*tf.sqrt(1./self.likelihood.variance)
        AAT = tf.matmul(A, tf.transpose(A))
        B = AAT + eye(num_inducing)
        LB = tf.cholesky(B)
        c = tf.matrix_triangular_solve(LB, tf.matmul(A, err), lower=True) * tf.sqrt(1./self.likelihood.variance)

        #compute log marginal bound
        bound = -0.5*tf.cast(num_data*output_dim, tf.float64)*np.log(2*np.pi)
        bound += -tf.cast(output_dim, tf.float64)*tf.reduce_sum(tf.log(tf.user_ops.get_diag(LB)))
        bound += -0.5*tf.cast(num_data*output_dim, tf.float64)*tf.log(self.likelihood.variance)
        bound += -0.5*tf.reduce_sum(tf.square(err))/self.likelihood.variance
        bound += 0.5*tf.reduce_sum(tf.square(c))
        bound += -0.5*(tf.reduce_sum(Kdiag)/self.likelihood.variance - tf.reduce_sum(tf.user_ops.get_diag(AAT)))

        return bound
Author: agarbuno, Project: GPflow, Lines: 33, Source: sgpr.py

Example 4: testNonSquareMatrix

 def testNonSquareMatrix(self):
   with self.assertRaises(ValueError):
     tf.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
   with self.assertRaises(ValueError):
     tf.cholesky(
         np.array([[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]]
                  ]))
Author: 821760408-sp, Project: tensorflow, Lines: 7, Source: cholesky_op_test.py

Example 5: build_predict

    def build_predict(self, Xnew, full_cov=False):
        """
        Compute the mean and variance of the latent function at some new points
        Xnew. Note that this is very similar to the SGPR prediction, for whcih
        there are notes in the SGPR notebook.
        """
        num_inducing = tf.shape(self.Z)[0]
        psi0, psi1, psi2 = ke.build_psi_stats(self.Z, self.kern, self.X_mean, self.X_var)
        Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
        Kus = self.kern.K(self.Z, Xnew)
        sigma2 = self.likelihood.variance
        sigma = tf.sqrt(sigma2)
        L = tf.cholesky(Kuu)

        A = tf.matrix_triangular_solve(L, tf.transpose(psi1), lower=True) / sigma
        tmp = tf.matrix_triangular_solve(L, psi2, lower=True)
        AAT = tf.matrix_triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
        B = AAT + eye(num_inducing)
        LB = tf.cholesky(B)
        c = tf.matrix_triangular_solve(LB, tf.matmul(A, self.Y), lower=True) / sigma
        tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
        tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
        mean = tf.matmul(tf.transpose(tmp2), c)
        if full_cov:
            var = self.kern.K(Xnew) + tf.matmul(tf.transpose(tmp2), tmp2)\
                - tf.matmul(tf.transpose(tmp1), tmp1)
            shape = tf.pack([1, 1, tf.shape(self.Y)[1]])
            var = tf.tile(tf.expand_dims(var, 2), shape)
        else:
            var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0)\
                - tf.reduce_sum(tf.square(tmp1), 0)
            shape = tf.pack([1, tf.shape(self.Y)[1]])
            var = tf.tile(tf.expand_dims(var, 1), shape)
        return mean + self.mean_function(Xnew), var
Author: blutooth, Project: dgp, Lines: 34, Source: gplvm.py

Example 6: build_predict

    def build_predict(self, Xnew, full_cov=False):

        err = self.Y
        Kuf = self.RBF(self.Z, self.X)
        Kuu = self.RBF(self.Z,self.Z) + eye(self.num_inducing) * 1e-6
        Kus = self.RBF(self.Z, Xnew)
        sigma = tf.sqrt(self.likelihood_variance)
        L = tf.cholesky(Kuu)
        A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
        B = tf.matmul(A, tf.transpose(A)) + eye(self.num_inducing)
        LB = tf.cholesky(B)
        Aerr = tf.matmul(A, err)
        c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
        tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
        tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
        mean = tf.matmul(tf.transpose(tmp2), c)
        
        if full_cov:

            var = self.RBF(Xnew, Xnew) + tf.matmul(tf.transpose(tmp2), tmp2)\
                - tf.matmul(tf.transpose(tmp1), tmp1)
            shape = tf.pack([1, 1, tf.shape(self.Y)[1]])
            var = tf.tile(tf.expand_dims(var, 2), shape)

        else:

            var = tf.diag_part(self.RBF(Xnew, Xnew)) + tf.reduce_sum(tf.square(tmp2), 0)\
                - tf.reduce_sum(tf.square(tmp1), 0)
            shape = tf.pack([1, tf.shape(self.Y)[1]])
            var = tf.tile(tf.expand_dims(var, 1), shape)


        return mean, var
Author: SebastianPopescu, Project: ConnectionistNetwork, Lines: 34, Source: ConnectionistNetwork.py

Example 7: _build_predict

 def _build_predict(self, Xnew, full_cov=False):
     """
     Compute the mean and variance of the latent function at some new points
     Xnew. For a derivation of the terms in here, see the associated SGPR
     notebook.
     """
     num_inducing = len(self.feature)
     err = self.Y - self.mean_function(self.X)
     Kuf = self.feature.Kuf(self.kern, self.X)
     Kuu = self.feature.Kuu(self.kern, jitter=settings.numerics.jitter_level)
     Kus = self.feature.Kuf(self.kern, Xnew)
     sigma = tf.sqrt(self.likelihood.variance)
     L = tf.cholesky(Kuu)
     A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
     B = tf.matmul(A, A, transpose_b=True) + tf.eye(num_inducing, dtype=settings.float_type)
     LB = tf.cholesky(B)
     Aerr = tf.matmul(A, err)
     c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
     tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
     tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
     mean = tf.matmul(tmp2, c, transpose_a=True)
     if full_cov:
         var = self.kern.K(Xnew) + tf.matmul(tmp2, tmp2, transpose_a=True) \
               - tf.matmul(tmp1, tmp1, transpose_a=True)
         shape = tf.stack([1, 1, tf.shape(self.Y)[1]])
         var = tf.tile(tf.expand_dims(var, 2), shape)
     else:
         var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \
               - tf.reduce_sum(tf.square(tmp1), 0)
         shape = tf.stack([1, tf.shape(self.Y)[1]])
         var = tf.tile(tf.expand_dims(var, 1), shape)
     return mean + self.mean_function(Xnew), var
Author: vincentadam87, Project: GPflow, Lines: 32, Source: sgpr.py

Example 8: gauss_kl

def gauss_kl(min_q_mu, q_sq, K):
    q_mu = -1 * min_q_mu

    # q_sqrt = tf.cholesky(tf.squeeze(q_sqrt))
    # K is a variance... we sqrt later
    '''
    N=1
    Q=5
    q_mu=tf.random_normal([Q,1],dtype=tf.float64)
    q_var=tf.random_normal([Q,Q],dtype=tf.float64)
    q_var=q_var+tf.transpose(q_var [1,0])+1e+1*np.eye(Q)
    K=q_var
    q_sqrt=tf.cholesky(q_var)
    q_sqrt=tf.expand_dims(q_sqrt,-1)
    num_latent=1
    s=tf.Session()
    s.run(tf.initialize_all_variables())
    '''
    """
    Compute the KL divergence from

          q(x) = N(q_mu, q_sqrt^2)
    to
          p(x) = N(0, K)

    We assume num_latent independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.

    q_mu is a matrix, each column contains a mean.

    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
        matrix of the covariance of q.

    K is a positive definite matrix: the covariance of p.

    num_latent is an integer: the number of independent distributions (equal to
        the columns of q_mu and the last dim of q_sqrt).

    q_sqrt=tf.cholesky(K)
    L = tf.cholesky(q_sq)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    KL +=   0.5 * tf.reduce_sum(
        tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.shape(q_sqrt)[0], tf.float64)

    Lq = tf.batch_matrix_band_part(q_sqrt, -1, 0)
    # Log determinant of q covariance:
    KL += -0.5*tf.reduce_sum(tf.log(tf.square(tf.diag_part(Lq))))
    LiLq = tf.matrix_triangular_solve(L, Lq, lower=True)
    KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term
    """
    V2 = tf.cholesky(K)
    V1 = tf.cholesky(q_sq)
    KL = h.Mul(tf.transpose(q_mu), tf.cholesky_solve(V2, q_mu))
    KL += tf.trace(tf.cholesky_solve(V2, q_sq))
    KL -= h.get_dim(K, 0)
    KL += tf.reduce_sum(2 * tf.log(tf.diag_part(V2)) - 2 * tf.log(tf.diag_part(V1)))
    return KL / 2
Author: blutooth, Project: dgp, Lines: 59, Source: maxKL.py
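For reference, the active lines of gauss_kl implement the standard closed form KL(N(m, S) ‖ N(0, K)) = ½ [ mᵀK⁻¹m + tr(K⁻¹S) − d + log|K| − log|S| ]. Below is a self-contained sketch of the same computation (my own restatement, dropping the repo's h.* helpers, assuming TensorFlow 1.x and a column-vector m of shape (d, 1)):

def gauss_kl_reference(m, S, K):
    # KL( N(m, S) || N(0, K) ) for a single d-dimensional Gaussian pair.
    d = tf.cast(tf.shape(K)[0], K.dtype)
    LK = tf.cholesky(K)
    LS = tf.cholesky(S)
    # Mahalanobis term m^T K^{-1} m via a triangular solve against chol(K).
    mahal = tf.reduce_sum(tf.square(tf.matrix_triangular_solve(LK, m, lower=True)))
    trace = tf.trace(tf.cholesky_solve(LK, S))
    logdet_K = 2.0 * tf.reduce_sum(tf.log(tf.diag_part(LK)))
    logdet_S = 2.0 * tf.reduce_sum(tf.log(tf.diag_part(LS)))
    return 0.5 * (mahal + trace - d + logdet_K - logdet_S)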

Example 9: log_det

def log_det(Z):
    # Symmetrise Z to guard against numerical asymmetry, then read the
    # log-determinant off the Cholesky factor:
    # log|Z| = 2 * sum(log(diag(chol(Z)))).
    Z = (Z + tf.transpose(Z)) / 2
    return 2 * tf.reduce_sum(tf.log(tf.diag_part(tf.cholesky(Z))))
Author: blutooth, Project: gp, Lines: 8, Source: helper.py
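A quick usage sketch (my own, assuming TensorFlow 1.x): the Cholesky route returns the same value as the naive determinant, but it stays in log space throughout, so it does not overflow the way tf.log(tf.matrix_determinant(Z)) can on large matrices:

import numpy as np
import tensorflow as tf

Z = np.array([[2., 0.5],
              [0.5, 1.]])
logdet_chol = 2 * tf.reduce_sum(tf.log(tf.diag_part(tf.cholesky(Z))))
logdet_naive = tf.log(tf.matrix_determinant(Z))

with tf.Session() as sess:
    print(sess.run([logdet_chol, logdet_naive]))  # both ≈ log(1.75) ≈ 0.5596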

Example 10: F_bound2_v2

def F_bound2_v2(y, S, Kmm, Knm, Kmnnm, Tr_Knn, sigma):
    # matrices to be used
    N = get_dim(y, 0)
    Kmm_chol = tf.cholesky(Kmm)
    Q_nn = tf.square(sigma) * np.eye(N) + Mul(Knm, tf.cholesky_solve(Kmm_chol, tf.transpose(Knm)))
    bound = -0.5 * (Tr_Knn - tf.trace(tf.cholesky_solve(Kmm_chol, Kmnnm))) / tf.square(sigma)
    bound += multivariate_normal(y, tf.zeros([N, 1], dtype=tf.float32), tf.cholesky(Q_nn))
    return bound
Author: blutooth, Project: gp, Lines: 8, Source: helper.py

Example 11: natural_to_meanvarsqrt

def natural_to_meanvarsqrt(nat_1, nat_2):
    var_sqrt_inv = tf.cholesky(-2 * nat_2)
    var_sqrt = _inverse_lower_triangular(var_sqrt_inv)
    S = tf.matmul(var_sqrt, var_sqrt, transpose_a=True)
    mu = tf.matmul(S, nat_1)
    # We need the decomposition of S as L L^T, not as L^T L,
    # hence we need another cholesky.
    return mu, tf.cholesky(S)
Author: sanket-kamthe, Project: GPflow, Lines: 8, Source: natgrad_optimizer.py
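For context, this relies on the standard Gaussian exponential-family identities (textbook results, not specific to GPflow): for q = N(mu, S), the natural parameters are nat_1 = S⁻¹ mu and nat_2 = −½ S⁻¹. Hence −2 · nat_2 = S⁻¹; its lower-triangular Cholesky factor M satisfies M Mᵀ = S⁻¹, so inverting it gives S = M⁻ᵀ M⁻¹ in Lᵀ L form, and the final tf.cholesky re-factorises S into the L Lᵀ form the surrounding code expects.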

Example 12: multivariate_gaussian_log_density

def multivariate_gaussian_log_density(x, mu,
                                      Sigma=None, L=None,
                                      prec=None, L_prec=None):
    """
    Assume X is a single vector described by a multivariate Gaussian
    distribution with x ~ N(mu, Sigma).

    We accept parameterization in terms of the covariance matrix or
    its cholesky decomposition L (more efficient if available), or the
    precision matrix or its cholesky decomposition L_prec.
    The latter is useful when representing a Gaussian in its natural 
    parameterization. Note that we still require the explicit mean mu
    (not the natural parameter prec*mu) since I'm too lazy to cover
    all the permutations of possible arguments (though this should be
    straightforward). 

    """
    s = extract_shape(x)
    try:
        n, = s
    except ValueError:
        n, m = s
        assert(m == 1)

    if L is None and Sigma is not None:
        L = tf.cholesky(Sigma)        
    if L_prec is None and prec is not None:
        L_prec = tf.cholesky(prec)
        
    if L is not None:
        neg_half_logdet = -tf.reduce_sum(tf.log(tf.diag_part(L)))
    else:
        assert(L_prec is not None)
        neg_half_logdet = tf.reduce_sum(tf.log(tf.diag_part(L_prec)))
        
    d = tf.reshape(x - mu, (n,1))
    if L is not None:
        alpha = tf.matrix_triangular_solve(L, d, lower=True)
        exponential_part= tf.reduce_sum(tf.square(alpha))
    elif prec is not None:
        d = tf.reshape(d, (n, 1))
        exponential_part = tf.reduce_sum(d * tf.matmul(prec, d))
    else:
        assert(L_prec is not None)
        d = tf.reshape(d, (1, n))
        alpha = tf.matmul(d, L_prec)
        exponential_part= tf.reduce_sum(tf.square(alpha))

    n_log2pi = n * 1.83787706641
    logp =  -0.5 * n_log2pi
    logp += neg_half_logdet
    logp += -0.5 * exponential_part
        
    return logp
Author: BenJamesbabala, Project: bayesflow, Lines: 54, Source: dists.py
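A hypothetical call sketch (the tensor names here are illustrative, not from the repo): since the function accepts any one of four parameterizations, a precomputed Cholesky factor can be passed in so tf.cholesky is not re-run inside:

# x, mu and Sigma are assumed to be pre-existing float tensors.
L_Sigma = tf.cholesky(Sigma)
logp_cov = multivariate_gaussian_log_density(x, mu, L=L_Sigma)
logp_prec = multivariate_gaussian_log_density(x, mu, prec=tf.matrix_inverse(Sigma))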

Example 13: Bound1

def Bound1(y, S, Kmm, Knm, Tr_Knn, sigma):
    # matrices to be used
    Kmm_chol = tf.cholesky(Kmm)
    sig_2 = tf.square(sigma)
    N = h.get_dim(y, 0)
    Q_nn = h.Mul(Knm, tf.cholesky_solve(Kmm_chol, tf.transpose(Knm)))
    Q_I_chol = tf.cholesky(sig_2 * np.eye(N) + Q_nn)
    bound = -0.5 * (Tr_Knn - tf.trace(Q_nn)) / sig_2
    bound += h.multivariate_normal(y, tf.zeros([N, 1], dtype=tf.float32), Q_I_chol)
    bound -= 0.5 * tf.reduce_sum(S) / sig_2 + 0.1 * 0.5 * tf.reduce_sum(tf.log(S))
    return bound
Author: blutooth, Project: gp, Lines: 11, Source: BayesianGPLVM.py

Example 14: _build_likelihood

    def _build_likelihood(self):
        """
        q_alpha, q_lambda are variational parameters, size N x R
        This method computes the variational lower bound on the likelihood,
        which is:
            E_{q(F)} [ \log p(Y|F) ] - KL[ q(F) || p(F)]
        with
            q(f) = N(f | K alpha + mean, [K^-1 + diag(square(lambda))]^-1) .
        """
        K = self.kern.K(self.X)
        K_alpha = tf.matmul(K, self.q_alpha)
        f_mean = K_alpha + self.mean_function(self.X)

        # compute the variance for each of the outputs
        I = tf.tile(tf.expand_dims(tf.eye(self.num_data, dtype=settings.float_type), 0),
                    [self.num_latent, 1, 1])
        A = I + tf.expand_dims(tf.transpose(self.q_lambda), 1) * \
            tf.expand_dims(tf.transpose(self.q_lambda), 2) * K
        L = tf.cholesky(A)
        Li = tf.matrix_triangular_solve(L, I)
        tmp = Li / tf.expand_dims(tf.transpose(self.q_lambda), 1)
        f_var = 1. / tf.square(self.q_lambda) - tf.transpose(tf.reduce_sum(tf.square(tmp), 1))

        # some statistics about A are used in the KL
        A_logdet = 2.0 * tf.reduce_sum(tf.log(tf.matrix_diag_part(L)))
        trAi = tf.reduce_sum(tf.square(Li))

        KL = 0.5 * (A_logdet + trAi - self.num_data * self.num_latent +
                    tf.reduce_sum(K_alpha * self.q_alpha))

        v_exp = self.likelihood.variational_expectations(f_mean, f_var, self.Y)
        return tf.reduce_sum(v_exp) - KL
Author: sanket-kamthe, Project: GPflow, Lines: 32, Source: vgp.py

Example 15: initialize

  def initialize(self, *args, **kwargs):
    # Store latent variables in a temporary attribute; MAP will
    # optimize `PointMass` random variables, which subsequently
    # optimizes mean parameters of the normal approximations.
    latent_vars_normal = self.latent_vars.copy()
    self.latent_vars = {z: PointMass(params=qz.loc)
                        for z, qz in six.iteritems(latent_vars_normal)}

    super(Laplace, self).initialize(*args, **kwargs)

    hessians = tf.hessians(self.loss, list(six.itervalues(self.latent_vars)))
    self.finalize_ops = []
    for z, hessian in zip(six.iterkeys(self.latent_vars), hessians):
      qz = latent_vars_normal[z]
      if isinstance(qz, (MultivariateNormalDiag, Normal)):
        scale_var = get_variables(qz.variance())[0]
        scale = 1.0 / tf.diag_part(hessian)
      else:  # qz is MultivariateNormalTriL
        scale_var = get_variables(qz.covariance())[0]
        scale = tf.matrix_inverse(tf.cholesky(hessian))

      self.finalize_ops.append(scale_var.assign(scale))

    self.latent_vars = latent_vars_normal.copy()
    del latent_vars_normal
Author: wujsAct, Project: edward, Lines: 25, Source: laplace.py


Note: The tensorflow.cholesky examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's license before redistributing or reusing the code. Do not repost without permission.