Python tensor.diag Function Code Examples

This article collects typical usage examples of the theano.tensor.diag function in Python. If you have been wondering what diag does, how to call it, or what real-world usage looks like, the curated code examples below should help.


Fifteen code examples of the diag function are shown below, sorted by popularity by default.
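
Before the examples themselves, here is a minimal sketch (ours, not taken from any of the projects below) of the two behaviors of theano.tensor.diag: given a vector it allocates a diagonal matrix, and given a matrix it extracts the diagonal as a vector.

import numpy as np
import theano
import theano.tensor as T

v = T.dvector('v')
M = T.dmatrix('M')

# vector -> diagonal matrix
make_diag = theano.function([v], T.diag(v))
# matrix -> vector of diagonal entries
take_diag = theano.function([M], T.diag(M))

print(make_diag(np.array([1., 2., 3.])))       # 3x3 matrix with 1, 2, 3 on the diagonal
print(take_diag(np.arange(9.).reshape(3, 3)))  # [0. 4. 8.]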

Example 1: retr

    def retr(self, X, Z, t=None):
        if t is None:
            t = 1.0
        Qu, Ru = tensor.nlinalg.qr(Z.Up)

        # we need an RQ decomposition here; build it from the QR
        # decomposition of the flipped, transposed matrix
        Qv, Rv = tensor.nlinalg.qr(Z.Vp[::-1].T)
        Rv = Rv.T[::-1]
        Rv = Rv[:, ::-1]  # symbolic tensors do not support in-place slice assignment
        Qv = Qv.T[::-1]

        # now we have rq decomposition (Rv @ Qv = Z.Vp)
        #Rv, Qv = rq(Z.Vp, mode='economic')


        zero_block = tensor.zeros((Ru.shape[0], Rv.shape[1]))
        block_mat = tensor.stack(
            (
                tensor.stack((X.S + t * Z.M, t * Rv), 1).reshape((Rv.shape[0], -1)),
                tensor.stack((t * Ru, zero_block), 1).reshape((Ru.shape[0], -1))
            )
        ).reshape((-1, Ru.shape[1] + Rv.shape[1]))

        Ut, St, Vt = tensor.nlinalg.svd(block_mat, full_matrices=False)

        U = tensor.stack((X.U, Qu), 1).reshape((Qu.shape[0], -1)).dot(Ut[:, :self._k])
        V = Vt[:self._k, :].dot(tensor.stack((X.V, Qv), 0).reshape((-1, Qv.shape[1])))
        # add some machinery eps to get a slightly perturbed element of a manifold
        # even if we have some zeros in S
        S = tensor.diag(St[:self._k]) + tensor.diag(np.spacing(1) * tensor.ones(self._k))
        return ManifoldElementShared.from_vars((U, S, V), shape=(self._m, self._n), r=self._k)
Author: Nehoroshiy, Project: theano_manifold, Lines: 31, Source: fixed_rank.py
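
The retr above builds an RQ decomposition out of an ordinary QR by flipping rows and columns. A NumPy-only sketch of the trick (ours, not from the project), checking that the flipped factors really give Rv @ Qv == Vp with Rv upper triangular:

import numpy as np

Vp = np.random.randn(3, 5)

# QR of the row-reversed transpose ...
Q, R = np.linalg.qr(Vp[::-1].T)
# ... then undo the reversals to recover the RQ factors
Rv = R.T[::-1]
Rv = Rv[:, ::-1]
Qv = Q.T[::-1]

assert np.allclose(Rv.dot(Qv), Vp)    # RQ decomposition: Rv @ Qv == Vp
assert np.allclose(np.triu(Rv), Rv)   # Rv is upper triangular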

Example 2: L_op

    def L_op(self, inputs, outputs, gradients):
        # Modified from theano/tensor/slinalg.py
        # No handling for on_error = 'nan'
        dz = gradients[0]
        chol_x = outputs[0]

        # this is for nan mode
        #
        # ok = ~tensor.any(tensor.isnan(chol_x))
        # chol_x = tensor.switch(ok, chol_x, 1)
        # dz = tensor.switch(ok, dz, 1)

        # deal with upper triangular by converting to lower triangular
        if not self.lower:
            chol_x = chol_x.T
            dz = dz.T

        def tril_and_halve_diagonal(mtx):
            """Extracts lower triangle of square matrix and halves diagonal."""
            return tensor.tril(mtx) - tensor.diag(tensor.diagonal(mtx) / 2.)

        def conjugate_solve_triangular(outer, inner):
            """Computes L^{-T} P L^{-1} for lower-triangular L."""
            return gpu_solve_upper_triangular(
                outer.T, gpu_solve_upper_triangular(outer.T, inner.T).T)

        s = conjugate_solve_triangular(
            chol_x, tril_and_halve_diagonal(chol_x.T.dot(dz)))

        if self.lower:
            grad = tensor.tril(s + s.T) - tensor.diag(tensor.diagonal(s))
        else:
            grad = tensor.triu(s + s.T) - tensor.diag(tensor.diagonal(s))

        return [grad]
Author: Theano, Project: Theano, Lines: 35, Source: linalg.py
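
The tril_and_halve_diagonal helper implements the "take the lower triangle and halve the diagonal" operator from the Cholesky gradient derivation cited in Example 12 below. A NumPy sketch (ours) of what it computes on a concrete matrix:

import numpy as np

def tril_and_halve_diagonal(mtx):
    """Extracts lower triangle of square matrix and halves diagonal."""
    return np.tril(mtx) - np.diag(np.diagonal(mtx) / 2.)

A = np.arange(9.).reshape(3, 3)
print(tril_and_halve_diagonal(A))
# [[ 0.  0.  0.]
#  [ 3.  2.  0.]
#  [ 6.  7.  4.]]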

Example 3: retr

    def retr(self, X, Z, t=None):
        U, S, V = X
        Up, M, Vp = Z
        if t is None:
            t = 1.0
        Qu, Ru = tensor.nlinalg.qr(Up)

        # we need rq decomposition here
        Qv, Rv = tensor.nlinalg.qr(Vp[::-1].T)
        Rv = Rv.T[::-1]
        Rv = Rv[:, ::-1]
        Qv = Qv.T[::-1]

        # now we have rq decomposition (Rv @ Qv = Z.Vp)
        #Rv, Qv = rq(Z.Vp, mode='economic')


        zero_block = tensor.zeros((Ru.shape[0], Rv.shape[1]))
        block_mat = tensor.stack(
            (
                tensor.stack((S + t * M, t * Rv), 1).reshape((Rv.shape[0], -1)),
                tensor.stack((t * Ru, zero_block), 1).reshape((Ru.shape[0], -1))
            )
        ).reshape((-1, Ru.shape[1] + Rv.shape[1]))

        Ut, St, Vt = tensor.nlinalg.svd(block_mat, full_matrices=False)

        U_res = tensor.stack((U, Qu), 1).reshape((Qu.shape[0], -1)).dot(Ut[:, :self._k])
        V_res = Vt[:self._k, :].dot(tensor.stack((V, Qv), 0).reshape((-1, Qv.shape[1])))
        # add some machinery eps to get a slightly perturbed element of a manifold
        # even if we have some zeros in S
        S_res = tensor.diag(St[:self._k]) + tensor.diag(np.spacing(1) * tensor.ones(self._k))
        return (U_res, S_res, V_res)
Author: Nehoroshiy, Project: theano_manifold, Lines: 33, Source: fixed_rank_splitted.py

Example 4: __call__

    def __call__(self, f):
        """
        Compute the following function:
            E(f) = ||f_l - y_l||^2 + mu f^T L f + mu eps ||f||^2,

        :param f: Theano tensor
            Vector of N continuous elements.
        :return: Theano tensor
            Energy (cost) of the vector f.
        """
        # Compute the un-normalized graph Laplacian: L = D - W
        D = T.diag(self.W.sum(axis=0))
        L = D - self.W

        # Compute the label consistency
        S = T.diag(self.L)
        El = (f - self.y).T.dot(S.dot(f - self.y))

        # Compute the smoothness along the similarity graph
        I = T.eye(self.L.shape[0])
        Es = f.T.dot(L.dot(f)) + self.eps * f.T.dot(I.dot(f))

        # Compute the whole cost function
        E = El + self.mu * Es

        return E
Author: pminervini, Project: knowledge-propagation, Lines: 26, Source: model.py
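
A standalone usage sketch (ours; the shared-variable setup stands in for the class attributes self.W, self.L, self.y, self.mu and self.eps used above):

import numpy as np
import theano
import theano.tensor as T

W = theano.shared(np.array([[0., 1.], [1., 0.]]))   # similarity graph
L_mask = theano.shared(np.array([1., 0.]))          # only the first node is labeled
y = theano.shared(np.array([1., 0.]))               # targets
mu, eps = 0.5, 1e-3

f = T.dvector('f')
D = T.diag(W.sum(axis=0))
Lap = D - W                                         # un-normalized graph Laplacian
S = T.diag(L_mask)
El = (f - y).T.dot(S.dot(f - y))                    # label consistency
I = T.eye(L_mask.shape[0])
Es = f.T.dot(Lap.dot(f)) + eps * f.T.dot(I.dot(f))  # smoothness + ridge
energy = theano.function([f], El + mu * Es)

print(energy(np.array([1., 0.9])))                  # a smooth labeling gives low energy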

Example 5: SampleKsi

 def SampleKsi(d, u, mu, eps):  # icml14SBP(20)
     dn = 1.0/d
     uDnu = T.sum(u*u*dn)
     coeff = ( 1-1.0/T.sqrt(1.0+uDnu) ) / (uDnu+SMALLNUM)
     u = u.reshape((u.shape[0],1))
     R = T.diag(T.sqrt(dn)) - coeff*T.dot( T.dot(T.diag(dn),T.dot(u,u.T)), T.diag(T.sqrt(dn)) )
     return mu + T.dot(R,eps)
Author: sshidy, Project: SBP-DLGM, Lines: 7, Source: mlp.py
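
SampleKsi draws mu + R.dot(eps), where R is a square root of (D + u u^T)^{-1} (eq. (20) referenced by the icml14SBP comment above). By the Sherman-Morrison formula, R.dot(R.T) should reproduce that inverse exactly; a NumPy check (ours, with the SMALLNUM stabilizer dropped):

import numpy as np

d = np.random.rand(4) + 0.5    # positive diagonal
u = np.random.randn(4)

dn = 1.0 / d
uDnu = np.sum(u * u * dn)
coeff = (1 - 1.0 / np.sqrt(1.0 + uDnu)) / uDnu
uc = u.reshape(-1, 1)
R = np.diag(np.sqrt(dn)) - coeff * np.diag(dn).dot(uc.dot(uc.T)).dot(np.diag(np.sqrt(dn)))

assert np.allclose(R.dot(R.T), np.linalg.inv(np.diag(d) + uc.dot(uc.T)))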

Example 6: ehess2rhess

    def ehess2rhess(self, X, egrad, ehess, H):
        # Euclidean part
        rhess = self.proj(X, ehess)
        Sinv = tensor.diag(1.0 / tensor.diag(X.S))

        # Curvature part
        T = self.apply_ambient(egrad, H.Vp.T).dot(Sinv)
        rhess.Up += (T - X.U.dot(X.U.T.dot(T)))
        T = self.apply_ambient_transpose(egrad, H.Up).dot(Sinv)
        rhess.Vp += (T - X.V.T.dot(X.V.dot(T))).T
        return rhess
Author: Nehoroshiy, Project: theano_manifold, Lines: 11, Source: old_fixed_rank.py

Example 7: _global_error

    def _global_error(self, targetM, i, lastM):
        mask = T.neq(self._y[self._set[:, 1]], self._y[self._set[:, 2]])
        f = T.nnet.sigmoid # T.tanh 
        g = lambda x, y: x*(1-y) #lambda x: T.maximum(x, 0)
        # g(lst_prediction - cur_prediction) 
        # f(T.diag(lossil - lossij))

        if i == 0:
            # pull_error for global 0
            pull_error = 0.
            ivectors = self._stackx[:, i, :][self._neighborpairs[:, 0]]
            jvectors = self._stackx[:, i, :][self._neighborpairs[:, 1]]
            diffv = ivectors - jvectors
            pull_error = linalg.trace(diffv.dot(targetM).dot(diffv.T))
        else:
            ivectors = self._stackx[:, i, :][self._neighborpairs[:, 0]]
            jvectors = self._stackx[:, i, :][self._neighborpairs[:, 1]]
            diffv1 = ivectors - jvectors
            distMcur = diffv1.dot(targetM).dot(diffv1.T)
#           ivectors = self._stackx[:, i-1, :][self._neighborpairs[:, 0]]
#           jvectors = self._stackx[:, i-1, :][self._neighborpairs[:, 1]]
#           diffv2 = ivectors - jvectors
#           distMlast = diffv2.dot(lastM).dot(diffv2.T)
            pull_error = linalg.trace(T.maximum(distMcur, 0))


        push_error = 0.0
        ivectors = self._stackx[:, i, :][self._set[:, 0]]
        jvectors = self._stackx[:, i, :][self._set[:, 1]]
        lvectors = self._stackx[:, i, :][self._set[:, 2]]
        diffij = ivectors - jvectors
        diffil = ivectors - lvectors
        lossij = diffij.dot(targetM).dot(diffij.T)
        lossil = diffil.dot(targetM).dot(diffil.T)
        #cur_prediction = T.diag(lossij - lossil)
        cur_prediction = f(T.diag(lossil - lossij))

        ivectors = self._stackx[:, i-1, :][self._set[:, 0]]
        jvectors = self._stackx[:, i-1, :][self._set[:, 1]]
        lvectors = self._stackx[:, i-1, :][self._set[:, 2]]
        diffij = ivectors - jvectors
        diffil = ivectors - lvectors
        if i == 0:
            lossij = diffij.dot(diffij.T)
            lossil = diffil.dot(diffil.T)
        else:
            lossij = diffij.dot(lastM).dot(diffij.T)
            lossil = diffil.dot(lastM).dot(diffil.T)
        lst_prediction = f(T.diag(lossil - lossij))
        push_error = T.sum(mask*(g(lst_prediction, cur_prediction)))


        return pull_error, push_error 
Author: PiscesDream, Project: Lab_MMAPM, Lines: 53, Source: MLMNN.sbs.py

Example 8: __call__

    def __call__(self, A, b, inference=False):
        dA = T.diagonal(A)
        D = T.diag(dA)
        R = A - D

        iD = T.diag(1.0 / dA)

        x = T.zeros_like(b)
        for i in range(self.iterations):
            x = iD.dot(b - R.dot(x))

        return x
Author: pminervini, Project: knowledge-propagation, Lines: 12, Source: linearsystem.py
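
A standalone sketch (ours) of the same fixed-sweep Jacobi iteration, compiled and checked against a direct solve. The Python loop unrolls into the Theano graph; it converges here because the test matrix is strictly diagonally dominant:

import numpy as np
import theano
import theano.tensor as T

A = T.dmatrix('A')
b = T.dvector('b')

dA = T.diagonal(A)
R = A - T.diag(dA)
iD = T.diag(1.0 / dA)

x = T.zeros_like(b)
for _ in range(25):              # fixed iteration count, as in the class above
    x = iD.dot(b - R.dot(x))

jacobi = theano.function([A, b], x)

A_val = np.array([[4., 1.], [2., 5.]])   # strictly diagonally dominant
b_val = np.array([1., 2.])
assert np.allclose(jacobi(A_val, b_val), np.linalg.solve(A_val, b_val))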

Example 9: from_partial_old

    def from_partial_old(self, X, dX):
        eps = 1e-10  # np.spacing(1)
        U, S, V = X
        dU, dS, dV = dX
        S = tensor.diag(S)
        S_pinv = tensor.switch(tensor.gt(abs(S), eps), 1.0 / S, 0.0)
        S_pinv = tensor.diag(S_pinv)
        ZV = dU.dot(S_pinv)
        UtZV = dS
        ZtU = S_pinv.dot(dV)

        Zproj = (ZV - U.dot(UtZV), UtZV, ZtU - (UtZV.dot(V)))
        return Zproj
Author: Nehoroshiy, Project: kron_layer_lasagne, Lines: 13, Source: fixed_rank_embeeded.py

Example 10: diagCholInvLogDet_fromDiag

def diagCholInvLogDet_fromDiag(diag_vec, name):

    diag_mat = T.diag(diag_vec.flatten())
    inv = T.diag(1.0 / diag_vec.flatten())
    chol = T.diag(T.sqrt(diag_vec.flatten()))
    logDet = T.sum(T.log(diag_vec.flatten()))  # scalar

    diag_mat.name = name
    chol.name = "c" + name
    inv.name = "i" + name
    logDet.name = "logDet" + name

    return (diag_mat, chol, inv, logDet)
Author: roryjbeard, Project: GP-LVM-Conditional-MF, Lines: 13, Source: utils.py
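
A quick consistency check (ours; assumes the function above is in scope) that the four returned quantities agree with each other:

import numpy as np
import theano
import theano.tensor as T

v = T.dvector('v')
diag_mat, chol, inv, logDet = diagCholInvLogDet_fromDiag(v, 'K')
f = theano.function([v], [diag_mat, chol, inv, logDet])

D, C, iD, ld = f(np.array([1., 4., 9.]))
assert np.allclose(C.dot(C.T), D)                  # chol * chol^T == diag_mat
assert np.allclose(iD.dot(D), np.eye(3))           # inv is the inverse
assert np.allclose(ld, np.log(np.linalg.det(D)))   # logDet matches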

Example 11: diagCholInvLogDet_fromLogDiag

def diagCholInvLogDet_fromLogDiag(logdiag, name):

    diag = T.diag(T.exp(logdiag.flatten()))
    inv = T.diag(T.exp(-logdiag.flatten()))
    chol = T.diag(T.exp(0.5 * logdiag.flatten()))
    logDet = T.sum(logdiag)  # scalar

    diag.name = name
    chol.name = "c" + name
    inv.name = "i" + name
    logDet.name = "logDet" + name

    return (diag, chol, inv, logDet)
Author: roryjbeard, Project: GP-LVM-Conditional-MF, Lines: 13, Source: utils.py

Example 12: grad

    def grad(self, inputs, gradients):
        """
        Cholesky decomposition reverse-mode gradient update.

        Symbolic expression for reverse-mode Cholesky gradient taken from [0]_

        References
        ----------
        .. [0] I. Murray, "Differentiation of the Cholesky decomposition",
           http://arxiv.org/abs/1602.07527

        """

        x = inputs[0]
        dz = gradients[0]
        chol_x = self(x)

        # Replace the cholesky decomposition with 1 if there are nans
        # or solve_upper_triangular will throw a ValueError.
        if self.on_error == 'nan':
            ok = ~tensor.any(tensor.isnan(chol_x))
            chol_x = tensor.switch(ok, chol_x, 1)
            dz = tensor.switch(ok, dz, 1)

        # deal with upper triangular by converting to lower triangular
        if not self.lower:
            chol_x = chol_x.T
            dz = dz.T

        def tril_and_halve_diagonal(mtx):
            """Extracts lower triangle of square matrix and halves diagonal."""
            return tensor.tril(mtx) - tensor.diag(tensor.diagonal(mtx) / 2.)

        def conjugate_solve_triangular(outer, inner):
            """Computes L^{-T} P L^{-1} for lower-triangular L."""
            return solve_upper_triangular(
                outer.T, solve_upper_triangular(outer.T, inner.T).T)

        s = conjugate_solve_triangular(
            chol_x, tril_and_halve_diagonal(chol_x.T.dot(dz)))

        if self.lower:
            grad = tensor.tril(s + s.T) - tensor.diag(tensor.diagonal(s))
        else:
            grad = tensor.triu(s + s.T) - tensor.diag(tensor.diagonal(s))

        if self.on_error == 'nan':
            return [tensor.switch(ok, grad, np.nan)]
        else:
            return [grad]
Author: HapeMask, Project: Theano, Lines: 50, Source: slinalg.py
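
The symbolic expression can be checked against finite differences with Theano's built-in gradient verifier. A minimal sketch (ours), using a well-conditioned SPD input so the perturbed matrices stay positive definite:

import numpy as np
import theano
from theano.tensor.slinalg import cholesky

rng = np.random.RandomState(0)
B = rng.randn(4, 4)
spd = B.dot(B.T) + 4 * np.eye(4)   # well-conditioned SPD matrix

# compares the symbolic Cholesky gradient with a finite-difference estimate
theano.gradient.verify_grad(cholesky, [spd], rng=rng)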

Example 13: recurrence

 def recurrence(x_t, h_tm1, c_tm1):
     i_t = TT.nnet.sigmoid(TT.dot(x_t, W_xi) +
                           TT.dot(h_tm1, W_hi) +
                           TT.dot(c_tm1, TT.diag(W_ci)) + b_i)
     f_t = TT.nnet.sigmoid(TT.dot(x_t, W_xf) +
                           TT.dot(h_tm1, W_hf) +
                           TT.dot(c_tm1, TT.diag(W_cf)) + b_f)
     c_t = f_t * c_tm1 + i_t * TT.tanh(TT.dot(x_t, W_xc) +
                                       TT.dot(h_tm1, W_hc) + b_c)
     o_t = TT.nnet.sigmoid(TT.dot(x_t, W_xo) +
                           TT.dot(h_tm1, W_ho) +
                           TT.dot(c_t, TT.diag(W_co)) + b_o)
     h_t = o_t * TT.tanh(c_t)
     return h_t, c_t
Author: RuinCakeLie, Project: theano-nets, Lines: 14, Source: recurrent.py
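
The peephole weights W_ci, W_cf and W_co here are vectors, so TT.dot(c_tm1, TT.diag(W_ci)) is just an elementwise product with the cell state (peephole connections are diagonal). A quick sketch (ours) of the equivalence:

import numpy as np
import theano
import theano.tensor as TT

c = TT.dvector('c')
w = TT.dvector('w')
f = theano.function([c, w], [TT.dot(c, TT.diag(w)), c * w])

a, b = f(np.array([1., 2., 3.]), np.array([0.1, 0.2, 0.3]))
assert np.allclose(a, b)   # dot with diag(w) == elementwise multiply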

Example 14: log_p_y_I_zA

    def log_p_y_I_zA(self):

        sum_y_outers = T.sum(self.Y**2)
        sum_z_IBP_mean_phi_y = T.sum( T.dot( (T.dot(self.phi_IBP, self.Y.T)).T,self.z_IBP_mean ) )
        # sum_z_IBP_mean_phi_outer = T.tril(T.dot(z_IBP_mean.T, z_IBP_mean)) * T.tril()
        # sum_z_IBP_mean_phi_Phi = T.sum( T.dot(z_IBP_mean.T, (self.Phi_traces+T.sum(self.phi_IBP**2, 1)) )  )
        sum_2ndOrder_term = T.sum( T.dot(self.z_IBP_samp.T, T.dot(T.dot(self.phi_IBP, self.phi_IBP.T)
                          + T.diag(T.diag(self.get_tensor_traces_scan(self.Phi_IBP))), self.z_IBP_samp)) )

        term = -0.5*self.D*self.B*(log2pi*self.sigma_y**2) \
             -0.5*(self.sigma_y**-2)*(sum_y_outers -2*sum_z_IBP_mean_phi_y \
                    + sum_2ndOrder_term)

        return term
Author: roryjbeard, Project: GP-LVM-Conditional-MF, Lines: 14, Source: IBP_factor_model.py

Example 15: get_output_for

 def get_output_for(self, input, **kwargs):
     xin_shape = input.shape
     if input.ndim > 2:
         # if the input has more than two dimensions, flatten it into a
         # batch of feature vectors.
         input = input.flatten(2)
     activation = T.zeros((input.shape[0], self.shape1[1] * self.shape2[1]))
     s = T.diag(T.sqrt(T.diag(self.S)))
     u = self.U.dot(s)
     w = s.dot(self.V)
     for i in range(self.manifold._k):
         activation += apply_mat_to_kron(input,
                             u[:, i].reshape((self.shape1[::-1])).T,
                             w[i, :].reshape((self.shape2[::-1])).T)
     return activation
Author: Nehoroshiy, Project: kron_layer_lasagne, Lines: 15, Source: kron_layer.py
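
The expression T.diag(T.sqrt(T.diag(self.S))) chains both behaviors of diag: the inner call extracts the diagonal of the (diagonal) matrix S, sqrt takes elementwise roots, and the outer call rebuilds the matrix, yielding a matrix square root of S so that u.dot(w) recovers U.dot(S).dot(V). A minimal sketch (ours) of the idiom:

import numpy as np
import theano
import theano.tensor as T

S = T.dmatrix('S')
sqrt_S = theano.function([S], T.diag(T.sqrt(T.diag(S))))

root = sqrt_S(np.diag([1., 4., 9.]))
assert np.allclose(root.dot(root), np.diag([1., 4., 9.]))   # matrix sqrt of diagonal S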


Note: the theano.tensor.diag examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's License before redistributing or reusing the code. Do not republish without permission.