

Python tensorflow.matrix_transpose Function Code Examples

This article collects and summarizes typical usage examples of the tensorflow.matrix_transpose function in Python. If you are wondering what matrix_transpose does, how to call it, or what it looks like in real code, the curated examples below should help.


The following presents 15 code examples of the matrix_transpose function, sorted by popularity by default.
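Before diving into the examples, here is a minimal sketch of the op itself (assuming TensorFlow 1.x, where tf.matrix_transpose is available at the top level): it transposes only the last two dimensions of a tensor and leaves any leading batch dimensions untouched.

import numpy as np
import tensorflow as tf  # TensorFlow 1.x assumed; in 2.x the same op lives at tf.linalg.matrix_transpose

batch = tf.constant(np.arange(12.).reshape(2, 2, 3))  # shape (2, 2, 3)
transposed = tf.matrix_transpose(batch)               # only the last two dims are swapped

with tf.Session() as sess:
    print(sess.run(transposed).shape)  # (2, 3, 2)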

Example 1: _sample_conditional

def _sample_conditional(Xnew, feat, kern, f, *, full_cov=False, full_output_cov=False, q_sqrt=None, white=False, num_samples=None):
    """
    `sample_conditional` will return a sample from the conditional distribution.
    In most cases this means calculating the conditional mean m and variance v and then
    returning m + sqrt(v) * eps, with eps ~ N(0, 1).
    However, for some combinations of Mok and Mof, more efficient sampling routines exist.
    The dispatcher will make sure that we use the most efficient one.

    :return: samples, mean, cov
        samples has shape [num_samples, N, P] or [N, P] if num_samples is None
        mean and cov as for conditional()
    """
    if full_cov and full_output_cov:
        raise NotImplementedError("The combination of both full_cov and full_output_cov is not "
                                  "implemented for sample_conditional.")

    logger.debug("sample conditional: InducingFeature Kernel")
    mean, cov = conditional(Xnew, feat, kern, f, q_sqrt=q_sqrt, white=white,
                            full_cov=full_cov, full_output_cov=full_output_cov)
    if full_cov:
        # mean: N x P
        # cov: P x N x N
        mean = tf.matrix_transpose(mean)  # now P x N
        samples = _sample_mvn(mean, cov, 'full', num_samples=num_samples)  # (S x) P x N
        samples = tf.matrix_transpose(samples)  # now (S x) N x P

    else:
        cov_structure = "full" if full_output_cov else "diag"
        samples = _sample_mvn(mean, cov, cov_structure, num_samples=num_samples)  # [(S,), N, P]

    return samples, mean, cov
Developer: sanket-kamthe, Project: GPflow, Lines: 31, Source: conditionals.py
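As an aside, the m + sqrt(v) * eps step described in the docstring is ordinary reparameterised sampling from a Gaussian with diagonal covariance; a minimal NumPy sketch (illustrative only, not GPflow's _sample_mvn) looks like this:

import numpy as np

def sample_diag_gaussian(mean, var, num_samples):
    """Draw num_samples reparameterised samples; mean and var have shape [N, P]."""
    eps = np.random.randn(num_samples, *mean.shape)  # eps ~ N(0, 1)
    return mean + np.sqrt(var) * eps                 # broadcasts to [num_samples, N, P]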

Example 2: _quadrature_expectation

def _quadrature_expectation(p, obj1, feature1, obj2, feature2, num_gauss_hermite_points):
    """
    Handling of quadrature expectations for Markov Gaussians (useful for time series)
    Fallback method for missing analytic expectations wrt Markov Gaussians
    Nota Bene: obj1 is always associated with x_n, whereas obj2 always with x_{n+1}
               if one requires e.g. <x_{n+1} K_{x_n, Z}>_p(x_{n:n+1}), compute the
               transpose and then transpose the result of the expectation
    """
    num_gauss_hermite_points = 40 if num_gauss_hermite_points is None else num_gauss_hermite_points

    warnings.warn("Quadrature is used to calculate the expectation. This means that "
                  "an analytical implementations is not available for the given combination.")

    if obj2 is None:
        eval_func = lambda x: get_eval_func(obj1, feature1)(x)
        mu, cov = p.mu[:-1], p.cov[0, :-1]  # cross covariances are not needed
    elif obj1 is None:
        eval_func = lambda x: get_eval_func(obj2, feature2)(x)
        mu, cov = p.mu[1:], p.cov[0, 1:]  # cross covariances are not needed
    else:
        eval_func = lambda x: (get_eval_func(obj1, feature1, np.s_[:, :, None])(tf.split(x, 2, 1)[0]) *
                               get_eval_func(obj2, feature2, np.s_[:, None, :])(tf.split(x, 2, 1)[1]))
        mu = tf.concat((p.mu[:-1, :], p.mu[1:, :]), 1)  # Nx2D
        cov_top = tf.concat((p.cov[0, :-1, :, :], p.cov[1, :-1, :, :]), 2)  # NxDx2D
        cov_bottom = tf.concat((tf.matrix_transpose(p.cov[1, :-1, :, :]), p.cov[0, 1:, :, :]), 2)
        cov = tf.concat((cov_top, cov_bottom), 1)  # Nx2Dx2D

    return mvnquad(eval_func, mu, cov, num_gauss_hermite_points)
Developer: vincentadam87, Project: GPflow, Lines: 28, Source: expectations.py

Example 3: testNonBatchMatrix

 def testNonBatchMatrix(self):
   matrix = [[1, 2, 3], [4, 5, 6]]  # Shape (2, 3)
   expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)
   with self.test_session():
     transposed = tf.matrix_transpose(matrix)
     self.assertEqual((3, 2), transposed.get_shape())
     self.assertAllEqual(expected_transposed, transposed.eval())
Developer: Qstar, Project: tensorflow, Lines: 7, Source: array_ops_test.py

Example 4: testNonBatchMatrixDynamicallyDefined

 def testNonBatchMatrixDynamicallyDefined(self):
     matrix = [[1, 2, 3], [4, 5, 6]]  # Shape (2, 3)
     expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)
     with self.test_session():
         matrix_ph = tf.placeholder(tf.int32)
         transposed = tf.matrix_transpose(matrix_ph)
         self.assertAllEqual(expected_transposed, transposed.eval(feed_dict={matrix_ph: matrix}))
Developer: ppwwyyxx, Project: tensorflow, Lines: 7, Source: array_ops_test.py

Example 5: _dot

 def _dot(self, slist1, slist2, tf_embs):
     """
     Simple dot product between two vectors of embeddings.
     This returns a matrix of positive real numbers.
     """
     matlist1 = tf.gather(tf_embs, slist1, name='matlist1')
     matlist2 = tf.matrix_transpose(tf.gather(tf_embs, slist2, name='matlist2'))
     return tf.batch_matmul(matlist1, matlist2)
Developer: beckdaniel, Project: flakes, Lines: 8, Source: sk_tf_batch.py

Example 6: _expectation

def _expectation(p, kern, feat, mean, none, nghp=None):
    """
    Compute the expectation:
    expectation[n] = <K_{Z, x_n} m(x_n)>_p(x_n)
    or the equivalent for MarkovGaussian

    :return: NxMxQ
    """
    return tf.matrix_transpose(expectation(p, mean, (kern, feat), nghp=nghp))
Developer: vincentadam87, Project: GPflow, Lines: 9, Source: expectations.py

Example 7: _conditional

def _conditional(Xnew, feat, kern, f, *, full_cov=False, full_output_cov=False, q_sqrt=None, white=False):
    """
    Most efficient routine to project L independent latent GPs through a mixing matrix W.
    The mixing matrix is a member of the `SeparateMixedMok` and has shape P x L.

    The covariance matrices used to calculate the conditional have the following shape:
    - Kuu: L x M x M
    - Kuf: L x M x N
    - Kff: L x N or L x N x N

    Further reference
    -----------------
    - See `gpflow.conditionals._conditional` for a detailed explanation of
      conditional in the single-output case.
    - See the multioutput notebook for more information about the multioutput framework.

    """
    logger.debug("conditional: (MixedKernelSharedMof, MixedKernelSeparateMof), SeparateMixedMok")
    independent_cond = conditional.dispatch(object, SeparateIndependentMof, SeparateIndependentMok, object)
    gmu, gvar = independent_cond(Xnew, feat, kern, f, full_cov=full_cov, q_sqrt=q_sqrt,
                                 full_output_cov=False, white=white)  # N x L, L x N x N or N x L

    gmu = tf.matrix_transpose(gmu)  # L x N
    if not full_cov:
        gvar = tf.matrix_transpose(gvar)  # L x N (x N)

    Wgmu = tf.tensordot(gmu, kern.W, [[0], [1]])  # N x P

    if full_output_cov:
        Wt_expanded = tf.matrix_transpose(kern.W)[:, None, :]  # L x 1 x P
        if full_cov:
            Wt_expanded = tf.expand_dims(Wt_expanded, axis=-1)  # L x 1 x P x 1

        gvarW = tf.expand_dims(gvar, axis=2) * Wt_expanded  # L x N x P (x N)
        WgvarW = tf.tensordot(gvarW, kern.W, [[0], [1]])  # N x P (x N) x P
    else:
        if not full_cov:
            WgvarW = tf.tensordot(gvar, kern.W ** 2, [[0], [1]])  # N x P
        else:
            WgvarW = tf.tensordot(kern.W ** 2, gvar, [[1], [0]])  # P x N (x N)

    return Wgmu, WgvarW
Developer: sanket-kamthe, Project: GPflow, Lines: 42, Source: conditionals.py

Example 8: testBatchMatrix

 def testBatchMatrix(self):
   matrix_0 = [[1, 2, 3], [4, 5, 6]]
   matrix_0_t = [[1, 4], [2, 5], [3, 6]]
   matrix_1 = [[11, 22, 33], [44, 55, 66]]
   matrix_1_t = [[11, 44], [22, 55], [33, 66]]
   batch_matrix = [matrix_0, matrix_1]  # Shape (2, 2, 3)
   expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)
   with self.test_session():
     transposed = tf.matrix_transpose(batch_matrix)
     self.assertEqual((2, 3, 2), transposed.get_shape())
     self.assertAllEqual(expected_transposed, transposed.eval())
Developer: Qstar, Project: tensorflow, Lines: 11, Source: array_ops_test.py

Example 9: testBatchMatrixDynamicallyDefined

 def testBatchMatrixDynamicallyDefined(self):
     matrix_0 = [[1, 2, 3], [4, 5, 6]]
     matrix_0_t = [[1, 4], [2, 5], [3, 6]]
     matrix_1 = [[11, 22, 33], [44, 55, 66]]
     matrix_1_t = [[11, 44], [22, 55], [33, 66]]
     batch_matrix = [matrix_0, matrix_1]  # Shape (2, 2, 3)
     expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)
     with self.test_session():
         batch_matrix_ph = tf.placeholder(tf.int32)
         transposed = tf.matrix_transpose(batch_matrix_ph)
         self.assertAllEqual(expected_transposed, transposed.eval(feed_dict={batch_matrix_ph: batch_matrix}))
Developer: ppwwyyxx, Project: tensorflow, Lines: 11, Source: array_ops_test.py

Example 10: _scaled_square_dist

    def _scaled_square_dist(self, X, X2):
        """
        Returns ((X - X2ᵀ)/lengthscales)².
        Due to the implementation and floating-point imprecision, the
        result may actually be very slightly negative for entries very
        close to each other.
        """
        X = X / self.lengthscales
        Xs = tf.reduce_sum(tf.square(X), axis=-1, keepdims=True)

        if X2 is None:
            dist = -2 * tf.matmul(X, X, transpose_b=True)
            dist += Xs + tf.matrix_transpose(Xs)
            return dist

        X2 = X2 / self.lengthscales
        X2s = tf.reduce_sum(tf.square(X2), axis=-1, keepdims=True)
        dist = -2 * tf.matmul(X, X2, transpose_b=True)
        dist += Xs + tf.matrix_transpose(X2s)
        return dist
Developer: sanket-kamthe, Project: GPflow, Lines: 20, Source: kernels.py
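The distance computation above relies on the identity ||x - y||² = ||x||² + ||y||² - 2·xᵀy, with Xs + tf.matrix_transpose(X2s) supplying the two squared-norm terms via broadcasting. A small NumPy check of the same trick (shapes N x D and M x D are assumed purely for illustration):

import numpy as np

X, X2 = np.random.randn(5, 3), np.random.randn(4, 3)
Xs = np.sum(X ** 2, axis=-1, keepdims=True)    # N x 1
X2s = np.sum(X2 ** 2, axis=-1, keepdims=True)  # M x 1
dist = Xs + X2s.T - 2.0 * X @ X2.T             # N x M pairwise squared distances
naive = ((X[:, None, :] - X2[None, :, :]) ** 2).sum(-1)
assert np.allclose(dist, naive)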

Example 11: create

  def create(self,
             fixed_embeddings,
             linked_embeddings,
             context_tensor_arrays,
             attention_tensor,
             during_training,
             stride=None):
    """Requires |stride|; otherwise see base class."""
    check.NotNone(stride,
                  'BiaffineDigraphNetwork requires "stride" and must be called '
                  'in the bulk feature extractor component.')

    # TODO(googleuser): Add dropout during training.
    del during_training

    # Retrieve (possibly averaged) weights.
    weights_arc = self._component.get_variable('weights_arc')
    weights_source = self._component.get_variable('weights_source')
    root = self._component.get_variable('root')

    # Extract the source and target token activations.  Use |stride| to collapse
    # batch and beam into a single dimension.
    sources = network_units.lookup_named_tensor('sources', linked_embeddings)
    targets = network_units.lookup_named_tensor('targets', linked_embeddings)
    source_tokens_bxnxs = tf.reshape(sources.tensor,
                                     [stride, -1, self._source_dim])
    target_tokens_bxnxt = tf.reshape(targets.tensor,
                                     [stride, -1, self._target_dim])
    num_tokens = tf.shape(source_tokens_bxnxs)[1]

    # Compute the arc, source, and root potentials.
    arcs_bxnxn = digraph_ops.ArcPotentialsFromTokens(
        source_tokens_bxnxs, target_tokens_bxnxt, weights_arc)
    sources_bxnxn = digraph_ops.ArcSourcePotentialsFromTokens(
        source_tokens_bxnxs, weights_source)
    roots_bxn = digraph_ops.RootPotentialsFromTokens(
        root, target_tokens_bxnxt, weights_arc, weights_source)

    # Combine them into a single matrix with the roots on the diagonal.
    adjacency_bxnxn = digraph_ops.CombineArcAndRootPotentials(
        arcs_bxnxn + sources_bxnxn, roots_bxn)

    # The adjacency matrix currently has sources on rows and targets on columns,
    # but we want targets on rows so that maximizing within a row corresponds to
    # selecting sources for a given target.
    adjacency_bxnxn = tf.matrix_transpose(adjacency_bxnxn)

    return [tf.reshape(adjacency_bxnxn, [-1, num_tokens])]
Developer: ALISCIFP, Project: models, Lines: 48, Source: biaffine_units.py

Example 12: _updated_mat

  def _updated_mat(self, mat, v, diag):
    # Get dense matrix defined by its square root, which is an update of `mat`:
    # A = (mat + v D v^T) (mat + v D v^T)^T
    # D is the diagonal matrix with `diag` on the diagonal.

    # If diag is None, then it defaults to the identity matrix, so DV^T = V^T
    if diag is None:
      diag_vt = tf.matrix_transpose(v)
    else:
      diag_mat = tf.matrix_diag(diag)
      diag_vt = tf.matmul(diag_mat, v, adjoint_b=True)

    v_diag_vt = tf.matmul(v, diag_vt)
    sqrt = mat + v_diag_vt
    a = tf.matmul(sqrt, sqrt, adjoint_b=True)
    return a.eval()
Developer: ComeOnGetMe, Project: tensorflow, Lines: 16, Source: operator_pd_vdvt_update_test.py

Example 13: _arccosine

 def _arccosine(self, slist1, slist2, tf_embs):
     """
     Uses an arccosine kernel of degree 0 to calculate
     the similarity matrix between two vectors of embeddings. 
     This is just cosine similarity projected into the [0,1] interval.
     """
     dot = self._dot(slist1, slist2, tf_embs)
     # This calculation corresponds to an arc-cosine with 
     # degree 0. It can be interpreted as cosine
     # similarity but projected into a [0,1] interval.
     # TODO: arc-cosine with degree 1.
     tf_pi = tf.constant(np.pi, dtype=tf.float64)
     tf_norms = tf.constant(self.norms, dtype=tf.float64, name='norms')
     normlist1 = tf.gather(tf_norms, slist1, name='normlist1')
     normlist2 = tf.matrix_transpose(tf.gather(tf_norms, slist2, name='normlist2'))
     norms = tf.batch_matmul(normlist1, normlist2)
     cosine = tf.clip_by_value(tf.truediv(dot, norms), -1, 1)
     angle = tf.acos(cosine)
     angle = tf.select(tf.is_nan(angle), tf.ones_like(angle) * tf_pi, angle)
     return 1 - (angle / tf_pi)
Developer: beckdaniel, Project: flakes, Lines: 20, Source: sk_tf_batch.py
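The mapping implemented here is the degree-0 arc-cosine similarity k(x, y) = 1 - θ/π with θ = arccos(cosine(x, y)), which lands in [0, 1]. A minimal single-pair NumPy version (illustrative only; it ignores the batched gathers and NaN handling above):

import numpy as np

def arccosine_deg0(x, y):
    """Cosine similarity mapped into [0, 1] via 1 - arccos(cos)/pi."""
    cosine = np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))
    return 1.0 - np.arccos(np.clip(cosine, -1.0, 1.0)) / np.pi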

Example 14: K

    def K(self, X, X2=None, presliced=False):
        if not presliced:
            X, X2 = self._slice(X, X2)

        X_denominator = tf.sqrt(self._weighted_product(X))
        if X2 is None:
            X2 = X
            X2_denominator = X_denominator
        else:
            X2_denominator = tf.sqrt(self._weighted_product(X2))

        numerator = self._weighted_product(X, X2)
        X_denominator = tf.expand_dims(X_denominator, -1)
        X2_denominator = tf.matrix_transpose(tf.expand_dims(X2_denominator, -1))
        cos_theta = numerator / X_denominator / X2_denominator
        jitter = 1e-15
        theta = tf.acos(jitter + (1 - 2 * jitter) * cos_theta)

        return self.variance * (1. / np.pi) * self._J(theta) * \
               X_denominator ** self.order * \
               X2_denominator ** self.order
Developer: sanket-kamthe, Project: GPflow, Lines: 21, Source: kernels.py

Example 15: _validate_correlationness

 def _validate_correlationness(self, x):
   if not self.validate_args:
     return x
   checks = [
       tf.assert_less_equal(
           tf.cast(-1., dtype=x.dtype.base_dtype),
           x,
           message='Correlations must be >= -1.'),
       tf.assert_less_equal(
           x,
           tf.cast(1., x.dtype.base_dtype),
           message='Correlations must be <= 1.'),
       tf.assert_near(
           tf.matrix_diag_part(x),
           tf.cast(1., x.dtype.base_dtype),
           message='Self-correlations must be = 1.'),
       tf.assert_near(
           x, tf.matrix_transpose(x),
           message='Correlation matrices must be symmetric')
   ]
   with tf.control_dependencies(checks):
     return tf.identity(x)
Developer: asudomoeva, Project: probability, Lines: 22, Source: lkj.py


Note: The tensorflow.matrix_transpose examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.