

Python math_ops.batch_matmul Function Code Examples

This article collects typical usage examples of the Python function `tensorflow.python.ops.math_ops.batch_matmul`. If you are wondering exactly how `batch_matmul` is used, or are looking for concrete examples of it in real code, the hand-picked samples below should help.


Fifteen code examples of the `batch_matmul` function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
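
Before the examples, here is a minimal sketch of what `batch_matmul` computes, using NumPy as a stand-in (an assumption for illustration only; in the TensorFlow versions these snippets target, the op's signature is roughly `batch_matmul(x, y, adj_x=False, adj_y=False)`, and newer TensorFlow releases fold the same behaviour into `tf.matmul`):

import numpy as np

# Two stacks of matrices that share a leading batch dimension.
x = np.random.randn(4, 2, 3)   # batch of four 2x3 matrices
y = np.random.randn(4, 3, 5)   # batch of four 3x5 matrices

# batch_matmul(x, y): multiply matching slices along the batch dimension.
out = np.matmul(x, y)          # shape (4, 2, 5)

# adj_x=True / adj_y=True adjoint (conjugate-transpose) the last two
# dimensions of the corresponding argument before multiplying.
xt = np.random.randn(4, 3, 2)
out_adj = np.matmul(np.conj(np.swapaxes(xt, -1, -2)), y)  # like batch_matmul(xt, y, adj_x=True)

print(out.shape, out_adj.shape)   # (4, 2, 5) (4, 2, 5)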

Example 1: _batch_sqrt_solve

  def _batch_sqrt_solve(self, rhs):
    # Recall the square root of this operator is M + VDV^T.
    # The Woodbury formula gives:
    # (M + VDV^T)^{-1}
    # = M^{-1} - M^{-1} V (D^{-1} + V^T M^{-1} V)^{-1} V^T M^{-1}
    # = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
    # where C is the capacitance matrix.
    m = self._operator
    v = self._v
    cchol = self._chol_capacitance(batch_mode=True)

    # The operators will use batch/singleton mode automatically.  We don't
    # override.
    # M^{-1} rhs
    minv_rhs = m.solve(rhs)
    # V^T M^{-1} rhs
    vt_minv_rhs = math_ops.batch_matmul(v, minv_rhs, adj_x=True)
    # C^{-1} V^T M^{-1} rhs
    cinv_vt_minv_rhs = linalg_ops.batch_cholesky_solve(cchol, vt_minv_rhs)
    # V C^{-1} V^T M^{-1} rhs
    v_cinv_vt_minv_rhs = math_ops.batch_matmul(v, cinv_vt_minv_rhs)
    # M^{-1} V C^{-1} V^T M^{-1} rhs
    minv_v_cinv_vt_minv_rhs = m.solve(v_cinv_vt_minv_rhs)

    # M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
    return minv_rhs - minv_v_cinv_vt_minv_rhs
Developer ID: 10imaging, Project: tensorflow, Lines: 26, Source file: operator_pd_vdvt_update.py
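
As a sanity check, the Woodbury identity that `_batch_sqrt_solve` relies on can be verified with a small NumPy sketch (the matrices below are made-up stand-ins for the operator `M`, the low-rank factor `V`, and the diagonal `D`; they are not part of the original code):

import numpy as np

rng = np.random.RandomState(0)
k, r = 5, 2
m = np.eye(k) + 0.1 * rng.randn(k, k)
m = m @ m.T                                  # symmetric positive definite stand-in for M
v = rng.randn(k, r)                          # stand-in for V
d = np.diag(rng.rand(r) + 0.5)               # stand-in for the diagonal D
rhs = rng.randn(k, 1)

# Direct solve: (M + V D V^T)^{-1} rhs
direct = np.linalg.solve(m + v @ d @ v.T, rhs)

# Woodbury route, mirroring _batch_sqrt_solve, with capacitance C = D^{-1} + V^T M^{-1} V
minv_rhs = np.linalg.solve(m, rhs)
c = np.linalg.inv(d) + v.T @ np.linalg.solve(m, v)
woodbury = minv_rhs - np.linalg.solve(m, v @ np.linalg.solve(c, v.T @ minv_rhs))

print(np.allclose(direct, woodbury))         # True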

Example 2: _BatchMatrixInverseGrad

def _BatchMatrixInverseGrad(op, grad):
  """Gradient for BatchMatrixInverse."""
  ainv = op.outputs[0]
  return -math_ops.batch_matmul(
      ainv,
      math_ops.batch_matmul(grad, ainv, adj_y=True),
      adj_x=True)
Developer ID: bradg19, Project: tensor, Lines: 7, Source file: linalg_grad.py
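
The formula used here is the standard gradient of the matrix inverse, grad_A = -A^{-T} grad A^{-T}. A minimal NumPy sketch with a finite-difference spot check (all names below are made up for illustration):

import numpy as np

rng = np.random.RandomState(0)
n = 4
a = np.eye(n) + 0.1 * rng.randn(n, n)
g = rng.randn(n, n)                                   # upstream gradient dL/d(A^{-1})
loss = lambda a_: np.sum(g * np.linalg.inv(a_))       # scalar loss L = <G, A^{-1}>

# Analytic gradient, as in _BatchMatrixInverseGrad: -A^{-T} G A^{-T}
ainv = np.linalg.inv(a)
grad_a = -ainv.T @ g @ ainv.T

# Finite-difference check of a single entry.
eps = 1e-6
da = np.zeros_like(a)
da[1, 2] = eps
fd = (loss(a + da) - loss(a - da)) / (2 * eps)
print(np.isclose(fd, grad_a[1, 2], atol=1e-5))        # True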

Example 3: _overdetermined

  def _overdetermined(op, grad):
    """Gradients for the overdetermined case of MatrixSolveLs.

    This is the backprop for the solution to the normal equations of the first
    kind:
       X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B
    which solve the least squares problem
       min ||A * X - B||_F^2 + lambda ||X||_F^2.
    """
    a = op.inputs[0]
    b = op.inputs[1]
    l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
    x = op.outputs[0]
    a_shape = array_ops.shape(a)
    batch_shape = a_shape[:-2]
    n = a_shape[-1]

    identity = linalg_ops.eye(n, batch_shape=batch_shape, dtype=a.dtype)
    gramian = math_ops.batch_matmul(
        a, a, adj_x=True) + l2_regularizer * identity
    chol = linalg_ops.cholesky(gramian)
    # Temporary z = (A^T * A + lambda * I)^{-1} * grad.
    z = linalg_ops.cholesky_solve(chol, grad)
    xzt = math_ops.batch_matmul(x, z, adj_y=True)
    zx_sym = xzt + array_ops.matrix_transpose(xzt)
    grad_a = -math_ops.batch_matmul(a, zx_sym) + math_ops.batch_matmul(
        b, z, adj_y=True)
    grad_b = math_ops.batch_matmul(a, z)
    return (grad_a, grad_b, None)
Developer ID: HKUST-SING, Project: tensorflow, Lines: 29, Source file: linalg_grad.py
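
The backprop formulas above can be spot-checked numerically. A minimal NumPy sketch, assuming a small made-up problem (none of these names come from the original code):

import numpy as np

rng = np.random.RandomState(0)
m, n, lam = 8, 3, 0.1
a = rng.randn(m, n)
b = rng.randn(m, 1)

def solve_ls(a_, b_):
    # Forward op being differentiated: X = (A^T A + lambda I)^{-1} A^T B
    return np.linalg.solve(a_.T @ a_ + lam * np.eye(n), a_.T @ b_)

x = solve_ls(a, b)
grad = rng.randn(n, 1)                                  # upstream gradient dL/dX

# Backprop as in _overdetermined: z = (A^T A + lambda I)^{-1} grad
z = np.linalg.solve(a.T @ a + lam * np.eye(n), grad)
xzt = x @ z.T
grad_a = -a @ (xzt + xzt.T) + b @ z.T
grad_b = a @ z

# Finite-difference check of one entry of A.
eps = 1e-6
da = np.zeros_like(a)
da[2, 1] = eps
fd = (np.sum(grad * solve_ls(a + da, b)) - np.sum(grad * solve_ls(a - da, b))) / (2 * eps)
print(np.isclose(fd, grad_a[2, 1], atol=1e-5))          # True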

Example 4: matmul

  def matmul(self, x, name='matmul'):
    """Left (batch) matrix multiplication of `x` by this operator."""
    chol = self._chol
    with ops.name_scope(self.name):
      with ops.op_scope(self.inputs, name):
        a_times_x = math_ops.batch_matmul(chol, x, adj_x=True)
        return math_ops.batch_matmul(chol, a_times_x)
Developer ID: 31H0B1eV, Project: tensorflow, Lines: 7, Source file: operator_pd_cholesky.py
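
The pattern here, `batch_matmul(chol, x, adj_x=True)` followed by a plain `batch_matmul`, computes A x = L (L^T x) without ever forming the dense operator A = L L^T. A quick NumPy sketch of the same idea (made-up inputs):

import numpy as np

rng = np.random.RandomState(0)
k = 4
chol = np.tril(rng.randn(k, k))
np.fill_diagonal(chol, np.abs(np.diag(chol)) + 1.0)   # a valid lower Cholesky factor L
a = chol @ chol.T                                     # the operator A it represents
x = rng.randn(k, 2)

# matmul above:  A x  computed as  L (L^T x), never forming A explicitly.
via_chol = chol @ (chol.T @ x)
print(np.allclose(via_chol, a @ x))                   # True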

Example 5: _BatchMatrixSolveGrad

def _BatchMatrixSolveGrad(op, grad):
  """Gradient for BatchMatrixSolve."""
  a = op.inputs[0]
  c = op.outputs[0]
  # TODO(rmlarsen): Replace the following two lines with
  # a single call to batch_matrix_solve after adding
  # in an option to solve for A^T X = Y.
  ainv = linalg_ops.batch_matrix_inverse(a)
  grad_b = math_ops.batch_matmul(ainv, grad, adj_x=True)
  grad_a = -math_ops.batch_matmul(grad_b, c, adj_y=True)
  return (grad_a, grad_b)
Developer ID: Billlee1231, Project: tensorflow, Lines: 11, Source file: linalg_grad.py

Example 6: _MatrixSolveGrad

def _MatrixSolveGrad(op, grad):
    """Gradient for MatrixSolve."""
    a = op.inputs[0]
    adjoint_a = op.get_attr("adjoint")
    c = op.outputs[0]
    grad_b = linalg_ops.matrix_solve(a, grad, adjoint=not adjoint_a)
    if adjoint_a:
        grad_a = -math_ops.batch_matmul(c, grad_b, adj_y=True)
    else:
        grad_a = -math_ops.batch_matmul(grad_b, c, adj_y=True)
    return (grad_a, grad_b)
Developer ID: eerwitt, Project: tensorflow, Lines: 11, Source file: linalg_grad.py

Example 7: _test1

def _test1(op, grad_e, grad_v):
    """Gradient for SelfAdjointEigV2, derived with Joan, with no adjustment for the subspace."""
    e = op.outputs[0]
    v = op.outputs[1]
    # dim = v.get_shape()
    with ops.control_dependencies([grad_e.op, grad_v.op]):
        if grad_v is not None:
            E = array_ops.diag(e)
            v_proj = array_ops.slice(v, [0, 0], [20, 2])
            grad_grassman = grad_v - math_ops.batch_matmul(
                math_ops.batch_matmul(v_proj, array_ops.transpose(v_proj)), grad_v)
            grad_a = (
                math_ops.batch_matmul(
                    grad_grassman, math_ops.batch_matmul(E, array_ops.transpose(grad_v))) +
                math_ops.batch_matmul(
                    grad_v, math_ops.batch_matmul(E, array_ops.transpose(grad_grassman))))
    return grad_a
Developer ID: lishali, Project: clusternet, Lines: 12, Source file: r_array_learn.py

Example 8: _batch_sqrt_matmul

  def _batch_sqrt_matmul(self, x, transpose_x=False):
    v = self._v
    m = self._operator
    d = self._diag_operator
    # The operators call the appropriate matmul/batch_matmul automatically.  We
    # cannot override.
    # batch_matmul is defined as:  x * y, so adj_x and adj_y are the ways to
    # transpose the left and right.
    mx = m.matmul(x, transpose_x=transpose_x)
    vt_x = math_ops.batch_matmul(v, x, adj_x=True, adj_y=transpose_x)
    d_vt_x = d.matmul(vt_x)
    v_d_vt_x = math_ops.batch_matmul(v, d_vt_x)

    return mx + v_d_vt_x
Developer ID: 10imaging, Project: tensorflow, Lines: 14, Source file: operator_pd_vdvt_update.py
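
This computes (M + V D V^T) x while keeping the low-rank update in factored form. A small NumPy sketch under made-up stand-ins for `M`, `V`, and `D`:

import numpy as np

rng = np.random.RandomState(0)
k, r = 5, 2
m = 2.0 * np.eye(k)                      # stand-in for the base operator M
v = rng.randn(k, r)                      # stand-in for V
d = np.diag(rng.rand(r) + 0.5)           # stand-in for the diagonal operator D
x = rng.randn(k, 3)

# _batch_sqrt_matmul: M x + V (D (V^T x)), i.e. (M + V D V^T) x in factored form.
factored = m @ x + v @ (d @ (v.T @ x))
dense = (m + v @ d @ v.T) @ x
print(np.allclose(factored, dense))      # True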

Example 9: _BatchMatrixSolveGrad

def _BatchMatrixSolveGrad(op, grad):
  """Gradient for BatchMatrixSolve."""
  a = op.inputs[0]
  c = op.outputs[0]
  grad_b = linalg_ops.batch_matrix_solve(a, grad, adjoint=True)
  grad_a = -math_ops.batch_matmul(grad_b, c, adj_y=True)
  return (grad_a, grad_b)
Developer ID: AlKavaev, Project: tensorflow, Lines: 7, Source file: linalg_grad.py

Example 10: variance

    def variance(self, name="variance"):
        """Variance of the Wishart distribution.

        This function should not be confused with the covariance of the
        Wishart. The covariance matrix would have shape `q x q` where
        `q = dimension * (dimension+1) / 2`, with elements corresponding to
        some mapping from a lower-triangular matrix to a vector space.

        This function returns the diagonal of the covariance matrix, but
        shaped as a `dimension x dimension` matrix.

        Args:
          name: The name of this op.

        Returns:
          variance: `Tensor` of dtype `self.dtype`.
        """
        with ops.name_scope(self.name):
            with ops.name_scope(name, values=list(self.inputs.values())):
                x = math_ops.sqrt(self.df) * self.scale_operator_pd.to_dense()
                d = array_ops.expand_dims(array_ops.batch_matrix_diag_part(x), -1)
                v = math_ops.square(x) + math_ops.batch_matmul(d, d, adj_y=True)
                if self.cholesky_input_output_matrices:
                    return linalg_ops.batch_cholesky(v)
                else:
                    return v
Developer ID: damienmg, Project: tensorflow, Lines: 27, Source file: wishart.py

Example 11: _variance

  def _variance(self):
    scale = self.alpha_sum * math_ops.sqrt(1.0 + self.alpha_sum)
    alpha = self.alpha / scale
    outer_prod = -math_ops.batch_matmul(
        array_ops.expand_dims(alpha, dim=-1),   # column vector
        array_ops.expand_dims(alpha, dim=-2))   # row vector
    return array_ops.batch_matrix_set_diag(
        outer_prod, alpha * (self.alpha_sum / scale - alpha))
Developer ID: yxiong, Project: tensorflow, Lines: 7, Source file: dirichlet.py
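
This assembles the usual closed-form Dirichlet covariance, Cov[X_i, X_j] = (delta_ij alpha_i alpha_0 - alpha_i alpha_j) / (alpha_0^2 (alpha_0 + 1)). A NumPy sketch comparing that textbook formula with the way `_variance` builds it (the concrete `alpha` is made up):

import numpy as np

alpha = np.array([2.0, 3.0, 5.0])
alpha_sum = alpha.sum()

# Textbook Dirichlet covariance matrix.
cov = (np.diag(alpha) * alpha_sum - np.outer(alpha, alpha)) / (
    alpha_sum**2 * (alpha_sum + 1.0))

# The same matrix assembled the way _variance does it.
scale = alpha_sum * np.sqrt(1.0 + alpha_sum)
a = alpha / scale
built = -np.outer(a, a)
np.fill_diagonal(built, a * (alpha_sum / scale - a))
print(np.allclose(cov, built))   # True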

Example 12: _variance

  def _variance(self):
    x = math_ops.sqrt(self.df) * self.scale_operator_pd.to_dense()
    d = array_ops.expand_dims(array_ops.matrix_diag_part(x), -1)
    v = math_ops.square(x) + math_ops.batch_matmul(d, d, adj_y=True)
    if self.cholesky_input_output_matrices:
      return linalg_ops.cholesky(v)
    return v
Developer ID: KalraA, Project: tensorflow, Lines: 7, Source file: wishart.py

Example 13: _variance

  def _variance(self):
    p = self.p * array_ops.expand_dims(array_ops.ones_like(self.n), -1)
    outer_prod = math_ops.batch_matmul(
        array_ops.expand_dims(self._mean_val, -1),
        array_ops.expand_dims(p, -2))
    return array_ops.batch_matrix_set_diag(
        -outer_prod, self._mean_val - self._mean_val * p)
Developer ID: apollos, Project: tensorflow, Lines: 7, Source file: multinomial.py

Example 14: _MatrixTriangularSolveGrad

def _MatrixTriangularSolveGrad(op, grad):
    """Gradient for MatrixTriangularSolve."""
    a = op.inputs[0]
    adjoint_a = op.get_attr("adjoint")
    lower_a = op.get_attr("lower")
    c = op.outputs[0]
    grad_b = linalg_ops.matrix_triangular_solve(a, grad, lower=lower_a, adjoint=not adjoint_a)
    if adjoint_a:
        grad_a = -math_ops.batch_matmul(c, grad_b, adj_y=True)
    else:
        grad_a = -math_ops.batch_matmul(grad_b, c, adj_y=True)
    if lower_a:
        grad_a = array_ops.matrix_band_part(grad_a, -1, 0)
    else:
        grad_a = array_ops.matrix_band_part(grad_a, 0, -1)
    return (grad_a, grad_b)
Developer ID: eerwitt, Project: tensorflow, Lines: 16, Source file: linalg_grad.py

Example 15: _sample_n

  def _sample_n(self, n, seed):
    batch_shape = self.batch_shape()
    event_shape = self.event_shape()
    batch_ndims = array_ops.shape(batch_shape)[0]

    ndims = batch_ndims + 3  # sample_ndims=1, event_ndims=2
    shape = array_ops.concat(0, ((n,), batch_shape, event_shape))

    # Complexity: O(nbk^2)
    x = random_ops.random_normal(shape=shape,
                                 mean=0.,
                                 stddev=1.,
                                 dtype=self.dtype,
                                 seed=seed)

    # Complexity: O(nbk)
    # This parametrization is equivalent to Chi2, i.e.,
    # ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
    g = random_ops.random_gamma(shape=(n,),
                                alpha=self._multi_gamma_sequence(
                                    0.5 * self.df, self.dimension),
                                beta=0.5,
                                dtype=self.dtype,
                                seed=distribution_util.gen_new_seed(
                                    seed, "wishart"))

    # Complexity: O(nbk^2)
    x = array_ops.matrix_band_part(x, -1, 0)  # Tri-lower.

    # Complexity: O(nbk)
    x = array_ops.matrix_set_diag(x, math_ops.sqrt(g))

    # Make batch-op ready.
    # Complexity: O(nbk^2)
    perm = array_ops.concat(0, (math_ops.range(1, ndims), (0,)))
    x = array_ops.transpose(x, perm)
    shape = array_ops.concat(0, (batch_shape, (event_shape[0], -1)))
    x = array_ops.reshape(x, shape)

    # Complexity: O(nbM) where M is the complexity of the operator solving a
    # vector system.  E.g., for OperatorPDDiag, each matmul is O(k^2), so
    # this complexity is O(nbk^2). For OperatorPDCholesky, each matmul is
    # O(k^3) so this step has complexity O(nbk^3).
    x = self.scale_operator_pd.sqrt_matmul(x)

    # Undo make batch-op ready.
    # Complexity: O(nbk^2)
    shape = array_ops.concat(0, (batch_shape, event_shape, (n,)))
    x = array_ops.reshape(x, shape)
    perm = array_ops.concat(0, ((ndims-1,), math_ops.range(0, ndims-1)))
    x = array_ops.transpose(x, perm)

    if not self.cholesky_input_output_matrices:
      # Complexity: O(nbk^3)
      x = math_ops.batch_matmul(x, x, adj_y=True)

    return x
Developer ID: 821760408-sp, Project: tensorflow, Lines: 57, Source file: wishart.py
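
The sampler above is the Bartlett decomposition: draw a lower-triangular matrix with standard-normal strictly-lower entries and chi-distributed diagonal entries, push it through the square root of the scale operator, and form X X^T. A NumPy sketch of the same construction, with a made-up scale matrix and a Monte Carlo check against the known mean df * scale:

import numpy as np

rng = np.random.RandomState(0)
k, df, n_samples = 3, 7.0, 200000
scale = np.array([[2.0, 0.3, 0.0],
                  [0.3, 1.0, 0.2],
                  [0.0, 0.2, 1.5]])
chol = np.linalg.cholesky(scale)

# Strictly lower-triangular standard normals (the matrix_band_part step).
mask = np.tril(np.ones((k, k)), k=-1)
a = rng.randn(n_samples, k, k) * mask
# Chi-distributed diagonal: sqrt of Gamma((df - i)/2, rate 1/2), i.e. sqrt of ChiSquared(df - i).
diag = np.sqrt(rng.gamma(shape=(df - np.arange(k)) / 2.0, scale=2.0, size=(n_samples, k)))
a[:, np.arange(k), np.arange(k)] = diag

l = chol @ a                             # scale_operator_pd.sqrt_matmul
samples = l @ np.swapaxes(l, -1, -2)     # batch_matmul(x, x, adj_y=True)

# E[W] = df * scale, so the Monte Carlo mean should be close to df * scale.
print(np.round(samples.mean(axis=0), 2))
print(np.round(df * scale, 2))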


Note: The `tensorflow.python.ops.math_ops.batch_matmul` examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and you should consult each project's license before redistributing or using it. Do not reproduce this article without permission.