

Python linalg_impl.adjoint Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.ops.linalg.linalg_impl.adjoint. If you are wondering what adjoint does, how to call it, or what real code that uses it looks like, the curated examples below should help.


The following shows 15 code examples of the adjoint function, sorted by popularity.
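
Before the examples, here is a minimal sketch (not taken from any of the projects below) of what the function computes: the transpose of the last two dimensions with every element complex-conjugated. The public alias tf.linalg.adjoint is used here in place of the internal linalg_impl module.

import tensorflow as tf

x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
                 [4 + 4j, 5 + 5j, 6 + 6j]], dtype=tf.complex64)
# adjoint(x) == conj(matrix_transpose(x)) on the two innermost dimensions.
y = tf.linalg.adjoint(x)
# Expected values:
# [[1-1j, 4-4j],
#  [2-2j, 5-5j],
#  [3-3j, 6-6j]]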

Example 1: _QrGrad

def _QrGrad(op, dq, dr):
  """Gradient for Qr."""
  q, r = op.outputs
  if q.dtype.is_complex:
    raise NotImplementedError("QrGrad not implemented for dtype: %s" % q.dtype)
  if (r.shape.ndims is None or r.shape.as_list()[-2] is None or
      r.shape.as_list()[-1] is None):
    raise NotImplementedError("QrGrad not implemented with dynamic shapes.")
  if r.shape.dims[-2].value != r.shape.dims[-1].value:
    raise NotImplementedError("QrGrad not implemented when ncols > nrows "
                              "or full_matrices is true and ncols != nrows.")

  qdq = math_ops.matmul(q, dq, adjoint_a=True)
  qdq_ = qdq - _linalg.adjoint(qdq)
  rdr = math_ops.matmul(r, dr, adjoint_b=True)
  rdr_ = rdr - _linalg.adjoint(rdr)
  tril = array_ops.matrix_band_part(qdq_ + rdr_, -1, 0)

  def _TriangularSolve(x, r):
    """Equiv to matmul(x, adjoint(matrix_inverse(r))) if r is upper-tri."""
    return _linalg.adjoint(
        linalg_ops.matrix_triangular_solve(
            r, _linalg.adjoint(x), lower=False, adjoint=False))

  grad_a = math_ops.matmul(q, dr + _TriangularSolve(tril, r))
  grad_b = _TriangularSolve(dq - math_ops.matmul(q, qdq), r)
  return grad_a + grad_b
Contributor: JonathanRaiman, Project: tensorflow, Lines: 27, Source: linalg_grad.py
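
As a quick sanity check of the identity that _TriangularSolve relies on, here is a small sketch written against the public TF 2.x API rather than the internal modules above: for an upper-triangular r, adjoint(triangular_solve(r, adjoint(x))) equals matmul(x, adjoint(inv(r))).

import numpy as np
import tensorflow as tf

# Well-conditioned upper-triangular matrix and an arbitrary right operand.
r = tf.constant(np.triu(np.random.rand(4, 4)) + 4.0 * np.eye(4))
x = tf.constant(np.random.rand(3, 4))

lhs = tf.linalg.adjoint(
    tf.linalg.triangular_solve(r, tf.linalg.adjoint(x), lower=False))
rhs = tf.matmul(x, tf.linalg.adjoint(tf.linalg.inv(r)))
np.testing.assert_allclose(lhs.numpy(), rhs.numpy(), atol=1e-10)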

Example 2: _test_matmul

 def _test_matmul(self, with_batch):
   for use_placeholder in self._use_placeholder_options:
     for build_info in self._operator_build_infos:
       # If batch dimensions are omitted, but there are
       # no batch dimensions for the linear operator, then
       # skip the test case. This is already checked with
       # with_batch=True.
       if not with_batch and len(build_info.shape) <= 2:
         continue
       for dtype in self._dtypes_to_test:
         for adjoint in self._adjoint_options:
           for adjoint_arg in self._adjoint_arg_options:
             with self.session(graph=ops.Graph()) as sess:
               sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
               operator, mat = self._operator_and_matrix(
                   build_info, dtype, use_placeholder=use_placeholder)
               x = self._make_x(
                   operator, adjoint=adjoint, with_batch=with_batch)
               # If adjoint_arg, compute A X^H^H = A X.
               if adjoint_arg:
                 op_matmul = operator.matmul(
                     linalg.adjoint(x),
                     adjoint=adjoint,
                     adjoint_arg=adjoint_arg)
               else:
                 op_matmul = operator.matmul(x, adjoint=adjoint)
               mat_matmul = linear_operator_util.matmul_with_broadcast(
                   mat, x, adjoint_a=adjoint)
               if not use_placeholder:
                 self.assertAllEqual(op_matmul.get_shape(),
                                     mat_matmul.get_shape())
               op_matmul_v, mat_matmul_v = sess.run(
                   [op_matmul, mat_matmul])
               self.assertAC(op_matmul_v, mat_matmul_v)
Contributor: adit-chandra, Project: tensorflow, Lines: 34, Source: linear_operator_test_util.py
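
The adjoint_arg branch exercises the identity A (X^H)^H = A X. A minimal sketch of the same check against the public API; LinearOperatorFullMatrix is chosen here purely for illustration:

import tensorflow as tf

a = tf.random.normal([3, 3])
x = tf.random.normal([3, 2])
operator = tf.linalg.LinearOperatorFullMatrix(a)

y_direct = operator.matmul(x)                                            # A @ X
y_adjoint_arg = operator.matmul(tf.linalg.adjoint(x), adjoint_arg=True)  # A @ (X^H)^H
tf.debugging.assert_near(y_direct, y_adjoint_arg)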

Example 3: _test_solve

 def _test_solve(self, with_batch):
   for use_placeholder in self._use_placeholder_options:
     for build_info in self._operator_build_infos:
       # If batch dimensions are omitted, but there are
       # no batch dimensions for the linear operator, then
       # skip the test case. This is already checked with
       # with_batch=True.
       if not with_batch and len(build_info.shape) <= 2:
         continue
       for dtype in self._dtypes_to_test:
         for adjoint in self._adjoint_options:
           for adjoint_arg in self._adjoint_arg_options:
             with self.test_session(graph=ops.Graph()) as sess:
               sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
               operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                   build_info, dtype, use_placeholder=use_placeholder)
               rhs = self._make_rhs(
                   operator, adjoint=adjoint, with_batch=with_batch)
               # If adjoint_arg, solve A X = (rhs^H)^H = rhs.
               if adjoint_arg:
                 op_solve = operator.solve(
                     linalg.adjoint(rhs),
                     adjoint=adjoint,
                     adjoint_arg=adjoint_arg)
               else:
                 op_solve = operator.solve(
                     rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
               mat_solve = linear_operator_util.matrix_solve_with_broadcast(
                   mat, rhs, adjoint=adjoint)
               if not use_placeholder:
                 self.assertAllEqual(op_solve.get_shape(),
                                     mat_solve.get_shape())
               op_solve_v, mat_solve_v = sess.run(
                   [op_solve, mat_solve], feed_dict=feed_dict)
               self.assertAC(op_solve_v, mat_solve_v)
Contributor: Jackiefan, Project: tensorflow, Lines: 35, Source: linear_operator_test_util.py
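
The solve counterpart checks A X = (rhs^H)^H = rhs. A hedged sketch of the same identity with a concrete, non-singular operator (the matrix values are arbitrary):

import tensorflow as tf

a = tf.constant([[2.0, 1.0],
                 [0.0, 3.0]])          # non-singular example matrix
rhs = tf.constant([[1.0], [2.0]])
operator = tf.linalg.LinearOperatorFullMatrix(a, is_non_singular=True)

x_direct = operator.solve(rhs)
x_adjoint_arg = operator.solve(tf.linalg.adjoint(rhs), adjoint_arg=True)
tf.debugging.assert_near(x_direct, x_adjoint_arg)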

Example 4: _test_matmul

 def _test_matmul(self, with_batch):
   for use_placeholder in self._use_placeholder_options:
     for build_info in self._operator_build_infos:
       for dtype in self._dtypes_to_test:
         for adjoint in self._adjoint_options:
           for adjoint_arg in self._adjoint_arg_options:
             with self.test_session(graph=ops.Graph()) as sess:
               sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
               operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                   build_info, dtype, use_placeholder=use_placeholder)
               x = self._make_x(
                   operator, adjoint=adjoint, with_batch=with_batch)
               # If adjoint_arg, compute A X^H^H = A X.
               if adjoint_arg:
                 op_matmul = operator.matmul(
                     linalg.adjoint(x),
                     adjoint=adjoint,
                     adjoint_arg=adjoint_arg)
               else:
                 op_matmul = operator.matmul(x, adjoint=adjoint)
               mat_matmul = linear_operator_util.matmul_with_broadcast(
                   mat, x, adjoint_a=adjoint)
               if not use_placeholder:
                 self.assertAllEqual(op_matmul.get_shape(),
                                     mat_matmul.get_shape())
               op_matmul_v, mat_matmul_v = sess.run(
                   [op_matmul, mat_matmul], feed_dict=feed_dict)
               self.assertAC(op_matmul_v, mat_matmul_v)
Contributor: moses-sun, Project: tensorflow, Lines: 28, Source: linear_operator_test_util.py

Example 5: _matmul

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    if self._assert_proper_shapes:
      x = linalg.adjoint(x) if adjoint_arg else x
      aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)
      x = control_flow_ops.with_dependencies([aps], x)
    if self.is_square:
      # Note that adjoint has no effect since this matrix is self-adjoint.
      if adjoint_arg:
        output_shape = array_ops.concat([
            array_ops.shape(x)[:-2],
            [array_ops.shape(x)[-1], array_ops.shape(x)[-2]]], axis=0)
      else:
        output_shape = array_ops.shape(x)

      return self._possibly_broadcast_batch_shape(
          array_ops.zeros(shape=output_shape, dtype=x.dtype))

    x_shape = array_ops.shape(x)
    n = self._num_columns if adjoint else self._num_rows
    m = x_shape[-2] if adjoint_arg else x_shape[-1]

    output_shape = array_ops.concat([x_shape[:-2], [n, m]], axis=0)

    zeros = array_ops.zeros(shape=output_shape, dtype=x.dtype)
    return self._possibly_broadcast_batch_shape(zeros)
Contributor: AnishShah, Project: tensorflow, Lines: 25, Source: linear_operator_zeros.py
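
As a usage note (my own sketch, not from linear_operator_zeros.py), the public LinearOperatorZeros exposes the behaviour implemented above: matmul ignores the values of x and returns zeros of the appropriate output shape.

import tensorflow as tf

op = tf.linalg.LinearOperatorZeros(num_rows=3)
x = tf.random.normal([3, 2])
y = op.matmul(x)
print(y.shape)                           # (3, 2)
print(tf.reduce_max(tf.abs(y)).numpy())  # 0.0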

Example 6: test_solve

 def test_solve(self):
   self._skip_if_tests_to_skip_contains("solve")
   for use_placeholder in self._use_placeholder_options:
     for shape in self._shapes_to_test:
       for dtype in self._dtypes_to_test:
         for adjoint in self._adjoint_options:
           for adjoint_arg in self._adjoint_arg_options:
             with self.test_session(graph=ops.Graph()) as sess:
               sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
               operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                   shape, dtype, use_placeholder=use_placeholder)
               rhs = self._make_rhs(operator, adjoint=adjoint)
               # If adjoint_arg, solve A X = (rhs^H)^H = rhs.
               if adjoint_arg:
                 op_solve = operator.solve(
                     linalg.adjoint(rhs),
                     adjoint=adjoint,
                     adjoint_arg=adjoint_arg)
               else:
                 op_solve = operator.solve(
                     rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
               mat_solve = linalg_ops.matrix_solve(mat, rhs, adjoint=adjoint)
               if not use_placeholder:
                 self.assertAllEqual(op_solve.get_shape(),
                                     mat_solve.get_shape())
               op_solve_v, mat_solve_v = sess.run(
                   [op_solve, mat_solve], feed_dict=feed_dict)
               self.assertAC(op_solve_v, mat_solve_v)
Contributor: AbhinavJain13, Project: tensorflow, Lines: 28, Source: linear_operator_test_util.py

Example 7: test_matmul

 def test_matmul(self):
   self._skip_if_tests_to_skip_contains("matmul")
   for use_placeholder in self._use_placeholder_options:
     for shape in self._shapes_to_test:
       for dtype in self._dtypes_to_test:
         for adjoint in self._adjoint_options:
           for adjoint_arg in self._adjoint_arg_options:
             with self.test_session(graph=ops.Graph()) as sess:
               sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
               operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                   shape, dtype, use_placeholder=use_placeholder)
               x = self._make_x(operator, adjoint=adjoint)
               # If adjoint_arg, compute A X^H^H = A X.
               if adjoint_arg:
                 op_matmul = operator.matmul(
                     linalg.adjoint(x),
                     adjoint=adjoint,
                     adjoint_arg=adjoint_arg)
               else:
                 op_matmul = operator.matmul(x, adjoint=adjoint)
               mat_matmul = math_ops.matmul(mat, x, adjoint_a=adjoint)
               if not use_placeholder:
                 self.assertAllEqual(op_matmul.get_shape(),
                                     mat_matmul.get_shape())
               op_matmul_v, mat_matmul_v = sess.run(
                   [op_matmul, mat_matmul], feed_dict=feed_dict)
               self.assertAC(op_matmul_v, mat_matmul_v)
Contributor: AbhinavJain13, Project: tensorflow, Lines: 27, Source: linear_operator_test_util.py

Example 8: _test_solve

 def _test_solve(self, with_batch):
   for use_placeholder in self._use_placeholder_options:
     for build_info in self._operator_build_infos:
       for dtype in self._dtypes_to_test:
         for adjoint in self._adjoint_options:
           for adjoint_arg in self._adjoint_arg_options:
             with self.test_session(graph=ops.Graph()) as sess:
               sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
               operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                   build_info, dtype, use_placeholder=use_placeholder)
               rhs = self._make_rhs(
                   operator, adjoint=adjoint, with_batch=with_batch)
               # If adjoint_arg, solve A X = (rhs^H)^H = rhs.
               if adjoint_arg:
                 op_solve = operator.solve(
                     linalg.adjoint(rhs),
                     adjoint=adjoint,
                     adjoint_arg=adjoint_arg)
               else:
                 op_solve = operator.solve(
                     rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
               mat_solve = linear_operator_util.matrix_solve_with_broadcast(
                   mat, rhs, adjoint=adjoint)
               if not use_placeholder:
                 self.assertAllEqual(op_solve.get_shape(),
                                     mat_solve.get_shape())
               op_solve_v, mat_solve_v = sess.run(
                   [op_solve, mat_solve], feed_dict=feed_dict)
               self.assertAC(op_solve_v, mat_solve_v)
Contributor: moses-sun, Project: tensorflow, Lines: 29, Source: linear_operator_test_util.py

Example 9: _matmul

 def _matmul(self, x, adjoint=False, adjoint_arg=False):
   # Note that adjoint has no effect since this matrix is self-adjoint.
   x = linalg.adjoint(x) if adjoint_arg else x
   if self._assert_proper_shapes:
     aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)
     x = control_flow_ops.with_dependencies([aps], x)
   return self._possibly_broadcast_batch_shape(x)
Contributor: AnishShah, Project: tensorflow, Lines: 7, Source: linear_operator_identity.py
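
A usage sketch of the identity operator's matmul through the public wrapper: the result is x itself (possibly broadcast), and the adjoint flag makes no difference because I^H = I.

import tensorflow as tf

op = tf.linalg.LinearOperatorIdentity(num_rows=3)
x = tf.random.normal([3, 2])
tf.debugging.assert_near(op.matmul(x), x)
tf.debugging.assert_near(op.matmul(x, adjoint=True), x)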

Example 10: _test_matmul_base

def _test_matmul_base(
    self,
    use_placeholder,
    shapes_info,
    dtype,
    adjoint,
    adjoint_arg,
    with_batch):
  # If batch dimensions are omitted, but there are
  # no batch dimensions for the linear operator, then
  # skip the test case. This is already checked with
  # with_batch=True.
  if not with_batch and len(shapes_info.shape) <= 2:
    return
  with self.session(graph=ops.Graph()) as sess:
    sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
    operator, mat = self.operator_and_matrix(
        shapes_info, dtype, use_placeholder=use_placeholder)
    x = self.make_x(
        operator, adjoint=adjoint, with_batch=with_batch)
    # If adjoint_arg, compute A X^H^H = A X.
    if adjoint_arg:
      op_matmul = operator.matmul(
          linalg.adjoint(x),
          adjoint=adjoint,
          adjoint_arg=adjoint_arg)
    else:
      op_matmul = operator.matmul(x, adjoint=adjoint)
    mat_matmul = math_ops.matmul(mat, x, adjoint_a=adjoint)
    if not use_placeholder:
      self.assertAllEqual(op_matmul.get_shape(),
                          mat_matmul.get_shape())
    op_matmul_v, mat_matmul_v = sess.run(
        [op_matmul, mat_matmul])
    self.assertAC(op_matmul_v, mat_matmul_v)
Contributor: aritratony, Project: tensorflow, Lines: 35, Source: linear_operator_test_util.py

Example 11: _assert_self_adjoint

 def _assert_self_adjoint(self):
   dense = self._get_cached_dense_matrix()
   logging.warn(
       "Using (possibly slow) default implementation of assert_self_adjoint."
       "  Requires conversion to a dense matrix.")
   return check_ops.assert_equal(
       dense,
       linalg.adjoint(dense),
       message="Matrix was not equal to its adjoint.")
Contributor: DjangoPeng, Project: tensorflow, Lines: 9, Source: linear_operator.py
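
The same check expressed directly against the public API (a sketch assuming eager execution, where the assertion runs immediately): a matrix is self-adjoint exactly when it equals its adjoint.

import tensorflow as tf

herm = tf.constant([[1.0, 2.0],
                    [2.0, 3.0]])   # real symmetric, hence self-adjoint
tf.debugging.assert_equal(herm, tf.linalg.adjoint(herm))

op = tf.linalg.LinearOperatorFullMatrix(herm)
op.assert_self_adjoint()           # passes; raises InvalidArgumentError otherwise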

Example 12: _solve

 def _solve(self, rhs, adjoint=False, adjoint_arg=False):
   rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
   if adjoint:
     matrix = self._multiplier_matrix_conj
   else:
     matrix = self._multiplier_matrix
   if self._assert_proper_shapes:
     aps = linear_operator_util.assert_compatible_matrix_dimensions(self, rhs)
     rhs = control_flow_ops.with_dependencies([aps], rhs)
   return rhs / matrix
Contributor: AnishShah, Project: tensorflow, Lines: 10, Source: linear_operator_identity.py
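
Through the public LinearOperatorScaledIdentity, the solve above reduces to dividing the right-hand side by the (conjugated, when adjoint=True) multiplier. A minimal sketch:

import tensorflow as tf

op = tf.linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=2.0)
rhs = tf.constant([[2.0], [4.0]])
tf.debugging.assert_near(op.solve(rhs), rhs / 2.0)   # [[1.], [2.]]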

Example 13: _solve

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
    spectrum = self._conj_spectrum if adjoint else self._spectrum_complex

    rhs, spectrum = self._broadcast_batch_dims(rhs, spectrum)

    rhs_vb = self._vectorize_then_blockify(rhs)
    fft_rhs_vb = self._fft(rhs_vb)
    solution_vb = self._ifft(fft_rhs_vb / spectrum)
    x = self._unblockify_then_matricize(solution_vb)
    return math_ops.cast(x, self.dtype)
Contributor: JonathanRaiman, Project: tensorflow, Lines: 11, Source: linear_operator_circulant.py
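
The circulant operator solves in the frequency domain: the FFT of the right-hand side is divided by the spectrum, then inverse-transformed. A small sketch (the kernel values are arbitrary) checking that solve inverts matmul:

import tensorflow as tf

kernel = tf.constant([3.0, 1.0, 0.5])                    # arbitrary circular-convolution kernel
spectrum = tf.signal.fft(tf.cast(kernel, tf.complex64))  # eigenvalues of the circulant
op = tf.linalg.LinearOperatorCirculant(spectrum)

rhs = tf.cast(tf.constant([[1.0], [2.0], [3.0]]), tf.complex64)
x = op.solve(rhs)
tf.debugging.assert_near(tf.math.real(op.matmul(x)),
                         tf.math.real(rhs), atol=1e-4)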

Example 14: test_adjoint

 def test_adjoint(self):
   with self.test_session(graph=ops.Graph()) as sess:
     sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
     # NOTE: use_placeholder, shapes_info and dtype are not defined in this
     # excerpt; in the original file they are bound by an enclosing
     # test-factory function that was trimmed out here.
     operator, mat = self.operator_and_matrix(
         shapes_info, dtype, use_placeholder=use_placeholder)
     op_adjoint = operator.adjoint().to_dense()
     op_adjoint_h = operator.H.to_dense()
     mat_adjoint = linalg.adjoint(mat)
     op_adjoint_v, op_adjoint_h_v, mat_adjoint_v = sess.run(
         [op_adjoint, op_adjoint_h, mat_adjoint])
     self.assertAC(mat_adjoint_v, op_adjoint_v)
     self.assertAC(mat_adjoint_v, op_adjoint_h_v)
Contributor: aritratony, Project: tensorflow, Lines: 12, Source: linear_operator_test_util.py
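
A self-contained version of the same comparison (a sketch, with an arbitrary complex matrix): operator.adjoint(), the operator.H shorthand, and linalg.adjoint of the dense matrix all agree.

import tensorflow as tf

mat = tf.constant([[1 + 2j, 3 + 0j],
                   [0 + 1j, 4 - 1j]], dtype=tf.complex64)
operator = tf.linalg.LinearOperatorFullMatrix(mat)

tf.debugging.assert_equal(operator.adjoint().to_dense(), tf.linalg.adjoint(mat))
tf.debugging.assert_equal(operator.H.to_dense(), tf.linalg.adjoint(mat))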

Example 15: _solve

 def _solve(self, rhs, adjoint=False, adjoint_arg=False):
   """Default implementation of _solve."""
   if self.is_square is False:
     raise NotImplementedError(
         "Solve is not yet implemented for non-square operators.")
   logging.warn(
       "Using (possibly slow) default implementation of solve."
       "  Requires conversion to a dense matrix and O(N^3) operations.")
   rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
   if self._can_use_cholesky():
     return linalg_ops.cholesky_solve(self._get_cached_chol(), rhs)
   return linalg_ops.matrix_solve(
       self._get_cached_dense_matrix(), rhs, adjoint=adjoint)
Contributor: DjangoPeng, Project: tensorflow, Lines: 13, Source: linear_operator.py
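
For the Cholesky fast path above, a brief sketch of the underlying equivalence using the public ops (the matrix values below were simply chosen to be symmetric positive definite):

import tensorflow as tf

a = tf.constant([[4.0, 1.0],
                 [1.0, 3.0]])                  # symmetric positive definite
rhs = tf.constant([[1.0], [2.0]])

chol = tf.linalg.cholesky(a)
x_chol = tf.linalg.cholesky_solve(chol, rhs)   # solve via the factorization
x_dense = tf.linalg.solve(a, rhs)              # generic dense solve
tf.debugging.assert_near(x_chol, x_dense)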


Note: The tensorflow.python.ops.linalg.linalg_impl.adjoint examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are governed by each project's License. Please do not reproduce without permission.