Python math_ops.matmul Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.matmul. If you are wondering what matmul does, how to call it, or what it looks like in real code, the curated examples below should help.


The following presents 15 code examples of the matmul function, drawn from open-source projects and ordered by popularity.
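
Before diving into the examples, here is a minimal usage sketch. It assumes a TensorFlow 1.x graph-mode environment, which is what all of the examples below target (in TensorFlow 2.x the public tf.matmul equivalent runs eagerly):

from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops

a = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])  # shape [2, 2]
b = constant_op.constant([[5.0], [6.0]])            # shape [2, 1]
c = math_ops.matmul(a, b)                           # shape [2, 1]
with session.Session() as sess:
  print(sess.run(c))  # [[17.], [39.]]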

Example 1: make_inverse_update_ops

  def make_inverse_update_ops(self):
    """Create and return update ops corresponding to registered computations."""
    ops = []

    num_inverses = len(self._inverses_by_damping)
    matrix_power_registered = bool(self._matpower_by_exp_and_damping)
    use_eig = (
        self._eigendecomp or matrix_power_registered or
        num_inverses >= EIGENVALUE_DECOMPOSITION_THRESHOLD)

    if use_eig:
      self.register_eigendecomp()  # ensures self._eigendecomp is set
      eigenvalues, eigenvectors = self._eigendecomp  # pylint: disable=unpacking-non-sequence

      for damping, inv in self._inverses_by_damping.items():
        ops.append(
            inv.assign(
                math_ops.matmul(eigenvectors / (eigenvalues + damping),
                                array_ops.transpose(eigenvectors))))

      for (exp, damping), matpower in self._matpower_by_exp_and_damping.items():
        ops.append(
            matpower.assign(
                math_ops.matmul(eigenvectors *
                                (eigenvalues + damping)**exp,
                                array_ops.transpose(eigenvectors))))
      # These ops share computation and should be run on a single device.
      ops = [control_flow_ops.group(*ops)]
    else:
      for damping, inv in self._inverses_by_damping.items():
        ops.append(inv.assign(utils.posdef_inv(self._cov, damping)))

    return ops
Author: Lin-jipeng | Project: tensorflow | Lines: 33 | Source: fisher_factors.py
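
Example 1 inverts the damped covariance through its eigendecomposition: (C + λI)^{-1} = V diag(1/(λ_i + λ)) V^T, which is exactly the first matmul above. A hypothetical NumPy sketch verifying that identity:

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(4, 4)
cov = x.dot(x.T)   # symmetric positive semi-definite, a stand-in for self._cov
damping = 0.1

eigenvalues, eigenvectors = np.linalg.eigh(cov)
# Same form as above: matmul(eigenvectors / (eigenvalues + damping), eigenvectors^T)
inv = (eigenvectors / (eigenvalues + damping)).dot(eigenvectors.T)

np.testing.assert_allclose(
    inv, np.linalg.inv(cov + damping * np.eye(4)), atol=1e-8)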

Example 2: _testDefaultGraphInThread

  def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='var_%d' % i)

      # Block here until all threads have constructed their graph.
      constructed_event.set()
      continue_event.wait()

      assign_c_to_v = state_ops.assign(v, c)
      v.initializer.run()
      assign_c_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)
Author: agouwin | Project: udacity_deep_learning_homework | Lines: 28 | Source: session_test.py

Example 3: testPotentialCycle

  def testPotentialCycle(self):
    graph1 = ops.Graph()
    with graph1.as_default():
      a = constant_op.constant(1.0, shape=[2, 2])
      b = constant_op.constant(2.0, shape=[2, 2])
      matmul = math_ops.matmul(a, b)
      with ops.name_scope("hidden1"):
        c = nn_ops.relu(matmul)
        d = constant_op.constant(3.0, shape=[2, 2])
        matmul = math_ops.matmul(c, d)

    orig_meta_graph, _ = meta_graph.export_scoped_meta_graph(
        export_scope="hidden1", graph=graph1)

    graph2 = ops.Graph()
    with graph2.as_default():
      with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
        meta_graph.import_scoped_meta_graph(
            orig_meta_graph, import_scope="new_hidden1")

      meta_graph.import_scoped_meta_graph(
          orig_meta_graph,
          import_scope="new_hidden1",
          input_map={
              "$unbound_inputs_MatMul": constant_op.constant(
                  4.0, shape=[2, 2])
          })
Author: AlbertXiebnu | Project: tensorflow | Lines: 27 | Source: meta_graph_test.py

Example 4: testServerDefChanged

  def testServerDefChanged(self):
    """Update server def, and run ops on new cluster."""
    context.set_server_def(
        server_def=get_server_def(
            ALT_JOB_NAME,
            local_server_port=0,
            remote_server_addresses=[
                self._cached_server1_target, self._cached_server2_target
            ],
            task_index=0))

    with ops.device("job:%s/replica:0/task:1/device:CPU:0" % ALT_JOB_NAME):
      x1 = array_ops.ones([2, 2])
    y = math_ops.matmul(x1, x1)
    np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())

    # Set the server def back to JOB_NAME
    context.set_server_def(
        server_def=get_server_def(
            JOB_NAME,
            local_server_port=0,
            remote_server_addresses=[
                self._cached_server1_target, self._cached_server2_target
            ],
            task_index=0))

    with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
      x1 = array_ops.ones([2, 2])
    y = math_ops.matmul(x1, x1)
    np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
Author: ZhangXinNan | Project: tensorflow | Lines: 30 | Source: remote_test.py

Example 5: Test

  def Test(self):
    np_val = np.matrix(a_np_) * np.matrix(b_np_)

    use_gpu = True
    if a_np_.dtype is np.float16 and (
        not test_util.CudaSupportsHalfMatMulAndConv()):
      use_gpu = False
      print("Built without fp16 matmul support for Cuda, running test on CPU.")

    # Transpose and possibly conjugate a_np_ and b_np_ according to the
    # attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
    # results in a valid matrix multiplication and produces the same result as
    # np.matrix(a_np_) * np.matrix(b_np_)
    effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
    effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
    with self.cached_session() as sess, test_util.device(use_gpu):
      if use_static_shape_:
        a = constant_op.constant(effective_a_np)
        b = constant_op.constant(effective_b_np)
        res = math_ops.matmul(a, b, **kwargs_)
        tf_val = self.evaluate(res)
      else:
        a = array_ops.placeholder(a_np_.dtype)
        b = array_ops.placeholder(b_np_.dtype)
        res = math_ops.matmul(a, b, **kwargs_)
        tf_val = sess.run(res, feed_dict={a: effective_a_np, b: effective_b_np})

    self.assertAllCloseAccordingToType(
        tf_val,
        np_val,
        float_rtol=2e-5,
        float_atol=2e-5,
        half_rtol=0.2,
        half_atol=0.2)
Author: aeverall | Project: tensorflow | Lines: 34 | Source: matmul_op_test.py

Example 6: _MatrixInverseGrad

def _MatrixInverseGrad(op, grad):
  """Gradient for MatrixInverse."""
  ainv = op.outputs[0]
  return -math_ops.matmul(
      ainv,
      math_ops.matmul(grad, ainv, transpose_b=True),
      transpose_a=True)
Author: bradg19 | Project: tensor | Lines: 7 | Source: linalg_grad.py
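
Example 6 implements the identity d(A^{-1}) = -A^{-1} dA A^{-1}, whose vector-Jacobian product for an upstream gradient G is -A^{-T} G A^{-T}. A NumPy finite-difference sketch (illustrative, not from the source) checking that formula:

import numpy as np

rng = np.random.RandomState(1)
a = rng.randn(3, 3) + 3.0 * np.eye(3)  # diagonal shift keeps a well-conditioned
grad = rng.randn(3, 3)                 # upstream gradient w.r.t. inv(a)
ainv = np.linalg.inv(a)

analytic = -ainv.T.dot(grad).dot(ainv.T)  # the formula in _MatrixInverseGrad

eps = 1e-6
numeric = np.zeros_like(a)
for i in range(3):
  for j in range(3):
    da = np.zeros_like(a)
    da[i, j] = eps
    numeric[i, j] = np.sum(grad * (np.linalg.inv(a + da) - ainv)) / eps

np.testing.assert_allclose(analytic, numeric, rtol=1e-4, atol=1e-4)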

Example 7: _symmetric_matrix_square_root

def _symmetric_matrix_square_root(mat, eps=1e-10):
  """Compute square root of a symmetric matrix.

  Note that this is different from an elementwise square root. We want to
  compute M' where M' = sqrt(mat) such that M' * M' = mat.

  Also note that this method **only** works for symmetric matrices.

  Args:
    mat: Matrix to take the square root of.
    eps: Small epsilon such that any element less than eps will not be square
      rooted to guard against numerical instability.

  Returns:
    Matrix square root of mat.
  """
  # Unlike numpy, tensorflow's return order is (s, u, v)
  s, u, v = linalg_ops.svd(mat)
  # sqrt is unstable around 0; keep values below eps as-is (they are already ~0)
  si = array_ops.where(math_ops.less(s, eps), s, math_ops.sqrt(s))
  # Note that the v returned by Tensorflow is v = V
  # (when referencing the equation A = U S V^T)
  # This is unlike Numpy which returns v = V^T
  return math_ops.matmul(
      math_ops.matmul(u, array_ops.diag(si)), v, transpose_b=True)
Author: changchunli | Project: compare_gan | Lines: 25 | Source: classifier_metrics_impl.py
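
The docstring promises M' @ M' == mat. A NumPy sketch of the same SVD construction (note that np.linalg.svd returns V^T where TensorFlow's svd returns V, exactly the difference the comments above call out):

import numpy as np

rng = np.random.RandomState(2)
x = rng.randn(5, 5)
mat = x.dot(x.T)               # symmetric positive semi-definite

s, u, vt = np.linalg.svd(mat)  # NumPy: A = U S V^T, so vt here is V^T
sqrt_mat = u.dot(np.diag(np.sqrt(s))).dot(vt)

np.testing.assert_allclose(sqrt_mat.dot(sqrt_mat), mat, atol=1e-6)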

Example 8: compute_cov

def compute_cov(tensor, tensor_right=None, normalizer=None):
  """Compute the empirical second moment of the rows of a 2D Tensor.

  This function is meant to be applied to random matrices for which the true row
  mean is zero, so that the true second moment equals the true covariance.

  Args:
    tensor: A 2D Tensor.
    tensor_right: An optional 2D Tensor. If provided, this function computes
      the matrix product tensor^T * tensor_right instead of tensor^T * tensor.
    normalizer: optional scalar for the estimator (by default, the normalizer is
        the number of rows of tensor).

  Returns:
    A square 2D Tensor with as many rows/cols as the number of input columns.
  """
  if normalizer is None:
    normalizer = array_ops.shape(tensor)[0]
  if tensor_right is None:
    cov = (
        math_ops.matmul(tensor, tensor, transpose_a=True) / math_ops.cast(
            normalizer, tensor.dtype))
    return (cov + array_ops.transpose(cov)) / math_ops.cast(2.0, cov.dtype)
  else:
    return (math_ops.matmul(tensor, tensor_right, transpose_a=True) /
            math_ops.cast(normalizer, tensor.dtype))
Author: andrewharp | Project: tensorflow | Lines: 26 | Source: fisher_factors.py
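
A hypothetical usage sketch for the default branch (tensor_right=None): when the rows truly have zero mean, the second moment approaches the covariance, here the identity matrix:

import numpy as np

rng = np.random.RandomState(3)
tensor = rng.randn(1000, 4)    # rows are (true) zero-mean unit-variance samples

n = tensor.shape[0]
cov = tensor.T.dot(tensor) / n   # matmul(tensor, tensor, transpose_a=True) / normalizer
cov = (cov + cov.T) / 2.0        # symmetrize, as compute_cov does

np.testing.assert_allclose(cov, np.eye(4), atol=0.2)  # loose tolerance for n=1000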

Example 9: _project_input

  def _project_input(self, inputs, c_prev, m_prev, with_c):
    """Fills in c_prev and m_prev with projected input, for input dimensions
    """
    conf = self._config

    if (inputs is not None and inputs.get_shape().with_rank(2)[1].value > 0
        and len(conf.inputs) > 0):
      if isinstance(inputs, tuple):
        if len(conf.inputs) != len(inputs):
          raise ValueError("Expect inputs as a tuple of {} "
                           "tensors".format(len(conf.inputs)))
        input_splits = inputs
      else:
        input_splits = array_ops.split(
          value=inputs, num_or_size_splits=len(conf.inputs), axis=1)
      input_sz = input_splits[0].get_shape().with_rank(2)[1].value

      for i, j in enumerate(conf.inputs):
        input_project_m = vs.get_variable(
          'project_m_{}'.format(j), [input_sz, conf.num_units],
          dtype=inputs.dtype)
        m_prev[j] = math_ops.matmul(input_splits[i], input_project_m)

        if with_c:
          input_project_c = vs.get_variable(
            'project_c_{}'.format(j), [input_sz, conf.num_units],
            dtype=inputs.dtype)
          c_prev[j] = math_ops.matmul(input_splits[i], input_project_c)
Author: finardi | Project: tensorflow | Lines: 28 | Source: grid_rnn_cell.py

Example 10: testMinimizeSparseResourceVariable

  def testMinimizeSparseResourceVariable(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
        pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
        pred += var1
        loss = pred * pred
        sgd_op = gradient_descent.GradientDescentOptimizer(1.0).minimize(loss)
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
        np_grad = 2 * np_pred
        self.assertAllCloseAccordingToType(
            [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0 - np_grad], var1.eval())
Author: AliMiraftab | Project: tensorflow | Lines: 26 | Source: gradient_descent_test.py

Example 11: test_while_jacobian

  def test_while_jacobian(self):
    x = random_ops.random_uniform([1, 3])
    y = random_ops.random_uniform([3, 3])

    # out = x @ y @ y @ y @ y, where @ is matmul operator.
    _, out = control_flow_ops.while_loop(
        lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
        [0, x])

    def loop_fn(i):
      out_i = array_ops.gather(out, i, axis=1)
      return array_ops.reshape(gradient_ops.gradients(out_i, x)[0], [-1])

    out = pfor_control_flow_ops.pfor(loop_fn, iters=3)

    # The above code does not work with tf.while_loop instead of pfor. So we
    # manually compute the expected output here.
    # Note that gradient of output w.r.t. x is (y @ y @ y @ y)^T.
    expected_output = y
    for _ in range(3):
      expected_output = math_ops.matmul(expected_output, y)
    expected_output = array_ops.transpose(expected_output, [1, 0])

    with session.Session() as sess:
      out, expected = sess.run([out, expected_output])
      self.assertAllClose(expected, out)
Author: aritratony | Project: tensorflow | Lines: 26 | Source: control_flow_ops_test.py

Example 12: test_mixing_eager_and_graph_tensors

  def test_mixing_eager_and_graph_tensors(self):
    with ops.Graph().as_default():
      x1 = array_ops.ones((3, 3))
    x2 = array_ops.ones((3, 3))
    self.assertIsInstance(x2, ops.EagerTensor)
    with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
      math_ops.matmul(x1, x2)
Author: rmlarsen | Project: tensorflow | Lines: 7 | Source: base_layer_test.py

Example 13: runFiniteDifferences

    def runFiniteDifferences(self, shapes, dtypes=(dtypes_lib.float32, dtypes_lib.float64), scalarTest=False):
        with self.test_session(use_gpu=False):
            for shape in shapes:
                for batch in False, True:
                    for dtype in dtypes:
                        if not scalarTest:
                            x = constant_op.constant(np.random.randn(shape[0], shape[1]), dtype)
                            tensor = math_ops.matmul(x, array_ops.transpose(x)) / shape[0]
                        else:
                            # This is designed to be a faster test for larger matrices.
                            x = constant_op.constant(np.random.randn(), dtype)
                            R = constant_op.constant(np.random.randn(shape[0], shape[1]), dtype)
                            e = math_ops.mul(R, x)
                            tensor = math_ops.matmul(e, array_ops.transpose(e)) / shape[0]

                        # Inner-most matrices in tensor are positive definite.
                        if batch:
                            tensor = array_ops.tile(array_ops.expand_dims(tensor, 0), [4, 1, 1])
                        y = linalg_ops.cholesky(tensor)
                        if scalarTest:
                            y = math_ops.reduce_mean(y)
                        error = gradient_checker.compute_gradient_error(x, x._shape_as_list(), y, y._shape_as_list())
                        tf_logging.info("error = %f", error)
                        if dtype == dtypes_lib.float64:
                            self.assertLess(error, 1e-5)
                        else:
                            self.assertLess(error, 3e-3)
Author: kdavis-mozilla | Project: tensorflow | Lines: 27 | Source: cholesky_op_test.py

Example 14: _batch_sqrt_solve

  def _batch_sqrt_solve(self, rhs):
    # Recall the square root of this operator is M + VDV^T.
    # The Woodbury formula gives:
    # (M + VDV^T)^{-1}
    # = M^{-1} - M^{-1} V (D^{-1} + V^T M^{-1} V)^{-1} V^T M^{-1}
    # = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
    # where C is the capacitance matrix.
    m = self._operator
    v = self._v
    cchol = self._chol_capacitance(batch_mode=True)

    # The operators will use batch/singleton mode automatically.  We don't
    # override.
    # M^{-1} rhs
    minv_rhs = m.solve(rhs)
    # V^T M^{-1} rhs
    vt_minv_rhs = math_ops.matmul(v, minv_rhs, adjoint_a=True)
    # C^{-1} V^T M^{-1} rhs
    cinv_vt_minv_rhs = linalg_ops.cholesky_solve(cchol, vt_minv_rhs)
    # V C^{-1} V^T M^{-1} rhs
    v_cinv_vt_minv_rhs = math_ops.matmul(v, cinv_vt_minv_rhs)
    # M^{-1} V C^{-1} V^T M^{-1} rhs
    minv_v_cinv_vt_minv_rhs = m.solve(v_cinv_vt_minv_rhs)

    # M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
    return minv_rhs - minv_v_cinv_vt_minv_rhs
Author: ComeOnGetMe | Project: tensorflow | Lines: 26 | Source: operator_pd_vdvt_update.py
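
The comments above spell out the Woodbury identity the solve follows. A NumPy sketch (with hypothetical stand-ins for the operator M, the factor V, and the diagonal D) confirming that algebra:

import numpy as np

rng = np.random.RandomState(4)
n, k = 6, 2
m = np.diag(rng.rand(n) + 1.0)   # stand-in for the base operator M
v = rng.randn(n, k)              # the low-rank factor V
d = np.diag(rng.rand(k) + 1.0)   # the diagonal D

minv = np.linalg.inv(m)
# Capacitance matrix C = D^{-1} + V^T M^{-1} V
c = np.linalg.inv(d) + v.T.dot(minv).dot(v)
woodbury = minv - minv.dot(v).dot(np.linalg.inv(c)).dot(v.T).dot(minv)

np.testing.assert_allclose(
    woodbury, np.linalg.inv(m + v.dot(d).dot(v.T)), atol=1e-8)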

Example 15: benchmarkBatchMatMulBroadcast

  def benchmarkBatchMatMulBroadcast(self):
    for (a_shape, b_shape) in self.shape_pairs:
      with compat.forward_compatibility_horizon(2019, 4, 26):
        with ops.Graph().as_default(), \
            session.Session(config=benchmark.benchmark_config()) as sess, \
            ops.device("/cpu:0"):
          matrix_a = variables.Variable(
              GetRandomNormalInput(a_shape, np.float32))
          matrix_b = variables.Variable(
              GetRandomNormalInput(b_shape, np.float32))
          variables.global_variables_initializer().run()

          # Use batch matmul op's internal broadcasting.
          self.run_op_benchmark(
              sess,
              math_ops.matmul(matrix_a, matrix_b),
              min_iters=50,
              name="batch_matmul_cpu_{}_{}".format(a_shape, b_shape))

          # Manually broadcast the input matrices using the broadcast_to op.
          broadcasted_batch_shape = array_ops.broadcast_static_shape(
              matrix_a.shape[:-2], matrix_b.shape[:-2])
          broadcasted_a_shape = broadcasted_batch_shape.concatenate(
              matrix_a.shape[-2:])
          broadcasted_b_shape = broadcasted_batch_shape.concatenate(
              matrix_b.shape[-2:])
          self.run_op_benchmark(
              sess,
              math_ops.matmul(
                  array_ops.broadcast_to(matrix_a, broadcasted_a_shape),
                  array_ops.broadcast_to(matrix_b, broadcasted_b_shape)),
              min_iters=50,
              name="batch_matmul_manual_broadcast_cpu_{}_{}".format(
                  a_shape, b_shape))
Author: aritratony | Project: tensorflow | Lines: 34 | Source: batch_matmul_op_test.py
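
The benchmark compares matmul's built-in batch broadcasting against explicit broadcast_to. A NumPy sketch of the equivalence being benchmarked (np.matmul broadcasts leading batch dimensions the same way recent tf.matmul does):

import numpy as np

rng = np.random.RandomState(5)
a = rng.randn(8, 1, 2, 3).astype(np.float32)  # batch dims (8, 1)
b = rng.randn(1, 4, 3, 5).astype(np.float32)  # batch dims (1, 4)

implicit = np.matmul(a, b)                    # broadcasts to batch (8, 4)

a_full = np.broadcast_to(a, (8, 4, 2, 3))     # manual broadcast, as in the benchmark
b_full = np.broadcast_to(b, (8, 4, 3, 5))
manual = np.matmul(a_full, b_full)

np.testing.assert_allclose(implicit, manual, rtol=1e-6, atol=1e-6)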


Note: The tensorflow.python.ops.math_ops.matmul examples in this article were collected by 纯净天空 from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects, and copyright remains with their original authors; consult each project's License before redistributing or reusing the code.