

Python array_ops.broadcast_to Function Code Examples

This article collects typical code examples of the broadcast_to function from the Python module tensorflow.python.ops.array_ops. If you are wondering what broadcast_to does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.


The 15 broadcast_to code examples below are sorted by popularity by default.
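
Before the examples, here is a minimal, self-contained sketch of what the op does, written against the public tf.broadcast_to alias (TensorFlow 2.x eager mode assumed): it expands a tensor to a larger shape following NumPy broadcasting rules, tiling size-1 or missing leading dimensions to match the target shape.

import numpy as np
import tensorflow as tf

x = tf.constant([1, 2, 3])      # shape [3]
y = tf.broadcast_to(x, [2, 3])  # a leading dim is added and tiled: shape [2, 3]
print(y.numpy())                # [[1 2 3]
                                #  [1 2 3]]
# The result matches NumPy's broadcasting semantics exactly.
assert np.array_equal(y.numpy(), np.broadcast_to([1, 2, 3], [2, 3]))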

Example 1: benchmarkBatchMatMulBroadcast

  def benchmarkBatchMatMulBroadcast(self):
    for (a_shape, b_shape) in self.shape_pairs:
      with compat.forward_compatibility_horizon(2019, 4, 26):
        with ops.Graph().as_default(), \
            session.Session(config=benchmark.benchmark_config()) as sess, \
            ops.device("/cpu:0"):
          matrix_a = variables.Variable(
              GetRandomNormalInput(a_shape, np.float32))
          matrix_b = variables.Variable(
              GetRandomNormalInput(b_shape, np.float32))
          variables.global_variables_initializer().run()

          # Use batch matmul op's internal broadcasting.
          self.run_op_benchmark(
              sess,
              math_ops.matmul(matrix_a, matrix_b),
              min_iters=50,
              name="batch_matmul_cpu_{}_{}".format(a_shape, b_shape))

          # Manually broadcast the input matrices using the broadcast_to op.
          broadcasted_batch_shape = array_ops.broadcast_static_shape(
              matrix_a.shape[:-2], matrix_b.shape[:-2])
          broadcasted_a_shape = broadcasted_batch_shape.concatenate(
              matrix_a.shape[-2:])
          broadcasted_b_shape = broadcasted_batch_shape.concatenate(
              matrix_b.shape[-2:])
          self.run_op_benchmark(
              sess,
              math_ops.matmul(
                  array_ops.broadcast_to(matrix_a, broadcasted_a_shape),
                  array_ops.broadcast_to(matrix_b, broadcasted_b_shape)),
              min_iters=50,
              name="batch_matmul_manual_broadcast_cpu_{}_{}".format(
                  a_shape, b_shape))
Author: aritratony | Project: tensorflow | Lines: 34 | Source: batch_matmul_op_test.py
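
As a side note to the manual-broadcast branch above: broadcast_static_shape computes the broadcast of two static shapes at graph-construction time. A minimal sketch with made-up batch shapes, using the public tf.broadcast_static_shape alias (TensorFlow 2.x assumed):

import tensorflow as tf

# Hypothetical batch dimensions: [8, 1] broadcasts against [1, 4] to give [8, 4].
a_batch = tf.TensorShape([8, 1])
b_batch = tf.TensorShape([1, 4])
print(tf.broadcast_static_shape(a_batch, b_batch))  # (8, 4)
# Re-attach (hypothetical) trailing matrix dims, as the benchmark does above.
full_a = tf.broadcast_static_shape(a_batch, b_batch).concatenate([32, 64])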

Example 2: testBroadcastScalarToNonScalar

  def testBroadcastScalarToNonScalar(self):
    with self.session(use_gpu=True):
      x = np.array(1.0, dtype=np.float64)  # np.float is removed in modern NumPy
      v_tf = array_ops.broadcast_to(constant_op.constant(1.0),
                                    [2, 3, 4, 1, 1, 1])
      v_np = np.broadcast_to(x, [2, 3, 4, 1, 1, 1])
      self.assertAllEqual(v_tf.eval(), v_np)
Author: kylin9872 | Project: tensorflow | Lines: 7 | Source: broadcast_to_ops_test.py

Example 3: testBroadcastToBasic

  def testBroadcastToBasic(self):
    for dtype in [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64]:
      with self.test_session(use_gpu=True):
        x = np.array([1, 2, 3], dtype=dtype)
        v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
        v_np = np.broadcast_to(x, [3, 3])
        self.assertAllEqual(v_tf.eval(), v_np)
Author: AnishShah | Project: tensorflow | Lines: 7 | Source: broadcast_to_ops_test.py

Example 4: testBroadcastToBadOutputShape

  def testBroadcastToBadOutputShape(self):
    with context.eager_mode():
      # assertRaisesRegexp is the deprecated spelling of assertRaisesRegex.
      with self.assertRaisesRegex(errors.InvalidArgumentError,
                                  "Unable to broadcast tensor of shape"):
        self.evaluate(
            array_ops.broadcast_to(
                constant_op.constant([0, 1]), constant_op.constant([2, 1])))
Author: adit-chandra | Project: tensorflow | Lines: 7 | Source: broadcast_to_ops_test.py

Example 5: testBroadcastToShapeLargerDim2

  def testBroadcastToShapeLargerDim2(self):
    input_shape = [2, 1, 3, 2, 2, 2, 1, 1, 1]
    output_shape = [1, 1, 1, 2, 5, 3, 2, 2, 2, 3, 3, 3]
    with self.cached_session(use_gpu=True):
      x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
      v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
      v_np = np.broadcast_to(x, output_shape)
      self.assertAllEqual(v_tf.eval(), v_np)
Author: kylin9872 | Project: tensorflow | Lines: 8 | Source: broadcast_to_ops_test.py

Example 6: _broadcast_to_uniform_shape

def _broadcast_to_uniform_shape(rt_input, shape, broadcast_inner_dimensions):
  """Broadcasts rt_input to the uniform shape `shape`."""
  if isinstance(rt_input, ragged_tensor.RaggedTensor):
    raise ValueError('Incompatible with shape: ragged rank mismatch')
  if broadcast_inner_dimensions:
    return array_ops.broadcast_to(rt_input, shape.inner_dim_sizes)
  else:
    return rt_input
Author: aritratony | Project: tensorflow | Lines: 8 | Source: ragged_tensor_shape.py

Example 7: testGradientForScalar

  def testGradientForScalar(self):
    x = constant_op.constant(1, dtype=dtypes.float32)
    v = array_ops.broadcast_to(x, [2, 4, 3])
    out = 2 * v
    with self.cached_session():
      err = gradient_checker.compute_gradient_error(x, x.get_shape(), out,
                                                    out.get_shape())
    self.assertLess(err, 1e-4)
Author: kylin9872 | Project: tensorflow | Lines: 8 | Source: broadcast_to_ops_test.py

Example 8: testGradientWithBroadcastAllDimensions

  def testGradientWithBroadcastAllDimensions(self):
    x = constant_op.constant([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32)
    v = array_ops.broadcast_to(x, [5, 4, 6])
    out = 2 * v
    with self.test_session():
      err = gradient_checker.compute_gradient_error(x, x.get_shape(),
                                                    out, out.get_shape())
    self.assertLess(err, 1e-4)
Author: AnishShah | Project: tensorflow | Lines: 8 | Source: broadcast_to_ops_test.py

Example 9: _verifyLu

  def _verifyLu(self, x, output_idx_type=dtypes.int64):
    # Verify that Px = LU.
    lu, perm = linalg_ops.lu(x, output_idx_type=output_idx_type)

    # Prepare the lower factor of shape num_rows x num_rows
    lu_shape = np.array(lu.shape.as_list())
    batch_shape = lu_shape[:-2]
    num_rows = lu_shape[-2]
    num_cols = lu_shape[-1]

    lower = array_ops.matrix_band_part(lu, -1, 0)

    if num_rows > num_cols:
      eye = linalg_ops.eye(
          num_rows, batch_shape=batch_shape, dtype=lower.dtype)
      lower = array_ops.concat([lower, eye[..., num_cols:]], axis=-1)
    elif num_rows < num_cols:
      lower = lower[..., :num_rows]

    # Fill the diagonal with ones.
    ones_diag = array_ops.ones(
        np.append(batch_shape, num_rows), dtype=lower.dtype)
    lower = array_ops.matrix_set_diag(lower, ones_diag)

    # Prepare the upper factor.
    upper = array_ops.matrix_band_part(lu, 0, -1)

    verification = math_ops.matmul(lower, upper)

    # Permute the rows of the product of the triangular factors.
    if num_rows > 0:
      # Reshape the product of the triangular factors and permutation indices
      # to a single batch dimension. This makes it easy to apply
      # invert_permutation and gather_nd ops.
      perm_reshaped = array_ops.reshape(perm, [-1, num_rows])
      verification_reshaped = array_ops.reshape(verification,
                                                [-1, num_rows, num_cols])
      # Invert the permutation in each batch.
      inv_perm_reshaped = map_fn.map_fn(array_ops.invert_permutation,
                                        perm_reshaped)
      batch_size = perm_reshaped.shape.as_list()[0]
      # Prepare the batch indices with the same shape as the permutation.
      # The corresponding batch index is paired with each of the `num_rows`
      # permutation indices.
      batch_indices = math_ops.cast(
          array_ops.broadcast_to(
              math_ops.range(batch_size)[:, None], perm_reshaped.shape),
          dtype=output_idx_type)
      permuted_verification_reshaped = array_ops.gather_nd(
          verification_reshaped,
          array_ops.stack([batch_indices, inv_perm_reshaped], axis=-1))

      # Reshape the verification matrix back to the original shape.
      verification = array_ops.reshape(permuted_verification_reshaped,
                                       lu_shape)

    self._verifyLuBase(x, lower, upper, perm, verification,
                       output_idx_type)
Author: adit-chandra | Project: tensorflow | Lines: 58 | Source: lu_op_test.py
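
The batch-permutation step above is the subtle part. The following is a minimal standalone sketch (TensorFlow 2.x eager mode, with made-up shapes) of how pairing broadcast batch indices with inverted permutations lets gather_nd reorder rows independently per batch element:

import tensorflow as tf

# A batch of 2 matrices, each 3 x 2.
verification = tf.reshape(tf.range(12, dtype=tf.float32), [2, 3, 2])
perm = tf.constant([[2, 0, 1], [1, 2, 0]])  # one row permutation per batch element
inv_perm = tf.map_fn(tf.math.invert_permutation, perm)
# Pair each permutation index with its batch index: [[0 0 0], [1 1 1]].
batch_indices = tf.broadcast_to(tf.range(2)[:, None], perm.shape)
indices = tf.stack([batch_indices, inv_perm], axis=-1)  # shape [2, 3, 2]
# permuted[b, i] == verification[b, inv_perm[b, i]]
permuted = tf.gather_nd(verification, indices)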

Example 10: testGradientWithIncreasingRank

  def testGradientWithIncreasingRank(self):
    x = constant_op.constant([[1], [2]],
                             dtype=dtypes.float32)
    v = array_ops.broadcast_to(x, [5, 2, 3])
    out = 2 * v
    with self.test_session():
      err = gradient_checker.compute_gradient_error(x, x.get_shape(),
                                                    out, out.get_shape())
    self.assertLess(err, 1e-4)
Author: AnishShah | Project: tensorflow | Lines: 9 | Source: broadcast_to_ops_test.py

Example 11: testGradientWithSameRank

  def testGradientWithSameRank(self):
    x = constant_op.constant(np.reshape(np.arange(6), (2, 1, 3)),
                             dtype=dtypes.float32)
    v = array_ops.broadcast_to(x, [2, 5, 3])
    out = 2 * v
    with self.test_session():
      err = gradient_checker.compute_gradient_error(x, x.get_shape(),
                                                    out, out.get_shape())
    self.assertLess(err, 1e-4)
Author: AnishShah | Project: tensorflow | Lines: 9 | Source: broadcast_to_ops_test.py

Example 12: testBroadcastToShape

  def testBroadcastToShape(self):
    for input_dim in range(1, 6):
      for output_dim in range(input_dim, 6):
        with self.test_session(use_gpu=True):
          input_shape = [2] * input_dim
          output_shape = [2] * output_dim
          x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
          v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
          v_np = np.broadcast_to(x, output_shape)
          self.assertAllEqual(v_tf.eval(), v_np)
Author: AnishShah | Project: tensorflow | Lines: 10 | Source: broadcast_to_ops_test.py

Example 13: testGradientForScalar

  def testGradientForScalar(self):
    # TODO(alextp): There is a bug with broadcast_to on GPU from scalars,
    # hence we make this test cpu-only.
    with ops.device("cpu:0"):
      x = constant_op.constant(1, dtype=dtypes.float32)
      v = array_ops.broadcast_to(x, [2, 4, 3])
      out = 2 * v
      with self.test_session():
        err = gradient_checker.compute_gradient_error(x, x.get_shape(),
                                                      out, out.get_shape())
    self.assertLess(err, 1e-4)
Author: AnishShah | Project: tensorflow | Lines: 11 | Source: broadcast_to_ops_test.py

Example 14: testGradientWithLargeDim

  def testGradientWithLargeDim(self):
    input_shape = [2, 1, 3, 2, 2, 2, 1, 1, 1]
    output_shape = [1, 1, 1, 2, 5, 3, 2, 2, 2, 3, 3, 3]
    x = constant_op.constant(np.array(np.random.randn(*input_shape),
                                      dtype=np.float32))
    v = array_ops.broadcast_to(x, output_shape)
    out = 2 * v
    with self.cached_session():
      err = gradient_checker.compute_gradient_error(x, x.get_shape(),
                                                    out, out.get_shape())
    self.assertLess(err, 1e-4)
Author: kylin9872 | Project: tensorflow | Lines: 11 | Source: broadcast_to_ops_test.py

Example 15: testBroadcastToShapeTypeAndInference

  def testBroadcastToShapeTypeAndInference(self):
    for dtype in [dtypes.int32, dtypes.int64]:
      with self.test_session(use_gpu=True):
        x = np.array([1, 2, 3])
        v_tf = array_ops.broadcast_to(
            constant_op.constant(x),
            constant_op.constant([3, 3], dtype=dtype))
        shape = v_tf.get_shape().as_list()
        v_np = np.broadcast_to(x, [3, 3])
        self.assertAllEqual(v_tf.eval(), v_np)
        # Check shape inference when the shape input is a constant.
        self.assertAllEqual(shape, v_np.shape)
Author: AnishShah | Project: tensorflow | Lines: 12 | Source: broadcast_to_ops_test.py


Note: the tensorflow.python.ops.array_ops.broadcast_to examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community open-source projects, and their copyright remains with the original authors; consult each project's license before using or redistributing them. Please do not repost without permission.