

Python math_ops.cumprod Function Code Examples

This article collects typical usage examples of the cumprod function from tensorflow.python.ops.math_ops in Python. If you have been wondering what exactly cumprod does, how to call it, or what real uses of it look like, the hand-picked code examples here may help.


Below are 14 code examples of the cumprod function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
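Before turning to the examples, a minimal usage sketch may help show what the exclusive and reverse flags do. The sketch assumes TensorFlow 1.x graph mode, which the examples below also use; the input values are made up for illustration.

import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x, as in the examples below
from tensorflow.python.ops import math_ops

x = np.array([1., 2., 3., 4.], dtype=np.float32)

with tf.Session() as sess:
    # Plain cumulative product: [1, 2, 6, 24]
    print(sess.run(math_ops.cumprod(x)))
    # exclusive=True: product of the entries *before* each position: [1, 1, 2, 6]
    print(sess.run(math_ops.cumprod(x, exclusive=True)))
    # reverse=True: accumulate from the end: [24, 24, 12, 4]
    print(sess.run(math_ops.cumprod(x, reverse=True)))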

Example 1: testAxisType

 def testAxisType(self):
   for dtype in self.valid_dtypes:
     x = np.arange(1, 6).reshape([5]).astype(dtype)
     for axis_dtype in self.axis_dtypes():
       with self.cached_session(), self.test_scope():
         p = array_ops.placeholder(x.dtype)
         axis = constant_op.constant(0, axis_dtype)
          math_ops.cumprod(p, axis).eval(feed_dict={p: x})
Developer: Ajaycs99, Project: tensorflow, Lines: 8, Source: scan_ops_test.py

Example 2: testAxisType

 def testAxisType(self):
   for dtype in self.valid_dtypes:
     x = np.arange(1, 6).reshape([5]).astype(dtype)
     for axis_dtype in [dtypes.int64, dtypes.int32]:
       with self.cached_session(use_gpu=True):
         axis = constant_op.constant(0, axis_dtype)
         tf_out = math_ops.cumprod(x, axis).eval()
Developer: abhinav-upadhyay, Project: tensorflow, Lines: 7, Source: scan_ops_test.py

Example 3: _compareGradient

 def _compareGradient(self, shape, axis, exclusive, reverse):
   x = np.arange(1, 9).reshape(shape).astype(np.float64)
   with self.cached_session(use_gpu=True):
     t = ops.convert_to_tensor(x)
     result = math_ops.cumprod(t, axis, exclusive, reverse)
     jacob_t, jacob_n = gradient_checker.compute_gradient(
         t, shape, result, shape, x_init_value=x, delta=1)
   self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
Developer: abhinav-upadhyay, Project: tensorflow, Lines: 8, Source: scan_ops_test.py

Example 4: _compare

  def _compare(self, x, axis, exclusive, reverse):
    np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
    with self.cached_session(), self.test_scope():
      p = array_ops.placeholder(x.dtype)
      prod = math_ops.cumprod(p, axis, exclusive, reverse)
      tf_out = prod.eval(feed_dict={p: x})

    self.assertAllClose(np_out, tf_out)
Developer: Ajaycs99, Project: tensorflow, Lines: 8, Source: scan_ops_test.py
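The handle_options helper used in Examples 4 and 10 is not reproduced on this page; it evidently computes a NumPy reference value that honours cumprod's axis, exclusive and reverse arguments. A stand-in sketch of what such a helper might look like (the name np_cumprod_with_options and its implementation are assumptions for illustration, not the test suite's actual code):

import numpy as np

def np_cumprod_with_options(x, axis=0, exclusive=False, reverse=False):
    # NumPy reference for tf.cumprod's exclusive/reverse flags.
    if reverse:
        x = np.flip(x, axis=axis)
    out = np.cumprod(x, axis=axis)
    if exclusive:
        # Shift by one along `axis`, inserting the multiplicative identity (1) in front.
        pad = [(0, 0)] * x.ndim
        pad[axis] = (1, 0)
        out = np.take(np.pad(out, pad, constant_values=1),
                      range(x.shape[axis]), axis=axis)
    if reverse:
        out = np.flip(out, axis=axis)
    return out

x = np.arange(1, 7).reshape(2, 3).astype(np.float64)
print(np_cumprod_with_options(x, axis=1, exclusive=True, reverse=True))
# [[ 6.  3.  1.]
#  [30.  6.  1.]]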

Example 5: _ProdGrad

def _ProdGrad(op, grad):
  """Gradient for Prod."""
  # The gradient can be expressed by dividing the product by each entry of the
  # input tensor, but this approach can't deal with zeros in the input.
  # Here, we avoid this problem by composing the output as a product of two
  # cumprod operations.

  input_shape = array_ops.shape(op.inputs[0])
  # Reshape reduction indices for the case where the parameter is a scalar
  reduction_indices = array_ops.reshape(op.inputs[1], [-1])

  # Expand grad to full input shape
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  grad = array_ops.tile(grad, tile_scaling)

  # Pack all reduced dimensions into a single one, so we can perform the
  # cumprod ops. If the reduction dims list is empty, it defaults to float32,
  # so we need to cast here.  We put all the shape-related ops on CPU to avoid
  # copying back and forth, and since listdiff is CPU only.
  with ops.device("/cpu:0"):
    rank = array_ops.rank(op.inputs[0])
    reduction_indices = (reduction_indices + rank) % rank
    reduced = math_ops.cast(reduction_indices, dtypes.int32)
    idx = math_ops.range(0, rank)
    other, _ = array_ops.setdiff1d(idx, reduced)
    perm = array_ops.concat([reduced, other], 0)
    reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
    other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
  permuted = array_ops.transpose(op.inputs[0], perm)
  permuted_shape = array_ops.shape(permuted)
  reshaped = array_ops.reshape(permuted, (reduced_num, other_num))

  # Calculate product, leaving out the current entry
  left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
  right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
  # For complex inputs, the gradient is in the conjugate direction.
  y = array_ops.reshape(math_ops.conj(left) * math_ops.conj(right),
                        permuted_shape)

  # Invert the transpose and reshape operations.
  # Make sure to set the statically known shape information through a reshape.
  out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
  return array_ops.reshape(out, input_shape), None
Developer: AnishShah, Project: tensorflow, Lines: 45, Source: math_grad.py
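The core trick in _ProdGrad is that an exclusive cumprod from the left, multiplied by an exclusive reversed cumprod from the right, gives for each position the product of all other entries, and it stays well defined when the input contains zeros. A quick NumPy check of that identity (the array values are made up for illustration):

import numpy as np

x = np.array([2., 0., 3., 4.])

# Exclusive cumprod from the left: product of the entries strictly before i.
left = np.concatenate(([1.], np.cumprod(x[:-1])))
# Exclusive cumprod from the right: product of the entries strictly after i.
right = np.concatenate((np.cumprod(x[::-1][:-1])[::-1], [1.]))

# left * right is the product of all entries except x[i], even with a zero present.
print(left * right)   # [ 0. 24.  0.  0.]
# The naive prod(x) / x[i] would divide by zero at index 1.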

Example 6: _CumprodGrad

def _CumprodGrad(op, grad):
  x = op.inputs[0]
  axis = op.inputs[1]
  reverse = op.get_attr("reverse")

  # TODO This fails when x contains 0 and should be fixed
  prod = math_ops.cumprod(x, axis=axis, reverse=reverse)
  out = math_ops.cumsum(prod * grad, axis=axis, reverse=(not reverse))
  return [out / x, None]
Developer: LaGuardia, Project: tensorflow, Lines: 9, Source: math_grad.py
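The formula above follows from d(cumprod(x)[j]) / d(x[i]) = cumprod(x)[j] / x[i] for i <= j (and 0 otherwise) when x[i] is nonzero, so the vector-Jacobian product is a reversed cumulative sum of prod * grad divided by x; the division is also exactly why the TODO applies when x contains a zero. A hedged NumPy check of the formula against a finite difference (values are illustrative and deliberately nonzero):

import numpy as np

x = np.array([2., 3., 4.])
grad = np.ones_like(x)   # upstream gradient of sum(cumprod(x))

# Formula used by _CumprodGrad with reverse=False: reversed cumsum, then divide by x.
prod = np.cumprod(x)
analytic = np.cumsum((prod * grad)[::-1])[::-1] / x
print(analytic)          # [16. 10.  6.]

# Finite-difference check of d(sum(cumprod(x))) / dx.
eps = 1e-6
numeric = np.array([
    (np.cumprod(x + eps * np.eye(3)[i]).sum() - np.cumprod(x).sum()) / eps
    for i in range(3)
])
print(numeric)           # approximately [16. 10.  6.]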

Example 7: _ProdGrad

def _ProdGrad(op, grad):
  """Gradient for Prod."""
  # The gradient can be expressed by dividing the product by each entry of the
  # input tensor, but this approach can't deal with zeros in the input.
  # Here, we avoid this problem by composing the output as a product of two
  # cumprod operations.

  input_shape = array_ops.shape(op.inputs[0])

  # Expand grad to full input shape
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  grad = array_ops.tile(grad, tile_scaling)

  # Pack all reduced dimensions into a single one, so we can perform the
  # cumprod ops. If the reduction dims list is empty, it defaults to float32,
  # so we need to cast here.
  reduced = math_ops.cast(op.inputs[1], dtypes.int32)
  idx = math_ops.range(0, array_ops.rank(op.inputs[0]))
  other, _ = array_ops.listdiff(idx, reduced)
  perm = array_ops.concat(0, [reduced, other])
  reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
  other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
  permuted = array_ops.transpose(op.inputs[0], perm)
  permuted_shape = array_ops.shape(permuted)
  reshaped = array_ops.reshape(permuted, (reduced_num, other_num))

  # Calculate product, leaving out the current entry
  left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
  right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
  y = array_ops.reshape(left * right, permuted_shape)

  # Invert the transpose and reshape operations.
  # Make sure to set the statically known shape information through a reshape.
  out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
  return array_ops.reshape(out, input_shape), None
Developer: JamesFysh, Project: tensorflow, Lines: 37, Source: math_grad.py

Example 8: test_safe_cumprod

  def test_safe_cumprod(self):
    # Create some random test input
    test_input = np.random.uniform(size=(10, 20))

    for axis in [0, 1]:
      for exclusive in [True, False]:
        with self.test_session():
          # Compute cumprod with regular tf.cumprod
          cumprod_output = math_ops.cumprod(
              test_input, axis=axis, exclusive=exclusive).eval()
          # Compute cumprod with safe_cumprod
          safe_cumprod_output = wrapper.safe_cumprod(
              test_input, axis=axis, exclusive=exclusive).eval()
        for x, y in zip(cumprod_output.shape, safe_cumprod_output.shape):
          self.assertEqual(x, y)
        for x, y in zip(cumprod_output.flatten(),
                        safe_cumprod_output.flatten()):
          # Use assertAlmostEqual for the actual values due to floating point
          self.assertAlmostEqual(x, y, places=5)
Developer: AutumnQYN, Project: tensorflow, Lines: 19, Source: attention_wrapper_test.py
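safe_cumprod itself is not shown here; the test only checks that it matches math_ops.cumprod. The usual way to make a cumulative product numerically safer, and presumably what safe_cumprod does, is to work in log space, i.e. exp(cumsum(log(x))), after clipping the input away from zero. A hedged NumPy sketch of that idea (the clipping bound 1e-10 is an assumption, not the library's actual constant):

import numpy as np

def logspace_cumprod(x, axis=0, exclusive=False, eps=1e-10):
    # Cumulative product computed as exp(cumsum(log(x))).
    # Assumes x >= 0 (e.g. probabilities); entries are clipped to [eps, 1]
    # so the log never sees an exact zero.
    csum = np.cumsum(np.log(np.clip(x, eps, 1.0)), axis=axis)
    if exclusive:
        # Shift right along `axis` so position i excludes x[i] itself.
        csum = np.roll(csum, 1, axis=axis)
        np.swapaxes(csum, 0, axis)[0] = 0.0
    return np.exp(csum)

x = np.random.uniform(size=(10, 20))
print(np.allclose(logspace_cumprod(x, axis=1), np.cumprod(x, axis=1), atol=1e-5))  # True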

Example 9: testInvalidAxis

 def testInvalidAxis(self):
   x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
   input_tensor = ops.convert_to_tensor(x)
   with self.session(use_gpu=True):
     with self.assertRaisesWithPredicateMatch(
         errors_impl.InvalidArgumentError,
         lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
       math_ops.cumprod(input_tensor, -3).eval()
     with self.assertRaisesWithPredicateMatch(
         errors_impl.InvalidArgumentError,
         lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
       math_ops.cumprod(input_tensor, 2).eval()
     with self.assertRaisesWithPredicateMatch(
         errors_impl.InvalidArgumentError,
         lambda e: "axis must be a scalar" in str(e)):
       math_ops.cumprod(input_tensor, [0]).eval()
Developer: abhinav-upadhyay, Project: tensorflow, Lines: 16, Source: scan_ops_test.py

Example 10: _compare

  def _compare(self, x, axis, exclusive, reverse):
    np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
    with self.cached_session(use_gpu=True):
      tf_out = math_ops.cumprod(x, axis, exclusive, reverse).eval()

    self.assertAllClose(np_out, tf_out)
Developer: abhinav-upadhyay, Project: tensorflow, Lines: 6, Source: scan_ops_test.py

Example 11: loop_fn

 def loop_fn(i):
   a = array_ops.gather(x, i)
   return math_ops.cumprod(
       a, axis=axis, exclusive=exclusive, reverse=reverse)
Developer: aritratony, Project: tensorflow, Lines: 4, Source: math_test.py

Example 12: monotonic_attention

def monotonic_attention(p_choose_i, previous_attention, mode):
  """Compute monotonic attention distribution from choosing probabilities.

  Monotonic attention implies that the input sequence is processed in an
  explicitly left-to-right manner when generating the output sequence.  In
  addition, once an input sequence element is attended to at a given output
  timestep, elements occurring before it cannot be attended to at subsequent
  output timesteps.  This function generates attention distributions according
  to these assumptions.  For more information, see ``Online and Linear-Time
  Attention by Enforcing Monotonic Alignments''.

  Args:
    p_choose_i: Probability of choosing input sequence/memory element i.  Should
      be of shape (batch_size, input_sequence_length), and should all be in the
      range [0, 1].
    previous_attention: The attention distribution from the previous output
      timestep.  Should be of shape (batch_size, input_sequence_length).  For
      the first output timestep, previous_attention[n] should be [1, 0, 0, ...,
      0] for all n in [0, ... batch_size - 1].
    mode: How to compute the attention distribution.  Must be one of
      'recursive', 'parallel', or 'hard'.
        * 'recursive' uses tf.scan to recursively compute the distribution.
          This is slowest but is exact, general, and does not suffer from
          numerical instabilities.
        * 'parallel' uses parallelized cumulative-sum and cumulative-product
          operations to compute a closed-form solution to the recurrence
          relation defining the attention distribution.  This makes it more
          efficient than 'recursive', but it requires numerical checks which
          make the distribution non-exact.  This can be a problem in particular
          when input_sequence_length is long and/or p_choose_i has entries very
          close to 0 or 1.
        * 'hard' requires that the probabilities in p_choose_i are all either 0
          or 1, and subsequently uses a more efficient and exact solution.

  Returns:
    A tensor of shape (batch_size, input_sequence_length) representing the
    attention distributions for each sequence in the batch.

  Raises:
    ValueError: mode is not one of 'recursive', 'parallel', 'hard'.
  """
  # Force things to be tensors
  p_choose_i = ops.convert_to_tensor(p_choose_i, name="p_choose_i")
  previous_attention = ops.convert_to_tensor(
      previous_attention, name="previous_attention")
  if mode == "recursive":
    # Use .shape[0].value when it's not None, or fall back on symbolic shape
    batch_size = p_choose_i.shape[0].value or array_ops.shape(p_choose_i)[0]
    # Compute [1, 1 - p_choose_i[0], 1 - p_choose_i[1], ..., 1 - p_choose_i[-2]]
    shifted_1mp_choose_i = array_ops.concat(
        [array_ops.ones((batch_size, 1)), 1 - p_choose_i[:, :-1]], 1)
    # Compute attention distribution recursively as
    # q[i] = (1 - p_choose_i[i])*q[i - 1] + previous_attention[i]
    # attention[i] = p_choose_i[i]*q[i]
    attention = p_choose_i*array_ops.transpose(functional_ops.scan(
        # Need to use reshape to remind TF of the shape between loop iterations
        lambda x, yz: array_ops.reshape(yz[0]*x + yz[1], (batch_size,)),
        # Loop variables yz[0] and yz[1]
        [array_ops.transpose(shifted_1mp_choose_i),
         array_ops.transpose(previous_attention)],
        # Initial value of x is just zeros
        array_ops.zeros((batch_size,))))
  elif mode == "parallel":
    # safe_cumprod computes cumprod in logspace with numeric checks
    cumprod_1mp_choose_i = safe_cumprod(1 - p_choose_i, axis=1, exclusive=True)
    # Compute recurrence relation solution
    attention = p_choose_i*cumprod_1mp_choose_i*math_ops.cumsum(
        previous_attention /
        # Clip cumprod_1mp to avoid divide-by-zero
        clip_ops.clip_by_value(cumprod_1mp_choose_i, 1e-10, 1.), axis=1)
  elif mode == "hard":
    # Remove any probabilities before the index chosen last time step
    p_choose_i *= math_ops.cumsum(previous_attention, axis=1)
    # Now, use exclusive cumprod to remove probabilities after the first
    # chosen index, like so:
    # p_choose_i = [0, 0, 0, 1, 1, 0, 1, 1]
    # cumprod(1 - p_choose_i, exclusive=True) = [1, 1, 1, 1, 0, 0, 0, 0]
    # Product of above: [0, 0, 0, 1, 0, 0, 0, 0]
    attention = p_choose_i*math_ops.cumprod(
        1 - p_choose_i, axis=1, exclusive=True)
  else:
    raise ValueError("mode must be 'recursive', 'parallel', or 'hard'.")
  return attention
Developer: AutumnQYN, Project: tensorflow, Lines: 83, Source: attention_wrapper.py
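The 'hard' branch relies on the exclusive cumprod of (1 - p_choose_i) being 1 up to and including the first chosen index and 0 afterwards, which isolates a single attention position. A small NumPy check of the worked example from the code comments:

import numpy as np

p_choose_i = np.array([0., 0., 0., 1., 1., 0., 1., 1.])

# Exclusive cumprod of (1 - p): stays 1 until the first 1 in p, then drops to 0.
mask = np.concatenate(([1.], np.cumprod(1. - p_choose_i)[:-1]))
print(mask)               # [1. 1. 1. 1. 0. 0. 0. 0.]
print(p_choose_i * mask)  # [0. 0. 0. 1. 0. 0. 0. 0.]  -- exactly one attended index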

Example 13: from_tensor

def from_tensor(tensor, lengths=None, padding=None, ragged_rank=1, name=None):
  """Converts a `Tensor` into a `RaggedTensor`.

  The set of absent/default values may be specified using a vector of lengths
  or a padding value (but not both).  If `lengths` is specified, then the
  output tensor will satisfy `output[row] = tensor[row][:lengths[row]]`.
  If `padding` is specified, then any row *suffix* consisting entirely of
  `padding` will be excluded from the returned `RaggedTensor`.  If neither
  `lengths` nor `padding` is specified, then the returned `RaggedTensor` will
  have no absent/default values.

  Examples:

  ```python
  >>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
  >>> ragged.from_tensor(dt).eval().tolist()
  [[5, 7, 0], [0, 3, 0], [6, 0, 0]]
  >>> ragged.from_tensor(dt, lengths=[2, 0, 3]).eval().tolist()
  [[5, 7], [], [6, 0, 0]]
  >>> ragged.from_tensor(dt, padding=0).eval().tolist()
  [[5, 7], [0, 3], [6]]
  ```

  Args:
    tensor: The `Tensor` to convert.  Must have rank `ragged_rank + 1` or
      higher.
    lengths: An optional set of row lengths, specified using a 1-D integer
      `Tensor` whose length is equal to `tensor.shape[0]` (the number of rows in
      `tensor`).  If specified, then `output[row]` will contain
      `tensor[row][:lengths[row]]`.  Negative lengths are treated as zero.
    padding: An optional padding value.  If specified, then any row suffix
      consisting entirely of `padding` will be excluded from the returned
      RaggedTensor.  `padding` is a `Tensor` with the same dtype as `tensor`
      and with `shape=tensor.shape[ragged_rank + 1:]`.
    ragged_rank: Integer specifying the ragged rank for the returned
      `RaggedTensor`.  Must be greater than zero.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `RaggedTensor` with the specified `ragged_rank`.  The shape of the
    returned ragged tensor is compatible with the shape of `tensor`.
  Raises:
    ValueError: If both `lengths` and `padding` are specified.
  """
  if lengths is not None and padding is not None:
    raise ValueError('Specify lengths or padding, but not both')
  if not isinstance(ragged_rank, int):
    raise TypeError('ragged_rank expected int, got %r' % ragged_rank)
  if ragged_rank <= 0:
    raise ValueError('ragged_rank must be greater than 0; got %s' % ragged_rank)

  with ops.name_scope(name, 'RaggedFromTensor', [tensor, lengths, padding]):
    tensor = ops.convert_to_tensor(tensor, name='tensor')
    tensor.shape.with_rank_at_least(ragged_rank + 1)
    input_shape = array_ops.shape(tensor, out_type=dtypes.int64)
    ncols = input_shape[1]

    # Handle ragged_rank>1 via recursion:
    # If the output should have multiple ragged dimensions, then first
    # flatten the tensor to eliminate all but the last ragged dimension,
    # and recursively convert that flattened tensor.  Then add on the splits
    # for the dimensions that we flattened out.
    if ragged_rank > 1:
      # Flatten `tensor` to eliminate all but the last ragged dimension.
      new_shape = array_ops.concat(
          [constant_op.constant([-1], dtypes.int64), input_shape[ragged_rank:]],
          axis=0)
      flattened = array_ops.reshape(tensor, new_shape)
      # Recursively convert the flattened tensor.
      values = from_tensor(flattened, lengths, padding)
      # The total number of elements in each  dimension.  E.g., if
      # input_shape=[3, 4, 5, 6], then dim[2] has 3*4*5 elements in total.
      dim_size = math_ops.cumprod(input_shape)
      # Construct splits tensors for the dimensions that were flattened.
      new_splits = [
          math_ops.range(0, dim_size[dim - 1] + 1) * input_shape[dim]
          for dim in range(1, ragged_rank)
      ]
      return ragged_factory_ops.from_nested_row_splits(values, new_splits)

    # If padding was specified, then use it to find row lengths.
    if padding is not None:
      padding = ops.convert_to_tensor(
          padding, name='padding', dtype=tensor.dtype)
      padding.shape.assert_is_compatible_with(tensor.shape[2:])

      # Find places where the padding is equal to the tensor.  (This will
      # broadcast `padding` across the outermost 2 dimensions of `tensor`,
      # so `has_default_value.shape = tensor.shape`.)
      has_default_value = math_ops.equal(padding, tensor)

      # If the padding isn't a scalar, then require that all values in the
      # padding match each item in the tensor.  After this block of code,
      # `has_default.shape = tensor.shape[:2]`.  (Unfortunately, we can't just
      # use reduce_all for both cases, because when you pass an empty `axis`
      # list to reduce_all, it reduces all axes; but we want it to reduce no
      # axes -- i.e., to be a no-op.)
      tensor_rank = array_ops.rank(tensor)
      reduce_axis = math_ops.range(2, tensor_rank)
      has_default = control_flow_ops.cond(
#......... part of the code omitted here .........
Developer: abhinav-upadhyay, Project: tensorflow, Lines: 101, Source: ragged_conversion_ops.py
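In the ragged_rank > 1 branch above, math_ops.cumprod(input_shape) gives the total number of cells through each dimension, so dim_size[dim - 1] is the number of rows at nesting level dim once the outer dimensions are flattened, and the uniform row splits are just multiples of input_shape[dim]. A tiny NumPy illustration (the shape is made up):

import numpy as np

input_shape = np.array([3, 4, 5, 6])
dim_size = np.cumprod(input_shape)   # [  3  12  60 360]

# At dim=2 there are 3*4 = dim_size[1] = 12 rows, each of length input_shape[2] = 5,
# so the uniform row splits run 0, 5, 10, ..., 60.
dim = 2
splits = np.arange(0, dim_size[dim - 1] + 1) * input_shape[dim]
print(splits)   # [ 0  5 10 15 20 25 30 35 40 45 50 55 60]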

Example 14: boolean_mask


#......... part of the code omitted here .........
    elif mask.shape.ndims == 0:
      raise ValueError('mask cannot be scalar.')

    # If mask is ragged, then recurse with a non-ragged mask.
    if ragged_tensor.is_ragged(mask):
      if not ragged_tensor.is_ragged(data):
        data = ragged_tensor.RaggedTensor.from_tensor(
            data, ragged_rank=mask.ragged_rank,
            row_splits_dtype=mask.row_splits.dtype)
      # Check that mask.nested_row_splits is a prefix of
      # data.nested_row_splits.
      splits_list = [
          mask.nested_row_splits, data.nested_row_splits[:mask.ragged_rank]
      ]
      with ops.control_dependencies(
          ragged_util.assert_splits_match(splits_list)):
        # Strip off ragged `splits` until `mask` is non-ragged.  Keep the splits
        # that we strip off in `splits`, so we can add them back on after
        # we recursively mask the non-ragged data.
        splits = []
        while ragged_tensor.is_ragged(mask):
          if mask.shape.ndims > 2:
            splits.append(mask.row_splits)
          else:
            # Count the number of True mask values in each row to find the
            # lengths of the filtered rows; then convert to splits.
            int_mask = ragged_functional_ops.map_flat_values(
                math_ops.cast, mask, dtype=row_splits_dtype)
            masked_row_lengths = ragged_math_ops.reduce_sum(int_mask, axis=1)
            splits.append(ragged_util.lengths_to_splits(masked_row_lengths))
          mask = mask.values
          data = data.values

        # Recursively apply the nested non-ragged mask to the nested data.
        masked_values = boolean_mask(data, mask, keepdims)

        # Add the ragged `splits` back to the result.
        if keepdims:
          masked_values = ragged_tensor.RaggedTensor.from_nested_row_splits(
              masked_values, splits, validate=False)

        return masked_values

    # If mask is non-ragged and has rank 1, and data is ragged, then build a
    # ragged tensor with the indicated rows.
    elif ragged_tensor.is_ragged(data) and mask.shape.ndims == 1:
      # Get the masked splits: first get the length of each row, then filter
      # out the rows that we are deleting, and convert that filtered set of
      # masks back to a splits tensor.
      lengths = data.row_lengths()
      masked_lengths = array_ops.boolean_mask(lengths, mask)
      masked_splits = ragged_util.lengths_to_splits(masked_lengths)

      # Get the masked values: first get row ids corresponding to each
      # value, then use tf.gather to build a boolean mask that's false for
      # values that come from rows that we are deleting, and use that mask to
      # construct the masked values tensor.
      segment_ids = segment_id_ops.row_splits_to_segment_ids(data.row_splits)
      segment_mask = array_ops.gather(mask, segment_ids)
      masked_values = boolean_mask(data.values, segment_mask, keepdims=False)

      return ragged_tensor.RaggedTensor.from_row_splits(masked_values,
                                                        masked_splits,
                                                        validate=False)

    # If mask is non-ragged and has rank>1, then convert it to be ragged,
    # with a ragged rank matching data.
    if ragged_tensor.is_ragged(data):
      mask = ragged_tensor.RaggedTensor.from_tensor(
          mask, ragged_rank=min(data.ragged_rank, mask.shape.ndims - 1),
          row_splits_dtype=data.row_splits.dtype)
      return boolean_mask(data, mask, keepdims)

    # Otherwise, data and mask are both `Tensor`s.
    else:
      # Apply `boolean_mask` to get the masked values.
      masked_values = array_ops.boolean_mask(data, mask)

      if mask.shape.ndims >= 2 and keepdims:
        # Add the innermost ragged dimension.  For each innermost cell, get the
        # number of values it contains.  Then flatten that to get a list of
        # cell lengths, and convert it to splits.  Finally, combine the splits
        # and values to get the innermost ragged tensor.
        masked_lengths = math_ops.count_nonzero(mask, axis=-1,
                                                dtype=row_splits_dtype)
        flattened_masked_lengths = array_ops.reshape(masked_lengths, [-1])
        masked_values = ragged_tensor.RaggedTensor.from_row_lengths(
            masked_values, flattened_masked_lengths, validate=False)

        # Wrap remaining ragged dimensions.
        if mask.shape.ndims > 2 and keepdims:
          mask_shape = array_ops.shape(mask, out_type=row_splits_dtype)
          split_size = math_ops.cumprod(mask_shape) + 1
          for dim in range(mask.shape.ndims - 3, -1, -1):
            elt_size = mask_shape[dim + 1]
            masked_splits = math_ops.range(split_size[dim]) * elt_size
            masked_values = ragged_tensor.RaggedTensor.from_row_splits(
                masked_values, masked_splits, validate=False)

      return masked_values
Developer: aritratony, Project: tensorflow, Lines: 101, Source: ragged_array_ops.py


Note: The tensorflow.python.ops.math_ops.cumprod examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.