

Python math_ops.range Method Code Examples

This article collects typical usage examples of math_ops.range from the Python module tensorflow.python.ops.math_ops. If you are wondering what math_ops.range does, how to call it, or where to use it, the curated code examples below should help. You can also explore further usage examples from the containing module, tensorflow.python.ops.math_ops.


The 15 code examples of math_ops.range below are ordered by popularity.

Example 1: _transpose_batch_time

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def _transpose_batch_time(x):
    """Transpose the batch and time dimensions of a Tensor.
    Retains as much of the static shape information as possible.
    Args:
        x: A tensor of rank 2 or higher.
    Returns:
        x transposed along the first two dimensions.
    Raises:
        ValueError: if `x` is rank 1 or lower.
    """
    x_static_shape = x.get_shape()
    if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
        raise ValueError(
            "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
            (x, x_static_shape))
    x_rank = array_ops.rank(x)
    x_t = array_ops.transpose(
        x, array_ops.concat(
            ([1, 0], math_ops.range(2, x_rank)), axis=0))
    x_t.set_shape(
        tensor_shape.TensorShape([
            x_static_shape[1].value, x_static_shape[0].value
        ]).concatenate(x_static_shape[2:]))
    return x_t 
Source: hirofumi0810 / tensorflow_end2end_speech_recognition, file: dynamic_decoder.py, 26 lines
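
The trick above is worth seeing end to end: the permutation [1, 0, 2, ..., rank-1] is built dynamically with math_ops.range so the function works for any rank >= 2. Below is a minimal sketch of the same logic using the public TF 2.x API (tf.range, tf.transpose, eager mode); the shapes are made up for illustration.

import tensorflow as tf

x = tf.zeros([4, 7, 16])  # hypothetical [batch, time, depth] tensor
# Build the permutation [1, 0, 2, ..., rank-1] at run time
perm = tf.concat([[1, 0], tf.range(2, tf.rank(x))], axis=0)
x_t = tf.transpose(x, perm)
print(x_t.shape)  # (7, 4, 16): batch and time swapped, depth untouched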

Example 2: unstack

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def unstack(self, value, name=None):
    """Unstack the values of a `Tensor` in the TensorArray.

    If input value shapes have rank-`R`, then the output TensorArray will
    contain elements whose shapes are rank-`(R-1)`.

    Args:
      value: (N+1)-D.  Tensor of type `dtype`.  The Tensor to unstack.
      name: A name for the operation (optional).

    Returns:
      A new TensorArray object with flow that ensures the unstack occurs.
      Use this object for all subsequent operations.

    Raises:
      ValueError: if the shape inference fails.
    """
    with ops.name_scope(name, "TensorArrayUnstack", [self._handle, value]):
      num_elements = array_ops.shape(value)[0]
      return self.scatter(
          indices=math_ops.range(0, num_elements), value=value, name=name) 
Source: ryfeus / lambda-packs, file: tensor_array_ops.py, 23 lines
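
Under the hood, unstack is just scatter over indices range(0, num_elements). A minimal usage sketch with the public TF 2.x tf.TensorArray API, using invented values:

import tensorflow as tf

value = tf.constant([[1, 2], [3, 4], [5, 6]])  # rank 2, three elements
ta = tf.TensorArray(dtype=tf.int32, size=3)
ta = ta.unstack(value)  # equivalent to scatter(indices=range(0, 3), value=value)
print(ta.read(1).numpy())  # [3 4] -- each stored element is rank 1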

Example 3: _TileGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def _TileGrad(op, grad):
  """Sum reduces grad along the tiled dimensions."""
  assert isinstance(grad, ops.Tensor)
  input_shape = array_ops.shape(op.inputs[0])
  # We interleave multiples and input_shape to get split_shape,
  # reshape grad to split_shape, and reduce along all even
  # dimensions (the tiled dimensions) to get the result
  # with shape input_shape.  For example
  #   input_shape = [20, 30, 40]
  #   multiples = [2, 3, 4]
  #   split_shape = [2, 20, 3, 30, 4, 40]
  #   axes = [0, 2, 4]
  split_shape = array_ops.reshape(
      array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
  axes = math_ops.range(0, array_ops.size(split_shape), 2)
  input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
  # Fix shape inference
  input_grad.set_shape(op.inputs[0].get_shape())
  return [input_grad, None] 
Source: ryfeus / lambda-packs, file: array_grad.py, 21 lines
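
The interleave-reshape-reduce pattern is easiest to verify numerically. Here is a NumPy sketch using exactly the shapes from the comment above; math_ops.range(0, size, 2) corresponds to np.arange(0, size, 2):

import numpy as np

input_shape = np.array([20, 30, 40])
multiples = np.array([2, 3, 4])
# Interleave multiples and input_shape: [2, 20, 3, 30, 4, 40]
split_shape = np.transpose(np.stack([multiples, input_shape])).reshape(-1)
axes = np.arange(0, split_shape.size, 2)   # the tiled dimensions: [0, 2, 4]
grad = np.ones(input_shape * multiples)    # shape (40, 90, 160)
input_grad = grad.reshape(split_shape).sum(axis=tuple(axes))
print(input_grad.shape)  # (20, 30, 40) -- reduced back to the input shape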

Example 4: _event_dims_tensor

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def _event_dims_tensor(self, sample):
    """Return a 1D `int32` tensor: `range(rank(sample))[-event_ndims:]`."""
    if self.event_ndims is None:
      raise ValueError("Jacobian cannot be computed with unknown event_ndims")
    static_event_ndims = tensor_util.constant_value(self.event_ndims)
    static_rank = sample.get_shape().ndims
    if static_event_ndims is not None and static_rank is not None:
      return ops.convert_to_tensor(
          static_rank + np.arange(-static_event_ndims, 0).astype(np.int32))

    if static_event_ndims is not None:
      event_range = np.arange(-static_event_ndims, 0).astype(np.int32)
    else:
      event_range = math_ops.range(-self.event_ndims, 0, dtype=dtypes.int32)

    if static_rank is not None:
      return event_range + static_rank
    else:
      return event_range + array_ops.rank(sample) 
Source: ryfeus / lambda-packs, file: bijector_impl.py, 21 lines
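
Both the static branch (np.arange) and the dynamic branch (math_ops.range) compute range(rank(sample))[-event_ndims:]. A small NumPy sketch with assumed values rank=4 and event_ndims=2:

import numpy as np

rank, event_ndims = 4, 2                     # assumed for illustration
event_dims = np.arange(rank)[-event_ndims:]  # the last two axes: [2 3]
# The dynamic branch builds the same thing as rank + range(-event_ndims, 0)
assert (event_dims == rank + np.arange(-event_ndims, 0)).all()
print(event_dims)  # [2 3]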

Example 5: _reduce_join_reduction_dims

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def _reduce_join_reduction_dims(x, axis, reduction_indices):
  """Returns range(rank(x) - 1, 0, -1) if reduction_indices is None."""
  # TODO(aselle): Remove this after deprecation
  if reduction_indices is not None:
    if axis is not None:
      raise ValueError("Can't specify both 'axis' and 'reduction_indices'.")
    axis = reduction_indices
  if axis is not None:
    return axis
  else:
    # Fast path: avoid creating Rank and Range ops if ndims is known.
    if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
      return constant_op.constant(
          np.arange(x.get_shape().ndims - 1, -1, -1), dtype=dtypes.int32)

    # Otherwise, we rely on Range and Rank to do the right thing at run-time.
    return math_ops.range(array_ops.rank(x) - 1, -1, -1) 
Source: ryfeus / lambda-packs, file: string_ops.py, 19 lines
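
When no axis is given, reduce_join reduces over all dimensions from innermost to outermost. A TF 2.x sketch of the dynamic fallback, with a made-up rank-2 input:

import tensorflow as tf

x = tf.constant([["a", "b"], ["c", "d"]])
# Dynamic fallback: range(rank(x) - 1, -1, -1) enumerates axes innermost-first
dims = tf.range(tf.rank(x) - 1, -1, -1)
print(dims.numpy())  # [1 0]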

Example 6: _BiasAddGradV1

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def _BiasAddGradV1(unused_bias_op, received_grad):
  """Return the gradients for the 2 inputs of bias_op.

  The first input of unused_bias_op is the tensor t, and its gradient is
  just the gradient the unused_bias_op received.

  The second input of unused_bias_op is the bias vector, which has one fewer
  dimension than "received_grad" (the batch dimension).  Its gradient is the
  received gradient summed over the batch dimension, which is the first dimension.

  Args:
    unused_bias_op: The BiasOp for which we need to generate gradients.
    received_grad: Tensor.  The gradients passed to the BiasOp.

  Returns:
    Two tensors, the first one for the "tensor" input of the BiasOp,
    the second one for the "bias" input of the BiasOp.
  """
  reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
  return (received_grad, math_ops.reduce_sum(received_grad,
                                             reduction_dim_tensor)) 
Source: ryfeus / lambda-packs, file: nn_grad.py, 23 lines
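
range(rank - 1) enumerates every axis except the last, so the bias gradient is the incoming gradient summed over all non-channel axes. A TF 2.x sketch with an assumed [batch, channels] gradient:

import tensorflow as tf

received_grad = tf.ones([8, 5])                        # assumed [batch, channels]
reduction_dims = tf.range(tf.rank(received_grad) - 1)  # [0]: everything but the last axis
bias_grad = tf.reduce_sum(received_grad, reduction_dims)
print(bias_grad.numpy())  # [8. 8. 8. 8. 8.] -- one scalar per bias element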

Example 7: count_params

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def count_params(x):
  """Returns the number of scalars in a Keras variable.

  Arguments:
      x: Keras variable.

  Returns:
      Integer, the number of scalars in `x`.

  Example:
  ```python
      >>> kvar = K.zeros((2,3))
      >>> K.count_params(kvar)
      6
      >>> K.eval(kvar)
      array([[ 0.,  0.,  0.],
             [ 0.,  0.,  0.]], dtype=float32)
  ```
  """
  shape = x.get_shape()
  return np.prod([shape[i]._value for i in range(len(shape))]) 
Source: ryfeus / lambda-packs, file: backend.py, 23 lines

Example 8: _lengths_to_masks

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be less than max_length.
  Returns:
    masks: Masks that can be used to get rid of padding.
  """
  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
  return masks 
Source: ryfeus / lambda-packs, file: crf.py, 19 lines
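
The mask is built by broadcasting a tiled range(max_length) row against a column of lengths. A TF 2.x sketch with assumed lengths [2, 3] and max_length 4 (tf.cast stands in for the deprecated math_ops.to_float/to_int64):

import tensorflow as tf

lengths, max_length = tf.constant([2, 3]), 4     # assumed values
row = tf.expand_dims(tf.range(max_length), 0)    # [[0, 1, 2, 3]]
tiled = tf.tile(row, [tf.shape(lengths)[0], 1])  # one row per sequence
masks = tf.cast(tiled < tf.expand_dims(lengths, 1), tf.float32)
print(masks.numpy())
# [[1. 1. 0. 0.]
#  [1. 1. 1. 0.]]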

Example 9: __init__

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def __init__(self,
               key_dtype,
               value_dtype,
               default_value,
               empty_key,
               num_shards=1,
               name='ShardedMutableHashTable'):
    with ops.name_scope(name, 'sharded_mutable_hash_table') as scope:
      super(ShardedMutableDenseHashTable, self).__init__(key_dtype,
                                                         value_dtype, scope)
      table_shards = []
      for i in range(num_shards):
        table_shards.append(
            lookup.MutableDenseHashTable(
                key_dtype=key_dtype,
                value_dtype=value_dtype,
                default_value=default_value,
                empty_key=empty_key,
                name='%s-%d-of-%d' % (name, i + 1, num_shards)))
      self._table_shards = table_shards
      # TODO(andreasst): add a value_shape() method to LookupInterface
      # pylint: disable=protected-access
      self._value_shape = self._table_shards[0]._value_shape
      # pylint: enable=protected-access 
Source: ryfeus / lambda-packs, file: sharded_mutable_dense_hashtable.py, 26 lines

Example 10: insert

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def insert(self, keys, values, name=None):
    self._check_keys(keys)
    num_shards = self._num_shards
    if num_shards == 1:
      return self._table_shards[0].insert(keys, values, name=name)

    shard_indices = self._shard_indices(keys)
    # TODO(andreasst): support 'keys' that are not vectors
    key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
                                                 num_shards)
    value_shards = data_flow_ops.dynamic_partition(values, shard_indices,
                                                   num_shards)
    return_values = [
        self._table_shards[i].insert(key_shards[i], value_shards[i], name=name)
        for i in range(num_shards)
    ]

    return control_flow_ops.group(*return_values) 
Source: ryfeus / lambda-packs, file: sharded_mutable_dense_hashtable.py, 20 lines
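
Note that the loop here uses Python's built-in range over shards; the interesting op is dynamic_partition, which routes each key to its shard. A TF 2.x sketch with a hypothetical modulo shard function (the real class derives shard indices from a hash in _shard_indices):

import tensorflow as tf

keys = tf.constant([10, 11, 12, 13, 14], dtype=tf.int32)
num_shards = 2
shard_indices = keys % num_shards  # stand-in for the real _shard_indices
key_shards = tf.dynamic_partition(keys, shard_indices, num_shards)
print([k.numpy() for k in key_shards])  # [array([10, 12, 14]), array([11, 13])]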

Example 11: _mean

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def _mean(self):
    with ops.control_dependencies(self._assertions):
      distribution_means = [d.mean() for d in self.components]
      cat_probs = self._cat_probs(log_probs=False)
      # This was checked to not be None at construction time.
      static_event_rank = self.event_shape.ndims
      # Expand the rank of x up to static_event_rank times so that
      # broadcasting works correctly.
      def expand(x):
        expanded_x = x
        for _ in range(static_event_rank):
          expanded_x = array_ops.expand_dims(expanded_x, -1)
        return expanded_x
      cat_probs = [expand(c_p) for c_p in cat_probs]
      partial_means = [
          c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
      ]
      # These should all be the same shape by virtue of matching
      # batch_shape and event_shape.
      return math_ops.add_n(partial_means) 
Source: ryfeus / lambda-packs, file: mixture.py, 22 lines
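
This example, too, uses Python's built-in range (one expand_dims per event dimension); the math is the standard mixture mean sum_k p_k * mean_k. A NumPy sketch with two assumed components over a rank-1 event:

import numpy as np

cat_probs = np.array([0.3, 0.7])  # assumed mixture weights
component_means = np.array([[0.0, 0.0], [10.0, 10.0]])
# One expand_dims per event dimension so the weights broadcast
expanded = cat_probs[:, np.newaxis]  # shape (2, 1)
mixture_mean = np.sum(expanded * component_means, axis=0)
print(mixture_mean)  # [7. 7.] = 0.3 * 0 + 0.7 * 10 per event coordinate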

Example 12: unstack

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def unstack(self, value, name=None):
    """Unstack the values of a `Tensor` in the TensorArray.

    If input value shapes have rank-`R`, then the output TensorArray will
    contain elements whose shapes are rank-`(R-1)`.

    Args:
      value: (N+1)-D.  Tensor of type `dtype`.  The Tensor to unstack.
      name: A name for the operation (optional).

    Returns:
      A new TensorArray object with flow that ensures the unstack occurs.
      Use this object for all subsequent operations.

    Raises:
      ValueError: if the shape inference fails.
    """
    with ops.name_scope(name, "TensorArrayUnstack", [self._handle, value]):
      num_elements = array_ops.shape(value)[0]
      return self.scatter(
          indices=math_ops.range(0, num_elements), value=value, name=name) 
Source: abhisuri97 / auto-alt-text-lambda-api, file: tensor_array_ops.py, 22 lines

Example 13: _transpose_batch_time

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def _transpose_batch_time(x):
  """Transpose the batch and time dimensions of a Tensor.

  Retains as much of the static shape information as possible.

  Args:
    x: A tensor of rank 2 or higher.

  Returns:
    x transposed along the first two dimensions.

  Raises:
    ValueError: if `x` is rank 1 or lower.
  """
  x_static_shape = x.get_shape()
  if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
    raise ValueError(
        "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
        (x, x_static_shape))
  x_rank = array_ops.rank(x)
  x_t = array_ops.transpose(
      x, array_ops.concat(
          ([1, 0], math_ops.range(2, x_rank)), axis=0))
  x_t.set_shape(
      tensor_shape.TensorShape([
          x_static_shape[1].value, x_static_shape[0].value
      ]).concatenate(x_static_shape[2:]))
  return x_t 
Source: ryfeus / lambda-packs, file: rnn.py, 30 lines

Example 14: _DynamicPartitionGrads

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def _DynamicPartitionGrads(op, *grads):
  """Gradients for DynamicPartition."""
  data = op.inputs[0]
  indices = op.inputs[1]
  num_partitions = op.get_attr("num_partitions")

  prefix_shape = array_ops.shape(indices)
  original_indices = array_ops.reshape(
      math_ops.range(math_ops.reduce_prod(prefix_shape)), prefix_shape)
  partitioned_indices = data_flow_ops.dynamic_partition(
      original_indices, indices, num_partitions)
  reconstructed = data_flow_ops.dynamic_stitch(partitioned_indices, grads)
  reconstructed = array_ops.reshape(reconstructed, array_ops.shape(data))
  return [reconstructed, None] 
Source: ryfeus / lambda-packs, file: data_flow_grad.py, 16 lines
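
The gradient works because dynamic_stitch is the inverse of dynamic_partition when fed the partitioned flat positions built with math_ops.range. A round-trip TF 2.x sketch with invented data:

import tensorflow as tf

data = tf.constant([1., 2., 3., 4.])
indices = tf.constant([0, 1, 0, 1])
parts = tf.dynamic_partition(data, indices, 2)  # [[1, 3], [2, 4]]
# Partition the flat positions the same way, then stitch to invert
positions = tf.dynamic_partition(tf.range(tf.size(data)), indices, 2)
restored = tf.dynamic_stitch(positions, parts)
print(restored.numpy())  # [1. 2. 3. 4.] -- the original order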

Example 15: clip_by_average_norm

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import range [as alias]
def clip_by_average_norm(t, clip_norm, name=None):
  """Clips tensor values to a maximum average L2-norm.

  Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
  normalizes `t` so that its average L2-norm is less than or equal to
  `clip_norm`. Specifically, if the average L2-norm is already less than or
  equal to `clip_norm`, then `t` is not modified. If the average L2-norm is
  greater than `clip_norm`, then this operation returns a tensor of the same
  type and shape as `t` with its values set to:

  `t * clip_norm / l2norm_avg(t)`

  In this case, the average L2-norm of the output tensor is `clip_norm`.

  This operation is typically used to clip gradients before applying them with
  an optimizer.

  Args:
    t: A `Tensor`.
    clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
    name: A name for the operation (optional).

  Returns:
    A clipped `Tensor`.
  """
  with ops.name_scope(name, "clip_by_average_norm", [t, clip_norm]) as name:
    t = ops.convert_to_tensor(t, name="t")

    # Calculate L2-norm per element, clip elements by ratio of clip_norm to
    # L2-norm per element
    n_element = math_ops.cast(array_ops.size(t), dtypes.float32)
    l2norm_inv = math_ops.rsqrt(
        math_ops.reduce_sum(t * t, math_ops.range(array_ops.rank(t))))
    tclip = array_ops.identity(
        t * clip_norm * math_ops.minimum(
            l2norm_inv * n_element, constant_op.constant(1.0) / clip_norm),
        name=name)

  return tclip 
Source: ryfeus / lambda-packs, file: clip_ops.py, 41 lines
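
Here math_ops.range(rank(t)) enumerates every axis, so the reduce_sum yields the full L2 norm. A worked NumPy sketch of the clipping formula, with an assumed t = [3, 4] and clip_norm = 1:

import numpy as np

t = np.array([3.0, 4.0])  # assumed input
clip_norm = 1.0
n = t.size
l2norm_avg = np.sqrt(np.sum(t * t)) / n  # 5 / 2 = 2.5 > clip_norm
t_clipped = t * clip_norm / l2norm_avg if l2norm_avg > clip_norm else t
print(t_clipped)  # [1.2 1.6]; its average L2-norm is exactly clip_norm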


Note: The tensorflow.python.ops.math_ops.range examples in this article were collected from open-source projects and documentation platforms such as GitHub and MSDocs. Copyright in each snippet remains with its original authors; consult the license of the corresponding project before redistributing or reusing the code.