

Python math_ops.reduce_prod Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.math_ops.reduce_prod. If you are wondering what math_ops.reduce_prod does, how to call it, or want to see it used in context, the curated code examples below may help. You can also explore further usage examples from the containing module, tensorflow.python.ops.math_ops.


The following presents 15 code examples of the math_ops.reduce_prod method, sorted by popularity by default.

Example 1: prod

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def prod(x, axis=None, keepdims=False):
  """Multiplies the values in a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis along which to compute the product.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with the product of elements of `x`.
  """
  axis = _normalize_axis(axis, ndim(x))
  return math_ops.reduce_prod(x, reduction_indices=axis, keep_dims=keepdims) 
Author: ryfeus, Project: lambda-packs, Lines: 18, Source: backend.py
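
A minimal usage sketch of the wrapper above (a hypothetical snippet; assumes a TF 1.x graph-mode environment, where tf.reduce_prod is the public alias of math_ops.reduce_prod):

# Hypothetical usage sketch (TF 1.x graph mode assumed); tf.reduce_prod
# is the public alias of the math_ops.reduce_prod call used by `prod`.
import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
p_all = tf.reduce_prod(x)           # 24.0: product over every element
p_col = tf.reduce_prod(x, axis=0)   # [3., 8.]: product down each column

with tf.Session() as sess:
    print(sess.run([p_all, p_col]))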

Example 2: _MeanGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def _MeanGrad(op, grad):
  """Gradient for Mean."""
  sum_grad = _SumGrad(op, grad)[0]
  input_shape = array_ops.shape(op.inputs[0])
  output_shape = array_ops.shape(op.outputs[0])
  # TODO(apassos) remove this device hackery as eager copy to device becomes
  # more seamless.
  with ops.colocate_with(input_shape):
    factor = _safe_shape_div(
        math_ops.reduce_prod(input_shape), math_ops.reduce_prod(output_shape))
  if context.in_eager_mode():
    # Note that we go through numpy here just so we use the eager per-device
    # scalar cache. We know the factor is a host memory tensor because it's a
    # shape, and we also know that converting a scalar into a tensor triggers a
    # per-device cache.
    factor = factor.numpy()
    factor = constant_op.constant(factor, dtype=sum_grad.dtype)
  return sum_grad / math_ops.cast(factor, sum_grad.dtype), None 
Author: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 20, Source: math_grad.py
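
The factor computed here is simply the number of elements each mean averages over; a hedged sketch of that shape arithmetic using only public TF 1.x ops:

# Hedged sketch: the Mean-gradient factor is the ratio of element counts
# before and after the reduction (public TF 1.x ops assumed).
import tensorflow as tf

x = tf.ones([2, 3, 4])
y = tf.reduce_mean(x, axis=[1, 2])  # output shape [2]
factor = tf.reduce_prod(tf.shape(x)) // tf.reduce_prod(tf.shape(y))

with tf.Session() as sess:
    print(sess.run(factor))  # 12, i.e. 3 * 4 elements averaged per output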

Example 3: _sparse_inner_flatten

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def _sparse_inner_flatten(inputs, new_rank):
  """Helper function for `inner_flatten`."""
  inputs_rank = inputs.dense_shape.get_shape().as_list()[0]
  if inputs_rank < new_rank:
    raise ValueError(
        'Inputs has rank less than new_rank. {} must have rank at least'
        ' {}. Received rank {}, shape {}'.format(inputs, new_rank, inputs_rank,
                                                 inputs.get_shape()))

  outer_dimensions = inputs.dense_shape[:new_rank - 1]
  inner_dimensions = inputs.dense_shape[new_rank - 1:]
  new_shape = array_ops.concat(
      (outer_dimensions, [math_ops.reduce_prod(inner_dimensions)]), 0)
  flattened = sparse_ops.sparse_reshape(inputs, new_shape)
  return flattened 
Author: taehoonlee, Project: tensornets, Lines: 17, Source: layers.py
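
A hedged sketch of the shape arithmetic this helper performs: everything from dimension new_rank - 1 onward collapses into one dimension via reduce_prod (hypothetical shape values; public TF 1.x ops assumed):

# Hedged sketch: collapse all dimensions from `new_rank - 1` onward into
# a single dimension (hypothetical values; TF 1.x assumed).
import tensorflow as tf

dense_shape = tf.constant([2, 3, 4, 5], dtype=tf.int64)
new_rank = 2
outer = dense_shape[:new_rank - 1]   # [2]
inner = dense_shape[new_rank - 1:]   # [3, 4, 5]
new_shape = tf.concat([outer, [tf.reduce_prod(inner)]], 0)

with tf.Session() as sess:
    print(sess.run(new_shape))  # [ 2 60]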

Example 4: per_image_standardization

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def per_image_standardization(image):
  """Linearly scales `image` to have zero mean and unit norm.

  This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
  of all values in image, and
  `adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements()))`.

  `stddev` is the standard deviation of all values in `image`. It is capped
  away from zero to protect against division by 0 when handling uniform images.

  Args:
    image: 3-D tensor of shape `[height, width, channels]`.

  Returns:
    The standardized image with same shape as `image`.

  Raises:
    ValueError: if the shape of 'image' is incompatible with this function.
  """
  image = ops.convert_to_tensor(image, name='image')
  image = control_flow_ops.with_dependencies(
      _Check3DImage(image, require_static=False), image)
  num_pixels = math_ops.reduce_prod(array_ops.shape(image))

  image = math_ops.cast(image, dtype=dtypes.float32)
  image_mean = math_ops.reduce_mean(image)

  variance = (math_ops.reduce_mean(math_ops.square(image)) -
              math_ops.square(image_mean))
  variance = gen_nn_ops.relu(variance)
  stddev = math_ops.sqrt(variance)

  # Apply a minimum normalization that protects us against uniform images.
  min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32))
  pixel_value_scale = math_ops.maximum(stddev, min_stddev)
  pixel_value_offset = image_mean

  image = math_ops.subtract(image, pixel_value_offset)
  image = math_ops.div(image, pixel_value_scale)
  return image 
Author: ryfeus, Project: lambda-packs, Lines: 42, Source: image_ops_impl.py
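
A minimal usage sketch via the public API (assuming TF 1.x, where this implementation backs tf.image.per_image_standardization):

# Hypothetical usage sketch (TF 1.x assumed): after standardization the
# image has approximately zero mean and unit standard deviation.
import tensorflow as tf

image = tf.random_uniform([32, 32, 3], maxval=255.0)
standardized = tf.image.per_image_standardization(image)

with tf.Session() as sess:
    out = sess.run(standardized)
    print(out.mean(), out.std())  # ~0.0 and ~1.0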

Example 5: _FFTSizeForGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def _FFTSizeForGrad(grad, rank):
  return math_ops.reduce_prod(array_ops.shape(grad)[-rank:]) 
Author: ryfeus, Project: lambda-packs, Lines: 4, Source: spectral_grad.py

Example 6: _prob

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def _prob(self, y):
    x = self.bijector.inverse(y)
    ildj = self.bijector.inverse_log_det_jacobian(y)
    x = self._maybe_rotate_dims(x, rotate_right=True)
    prob = self.distribution.prob(x)
    if self._is_maybe_event_override:
      prob = math_ops.reduce_prod(prob, self._reduce_event_indices)
    prob *= math_ops.exp(ildj)
    if self._is_maybe_event_override:
      prob.set_shape(array_ops.broadcast_static_shape(
          y.get_shape().with_rank_at_least(1)[:-1], self.batch_shape))
    return prob 
Author: ryfeus, Project: lambda-packs, Lines: 14, Source: transformed_distribution.py
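
This method implements the change-of-variables rule p_Y(y) = p_X(g^{-1}(y)) * exp(ILDJ). A hedged numeric sketch for Y = exp(X) with X ~ Normal(0, 1), which recovers the lognormal density using only public TF 1.x ops:

# Hedged sketch of the change-of-variables rule implemented above, for
# Y = exp(X) with X ~ Normal(0, 1) (public TF 1.x ops assumed).
import numpy as np
import tensorflow as tf

y = tf.constant(2.0)
x = tf.log(y)                                 # bijector inverse: g^{-1}(y)
ildj = -tf.log(y)                             # log |d g^{-1}/dy| = -log y
base_log_prob = -0.5 * (x ** 2) - 0.5 * np.log(2 * np.pi)
prob = tf.exp(base_log_prob + ildj)           # lognormal pdf at y = 2

with tf.Session() as sess:
    print(sess.run(prob))  # ~0.157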

Example 7: _entropy

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def _entropy(self):
    if not self.bijector.is_constant_jacobian:
      raise NotImplementedError("entropy is not implemented")
    # Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. It
    # can be shown that:
    #   H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)].
    # If is_constant_jacobian then:
    #   E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c)
    # where c can be anything.
    entropy = self.distribution.entropy()
    if self._is_maybe_event_override:
      # H[X] = sum_i H[X_i] if X_i are mutually independent.
      # This means that a reduce_sum is a simple rescaling.
      entropy *= math_ops.cast(math_ops.reduce_prod(self._override_event_shape),
                               dtype=entropy.dtype.base_dtype)
    if self._is_maybe_batch_override:
      new_shape = array_ops.concat([
          _ones_like(self._override_batch_shape),
          self.distribution.batch_shape_tensor()
      ], 0)
      entropy = array_ops.reshape(entropy, new_shape)
      multiples = array_ops.concat([
          self._override_batch_shape,
          _ones_like(self.distribution.batch_shape_tensor())
      ], 0)
      entropy = array_ops.tile(entropy, multiples)
    dummy = array_ops.zeros([], self.dtype)
    entropy -= self.bijector.inverse_log_det_jacobian(dummy)
    entropy.set_shape(self.batch_shape)
    return entropy 
Author: ryfeus, Project: lambda-packs, Lines: 32, Source: transformed_distribution.py
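
The final lines implement H[Y] = H[X] - ILDJ; for an affine scale Y = a * X this is the familiar H[aX] = H[X] + log|a|. A hedged numeric check (public TF 1.x ops assumed):

# Hedged numeric check of the constant-Jacobian entropy shift: for
# Y = a * X, inverse_log_det_jacobian = -log(a), so H[Y] = H[X] + log(a).
import numpy as np
import tensorflow as tf

a = 3.0
base_entropy = 0.5 * np.log(2 * np.pi * np.e)  # H[X] for X ~ Normal(0, 1)
ildj = -tf.log(tf.constant(a))                 # inverse_log_det_jacobian
entropy_y = base_entropy - ildj                # entropy -= ildj, as above

with tf.Session() as sess:
    print(sess.run(entropy_y))  # ~2.518 == 1.419 + log(3)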

Example 8: covariance

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def covariance(self, name="covariance"):
    """Covariance.

    Covariance is (possibly) defined only for non-scalar-event distributions.

    For example, for a length-`k`, vector-valued distribution, it is calculated
    as,

    ```none
    Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
    ```

    where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
    denotes expectation.

    Alternatively, for non-vector, multivariate distributions (e.g.,
    matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
    under some vectorization of the events, i.e.,

    ```none
    Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
    ```

    where `Cov` is a (batch of) `k' x k'` matrices,
    `0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
    mapping indices of this distribution's event dimensions to indices of a
    length-`k'` vector.

    Args:
      name: The name to give this op.

    Returns:
      covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
        where the first `n` dimensions are batch coordinates and
        `k' = reduce_prod(self.event_shape)`.
    """
    with self._name_scope(name):
      return self._covariance() 
Author: ryfeus, Project: lambda-packs, Lines: 40, Source: distribution.py

Example 9: _MeanGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def _MeanGrad(op, grad):
  """Gradient for Mean."""
  sum_grad = _SumGrad(op, grad)[0]
  input_shape = array_ops.shape(op.inputs[0])
  output_shape = array_ops.shape(op.outputs[0])
  factor = _safe_shape_div(
      math_ops.reduce_prod(input_shape), math_ops.reduce_prod(output_shape))
  return sum_grad / math_ops.cast(factor, sum_grad.dtype), None 
Author: ryfeus, Project: lambda-packs, Lines: 10, Source: math_grad.py

Example 10: _ProdGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def _ProdGrad(op, grad):
  """Gradient for Prod."""
  # The gradient can be expressed by dividing the product by each entry of the
  # input tensor, but this approach can't deal with zeros in the input.
  # Here, we avoid this problem by composing the output as a product of two
  # cumprod operations.

  input_shape = array_ops.shape(op.inputs[0])
  # Reshape reduction indices for the case where the parameter is a scalar
  reduction_indices = array_ops.reshape(op.inputs[1], [-1])

  # Expand grad to full input shape
  output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
  tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  grad = array_ops.tile(grad, tile_scaling)

  # Pack all reduced dimensions into a single one, so we can perform the
  # cumprod ops. If the reduction dims list is empty, it defaults to float32,
  # so we need to cast here.  We put all the shape-related ops on CPU to avoid
  # copying back and forth, and since listdiff is CPU only.
  with ops.device("/cpu:0"):
    reduced = math_ops.cast(reduction_indices, dtypes.int32)
    idx = math_ops.range(0, array_ops.rank(op.inputs[0]))
    other, _ = array_ops.setdiff1d(idx, reduced)
    perm = array_ops.concat([reduced, other], 0)
    reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
    other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
  permuted = array_ops.transpose(op.inputs[0], perm)
  permuted_shape = array_ops.shape(permuted)
  reshaped = array_ops.reshape(permuted, (reduced_num, other_num))

  # Calculate product, leaving out the current entry
  left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
  right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
  y = array_ops.reshape(left * right, permuted_shape)

  # Invert the transpose and reshape operations.
  # Make sure to set the statically known shape information through a reshape.
  out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
  return array_ops.reshape(out, input_shape), None 
Author: ryfeus, Project: lambda-packs, Lines: 43, Source: math_grad.py
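
A hedged numeric illustration of the exclusive-cumprod trick used above: left[i] * right[i] equals the product of every entry except x[i], which stays well defined even when x contains zeros (public TF 1.x ops assumed):

# Hedged illustration (TF 1.x assumed): left[i] * right[i] equals the
# product of all entries except x[i], even with zeros in x.
import tensorflow as tf

x = tf.constant([2., 0., 3., 4.])
left = tf.cumprod(x, exclusive=True)                  # [1., 2., 0., 0.]
right = tf.cumprod(x, exclusive=True, reverse=True)   # [0., 12., 4., 1.]
grad_per_entry = left * right                         # [0., 24., 0., 0.]

with tf.Session() as sess:
    print(sess.run(grad_per_entry))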

Example 11: _TopKGrad

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def _TopKGrad(op, grad, _):
  """Return the gradients for TopK.

  Args:
    op: The TopKOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the TopKOp.

  Returns:
    A list of two tensors: the first is the gradient w.r.t. the input of
    TopK, and the second is the gradient w.r.t. the indices (all zero).
  """
  in_shape = array_ops.shape(op.inputs[0])
  ind_shape = array_ops.shape(op.outputs[1])

  ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
  # Flatten indices to 2D.
  ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))

  in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
  outerdim = array_ops.shape(ind_2d)[0]
  # Compute linear indices (flattened to 1D).
  ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
      math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])

  # Substitute grad to appropriate locations and fill the rest with zeros,
  # finally reshaping it to the original input shape.
  return [array_ops.reshape(
      sparse_ops.sparse_to_dense(ind,
                                 array_ops.reshape(
                                     math_ops.reduce_prod(in_shape), [1]),
                                 array_ops.reshape(grad, [-1]),
                                 validate_indices=False),
      in_shape), array_ops.zeros(
          [], dtype=dtypes.int32)] 
Author: ryfeus, Project: lambda-packs, Lines: 36, Source: nn_grad.py
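
A hedged sketch of the linear-index arithmetic above (hypothetical values): TopK indices are per-row, so each row r must be offset by r * in_lastdim to address the flattened input:

# Hedged sketch (hypothetical values, TF 1.x assumed): offset each row of
# TopK indices by row * in_lastdim to get flat indices into the input.
import tensorflow as tf

ind_2d = tf.constant([[2, 0], [1, 3]])  # top-2 indices for 2 rows
in_lastdim = 4                          # each input row has 4 entries
offsets = tf.expand_dims(tf.range(0, 2 * in_lastdim, in_lastdim), -1)
linear = tf.reshape(ind_2d + offsets, [-1])  # [2, 0, 5, 7]

with tf.Session() as sess:
    print(sess.run(linear))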

Example 12: embedding_lookup_unique

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def embedding_lookup_unique(params, ids, name=None):
  """Version of embedding_lookup that avoids duplicate lookups.

  This can save communication in the case of repeated ids. It has the same
  interface as embedding_lookup, except that it supports multi-dimensional
  `ids`, which avoids reshaping the input/output to fit gather.

  Args:
    params: A list of tensors with the same shape and type, or a
      `PartitionedVariable`. Shape `[index, d1, d2, ...]`.
    ids: A `Tensor` with type `int32` or `int64` containing the ids to be
      looked up in `params`. Shape `[ids1, ids2, ...]`.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same type as the tensors in `params` and dimension of
    `[ids1, ids2, d1, d2, ...]`.

  Raises:
    ValueError: If `params` is empty.
  """
  with ops.name_scope(name, "EmbeddingLookupUnique", [params, ids]):
    ids = ops.convert_to_tensor(ids)
    shape = array_ops.shape(ids)
    ids_flat = array_ops.reshape(
        ids, math_ops.reduce_prod(shape, keep_dims=True))
    unique_ids, idx = array_ops.unique(ids_flat)
    unique_embeddings = embedding_ops.embedding_lookup(params, unique_ids)
    embeds_flat = array_ops.gather(unique_embeddings, idx)
    embed_shape = array_ops.concat(
        [shape, array_ops.shape(unique_embeddings)[1:]], 0)
    embeds = array_ops.reshape(embeds_flat, embed_shape)
    embeds.set_shape(ids.get_shape().concatenate(
        unique_embeddings.get_shape()[1:]))
    return embeds 
Author: ryfeus, Project: lambda-packs, Lines: 37, Source: embedding_ops.py
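
A minimal usage sketch, assuming a TF 1.x install where this helper is exposed as tf.contrib.layers.embedding_lookup_unique (the exact module path is an assumption and may vary by version):

# Hypothetical usage sketch; the contrib path below is an assumption and
# may differ across TF 1.x versions.
import tensorflow as tf

params = tf.random_normal([100, 8])        # 100 embedding rows of size 8
ids = tf.constant([[3, 3, 7], [7, 3, 0]])  # repeated, multi-dimensional ids
embeds = tf.contrib.layers.embedding_lookup_unique(params, ids)

with tf.Session() as sess:
    print(sess.run(embeds).shape)  # (2, 3, 8)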

Example 13: _sparse_inner_flatten

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def _sparse_inner_flatten(inputs, new_rank):
  """Helper function for `inner_flatten`."""
  inputs_rank = inputs.dense_shape.get_shape().as_list()[0]
  if inputs_rank < new_rank:
    raise ValueError(
        'Inputs has rank less than new_rank. {} must have rank at least'
        ' {}. Received rank {}, shape {}'.format(inputs, new_rank, inputs_rank,
                                                 inputs.get_shape()))

  outer_dimensions = inputs.dense_shape[:new_rank - 1]
  inner_dimensions = inputs.dense_shape[new_rank - 1:]
  new_shape = array_ops.concat((outer_dimensions,
                                [math_ops.reduce_prod(inner_dimensions)]), 0)
  flattened = sparse_ops.sparse_reshape(inputs, new_shape)
  return flattened 
Author: ryfeus, Project: lambda-packs, Lines: 17, Source: layers.py

Example 14: _flip_vector_to_matrix_dynamic

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def _flip_vector_to_matrix_dynamic(vec, batch_shape):
  """flip_vector_to_matrix with dynamic shapes."""
  # Shapes associated with batch_shape
  batch_rank = array_ops.size(batch_shape)

  # Shapes associated with vec.
  vec = ops.convert_to_tensor(vec, name="vec")
  vec_shape = array_ops.shape(vec)
  vec_rank = array_ops.rank(vec)
  vec_batch_rank = vec_rank - 1

  m = vec_batch_rank - batch_rank
  # vec_shape_left = [M1,...,Mm] or [].
  vec_shape_left = array_ops.strided_slice(vec_shape, [0], [m])
  # If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
  # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
  condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
  k = array_ops.gather(vec_shape, vec_rank - 1)
  new_shape = array_ops.concat((batch_shape, [k], condensed_shape), 0)

  def _flip_front_dims_to_back():
    # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
    perm = array_ops.concat((math_ops.range(m, vec_rank), math_ops.range(0, m)),
                            0)
    return array_ops.transpose(vec, perm=perm)

  x_flipped = control_flow_ops.cond(
      math_ops.less(0, m),
      _flip_front_dims_to_back,
      lambda: array_ops.expand_dims(vec, -1))

  return array_ops.reshape(x_flipped, new_shape) 
Author: ryfeus, Project: lambda-packs, Lines: 34, Source: operator_pd.py

Example 15: _flip_vector_to_matrix_static

# Required import: from tensorflow.python.ops import math_ops [as alias]
# Or: from tensorflow.python.ops.math_ops import reduce_prod [as alias]
def _flip_vector_to_matrix_static(vec, batch_shape):
  """flip_vector_to_matrix with static shapes."""
  # Shapes associated with batch_shape
  batch_rank = batch_shape.ndims

  # Shapes associated with vec.
  vec = ops.convert_to_tensor(vec, name="vec")
  vec_shape = vec.get_shape()
  vec_rank = len(vec_shape)
  vec_batch_rank = vec_rank - 1

  m = vec_batch_rank - batch_rank
  # vec_shape_left = [M1,...,Mm] or [].
  vec_shape_left = vec_shape[:m]
  # If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
  # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
  condensed_shape = [np.prod(vec_shape_left)]
  k = vec_shape[-1]
  new_shape = batch_shape.concatenate(k).concatenate(condensed_shape)

  def _flip_front_dims_to_back():
    # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
    perm = array_ops.concat((math_ops.range(m, vec_rank), math_ops.range(0, m)),
                            0)
    return array_ops.transpose(vec, perm=perm)

  if 0 < m:
    x_flipped = _flip_front_dims_to_back()
  else:
    x_flipped = array_ops.expand_dims(vec, -1)

  return array_ops.reshape(x_flipped, new_shape) 
Author: ryfeus, Project: lambda-packs, Lines: 34, Source: operator_pd.py
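
A hedged illustration of what both flip variants compute: a stack of vectors shaped [M1, ..., Mm] + batch_shape + [k] becomes matrices shaped batch_shape + [k, M1*...*Mm]. For the m = 1 case the reshape is a no-op and the flip is just a transpose (public TF 1.x ops assumed):

# Hedged illustration (TF 1.x assumed): with vec of shape [M, N, k] and
# batch_shape [N], the flip yields matrices of shape [N, k, M].
import tensorflow as tf

vec = tf.ones([5, 3, 2])                     # M = 5, batch N = 3, k = 2
flipped = tf.transpose(vec, perm=[1, 2, 0])  # perm = [m..rank) + [0..m)
print(flipped.get_shape())                   # (3, 2, 5)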


Note: The tensorflow.python.ops.math_ops.reduce_prod method examples in this article were curated by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.