

Python v1.reduce_prod Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.reduce_prod method in Python. If you are unsure what v1.reduce_prod does or how to call it, the curated examples below should help. You can also browse further usage examples from the tensorflow.compat.v1 module.


The following presents 13 code examples of the v1.reduce_prod method, ordered by popularity.

Example 1: _call_sampler

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_prod [as alias]
def _call_sampler(sample_n_fn, sample_shape, name=None):
  """Reshapes vector of samples."""
  with tf.name_scope(name, "call_sampler", values=[sample_shape]):
    sample_shape = tf.convert_to_tensor(
        sample_shape, dtype=tf.int32, name="sample_shape")
    # Ensure sample_shape is a vector (vs just a scalar).
    pad = tf.cast(tf.equal(tf.rank(sample_shape), 0), tf.int32)
    sample_shape = tf.reshape(
        sample_shape,
        tf.pad(tf.shape(sample_shape),
               paddings=[[pad, 0]],
               constant_values=1))
    samples = sample_n_fn(tf.reduce_prod(sample_shape))
    batch_event_shape = tf.shape(samples)[1:]
    final_shape = tf.concat([sample_shape, batch_event_shape], 0)
    return tf.reshape(samples, final_shape) 
Author: magenta | Project: magenta | Lines: 18 | Source: seq2seq.py
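
A minimal sketch of the reshape trick in isolation, assuming TF1-style graph execution; the shapes are illustrative, not from the original source. tf.reduce_prod collapses the requested sample shape into one flat sample count, and the final tf.reshape restores it:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

sample_shape = tf.constant([2, 3], dtype=tf.int32)
flat_count = tf.reduce_prod(sample_shape)          # draw 2 * 3 = 6 samples in one flat batch
samples = tf.zeros(tf.stack([flat_count, 4]))      # stand-in for sample_n_fn's output
final_shape = tf.concat([sample_shape, tf.shape(samples)[1:]], 0)
reshaped = tf.reshape(samples, final_shape)
with tf.Session() as sess:
    print(sess.run(tf.shape(reshaped)))            # [2 3 4]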

Example 2: video_pixel_noise_bottom

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_prod [as alias]
def video_pixel_noise_bottom(x, model_hparams, vocab_size):
  """Bottom transformation for video."""
  input_noise = getattr(model_hparams, "video_modality_input_noise", 0.25)
  inputs = x
  if model_hparams.mode == tf.estimator.ModeKeys.TRAIN:
    background = tfp.stats.percentile(inputs, 50., axis=[0, 1, 2, 3])
    input_shape = common_layers.shape_list(inputs)
    input_size = tf.reduce_prod(input_shape[:-1])
    input_mask = tf.multinomial(
        tf.log([[input_noise, 1.-input_noise]]), input_size)
    input_mask = tf.reshape(tf.cast(input_mask, tf.int32),
                            input_shape[:-1]+[1])
    inputs = inputs * input_mask + background * (1 - input_mask)
  return video_bottom(inputs, model_hparams, vocab_size) 
Author: tensorflow | Project: tensor2tensor | Lines: 16 | Source: modalities.py
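
Here reduce_prod just counts pixel positions: with video inputs of shape [batch, time, height, width, channels], the product of all but the channel dimension is the number of positions that get an independent keep/replace draw. A standalone check with hypothetical shapes:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

input_shape = [2, 4, 8, 8, 3]                      # [batch, time, H, W, channels]
input_size = tf.reduce_prod(input_shape[:-1])      # 2 * 4 * 8 * 8 = 512 positions
with tf.Session() as sess:
    print(sess.run(input_size))                    # 512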

Example 3: apply_spectral_norm

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_prod [as alias]
def apply_spectral_norm(x):
  """Normalizes x using the spectral norm.

  The implementation follows Algorithm 1 of
  https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
  reshaped such that the number of channels (last-dimension) is the same.

  Args:
    x: Tensor with the last dimension equal to the number of filters.

  Returns:
    x: Tensor with the same shape as x normalized by the spectral norm.
    assign_op: Op to be run after every step to update the vector "u".
  """
  weights_shape = shape_list(x)
  other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]

  # Reshape into a 2-D matrix with outer size num_filters.
  weights_2d = tf.reshape(x, (other, num_filters))

  # v = Wu / ||W u||
  with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
    u = tf.get_variable(
        "u", [num_filters, 1],
        initializer=tf.truncated_normal_initializer(),
        trainable=False)
  v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))

  # u_new = vW / ||v W||
  u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))

  # s = v*W*u
  spectral_norm = tf.squeeze(
      tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))

  # set u equal to u_new in the next iteration.
  assign_op = tf.assign(u, tf.transpose(u_new))
  return tf.divide(x, spectral_norm), assign_op 
Author: tensorflow | Project: tensor2tensor | Lines: 40 | Source: common_layers.py
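
For intuition, the same power iteration can be cross-checked against an SVD in plain numpy: after enough update steps the estimate approaches the largest singular value of the reshaped 2-D weight matrix. A sketch with random weights, not taken from the code above:

import numpy as np

w = np.random.randn(9, 16).astype(np.float32)      # "other" x num_filters, as above
u = np.random.randn(16, 1).astype(np.float32)
for _ in range(50):                                # power iteration, Algorithm 1 style
    v = w @ u
    v /= np.linalg.norm(v)
    u = (v.T @ w).T
    u /= np.linalg.norm(u)
sigma = (v.T @ w @ u).item()                       # estimated spectral norm
print(np.isclose(sigma, np.linalg.svd(w, compute_uv=False)[0], rtol=1e-3))  # True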

Example 4: weight_targeting

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_prod [as alias]
def weight_targeting(w, k):
  """Weight-level magnitude pruning."""
  k = tf.to_int32(k)
  w_shape = shape_list(w)
  size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
  w = tf.reshape(w, [size, w_shape[-1]])

  transpose_w = tf.transpose(w)
  thres = contrib.framework().sort(tf.abs(transpose_w), axis=1)[:, k]
  mask = to_float(thres[None, :] >= tf.abs(w))

  return tf.reshape(mask, w_shape) 
Author: tensorflow | Project: tensor2tensor | Lines: 14 | Source: common_layers.py
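
A worked numpy miniature of the thresholding logic, with hypothetical values: for each output column, the k-th smallest magnitude becomes the threshold, and the mask marks the small-magnitude weights that the caller would target for pruning:

import numpy as np

w = np.array([[ 0.1, -2.0 ],
              [ 0.5,  0.3 ],
              [-1.5,  0.05]], dtype=np.float32)    # [size, num_outputs]
k = 1
thres = np.sort(np.abs(w.T), axis=1)[:, k]         # per-column magnitude threshold
mask = (thres[None, :] >= np.abs(w)).astype(np.float32)
print(mask)  # 1 marks the smallest-magnitude entries in each column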

Example 5: unit_targeting

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_prod [as alias]
def unit_targeting(w, k):
  """Unit-level magnitude pruning."""
  k = tf.to_int32(k)
  w_shape = shape_list(w)
  size = tf.to_int32(tf.reduce_prod(w_shape[:-1]))
  w = tf.reshape(w, [size, w_shape[-1]])

  norm = tf.norm(w, axis=0)
  thres = contrib.framework().sort(norm, axis=0)[k]
  mask = to_float(thres >= norm)[None, :]
  mask = tf.tile(mask, [size, 1])

  return tf.reshape(mask, w_shape) 
Author: tensorflow | Project: tensor2tensor | Lines: 15 | Source: common_layers.py

Example 6: _conv1d_expression

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_prod [as alias]
def _conv1d_expression(expr, w, padding, stride):
    """Scale a linear expression by w (through a convolutional layer)."""
    b = tf.nn.conv1d(expr.b, w, padding=padding, stride=stride)
    shape = tf.concat([[tf.reduce_prod(tf.shape(expr.w)[:2])],
                       tf.shape(expr.w)[2:]], axis=0)
    w = tf.nn.conv1d(tf.reshape(expr.w, shape), w, padding=padding,
                     stride=stride)
    shape = tf.concat([tf.shape(expr.w)[:2], tf.shape(w)[1:]], axis=0)
    w = tf.reshape(w, shape)
    return LinearExpression(w=w, b=b, lower=expr.lower, upper=expr.upper) 
Author: deepmind | Project: interval-bound-propagation | Lines: 12 | Source: fastlin.py

Example 7: _conv2d_expression

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_prod [as alias]
def _conv2d_expression(expr, w, padding, strides):
    """Scale a linear expression by w (through a convolutional layer)."""
    b = tf.nn.convolution(expr.b, w, padding=padding, strides=strides)
    shape = tf.concat([[tf.reduce_prod(tf.shape(expr.w)[:2])],
                       tf.shape(expr.w)[2:]], axis=0)
    w = tf.nn.convolution(tf.reshape(expr.w, shape), w, padding=padding,
                          strides=strides)
    shape = tf.concat([tf.shape(expr.w)[:2], tf.shape(w)[1:]], axis=0)
    w = tf.reshape(w, shape)
    return LinearExpression(w=w, b=b, lower=expr.lower, upper=expr.upper) 
Author: deepmind | Project: interval-bound-propagation | Lines: 12 | Source: fastlin.py
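
Both _conv1d_expression and _conv2d_expression rely on the same trick: reduce_prod folds the leading [batch, num_variables] dimensions into one so the tensor rank matches what tf.nn.convolution expects, and a second reshape unfolds them afterwards. A shape-only sketch under assumed dimensions:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

expr_w = tf.ones([2, 3, 8, 8, 4])                  # [batch, num_vars, H, W, C]
shape = tf.concat([[tf.reduce_prod(tf.shape(expr_w)[:2])],
                   tf.shape(expr_w)[2:]], axis=0)
flat = tf.reshape(expr_w, shape)                   # rank 4, ready for convolution
with tf.Session() as sess:
    print(sess.run(tf.shape(flat)))                # [6 8 8 4]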

Example 8: _get_moments

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_prod [as alias]
def _get_moments(self, inputs):
    # Like tf.nn.moments but unbiased sample std. deviation.
    # Reduce over channels only.
    mean = tf.reduce_mean(inputs, [self.axis], keepdims=True, name="mean")
    variance = tf.reduce_sum(
        tf.squared_difference(inputs, tf.stop_gradient(mean)),
        [self.axis], keepdims=True, name="variance_sum")
    # Divide by N-1
    inputs_shape = tf.shape(inputs)
    counts = tf.reduce_prod([inputs_shape[ax] for ax in [self.axis]])
    variance /= (tf.cast(counts, tf.float32) - 1)
    return mean, variance 
Author: tensorflow | Project: compression | Lines: 14 | Source: archs.py
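
A hedged cross-check of the N-1 division against numpy's unbiased variance; the axis and shapes here are hypothetical:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = np.random.randn(4, 8, 3).astype(np.float32)
axis = 2                                           # reduce over channels, as above
inputs = tf.constant(x)
mean = tf.reduce_mean(inputs, [axis], keepdims=True)
var_sum = tf.reduce_sum(tf.squared_difference(inputs, mean), [axis], keepdims=True)
counts = tf.reduce_prod([tf.shape(inputs)[axis]])
variance = var_sum / (tf.cast(counts, tf.float32) - 1)
with tf.Session() as sess:
    print(np.allclose(sess.run(variance),
                      np.var(x, axis=axis, ddof=1, keepdims=True), atol=1e-5))  # True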

Example 9: estimate_entropy

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_prod [as alias]
def estimate_entropy(entropy_model, inputs, spatial_shape=None) -> EntropyInfo:
  """Compresses `inputs` with the given entropy model and estimates entropy.

  Arguments:
    entropy_model: An `EntropyModel` instance.
    inputs: The input tensor to be fed to the entropy model.
    spatial_shape: Shape of the input image (HxW). Must be provided for
      `valid == False`.

  Returns:
    The 'noisy' and quantized inputs, as well as differential and discrete
    entropy estimates, as an `EntropyInfo` named tuple.
  """
  # We are summing over the log likelihood tensor, so we need to explicitly
  # divide by the batch size.
  batch = tf.cast(tf.shape(inputs)[0], tf.float32)

  # Divide by this to flip sign and convert from nats to bits.
  quotient = tf.constant(-np.log(2), dtype=tf.float32)

  num_pixels = tf.cast(tf.reduce_prod(spatial_shape), tf.float32)

  # Compute noisy outputs and estimate differential entropy.
  noisy, likelihood = entropy_model(inputs, training=True)
  log_likelihood = tf.log(likelihood)
  nbits = tf.reduce_sum(log_likelihood) / (quotient * batch)
  nbpp = nbits / num_pixels

  # Compute quantized outputs and estimate discrete entropy.
  quantized, likelihood = entropy_model(inputs, training=False)
  log_likelihood = tf.log(likelihood)
  qbits = tf.reduce_sum(log_likelihood) / (quotient * batch)
  qbpp = qbits / num_pixels

  return EntropyInfo(noisy, quantized, nbits, nbpp, qbits, qbpp) 
Author: tensorflow | Project: compression | Lines: 37 | Source: archs.py
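
Here reduce_prod simply turns the spatial shape into a pixel count for the bits-per-pixel normalization. In isolation, with an illustrative image size:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

spatial_shape = tf.constant([256, 384])            # H x W of the input image
num_pixels = tf.cast(tf.reduce_prod(spatial_shape), tf.float32)
with tf.Session() as sess:
    print(sess.run(num_pixels))                    # 98304.0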

Example 10: flatten_dimensions

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_prod [as alias]
def flatten_dimensions(inputs, first, last):
  """Flattens `K-d` tensor along [first, last) dimensions.

  Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
  [D0, D1, ..., D(first) * D(first+1) * ... * D(last-1), D(last), ..., D(K-1)].

  Example:
  `inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
  new_tensor = flatten_dimensions(inputs, first=1, last=3)
  new_tensor.shape -> [10, 100, 20, 3].

  Args:
    inputs: a tensor with shape [D0, D1, ..., D(K-1)].
    first: first value for the range of dimensions to flatten.
    last: last value for the range of dimensions to flatten. Note that the last
      dimension itself is excluded.

  Returns:
    a tensor with shape
    [D0, D1, ..., D(first) * D(first + 1) * ... * D(last - 1), D(last), ...,
     D(K-1)].

  Raises:
    ValueError: if first and last arguments are incorrect.
  """
  if first >= inputs.shape.ndims or last > inputs.shape.ndims:
    raise ValueError('`first` must be < inputs.shape.ndims and `last` must be '
                     '<= inputs.shape.ndims. found {} and {} respectively '
                     'while ndims is {}'.format(first, last, inputs.shape.ndims))
  shape = combined_static_and_dynamic_shape(inputs)
  flattened_dim_prod = tf.reduce_prod(shape[first:last],
                                      keepdims=True)
  new_shape = tf.concat([shape[:first], flattened_dim_prod,
                         shape[last:]], axis=0)
  return tf.reshape(inputs, new_shape) 
Author: tensorflow | Project: models | Lines: 37 | Source: shape_utils.py
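
The docstring example, replayed with plain ops as a sketch; combined_static_and_dynamic_shape is swapped for tf.shape to keep it self-contained:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

inputs = tf.zeros([10, 5, 20, 20, 3])
shape = tf.shape(inputs)
flattened_dim_prod = tf.reduce_prod(shape[1:3], keepdims=True)   # [5 * 20] = [100]
new_shape = tf.concat([shape[:1], flattened_dim_prod, shape[3:]], axis=0)
with tf.Session() as sess:
    print(sess.run(tf.shape(tf.reshape(inputs, new_shape))))     # [10 100 20 3]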

Example 11: expand_first_dimension

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_prod [as alias]
def expand_first_dimension(inputs, dims):
  """Expands `K-d` tensor along first dimension to be a `(K+n-1)-d` tensor.

  Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
  [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].

  Example:
  `inputs` is a tensor with shape [50, 20, 20, 3].
  new_tensor = expand_first_dimension(inputs, [10, 5]).
  new_tensor.shape -> [10, 5, 20, 20, 3].

  Args:
    inputs: a tensor with shape [D0, D1, ..., D(K-1)].
    dims: List with new dimensions to expand first axis into. The length of
      `dims` is typically 2 or larger.

  Returns:
    a tensor with shape [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
  """
  inputs_shape = combined_static_and_dynamic_shape(inputs)
  expanded_shape = tf.stack(dims + inputs_shape[1:])

  # Verify that it is possible to expand the first axis of inputs.
  assert_op = tf.assert_equal(
      inputs_shape[0], tf.reduce_prod(tf.stack(dims)),
      message=('First dimension of `inputs` cannot be expanded into provided '
               '`dims`'))

  with tf.control_dependencies([assert_op]):
    inputs_reshaped = tf.reshape(inputs, expanded_shape)

  return inputs_reshaped 
Author: tensorflow | Project: models | Lines: 34 | Source: shape_utils.py
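
And the inverse direction, standalone: the tf.assert_equal guard uses reduce_prod to confirm that the requested dims multiply out to the first dimension. Shapes are taken from the docstring example:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

inputs = tf.zeros([50, 20, 20, 3])
dims = [10, 5]
assert_op = tf.assert_equal(tf.shape(inputs)[0], tf.reduce_prod(tf.stack(dims)))
with tf.control_dependencies([assert_op]):
    expanded = tf.reshape(inputs, dims + [20, 20, 3])
with tf.Session() as sess:
    print(sess.run(tf.shape(expanded)))            # [10  5 20 20  3]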

Example 12: flop_coeff

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_prod [as alias]
def flop_coeff(op):
  """Computes the coefficient of number of flops associated with a convolution.

  The FLOPs cost of a convolution is given by C * output_depth * input_depth,
  where C = 2 * output_width * output_height * filter_size. The 2 is because we
  have one multiplication and one addition for each convolution weight and
  pixel. This function returns C.

  Supported operations names are listed in cost_calculator.FLOP_OPS.

  Args:
    op: A tf.Operation of supported types.

  Returns:
    A float, the coefficient that when multiplied by the input depth and by the
    output depth gives the number of flops needed to compute the convolution.

  Raises:
    ValueError: conv_op is not a supported tf.Operation.
  """
  if not is_flop_op(op):
    return 0.0
  if op.type == 'MatMul':
    # A MatMul is like a 1x1 conv with an output size of 1x1, so from the factor
    # below only the 2.0 remains.
    return 2.0
  # Looking at the output shape makes it easy to automatically take into
  # account strides and the type of padding.
  def kernel_num_elements(tensor):
    """Returns the number of elements of a kernel.

    Args:
      tensor: The weight tensor.

    Returns:
      Number of elements of the kernel (either a Python float or a scalar
      float Tensor).
    """
    num_elements = np.prod(tensor.shape.dims[1:-1]).value
    if num_elements:
      return num_elements
    return tf.to_float(tf.reduce_prod(tf.shape(tensor)[1:-1]))

  if op.type in ('Conv2D', 'DepthwiseConv2dNative', 'Conv3D'):
    num_elements = kernel_num_elements(op.outputs[0])
  elif op.type == 'Conv2DBackpropInput':
    # For a transposed convolution, the input and the output are swapped (as
    # far as shapes are concerned). In other words, for a given filter shape
    # and stride, if Conv2D maps from shapeX to shapeY, Conv2DBackpropInput
    # maps from shapeY to shapeX. Therefore wherever we use the output shape
    # for Conv2D, we use the input shape for Conv2DBackpropInput.
    num_elements = kernel_num_elements(cost_calculator.get_input_activation(op))
  else:
    # Can only happen if elements are added to FLOP_OPS and not taken care of.
    assert False, '%s in cost_calculator.FLOP_OPS but not handled' % op.type
  # Handle dynamic shaping while keeping old code path to not break
  # other clients.
  return 2.0 * num_elements * _get_conv_filter_size(op) 
Author: google-research | Project: morph-net | Lines: 59 | Source: resource_function.py
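
Worked arithmetic for the coefficient, under assumed sizes: a 3x3 Conv2D producing a 32x32 feature map (stride and padding are already reflected in that output size):

output_h, output_w = 32, 32
filter_size = 3 * 3
coeff = 2.0 * output_h * output_w * filter_size    # C = 18432.0
input_depth, output_depth = 64, 128
flops = coeff * input_depth * output_depth         # 150,994,944 FLOPs
print(coeff, flops)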

Example 13: pixel_control_rewards

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_prod [as alias]
def pixel_control_rewards(observations, cell_size):
  """Calculates pixel control task rewards from observation sequence.

  The observations are first split in a grid of KxK cells. For each cell a
  distinct pseudo reward is computed as the average absolute change in pixel
  intensity for all pixels in the cell. The change in intensity is averaged
  across both pixels and channels (e.g. RGB).

  The `observations` provided to this function should be cropped suitably, to
  ensure that the observations' height and width are a multiple of `cell_size`.
  The values of the `observations` tensor should be rescaled to [0, 1]. In the
  UNREAL agent observations are cropped to 80x80, and each cell is 4x4 in size.

  See "Reinforcement Learning with Unsupervised Auxiliary Tasks" by Jaderberg,
  Mnih, Czarnecki et al. (https://arxiv.org/abs/1611.05397).

  Args:
    observations: A tensor of shape `[T+1,B,H,W,C...]`, where
      * `T` is the sequence length, `B` is the batch size.
      * `H` is height, `W` is width.
      * `C...` is at least one channel dimension (e.g., colour, stack).
      * `T` and `B` can be statically unknown.
    cell_size: The size of each cell.

  Returns:
    A tensor of pixel control rewards calculated from the observation. The
    shape is `[T,B,H',W']`, where `H'` and `W'` are determined by the
    `cell_size`. If evenly-divisible, `H' = H/cell_size`, and similar for `W`.
  """
  # Calculate the absolute differences across the sequence.
  abs_diff = tf.abs(observations[1:] - observations[:-1])
  # Average over cells. `abs_diff` has shape [T,B,H,W,C...], e.g.,
  # [T,B,H,W,C] if we have a colour channel. We want to use the TF avg_pool3d
  # op, but it expects 5D inputs so we collapse all channel dimensions.
  # Merge remaining dimensions after W: [T,B,H,W,C'].
  full_shape = tf.shape(abs_diff)
  preserved_shape = full_shape[:4]
  trailing_shape = (tf.reduce_prod(full_shape[4:]),)
  shape = tf.concat([preserved_shape, trailing_shape], 0)
  abs_diff = tf.reshape(abs_diff, shape)
  # Apply the averaging using average pooling and reducing over channel.
  avg_abs_diff = tf.nn.avg_pool3d(
      abs_diff,
      ksize=[1, 1, cell_size, cell_size, 1],
      strides=[1, 1, cell_size, cell_size, 1],
      padding="VALID")  # [T,B,H',W',C'].
  pseudo_rewards = tf.reduce_mean(
      avg_abs_diff, axis=[4], name="pseudo_rewards")  # [T,B,H',W'].
  sequence_batch = abs_diff.get_shape()[:2]
  new_height_width = avg_abs_diff.get_shape()[2:4]
  pseudo_rewards.set_shape(sequence_batch.concatenate(new_height_width))
  return pseudo_rewards 
Author: deepmind | Project: trfl | Lines: 54 | Source: pixel_control_ops.py
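
A shape-only sketch of the pooling pipeline above, with the UNREAL-style numbers from the docstring (80x80 observations, 4x4 cells; T and B are illustrative). The channel-collapsing reshape is skipped here because this input already has a single channel dimension:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

observations = tf.random_uniform([11, 2, 80, 80, 3])   # [T+1, B, H, W, C]
abs_diff = tf.abs(observations[1:] - observations[:-1])
avg_abs_diff = tf.nn.avg_pool3d(abs_diff,
                                ksize=[1, 1, 4, 4, 1],
                                strides=[1, 1, 4, 4, 1],
                                padding="VALID")
rewards = tf.reduce_mean(avg_abs_diff, axis=[4])
with tf.Session() as sess:
    print(sess.run(tf.shape(rewards)))                 # [10  2 20 20]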


Note: The tensorflow.compat.v1.reduce_prod examples in this article were collected from open-source projects hosted on GitHub and similar platforms. Copyright in the code snippets remains with their original authors; consult each project's License before redistributing or reusing them. Do not reproduce this article without permission.