

Python v1.reduce_all Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.reduce_all method in Python. If you are unsure how to call v1.reduce_all, what it does, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module.


The following shows 14 code examples of the v1.reduce_all method, sorted by popularity by default.
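
Before the project-specific examples, here is a minimal standalone sketch (not taken from any of the projects below) of what reduce_all itself does: it computes the logical AND of boolean elements, either over the whole tensor or along a given axis.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([[True, True, False],
                 [True, True, True]])

everything = tf.reduce_all(x)        # AND over all elements -> False
per_row = tf.reduce_all(x, axis=1)   # -> [False, True]
per_col = tf.reduce_all(x, axis=0)   # -> [True, True, False]

with tf.Session() as sess:
    print(sess.run([everything, per_row, per_col]))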

Example 1: _flat_reconstruction_loss

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_all [as alias]
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
    b_enc, b_dec = tf.split(
        flat_rnn_output,
        [self._nade.num_hidden, self._output_depth], axis=1)
    ll, cond_probs = self._nade.log_prob(
        flat_x_target, b_enc=b_enc, b_dec=b_dec)
    r_loss = -ll
    flat_truth = tf.cast(flat_x_target, tf.bool)
    flat_predictions = tf.greater_equal(cond_probs, 0.5)

    metric_map = {
        'metrics/accuracy':
            tf.metrics.mean(
                tf.reduce_all(tf.equal(flat_truth, flat_predictions), axis=-1)),
        'metrics/recall':
            tf.metrics.recall(flat_truth, flat_predictions),
        'metrics/precision':
            tf.metrics.precision(flat_truth, flat_predictions),
    }

    return r_loss, metric_map 
Developer ID: magenta, Project: magenta, Lines: 23, Source: lstm_models.py

Example 2: get_gradients_to_apply

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_all [as alias]
def get_gradients_to_apply(self, device_num, gradient_state):
    device_grads = gradient_state
    tower_grad = device_grads[device_num]

    if self.benchmark_cnn.enable_auto_loss_scale and device_num == 0:
      # Since we don't aggregate variables in --independent mode, we cannot tell
      # if there are NaNs on all GPUs. So we arbitrarily choose to only check
      # NaNs on the first GPU.
      has_inf_nan_list = []
      for grad, _ in tower_grad:
        has_inf_nan_list.append(tf.reduce_all(tf.is_finite(grad)))
      self.grad_has_inf_nan = tf.logical_not(tf.reduce_all(has_inf_nan_list))

    return tower_grad 
Developer ID: tensorflow, Project: benchmarks, Lines: 16, Source: variable_mgr.py
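
As a quick, self-contained illustration of the idiom above (made-up gradient values, not the benchmarks API): reduce_all(tf.is_finite(g)) is computed per gradient, the results are AND-ed together with another reduce_all, and the negation flags whether any gradient contained an inf or NaN.

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Two toy "gradients", the second containing an inf.
grads = [tf.constant([1.0, 2.0]), tf.constant([np.inf, 3.0])]

all_finite = tf.reduce_all([tf.reduce_all(tf.is_finite(g)) for g in grads])
grad_has_inf_nan = tf.logical_not(all_finite)

with tf.Session() as sess:
    print(sess.run(grad_has_inf_nan))  # True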

Example 3: preprocess_device_grads

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_all [as alias]
def preprocess_device_grads(self, device_grads):
    compact_grads = (self.benchmark_cnn.params.use_fp16 and
                     self.benchmark_cnn.params.compact_gradient_transfer)
    defer_grads = (self.benchmark_cnn.params.variable_consistency == 'relaxed')

    grads_to_reduce = [[g for g, _ in grad_vars] for grad_vars in device_grads]
    algorithm = batch_allreduce.algorithm_from_params(self.benchmark_cnn.params)
    reduced_grads, self._warmup_ops = algorithm.batch_all_reduce(
        grads_to_reduce, self.benchmark_cnn.params.gradient_repacking,
        compact_grads, defer_grads, self.benchmark_cnn.params.xla_compile)
    if self.benchmark_cnn.enable_auto_loss_scale:
      # Check for infs or nans
      is_finite_list = []
      with tf.name_scope('check_for_inf_and_nan'):
        for tower_grads in reduced_grads:
          with tf.colocate_with(tower_grads[0]):
            # TODO(tanmingxing): Create fused op that takes in a list of tensors
            # as input and returns scalar boolean True if there are any
            # infs/nans.
            is_finite_list.append(tf.reduce_all(
                [tf.reduce_all(tf.is_finite(g)) for g in tower_grads]))
        self.grad_has_inf_nan = tf.logical_not(tf.reduce_all(is_finite_list))
    reduced_device_grads = [[
        (g, v) for g, (_, v) in zip(grads, grad_vars)
    ] for grads, grad_vars in zip(reduced_grads, device_grads)]
    return self.benchmark_cnn.devices, reduced_device_grads 
Developer ID: tensorflow, Project: benchmarks, Lines: 28, Source: variable_mgr.py

Example 4: make_outer_masks

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_all [as alias]
def make_outer_masks(self, outer_masks, input_pianorolls):
    """Returns outer masks, if all zeros created by completion masking."""
    outer_masks = tf.to_float(outer_masks)
    # If outer_masks come in as all zeros, it means there's no masking,
    # which also means nothing will be generated. In this case, use
    # completion mask to make new outer masks.
    outer_masks = tf.cond(
        tf.reduce_all(tf.equal(outer_masks, 0)),
        lambda: make_completion_masks(input_pianorolls),
        lambda: outer_masks)
    return outer_masks 
Developer ID: magenta, Project: magenta, Lines: 13, Source: lib_tfsampling.py

Example 5: make_completion_masks

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_all [as alias]
def make_completion_masks(pianorolls, outer_masks=1.):
  pianorolls = tf.to_float(pianorolls)
  masks = tf.reduce_all(tf.equal(pianorolls, 0), axis=2, keep_dims=True)
  inner_masks = tf.to_float(masks) + 0 * pianorolls
  return inner_masks * outer_masks 
Developer ID: magenta, Project: magenta, Lines: 7, Source: lib_tfsampling.py
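
A toy illustration of the pattern in this example (assumed [batch, time, pitch, instrument] shape; the values are made up rather than taken from Magenta's tests): a time step counts as empty when every pitch is zero, and the keep_dims result broadcasts back over the pitch axis.

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# [batch=1, time=2, pitch=3, instrument=1]; the first step has a note, the second is silent.
pianorolls = tf.constant(np.array(
    [[[[0.], [1.], [0.]],
      [[0.], [0.], [0.]]]], dtype=np.float32))

empty_steps = tf.reduce_all(tf.equal(pianorolls, 0), axis=2, keep_dims=True)
completion_mask = tf.to_float(empty_steps) + 0 * pianorolls  # broadcast to full shape

with tf.Session() as sess:
    print(sess.run(completion_mask)[0, :, :, 0])  # [[0. 0. 0.] [1. 1. 1.]]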

Example 6: initialize

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_all [as alias]
def initialize(self, name=None):
    with tf.name_scope(name, "TrainingHelperInitialize"):
      finished = tf.equal(0, self._sequence_length)
      all_finished = tf.reduce_all(finished)
      next_inputs = tf.cond(
          all_finished, lambda: self._zero_inputs,
          lambda: tf.nest.map_structure(  # pylint:disable=g-long-lambda
              lambda inp: inp.read(0), self._input_tas))
      return (finished, next_inputs) 
Developer ID: magenta, Project: magenta, Lines: 11, Source: seq2seq.py

Example 7: compare_decode_steps

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_all [as alias]
def compare_decode_steps(decode_steps_a, decode_steps_b):
  """Returns tensor of bools indicated whether decode steps are equal."""
  return tf.reduce_all(
      tf.stack([
          tf.equal(decode_steps_a.action_types, decode_steps_b.action_types),
          tf.equal(decode_steps_a.action_ids, decode_steps_b.action_ids),
      ],
               axis=0),
      axis=0) 
Developer ID: google-research, Project: language, Lines: 11, Source: decode_utils.py
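
A self-contained toy version of the stack-then-reduce_all idiom above (made-up tensors, not the decode_utils data structures): each field is compared elementwise, and a position is marked equal only when every comparison agrees.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

action_types_a = tf.constant([1, 2, 3])
action_types_b = tf.constant([1, 2, 4])
action_ids_a = tf.constant([7, 8, 9])
action_ids_b = tf.constant([7, 0, 9])

equal = tf.reduce_all(
    tf.stack([tf.equal(action_types_a, action_types_b),
              tf.equal(action_ids_a, action_ids_b)], axis=0),
    axis=0)  # -> [True, False, False]

with tf.Session() as sess:
    print(sess.run(equal))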

Example 8: next_inputs

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_all [as alias]
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    del sample_ids  # Unused.
    with tf.name_scope(name, "TrainingHelperNextInputs", [time, outputs]):
      next_time = time + 1
      finished = (next_time >= self._sequence_length)
      all_finished = tf.reduce_all(finished)
      next_inputs = tf.cond(
          all_finished, lambda: self._zero_inputs, lambda: outputs)
      return finished, next_inputs, state 
Developer ID: google-research, Project: language, Lines: 11, Source: helpers.py

Example 9: sample

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_all [as alias]
def sample(news_config: GroverConfig, initial_context, eos_token, min_len, ignore_ids=None, p_for_topp=0.95,
           do_topk=False):
    """
    V1 version of: sample outputs from a model, and do it all at once
    :param news_config: Configuration used to construct the model
    :param initial_context: [batch_size, seq_length] that we'll start generating with
    :param eos_token: Stop generating if you see this (tf scalar)
    :param min_len: min length of sample
    :param ignore_ids: NEVER GENERATE THESE [vocab_size]
    :return:
    """
    batch_size, _ = get_shape_list(initial_context, expected_rank=2)

    if ignore_ids is None:
        ignore_ids = tf.constant([x == 0 for x in range(news_config.vocab_size)], dtype=tf.bool)

    with tf.name_scope('sample_sequence'):
        # Initial call to get cache
        context_output = initialize_from_context(initial_context, ignore_ids=ignore_ids, news_config=news_config,
                                                 p_for_topp=p_for_topp,
                                                 do_topk=do_topk)
        ctx = context_output['tokens']
        cache = context_output['cache']
        probs = context_output['probs']

        def body(ctx, cache, probs):
            """ for whatever reason this didn't work when I ran it on more than one at once... ugh."""
            next_outputs = sample_step(ctx[:, -1][:, None], ignore_ids=ignore_ids, news_config=news_config,
                                       batch_size=batch_size, p_for_topp=p_for_topp, cache=cache,
                                       do_topk=do_topk)

            # Update everything
            new_cache = tf.concat([cache, next_outputs['new_cache']], axis=-2)
            new_ids = tf.concat([ctx, next_outputs['new_tokens'][:, None]], axis=1)
            new_probs = tf.concat([probs, next_outputs['new_probs'][:, None]], axis=1)
            return [new_ids, new_cache, new_probs]

        def cond(ctx, cache, probs):
            # ctx = tf.Print(ctx,[tf.shape(ctx)])
            is_eos = tf.reduce_all(tf.reduce_any(tf.equal(ctx[:,-1:], eos_token), axis=1))
            is_len = tf.greater(get_shape_list(ctx)[1], min_len)
            return tf.logical_not(tf.logical_and(is_eos, is_len))

        tokens, cache, probs = tf.while_loop(
            cond=cond, body=body, maximum_iterations=1025 - get_shape_list(ctx)[1],
            loop_vars=[ctx, cache, probs],
            shape_invariants=[tf.TensorShape([batch_size, None]),
                              tf.TensorShape(
                                  [batch_size, news_config.num_hidden_layers, 2,
                                   news_config.num_attention_heads,
                                   None, news_config.hidden_size // news_config.num_attention_heads]),
                              tf.TensorShape([batch_size, None]),
                              ],
            back_prop=False,
        )
    return tokens, probs 
Developer ID: imcaspar, Project: gpt2-ml, Lines: 58, Source: modeling.py

Example 10: maybe_split_sequence_lengths

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_all [as alias]
def maybe_split_sequence_lengths(sequence_length, num_splits, total_length):
  """Validates and splits `sequence_length`, if necessary.

  Returned value must be used in graph for all validations to be executed.

  Args:
    sequence_length: A batch of sequence lengths, either sized `[batch_size]`
      and equal to either 0 or `total_length`, or sized
      `[batch_size, num_splits]`.
    num_splits: The scalar number of splits of the full sequences.
    total_length: The scalar total sequence length (potentially padded).

  Returns:
    sequence_length: If input shape was `[batch_size, num_splits]`, returns the
      same Tensor. Otherwise, returns a Tensor of that shape with each input
      length in the batch divided by `num_splits`.
  Raises:
    ValueError: If `sequence_length` is not shaped `[batch_size]` or
      `[batch_size, num_splits]`.
    tf.errors.InvalidArgumentError: If `sequence_length` is shaped
      `[batch_size]` and all values are not either 0 or `total_length`.
  """
  if sequence_length.shape.ndims == 1:
    if total_length % num_splits != 0:
      raise ValueError(
          '`total_length` must be evenly divisible by `num_splits`.')
    with tf.control_dependencies(
        [tf.Assert(
            tf.reduce_all(
                tf.logical_or(tf.equal(sequence_length, 0),
                              tf.equal(sequence_length, total_length))),
            data=[sequence_length])]):
      sequence_length = (
          tf.tile(tf.expand_dims(sequence_length, axis=1), [1, num_splits]) //
          num_splits)
  elif sequence_length.shape.ndims == 2:
    with tf.control_dependencies([
        tf.assert_less_equal(
            sequence_length,
            tf.constant(total_length // num_splits, tf.int32),
            message='Segment length cannot be more than '
                    '`total_length / num_splits`.')]):
      sequence_length = tf.identity(sequence_length)
    sequence_length.set_shape([sequence_length.shape[0], num_splits])
  else:
    raise ValueError(
        'Sequence lengths must be given as a vector or a 2D Tensor whose '
        'second dimension size matches its initial hierarchical split. Got '
        'shape: %s' % sequence_length.shape.as_list())
  return sequence_length 
Developer ID: magenta, Project: magenta, Lines: 52, Source: lstm_utils.py

Example 11: next_inputs

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_all [as alias]
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with tf.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
                       [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledOutputTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))
      sample_ids = tf.cast(sample_ids, tf.bool)

      def maybe_sample():
        """Perform scheduled sampling."""

        def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
          """Concatenate outputs with auxiliary inputs, if they exist."""
          if self._auxiliary_input_tas is None:
            return outputs_

          next_time = time + 1
          auxiliary_inputs = tf.nest.map_structure(
              lambda ta: ta.read(next_time), self._auxiliary_input_tas)
          if indices is not None:
            auxiliary_inputs = tf.gather_nd(auxiliary_inputs, indices)
          return tf.nest.map_structure(
              lambda x, y: tf.concat((x, y), -1),
              outputs_, auxiliary_inputs)

        if self._next_inputs_fn is None:
          return tf.where(
              sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
              base_next_inputs)

        where_sampling = tf.cast(
            tf.where(sample_ids), tf.int32)
        where_not_sampling = tf.cast(
            tf.where(tf.logical_not(sample_ids)), tf.int32)
        outputs_sampling = tf.gather_nd(outputs, where_sampling)
        inputs_not_sampling = tf.gather_nd(base_next_inputs,
                                           where_not_sampling)
        sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
            self._next_inputs_fn(outputs_sampling), where_sampling)

        base_shape = tf.shape(base_next_inputs)
        return (tf.scatter_nd(indices=where_sampling,
                              updates=sampled_next_inputs,
                              shape=base_shape)
                + tf.scatter_nd(indices=where_not_sampling,
                                updates=inputs_not_sampling,
                                shape=base_shape))

      all_finished = tf.reduce_all(finished)
      no_samples = tf.logical_not(tf.reduce_any(sample_ids))
      next_inputs = tf.cond(
          tf.logical_or(all_finished, no_samples),
          lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state) 
Developer ID: magenta, Project: magenta, Lines: 60, Source: seq2seq.py

Example 12: ApplyDepthImageDistortions

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_all [as alias]
def ApplyDepthImageDistortions(depth_images,
                               random_noise_level = 0.05,
                               random_noise_apply_probability = 0.5,
                               scaling_noise = True,
                               gamma_shape = 1000.0,
                               gamma_scale_inverse = 1000.0,
                               min_depth_allowed = 0.25,
                               max_depth_allowed = 2.5):
  """Apply photometric distortions to the input depth images.

  Args:
    depth_images: Tensor of shape [batch_size, h, w, 1] containing a batch of
      depth images to apply the random photometric distortions to.
    random_noise_level: The standard deviation of the Gaussian distribution for
      the noise that is applied to the depth image. When 0.0, then no noise is
      applied.
    random_noise_apply_probability: Probability of applying additive random
      noise to the images.
    scaling_noise: If True; sample a random variable from a Gamma distribution
      to scale the depth image.
    gamma_shape: Float; shape parameter of a Gamma distribution.
    gamma_scale_inverse: Float; inverse of scale parameter of a Gamma
      distribution.
    min_depth_allowed: Float; minimum clip value for depth.
    max_depth_allowed: Float; max clip value for depth.

  Returns:
    depth_images: Tensor of shape [batch_size, h, w, 1] containing a
      batch of images resulting from applying random photometric distortions to
      the inputs.
  """
  assert depth_images[0].get_shape().as_list()[-1] == 1
  with tf.variable_scope('distortions_depth_images'):
    # Add random Gaussian noise.
    if random_noise_level:
      for i, image in enumerate(depth_images):
        img_shape = tf.shape(image)
        rnd_noise = tf.random_normal(img_shape, stddev=random_noise_level)

        def ReturnImageTensor(value):
          return lambda: value

        if scaling_noise:
          alpha = tf.random_gamma([], gamma_shape, gamma_scale_inverse)
        image = tf.cond(
            tf.reduce_all(
                tf.greater(
                    tf.random.uniform([1]), random_noise_apply_probability)),
            ReturnImageTensor(image),
            ReturnImageTensor(alpha * image + rnd_noise))
        depth_images[i] = tf.reshape(image, img_shape)

    # Clip to valid range.
    for i, image in enumerate(depth_images):
      depth_images[i] = tf.clip_by_value(image, min_depth_allowed,
                                         max_depth_allowed)
  return depth_images 
Developer ID: google-research, Project: tensor2robot, Lines: 59, Source: image_transformations.py

Example 13: sample_mask_indices

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_all [as alias]
def sample_mask_indices(tokens, mask_rate, mask_blacklist, max_num_to_mask):
  """Samples indices to mask.

  Args:
    tokens (Tensor): 1-D string Tensor.
    mask_rate (float): percentage of tokens to mask.
    mask_blacklist (Tensor): 1-D string Tensor of tokens to NEVER mask.
    max_num_to_mask (int): max # of masks.

  Returns:
    mask_indices (Tensor): 1-D int32 Tensor of indices to mask.
  """
  if mask_rate < 0 or mask_rate > 1:
    raise ValueError("mask_rate must be within [0, 1].")

  # Compute how many tokens to mask.
  num_tokens = tf.size(tokens)
  num_to_mask = tf.to_int32(tf.ceil(mask_rate * tf.to_float(num_tokens)))

  if mask_rate > 0:
    # If masking is enabled, then mask at least one, no matter what.
    # Original BERT code does this too.
    num_to_mask = tf.maximum(num_to_mask, 1)

  num_to_mask = tf.minimum(num_to_mask, max_num_to_mask)

  # If there are any [CLS] or [SEP], we count these as part of num_tokens.
  # Note that the original implementation of BERT does this as well.

  all_indices = tf.range(num_tokens)

  # Filter out indices containing CLS and SEP.
  allow_masking = tf.reduce_all(
      tf.not_equal(tokens, mask_blacklist[:, None]), axis=0)

  filtered_indices = tf.boolean_mask(all_indices, allow_masking)

  # Randomly select indices without replacement.
  shuffled_indices = tf.random.shuffle(filtered_indices)
  mask_indices = shuffled_indices[:num_to_mask]

  return mask_indices 
Developer ID: google-research, Project: language, Lines: 44, Source: preprocess.py
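
A toy sketch of the blacklist filtering above (hypothetical token values): broadcasting not_equal against mask_blacklist[:, None] yields a [num_blacklist, num_tokens] boolean matrix, and reduce_all over axis 0 keeps only the tokens that match no blacklist entry.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

tokens = tf.constant(["[CLS]", "the", "cat", "[SEP]"])
mask_blacklist = tf.constant(["[CLS]", "[SEP]"])

allow_masking = tf.reduce_all(
    tf.not_equal(tokens, mask_blacklist[:, None]), axis=0)
maskable_indices = tf.boolean_mask(tf.range(tf.size(tokens)), allow_masking)

with tf.Session() as sess:
    print(sess.run(allow_masking))     # [False  True  True False]
    print(sess.run(maskable_indices))  # [1 2]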

Example 14: _continue_search

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_all [as alias]
def _continue_search(self, state):
    """Return whether to continue the search loop.

    The loops should terminate when
      1) when decode length has been reached, or
      2) when the worst score in the finished sequences is better than the best
         score in the alive sequences (i.e. the finished sequences are provably
         unchanging)

    Args:
      state: A dictionary with the current loop state.

    Returns:
      Bool tensor with value True if loop should continue, False if loop should
      terminate.
    """
    i = state[_StateKeys.CUR_INDEX]
    alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]
    finished_scores = state[_StateKeys.FINISHED_SCORES]
    finished_flags = state[_StateKeys.FINISHED_FLAGS]

    not_at_max_decode_length = tf.less(i, self.max_decode_length)

    # Calculate largest length penalty (the larger penalty, the better score).
    max_length_norm = _length_normalization(self.alpha, self.max_decode_length,
                                            dtype=self.dtype)
    # Get the best possible scores from alive sequences.
    best_alive_scores = alive_log_probs[:, 0] / max_length_norm

    # Compute worst score in finished sequences for each batch element
    finished_scores *= tf.cast(finished_flags,
                               self.dtype)  # set filler scores to zero
    lowest_finished_scores = tf.reduce_min(finished_scores, axis=1)

    # If there are no finished sequences in a batch element, then set the lowest
    # finished score to -INF for that element.
    finished_batches = tf.reduce_any(finished_flags, 1)
    lowest_finished_scores += ((1.0 -
                                tf.cast(finished_batches, self.dtype)) *
                               -inf(self.dtype))

    worst_finished_score_better_than_best_alive_score = tf.reduce_all(
        tf.greater(lowest_finished_scores, best_alive_scores)
    )

    return tf.logical_and(
        not_at_max_decode_length,
        tf.logical_not(worst_finished_score_better_than_best_alive_score)
    ) 
Developer ID: tensorflow, Project: models, Lines: 51, Source: beam_search_v1.py


Note: The tensorflow.compat.v1.reduce_all method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.