

Python v1.stop_gradient Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.stop_gradient method in Python. If you are wondering what exactly v1.stop_gradient does, how to call it, or what it looks like in real code, the selected examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module.


Below are 15 code examples of the v1.stop_gradient method, ordered by popularity by default.

Example 1: mask_from_lengths

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def mask_from_lengths(lengths, max_length=None, dtype=None, name=None):
  """Convert a length scalar to a vector of binary masks.

  This function will convert a vector of lengths to a matrix of binary masks.
  E.g. [2, 4, 3] will become [[1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 0]]

  Args:
    lengths: a d-dimensional vector of integers corresponding to lengths.
    max_length: an optional (default: None) scalar-like or 0-dimensional tensor
      indicating the maximum length of the masks. If not provided, the maximum
      length will be inferred from the lengths vector.
    dtype: the dtype of the returned mask, if specified. If None, the dtype of
      the lengths will be used.
    name: a name for the operation (optional).

  Returns:
    A [d, max_length] tensor of binary masks, cast to the requested dtype.
  """
  with tf.name_scope(name, 'mask_from_lengths'):
    dtype = lengths.dtype if dtype is None else dtype
    max_length = tf.reduce_max(lengths) if max_length is None else max_length
    indexes = tf.range(max_length, dtype=lengths.dtype)
    mask = tf.less(tf.expand_dims(indexes, 0), tf.expand_dims(lengths, 1))
    cast_mask = tf.cast(mask, dtype)
  return tf.stop_gradient(cast_mask) 
Developer: deepmind, Project: lamb, Lines: 27, Source: utils.py
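
A minimal usage sketch (the Session-based driver below is illustrative; the original lamb code builds its own graphs):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

lengths = tf.constant([2, 4, 3])
mask = mask_from_lengths(lengths)  # shape [3, 4]

with tf.Session() as sess:
    print(sess.run(mask))
# [[1 1 0 0]
#  [1 1 1 1]
#  [1 1 1 0]]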

Example 2: vq_nearest_neighbor

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def vq_nearest_neighbor(x, hparams):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = 2**hparams.bottleneck_bits
  means = hparams.means
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if hparams.bottleneck_kind == "em":
    x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=bottleneck_size)
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    x_means_idx = tf.argmax(-dist, axis=-1)
    x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
  x_means = tf.matmul(x_means_hot, means)
  e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
  return x_means_hot, e_loss 
Developer: tensorflow, Project: tensor2tensor, Lines: 21, Source: transformer_nat.py
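
A hedged usage sketch; the SimpleNamespace stands in for the tensor2tensor HParams object, and the field values are illustrative:

import types
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

hparams = types.SimpleNamespace(
    bottleneck_bits=3,                # 2**3 = 8 codebook entries
    bottleneck_kind="dvq",            # anything other than "em" takes the argmax branch
    means=tf.random_normal([8, 16]))  # random stand-in for the trainable codebook

x = tf.random_normal([32, 16])
x_means_hot, e_loss = vq_nearest_neighbor(x, hparams)  # [32, 8] one-hot codes, scalar loss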

Example 3: gumbel_sample

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def gumbel_sample(self, reconstr_gan):
    hparams = self.hparams
    is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
    vocab_size = self._problem_hparams.vocab_size["targets"]
    if hasattr(self._hparams, "vocab_divisor"):
      vocab_size += (-vocab_size) % self._hparams.vocab_divisor
    reconstr_gan = tf.nn.log_softmax(reconstr_gan)
    if is_training and hparams.gumbel_temperature > 0.0:
      gumbel_samples = discretization.gumbel_sample(
          common_layers.shape_list(reconstr_gan))
      gumbel_samples *= hparams.gumbel_noise_factor
      reconstr_gan += gumbel_samples
      reconstr_sample = latent_layers.multinomial_sample(
          reconstr_gan, temperature=hparams.gumbel_temperature)
      reconstr_gan = tf.nn.softmax(reconstr_gan / hparams.gumbel_temperature)
    else:
      reconstr_sample = tf.argmax(reconstr_gan, axis=-1)
      reconstr_gan = tf.nn.softmax(reconstr_gan / 0.1)  # Sharpen a bit.
    # Use 1-hot forward, softmax backward.
    reconstr_hot = tf.one_hot(reconstr_sample, vocab_size)
    reconstr_gan += reconstr_hot - tf.stop_gradient(reconstr_gan)
    return reconstr_gan 
Developer: tensorflow, Project: tensor2tensor, Lines: 24, Source: autoencoders.py
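
The last three lines implement the straight-through estimator: the forward pass emits the one-hot sample while gradients flow through the softmax. A stripped-down sketch of just that trick (names here are illustrative, not from the original class):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

logits = tf.random_normal([4, 10])
soft = tf.nn.softmax(logits)
hard = tf.one_hot(tf.argmax(logits, axis=-1), depth=10)
# Forward value equals `hard`; the gradient is that of `soft`.
straight_through = soft + (hard - tf.stop_gradient(soft))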

Example 4: shake_shake_branch

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def shake_shake_branch(x, output_filters, stride, rand_forward, rand_backward,
                       hparams):
  """Building a 2 branching convnet."""
  is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
  x = tf.nn.relu(x)
  x = tf.layers.conv2d(
      x,
      output_filters, (3, 3),
      strides=(stride, stride),
      padding="SAME",
      name="conv1")
  x = tf.layers.batch_normalization(x, training=is_training, name="bn1")
  x = tf.nn.relu(x)
  x = tf.layers.conv2d(x, output_filters, (3, 3), padding="SAME", name="conv2")
  x = tf.layers.batch_normalization(x, training=is_training, name="bn2")
  if is_training:
    x = x * rand_backward + tf.stop_gradient(x * rand_forward -
                                             x * rand_backward)
  else:
    x *= 1.0 / hparams.shake_shake_num_branches
  return x 
Developer: tensorflow, Project: tensor2tensor, Lines: 23, Source: shake_shake.py
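
A hedged usage sketch; the hparams fields and tensor shapes are assumptions inferred from the function body:

import types
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

hparams = types.SimpleNamespace(mode=tf.estimator.ModeKeys.TRAIN,
                                shake_shake_num_branches=2)
x = tf.random_normal([8, 32, 32, 16])
# Independent per-example scaling coefficients for the forward and backward passes.
rand_forward = tf.random_uniform([8, 1, 1, 1])
rand_backward = tf.random_uniform([8, 1, 1, 1])
branch = shake_shake_branch(x, output_filters=16, stride=1,
                            rand_forward=rand_forward,
                            rand_backward=rand_backward,
                            hparams=hparams)  # -> [8, 32, 32, 16]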

Example 5: pixels_from_softmax

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def pixels_from_softmax(frame_logits, pure_sampling=False,
                        temperature=1.0, gumbel_noise_factor=0.2):
  """Given frame_logits from a per-pixel softmax, generate colors."""
  # If we're purely sampling, just sample each pixel.
  if pure_sampling or temperature == 0.0:
    return common_layers.sample_with_temperature(frame_logits, temperature)

  # Gumbel-sample from the pixel softmax and average by pixel values.
  pixel_range = tf.to_float(tf.range(256))
  for _ in range(len(frame_logits.get_shape().as_list()) - 1):
    pixel_range = tf.expand_dims(pixel_range, axis=0)

  frame_logits = tf.nn.log_softmax(frame_logits)
  gumbel_samples = discretization.gumbel_sample(
      common_layers.shape_list(frame_logits)) * gumbel_noise_factor

  frame = tf.nn.softmax((frame_logits + gumbel_samples) / temperature, axis=-1)
  result = tf.reduce_sum(frame * pixel_range, axis=-1)
  # Round on the forward pass, not on the backward one.
  return result + tf.stop_gradient(tf.round(result) - result) 
Developer: tensorflow, Project: tensor2tensor, Lines: 22, Source: base.py
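
A hedged usage sketch, assuming the tensor2tensor helpers imported by base.py (common_layers, discretization) are available; the shape follows the per-pixel 256-way color softmax convention:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

frame_logits = tf.random_normal([2, 64, 64, 3, 256])
soft_pixels = pixels_from_softmax(frame_logits)                      # Gumbel-weighted average, rounded on the forward pass
hard_pixels = pixels_from_softmax(frame_logits, pure_sampling=True)  # sample each pixel directly
# Both results have shape [2, 64, 64, 3].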

Example 6: embedding_lookup

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def embedding_lookup(self, x, means):
    """Compute nearest neighbors and loss for training the embeddings.

    Args:
        x: Batch of encoder continuous latent states sliced/projected into
          shape [-1, num_blocks, block_dim].
        means: Embedding means.

    Returns:
        The nearest neighbor in one-hot form, the nearest neighbor itself,
        the commitment loss, and the embedding training loss.
    """
    x_means_hot = self.nearest_neighbor(x, means)
    x_means_hot_flat = tf.reshape(
        x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size])
    x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means)
    x_means = tf.transpose(x_means, [1, 0, 2])
    q_loss = tf.reduce_mean(
        tf.squared_difference(tf.stop_gradient(x), x_means))
    e_loss = tf.reduce_mean(
        tf.squared_difference(x, tf.stop_gradient(x_means)))
    return x_means_hot, x_means, q_loss, e_loss 
Developer: tensorflow, Project: tensor2tensor, Lines: 26, Source: vq_discrete.py

Example 7: bit_to_int

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def bit_to_int(self, x_bit, num_bits, base=2):
    """Turn x_bit representing numbers bitwise (lower-endian) to int tensor.

    Args:
        x_bit: Tensor containing numbers in a particular base to be
          converted to int.
        num_bits: Number of bits in the representation.
        base: Base of the representation.

    Returns:
        Integer representation of this number.
    """
    x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits])))
    # pylint: disable=g-complex-comprehension
    x_labels = [
        x_l[:, i] * tf.to_int32(base)**tf.to_int32(i) for i in range(num_bits)]
    res = sum(x_labels)
    return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1])) 
Developer: tensorflow, Project: tensor2tensor, Lines: 21, Source: vq_discrete.py

Example 8: bit_to_int

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def bit_to_int(x_bit, num_bits, base=2):
  """Turn x_bit representing numbers bitwise (lower-endian) to int tensor.

  Args:
    x_bit: Tensor containing numbers in a particular base to be converted to
      int.
    num_bits: Number of bits in the representation.
    base: Base of the representation.

  Returns:
    Integer representation of this number.
  """
  x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits])))
  x_labels = [
      x_l[:, i] * tf.to_int32(base)**tf.to_int32(i) for i in range(num_bits)]
  res = sum(x_labels)
  return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1])) 
Developer: tensorflow, Project: tensor2tensor, Lines: 19, Source: discretization.py
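
A usage sketch: read lower-endian, [1, 0, 1] in base 2 is 1*1 + 0*2 + 1*4 = 5 (assumes the common_layers import of discretization.py is available):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x_bit = tf.constant([[1.0, 0.0, 1.0],
                     [1.0, 1.0, 0.0]])
ints = bit_to_int(x_bit, num_bits=3)  # -> [5, 3]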

Example 9: tanh_discrete_bottleneck

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise,
                             discretize_warmup_steps, mode):
  """Simple discretization through tanh, flip bottleneck_noise many bits."""
  x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck")
  d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0
  if mode == tf.estimator.ModeKeys.TRAIN:
    x += tf.truncated_normal(
        common_layers.shape_list(x), mean=0.0, stddev=0.2)
  x = tf.tanh(x)
  d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
  if mode == tf.estimator.ModeKeys.TRAIN:
    noise = tf.random_uniform(common_layers.shape_list(x))
    noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
    d *= noise
  d = common_layers.mix(d, x, discretize_warmup_steps,
                        mode == tf.estimator.ModeKeys.TRAIN)
  return d, d0 
Developer: tensorflow, Project: tensor2tensor, Lines: 19, Source: discretization.py
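
A hedged usage sketch; EVAL mode is chosen so neither noise branch fires, and common_layers.mix from discretization.py handles the warmup mixing:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.random_normal([16, 64])
d, d0 = tanh_discrete_bottleneck(
    x, bottleneck_bits=32, bottleneck_noise=0.1,
    discretize_warmup_steps=10000, mode=tf.estimator.ModeKeys.EVAL)
# d and d0 are [16, 32]; d0 is the hard sign code, d follows the straight-through output above.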

Example 10: _build_train_op

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def _build_train_op(self):
    """Builds a training op.

    Returns:
      train_op: An op performing one step of training from replay data.
    """
    actions = self._replay.actions
    indices = tf.stack([tf.range(actions.shape[0]), actions], axis=-1)
    replay_chosen_q = tf.gather_nd(
        self._replay_net_outputs.q_heads, indices=indices)
    target = tf.stop_gradient(self._build_target_q_op())
    loss = tf.losses.huber_loss(
        target, replay_chosen_q, reduction=tf.losses.Reduction.NONE)
    q_head_losses = tf.reduce_mean(loss, axis=0)
    final_loss = tf.reduce_mean(q_head_losses)
    if self.summary_writer is not None:
      with tf.variable_scope('Losses'):
        tf.summary.scalar('HuberLoss', final_loss)
    return self.optimizer.minimize(final_loss) 
Developer: google-research, Project: batch_rl, Lines: 21, Source: multi_head_dqn_agent.py

Example 11: compute_lengths

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def compute_lengths(symbols_list, eos_symbol, name=None,
                    dtype=tf.int64):
  """Computes sequence lengths given end-of-sequence symbol.

  Args:
    symbols_list: list of [batch_size] tensors of symbols (e.g. integers).
    eos_symbol: end of sequence symbol (e.g. integer).
    name: name for the name scope of this op.
    dtype: type of symbols, default: tf.int64.

  Returns:
    Tensor [batch_size] of lengths of sequences.
  """
  with tf.name_scope(name, 'compute_lengths'):
    max_len = len(symbols_list)
    eos_symbol_ = tf.constant(eos_symbol, dtype=dtype)
    # For each time step t, store max_len - t where the symbol is EOS, 0 otherwise.
    # The maximum over time therefore marks the first EOS in each example.
    ends = [tf.constant(max_len - i, dtype=tf.int64)
            * tf.to_int64(tf.equal(s, eos_symbol_))
            for i, s in enumerate(symbols_list)]
    # Lengths of sequences, or max_len for sequences that didn't have EOS.
    # Note: examples without any EOS have a maximum of 0 in `ends`, so lens_
    # is max_len + 1 for them.
    lens_ = max_len + 1 - tf.reduce_max(tf.stack(ends, 1), axis=1)
    # For examples that didn't have EOS decrease max_len+1 to max_len as the
    # length.
    lens = tf.subtract(lens_, tf.to_int64(tf.equal(lens_, max_len + 1)))
    return tf.stop_gradient(tf.reshape(lens, [-1])) 
Developer: deepmind, Project: lamb, Lines: 31, Source: utils.py
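
A usage sketch: a batch of 2 sequences over 3 time steps with eos_symbol = 0:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

symbols_list = [tf.constant([5, 7], dtype=tf.int64),   # t = 0
                tf.constant([0, 6], dtype=tf.int64),   # t = 1
                tf.constant([0, 0], dtype=tf.int64)]   # t = 2
lens = compute_lengths(symbols_list, eos_symbol=0)
# -> [2, 3]: the first sequence hits EOS at t=1, the second at t=2.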

Example 12: get_latent_pred_loss

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def get_latent_pred_loss(latents_pred, latents_discrete_hot, hparams):
  """Latent prediction and loss."""
  latents_logits = tf.layers.dense(
      latents_pred, 2**hparams.bottleneck_bits, name="extra_logits")
  loss = tf.nn.softmax_cross_entropy_with_logits_v2(
      labels=tf.stop_gradient(latents_discrete_hot), logits=latents_logits)
  return loss 
Developer: tensorflow, Project: tensor2tensor, Lines: 9, Source: transformer_nat.py
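
A hedged usage sketch; only hparams.bottleneck_bits is read by the function, and all shapes here are illustrative:

import types
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

hparams = types.SimpleNamespace(bottleneck_bits=3)
latents_pred = tf.random_normal([8, 12, 64])
targets = tf.random_uniform([8, 12], maxval=2**3, dtype=tf.int32)
latents_discrete_hot = tf.one_hot(targets, depth=2**3)
loss = get_latent_pred_loss(latents_pred, latents_discrete_hot, hparams)  # -> [8, 12]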

Example 13: reverse_gradient

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def reverse_gradient(x, lr=1.0):
  return -lr * x + tf.stop_gradient((1.0 + lr) * x) 
Developer: tensorflow, Project: tensor2tensor, Lines: 4, Source: autoencoders.py
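
A minimal sketch of the gradient-reversal behaviour: the forward value is unchanged, but the gradient is scaled by -lr:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant(3.0)
y = reverse_gradient(x, lr=1.0)   # forward: -1.0*3 + stop_gradient(2.0*3) = 3
grad = tf.gradients(y, x)[0]

with tf.Session() as sess:
    print(sess.run([y, grad]))    # [3.0, -1.0]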

Example 14: bottleneck

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def bottleneck(self, x):
    hparams = self.hparams
    x = tf.tanh(tf.layers.dense(x, hparams.bottleneck_bits, name="bottleneck"))
    d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      noise = tf.random_uniform(common_layers.shape_list(x))
      noise = 2.0 * tf.to_float(tf.less(hparams.bottleneck_noise, noise)) - 1.0
      d *= noise
    x = common_layers.mix(d, x, hparams.discretize_warmup_steps,
                          hparams.mode == tf.estimator.ModeKeys.TRAIN)
    return x, 0.0 
Developer: tensorflow, Project: tensor2tensor, Lines: 13, Source: autoencoders.py

Example 15: predict_target_lengths

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stop_gradient [as alias]
def predict_target_lengths(
    encoder_output, inputs_mask, hparams, length_diff=None):
  """Predict target lengths."""
  bound = hparams.lendiff_bound
  inputs_length = tf.cast(tf.reduce_sum(inputs_mask, 1), tf.int32)
  targets_length = inputs_length
  loss = None
  if hparams.predict_target_length:
    encoder_output = gops.reduce_mean_over_l(encoder_output, inputs_mask)
    logits = tf.stop_gradient(encoder_output)
    logits = lenpred_mlp("lenpred", logits, hparams.hidden_size, bound)
    if length_diff is not None:
      labels = tf.maximum(tf.minimum(length_diff, bound), -bound)
      labels = tf.cast(labels + bound, tf.int32)
      labels = tf.stop_gradient(labels)
      loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=logits)
      loss = tf.reduce_mean(loss)
    diff_pred = tf.argmax(logits, 1)
    diff_pred = tf.cast(diff_pred - bound, tf.int32)
    targets_length = inputs_length + diff_pred
    targets_length = tf.maximum(targets_length, 1)
  divi = 4
  targets_length = tf.ceil(targets_length / divi) * divi
  targets_length = tf.cast(targets_length, tf.int32)
  return targets_length, loss 
Developer: tensorflow, Project: tensor2tensor, Lines: 28, Source: transformer_vae_flow_prior_ops.py


Note: The tensorflow.compat.v1.stop_gradient examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code, and do not reproduce this article without permission.