

Python tensorflow.stop_gradient Method Code Examples

This article collects typical usage examples of the tensorflow.stop_gradient method in Python. If you are wondering how exactly tensorflow.stop_gradient is used, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples of tensorflow, the module this method belongs to.


The following presents 15 code examples of the tensorflow.stop_gradient method, sorted by popularity by default.
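Before the examples, a quick reminder of what the method does: tf.stop_gradient(t) returns t unchanged in the forward pass but treats it as a constant during backpropagation, so no gradient flows through it. A minimal sketch, assuming a TensorFlow 1.x graph session (all names here are illustrative and not taken from the examples below):

import tensorflow as tf

x = tf.Variable(2.0)
y = tf.square(x)                              # dy/dx = 2x = 4
y_frozen = tf.square(tf.stop_gradient(x))     # same forward value, gradient path cut

grad_y = tf.gradients(y, x)[0]                # tensor evaluating to 4.0
grad_frozen = tf.gradients(y_frozen, x)       # [None]: x is unreachable through y_frozen

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run([y, grad_y]))              # [4.0, 4.0]
    print(grad_frozen)                        # [None]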

Example 1: get_hint_pool_idxs

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def get_hint_pool_idxs(self, normalized_query):
    """Get small set of idxs to compute nearest neighbor queries on.

    This is an expensive look-up on the whole memory that is used to
    avoid more expensive operations later on.

    Args:
      normalized_query: A Tensor of shape [None, key_dim].

    Returns:
      A Tensor of shape [None, choose_k] of indices in memory
      that are closest to the queries.

    """
    # look up in large memory, no gradients
    with tf.device(self.nn_device):
      similarities = tf.matmul(tf.stop_gradient(normalized_query),
                               self.mem_keys, transpose_b=True, name='nn_mmul')
    _, hint_pool_idxs = tf.nn.top_k(
        tf.stop_gradient(similarities), k=self.choose_k, name='nn_topk')
    return hint_pool_idxs 
Developer: ringringyi, Project: DOTA_models, Lines: 23, Source: memory.py

Example 2: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def call(self, inputs):
        mean_and_log_std = self.model(inputs)
        mean, log_std = tf.split(mean_and_log_std, num_or_size_splits=2, axis=1)
        log_std = tf.clip_by_value(log_std, -20., 2.)
        
        distribution = tfp.distributions.MultivariateNormalDiag(
            loc=mean,
            scale_diag=tf.exp(log_std)
        )
        
        raw_actions = distribution.sample()
        if not self._reparameterize:
            ### Problem 1.3.A
            ### YOUR CODE HERE
            raw_actions = tf.stop_gradient(raw_actions)
        log_probs = distribution.log_prob(raw_actions)
        log_probs -= self._squash_correction(raw_actions)

        ### Problem 2.A
        ### YOUR CODE HERE
        self.actions = tf.tanh(raw_actions)
            
        return self.actions, log_probs 
Developer: xuwd11, Project: cs294-112_hws, Lines: 25, Source: nn.py

Example 3: vq_nearest_neighbor

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def vq_nearest_neighbor(x, hparams):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = 2**hparams.bottleneck_bits
  means = hparams.means
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if hparams.bottleneck_kind == "em":
    x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=bottleneck_size)
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    x_means_idx = tf.argmax(-dist, axis=-1)
    x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
  x_means = tf.matmul(x_means_hot, means)
  e_loss = tf.reduce_mean(tf.square(x - tf.stop_gradient(x_means)))
  return x_means_hot, e_loss 
Developer: akzaidi, Project: fine-lm, Lines: 21, Source: transformer_nat.py
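The e_loss computed above is the commitment term of VQ-VAE-style training: the codebook vectors are frozen with tf.stop_gradient so that only the encoder output x is pulled toward its nearest code. The complementary patterns, also built on tf.stop_gradient, are the straight-through estimator and the codebook loss. A hedged sketch of both, using x and x_means as in the function above (this is the standard VQ-VAE recipe, not additional code from transformer_nat.py):

# Straight-through: the forward pass uses the quantized x_means, while the
# backward pass copies gradients to x as if quantization were the identity.
x_quantized = x + tf.stop_gradient(x_means - x)

# Codebook loss: mirror image of e_loss; here the encoder output is frozen
# and the embeddings are pulled toward it instead.
q_loss = tf.reduce_mean(tf.square(tf.stop_gradient(x) - x_means))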

Example 4: shake_shake_branch

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def shake_shake_branch(x, output_filters, stride, rand_forward, rand_backward,
                       hparams):
  """Building a 2 branching convnet."""
  is_training = hparams.mode == tf.contrib.learn.ModeKeys.TRAIN
  x = tf.nn.relu(x)
  x = tf.layers.conv2d(
      x,
      output_filters, (3, 3),
      strides=(stride, stride),
      padding="SAME",
      name="conv1")
  x = tf.layers.batch_normalization(x, training=is_training, name="bn1")
  x = tf.nn.relu(x)
  x = tf.layers.conv2d(x, output_filters, (3, 3), padding="SAME", name="conv2")
  x = tf.layers.batch_normalization(x, training=is_training, name="bn2")
  if is_training:
    x = x * rand_backward + tf.stop_gradient(x * rand_forward -
                                             x * rand_backward)
  else:
    x *= 1.0 / hparams.shake_shake_num_branches
  return x 
Developer: akzaidi, Project: fine-lm, Lines: 23, Source: shake_shake.py
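The training branch above is a standard trick for giving the forward and backward passes different scaling factors: the whole expression evaluates to x * rand_forward in the forward pass, while its gradient is that of x * rand_backward alone, because the stop_gradient term is treated as a constant. A tiny self-contained illustration of the same identity (scalar values chosen arbitrarily):

x = tf.constant(3.0)
a, b = 0.7, 0.3                                   # forward and backward weights
y = x * b + tf.stop_gradient(x * a - x * b)
# forward value: x * a = 2.1
# dy/dx: b = 0.3, since the stop_gradient term contributes no gradient
grad = tf.gradients(y, x)[0]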

Example 5: bit_to_int

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def bit_to_int(x_bit, num_bits, base=2):
  """Turn x_bit representing numbers bitwise (lower-endian) to int tensor.

  Args:
    x_bit: Tensor containing numbers in a particular base to be converted to
      int.
    num_bits: Number of bits in the representation.
    base: Base of the representation.

  Returns:
    Integer representation of this number.
  """
  x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits])))
  x_labels = []
  for i in range(num_bits):
    x_labels.append(x_l[:, i] * tf.to_int32(base)**tf.to_int32(i))
  res = sum(x_labels)
  return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1])) 
Developer: akzaidi, Project: fine-lm, Lines: 20, Source: discretization.py

Example 6: isemhash_bottleneck

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def isemhash_bottleneck(x, bottleneck_bits, bottleneck_noise,
                        discretize_warmup_steps, mode,
                        isemhash_noise_dev=0.5, isemhash_mix_prob=0.5):
  """Improved semantic hashing bottleneck."""
  with tf.variable_scope("isemhash_bottleneck"):
    x = tf.layers.dense(x, bottleneck_bits, name="dense")
    y = common_layers.saturating_sigmoid(x)
    if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
      noise = tf.truncated_normal(
          common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev)
      y = common_layers.saturating_sigmoid(x + noise)
    d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y)
    d = 2.0 * d - 1.0  # Move from [0, 1] to [-1, 1].
    if mode == tf.estimator.ModeKeys.TRAIN:  # Flip some bits.
      noise = tf.random_uniform(common_layers.shape_list(x))
      noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
      d *= noise
      d = common_layers.mix(d, 2.0 * y - 1.0, discretize_warmup_steps,
                            mode == tf.estimator.ModeKeys.TRAIN,
                            max_prob=isemhash_mix_prob)
    return d, 0.0 
Developer: akzaidi, Project: fine-lm, Lines: 23, Source: discretization.py
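The line d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y) is a straight-through construction: the forward value is the hard 0/1 thresholding of y, while the gradient is exactly that of the soft y, because the thresholding op carries no gradient and the -stop_gradient(y) term cancels y only in the forward value. A compact sketch of the pattern in isolation (illustrative tensors, not part of the bottleneck above):

y = tf.nn.sigmoid(logits)                 # soft values in (0, 1)
hard = tf.to_float(tf.less(0.5, y))       # hard 0/1 decisions, no gradient
d = hard + y - tf.stop_gradient(y)        # value == hard, gradient == gradient of y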

Example 7: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def __init__(self, ob_dim, ac_dim): #pylint: disable=W0613
        X = tf.placeholder(tf.float32, shape=[None, ob_dim*2+ac_dim*2+2]) # batch of observations
        vtarg_n = tf.placeholder(tf.float32, shape=[None], name='vtarg')
        wd_dict = {}
        h1 = tf.nn.elu(dense(X, 64, "h1", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict))
        h2 = tf.nn.elu(dense(h1, 64, "h2", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict))
        vpred_n = dense(h2, 1, "hfinal", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict)[:,0]
        sample_vpred_n = vpred_n + tf.random_normal(tf.shape(vpred_n))
        wd_loss = tf.get_collection("vf_losses", None)
        loss = tf.reduce_mean(tf.square(vpred_n - vtarg_n)) + tf.add_n(wd_loss)
        loss_sampled = tf.reduce_mean(tf.square(vpred_n - tf.stop_gradient(sample_vpred_n)))
        self._predict = U.function([X], vpred_n)
        optim = kfac.KfacOptimizer(learning_rate=0.001, cold_lr=0.001*(1-0.9), momentum=0.9, \
                                    clip_kl=0.3, epsilon=0.1, stats_decay=0.95, \
                                    async=1, kfac_update=2, cold_iter=50, \
                                    weight_decay_dict=wd_dict, max_grad_norm=None)
        vf_var_list = []
        for var in tf.trainable_variables():
            if "vf" in var.name:
                vf_var_list.append(var)

        update_op, self.q_runner = optim.minimize(loss, loss_sampled, var_list=vf_var_list)
        self.do_update = U.function([X, vtarg_n], update_op) #pylint: disable=E1101
        U.initialize() # Initialize uninitialized TF variables 
Developer: Hwhitetooth, Project: lirpg, Lines: 26, Source: value_functions.py

Example 8: gumbel_softmax

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def gumbel_softmax(logits, temperature, hard=False):
    """Sample from the Gumbel-Softmax distribution and optionally discretize.
    Args:
    logits: [batch_size, n_class] unnormalized log-probs
    temperature: non-negative scalar
    hard: if True, take argmax, but differentiate w.r.t. soft sample y
    Returns:
    [batch_size, n_class] sample from the Gumbel-Softmax distribution.
    If hard=True, then the returned sample will be one-hot, otherwise it will
    be a probabilitiy distribution that sums to 1 across classes
    """
    y = gumbel_softmax_sample(logits, temperature)
    if hard:
        # k = tf.shape(logits)[-1]
        # y_hard = tf.cast(tf.one_hot(tf.argmax(y, 1), k), y.dtype)
        y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype)
        y = tf.stop_gradient(y_hard - y) + y
    return y 
Developer: simonkamronn, Project: kvae, Lines: 20, Source: nn.py
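The helper gumbel_softmax_sample is not shown in this snippet. A common definition, given here as an assumption rather than the exact code from the kvae project, adds Gumbel(0, 1) noise to the logits and applies a temperature-scaled softmax:

def sample_gumbel(shape, eps=1e-20):
    # Draw samples from Gumbel(0, 1) via -log(-log(U)).
    u = tf.random_uniform(shape, minval=0, maxval=1)
    return -tf.log(-tf.log(u + eps) + eps)

def gumbel_softmax_sample(logits, temperature):
    # Draw a soft (differentiable) sample from the Gumbel-Softmax distribution.
    y = logits + sample_gumbel(tf.shape(logits))
    return tf.nn.softmax(y / temperature)

With hard=True, the stop_gradient line then makes the returned sample one-hot in the forward pass while keeping the soft sample's gradient, the same straight-through idea as in Examples 6 and 9.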

Example 9: gumbelSoftmax

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def gumbelSoftmax(logits, temperature, train): # hard = False
    # Sample from the Gumbel-Softmax distribution and optionally discretize.
    # Args:
    #    logits: [batch_size, n_class] unnormalized log-probs
    #    temperature: non-negative scalar
    #    hard: if True, take argmax, but differentiate w.r.t. soft sample y
    # Returns:
    #    [batch_size, n_class] sample from the Gumbel-Softmax distribution.
    #    If hard=True, then the returned sample will be one-hot, otherwise it will
    #    be a probability distribution that sums to 1 across classes

    y = gumbelSoftmaxSample(logits, temperature)

    # k = tf.shape(logits)[-1]
    # yHard = tf.cast(tf.one_hot(tf.argmax(y,1),k), y.dtype)
    yHard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims = True)), y.dtype)
    yNew = tf.stop_gradient(yHard - y) + y

    if config.gumbelSoftmaxBoth:
        return y
    if config.gumbelArgmaxBoth:
        return yNew
    ret = tf.cond(train, lambda: y, lambda: yNew)
    
    return ret 
Developer: stanfordnlp, Project: mac-network, Lines: 27, Source: ops.py

Example 10: kernel

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def kernel(self, particle_tensor, h=-1):
        euclidean_dists = tf_utils.pdist(particle_tensor)
        pairwise_dists = tf_utils.squareform(euclidean_dists) ** 2

        if h == -1:
            if FLAGS.kernel == 'org':
                mean_dist = tf_utils.median(pairwise_dists)  # tf.reduce_mean(euclidean_dists) ** 2
                h = mean_dist / math.log(self.num_particles)
                h = tf.stop_gradient(h)
            elif FLAGS.kernel == 'med':
                mean_dist = tf_utils.median(euclidean_dists) ** 2
                h = mean_dist / math.log(self.num_particles)
                h = tf.stop_gradient(h)
            else:
                mean_dist = tf.reduce_mean(euclidean_dists) ** 2
                h = mean_dist / math.log(self.num_particles)

        kernel_matrix = tf.exp(-pairwise_dists / h)
        kernel_sum = tf.reduce_sum(kernel_matrix, axis=1, keep_dims=True)
        grad_kernel = -tf.matmul(kernel_matrix, particle_tensor)
        grad_kernel += particle_tensor * kernel_sum
        grad_kernel /= h
        return kernel_matrix, grad_kernel, h 
Developer: jsikyoon, Project: bmaml, Lines: 25, Source: bmaml.py

Example 11: _sample

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def _sample(self, n_samples):
        mean, cov_tril = self.mean, self.cov_tril
        if not self.is_reparameterized:
            mean = tf.stop_gradient(mean)
            cov_tril = tf.stop_gradient(cov_tril)

        def tile(t):
            new_shape = tf.concat([[n_samples], tf.ones_like(tf.shape(t))], 0)
            return tf.tile(tf.expand_dims(t, 0), new_shape)

        batch_mean = tile(mean)
        batch_cov = tile(cov_tril)
        # n_dim -> n_dim x 1 for matmul
        batch_mean = tf.expand_dims(batch_mean, -1)
        noise = tf.random_normal(tf.shape(batch_mean), dtype=self.dtype)
        samples = tf.matmul(batch_cov, noise) + batch_mean
        samples = tf.squeeze(samples, -1)
        # Update static shape
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(tf.TensorShape([static_n_samples])
                          .concatenate(self.get_batch_shape())
                          .concatenate(self.get_value_shape()))
        return samples 
Developer: thu-ml, Project: zhusuan, Lines: 25, Source: multivariate.py
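When is_reparameterized is False, the mean and Cholesky factor are detached before sampling, so an objective built on the samples produces no pathwise gradient into the distribution parameters; a score-function (REINFORCE-style) estimator has to be used instead. A toy illustration of the difference (illustrative names, not the zhusuan API):

mean = tf.Variable(0.5)
noise = tf.random_normal([])

# Reparameterized: gradients flow from the sample back into the parameter.
sample_rep = mean + noise
g_rep = tf.gradients(tf.square(sample_rep), mean)[0]     # defined, equals 2 * sample_rep

# Non-reparameterized: the parameter is frozen inside the sampling path.
sample_no_rep = tf.stop_gradient(mean) + noise
g_no_rep = tf.gradients(tf.square(sample_no_rep), mean)  # [None]: the path is cut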

Example 12: _sample

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def _sample(self, n_samples):
        # samples must be sampled from (-1, 1) rather than [-1, 1)
        loc, scale = self.loc, self.scale
        if not self.is_reparameterized:
            loc = tf.stop_gradient(loc)
            scale = tf.stop_gradient(scale)
        shape = tf.concat([[n_samples], self.batch_shape], 0)
        uniform_samples = tf.random_uniform(
            shape=shape,
            minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                                self.dtype.as_numpy_dtype(0.)),
            maxval=1.,
            dtype=self.dtype)
        samples = loc - scale * tf.sign(uniform_samples) * \
            tf.log1p(-tf.abs(uniform_samples))
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples]).concatenate(
                self.get_batch_shape()))
        return samples 
Developer: thu-ml, Project: zhusuan, Lines: 22, Source: univariate.py

Example 13: get_or_guess_labels

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def get_or_guess_labels(self, x, kwargs):
        """
        Get the label to use in generating an adversarial example for x.
        The kwargs are fed directly from the kwargs of the attack.
        If 'y' is in kwargs, then assume it's an untargeted attack and
        use that as the label.
        If 'y_target' is in kwargs and is not none, then assume it's a
        targeted attack and use that as the label.
        Otherwise, use the model's prediction as the label and perform an
        untargeted attack.
        """
        import tensorflow as tf

        if 'y' in kwargs and 'y_target' in kwargs:
            raise ValueError("Can not set both 'y' and 'y_target'.")
        elif 'y' in kwargs:
            labels = kwargs['y']
        elif 'y_target' in kwargs and kwargs['y_target'] is not None:
            labels = kwargs['y_target']
        else:
            preds = self.model.get_probs(x)
            preds_max = reduce_max(preds, 1, keepdims=True)
            original_predictions = tf.to_float(tf.equal(preds, preds_max))
            labels = tf.stop_gradient(original_predictions)
        if isinstance(labels, np.ndarray):
            nb_classes = labels.shape[1]
        else:
            nb_classes = labels.get_shape().as_list()[1]
        return labels, nb_classes 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 31, Source: attacks.py
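Here tf.stop_gradient freezes the model's own argmax prediction before it is used as the attack label, so the adversarial loss is differentiated with respect to the input only, never through the label construction (the usual guard against "label leaking"). A compact sketch of how such a frozen label is typically consumed (illustrative tensors, not cleverhans internals):

preds = model.get_probs(x)                                  # model probabilities
preds_max = tf.reduce_max(preds, 1, keepdims=True)
labels = tf.stop_gradient(tf.to_float(tf.equal(preds, preds_max)))
loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels,
                                                  logits=model.get_logits(x))
# tf.gradients(loss, x) flows only through the logits branch; the frozen
# labels are treated as constants.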

Example 14: vatm

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def vatm(model,
         x,
         logits,
         eps,
         num_iterations=1,
         xi=1e-6,
         clip_min=None,
         clip_max=None,
         scope=None):
    """
    Tensorflow implementation of the perturbation method used for virtual
    adversarial training: https://arxiv.org/abs/1507.00677
    :param model: the model which returns the network unnormalized logits
    :param x: the input placeholder
    :param logits: the model's unnormalized output tensor (the input to
                   the softmax layer)
    :param eps: the epsilon (input variation parameter)
    :param num_iterations: the number of iterations
    :param xi: the finite difference parameter
    :param clip_min: optional parameter that can be used to set a minimum
                    value for components of the example returned
    :param clip_max: optional parameter that can be used to set a maximum
                    value for components of the example returned
    :param scope: optional name scope for the created operations
    :return: a tensor for the adversarial example
    """
    with tf.name_scope(scope, "virtual_adversarial_perturbation"):
        d = tf.random_normal(tf.shape(x), dtype=tf_dtype)
        for i in range(num_iterations):
            d = xi * utils_tf.l2_batch_normalize(d)
            logits_d = model.get_logits(x + d)
            kl = utils_tf.kl_with_logits(logits, logits_d)
            Hd = tf.gradients(kl, d)[0]
            d = tf.stop_gradient(Hd)
        d = eps * utils_tf.l2_batch_normalize(d)
        adv_x = x + d
        if (clip_min is not None) and (clip_max is not None):
            adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
        return adv_x 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 41, Source: attacks_tf.py
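Inside the loop, tf.stop_gradient(Hd) freezes each power-iteration update so the next iteration does not backpropagate through the previous one; the perturbation direction is treated as data. In virtual adversarial training proper, the returned adv_x then feeds a consistency loss in which the clean logits are also frozen, so only the perturbed branch is optimized. A hedged sketch of that downstream use (an assumed usage, not part of attacks_tf.py):

adv_x = vatm(model, x, logits, eps=2.0)
logits_adv = model.get_logits(adv_x)
# Freeze the clean-input logits; gradients flow only through the perturbed branch.
vat_loss = utils_tf.kl_with_logits(tf.stop_gradient(logits), logits_adv)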

Example 15: build_optim

# Required import: import tensorflow [as alias]
# Or: from tensorflow import stop_gradient [as alias]
def build_optim(self):
        # Update moving_mean and moving_variance for batch normalization layers
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):

            with tf.name_scope('reinforce'):
                # Actor learning rate
                self.lr1 = tf.train.exponential_decay(self.lr1_start, self.global_step, self.lr1_decay_step,self.lr1_decay_rate, staircase=False, name="learning_rate1")
                # Optimizer
                self.opt1 = tf.train.AdamOptimizer(learning_rate=self.lr1,beta1=0.9,beta2=0.99, epsilon=0.0000001)
                # Discounted reward
                self.reward_baseline = tf.stop_gradient(self.reward - self.critic.predictions) # [Batch size, 1]
                variable_summaries('reward_baseline',self.reward_baseline, with_max_min = True)
                # Loss
                self.loss1 = tf.reduce_mean(self.reward_baseline*self.log_softmax,0)
                tf.summary.scalar('loss1', self.loss1)
                # Minimize step
                gvs = self.opt1.compute_gradients(self.loss1)
                capped_gvs = [(tf.clip_by_norm(grad, 1.), var) for grad, var in gvs if grad is not None] # L2 clip
                self.train_step1 = self.opt1.apply_gradients(capped_gvs, global_step=self.global_step)

            with tf.name_scope('state_value'):
                # Critic learning rate
                self.lr2 = tf.train.exponential_decay(self.lr2_start, self.global_step2, self.lr2_decay_step,self.lr2_decay_rate, staircase=False, name="learning_rate2")
                # Optimizer
                self.opt2 = tf.train.AdamOptimizer(learning_rate=self.lr2,beta1=0.9,beta2=0.99, epsilon=0.0000001)
                # Loss
                self.loss2 = tf.losses.mean_squared_error(self.reward, self.critic.predictions, weights = 1.0)
                tf.summary.scalar('loss2', self.loss2)
                # Minimize step
                gvs2 = self.opt2.compute_gradients(self.loss2)
                capped_gvs2 = [(tf.clip_by_norm(grad, 1.), var) for grad, var in gvs2 if grad is not None] # L2 clip
                self.train_step2 = self.opt2.apply_gradients(capped_gvs2, global_step=self.global_step2)
Developer: MichelDeudon, Project: neural-combinatorial-optimization-rl-tensorflow, Lines: 35, Source: actor.py
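The stop_gradient on reward - self.critic.predictions is the standard REINFORCE-with-baseline construction: the advantage is treated as a constant in the actor loss, so optimizing loss1 updates the policy but never pushes gradients into the critic, which is trained separately through loss2. A stripped-down sketch of the same pattern (illustrative names):

# Actor: policy-gradient loss with a frozen advantage.
advantage = tf.stop_gradient(reward - critic_prediction)
actor_loss = tf.reduce_mean(advantage * log_prob_actions)

# Critic: regressed toward the observed reward with its own loss and optimizer.
critic_loss = tf.reduce_mean(tf.square(reward - critic_prediction))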


Note: The tensorflow.stop_gradient method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding projects' licenses; do not republish without permission.