

Python v1.assign Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.assign method in Python. If you are wondering what v1.assign does, how to call it, or what it looks like in real code, the curated examples below may help. You can also browse other usage examples from the tensorflow.compat.v1 module.


The following presents 15 code examples of the v1.assign method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
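Before diving into the examples, here is a minimal sketch of the basic pattern, assuming a TF1-style graph and session (the variable and op names below are illustrative, not taken from any of the projects cited):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # run in TF1 graph mode

v = tf.Variable(1.0)
# tf.assign returns a tensor that, when evaluated, writes the new value
# into v and yields that value.
assign_op = tf.assign(v, 5.0)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(v))          # 1.0
  print(sess.run(assign_op))  # 5.0 (v is updated as a side effect)
  print(sess.run(v))          # 5.0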

Example 1: testAppendGradientsWithLossScaleWithAutoScaleDisabled

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def testAppendGradientsWithLossScaleWithAutoScaleDisabled(self):
    v = tf.Variable(0)
    training_ops = []
    get_apply_gradients_ops_func = lambda: [tf.assign(v, v + 1)]
    loss_scale_params = variable_mgr_util.AutoLossScaleParams(
        enable_auto_loss_scale=False,  # no auto loss scale.
        loss_scale=tf.Variable(4),
        loss_scale_normal_steps=tf.Variable(10),
        inc_loss_scale_every_n=10,
        is_chief=True)
    variable_mgr_util.append_gradients_with_loss_scale(
        training_ops,
        get_apply_gradients_ops_func,
        loss_scale_params,
        grad_has_inf_nan=True)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(training_ops)
      self.assertEqual(sess.run(v), 1)
      self.assertEqual(sess.run(loss_scale_params.loss_scale), 4)
      self.assertEqual(sess.run(loss_scale_params.loss_scale_normal_steps), 10) 
Developer ID: tensorflow, Project: benchmarks, Code lines: 24, Source: variable_mgr_util_test.py

Example 2: testAppendGradientsWithLossScaleForNonChiefWorker

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def testAppendGradientsWithLossScaleForNonChiefWorker(self):
    v = tf.Variable(0)
    training_ops = []
    get_apply_gradients_ops_func = lambda: [tf.assign(v, v + 1)]
    loss_scale_params = variable_mgr_util.AutoLossScaleParams(
        enable_auto_loss_scale=True,
        loss_scale=tf.Variable(4),
        loss_scale_normal_steps=tf.Variable(10),
        inc_loss_scale_every_n=10,
        is_chief=False)  # Non-chief
    variable_mgr_util.append_gradients_with_loss_scale(
        training_ops,
        get_apply_gradients_ops_func,
        loss_scale_params,
        grad_has_inf_nan=False)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(training_ops)
      self.assertEqual(sess.run(v), 1)
      self.assertEqual(sess.run(loss_scale_params.loss_scale), 4)
      self.assertEqual(sess.run(loss_scale_params.loss_scale_normal_steps), 10) 
Developer ID: tensorflow, Project: benchmarks, Code lines: 24, Source: variable_mgr_util_test.py

Example 3: testAppendGradientsWithLossScaleWithoutNan

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def testAppendGradientsWithLossScaleWithoutNan(self):
    v = tf.Variable(0)
    training_ops = []
    get_apply_gradients_ops_func = lambda: [tf.assign(v, v + 1)]
    loss_scale_params = variable_mgr_util.AutoLossScaleParams(
        enable_auto_loss_scale=True,
        loss_scale=tf.Variable(4, dtype=tf.float32),
        loss_scale_normal_steps=tf.Variable(10),
        inc_loss_scale_every_n=10,
        is_chief=True)
    variable_mgr_util.append_gradients_with_loss_scale(
        training_ops,
        get_apply_gradients_ops_func,
        loss_scale_params,
        grad_has_inf_nan=tf.constant(False))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(training_ops)
      self.assertEqual(sess.run(v), 1)
      self.assertEqual(sess.run(loss_scale_params.loss_scale), 8)
      self.assertEqual(sess.run(loss_scale_params.loss_scale_normal_steps), 0) 
Developer ID: tensorflow, Project: benchmarks, Code lines: 24, Source: variable_mgr_util_test.py

Example 4: testAppendGradientsWithLossScaleWithtNan

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def testAppendGradientsWithLossScaleWithtNan(self):
    v = tf.Variable(0)
    training_ops = []
    get_apply_gradients_ops_func = lambda: [tf.assign(v, v + 1)]
    loss_scale_params = variable_mgr_util.AutoLossScaleParams(
        enable_auto_loss_scale=True,
        loss_scale=tf.Variable(4, dtype=tf.float32),
        loss_scale_normal_steps=tf.Variable(10),
        inc_loss_scale_every_n=10,
        is_chief=True)
    variable_mgr_util.append_gradients_with_loss_scale(
        training_ops,
        get_apply_gradients_ops_func,
        loss_scale_params,
        grad_has_inf_nan=tf.constant(True))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(training_ops)
      self.assertEqual(sess.run(v), 0)  # Skip updating for v.
      # Halve loss_scale and reset loss_scale_normal_steps.
      self.assertEqual(sess.run(loss_scale_params.loss_scale), 2)
      self.assertEqual(sess.run(loss_scale_params.loss_scale_normal_steps), 0) 
Developer ID: tensorflow, Project: benchmarks, Code lines: 25, Source: variable_mgr_util_test.py

Example 5: simulate

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def simulate(self, action):

    # There is a subtlety here. We need to collect data as
    # obs, action = policy(obs); done, reward = env(obs, action).
    # Thus we must enqueue the data before assigning the new observation.

    reward, done = self._batch_env.simulate(action)

    with tf.control_dependencies([reward, done]):
      enqueue_op = self.speculum.enqueue(
          [self._observ.read_value(), reward, done, action])

    with tf.control_dependencies([enqueue_op]):
      assign = self._observ.assign(self._batch_env.observ)

    with tf.control_dependencies([assign]):
      return tf.identity(reward), tf.identity(done) 
Developer ID: tensorflow, Project: tensor2tensor, Code lines: 19, Source: ppo_learner.py

Example 6: _apply_cond

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
    """Apply conditionally if counter is zero."""
    grad_acc = self.get_slot(var, "grad_acc")

    def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
      total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
      adam_op = apply_fn(total_grad, var, *args, **kwargs)
      with tf.control_dependencies([adam_op]):
        grad_acc_to_zero_op = grad_acc.assign(
            tf.zeros_like(grad_acc), use_locking=self._use_locking)
      return tf.group(adam_op, grad_acc_to_zero_op)

    def accumulate_gradient(grad_acc, grad):
      assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
      return tf.group(assign_op)  # Strip return value

    return tf.cond(
        tf.equal(self._get_iter_variable(), 0),
        lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
        lambda: accumulate_gradient(grad_acc, grad)) 
Developer ID: tensorflow, Project: tensor2tensor, Code lines: 22, Source: multistep_with_adamoptimizer.py

Example 7: _apply_sparse_shared

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
    beta1_power, beta2_power = self._get_beta_accumulators()
    beta1_power = tf.cast(beta1_power, var.dtype.base_dtype)
    beta2_power = tf.cast(beta2_power, var.dtype.base_dtype)
    lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype)
    lr = (lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power))
    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, "m")
    m_scaled_g_values = grad * (1 - beta1_t)
    m_t = tf.assign(m, m * beta1_t, use_locking=self._use_locking)
    with tf.control_dependencies([m_t]):
      m_t = scatter_add(m, indices, m_scaled_g_values)
    # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
    v = self.get_slot(var, "v")
    v_scaled_g_values = (grad * grad) * (1 - beta2_t)
    v_t = tf.assign(v, v * beta2_t, use_locking=self._use_locking)
    with tf.control_dependencies([v_t]):
      v_t = scatter_add(v, indices, v_scaled_g_values)
    v_sqrt = tf.sqrt(v_t)
    var_update = tf.assign_sub(
        var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
    return tf.group(*[var_update, m_t, v_t]) 
Developer ID: tensorflow, Project: tensor2tensor, Code lines: 27, Source: multistep_with_adamoptimizer.py

Example 8: setup_optimizer

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def setup_optimizer(self):
    """Instantiates learning rate, decay op and train_op among others."""
    # If not training, don't need to add optimizer to the graph.
    if not self.is_training:
      self.train_op = tf.no_op()
      self.learning_rate = tf.no_op()
      return

    self.learning_rate = tf.Variable(
        self.hparams.learning_rate,
        name='learning_rate',
        trainable=False,
        dtype=tf.float32)

    # FIXME 0.5 -> hparams.decay_rate
    self.decay_op = tf.assign(self.learning_rate, 0.5 * self.learning_rate)
    self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
    self.train_op = self.optimizer.minimize(self.loss) 
Developer ID: magenta, Project: magenta, Code lines: 20, Source: lib_graph.py

Example 9: _cache_with_update_op

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def _cache_with_update_op(self, tensor):
    """Creates non-trainable variable to cache the tensor across sess.run calls.

    Args:
      tensor: Tensor to cache.

    Returns:
      cached_tensor: Non-trainable variable to contain the cached value
        of `tensor`.
      update_op: TensorFlow op to re-evaluate `tensor` and assign the result
        to `cached_tensor`.
    """
    cached_tensor = tf.get_variable(
        tensor.name.replace(':', '__') + '_ibp_cache',
        shape=tensor.shape, dtype=tensor.dtype, trainable=False)
    update_op = tf.assign(cached_tensor, tensor)
    return cached_tensor, update_op 
Developer ID: deepmind, Project: interval-bound-propagation, Code lines: 19, Source: bounds.py

Example 10: test_piecewise_linear

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def test_piecewise_linear(self, boundaries, values, test_inputs,
                            expected_outputs):
    global_step = tf.train.get_or_create_global_step()
    global_step_value = tf.placeholder(tf.int64, [])
    set_global_step = tf.assign(global_step, global_step_value)

    test_function = global_step_functions.piecewise_linear(boundaries, values)
    with tf.Session() as sess:
      for x, y_expected in zip(test_inputs, expected_outputs):
        sess.run(set_global_step, {global_step_value: x})
        y = sess.run(test_function)
        self.assertEqual(y, y_expected)

    # Test the same with tensors as inputs
    test_function = global_step_functions.piecewise_linear(
        tf.convert_to_tensor(boundaries), tf.convert_to_tensor(values))
    with tf.Session() as sess:
      for x, y_expected in zip(test_inputs, expected_outputs):
        sess.run(set_global_step, {global_step_value: x})
        y = sess.run(test_function)
        self.assertEqual(y, y_expected) 
Developer ID: google-research, Project: tensor2robot, Code lines: 23, Source: global_step_functions_test.py

Example 11: test_gaussian_sum_with_changing_clip_no_noise

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def test_gaussian_sum_with_changing_clip_no_noise(self):
    with self.cached_session() as sess:
      record1 = tf.constant([-6.0, 8.0])  # Clipped to [-3.0, 4.0].
      record2 = tf.constant([4.0, -3.0])  # Not clipped.

      l2_norm_clip = tf.Variable(5.0)
      l2_norm_clip_placeholder = tf.placeholder(tf.float32)
      assign_l2_norm_clip = tf.assign(l2_norm_clip, l2_norm_clip_placeholder)
      query = gaussian_query.GaussianSumQuery(
          l2_norm_clip=l2_norm_clip, stddev=0.0)
      query_result, _ = test_utils.run_query(query, [record1, record2])

      self.evaluate(tf.global_variables_initializer())
      result = sess.run(query_result)
      expected = [1.0, 1.0]
      self.assertAllClose(result, expected)

      sess.run(assign_l2_norm_clip, {l2_norm_clip_placeholder: 0.0})
      result = sess.run(query_result)
      expected = [0.0, 0.0]
      self.assertAllClose(result, expected) 
Developer ID: tensorflow, Project: privacy, Code lines: 23, Source: gaussian_query_test.py

Example 12: record_sum_query

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def record_sum_query(self, l2_norm_bound, noise_stddev):
    """Records that a query was issued.

    Args:
      l2_norm_bound: The maximum l2 norm of the tensor group in the query.
      noise_stddev: The standard deviation of the noise applied to the sum.

    Returns:
      An operation recording the sum query to the ledger.
    """

    def _do_record_query():
      with tf.control_dependencies(
          [tf.assign(self._query_count, self._query_count + 1)]):
        return self._query_buffer.append(
            [self._sample_count, l2_norm_bound, noise_stddev])

    return self._cs.execute(_do_record_query) 
Developer ID: tensorflow, Project: privacy, Code lines: 20, Source: privacy_ledger.py

Example 13: _update_stats

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def _update_stats(self, x):
        ct = tf.maximum(1e-10, self._ct)

        ct_b = tf.to_float(tf.shape(x)[0])
        mu_b, var_b = tf.nn.moments(x, axes=[0])

        delta = mu_b - self._mu

        new_ct = ct + ct_b
        new_mu = self._mu + delta * ct_b / new_ct
        new_var = (self._var * ct + var_b * ct_b + delta ** 2 * ct * ct_b / new_ct) / new_ct

        self.add_update([
            tf.assign(self._ct, new_ct),
            tf.assign(self._mu, new_mu),
            tf.assign(self._var, new_var)
        ])

        return new_mu, new_var 
Developer ID: inoryy, Project: reaver, Code lines: 21, Source: layers.py

Example 14: get_checkpoint_init_fn

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def get_checkpoint_init_fn():
  """Returns the checkpoint init_fn if the checkpoint is provided."""
  if FLAGS.fine_tune_checkpoint:
    variables_to_restore = slim.get_variables_to_restore()
    global_step_reset = tf.assign(
        tf.train.get_or_create_global_step(), 0)
    # When restoring from a floating point model, the min/max values for
    # quantized weights and activations are not present.
    # We instruct slim to ignore variables that are missing during restoration
    # by setting ignore_missing_vars=True
    slim_init_fn = slim.assign_from_checkpoint_fn(
        FLAGS.fine_tune_checkpoint,
        variables_to_restore,
        ignore_missing_vars=True)

    def init_fn(sess):
      slim_init_fn(sess)
      # If we are restoring from a floating point model, we need to initialize
      # the global step to zero for the exponential decay to result in
      # reasonable learning rates.
      sess.run(global_step_reset)
    return init_fn
  else:
    return None 
Developer ID: tensorflow, Project: models, Code lines: 26, Source: mobilenet_v1_train.py

Example 15: get_loss_scale_update_op

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import assign [as alias]
def get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
                             inc_loss_scale_every_n):
  """Returns the update op for loss scaling variables.

  We maintain the counter `loss_scale_normal_steps` to count the number of steps
  we have been using the current `loss_scale`. In most cases, this function
  increments `loss_scale_normal_steps`. However, once `loss_scale_normal_steps`
  reaches the threshold `inc_loss_scale_every_n`, we double `loss_scale` and
  reset `loss_scale_normal_steps` to zero.

  This op is only called if the gradients don't have any infs or nans. If infs
  or nans do occur in the gradients, we immediately halve `loss_scale` and
  reset `loss_scale_normal_steps` to zero.

  Args:
    loss_scale: a tf.Variable representing the loss_scale value.
    loss_scale_normal_steps: a tf.Variable representing the number of training
      steps that have run since the loss_scale last changed.
    inc_loss_scale_every_n: a Python integer threshold. `loss_scale` is
      increased every `inc_loss_scale_every_n` steps, unless the gradients have
      infs or nans.

  Returns:
    An op for updating `loss_scale` and `loss_scale_normal_steps`.
  """

  def increment_loss_scale_normal_steps_func():
    return tf.group(loss_scale_normal_steps.assign_add(1))

  def increase_loss_scale_func():
    return tf.group(
        tf.assign(loss_scale_normal_steps, 0),
        tf.assign(loss_scale, loss_scale * 2))

  # true_fn and false_fn must have the same type.
  return tf.cond(loss_scale_normal_steps < inc_loss_scale_every_n,
                 increment_loss_scale_normal_steps_func,
                 increase_loss_scale_func) 
Developer ID: tensorflow, Project: benchmarks, Code lines: 40, Source: variable_mgr_util.py
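The docstring above describes only the no-inf/nan path; when the gradients do contain infs or nans, the caller instead halves `loss_scale` and resets `loss_scale_normal_steps`. Here is a minimal self-contained sketch of that halving branch, assuming the same variable layout as the tests above (`on_bad_grads`, `on_good_grads`, and `grad_has_inf_nan` are illustrative names, not part of variable_mgr_util):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

loss_scale = tf.Variable(4.0)
loss_scale_normal_steps = tf.Variable(7)
grad_has_inf_nan = tf.placeholder(tf.bool, [])

def on_bad_grads():
  # Mirror the docstring: halve loss_scale and reset the step counter.
  return tf.group(
      tf.assign(loss_scale_normal_steps, 0),
      tf.assign(loss_scale, loss_scale / 2))

def on_good_grads():
  return tf.group(loss_scale_normal_steps.assign_add(1))

update_op = tf.cond(grad_has_inf_nan, on_bad_grads, on_good_grads)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(update_op, {grad_has_inf_nan: True})
  print(sess.run(loss_scale))               # 2.0
  print(sess.run(loss_scale_normal_steps))  # 0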


Note: The tensorflow.compat.v1.assign method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by many developers; copyright remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.