

Python v1.control_dependencies Method: Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.control_dependencies method in Python. If you are unsure what v1.control_dependencies does, how to call it, or what it looks like in practice, the curated examples below should help. You can also explore further usage examples from the tensorflow.compat.v1 module.


The following presents 15 code examples of the v1.control_dependencies method, ordered by popularity by default.
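Before diving into the project-specific examples, a minimal, self-contained sketch may help (this one is illustrative and not taken from any of the projects below). In graph mode, TensorFlow only executes an op when something depends on it, so side effects such as variable assignments must be ordered explicitly; tf.control_dependencies is the tool for that.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

v = tf.Variable(1.0)
update = tf.assign(v, 5.0)
with tf.control_dependencies([update]):
  # read_value() creates its op inside the scope, so it runs after `update`.
  result = v.read_value()

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(result))  # 5.0: the assignment is forced to run first

Without the control dependency, fetching result could read the variable before (or without) running update, since nothing in the graph would otherwise order the two ops.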

Example 1: append_apply_gradients_ops

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def append_apply_gradients_ops(self, gradient_state, opt, grads, training_ops,
                                 loss_scale_params):
    device_grads = gradient_state  # From 2nd result of preprocess_device_grads.

    def get_apply_gradients_ops_func():
      """Returns a list of ops for updating gradients."""
      apply_gradients_ops = []
      # For each variable, apply the combined gradients for this server on
      # the parameter server, and then wait for all other servers to do this.
      for i, (g, v) in enumerate(grads):
        apply_gradient_op = opt.apply_gradients([(g, v)])
        barrier = self.benchmark_cnn.add_sync_queues_and_barrier(
            'replicate_variable_%s' % i, [apply_gradient_op])
        with tf.control_dependencies([barrier]):
          with tf.device(self.benchmark_cnn.cpu_device):
            updated_value = v.read_value()
            for my_d in range(len(self.benchmark_cnn.devices)):
              apply_gradients_ops.append(
                  device_grads[my_d][i][1].assign(updated_value))
      return apply_gradients_ops

    variable_mgr_util.append_gradients_with_loss_scale(
        training_ops, get_apply_gradients_ops_func, loss_scale_params,
        self.grad_has_inf_nan) 
Developer: tensorflow, Project: benchmarks, Lines: 26, Source: variable_mgr.py

Example 2: simulate

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def simulate(self, action):

    # There is a subtlety here. We need to record the transition
    #   obs, action = policy(obs); reward, done = env(obs, action)
    # so we must enqueue the data before assigning the new observation.

    reward, done = self._batch_env.simulate(action)

    with tf.control_dependencies([reward, done]):
      enqueue_op = self.speculum.enqueue(
          [self._observ.read_value(), reward, done, action])

    with tf.control_dependencies([enqueue_op]):
      assign = self._observ.assign(self._batch_env.observ)

    with tf.control_dependencies([assign]):
      return tf.identity(reward), tf.identity(done) 
Developer: tensorflow, Project: tensor2tensor, Lines: 19, Source: ppo_learner.py

Example 3: simulate

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def simulate(self, action):
    reward, done = self._batch_env.simulate(action)
    with tf.control_dependencies([reward, done]):
      new_observ = tf.expand_dims(self._batch_env.observ, axis=1)

      # If we shouldn't stack, i.e. self.history == 1, then just assign
      # new_observ to self._observ and return from here.
      if self.history == 1:
        with tf.control_dependencies([self._observ.assign(new_observ)]):
          return tf.identity(reward), tf.identity(done)

      # If we should stack, then do the required work.
      old_observ = tf.gather(
          self._observ.read_value(),
          list(range(1, self.history)),
          axis=1)
      with tf.control_dependencies([new_observ, old_observ]):
        with tf.control_dependencies([self._observ.assign(
            tf.concat([old_observ, new_observ], axis=1))]):
          return tf.identity(reward), tf.identity(done) 
Developer: tensorflow, Project: tensor2tensor, Lines: 22, Source: tf_atari_wrappers.py

Example 4: __init__

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def __init__(self, *args, **kwargs):
    with tf.Graph().as_default():
      self._batch_env = SimulatedBatchEnv(*args, **kwargs)

      self._actions_t = tf.placeholder(shape=(self.batch_size,), dtype=tf.int32)
      self._rewards_t, self._dones_t = self._batch_env.simulate(self._actions_t)
      with tf.control_dependencies([self._rewards_t]):
        self._obs_t = self._batch_env.observ
      self._indices_t = tf.placeholder(shape=(self.batch_size,), dtype=tf.int32)
      self._reset_op = self._batch_env.reset(
          tf.range(self.batch_size, dtype=tf.int32)
      )

      self._sess = tf.Session()
      self._sess.run(tf.global_variables_initializer())
      self._batch_env.initialize(self._sess) 
Developer: tensorflow, Project: tensor2tensor, Lines: 18, Source: simulated_batch_gym_env.py

Example 5: weight_decay_and_noise

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def weight_decay_and_noise(loss, hparams, learning_rate, var_list=None):
  """Apply weight decay and weight noise."""
  if var_list is None:
    var_list = tf.trainable_variables()

  decay_vars = [v for v in var_list]
  noise_vars = [v for v in var_list if "/body/" in v.name]

  weight_decay_loss = weight_decay(hparams.weight_decay, decay_vars)
  if hparams.weight_decay and common_layers.should_generate_summaries():
    tf.summary.scalar("losses/weight_decay", weight_decay_loss)
  weight_noise_ops = weight_noise(hparams.weight_noise, learning_rate,
                                  noise_vars)

  with tf.control_dependencies(weight_noise_ops):
    loss = tf.identity(loss)

  loss += weight_decay_loss
  return loss 
Developer: tensorflow, Project: tensor2tensor, Lines: 21, Source: optimize.py
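The loss = tf.identity(loss) line in example 5 is the standard idiom for attaching a control dependency to a tensor that already exists: the dependency scope only affects ops created inside it, so re-emitting the tensor through tf.identity makes every downstream consumer wait for the weight-noise ops. A minimal sketch of the idiom, with illustrative names not taken from the project:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

counter = tf.Variable(0)
bump = tf.assign_add(counter, 1)
loss = tf.constant(3.0)        # created *outside* the dependency scope
with tf.control_dependencies([bump]):
  loss = tf.identity(loss)     # the identity op carries the dependency

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(loss)               # evaluating loss also runs `bump`
  print(sess.run(counter))     # 1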

Example 6: _grad_sparsity

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def _grad_sparsity(self):
    """Gradient sparsity."""
    # If the sparse minibatch gradient has 10 percent of its entries
    # non-zero, its sparsity is 0.1.
    # The norm of the dense gradient, averaged over the full dataset, is
    # roughly estimated as the sparse minibatch gradient norm * sqrt(sparsity).
    # A possible extension is to correct only the sparse blocks.
    non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
    all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
    self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
    self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
    avg_op = self._moving_averager.apply([self._sparsity,])
    with tf.control_dependencies([avg_op]):
      self._sparsity_avg = self._moving_averager.average(self._sparsity)
    return avg_op 
Developer: tensorflow, Project: tensor2tensor, Lines: 18, Source: yellowfin.py

Example 7: _apply_cond

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
    """Apply conditionally if counter is zero."""
    grad_acc = self.get_slot(var, "grad_acc")

    def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
      total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
      adam_op = apply_fn(total_grad, var, *args, **kwargs)
      with tf.control_dependencies([adam_op]):
        grad_acc_to_zero_op = grad_acc.assign(
            tf.zeros_like(grad_acc), use_locking=self._use_locking)
      return tf.group(adam_op, grad_acc_to_zero_op)

    def accumulate_gradient(grad_acc, grad):
      assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
      return tf.group(assign_op)  # Strip return value

    return tf.cond(
        tf.equal(self._get_iter_variable(), 0),
        lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
        lambda: accumulate_gradient(grad_acc, grad)) 
Developer: tensorflow, Project: tensor2tensor, Lines: 22, Source: multistep_with_adamoptimizer.py

Example 8: _apply_sparse_shared

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
    beta1_power, beta2_power = self._get_beta_accumulators()
    beta1_power = tf.cast(beta1_power, var.dtype.base_dtype)
    beta2_power = tf.cast(beta2_power, var.dtype.base_dtype)
    lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype)
    lr = (lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power))
    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, "m")
    m_scaled_g_values = grad * (1 - beta1_t)
    m_t = tf.assign(m, m * beta1_t, use_locking=self._use_locking)
    with tf.control_dependencies([m_t]):
      m_t = scatter_add(m, indices, m_scaled_g_values)
    # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
    v = self.get_slot(var, "v")
    v_scaled_g_values = (grad * grad) * (1 - beta2_t)
    v_t = tf.assign(v, v * beta2_t, use_locking=self._use_locking)
    with tf.control_dependencies([v_t]):
      v_t = scatter_add(v, indices, v_scaled_g_values)
    v_sqrt = tf.sqrt(v_t)
    var_update = tf.assign_sub(
        var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
    return tf.group(*[var_update, m_t, v_t]) 
Developer: tensorflow, Project: tensor2tensor, Lines: 27, Source: multistep_with_adamoptimizer.py

Example 9: _apply_cond

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
    """Apply conditionally if counter is zero."""
    grad_acc = self.get_slot(var, "grad_acc")

    def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
      total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
      adam_op = apply_fn(total_grad, var, *args, **kwargs)
      with tf.control_dependencies([adam_op]):
        grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc),
                                              use_locking=self._use_locking)
      return tf.group(adam_op, grad_acc_to_zero_op)

    def accumulate_gradient(grad_acc, grad):
      assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
      return tf.group(assign_op)  # Strip return value

    return tf.cond(
        tf.equal(self._get_iter_variable(), 0),
        lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
        lambda: accumulate_gradient(grad_acc, grad)) 
Developer: tensorflow, Project: tensor2tensor, Lines: 22, Source: multistep_optimizer.py
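Examples 7 and 9 show the same accumulate-or-apply pattern from two optimizer variants: gradients are accumulated for n steps, and only every n-th step is the averaged gradient applied, with a control dependency guaranteeing the accumulator is zeroed only after the update has run. A condensed, hypothetical sketch of that pattern (weight, grad_t, and the SGD-style update are illustrative, not from the projects):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

n = 4
weight = tf.Variable(1.0)
grad_acc = tf.Variable(0.0)
step = tf.Variable(0)
grad_t = tf.constant(0.5)

def apply_and_reset():
  total = (grad_acc + grad_t) / float(n)      # average over the n micro-steps
  apply_op = tf.assign_sub(weight, 0.1 * total)
  with tf.control_dependencies([apply_op]):
    reset_op = tf.assign(grad_acc, 0.0)       # zero only after the update ran
  return tf.group(apply_op, reset_op)

def accumulate():
  return tf.group(tf.assign_add(grad_acc, grad_t))

train_op = tf.cond(tf.equal(tf.mod(step, n), 0), apply_and_reset, accumulate)
with tf.control_dependencies([train_op]):
  advance = tf.assign_add(step, 1)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for _ in range(2 * n):
    sess.run(advance)
  print(sess.run([weight, grad_acc]))

As in the originals, the branch functions return grouped ops from tf.cond, so the assignments execute only in the branch that is actually taken.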

Example 10: _finish

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def _finish(self, update_ops, name_scope):
    """Updates beta_power variables every n batches and incrs counter."""
    iter_ = self._get_iter_variable()
    beta1_power, beta2_power = self._get_beta_accumulators()
    with tf.control_dependencies(update_ops):
      with tf.colocate_with(iter_):

        def update_beta_op():
          update_beta1 = beta1_power.assign(
              beta1_power * self._beta1_t,
              use_locking=self._use_locking)
          update_beta2 = beta2_power.assign(
              beta2_power * self._beta2_t,
              use_locking=self._use_locking)
          return tf.group(update_beta1, update_beta2)
        maybe_update_beta = tf.cond(
            tf.equal(iter_, 0), update_beta_op, tf.no_op)
        with tf.control_dependencies([maybe_update_beta]):
          update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
                                     use_locking=self._use_locking)
    return tf.group(
        *update_ops + [update_iter, maybe_update_beta], name=name_scope) 
Developer: tensorflow, Project: tensor2tensor, Lines: 24, Source: multistep_optimizer.py

Example 11: fn_device_dependency

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def fn_device_dependency(name, device=""):
  """Add control deps for name and device."""
  key = name + "_" + device
  outs = []

  def body():
    with tf.control_dependencies(fn_device_dependency_dict()[key]):
      yield outs
      assert outs

      deps = outs
      if isinstance(outs[0], (list, tuple)):
        assert len(outs) == 1
        deps = outs[0]
      fn_device_dependency_dict()[key] = deps

  if device:
    with tf.device(device):
      return body()
  else:
    return body() 
Developer: tensorflow, Project: tensor2tensor, Lines: 23, Source: common_layers.py

Example 12: smoothing_cross_entropy_factored

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def smoothing_cross_entropy_factored(a, b, labels, confidence):
  """Memory-efficient computation of smoothing cross-entropy.

  Avoids realizing the entire logits matrix at once.

  Args:
    a: a Tensor with shape [batch, inner_dim]
    b: a Tensor with shape [vocab_size, inner_dim]
    labels: an integer Tensor with shape [batch]
    confidence: a float

  Returns:
    A Tensor with shape [batch]
  """
  num_splits = 16
  vocab_size = shape_list(b)[0]
  labels = approximate_split(labels, num_splits)
  a = approximate_split(a, num_splits)
  parts = []
  for part in range(num_splits):
    with tf.control_dependencies(parts[-1:]):
      logits = tf.matmul(a[part], b, transpose_b=True)
      parts.append(
          smoothing_cross_entropy(logits, labels[part], vocab_size, confidence))
  return tf.concat(parts, 0) 
Developer: tensorflow, Project: tensor2tensor, Lines: 27, Source: common_layers.py
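Example 12 uses control_dependencies(parts[-1:]) to chain the vocabulary splits, so each large logits block is computed only after the previous one has been consumed, keeping peak memory low. A stripped-down sketch of the same serialization trick, with hypothetical shapes and names rather than the tensor2tensor ones:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

a_parts = tf.split(tf.random_uniform([64, 128]), 4, axis=0)
b = tf.random_uniform([50000, 128])
results = []
for part in a_parts:
  # results[-1:] is [] on the first pass (no extra dependency); afterwards
  # each matmul waits for the previous chunk, so only one large [16, 50000]
  # logits block needs to be live at a time.
  with tf.control_dependencies(results[-1:]):
    logits = tf.matmul(part, b, transpose_b=True)
    results.append(tf.reduce_max(logits, axis=-1))
out = tf.concat(results, axis=0)

with tf.Session() as sess:
  print(sess.run(out).shape)  # (64,)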

Example 13: testRead

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def testRead(self):
    batch_size = 2
    key_depth = 3
    val_depth = 5
    memory_size = 4
    window_size = 6
    x_depth = 10
    memory = transformer_memory.TransformerMemory(
        batch_size, key_depth, val_depth, memory_size)
    x = tf.random_uniform([batch_size, window_size, x_depth], minval=1.0)
    vals = tf.random_uniform([batch_size, memory_size, val_depth], minval=1.0)
    logits = tf.random_uniform([batch_size, memory_size], minval=1.0)
    update_op = memory.set(vals, logits)
    with tf.control_dependencies([update_op]):
      logits, retrieved_values = memory.read(x)
    with self.test_session() as session:
      session.run(tf.global_variables_initializer())
      logits_values, values = session.run([logits, retrieved_values])
    self.assertAllEqual([batch_size, window_size, memory_size],
                        logits_values.shape)
    self.assertAllEqual([batch_size, window_size, val_depth], values.shape) 
Developer: tensorflow, Project: tensor2tensor, Lines: 23, Source: transformer_memory_test.py

Example 14: testWrite

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def testWrite(self):
    batch_size = 2
    key_depth = 3
    val_depth = 5
    memory_size = 4
    window_size = 6
    x_depth = 10
    memory = transformer_memory.TransformerMemory(
        batch_size, key_depth, val_depth, memory_size)
    x = tf.random_uniform([batch_size, window_size, x_depth], minval=1.0)
    vals = tf.random_uniform([batch_size, memory_size, val_depth], minval=1.0)
    logits = tf.random_uniform([batch_size, memory_size], minval=1.0)
    update_op = memory.set(vals, logits)
    with tf.control_dependencies([update_op]):
      logits, _ = memory.read(x)
      write_op = memory.write(x, logits)
    mem_vals, mem_logits = memory.get()
    with self.test_session() as session:
      session.run(tf.global_variables_initializer())
      session.run(write_op)
      updated_vals, updated_logits = session.run([mem_vals, mem_logits])
    self.assertAllEqual([batch_size, memory_size, val_depth],
                        updated_vals.shape)
    self.assertAllEqual([batch_size, memory_size], updated_logits.shape) 
Developer: tensorflow, Project: tensor2tensor, Lines: 26, Source: transformer_memory_test.py

Example 15: testReset

# Required import: from tensorflow.compat import v1 as tf
# Alternatively: from tensorflow.compat.v1 import control_dependencies
def testReset(self):
    batch_size = 2
    key_depth = 3
    val_depth = 5
    memory_size = 4
    memory = transformer_memory.TransformerMemory(
        batch_size, key_depth, val_depth, memory_size)
    vals = tf.random_uniform([batch_size, memory_size, val_depth], minval=1.0)
    logits = tf.random_uniform([batch_size, memory_size], minval=1.0)
    update_op = memory.set(vals, logits)
    reset_op = memory.reset([1])
    mem_vals, mem_logits = memory.get()
    assert_op1 = tf.assert_equal(mem_vals[0], vals[0])
    assert_op2 = tf.assert_equal(mem_logits[0], logits[0])
    with tf.control_dependencies([assert_op1, assert_op2]):
      all_zero1 = tf.reduce_sum(tf.abs(mem_vals[1]))
      all_zero2 = tf.reduce_sum(tf.abs(mem_logits[1]))
    with self.test_session() as session:
      session.run(tf.global_variables_initializer())
      session.run(update_op)
      session.run(reset_op)
      zero1, zero2 = session.run([all_zero1, all_zero2])
    self.assertAllEqual(0, zero1)
    self.assertAllEqual(0, zero2) 
Developer: tensorflow, Project: tensor2tensor, Lines: 26, Source: transformer_memory_test.py


Note: The tensorflow.compat.v1.control_dependencies examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before redistributing or reusing the code, and do not reproduce this compilation without permission.