

Python v1.no_op Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.no_op method in Python. If you are wondering what v1.no_op does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples for the tensorflow.compat.v1 module that the method belongs to.


The following 15 code examples demonstrate the v1.no_op method, sorted by popularity by default.
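Before the examples, here is a minimal standalone sketch (illustrative only; not taken from any of the projects below) showing the typical role of tf.no_op: it creates an op that does nothing when run, which makes it a convenient placeholder wherever the graph requires an op.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

is_training = False
counter = tf.Variable(0, name='counter')

# When not training, substitute a do-nothing op for the update.
train_op = counter.assign_add(1) if is_training else tf.no_op()

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)        # runs, but changes nothing while is_training is False
  print(sess.run(counter))  # 0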

Example 1: weight_noise

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def weight_noise(noise_rate, learning_rate, var_list):
  """Apply weight noise to vars in var_list."""
  if not noise_rate:
    return [tf.no_op()]

  tf.logging.info("Applying weight noise scaled by learning rate, "
                  "noise_rate: %0.5f", noise_rate)

  noise_ops = []

  for v in var_list:
    with tf.device(v.device):  # pylint: disable=protected-access
      scale = noise_rate * learning_rate * 0.001
      if common_layers.should_generate_summaries():
        tf.summary.scalar("weight_noise_scale", scale)
      noise = tf.truncated_normal(v.shape) * scale
      noise_op = v.assign_add(noise)
      noise_ops.append(noise_op)

  return noise_ops 
Developer: tensorflow | Project: tensor2tensor | Lines: 22 | Source: optimize.py
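A hypothetical usage sketch for the function above; model_vars, the rates, and sess are illustrative placeholders rather than part of the original project:

noise_ops = weight_noise(noise_rate=0.01, learning_rate=0.01,
                         var_list=model_vars)
apply_noise = tf.group(*noise_ops)  # group the per-variable assign_add ops
sess.run(apply_noise)               # inject noise, e.g. once per training step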

Example 2: setup_optimizer

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def setup_optimizer(self):
    """Instantiates learning rate, decay op and train_op among others."""
    # If not training, don't need to add optimizer to the graph.
    if not self.is_training:
      self.train_op = tf.no_op()
      self.learning_rate = tf.no_op()
      return

    self.learning_rate = tf.Variable(
        self.hparams.learning_rate,
        name='learning_rate',
        trainable=False,
        dtype=tf.float32)

    # FIXME 0.5 -> hparams.decay_rate
    self.decay_op = tf.assign(self.learning_rate, 0.5 * self.learning_rate)
    self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
    self.train_op = self.optimizer.minimize(self.loss) 
Developer: magenta | Project: magenta | Lines: 20 | Source: lib_graph.py

Example 3: testPS

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def testPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)

    self.assertDeviceEqual(deploy_config.clone_device(0),
                           '/job:worker/device:GPU:0')
    self.assertEqual(deploy_config.clone_scope(0), '')
    self.assertDeviceEqual(deploy_config.optimizer_device(),
                           '/job:worker/device:CPU:0')
    self.assertDeviceEqual(deploy_config.inputs_device(),
                           '/job:worker/device:CPU:0')
    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())
    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '') 
Developer: tensorflow | Project: models | Lines: 25 | Source: model_deploy_test.py

Example 4: testVariablesPS

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def testVariablesPS(self):
    deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)

    with tf.device(deploy_config.variables_device()):
      a = tf.Variable(0)
      b = tf.Variable(0)
      c = tf.no_op()
      d = slim.variable('a', [],
                        caching_device=deploy_config.caching_device())

    self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(a.device, a.value().device)
    self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
    self.assertDeviceEqual(b.device, b.value().device)
    self.assertDeviceEqual(c.device, '')
    self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
    self.assertDeviceEqual(d.value().device, '') 
Developer: tensorflow | Project: models | Lines: 19 | Source: model_deploy_test.py

Example 5: run_benchmark

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def run_benchmark(bench_cnn, num_iters):
  """Runs the all-reduce benchmark.

  Args:
    bench_cnn: The BenchmarkCNN where params, the variable manager, and other
      attributes are obtained.
    num_iters: Number of iterations to run the all-reduce for.

  Raises:
    ValueError: Invalid params of bench_cnn.
  """
  if bench_cnn.params.variable_update != 'replicated':
    raise ValueError('--variable_update=replicated must be specified to use '
                     'the all-reduce benchmark')
  if bench_cnn.params.variable_consistency == 'relaxed':
    raise ValueError('--variable_consistency=relaxed is not supported')

  benchmark_op = build_graph(bench_cnn.raw_devices,
                             get_var_shapes(bench_cnn.model),
                             bench_cnn.variable_mgr, num_iters)
  init_ops = [
      tf.global_variables_initializer(),
      bench_cnn.variable_mgr.get_post_init_ops()
  ]
  loss_op = tf.no_op()

  if bench_cnn.graph_file:
    path, filename = os.path.split(bench_cnn.graph_file)
    as_text = filename.endswith('txt')
    log_fn('Writing GraphDef as %s to %s' % (
        'text' if as_text else 'binary', bench_cnn.graph_file))
    tf.train.write_graph(tf.get_default_graph().as_graph_def(add_shapes=True),
                         path, filename, as_text)

  run_graph(benchmark_op, bench_cnn, init_ops, loss_op)


# TODO(reedwm): Reduce redundancy with tf_cnn_benchmarks 
Developer: tensorflow | Project: benchmarks | Lines: 40 | Source: all_reduce_benchmark.py

Example 6: log_deferred_tensor_value

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def log_deferred_tensor_value(self, key, tensor_value, global_step,
                                stack_offset=2, every_n=1):
    """Logs the value of a tensor when the graph is run."""
    caller = '(%s)' % mlperf_log.get_caller(stack_offset, self._root_dir)
    def create_print_op():
      return tf.print(_MLPERF_LOG_PREFIX, self.mlperf_model_name,
                      tf.timestamp(), caller, key,
                      ': { "deferred": true, "value":', tensor_value, '}',
                      output_stream=sys.stdout)
    maybe_print = tf.cond(tf.equal(global_step % every_n, 0), create_print_op,
                          tf.no_op)
    with tf.control_dependencies([maybe_print]):
      return tf.identity(tensor_value) 
Developer: tensorflow | Project: benchmarks | Lines: 15 | Source: mlperf.py
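The example above passes tf.no_op as the false branch of tf.cond, so nothing is executed on steps where no logging is wanted. A minimal standalone sketch of the same idea (step, value, and the every-10-steps condition are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

step = tf.placeholder(tf.int64, shape=[])
value = tf.constant(3.14)

def print_op():
  return tf.print('step', step, 'value', value)

# Print only every 10 steps; otherwise run a no-op.
maybe_print = tf.cond(tf.equal(step % 10, 0), print_op, tf.no_op)
with tf.control_dependencies([maybe_print]):
  out = tf.identity(value)

with tf.Session() as sess:
  for i in range(25):
    sess.run(out, feed_dict={step: i})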

Example 7: _test_image_producer

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def _test_image_producer(self, batch_group_size, put_slower_than_get):
    # We use the variable x to simulate a staging area of images. x represents
    # the number of batches in the staging area.
    x = tf.Variable(0, dtype=tf.int32)
    if put_slower_than_get:
      put_dep = self._slow_tensorflow_op()
      get_dep = tf.no_op()
    else:
      put_dep = tf.no_op()
      get_dep = self._slow_tensorflow_op()
    with tf.control_dependencies([put_dep]):
      put_op = x.assign_add(batch_group_size, use_locking=True)
    with tf.control_dependencies([get_dep]):
      get_op = x.assign_sub(1, use_locking=True)
    with self.test_session() as sess:
      sess.run(tf.variables_initializer([x]))
      image_producer = cnn_util.ImageProducer(sess, put_op, batch_group_size,
                                              use_python32_barrier=False)
      image_producer.start()
      for _ in range(5 * batch_group_size):
        sess.run(get_op)
        # We assert x is nonnegative, to ensure image_producer never causes
        # an unstage op to block. We assert x is at most 2 * batch_group_size,
        # to ensure it doesn't use too much memory by storing too many batches
        # in the staging area.
        self.assertGreaterEqual(sess.run(x), 0)
        self.assertLessEqual(sess.run(x), 2 * batch_group_size)
        image_producer.notify_image_consumption()
        self.assertGreaterEqual(sess.run(x), 0)
        self.assertLessEqual(sess.run(x), 2 * batch_group_size)

      image_producer.done()
      time.sleep(0.1)
      self.assertGreaterEqual(sess.run(x), 0)
      self.assertLessEqual(sess.run(x), 2 * batch_group_size) 
Developer: tensorflow | Project: benchmarks | Lines: 37 | Source: cnn_util_test.py

Example 8: add_sync_queues_and_barrier

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def add_sync_queues_and_barrier(self, name_prefix, enqueue_after_list):
    """Adds ops to enqueue on all worker queues.

    Args:
      name_prefix: prefixed for the shared_name of ops.
      enqueue_after_list: control dependency from ops.

    Returns:
      An op that should be used as a control dependency before starting the next step.
    """
    self.sync_queue_counter += 1
    with tf.device(self.sync_queue_devices[(
        self.sync_queue_counter % len(self.sync_queue_devices))]):
      sync_queues = [
          tf.FIFOQueue(self.num_workers, [tf.bool], shapes=[[]],
                       shared_name='%s%s' % (name_prefix, i))
          for i in range(self.num_workers)]
      queue_ops = []
      # For each other worker, add an entry in a queue, signaling that it can
      # finish this step.
      token = tf.constant(False)
      with tf.control_dependencies(enqueue_after_list):
        for i, q in enumerate(sync_queues):
          if i == self.task_index:
            queue_ops.append(tf.no_op())
          else:
            queue_ops.append(q.enqueue(token))

      # Drain tokens off queue for this worker, one for each other worker.
      queue_ops.append(
          sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))

      return tf.group(*queue_ops) 
Developer: tensorflow | Project: benchmarks | Lines: 35 | Source: benchmark_cnn.py

Example 9: _reset_non_empty

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def _reset_non_empty(self, indices):
    """Reset the batch of environments.

    Args:
      indices: The batch indices of the environments to reset; defaults to all.

    Returns:
      Batch tensor of the new observations.
    """
    reset_video_op = tf.cond(
        self._video_condition,
        lambda: tf.py_func(self._video_reset_writer, [], []),
        tf.no_op)
    with tf.control_dependencies([reset_video_op]):
      inc_op = tf.assign_add(self._episode_counter, 1)
      with tf.control_dependencies([self.history_buffer.reset(indices),
                                    inc_op]):
        initial_frame_dump_op = tf.cond(
            self._video_condition,
            lambda: tf.py_func(self._video_dump_frames,  # pylint: disable=g-long-lambda
                               [self.history_buffer.get_all_elements()], []),
            tf.no_op)
        observ_assign_op = self._observ.assign(
            self.history_buffer.get_all_elements()[:, -1, ...])
        with tf.control_dependencies([observ_assign_op, initial_frame_dump_op]):
          reset_model_op = tf.assign(self._reset_model, tf.constant(1.0))
          with tf.control_dependencies([reset_model_op]):
            return tf.gather(self._observ.read_value(), indices) 
Developer: tensorflow | Project: tensor2tensor | Lines: 30 | Source: simulated_batch_env.py

Example 10: sparsify

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def sparsify(sess, eval_model, pruning_strategy, pruning_params):
  """Prune the weights of a model and evaluate."""
  weights = tf.trainable_variables()

  def should_prune(name):
    """Whether to prune a weight or not."""
    in_whitelist = not pruning_params.white_list or any(
        e in name for e in pruning_params.white_list)
    in_blacklist = any(e in name for e in pruning_params.black_list)

    if pruning_params.white_list and not in_whitelist:
      return False
    elif in_blacklist:
      return False

    return True

  weights = [w for w in weights if should_prune(w.name)]
  tf.logging.info("Pruning weights: %s" % weights)
  unpruned_weights = sess.run(weights)

  reset_op = tf.no_op()
  for w, ow in zip(weights, unpruned_weights):
    op = tf.assign(w, ow)
    reset_op = tf.group(reset_op, op)

  for sparsity in pruning_params.sparsities:
    set_weights_op = tf.no_op()
    for w in weights:
      op = tf.assign(w, pruning_strategy(w, sparsity))
      set_weights_op = tf.group(set_weights_op, op)
    sess.run(set_weights_op)

    acc = eval_model()
    tf.logging.info("\tPruning to sparsity = %f: acc = %f" % (sparsity, acc))
    sess.run(reset_op) 
Developer: tensorflow | Project: tensor2tensor | Lines: 38 | Source: pruning_utils.py
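Example 10 also illustrates a common idiom: seeding an op-accumulation loop with tf.no_op() so that tf.group always starts from a valid op, even if the loop body never executes. A reduced sketch of that idiom with illustrative variables:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

vars_to_reset = [tf.Variable(1.0), tf.Variable(2.0)]
snapshot = [1.0, 2.0]

reset_op = tf.no_op()  # still a valid op even if vars_to_reset is empty
for v, value in zip(vars_to_reset, snapshot):
  reset_op = tf.group(reset_op, tf.assign(v, value))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(reset_op)  # restores each variable to its snapshot value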

Example 11: _finish

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def _finish(self, update_ops, name_scope):
    """Updates beta_power variables every n batches and incrs counter."""
    iter_ = self._get_iter_variable()
    beta1_power, beta2_power = self._get_beta_accumulators()
    with tf.control_dependencies(update_ops):
      with tf.colocate_with(iter_):

        def update_beta_op():
          update_beta1 = beta1_power.assign(
              beta1_power * self._beta1_t, use_locking=self._use_locking)
          update_beta2 = beta2_power.assign(
              beta2_power * self._beta2_t, use_locking=self._use_locking)
          return tf.group(update_beta1, update_beta2)

        maybe_update_beta = tf.cond(
            tf.equal(iter_, 0), update_beta_op, tf.no_op)
        with tf.control_dependencies([maybe_update_beta]):
          # TODO(cuong): It is suboptimal here because we have to cast twice
          # (float to int, and then int to float)
          update_iter = iter_.assign(
              tf.cast(
                  tf.mod(tf.cast(iter_ + 1.0, tf.int32), self._n_t),
                  tf.float32),
              use_locking=self._use_locking)
    return tf.group(
        *update_ops + [update_iter, maybe_update_beta], name=name_scope) 
Developer: tensorflow | Project: tensor2tensor | Lines: 28 | Source: multistep_with_adamoptimizer.py

Example 12: reset_internal_states_ops

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def reset_internal_states_ops(self):
    if not self.hparams.concat_internal_states:
      return [[tf.no_op()]]
    zeros = [[tf.zeros_like(s)] for s in self.internal_states[0]]
    return self.save_internal_states_ops(zeros) 
Developer: tensorflow | Project: tensor2tensor | Lines: 7 | Source: basic_stochastic.py

Example 13: load_internal_states_ops

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def load_internal_states_ops(self):
    if not self.hparams.concat_internal_states:
      return [[tf.no_op()]]
    ops = [[s.read_value()] for s in self.internal_states[0]]
    return ops 
Developer: tensorflow | Project: tensor2tensor | Lines: 7 | Source: basic_stochastic.py

Example 14: save_internal_states_ops

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def save_internal_states_ops(self, internal_states):
    if not self.hparams.concat_internal_states:
      return [[tf.no_op()]]
    ops = [[tf.assign(x, y)]
           for x, y in zip(self.internal_states[0], internal_states[0])]
    return ops 
Developer: tensorflow | Project: tensor2tensor | Lines: 8 | Source: basic_stochastic.py

Example 15: reset_internal_states_ops

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import no_op [as alias]
def reset_internal_states_ops(self):
    """Resets internal states to initial values."""
    return [[tf.no_op()]] 
Developer: tensorflow | Project: tensor2tensor | Lines: 5 | Source: base.py


Note: The tensorflow.compat.v1.no_op examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors; please consult the corresponding project's license before distributing or using it. Do not reproduce without permission.