

Python v1.device Method Code Examples

This article collects typical code examples of the tensorflow.compat.v1.device method in Python. If you are unsure what v1.device does, how to call it, or what it looks like in real code, the curated method examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module.


The following presents 15 code examples of the v1.device method, sorted by popularity by default.

Example 1: build_all_reduce_device_prefixes

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def build_all_reduce_device_prefixes(job_name, num_tasks):
  """Build list of device prefix names for all_reduce.

  Args:
    job_name: 'worker', 'ps' or 'localhost'.
    num_tasks: number of jobs across which device names should be generated.

  Returns:
     A list of device name prefix strings. Each element spells out the full
     host name without adding the device.
     e.g. '/job:worker/task:0'
  """
  if job_name != 'localhost':
    return ['/job:%s/task:%d' % (job_name, d) for d in range(0, num_tasks)]
  else:
    assert num_tasks == 1
    return ['/job:%s' % job_name] 
Author: tensorflow | Project: benchmarks | Lines: 19 | Source: allreduce.py
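
A quick standalone usage sketch (the argument values are illustrative, not from the project):

# Usage sketch for build_all_reduce_device_prefixes; runs without TensorFlow.
prefixes = build_all_reduce_device_prefixes('worker', num_tasks=3)
print(prefixes)  # ['/job:worker/task:0', '/job:worker/task:1', '/job:worker/task:2']

prefixes = build_all_reduce_device_prefixes('localhost', num_tasks=1)
print(prefixes)  # ['/job:localhost']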

Example 2: collective_group_key

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def collective_group_key(devices):
  """Returns a group key for the set of devices.

  Args:
    devices: list of strings naming devices in a collective group.

  Returns:
    int key uniquely identifying the set of device names.
  """
  global _group_key
  global _group_key_table
  parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
  names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
  concat = ','.join(names)
  if concat not in _group_key_table:
    new_key = _group_key
    _group_key += 1
    _group_key_table[concat] = new_key
  rv = _group_key_table[concat]
  return rv 
Author: tensorflow | Project: benchmarks | Lines: 22 | Source: allreduce.py
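
A hedged usage sketch. `pydev` is tensorflow.python.framework.device (that is how allreduce.py imports it), and `_group_key`/`_group_key_table` are module-level globals; their initial values below are assumptions for a standalone run:

from tensorflow.python.framework import device as pydev

_group_key = 1         # assumed initial values of the module globals
_group_key_table = {}

key_a = collective_group_key(['/job:worker/task:0/device:GPU:0',
                              '/job:worker/task:0/device:GPU:1'])
key_b = collective_group_key(['/job:worker/task:0/device:GPU:1',
                              '/job:worker/task:0/device:GPU:0'])
assert key_a == key_b  # names are sorted first, so device ordering does not matter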

Example 3: unpack_grad_tuple

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def unpack_grad_tuple(gv, gpt):
  """Unpack a previously packed collection of gradient tensors.

  Args:
    gv: A (grad, var) pair to be unpacked.
    gpt: A GradPackTuple describing the packing operation that produced gv.

  Returns:
    A list of (grad, var) pairs corresponding to the values that were
     originally packed into gv, maybe following subsequent operations like
     reduction.
  """
  elt_widths = [x.num_elements() for x in gpt.shapes]
  with tf.device(gv[0][0].device):
    with tf.name_scope('unpack'):
      splits = tf.split(gv[0], elt_widths)
      unpacked_gv = []
      for idx, s in enumerate(splits):
        unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]), gpt.vars[idx]))
  return unpacked_gv 
Author: tensorflow | Project: benchmarks | Lines: 22 | Source: allreduce.py
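
A hedged round-trip sketch: pack two gradients into one flat tensor, then recover them. `GradPackTuple` is assumed here to be the namedtuple('GradPackTuple', 'indices vars shapes') that allreduce.py defines:

import collections
import tensorflow.compat.v1 as tf

GradPackTuple = collections.namedtuple('GradPackTuple', 'indices vars shapes')

var_a = tf.Variable(tf.zeros([2, 3]))
var_b = tf.Variable(tf.zeros([4]))
grad_a = tf.ones([2, 3])
grad_b = tf.ones([4])

# Pack: flatten and concatenate the gradients.
packed = tf.concat([tf.reshape(grad_a, [-1]), tf.reshape(grad_b, [-1])], axis=0)
gpt = GradPackTuple(indices=[0, 1], vars=[var_a, var_b],
                    shapes=[grad_a.shape, grad_b.shape])

# Unpack: yields [(grad, var_a), (grad, var_b)] with the original shapes.
unpacked_gv = unpack_grad_tuple((packed, None), gpt)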

Example 4: testVariablesSetDeviceMobileModel

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def testVariablesSetDeviceMobileModel(self):
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000
    inputs = tf.random_uniform((batch_size, height, width, 3))
    tf.train.create_global_step()
    # Force all Variables to reside on the device.
    with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
      with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
        nasnet.build_nasnet_mobile(inputs, num_classes)
    with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
      with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
        nasnet.build_nasnet_mobile(inputs, num_classes)
    for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
      self.assertDeviceEqual(v.device, '/cpu:0')
    for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
      self.assertDeviceEqual(v.device, '/gpu:0') 
Author: tensorflow | Project: benchmarks | Lines: 19 | Source: nasnet_test.py
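
The pattern this test exercises, reduced to its core: any variable created inside a tf.device context is pinned to that device. A minimal sketch, run in v1 graph mode:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
  w = tf.get_variable('w', shape=[3, 3])
print(w.device)  # names the CPU, e.g. '/device:CPU:0'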

Example 5: defer_single_device_tensors

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def defer_single_device_tensors(device_tensors):
  """Defer tensors (gradients in this case) from a single device.

  Arguments:
    device_tensors: A list of gradient tensors from a single device to defer.

  Returns:
    deferred_tensors: A list of tensors deferred for one step.
    put_ops: A list of ops that put the input tensors in the StagingAreas.
      These must be run every step that `deferred_tensors` is run.
    warmup_ops: Warmup ops that should be called before the first step. Puts
      zero tensors into the StagingArea.
  """
  put_ops = []
  warmup_ops = []
  deferred_tensors = []

  for tensor in device_tensors:
    deferred_tensor, put_op, warmup_op = _defer_tensor(tensor)
    deferred_tensors.append(deferred_tensor)
    put_ops.append(put_op)
    warmup_ops.append(warmup_op)

  return deferred_tensors, put_ops, warmup_ops 
Author: tensorflow | Project: benchmarks | Lines: 26 | Source: batch_allreduce.py
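
`_defer_tensor` is not shown in this snippet. A hedged sketch of what such a helper can look like, consistent with the docstring above; it uses data_flow_ops.StagingArea so that reads lag writes by one step, but the real implementation in batch_allreduce.py may differ:

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import data_flow_ops

def _defer_tensor(tensor):
  """Stage `tensor` so that reads lag writes by one step."""
  stage = data_flow_ops.StagingArea([tensor.dtype], [tensor.shape])
  put_op = stage.put([tensor])  # run every step
  warmup_op = stage.put(        # run once before the first step
      [tf.zeros(tensor.shape, dtype=tensor.dtype)])
  (deferred,) = stage.get()
  return deferred, put_op, warmup_op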

Example 6: trainable_variables_on_device

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def trainable_variables_on_device(self, rel_device_num, abs_device_num,
                                    writable):
    """Return the set of trainable variables on the specified device.

    Args:
      rel_device_num: local worker device index.
      abs_device_num: global graph device index.
      writable: whether the returned variables are writable or read-only.

    Returns:
      Return the set of trainable variables on the specified device.
    """
    del abs_device_num
    params_refs = tf.trainable_variables()
    if writable:
      return params_refs
    params = []
    for param in params_refs:
      var_name = param.name.split(':')[0]
      _, var_get_op = self.variable_mgr.staging_vars_on_devices[rel_device_num][
          var_name]
      params.append(var_get_op)
    return params 
Author: tensorflow | Project: benchmarks | Lines: 25 | Source: variable_mgr_util.py

Example 7: trainable_variables_on_device

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def trainable_variables_on_device(self,
                                    rel_device_num,
                                    abs_device_num,
                                    writable=False):
    """Return the set of trainable variables on device.

    Args:
      rel_device_num: local worker device index.
      abs_device_num: global graph device index.
      writable: whether to get a reference to the underlying variable.

    Returns:
      The set of trainable variables on the specified device.
    """
    del rel_device_num, writable
    if self.each_tower_has_variables():
      params = [
          v for v in tf.trainable_variables()
          if v.name.startswith('v%s/' % abs_device_num)
      ]
    else:
      params = tf.trainable_variables()
    return params 
Author: tensorflow | Project: benchmarks | Lines: 25 | Source: variable_mgr.py
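
A minimal sketch of the naming convention the startswith('v%s/' % abs_device_num) filter relies on: each tower's variables live under a scope named v<device_num>. The scope and variable names here are illustrative:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

for tower in range(2):
  with tf.variable_scope('v%d' % tower):
    tf.get_variable('weights', shape=[4])

tower0 = [v for v in tf.trainable_variables() if v.name.startswith('v0/')]
print([v.name for v in tower0])  # ['v0/weights:0']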

Example 8: append_apply_gradients_ops

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def append_apply_gradients_ops(self, gradient_state, opt, grads, training_ops,
                                 loss_scale_params):
    device_grads = gradient_state  # From 2nd result of preprocess_device_grads.

    def get_apply_gradients_ops_func():
      """Returns a list of ops for updating gradients."""
      apply_gradients_ops = []
      # For each variable, apply the combined gradients for this server on
      # the parameter server, and then wait for all other servers to do this.
      for i, (g, v) in enumerate(grads):
        apply_gradient_op = opt.apply_gradients([(g, v)])
        barrier = self.benchmark_cnn.add_sync_queues_and_barrier(
            'replicate_variable_%s' % i, [apply_gradient_op])
        with tf.control_dependencies([barrier]):
          with tf.device(self.benchmark_cnn.cpu_device):
            updated_value = v.read_value()
            for my_d in range(len(self.benchmark_cnn.devices)):
              apply_gradients_ops.append(
                  device_grads[my_d][i][1].assign(updated_value))
      return apply_gradients_ops

    variable_mgr_util.append_gradients_with_loss_scale(
        training_ops, get_apply_gradients_ops_func, loss_scale_params,
        self.grad_has_inf_nan) 
Author: tensorflow | Project: benchmarks | Lines: 26 | Source: variable_mgr.py
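
A stripped-down sketch of the broadcast pattern in the loop above: apply the gradient to the master copy, then, once that has happened, read the updated value on the CPU and assign it into each device's replica. All names below are illustrative; the sync barrier and benchmark_cnn plumbing are omitted:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

v = tf.get_variable('v', initializer=1.0)  # master copy
replicas = [tf.get_variable('v_gpu%d' % d, initializer=0.0) for d in range(2)]

opt = tf.train.GradientDescentOptimizer(0.1)
apply_op = opt.apply_gradients([(tf.constant(1.0), v)])

with tf.control_dependencies([apply_op]):  # wait for the update
  with tf.device('/cpu:0'):
    updated = v.read_value()
    copy_ops = [r.assign(updated) for r in replicas]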

Example 9: _config_benchmark_logger

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def _config_benchmark_logger(self):
    """Config the model garden benchmark logger."""
    model_benchmark_logger = None
    if self.params.benchmark_log_dir is not None:
      try:
        from official.r1.utils.logs import logger as models_logger  # pylint: disable=g-import-not-at-top
      except ImportError:
        tf.logging.fatal('Please add tensorflow/models to the PYTHONPATH '
                         'in order to use BenchmarkLogger. Configured '
                         'benchmark_log_dir: %s'
                         % self.params.benchmark_log_dir)
        raise
      model_benchmark_logger = models_logger.BenchmarkFileLogger(
          self.params.benchmark_log_dir)
    self.benchmark_logger = model_benchmark_logger

  # TODO(laigd): this changes the global device list which is used everywhere,
  # consider refactoring it. 
Author: tensorflow | Project: benchmarks | Lines: 20 | Source: benchmark_cnn.py

Example 10: _get_params_info

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def _get_params_info(self):
    """Get the common parameters info for the benchmark run.

    Returns:
      A dict of processed parameters.
    """
    dataset_name = self.dataset.name
    if self.dataset.use_synthetic_gpu_inputs():
      dataset_name += ' (synthetic)'
    single_session = self.params.variable_update == 'distributed_all_reduce'
    if single_session:
      device_list = self.raw_devices_across_tasks()
    elif self.params.variable_update == 'horovod':
      device_list = ['horovod/%s:%d' % (self.params.device, idx)
                     for idx in range(self.num_workers)]
    else:
      device_list = self.raw_devices
    return {
        'dataset_name': dataset_name,
        'single_session': single_session,
        'device_list': device_list,
    }
Author: tensorflow | Project: benchmarks | Lines: 23 | Source: benchmark_cnn.py

Example 11: weight_noise

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def weight_noise(noise_rate, learning_rate, var_list):
  """Apply weight noise to vars in var_list."""
  if not noise_rate:
    return [tf.no_op()]

  tf.logging.info("Applying weight noise scaled by learning rate, "
                  "noise_rate: %0.5f", noise_rate)

  noise_ops = []

  for v in var_list:
    with tf.device(v.device):
      scale = noise_rate * learning_rate * 0.001
      if common_layers.should_generate_summaries():
        tf.summary.scalar("weight_noise_scale", scale)
      noise = tf.truncated_normal(v.shape) * scale
      noise_op = v.assign_add(noise)
      noise_ops.append(noise_op)

  return noise_ops 
Author: tensorflow | Project: tensor2tensor | Lines: 22 | Source: optimize.py
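
A usage sketch, assuming the surrounding optimize.py module context (so common_layers is importable); the rate values are illustrative:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

w = tf.get_variable('w', shape=[10, 10])
noise_ops = weight_noise(noise_rate=1.0, learning_rate=0.1, var_list=[w])

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(noise_ops)  # perturbs w in place, scale = 1.0 * 0.1 * 0.001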

Example 12: weight_decay

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def weight_decay(decay_rate, var_list, skip_biases=True):
  """Apply weight decay to vars in var_list."""
  if not decay_rate:
    return 0.

  tf.logging.info("Applying weight decay, decay_rate: %0.5f", decay_rate)

  weight_decays = []
  for v in var_list:
    # Weight decay.
    # This is a heuristic way to detect biases that works for main tf.layers.
    is_bias = len(v.shape.as_list()) == 1 and v.name.endswith("bias:0")
    if not (skip_biases and is_bias):
      with tf.device(v.device):
        v_loss = tf.nn.l2_loss(v)
      weight_decays.append(v_loss)

  return tf.add_n(weight_decays) * decay_rate 
Author: tensorflow | Project: tensor2tensor | Lines: 20 | Source: optimize.py
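
A usage sketch showing the bias heuristic in action; variable names are illustrative:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

kernel = tf.get_variable('dense/kernel', shape=[8, 4])
bias = tf.get_variable('dense/bias', shape=[4])  # rank 1, name ends in 'bias'

# Only `kernel` contributes: `bias` is skipped by the heuristic above.
decay_loss = weight_decay(1e-4, [kernel, bias])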

Example 13: cast_like

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def cast_like(x, y):
  """Cast x to y's dtype, if necessary."""
  x = tf.convert_to_tensor(x)
  y = tf.convert_to_tensor(y)

  if x.dtype.base_dtype == y.dtype.base_dtype:
    return x

  cast_x = tf.cast(x, y.dtype)
  if cast_x.device != x.device:
    x_name = "(eager Tensor)"
    try:
      x_name = x.name
    except AttributeError:
      pass
    tf.logging.warning("Cast for %s may induce copy from '%s' to '%s'", x_name,
                       x.device, cast_x.device)
  return cast_x 
Author: tensorflow | Project: tensor2tensor | Lines: 20 | Source: common_layers.py
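
A usage sketch (runs in eager or graph mode):

import tensorflow.compat.v1 as tf

x = tf.constant([1, 2, 3])   # int32
y = tf.constant([0.5, 1.5])  # float32
x_cast = cast_like(x, y)     # float32 tensor [1., 2., 3.]
y_same = cast_like(y, y)     # dtypes already match: returned unchanged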

Example 14: __init__

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def __init__(self, hps, gpu_mode=True, reuse=False):
    """Initializer for the SketchRNN model.

    Args:
       hps: a HParams object containing model hyperparameters.
       gpu_mode: a boolean that when True, uses GPU mode.
       reuse: a boolean that when True, attempts to reuse variables.
    """
    self.hps = hps
    with tf.variable_scope('vector_rnn', reuse=reuse):
      if not gpu_mode:
        with tf.device('/cpu:0'):
          tf.logging.info('Model using cpu.')
          self.build_model(hps)
      else:
        tf.logging.info('Model using gpu.')
        self.build_model(hps) 
Author: magenta | Project: magenta | Lines: 19 | Source: model.py

Example 15: run

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import device [as alias]
def run(config):
  """Entry point to run training."""
  init_data_normalizer(config)

  stage_ids = train_util.get_stage_ids(**config)
  if not config['train_progressive']:
    stage_ids = list(stage_ids)[-1:]

  # Train one stage at a time
  for stage_id in stage_ids:
    batch_size = train_util.get_batch_size(stage_id, **config)
    tf.reset_default_graph()
    with tf.device(tf.train.replica_device_setter(config['ps_tasks'])):
      model = lib_model.Model(stage_id, batch_size, config)
      model.add_summaries()
      print('Variables:')
      for v in tf.global_variables():
        print('\t', v.name, v.get_shape().as_list())
      logging.info('Calling train.train')
      train_util.train(model, **config) 
Author: magenta | Project: magenta | Lines: 22 | Source: gansynth_train.py
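
A minimal sketch of what the tf.train.replica_device_setter context does here: with ps_tasks > 0 it places variables round-robin across parameter-server tasks and other ops on the worker; with ps_tasks == 0 it is effectively a no-op. The task count below is illustrative:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

with tf.device(tf.train.replica_device_setter(2)):  # 2 ps tasks
  a = tf.get_variable('a', shape=[])  # placed on /job:ps/task:0
  b = tf.get_variable('b', shape=[])  # placed on /job:ps/task:1
  c = a + b                           # placed on /job:worker
print(a.device, b.device, c.device)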


Note: The tensorflow.compat.v1.device method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, so consult each project's license before redistributing or reusing it. Do not reproduce this compilation without permission.