

Python device_util.canonicalize Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.distribute.device_util.canonicalize. If you are wondering what canonicalize does, how to call it, or what it looks like in real code, the curated examples below should help.


The 15 code examples of the canonicalize function shown below are sorted by popularity by default.
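
Before diving into the examples, here is a minimal sketch of what canonicalize does, based on the assertions in Example 3 (testCanonicalizeWithDefaultDevice) below. Note that device_util is an internal TensorFlow module, so the import path assumes a TensorFlow build that still exposes tensorflow.python.distribute, and the handling of fields not covered by that test (for example the job when neither the input nor the default specifies one) may differ between versions.

from tensorflow.python.distribute import device_util

# canonicalize() expands a partial device string into the fully qualified
# "/job:<job>/replica:<r>/task:<t>/device:<TYPE>:<n>" form. Fields missing from
# the input are filled from the optional `default` device string, and then from
# built-in defaults (replica 0, task 0, device CPU:0).
print(device_util.canonicalize("/job:worker/task:1/cpu:0", default="/gpu:0"))
# -> /job:worker/replica:0/task:1/device:CPU:0  (explicit CPU wins over the GPU default)
print(device_util.canonicalize("/cpu:0", default="/job:worker"))
# -> /job:worker/replica:0/task:0/device:CPU:0  (job taken from the default)

In the examples that follow, this normalization is used both to build canonical worker/compute device lists and to compare device strings reliably, for instance when checking that a reduction destination matches the host device.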

Example 1: _reduce_to

  def _reduce_to(self, reduce_op, value, destinations):
    if values._enclosing_tpu_context() is not None:  # pylint: disable=protected-access
      if reduce_op == reduce_util.ReduceOp.MEAN:
        # TODO(jhseu):  Revisit once we support model-parallelism.
        value *= (1. / self._num_replicas_in_sync)
      elif reduce_op != reduce_util.ReduceOp.SUM:
        raise NotImplementedError(
            "Currently only support sum & mean in TPUStrategy.")
      return tpu_ops.cross_replica_sum(value)

    if not isinstance(value, values.DistributedValues):
      # This function handles reducing values that are not PerReplica or
      # Mirrored values. For example, the same value could be present on all
      # replicas in which case `value` would be a single value or value could
      # be 0.
      return cross_device_ops_lib.reduce_non_distributed_value(
          reduce_op, self._device_map, value, destinations)

    # Validate that the destination is same as the host device
    # Note we don't do this when in replicate context as the reduction is
    # performed on the TPU device itself.
    devices = cross_device_ops_lib.get_devices_from(destinations)
    if len(devices) == 1:
      assert device_util.canonicalize(devices[0]) == device_util.canonicalize(
          self._host_device)
    else:
      raise ValueError("Multiple devices are not supported for TPUStrategy")

    output = math_ops.add_n(value)
    if reduce_op == reduce_util.ReduceOp.MEAN:
      return output * (1. / len(value))
    return output
Developer ID: hdyen, Project: tensorflow, Lines of code: 32, Source: tpu_strategy.py

Example 2: __init__

  def __init__(self, device_map, worker_device_pairs=None, logical_device=0):
    """Initialize an `InputWorkers` object.

    Args:
      device_map: A `DeviceMap` with the computation devices fed by the
        input workers.
      worker_device_pairs: A sequence of pairs:
        `(input device, a tuple of compute devices fed by that input device)`.
      logical_device: The logical device of `device_map` to feed.
    """
    self._device_map = device_map
    self._logical_device = logical_device
    if worker_device_pairs is None:
      worker_device_pairs = ((
          device_util.canonicalize("/device:CPU:0"),
          device_map.logical_to_actual_devices(logical_device)),)
    self._input_worker_devices = tuple(d for d, _ in worker_device_pairs)
    self._fed_devices = tuple(tuple(device_util.canonicalize(d) for d in f)
                              for _, f in worker_device_pairs)
    flattened = tuple(d for l in self._fed_devices for d in l)
    assert (flattened ==
            device_map.logical_to_actual_devices(logical_device)), (
                "flattened: %s logical device %d: %s" %
                (flattened, logical_device,
                 device_map.logical_to_actual_devices(logical_device)))
Developer ID: kylin9872, Project: tensorflow, Lines of code: 25, Source: input_lib.py

Example 3: testCanonicalizeWithDefaultDevice

  def testCanonicalizeWithDefaultDevice(self):
    self.assertEqual(
        device_util.canonicalize("/job:worker/task:1/cpu:0", default="/gpu:0"),
        "/job:worker/replica:0/task:1/device:CPU:0")
    self.assertEqual(
        device_util.canonicalize("/job:worker/task:1", default="/gpu:0"),
        "/job:worker/replica:0/task:1/device:GPU:0")
    self.assertEqual(
        device_util.canonicalize("/cpu:0", default="/job:worker"),
        "/job:worker/replica:0/task:0/device:CPU:0")
Developer ID: bunbutter, Project: tensorflow, Lines of code: 10, Source: device_util_test.py

Example 4: _initialize_local_worker

  def _initialize_local_worker(self, num_gpus_per_worker):
    """Initializes the object for local training."""
    self._is_chief = True
    self._num_workers = 1

    if num_gpus_per_worker:
      local_devices = [
          "/device:GPU:%d" % i for i in range(num_gpus_per_worker)
      ]
    else:
      local_devices = ["/device:CPU:0"]
    self._worker_device = device_util.canonicalize("/device:CPU:0")

    self._collective_keys = cross_device_utils.CollectiveKeys()
    self._initialize_local(local_devices)
    self._cross_tower_ops = cross_device_ops_lib.CollectiveAllReduce(
        num_workers=self._num_workers,
        num_gpus_per_worker=num_gpus_per_worker,
        collective_keys=self._collective_keys)

    self._cluster_spec = None
    self._task_type = None
    self._task_id = None

    logging.info("CollectiveAllReduceStrategy with local_devices = %r",
                 local_devices)
Developer ID: aeverall, Project: tensorflow, Lines of code: 26, Source: collective_all_reduce_strategy.py

Example 5: _initialize_local

  def _initialize_local(self, cluster_resolver):
    """Initialize internal devices for local training."""
    worker_device = device_util.canonicalize("/device:CPU:0")
    self._input_host_device = numpy_dataset.SingleDevice(worker_device)
    num_gpus = cluster_resolver.num_accelerators()
    # Define compute devices which is a list of device strings and one for each
    # replica. When there are GPUs, replicate operations on these GPUs.
    # Otherwise, place operations on CPU.
    if num_gpus > 0:
      compute_devices = tuple(map("/device:GPU:{}".format, range(num_gpus)))
    else:
      compute_devices = (_LOCAL_CPU,)

    self._device_map = values.ReplicaDeviceMap(compute_devices)
    self._input_workers = input_lib.InputWorkers(
        self._device_map, [(worker_device, compute_devices)])

    # If there is only one GPU, put everything on that GPU. Otherwise, place
    # variables on CPU.
    if num_gpus == 1:
      assert len(compute_devices) == 1
      self._variable_device = _LOCAL_GPU_0
      self._parameter_devices = (_LOCAL_GPU_0,)
    else:
      self._variable_device = _LOCAL_CPU
      self._parameter_devices = (_LOCAL_CPU,)

    self._is_chief = True
    self._cluster_spec = None
    self._task_type = None
    self._task_id = None

    logging.info(
        "ParameterServerStrategy with compute_devices = %r, "
        "variable_device = %r", compute_devices, self._variable_device)
Developer ID: rmlarsen, Project: tensorflow, Lines of code: 35, Source: parameter_server_strategy.py

Example 6: _initialize_local_worker

  def _initialize_local_worker(self, num_gpus_per_worker):
    """Initializes the object for local training."""
    self._is_chief = True
    self._num_workers = 1

    if num_gpus_per_worker:
      local_devices = tuple(
          "/device:GPU:%d" % i for i in range(num_gpus_per_worker)
      )
    else:
      local_devices = ("/device:CPU:0",)
    self._worker_device = device_util.canonicalize("/device:CPU:0")
    self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)

    self._collective_keys = cross_device_utils.CollectiveKeys()
    self._initialize_local(local_devices)
    # TODO(yuefengz): remove num_gpus_per_worker from CollectiveAllReduce.
    self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
        num_workers=self._num_workers,
        num_gpus_per_worker=num_gpus_per_worker,
        collective_keys=self._collective_keys)

    self._cluster_spec = None
    self._task_type = None
    self._task_id = None

    logging.info("CollectiveAllReduceStrategy with local_devices = %r",
                 local_devices)
Developer ID: Wajih-O, Project: tensorflow, Lines of code: 28, Source: collective_all_reduce_strategy.py

Example 7: _initialize_local

  def _initialize_local(self, num_gpus_per_worker):
    """Initialize internal devices for local training."""
    self._worker_device = device_util.canonicalize("/device:CPU:0")
    # Define compute devices which is a list of device strings and one for each
    # replica. When there are GPUs, replicate operations on these GPUs.
    # Otherwise, place operations on CPU.
    if num_gpus_per_worker > 0:
      self._compute_devices = list(
          map("/device:GPU:{}".format, range(num_gpus_per_worker)))
    else:
      self._compute_devices = [_LOCAL_CPU]

    self._compute_devices = list(
        map(device_util.resolve, self._compute_devices))
    self._canonical_compute_device_set = set(self._compute_devices)

    # If there is only one GPU, put everything on that GPU. Otherwise, place
    # variables on CPU.
    if num_gpus_per_worker == 1:
      assert len(list(self._compute_devices)) == 1
      self._variable_device = _LOCAL_GPU_0
      self._parameter_devices = [_LOCAL_GPU_0]
    else:
      self._variable_device = _LOCAL_CPU
      self._parameter_devices = [_LOCAL_CPU]

    self._is_chief = True
    self._cluster_spec = None
    self._task_type = None
    self._task_id = None

    logging.info(
        "ParameterServerStrategy with compute_devices = %r, "
        "variable_device = %r", self._compute_devices, self._variable_device)
Developer ID: aeverall, Project: tensorflow, Lines of code: 34, Source: parameter_server_strategy.py

Example 8: __init__

  def __init__(self, container_strategy, device):
    super(OneDeviceExtended, self).__init__(container_strategy)
    self._device = device
    self._input_device = device_util.canonicalize("/device:CPU:0")
    worker_device_pairs = [(self._input_device, [self._device])]
    device_map = values.SingleDeviceMap(device)
    self._input_workers = input_lib.InputWorkers(
        device_map, worker_device_pairs)
Developer ID: perfmjs, Project: tensorflow, Lines of code: 8, Source: one_device_strategy.py

Example 9: _make_dataset_iterator

  def _make_dataset_iterator(self, dataset):
    if self._local_mode:
      worker = device_util.canonicalize("/device:CPU:0")
      worker_device_pairs = [(worker, self._devices)]
    else:
      worker_device_pairs = self._worker_devices

    return values.DatasetIterator(dataset, worker_device_pairs,
                                  self._num_replicas_in_sync)
Developer ID: aeverall, Project: tensorflow, Lines of code: 9, Source: mirrored_strategy.py

Example 10: _make_input_fn_iterator

  def _make_input_fn_iterator(
      self,
      input_fn,
      replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
    worker = device_util.canonicalize("/device:CPU:0")
    worker_device_pairs = [(worker, [self._device])]
    return values.InputFunctionIterator(
        input_fn, worker_device_pairs,
        [distribute_lib.InputContext()])
Developer ID: aeverall, Project: tensorflow, Lines of code: 9, Source: one_device_strategy.py

Example 11: _initialize_multi_worker

  def _initialize_multi_worker(self, num_gpus, cluster_spec):
    """Initializes the object for multi-worker training."""
    cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
    self._cluster_spec = cluster_spec

    self._workers = []
    for job in ["chief", "worker"]:
      for task in range(len(cluster_spec.as_dict().get(job, []))):
        self._workers.append("/job:%s/task:%d" % (job, task))

    if num_gpus is None:
      raise ValueError("`num_gpus` is required if `cluster_spec` is given.")
    if num_gpus > 0:
      self._worker_devices = [
          (worker, [
              device_util.canonicalize(worker + "/device:GPU:%d" % gpu)
              for gpu in range(num_gpus)
          ]) for worker in self._workers
      ]
    else:
      self._worker_devices = [
          (worker, [device_util.canonicalize(worker, "/device:CPU:0")])
          for worker in self._workers
      ]

    devices = nest.flatten([l for _, l in self._worker_devices])

    # Setting `_default_device` will add a device scope in the
    # distribution.scope. We set the default device to the first worker. When
    # users specify device under distribution.scope by
    #   with tf.device("/cpu:0"):
    #     ...
    # their ops will end up on the cpu device of its first worker, e.g.
    # "/job:worker/task:0/device:CPU:0". Note this is not used in replica mode.
    self._default_device = self._workers[0]

    assert devices, "Must specify at least one device."
    assert len(set(devices)) == len(devices), (
        "No duplicates allowed in `devices` argument.")
    # TODO(josh11b): Require at least 2 devices?
    self._devices = [device_util.resolve(d) for d in devices]
    self._canonical_device_set = set(self._devices)
    self._device_index = values.PerReplica(
        {d: i for i, d in enumerate(devices)})
Developer ID: bunbutter, Project: tensorflow, Lines of code: 44, Source: mirrored_strategy.py

Example 12: _is_per_replica

  def _is_per_replica(self, result, expected, klass=values.PerReplica):
    self.assertIsInstance(result, klass)
    # We canonicalize the devices to match the device strings returned
    # by PerReplica, which also does device string canonicalization.
    devices = [device_util.canonicalize(_device_str(i))
               for i in range(len(expected))]
    self.assertEqual(set(devices), set(result.devices))
    for i, d in enumerate(devices):
      self.assertEqual(expected[i], result.get(d))
      self.assertEqual(expected[i], result.get(_device_str(i)))
Developer ID: kylin9872, Project: tensorflow, Lines of code: 10, Source: values_test.py

Example 13: _reduce_to

  def _reduce_to(self, reduce_op, value, destinations):
    if values._enclosing_tpu_context() is not None:  # pylint: disable=protected-access
      if reduce_op == reduce_util.ReduceOp.MEAN:
        # TODO(jhseu):  Revisit once we support model-parallelism.
        value *= (1. / self._num_replicas_in_sync)
      elif reduce_op != reduce_util.ReduceOp.SUM:
        raise NotImplementedError(
            "Currently only support sum & mean in TPUStrategy.")
      return tpu_ops.cross_replica_sum(value)

    if not isinstance(value, values.DistributedValues):
      # This function handles reducing values that are not PerReplica or
      # Mirrored values. For example, the same value could be present on all
      # replicas in which case `value` would be a single value or value could
      # be 0.
      return cross_device_ops_lib.reduce_non_distributed_value(
          reduce_op, self._device_map, value, destinations)

    devices = cross_device_ops_lib.get_devices_from(destinations)
    if len(devices) != 1:
      raise ValueError("Multiple devices are not supported for TPUStrategy")

    # Always performs the reduction on the TPU host.
    with ops.device(self._host_device):
      output = math_ops.add_n(value.values)
      if reduce_op == reduce_util.ReduceOp.MEAN:
        output *= (1. / len(value.values))

    # If necessary, copy to requested destination.
    dest_canonical = device_util.canonicalize(devices[0])
    host_canonical = device_util.canonicalize(self._host_device)

    if dest_canonical != host_canonical:
      with ops.device(devices[0]):
        output = array_ops.identity(output)

    return output
Developer ID: jackd, Project: tensorflow, Lines of code: 37, Source: tpu_strategy.py

Example 14: choose_the_best

def choose_the_best(devices, session_config=None):
  """Find the best subclass of CrossDeviceOps given a session config.

  Args:
    devices: a list of devices passed to `tf.distribute.Strategy`.
    session_config: a `tf.ConfigProto` or `None`. If `None`, it will make
      decision based on all local devices.

  Returns:
    A subclass of `CrossDeviceOps`.
  """
  requested_devices = set([device_util.canonicalize(d) for d in devices])
  machine_devices = device_lib.list_local_devices(session_config=session_config)
  using_devices = []
  for d in machine_devices:
    if device_util.canonicalize(d.name) in requested_devices:
      using_devices.append(d)
    else:
      logging.info(
          "Device is available but not used by distribute strategy: %s", d.name)

  if len(using_devices) != len(requested_devices):
    logging.warning("Not all devices in `tf.distribute.Strategy` are visible "
                    "to TensorFlow.")
    return ReductionToOneDevice()

  if any(d.device_type.lower() != "gpu" for d in using_devices):
    logging.warning("Not all devices in `tf.distribute.Strategy` are visible "
                    "to TensorFlow.")
    return ReductionToOneDevice()

  device_links = [[] for _ in range(len(using_devices))]
  for i, device in enumerate(using_devices):
    for link in device.locality.links.link:
      device_links[i].append(link.device_id)

  return _choose_all_reduce_algorithm(device_links)
Developer ID: adit-chandra, Project: tensorflow, Lines of code: 37, Source: cross_device_ops.py

Example 15: _reduce_to

  def _reduce_to(self, reduce_op, value, destinations):
    if values._enclosing_tpu_context() is not None:  # pylint: disable=protected-access
      if reduce_op == reduce_util.ReduceOp.MEAN:
        # TODO(jhseu):  Revisit once we support model-parallelism.
        value *= (1. / self._num_replicas_in_sync)
      elif reduce_op != reduce_util.ReduceOp.SUM:
        raise NotImplementedError(
            "Currently only support sum & mean in TPUStrategy.")
      return tpu_ops.cross_replica_sum(value)

    # Validate that the destination is same as the host device
    # Note we don't do this when in replicate context as the reduction is
    # performed on the TPU device itself.
    devices = cross_device_ops_lib.get_devices_from(destinations)
    if len(devices) == 1:
      assert device_util.canonicalize(devices[0]) == device_util.canonicalize(
          self._host_device)
    else:
      raise ValueError("Multiple devices are not supported for TPUStrategy")

    output = math_ops.add_n(value)
    if reduce_op == reduce_util.ReduceOp.MEAN:
      return output * (1. / len(value))
    return output
Developer ID: aeverall, Project: tensorflow, Lines of code: 24, Source: tpu_strategy.py


Note: The tensorflow.python.distribute.device_util.canonicalize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution and use should follow each project's license. Please do not republish without permission.