

Python device_lib.list_local_devices Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.python.client.device_lib.list_local_devices method in Python. If you are wondering what exactly device_lib.list_local_devices does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.python.client.device_lib.


The following presents 15 code examples of the device_lib.list_local_devices method, sorted by popularity by default.
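Before the examples, here is a minimal usage sketch of device_lib.list_local_devices: it returns a list of DeviceAttributes protos describing the CPU and GPU devices TensorFlow can see on the local machine (the field names below follow those used in the examples in this article).

from tensorflow.python.client import device_lib

# Enumerate every device visible to TensorFlow on this machine.
devices = device_lib.list_local_devices()
for d in devices:
    # Each entry carries name, device_type, memory_limit and
    # physical_device_desc fields.
    print(d.name, d.device_type, d.memory_limit)

# Keep only the GPU device names, as most of the examples below do.
gpu_names = [d.name for d in devices if d.device_type == 'GPU']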

Example 1: sg_gpus

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def sg_gpus():
    r""" Gets current available GPU nums

    Returns:
      A integer : total # of GPUs available
    """
    global _gpus

    if _gpus is None:
        local_device_protos = device_lib.list_local_devices()
        _gpus = len([x.name for x in local_device_protos if x.device_type == 'GPU'])

    return max(_gpus, 1)


#
# context helpers
# 
Developer: buriburisuri, Project: sugartensor, Lines: 20, Source: sg_main.py

Example 2: _collect_gpu_info

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def _collect_gpu_info(run_info):
  """Collect local GPU information by TF device library."""
  gpu_info = {}
  local_device_protos = device_lib.list_local_devices()

  gpu_info["count"] = len([d for d in local_device_protos
                           if d.device_type == "GPU"])
  # The device description is a comma-separated string which contains the GPU
  # model info, e.g.:
  # "device: 0, name: Tesla P100-PCIE-16GB, pci bus id: 0000:00:04.0"
  for d in local_device_protos:
    if d.device_type == "GPU":
      gpu_info["model"] = _parse_gpu_model(d.physical_device_desc)
      # Assume all connected GPUs are the same model
      break
  run_info["machine_config"]["gpu_info"] = gpu_info 
Developer: IntelAI, Project: models, Lines: 18, Source: logger.py
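Example 2 calls a _parse_gpu_model helper that is not shown above. The following is only an illustrative sketch of what such a helper might look like, assuming physical_device_desc uses the comma-separated "key: value" format quoted in the comment; it is not the project's actual implementation.

def _parse_gpu_model(physical_device_desc):
  # Split e.g. "device: 0, name: Tesla P100-PCIE-16GB, pci bus id: ..." into
  # key/value pairs and return the value of the "name" field, if present.
  for field in physical_device_desc.split(","):
    key, _, value = field.partition(":")
    if key.strip() == "name":
      return value.strip()
  return None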

Example 3: _get_devices

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def _get_devices(self):
        available_devices = device_lib.list_local_devices()

        # Remove internal `XLA` devices, see `using JIT compilation <https://www.tensorflow.org/xla/jit>`_.
        usable_devices = [device.name for device in available_devices
                          if 'XLA' not in device.name]

        if self.config.get('device'):
            devices = self.config.get('device')
            devices = devices if isinstance(devices, list) else [devices]
            devices = [device for name in devices for device in usable_devices
                       if re.search(name.upper(), device.upper()) is not None]
            devices = [device for i, device in enumerate(devices)
                       if device not in devices[:i]]
        else:
            cpu_devices = [device for device in usable_devices
                           if 'CPU' in device]
            gpu_devices = [device for device in usable_devices
                           if 'GPU' in device]
            if gpu_devices:
                devices = [gpu_devices[0]]
            else:
                devices = [cpu_devices[0]]
        return devices 
Developer: analysiscenter, Project: batchflow, Lines: 26, Source: base.py

Example 4: get_machine_info

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def get_machine_info():
    parameter_value_map = {}
    operating_sys = sys.platform
    parameter_value_map['Operating System'] = operating_sys
    if 'linux' not in operating_sys:
        return parameter_value_map

    for i, device in enumerate(device_lib.list_local_devices()):
        if device.device_type != 'GPU':
            continue
        parameter_value_map['GPU_{}_name'.format(i + 1)] = device.name
        parameter_value_map['GPU_{}_memory_limit'.format(i + 1)] = device.memory_limit
        parameter_value_map['GPU_{}_description'.format(i + 1)] = device.physical_device_desc
    lscpu = subprocess.check_output("lscpu | grep '^CPU(s):\\|Core\\|Thread'", shell=True).strip().decode()
    lscpu = lscpu.split('\n')
    for row in lscpu:
        row = row.split(':')
        parameter_value_map[row[0]] = row[1].strip()
    return parameter_value_map 
Developer: marco-c, Project: autowebcompat, Lines: 21, Source: utils.py

Example 5: gather_available_device_info

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def gather_available_device_info():
  """Gather list of devices available to TensorFlow.

  Returns:
    A list of test_log_pb2.AvailableDeviceInfo messages.
  """
  device_info_list = []
  devices = device_lib.list_local_devices()

  for d in devices:
    device_info = test_log_pb2.AvailableDeviceInfo()
    device_info.name = d.name
    device_info.type = d.device_type
    device_info.memory_limit = d.memory_limit
    device_info.physical_description = d.physical_device_desc
    device_info_list.append(device_info)

  return device_info_list 
Developer: tobegit3hub, Project: deep_image_model, Lines: 20, Source: system_info_lib.py

Example 6: validate_batch_size_for_multi_gpu

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def validate_batch_size_for_multi_gpu(batch_size):
    """For multi-gpu, batch-size must be a multiple of the number of
    available GPUs.
    Note that this should eventually be handled by replicate_model_fn
    directly. Multi-GPU support is currently experimental, however,
    so doing the work here until that feature is in place.
    """
    if FLAGS.multi_gpu:
        from tensorflow.python.client import device_lib
        local_device_protos = device_lib.list_local_devices()
        num_gpus = sum([1 for d in local_device_protos if d.device_type == 'GPU'])
        if not num_gpus:
            raise ValueError('Multi-GPU mode was specified, but no GPUs '
                             'were found. To use CPU, run --multi_gpu=False.')
        remainder = batch_size % num_gpus
        if remainder:
            err = ('When running with multiple GPUs, batch size '
                   'must be a multiple of the number of available GPUs. '
                   'Found {} GPUs with a batch size of {}; try --batch_size={} instead.'
                  ).format(num_gpus, batch_size, batch_size - remainder)
            raise ValueError(err)
        return num_gpus
    return 0 
Developer: mlperf, Project: inference, Lines: 25, Source: train_ssd_large.py
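The heart of the check in Example 6 is plain remainder arithmetic. Below is a small stand-alone sketch, with a hypothetical helper name, that mirrors the same logic without the FLAGS and device_lib dependencies:

def _check_batch_divisible(batch_size, num_gpus):
    # Hypothetical helper mirroring the divisibility check above.
    remainder = batch_size % num_gpus
    if remainder:
        raise ValueError(
            'Batch size {} is not a multiple of {} GPUs; '
            'try --batch_size={} instead.'.format(
                batch_size, num_gpus, batch_size - remainder))

# For example, with 4 GPUs a batch size of 30 raises an error suggesting 28,
# while a batch size of 32 passes.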

Example 7: get_available_gpus

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def get_available_gpus():
    """
    Returns a list of string names of all available GPUs
    """
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU'] 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 8, Source: utils_tf.py

Example 8: available_gpus

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def available_gpus():
  """List of GPU device names detected by TensorFlow."""
  local_device_protos = device_lib.list_local_devices()
  return [x.name for x in local_device_protos if x.device_type == 'GPU'] 
Developer: utra-robosoccer, Project: soccer-matlab, Lines: 6, Source: utility.py

Example 9: get_available_gpus

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def get_available_gpus():
    from tensorflow.python.client import device_lib
    local_device_protos = device_lib.list_local_devices()
    return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU'] 
Developer: xuwd11, Project: cs294-112_hws, Lines: 6, Source: run_dqn_atari.py

Example 10: get_available_gpus

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def get_available_gpus():
    # recipe from here:
    # https://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa

    from tensorflow.python.client import device_lib
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']

# ================================================================
# Saving variables
# ================================================================ 
Developer: MaxSobolMark, Project: HardRLWithYoutube, Lines: 13, Source: tf_util.py

Example 11: get_available_devs

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def get_available_devs():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU'] 
Developer: ConvLab, Project: ConvLab, Lines: 5, Source: mdbt_util.py

Example 12: get_gpu_count

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def get_gpu_count():
    local_device_protos = device_lib.list_local_devices()
    return len([x.name for x in local_device_protos if x.device_type == 'GPU']) 
Developer: lambdal, Project: lambda-deep-learning-demo, Lines: 5, Source: config_parser.py

Example 13: get_tf_session

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def get_tf_session():
    """ Returning a session. Set options here if desired. """
    tf.reset_default_graph()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    tf_config = tf.ConfigProto(inter_op_parallelism_threads=1,
                               intra_op_parallelism_threads=1,
                               gpu_options=gpu_options)
    session = tf.Session(config=tf_config)

    def get_available_gpus():
        from tensorflow.python.client import device_lib
        local_device_protos = device_lib.list_local_devices()
        return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']

    print("AVAILABLE GPUS: ", get_available_gpus())
    return session 
Developer: DanielTakeshi, Project: rl_algorithms, Lines: 17, Source: bc.py

Example 14: get_tf_session

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def get_tf_session():
    """ Returning a session. Set options here (e.g. for GPUs) if desired. """
    tf.reset_default_graph()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    tf_config = tf.ConfigProto(inter_op_parallelism_threads=1,
                               intra_op_parallelism_threads=1,
                               gpu_options=gpu_options)
    session = tf.Session(config=tf_config)

    def get_available_gpus():
        from tensorflow.python.client import device_lib
        local_device_protos = device_lib.list_local_devices()
        return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']

    print("AVAILABLE GPUS: ", get_available_gpus())
    return session 
Developer: DanielTakeshi, Project: rl_algorithms, Lines: 17, Source: utils.py

Example 15: get_available_gpus

# Required import: from tensorflow.python.client import device_lib [as alias]
# Or: from tensorflow.python.client.device_lib import list_local_devices [as alias]
def get_available_gpus():
    from tensorflow.python.client import device_lib
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']
Developer: mkocaoglu, Project: CausalGAN, Lines: 6, Source: utils.py


Note: The tensorflow.python.client.device_lib.list_local_devices examples in this article were collected by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; the copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.