This article collects typical usage examples of the Python function tensorflow.python.client.device_lib.list_local_devices. If you are unsure what list_local_devices does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
The following 15 code examples of list_local_devices are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
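Before the examples, here is a minimal sketch of the API they all build on. list_local_devices() returns a list of DeviceAttributes protobufs; the exact device-name format ('/cpu:0' vs. '/device:CPU:0') varies across TensorFlow versions, so the printed values below are illustrative:

from tensorflow.python.client import device_lib

for dev in device_lib.list_local_devices():
    print(dev.name)                  # e.g. '/device:CPU:0' or '/device:GPU:0'
    print(dev.device_type)           # 'CPU', 'GPU', or 'SYCL'
    print(dev.memory_limit)          # memory available to this device, in bytes
    print(dev.physical_device_desc)  # free-form description, e.g. the GPU model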
Example 1: testListLocalDevices
def testListLocalDevices(self):
    devices = device_lib.list_local_devices()
    self.assertGreater(len(devices), 0)
    self.assertEqual(devices[0].device_type, "CPU")

    devices = device_lib.list_local_devices(config_pb2.ConfigProto())
    self.assertGreater(len(devices), 0)
    self.assertEqual(devices[0].device_type, "CPU")

    # GPU test
    if test.is_gpu_available():
        self.assertGreater(len(devices), 1)
        self.assertTrue("GPU" in [d.device_type for d in devices] or
                        "SYCL" in [d.device_type for d in devices])
Example 2: is_gpu_available
def is_gpu_available(cuda_only=False):
    """Returns whether TensorFlow can access a GPU.

    Args:
        cuda_only: limit the search to CUDA gpus.

    Returns:
        True iff a gpu device of the requested kind is available.
    """
    if cuda_only:
        return any((x.device_type == 'GPU')
                   for x in _device_lib.list_local_devices())
    else:
        return any((x.device_type == 'GPU' or x.device_type == 'SYCL')
                   for x in _device_lib.list_local_devices())
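A usage sketch for the function above (the device strings are illustrative, not part of the original example):

if is_gpu_available(cuda_only=True):
    device_name = '/gpu:0'  # run on the first CUDA GPU
else:
    device_name = '/cpu:0'  # fall back to the CPU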
Example 3: validate_batch_size_for_multi_gpu
def validate_batch_size_for_multi_gpu(batch_size):
    """For multi-gpu, batch-size must be a multiple of the number of GPUs.

    Note that this should eventually be handled by replicate_model_fn
    directly. Multi-GPU support is currently experimental, however,
    so doing the work here until that feature is in place.

    Args:
        batch_size: the number of examples processed in each training batch.

    Raises:
        ValueError: if no GPUs are found, or selected batch_size is invalid.
    """
    from tensorflow.python.client import device_lib  # pylint: disable=g-import-not-at-top

    local_device_protos = device_lib.list_local_devices()
    num_gpus = sum(1 for d in local_device_protos if d.device_type == 'GPU')
    if not num_gpus:
        raise ValueError('Multi-GPU mode was specified, but no GPUs '
                         'were found. To use CPU, run without --multi_gpu.')

    remainder = batch_size % num_gpus
    if remainder:
        err = ('When running with multiple GPUs, batch size '
               'must be a multiple of the number of available GPUs. '
               'Found {} GPUs with a batch size of {}; try --batch_size={} instead.'
               ).format(num_gpus, batch_size, batch_size - remainder)
        raise ValueError(err)
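A hypothetical call pattern, assuming a machine with 4 visible GPUs:

validate_batch_size_for_multi_gpu(128)  # passes: 128 % 4 == 0
validate_batch_size_for_multi_gpu(130)  # raises ValueError suggesting --batch_size=128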
Example 4: count_gpus
def count_gpus():
    from tensorflow.python.client import device_lib

    count = 0
    for device in device_lib.list_local_devices():
        if device.device_type == "GPU":
            count += 1
    return count
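Since list_local_devices() initializes every visible device (see the comment in Example 13), it can be worth caching the count. A sketch using the standard-library functools.lru_cache, which is not part of the original example:

import functools

@functools.lru_cache(maxsize=None)
def count_gpus_cached():
    # Device discovery is expensive, so run it only once per process.
    from tensorflow.python.client import device_lib
    return sum(1 for d in device_lib.list_local_devices()
               if d.device_type == "GPU")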
Example 5: main
def main(args):
    logging.info(args)

    device = 'gpu' if args.gpu else 'cpu'
    devices = device_lib.list_local_devices()
    num_gpus = len([d for d in devices if '/gpu' in d.name])

    env = gym.make(args.game)
    env = Env(env, resized_width=84, resized_height=84, agent_history_length=4)
    num_actions = len(env.gym_actions)

    global_net = Network(num_actions, -1, 'cpu')

    actor_networks = []
    for t in range(args.threads):
        device_index = 0 if device == 'cpu' else (t if args.threads <= num_gpus else 0)
        n = Network(num_actions, t, device, device_index)
        n.tie_global_net(global_net)
        actor_networks.append(n)

    sess = tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=args.threads,
                                            inter_op_parallelism_threads=args.threads))
    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver()
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)

    threads = []
    for t, net in enumerate(actor_networks):
        e = Env(gym.make(args.game), net.width, net.height, net.depth)
        w = Worker(t, e, net, sess, saver, args.checkpoint_dir)
        w.start()
        threads.append(w)

    for t in threads:
        t.join()
Example 6: is_gpu_available
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
    """Returns whether TensorFlow can access a GPU.

    Args:
        cuda_only: limit the search to CUDA gpus.
        min_cuda_compute_capability: a (major, minor) pair that indicates the
            minimum CUDA compute capability required, or None if no requirement.

    Returns:
        True iff a gpu device of the requested kind is available.
    """

    def compute_capability_from_device_desc(device_desc):
        # TODO(jingyue): The device description generator has to be in sync with
        # this file. Another option is to put compute capability in
        # DeviceAttributes, but I avoided that to keep DeviceAttributes
        # target-independent. Reconsider this option when we have more things
        # like this to keep in sync.
        # LINT.IfChange
        match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
        # LINT.ThenChange(//tensorflow/core/\
        # common_runtime/gpu/gpu_device.cc)
        if not match:
            return 0, 0
        return int(match.group(1)), int(match.group(2))

    for local_device in device_lib.list_local_devices():
        if local_device.device_type == "GPU":
            if (min_cuda_compute_capability is None or
                    compute_capability_from_device_desc(
                        local_device.physical_device_desc) >=
                    min_cuda_compute_capability):
                return True
        if local_device.device_type == "SYCL" and not cuda_only:
            return True
    return False
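A usage sketch: because the helper returns (major, minor) integer tuples and Python compares tuples lexicographically, the minimum capability can be passed as a plain pair (the 3.5 threshold here is illustrative):

if is_gpu_available(cuda_only=True, min_cuda_compute_capability=(3, 5)):
    print("CUDA GPU with compute capability >= 3.5 found")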
Example 7: _get_local_devices
def _get_local_devices(device_type):
    local_device_protos = device_lib.list_local_devices()
    return [
        device.name
        for device in local_device_protos
        if device.device_type == device_type
    ]
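Usage sketch (the returned names are illustrative):

_get_local_devices("GPU")  # e.g. ['/device:GPU:0', '/device:GPU:1']
_get_local_devices("CPU")  # e.g. ['/device:CPU:0']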
Example 8: setUp
def setUp(self):
    # Load the rime operation library
    from montblanc.impl.rime.tensorflow import load_tf_lib
    self.rime = load_tf_lib()

    # Obtain a list of GPU device specifications ['/gpu:0', '/gpu:1', ...]
    self.gpu_devs = [d.name for d in device_lib.list_local_devices()
                     if d.device_type == 'GPU']
Example 9: get_available_gpus
def get_available_gpus():
    # recipe from here:
    # https://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow
    from tensorflow.python.client import device_lib

    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']
Example 10: get_available_gpus
def get_available_gpus():
    """
    Returns a list of the identifiers of all visible GPUs.
    """
    from tensorflow.python.client import device_lib

    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']
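Examples 9 and 10 are the same recipe. A common way to consume the returned list is to pin ops to the first GPU, falling back to the CPU; a sketch in graph-mode TensorFlow 1.x:

import tensorflow as tf

gpus = get_available_gpus()
target = gpus[0] if gpus else '/cpu:0'
with tf.device(target):
    a = tf.constant([1.0, 2.0])  # placed on the chosen device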
Example 11: get_available_gpus
def get_available_gpus():
    '''
    DESCRIPTION:
        This function is the same as the one used in the train_multi_gpu
        script. One later modification could be to run the inference on
        systems that don't have any GPU, only CPUs, in which case this
        function could return those device names instead.
    USAGE:
        OUTPUT:
            all_gpu_name: list of the names of all the GPUs visible
                to TensorFlow.
    '''
    # This will give the list of all the devices (including CPUs).
    local_devices = device_lib.list_local_devices()

    # Now filter out the GPU devices to run the inference on.
    # Test whether running inference is affected by using different devices,
    # since the saved batch-norm statistics are specific to the device they
    # were computed on. So at the least we need the same graph to run the
    # inference after restoring the checkpoint, unless all the weights
    # (including the BNs) were on the CPU.
    all_gpu_name = [x.name for x in local_devices
                    if x.device_type == 'GPU']
    return all_gpu_name
Example 12: get_config
def get_config(model_type, prior_pi, log_sigma1, log_sigma2):
    """Get model config."""
    print("Using Model configuration: %s" % model_type)

    if model_type == "small":
        config = SmallConfig()
    elif model_type == "medium":
        config = MediumConfig()
    elif model_type == "large":
        config = LargeConfig()
    elif model_type == "test":
        config = TestConfig()
    elif model_type == "aritificial":
        config = ArtificialDataConfig()
    else:
        raise ValueError("Invalid model: %s" % model_type)

    config.prior_pi = prior_pi
    config.log_sigma1 = log_sigma1
    config.log_sigma2 = log_sigma2

    ########### Automatically get the number of GPUs we have ##################
    gpus = [x.name for x in device_lib.list_local_devices()
            if x.device_type == "GPU"]
    if len(gpus) == 0:
        # TODO: We need to set it to at least one.
        config.num_gpus = 1
    else:
        config.num_gpus = len(gpus)
        print("$$$$$$$$$$$ YOU ACTUALLY HAVE GPUs DUDE $$$$$$$$$$$")
    return config
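A hypothetical call (the prior and sigma values are illustrative only, and the config classes must be defined elsewhere):

config = get_config("small", prior_pi=0.25, log_sigma1=-1.0, log_sigma2=-7.0)
print(config.num_gpus)  # >= 1 even on a CPU-only machine, per the fallback above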
Example 13: get_nr_gpu
def get_nr_gpu():
    """
    Returns:
        int: #available GPUs in CUDA_VISIBLE_DEVICES, or in the system.
    """
    env = os.environ.get('CUDA_VISIBLE_DEVICES', None)
    if env is not None:
        return len(env.split(','))

    output, code = subproc_call("nvidia-smi -L", timeout=5)
    if code == 0:
        output = output.decode('utf-8')
        return len(output.strip().split('\n'))
    else:
        try:
            # Use NVML to query device properties
            with NVMLContext() as ctx:
                return ctx.num_devices()
        except Exception:
            # Fallback
            # Note this will initialize all GPUs and therefore has side effect
            # https://github.com/tensorflow/tensorflow/issues/8136
            logger.info("Loading local devices by TensorFlow ...")
            from tensorflow.python.client import device_lib
            local_device_protos = device_lib.list_local_devices()
            return len([x.name for x in local_device_protos
                        if x.device_type == 'GPU'])
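One caveat about the first branch: ''.split(',') returns [''], so an empty CUDA_VISIBLE_DEVICES would still count as one GPU here. A sketch of the intended behavior:

import os

os.environ['CUDA_VISIBLE_DEVICES'] = '0,2'
print(get_nr_gpu())  # -> 2, without touching nvidia-smi, NVML, or TensorFlow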
Example 14: get_available_gpus
def get_available_gpus(ngpus=-1):
    '''
    :param int ngpus: GPUs max to use. Default -1 means all gpus.
    :returns: List of gpu devices. Ex.: ['/gpu:0', '/gpu:1', ...]
    '''
    local_device_protos = device_lib.list_local_devices()
    gpus_list = [x.name for x in local_device_protos if x.device_type == 'GPU']
    return gpus_list[:ngpus] if ngpus > -1 else gpus_list
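Usage sketch (device names illustrative):

get_available_gpus()   # every GPU, e.g. ['/gpu:0', '/gpu:1']
get_available_gpus(1)  # at most one, e.g. ['/gpu:0']
get_available_gpus(0)  # [] -- only ngpus=-1 means "all"; 0 is honored literally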
Example 15: get_num_gpus
def get_num_gpus(flags_obj):
    """Treat num_gpus=-1 as 'use all'."""
    if flags_obj.num_gpus != -1:
        return flags_obj.num_gpus

    from tensorflow.python.client import device_lib  # pylint: disable=g-import-not-at-top
    local_device_protos = device_lib.list_local_devices()
    return sum(1 for d in local_device_protos if d.device_type == "GPU")
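A hypothetical usage with a stand-in for the flags object (the real code receives an absl flags object; _Flags here is only for illustration):

class _Flags(object):
    num_gpus = -1  # -1 means "count whatever TensorFlow can see"

print(get_num_gpus(_Flags()))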