This article collects typical usage examples of the pynvml.nvmlDeviceGetName method in Python. If you have been wondering what pynvml.nvmlDeviceGetName does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the pynvml module.
Six code examples of pynvml.nvmlDeviceGetName are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
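Before the project examples, here is a minimal, self-contained sketch of the basic call pattern (not taken from any of the projects below): initialize NVML, grab a device handle, read the name, and shut NVML down. Depending on the pynvml version, nvmlDeviceGetName may return bytes or str, which is why several examples below call .decode("utf-8"); the guard here covers both cases.

import pynvml

pynvml.nvmlInit()
try:
    handle = pynvml.nvmlDeviceGetHandleByIndex(0)   # first GPU
    name = pynvml.nvmlDeviceGetName(handle)
    if isinstance(name, bytes):                     # older pynvml versions return bytes
        name = name.decode("utf-8")
    print(name)                                     # e.g. "NVIDIA GeForce RTX 3090"
finally:
    pynvml.nvmlShutdown()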
Example 1: getGPUUsage
# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlDeviceGetName [as alias]
# Note: this example additionally assumes a module-level import jsonpickle.
def getGPUUsage():
    try:
        pynvml.nvmlInit()
        count = pynvml.nvmlDeviceGetCount()
        if count == 0:
            return None
        result = {"driver": pynvml.nvmlSystemGetDriverVersion(),
                  "gpu_count": int(count)
                  }
        i = 0
        gpuData = []
        while i < count:
            handle = pynvml.nvmlDeviceGetHandleByIndex(i)
            mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
            gpuData.append({"device_num": i,
                            "name": pynvml.nvmlDeviceGetName(handle),
                            "total": round(float(mem.total) / 1000000000, 2),
                            "used": round(float(mem.used) / 1000000000, 2)})
            i = i + 1
        result["devices"] = jsonpickle.encode(gpuData, unpicklable=False)
    except Exception as e:
        result = {"driver": "No GPU!", "gpu_count": 0, "devices": []}
    return result
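A hedged usage sketch for the function above; the field names follow the dictionary built in the example, and "devices" is the JSON string produced by jsonpickle.encode:

info = getGPUUsage()
if info and info["gpu_count"] > 0:
    print(info["driver"], info["devices"])
else:
    print("No GPU available")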
Example 2: gpu_info
# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlDeviceGetName [as alias]
# Note: this example additionally assumes a module-level import torch.
def gpu_info(self):
    # pip install nvidia-ml-py3
    if len(self.gpu_ids) > 0 and torch.cuda.is_available():
        try:
            import pynvml
            pynvml.nvmlInit()
            self.config_dic['gpu_driver_version'] = pynvml.nvmlSystemGetDriverVersion()
            for gpu_id in self.gpu_ids:
                handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
                gpu_id_name = "gpu%s" % gpu_id
                mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
                gpu_utilize = pynvml.nvmlDeviceGetUtilizationRates(handle)
                self.config_dic['%s_device_name' % gpu_id_name] = pynvml.nvmlDeviceGetName(handle)
                self.config_dic['%s_mem_total' % gpu_id_name] = gpu_mem_total = round(mem_info.total / 1024 ** 3, 2)
                self.config_dic['%s_mem_used' % gpu_id_name] = gpu_mem_used = round(mem_info.used / 1024 ** 3, 2)
                # self.config_dic['%s_mem_free' % gpu_id_name] = gpu_mem_free = mem_info.free // 1024 ** 2
                self.config_dic['%s_mem_percent' % gpu_id_name] = round((gpu_mem_used / gpu_mem_total) * 100, 1)
                self._set_dict_smooth('%s_utilize_gpu' % gpu_id_name, gpu_utilize.gpu, 0.8)
                # self.config_dic['%s_utilize_gpu' % gpu_id_name] = gpu_utilize.gpu
                # self.config_dic['%s_utilize_memory' % gpu_id_name] = gpu_utilize.memory
            pynvml.nvmlShutdown()
        except Exception as e:
            print(e)
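The _set_dict_smooth helper is not part of this snippet. A plausible stand-in, assuming it keeps an exponentially smoothed value in the same config_dic; the name and signature are taken from the call above, the body is an assumption:

def _set_dict_smooth(self, key, value, smooth=0.8):
    # Hypothetical helper: blend the new sample into the previous value so that
    # noisy readings such as GPU utilization do not jump around in the report.
    old = self.config_dic.get(key)
    self.config_dic[key] = value if old is None else round(old * smooth + value * (1 - smooth), 1)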
Example 3: _get_devices
# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlDeviceGetName [as alias]
def _get_devices(self):
    """ Obtain the name of the installed devices. The quality of this information depends on
    the backend and OS being used, but it should be sufficient for identifying cards.

    Returns
    -------
    list
        List of device names for connected GPUs as corresponding to the values in
        :attr:`_handles`
    """
    self._initialize()
    if self._device_count == 0:
        names = list()
    elif self._is_plaidml:
        names = self._plaid.names
    elif IS_MACOS:
        names = [pynvx.cudaGetName(handle, ignore=True)
                 for handle in self._handles]
    else:
        names = [pynvml.nvmlDeviceGetName(handle).decode("utf-8")
                 for handle in self._handles]
    self._log("debug", "GPU Devices: {}".format(names))
    return names
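The _handles attribute that feeds the list comprehensions is populated elsewhere in the class. A minimal sketch of how such handles are typically collected with pynvml; the attribute names mirror the snippet, but the method itself is hypothetical:

def _initialize_handles(self):
    # Hypothetical setup step: one NVML handle per installed GPU, so that
    # _get_devices() can translate each handle into a device name.
    pynvml.nvmlInit()
    self._device_count = pynvml.nvmlDeviceGetCount()
    self._handles = [pynvml.nvmlDeviceGetHandleByIndex(idx)
                     for idx in range(self._device_count)]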
Example 4: device_name_for
# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlDeviceGetName [as alias]
def device_name_for(device_handle):
    """Get GPU device name"""
    try:
        return nativestr(pynvml.nvmlDeviceGetName(device_handle))
    except pynvml.NVMLError:
        return "NVIDIA"
Example 5: measure_cpu_gpu_instant_load
# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlDeviceGetName [as alias]
# Note: this example relies on project-level names (nn, current_communicator,
# collect_and_shape_result, callback, p_handler, cg_load_backend_ok) defined elsewhere.
def measure_cpu_gpu_instant_load():
    # Get current cpu gpu load, as
    #   load = [rank, cpu_load, nvidia_device_id, gpu_load]
    #   result_arr: [load, load, ...]
    if cg_load_backend_ok:
        global gpu_a_load
        global gpu_m_count
        global p_handler
        cpu_load = p_handler.cpu_percent()
        gpu_m_count += 1
        try:
            comm = current_communicator()
            if comm:
                index = comm.local_rank
            elif 'cuda' in str(nn.get_current_context().backend):
                index = 0
            else:
                raise Exception
            handler = pynvml.nvmlDeviceGetHandleByIndex(index)
            gpu_load = [
                [index, pynvml.nvmlDeviceGetUtilizationRates(handler).gpu]]
            if index in gpu_a_load.keys():
                gpu_a_load[index]['name'] = pynvml.nvmlDeviceGetName(
                    handler).decode("utf-8")
                o_load = gpu_a_load[index]['load']
                n_load = gpu_load[0][1]
                gpu_a_load[index]['load'] = (
                    (gpu_m_count - 1) * o_load + n_load) / gpu_m_count
            else:
                gpu_a_load[index] = {
                    'name': pynvml.nvmlDeviceGetName(handler).decode("utf-8"),
                    'load': gpu_load[0][1]
                }
        except Exception:
            gpu_load = [[-1, -1]]
        callback.update_status(
            ('cpu_gpu_load', collect_and_shape_result(cpu_load, gpu_load)))
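The update ((gpu_m_count - 1) * o_load + n_load) / gpu_m_count is an incremental mean, so the average load over all samples seen so far is maintained without storing the full history. A tiny standalone illustration with made-up utilization readings:

samples = [30, 50, 70]                  # made-up GPU utilization percentages
avg = 0.0
for n, x in enumerate(samples, start=1):
    avg = ((n - 1) * avg + x) / n       # same update rule as gpu_a_load[index]['load']
print(avg)                              # 50.0, identical to sum(samples) / len(samples)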
Example 6: _log_statistics
# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlDeviceGetName [as alias]
# Note: in this example the pynvml module is referenced through the local name nvml,
# and Summary is TensorFlow's summary protobuf.
def _log_statistics(self, elapsed_steps, elapsed_time, global_step):
    """Collect and store all summary values.

    Arguments:
        elapsed_steps (int):
            The number of steps between the current trigger event and the last one.
        elapsed_time (float):
            The number of seconds between the current trigger event and the last one.
        global_step (tf.Tensor):
            Global step tensor.
    """
    # Write summary for tensorboard.
    if self._summary_writer is not None:
        summary_list = list()
        # Add only the statistics that are requested for logging.
        for gpu_id in self._gpu_statistics.keys():
            for statistic in self._gpu_statistics[gpu_id].keys():
                if statistic in self._statistics_to_log:
                    values = self._gpu_statistics[gpu_id][statistic]
                    # Only calculate and write the average if there is data available.
                    if values:
                        avg_value = sum(values) / len(values)
                        avg_summary = Summary.Value(tag='{}/{}:{}'
                                                    .format(self._group_tag, gpu_id, statistic),
                                                    simple_value=avg_value)
                        summary_list.append(avg_summary)
        # Write all statistics as simple scalar summaries.
        summary = Summary(value=summary_list)
        self._summary_writer.add_summary(summary, global_step)

    # Log summaries to the logging stream.
    if not self._suppress_stdout:
        for gpu_id in self._gpu_statistics.keys():
            # Acquire a GPU device handle.
            handle = nvml.nvmlDeviceGetHandleByIndex(gpu_id)
            # Query the device name.
            name = nvml.nvmlDeviceGetName(handle).decode('utf-8')
            for statistic in self._gpu_statistics[gpu_id].keys():
                # Log the collected values for this statistic.
                logging.debug("%s: %s", name, '{}: {}'
                              .format(statistic, self._gpu_statistics[gpu_id][statistic]))
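The snippet only consumes self._gpu_statistics; the hook fills it elsewhere. A plausible collection step using the same nvml alias; the method name and the particular statistics gathered are assumptions for illustration:

def _collect_statistics(self):
    # Hypothetical sampling step: append one utilization and one memory reading
    # per GPU to the per-statistic lists that _log_statistics() averages.
    for gpu_id in self._gpu_statistics.keys():
        handle = nvml.nvmlDeviceGetHandleByIndex(gpu_id)
        util = nvml.nvmlDeviceGetUtilizationRates(handle)
        mem = nvml.nvmlDeviceGetMemoryInfo(handle)
        self._gpu_statistics[gpu_id].setdefault('utilization', []).append(util.gpu)
        self._gpu_statistics[gpu_id].setdefault('memory_used', []).append(mem.used)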