This article collects typical usage examples of the pynvml.nvmlInit method in Python. If you have been wondering how exactly pynvml.nvmlInit is used, or where to find real-world examples of it, the curated code samples below may help. You can also explore other methods in the pynvml module.
The following shows 13 code examples of pynvml.nvmlInit, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
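Before the project-specific examples, a minimal self-contained sketch of the nvmlInit / nvmlShutdown lifecycle may be useful. It only uses calls that also appear in the examples below; note that some pynvml releases return the driver version and device names as bytes rather than str.

import pynvml

pynvml.nvmlInit()
try:
    # Driver version and per-device memory usage.
    print("Driver:", pynvml.nvmlSystemGetDriverVersion())
    for i in range(pynvml.nvmlDeviceGetCount()):
        handle = pynvml.nvmlDeviceGetHandleByIndex(i)
        mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
        print(i, pynvml.nvmlDeviceGetName(handle), mem.used, "/", mem.total)
finally:
    pynvml.nvmlShutdown()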
Example 1: getFreeId
# Required module: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def getFreeId():
    import pynvml

    pynvml.nvmlInit()

    def getFreeRatio(id):
        handle = pynvml.nvmlDeviceGetHandleByIndex(id)
        use = pynvml.nvmlDeviceGetUtilizationRates(handle)
        # Average of GPU and memory utilization, both reported in percent.
        ratio = 0.5 * (float(use.gpu) + float(use.memory))
        return ratio

    deviceCount = pynvml.nvmlDeviceGetCount()
    available = []
    for i in range(deviceCount):
        if getFreeRatio(i) < 70:
            available.append(i)
    gpus = ''
    for g in available:
        gpus = gpus + str(g) + ','
    gpus = gpus[:-1]
    return gpus
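A hypothetical caller (our assumption; not part of the snippet) would typically feed the returned comma-separated list straight into CUDA_VISIBLE_DEVICES before importing a deep-learning framework:

import os
os.environ['CUDA_VISIBLE_DEVICES'] = getFreeId()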
Example 2: getGPUUsage
# Required module: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def getGPUUsage():
    # Assumes `import pynvml` and `import jsonpickle` at module level.
    try:
        pynvml.nvmlInit()
        count = pynvml.nvmlDeviceGetCount()
        if count == 0:
            return None
        result = {"driver": pynvml.nvmlSystemGetDriverVersion(),
                  "gpu_count": int(count)
                  }
        i = 0
        gpuData = []
        while i < count:
            handle = pynvml.nvmlDeviceGetHandleByIndex(i)
            mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
            gpuData.append({"device_num": i,
                            "name": pynvml.nvmlDeviceGetName(handle),
                            "total": round(float(mem.total) / 1000000000, 2),  # GB
                            "used": round(float(mem.used) / 1000000000, 2)})   # GB
            i = i + 1
        result["devices"] = jsonpickle.encode(gpuData, unpicklable=False)
    except Exception as e:
        result = {"driver": "No GPU!", "gpu_count": 0, "devices": []}
    return result
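A hypothetical caller (not shown in the snippet) would simply do usage = getGPUUsage() and forward the dict to its web or logging layer; note that usage["devices"] is already a JSON string produced by jsonpickle, while the total and used figures are reported in gigabytes.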
Example 3: gpu_info
# Required module: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def gpu_info(self):
    # pip install nvidia-ml-py3
    if len(self.gpu_ids) > 0 and torch.cuda.is_available():
        try:
            import pynvml
            pynvml.nvmlInit()
            self.config_dic['gpu_driver_version'] = pynvml.nvmlSystemGetDriverVersion()
            for gpu_id in self.gpu_ids:
                handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
                gpu_id_name = "gpu%s" % gpu_id
                mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
                gpu_utilize = pynvml.nvmlDeviceGetUtilizationRates(handle)
                self.config_dic['%s_device_name' % gpu_id_name] = pynvml.nvmlDeviceGetName(handle)
                self.config_dic['%s_mem_total' % gpu_id_name] = gpu_mem_total = round(mem_info.total / 1024 ** 3, 2)
                self.config_dic['%s_mem_used' % gpu_id_name] = gpu_mem_used = round(mem_info.used / 1024 ** 3, 2)
                # self.config_dic['%s_mem_free' % gpu_id_name] = gpu_mem_free = mem_info.free // 1024 ** 2
                self.config_dic['%s_mem_percent' % gpu_id_name] = round((gpu_mem_used / gpu_mem_total) * 100, 1)
                self._set_dict_smooth('%s_utilize_gpu' % gpu_id_name, gpu_utilize.gpu, 0.8)
                # self.config_dic['%s_utilize_gpu' % gpu_id_name] = gpu_utilize.gpu
                # self.config_dic['%s_utilize_memory' % gpu_id_name] = gpu_utilize.memory
            pynvml.nvmlShutdown()
        except Exception as e:
            print(e)
Example 4: getFreeId
# Required module: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def getFreeId():
    import pynvml

    pynvml.nvmlInit()

    def getFreeRatio(id):
        handle = pynvml.nvmlDeviceGetHandleByIndex(id)
        use = pynvml.nvmlDeviceGetUtilizationRates(handle)
        ratio = 0.5 * (float(use.gpu) + float(use.memory))
        return ratio

    deviceCount = pynvml.nvmlDeviceGetCount()
    available = []
    for i in range(deviceCount):
        if getFreeRatio(i) < 70:
            available.append(i)
    gpus = ''
    for g in available:
        gpus = gpus + str(g) + ','
    gpus = gpus[:-1]
    return gpus
Example 5: __init__
# Required module: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def __init__(self, id=0):
    """Create object to control device using NVML"""
    pynvml.nvmlInit()
    self.dev = pynvml.nvmlDeviceGetHandleByIndex(id)

    try:
        self._pwr_limit = pynvml.nvmlDeviceGetPowerManagementLimit(self.dev)
        self.pwr_constraints = pynvml.nvmlDeviceGetPowerManagementLimitConstraints(self.dev)
    except pynvml.NVMLError_NotSupported:
        self._pwr_limit = None
        self.pwr_constraints = [1, 0]  # inverted range to make all range checks fail

    try:
        self._persistence_mode = pynvml.nvmlDeviceGetPersistenceMode(self.dev)
    except pynvml.NVMLError_NotSupported:
        self._persistence_mode = None

    try:
        self._auto_boost = pynvml.nvmlDeviceGetAutoBoostedClocksEnabled(self.dev)[0]  # returns [isEnabled, isDefaultEnabled]
    except pynvml.NVMLError_NotSupported:
        self._auto_boost = None

    try:
        self.gr_clock_default = pynvml.nvmlDeviceGetDefaultApplicationsClock(self.dev, pynvml.NVML_CLOCK_GRAPHICS)
        self.sm_clock_default = pynvml.nvmlDeviceGetDefaultApplicationsClock(self.dev, pynvml.NVML_CLOCK_SM)
        self.mem_clock_default = pynvml.nvmlDeviceGetDefaultApplicationsClock(self.dev, pynvml.NVML_CLOCK_MEM)
        self.supported_mem_clocks = pynvml.nvmlDeviceGetSupportedMemoryClocks(self.dev)

        # gather the supported gr clocks for each supported mem clock into a dict
        self.supported_gr_clocks = dict()
        for mem_clock in self.supported_mem_clocks:
            supported_gr_clocks = pynvml.nvmlDeviceGetSupportedGraphicsClocks(self.dev, mem_clock)
            self.supported_gr_clocks[mem_clock] = supported_gr_clocks
    except pynvml.NVMLError_NotSupported:
        self.gr_clock_default = None
        self.sm_clock_default = None
        self.mem_clock_default = None
        self.supported_mem_clocks = []
        self.supported_gr_clocks = dict()
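The clock tables gathered in this constructor are what make application-clock tuning possible later on. As a hypothetical continuation (the method below is ours, not part of the original class), a matching memory/graphics clock pair could be applied with nvmlDeviceSetApplicationsClocks; this call generally requires administrative privileges and may raise NVMLError_NotSupported or NVMLError_NoPermission on some devices.

def set_clocks(self, mem_clock, gr_clock):
    """Hypothetical helper: apply an application clock pair validated against the tables above."""
    if mem_clock not in self.supported_mem_clocks:
        raise ValueError("unsupported memory clock: %s" % mem_clock)
    if gr_clock not in self.supported_gr_clocks[mem_clock]:
        raise ValueError("unsupported graphics clock for this memory clock: %s" % gr_clock)
    pynvml.nvmlDeviceSetApplicationsClocks(self.dev, mem_clock, gr_clock)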
Example 6: _init_nvml
# Required module: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def _init_nvml(self):
    if self._load_nvidia_lib() == -1:
        return -1
    try:
        global pynvml
        import pip
        pip.main(['install', '--quiet', 'nvidia-ml-py'])
        import pynvml
        pynvml.nvmlInit()
        return 0
    except pynvml.NVMLError as err:
        logger.debug('Failed to initialize NVML: %s', err)
        return -1
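Calling pip.main from a running interpreter has been deprecated since pip 10, so this snippet may fail on current pip releases. A common replacement (our assumption, not taken from the original project) is to shell out instead, e.g. subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--quiet', 'nvidia-ml-py']), and only then import pynvml.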
Example 7: get_appropriate_cuda
# Required module: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def get_appropriate_cuda(task_scale='s'):
    if task_scale not in {'s', 'm', 'l'}:
        logger.info('task scale wrong!')
        exit(2)

    import pynvml
    pynvml.nvmlInit()
    total_cuda_num = pynvml.nvmlDeviceGetCount()

    # First pass: return the first GPU that is essentially idle.
    for i in range(total_cuda_num):
        logger.info(i)
        handle = pynvml.nvmlDeviceGetHandleByIndex(i)  # i is the GPU index
        memInfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
        utilizationInfo = pynvml.nvmlDeviceGetUtilizationRates(handle)
        logger.info('%s mem: %s util: %s', i, memInfo.used / memInfo.total, utilizationInfo.gpu)
        if memInfo.used / memInfo.total < 0.15 and utilizationInfo.gpu < 0.2:
            logger.info('%s %s', i, memInfo.used / memInfo.total)
            return 'cuda:' + str(i)

    # Second pass: fall back to the GPU with the most free memory above the task threshold.
    if task_scale == 's':
        max_memory = 2000
    elif task_scale == 'm':
        max_memory = 6000
    else:
        max_memory = 9000

    max_id = -1
    for i in range(total_cuda_num):
        handle = pynvml.nvmlDeviceGetHandleByIndex(i)  # i is the GPU index
        memInfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
        utilizationInfo = pynvml.nvmlDeviceGetUtilizationRates(handle)
        if max_memory < memInfo.free:
            max_memory = memInfo.free
            max_id = i

    if max_id == -1:
        logger.info('no appropriate gpu, wait!')
        exit(2)

    return 'cuda:' + str(max_id)

    # if memInfo.used / memInfo.total < 0.5:
    #     return
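Assuming PyTorch is available (an assumption on our part; the original only shows this helper), a hypothetical caller would consume the returned string directly:

device = torch.device(get_appropriate_cuda('m'))
model = model.to(device)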
Example 8: pynvml_context
# Required module: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def pynvml_context():
    # Generator wrapping NVML setup and teardown; presumably registered as a
    # pytest fixture or contextlib context manager in the original project.
    pynvml.nvmlInit()
    yield
    pynvml.nvmlShutdown()
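A minimal sketch of how such a generator can be used, assuming it is wrapped with contextlib.contextmanager (the decorator is not shown in the snippet):

import contextlib
import pynvml

@contextlib.contextmanager
def pynvml_context():
    pynvml.nvmlInit()
    yield
    pynvml.nvmlShutdown()

with pynvml_context():
    print("GPUs:", pynvml.nvmlDeviceGetCount())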
Example 9: run_logging_loop
# Required module: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def run_logging_loop(async_task, async_loop):
    # `nativestr` and `_logger` are helpers from the surrounding project.
    asyncio.set_event_loop(async_loop)
    pynvml.nvmlInit()
    logger = _logger()
    logger.info("Driver Version: {}".format(nativestr(pynvml.nvmlSystemGetDriverVersion())))
    async_loop.run_until_complete(async_task)
    logger.info("Shutting down driver")
    pynvml.nvmlShutdown()
Example 10: can_log_gpu_resources
# Required module: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def can_log_gpu_resources():
    if pynvml is None:
        return False

    try:
        pynvml.nvmlInit()
        return True
    except pynvml.NVMLError:
        return False
Example 11: get_gpu_metrics
# Required module: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def get_gpu_metrics() -> List:
    # `query_gpu` and `metrics_dict_to_list` are helpers from the surrounding project.
    try:
        pynvml.nvmlInit()
        device_count = pynvml.nvmlDeviceGetCount()
        results = []
        for i in range(device_count):
            handle = pynvml.nvmlDeviceGetHandleByIndex(i)
            results += metrics_dict_to_list(query_gpu(handle))
        return results
    except pynvml.NVMLError:
        logger.debug("Failed to collect gpu resources", exc_info=True)
        return []
Example 12: _monitor
# Required module: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def _monitor(self):
    pynvml.nvmlInit()
    self._find_gpu()
    current_sample = []
    while not self.should_stop:
        used_cpu = None
        used_cpumem = None
        used_gpu = None
        used_gpumem = None

        cpu_process = psutil.Process(self.pid)
        used_cpu = cpu_process.cpu_percent() / psutil.cpu_count()  # CPU utilization in %
        used_cpumem = cpu_process.memory_info().rss // (1024 * 1024)  # Memory use in MB

        gpu_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(self.gpu)
        for gpu_process in gpu_processes:
            if gpu_process.pid == self.pid:
                used_gpumem = gpu_process.usedGpuMemory // (1024 * 1024)  # GPU memory use in MB
                break

        if self.accounting_enabled:
            try:
                stats = pynvml.nvmlDeviceGetAccountingStats(self.gpu, self.pid)
                used_gpu = stats.gpuUtilization
            except pynvml.NVMLError:  # NVMLError_NotFound
                pass

        if not used_gpu:
            util = pynvml.nvmlDeviceGetUtilizationRates(self.gpu)
            used_gpu = util.gpu / len(gpu_processes)  # Approximate based on number of processes

        current_sample.append((used_cpu, used_cpumem, used_gpu, used_gpumem))
        time.sleep(self.sampling_rate)

    self.stats.append([round(sum(x) / len(x)) for x in zip(*current_sample)])
    pynvml.nvmlShutdown()
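Two details are worth noting about this sampler. Per-process accounting stats are only available when accounting mode has been enabled on the device (for example via nvmlDeviceSetAccountingMode or nvidia-smi's accounting-mode option, both of which require administrative privileges), which is why the code falls back to dividing the device-wide utilization by the number of compute processes. Also, if the monitored PID never appears in nvmlDeviceGetComputeRunningProcesses, used_gpumem stays None and the final averaging step would fail, so the target process is expected to actually touch the GPU.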
Example 13: _initialize
# Required module: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def _initialize(self, log=False):
    """ Initialize the library that will be returning stats for the system's GPU(s).

    For Nvidia (on Linux and Windows) the library is `pynvml`. For Nvidia (on macOS) the
    library is `pynvx`. For AMD `plaidML` is used.

    Parameters
    ----------
    log: bool, optional
        Whether the class should output information to the logger. There may be occasions
        where the logger has not yet been set up when this class is queried. Attempting to
        log in these instances will raise an error. If GPU stats are being queried prior to
        the logger being available then this parameter should be set to ``False``.
        Otherwise set to ``True``. Default: ``False``
    """
    if not self._initialized:
        if get_backend() == "amd":
            self._log("debug", "AMD Detected. Using plaidMLStats")
            loglevel = "INFO" if self._logger is None else self._logger.getEffectiveLevel()
            self._plaid = plaidlib(loglevel=loglevel, log=log)
        elif IS_MACOS:
            self._log("debug", "macOS Detected. Using pynvx")
            try:
                pynvx.cudaInit()
            except RuntimeError:
                self._initialized = True
                return
        else:
            try:
                self._log("debug", "OS is not macOS. Trying pynvml")
                pynvml.nvmlInit()
            except (pynvml.NVMLError_LibraryNotFound,  # pylint: disable=no-member
                    pynvml.NVMLError_DriverNotLoaded,  # pylint: disable=no-member
                    pynvml.NVMLError_NoPermission) as err:  # pylint: disable=no-member
                if plaidlib is not None:
                    self._log("debug", "pynvml errored. Trying plaidML")
                    self._plaid = plaidlib(log=log)
                else:
                    msg = ("There was an error reading from the Nvidia Machine Learning "
                           "Library. Either you do not have an Nvidia GPU (in which case "
                           "this warning can be ignored) or the most likely cause is "
                           "incorrectly installed drivers. If this is the case, please "
                           "remove and reinstall your Nvidia drivers before reporting. "
                           "Original Error: {}".format(str(err)))
                    self._log("warning", msg)
                    self._initialized = True
                    return
            except Exception as err:  # pylint: disable=broad-except
                msg = ("An unhandled exception occurred loading pynvml. "
                       "Original error: {}".format(str(err)))
                if self._logger:
                    self._logger.error(msg)
                else:
                    print(msg)
                self._initialized = True
                return
        self._initialized = True
        self._get_device_count()
        self._get_active_devices()
        self._get_handles()