

Python pynvml.nvmlInit Method Code Examples

This article collects typical usage examples of the pynvml.nvmlInit method in Python. If you are wondering how pynvml.nvmlInit is used, what it does in practice, or what working examples look like, the curated code examples below may help. You can also explore further usage examples from the pynvml module itself.


The following presents 13 code examples of the pynvml.nvmlInit method, ordered by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
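
Before working through the individual examples, the listing below is a minimal, self-contained sketch of the nvmlInit/nvmlShutdown lifecycle that every example in this article follows: initialize NVML once, query per-device handles, and shut the library down when finished. The device loop and the printed fields are illustrative only and are not taken from any of the projects below.

import pynvml

pynvml.nvmlInit()                          # must be called before any other pynvml query
try:
    count = pynvml.nvmlDeviceGetCount()
    for index in range(count):
        handle = pynvml.nvmlDeviceGetHandleByIndex(index)
        mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
        util = pynvml.nvmlDeviceGetUtilizationRates(handle)
        print(index, pynvml.nvmlDeviceGetName(handle),
              mem.used, mem.total, util.gpu)
finally:
    pynvml.nvmlShutdown()                  # always release NVML resources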

Example 1: getFreeId

# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def getFreeId():
    import pynvml 

    pynvml.nvmlInit()
    def getFreeRatio(id):
        handle = pynvml.nvmlDeviceGetHandleByIndex(id)
        use = pynvml.nvmlDeviceGetUtilizationRates(handle)
        ratio = 0.5 * (float(use.gpu) + float(use.memory))  # average of GPU and memory utilization (percent)
        return ratio

    deviceCount = pynvml.nvmlDeviceGetCount()
    available = []
    for i in range(deviceCount):
        if getFreeRatio(i)<70:
            available.append(i)
    gpus = ''
    for g in available:
        gpus = gpus+str(g)+','
    gpus = gpus[:-1]
    return gpus 
Developer: uci-cbcl, Project: DeepLung, Lines: 22, Source: utils.py
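
A usage note (not part of the DeepLung source above): helpers like getFreeId are typically used to populate the CUDA_VISIBLE_DEVICES environment variable before a CUDA framework initializes, along the lines of the following sketch.

import os

os.environ['CUDA_VISIBLE_DEVICES'] = getFreeId()  # e.g. '0,2'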

Example 2: getGPUUsage

# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def getGPUUsage():
    try:
        pynvml.nvmlInit()
        count = pynvml.nvmlDeviceGetCount()
        if count == 0:
            return None

        result = {"driver": pynvml.nvmlSystemGetDriverVersion(),
                  "gpu_count": int(count)
                  }
        gpuData = []
        for i in range(count):
            handle = pynvml.nvmlDeviceGetHandleByIndex(i)
            mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
            # memory sizes reported in GB
            gpuData.append({"device_num": i,
                            "name": pynvml.nvmlDeviceGetName(handle),
                            "total": round(float(mem.total) / 1000000000, 2),
                            "used": round(float(mem.used) / 1000000000, 2)})

        result["devices"] = jsonpickle.encode(gpuData, unpicklable=False)
    except Exception as e:
        result = {"driver": "No GPU!", "gpu_count": 0, "devices": []}

    return result 
Developer: tech-quantum, Project: sia-cog, Lines: 25, Source: sysinfo.py

Example 3: gpu_info

# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def gpu_info(self):
        # pip install nvidia-ml-py3
        if len(self.gpu_ids) > 0 and torch.cuda.is_available():
            try:
                import pynvml
                pynvml.nvmlInit()
                self.config_dic['gpu_driver_version'] = pynvml.nvmlSystemGetDriverVersion()
                for gpu_id in self.gpu_ids:
                    handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
                    gpu_id_name = "gpu%s" % gpu_id
                    mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
                    gpu_utilize = pynvml.nvmlDeviceGetUtilizationRates(handle)
                    self.config_dic['%s_device_name' % gpu_id_name] = pynvml.nvmlDeviceGetName(handle)
                    self.config_dic['%s_mem_total' % gpu_id_name] = gpu_mem_total = round(mem_info.total / 1024 ** 3, 2)
                    self.config_dic['%s_mem_used' % gpu_id_name] = gpu_mem_used = round(mem_info.used / 1024 ** 3, 2)
                    # self.config_dic['%s_mem_free' % gpu_id_name] = gpu_mem_free = mem_info.free // 1024 ** 2
                    self.config_dic['%s_mem_percent' % gpu_id_name] = round((gpu_mem_used / gpu_mem_total) * 100, 1)
                    self._set_dict_smooth('%s_utilize_gpu' % gpu_id_name, gpu_utilize.gpu, 0.8)
                    # self.config_dic['%s_utilize_gpu' % gpu_id_name] = gpu_utilize.gpu
                    # self.config_dic['%s_utilize_memory' % gpu_id_name] = gpu_utilize.memory

                pynvml.nvmlShutdown()
            except Exception as e:
                print(e) 
Developer: dingguanglei, Project: jdit, Lines: 26, Source: super.py

Example 4: getFreeId

# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def getFreeId():
    import pynvml

    pynvml.nvmlInit()

    def getFreeRatio(id):
        handle = pynvml.nvmlDeviceGetHandleByIndex(id)
        use = pynvml.nvmlDeviceGetUtilizationRates(handle)
        ratio = 0.5 * (float(use.gpu) + float(use.memory))  # average of GPU and memory utilization (percent)
        return ratio

    deviceCount = pynvml.nvmlDeviceGetCount()
    available = []
    for i in range(deviceCount):
        if getFreeRatio(i) < 70:
            available.append(i)
    gpus = ''
    for g in available:
        gpus = gpus + str(g) + ','
    gpus = gpus[:-1]
    return gpus 
Developer: HLIG, Project: HUAWEIOCR-2019, Lines: 23, Source: utils.py

Example 5: __init__

# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def __init__(self, id=0):
        """Create object to control device using NVML"""

        pynvml.nvmlInit()
        self.dev = pynvml.nvmlDeviceGetHandleByIndex(id)

        try:
            self._pwr_limit = pynvml.nvmlDeviceGetPowerManagementLimit(self.dev)
            self.pwr_constraints = pynvml.nvmlDeviceGetPowerManagementLimitConstraints(self.dev)
        except pynvml.NVMLError_NotSupported:
            self._pwr_limit = None
            self.pwr_constraints = [1, 0] # inverted range to make all range checks fail

        try:
            self._persistence_mode = pynvml.nvmlDeviceGetPersistenceMode(self.dev)
        except pynvml.NVMLError_NotSupported:
            self._persistence_mode = None

        try:
            self._auto_boost = pynvml.nvmlDeviceGetAutoBoostedClocksEnabled(self.dev)[0]  # returns [isEnabled, isDefaultEnabled]
        except pynvml.NVMLError_NotSupported:
            self._auto_boost = None

        try:
            self.gr_clock_default = pynvml.nvmlDeviceGetDefaultApplicationsClock(self.dev, pynvml.NVML_CLOCK_GRAPHICS)
            self.sm_clock_default = pynvml.nvmlDeviceGetDefaultApplicationsClock(self.dev, pynvml.NVML_CLOCK_SM)
            self.mem_clock_default = pynvml.nvmlDeviceGetDefaultApplicationsClock(self.dev, pynvml.NVML_CLOCK_MEM)

            self.supported_mem_clocks = pynvml.nvmlDeviceGetSupportedMemoryClocks(self.dev)

            # gather the supported gr clocks for each supported mem clock into a dict
            self.supported_gr_clocks = dict()
            for mem_clock in self.supported_mem_clocks:
                supported_gr_clocks = pynvml.nvmlDeviceGetSupportedGraphicsClocks(self.dev, mem_clock)
                self.supported_gr_clocks[mem_clock] = supported_gr_clocks
        except pynvml.NVMLError_NotSupported:
            self.gr_clock_default = None
            self.sm_clock_default = None
            self.mem_clock_default = None
            self.supported_mem_clocks = []
            self.supported_gr_clocks = dict() 
Developer: benvanwerkhoven, Project: kernel_tuner, Lines: 43, Source: nvml.py

Example 6: _init_nvml

# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def _init_nvml(self):
        if self._load_nvidia_lib() == -1:
            return -1

        try:
            global pynvml
            import pip
            pip.main(['install', '--quiet', 'nvidia-ml-py'])
            import pynvml
            pynvml.nvmlInit()
            return 0
        except pynvml.NVMLError as err:
            logger.debug('Failed to initialize NVML: %s', err)
            return -1
Developer: cloudviz, Project: agentless-system-crawler, Lines: 16, Source: gpu_host_crawler.py

Example 7: get_appropriate_cuda

# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def get_appropriate_cuda(task_scale='s'):
    if task_scale not in {'s','m','l'}:
        logger.info('task scale wrong!')
        exit(2)
    import pynvml
    pynvml.nvmlInit()
    total_cuda_num = pynvml.nvmlDeviceGetCount()
    for i in range(total_cuda_num):
        logger.info(i)
        handle = pynvml.nvmlDeviceGetHandleByIndex(i)  # i is the GPU index
        memInfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
        utilizationInfo = pynvml.nvmlDeviceGetUtilizationRates(handle)
        logger.info('%s mem: %s util: %s', i, memInfo.used / memInfo.total, utilizationInfo.gpu)
        if memInfo.used / memInfo.total < 0.15 and utilizationInfo.gpu < 20:  # gpu utilization is a percentage
            logger.info('%s %s', i, memInfo.used / memInfo.total)
            return 'cuda:'+str(i)

    if task_scale=='s':
        max_memory=2000
    elif task_scale=='m':
        max_memory=6000
    else:
        max_memory = 9000

    max_id = -1
    for i in range(total_cuda_num):
        handle = pynvml.nvmlDeviceGetHandleByIndex(i)  # i is the GPU index
        memInfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
        utilizationInfo = pynvml.nvmlDeviceGetUtilizationRates(handle)
        if max_memory < memInfo.free // (1024 ** 2):  # compare free memory in MB against the task threshold
            max_memory = memInfo.free // (1024 ** 2)
            max_id = i

    if max_id == -1:
        logger.info('no appropriate gpu, wait!')
        exit(2)

    return 'cuda:'+str(max_id)

        # if memInfo.used / memInfo.total < 0.5:
        #     return 
Developer: fastnlp, Project: fastNLP, Lines: 43, Source: utils.py

Example 8: pynvml_context

# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def pynvml_context():
    pynvml.nvmlInit()
    yield
    pynvml.nvmlShutdown() 
Developer: msalvaris, Project: gpu_monitor, Lines: 6, Source: gpu_interface.py
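
As shown, pynvml_context is a plain generator; to use it in a with statement it has to be wrapped with contextlib.contextmanager, which the source project presumably does outside the lines quoted here. A minimal sketch under that assumption:

import contextlib

import pynvml

@contextlib.contextmanager
def pynvml_context():
    # Initialize NVML, hand control back to the with-block, then shut down.
    pynvml.nvmlInit()
    yield
    pynvml.nvmlShutdown()

with pynvml_context():
    print(pynvml.nvmlDeviceGetCount())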

Example 9: run_logging_loop

# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def run_logging_loop(async_task, async_loop):
    asyncio.set_event_loop(async_loop)
    pynvml.nvmlInit()
    logger = _logger()
    logger.info("Driver Version: {}".format(nativestr(pynvml.nvmlSystemGetDriverVersion())))
    async_loop.run_until_complete(async_task)
    logger.info("Shutting down driver")
    pynvml.nvmlShutdown() 
Developer: msalvaris, Project: gpu_monitor, Lines: 10, Source: gpu_interface.py

Example 10: can_log_gpu_resources

# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def can_log_gpu_resources():
    if pynvml is None:
        return False

    try:
        pynvml.nvmlInit()
        return True
    except pynvml.NVMLError:
        return False 
Developer: polyaxon, Project: polyaxon, Lines: 11, Source: gpu_processor.py

Example 11: get_gpu_metrics

# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def get_gpu_metrics() -> List:
    try:
        pynvml.nvmlInit()
        device_count = pynvml.nvmlDeviceGetCount()
        results = []

        for i in range(device_count):
            handle = pynvml.nvmlDeviceGetHandleByIndex(i)
            results += metrics_dict_to_list(query_gpu(handle))
        return results
    except pynvml.NVMLError:
        logger.debug("Failed to collect gpu resources", exc_info=True)
        return [] 
Developer: polyaxon, Project: polyaxon, Lines: 15, Source: gpu_processor.py

Example 12: _monitor

# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def _monitor(self):
        pynvml.nvmlInit()
        self._find_gpu()
        current_sample = []
        while not self.should_stop:
            used_cpu = None
            used_cpumem = None
            used_gpu = None
            used_gpumem = None
            
            cpu_process = psutil.Process(self.pid)
            used_cpu = cpu_process.cpu_percent() / psutil.cpu_count() # CPU utilization in %
            used_cpumem = cpu_process.memory_info().rss // (1024*1024) # Memory use in MB

            gpu_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(self.gpu)
            for gpu_process in gpu_processes:
                if gpu_process.pid == self.pid:
                    used_gpumem = gpu_process.usedGpuMemory // (1024*1024) # GPU memory use in MB
                    break

            if self.accounting_enabled:
                try:
                    stats = pynvml.nvmlDeviceGetAccountingStats(self.gpu, self.pid)
                    used_gpu = stats.gpuUtilization
                except pynvml.NVMLError: # NVMLError_NotFound
                    pass

            if not used_gpu:
                util = pynvml.nvmlDeviceGetUtilizationRates(self.gpu)
                used_gpu = util.gpu / len(gpu_processes) # Approximate based on number of processes

            current_sample.append((used_cpu, used_cpumem, used_gpu, used_gpumem))

            time.sleep(self.sampling_rate)

        self.stats.append([round(sum(x) / len(x)) for x in zip(*current_sample)])
        pynvml.nvmlShutdown() 
Developer: vlimant, Project: mpi_learn, Lines: 39, Source: monitor.py

Example 13: _initialize

# Required import: import pynvml [as alias]
# Or: from pynvml import nvmlInit [as alias]
def _initialize(self, log=False):
        """ Initialize the library that will be returning stats for the system's GPU(s).
        For Nvidia (on Linux and Windows) the library is `pynvml`. For Nvidia (on macOS) the
        library is `pynvx`. For AMD `plaidML` is used.

        Parameters
        ----------
        log: bool, optional
            Whether the class should output information to the logger. There may be occasions where
            the logger has not yet been set up when this class is queried. Attempting to log in
            these instances will raise an error. If GPU stats are being queried prior to the
            logger being available then this parameter should be set to ``False``. Otherwise set
            to ``True``. Default: ``False``
        """
        if not self._initialized:
            if get_backend() == "amd":
                self._log("debug", "AMD Detected. Using plaidMLStats")
                loglevel = "INFO" if self._logger is None else self._logger.getEffectiveLevel()
                self._plaid = plaidlib(loglevel=loglevel, log=log)
            elif IS_MACOS:
                self._log("debug", "macOS Detected. Using pynvx")
                try:
                    pynvx.cudaInit()
                except RuntimeError:
                    self._initialized = True
                    return
            else:
                try:
                    self._log("debug", "OS is not macOS. Trying pynvml")
                    pynvml.nvmlInit()
                except (pynvml.NVMLError_LibraryNotFound,  # pylint: disable=no-member
                        pynvml.NVMLError_DriverNotLoaded,  # pylint: disable=no-member
                        pynvml.NVMLError_NoPermission) as err:  # pylint: disable=no-member
                    if plaidlib is not None:
                        self._log("debug", "pynvml errored. Trying plaidML")
                        self._plaid = plaidlib(log=log)
                    else:
                        msg = ("There was an error reading from the Nvidia Machine Learning "
                               "Library. Either you do not have an Nvidia GPU (in which case "
                               "this warning can be ignored) or the most likely cause is "
                               "incorrectly installed drivers. If this is the case, Please remove "
                               "and reinstall your Nvidia drivers before reporting."
                               "Original Error: {}".format(str(err)))
                        self._log("warning", msg)
                        self._initialized = True
                        return
                except Exception as err:  # pylint: disable=broad-except
                    msg = ("An unhandled exception occured loading pynvml. "
                           "Original error: {}".format(str(err)))
                    if self._logger:
                        self._logger.error(msg)
                    else:
                        print(msg)
                    self._initialized = True
                    return
            self._initialized = True
            self._get_device_count()
            self._get_active_devices()
            self._get_handles() 
Developer: deepfakes, Project: faceswap, Lines: 61, Source: gpu_stats.py


Note: The pynvml.nvmlInit examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code, and do not republish without permission.