

Python GPUtil.getGPUs Method Code Examples

This article collects and summarizes typical code examples of the GPUtil.getGPUs method in Python. If you are wondering what GPUtil.getGPUs does, how to call it, or where to find real-world usage, the curated examples below may help. You can also explore further usage examples of the GPUtil package itself.


The following presents 15 code examples of GPUtil.getGPUs, ordered by popularity by default.
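Before the examples, here is a minimal sketch (assuming at least one NVIDIA GPU and a working nvidia-smi) of what GPUtil.getGPUs returns; the attributes shown are the ones used throughout the examples below.

import GPUtil

# Each returned object describes one NVIDIA GPU visible to nvidia-smi.
for gpu in GPUtil.getGPUs():
    print("GPU %d (%s), driver %s" % (gpu.id, gpu.name, gpu.driver))
    print("  load:        %.1f %%" % (gpu.load * 100))        # compute utilization, stored as a 0-1 fraction
    print("  memory util: %.1f %%" % (gpu.memoryUtil * 100))   # memoryUsed / memoryTotal, 0-1 fraction
    print("  memory:      %.0f / %.0f MB (%.0f MB free)"
          % (gpu.memoryUsed, gpu.memoryTotal, gpu.memoryFree))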

Example 1: get_available_device

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def get_available_device(max_memory=0.8):
    '''
    Select an available device based on the memory utilization of each GPU.
    :param max_memory: maximum memory utilization ratio for a GPU to still be considered available
    :return: id of an available GPU; -1 means no GPU is available (use the CPU instead);
             0 is returned if the GPUtil package is not installed
    '''
    try:
        import GPUtil
    except ModuleNotFoundError:
        return 0

    GPUs = GPUtil.getGPUs()
    freeMemory = 0
    available = -1
    for GPU in GPUs:
        if GPU.memoryUtil > max_memory:
            continue
        if GPU.memoryFree >= freeMemory:
            freeMemory = GPU.memoryFree
            available = GPU.id

    return available 
Developer: Esri, Project: raster-deep-learning, Lines: 25, Source file: prf_utils.py
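A possible usage sketch for the helper above (restricting visibility via CUDA_VISIBLE_DEVICES is an assumption, not part of the original project):

import os

device_id = get_available_device(max_memory=0.8)
if device_id >= 0:
    # Expose only the selected GPU; must be set before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
else:
    # No suitable GPU: hide all devices and fall back to CPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = ""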

Example 2: _read_utilization

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def _read_utilization(self):
        with self.lock:
            if psutil is not None:
                self.values["cpu_util_percent"].append(
                    float(psutil.cpu_percent(interval=None)))
                self.values["ram_util_percent"].append(
                    float(getattr(psutil.virtual_memory(), "percent")))
            if GPUtil is not None:
                gpu_list = []
                try:
                    gpu_list = GPUtil.getGPUs()
                except Exception:
                    logger.debug("GPUtil failed to retrieve GPUs.")
                for gpu in gpu_list:
                    self.values["gpu_util_percent" + str(gpu.id)].append(
                        float(gpu.load))
                    self.values["vram_util_percent" + str(gpu.id)].append(
                        float(gpu.memoryUtil)) 
Developer: ray-project, Project: ray, Lines: 20, Source file: util.py
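The method above belongs to Ray's resource monitor; a standalone sketch of the same sampling pattern (the names and the percent conversion here are my own, not Ray's) could look like this:

import time
from collections import defaultdict

import GPUtil
import psutil

def sample_utilization(values, interval=1.0, steps=5):
    # Append CPU, RAM, and per-GPU utilization samples (in percent) to `values`.
    for _ in range(steps):
        values["cpu_util_percent"].append(psutil.cpu_percent(interval=None))
        values["ram_util_percent"].append(psutil.virtual_memory().percent)
        for gpu in GPUtil.getGPUs():
            values["gpu_util_percent" + str(gpu.id)].append(gpu.load * 100)
            values["vram_util_percent" + str(gpu.id)].append(gpu.memoryUtil * 100)
        time.sleep(interval)

samples = defaultdict(list)
sample_utilization(samples)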

Example 3: GetDefaultScheduler

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def GetDefaultScheduler() -> GpuScheduler:
  gpus = GPUtil.getGPUs()
  if not gpus:
    raise NoGpuAvailable("No GPUs available")

  if os.environ.get("TEST_TARGET") and os.environ.get("TEST_WITH_GPU") != "1":
    raise NoGpuAvailable("GPUs disabled for tests")

  app.Log(
    2, "Creating default scheduler for %s", humanize.Plural(len(gpus), "GPU")
  )
  return GpuScheduler(
    {gpu: fasteners.InterProcessLock(_LOCK_DIR / str(gpu.id)) for gpu in gpus}
  )


# This function is memoized since, within one process, the same inter-process lock can otherwise be acquired twice. 
Developer: ChrisCummins, Project: clgen, Lines: 19, Source file: gpu_scheduler.py

Example 4: gpuname

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def gpuname():
    """Returns the model name of the first available GPU"""
    try:
        gpus = GPUtil.getGPUs()
    except Exception:
        LOGGER.warning("Unable to detect GPU model. Is your GPU configured? Are you running with nvidia-docker?")
        return "UNKNOWN"
    if len(gpus) == 0:
        raise ValueError("No GPUs detected in the system")
    return gpus[0].name 
Developer: albarji, Project: neural-style-docker, Lines: 12, Source file: algorithms.py

Example 5: test_gpustorage

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def test_gpustorage(self):
        # Running basics tutorial problem
        with open("tutorials/data/basics_tutorial.pkl", "rb") as f:
            X, Y, L, D = pickle.load(f)

        Xs, Ys, Ls, Ds = split_data(
            X, Y, L, D, splits=[0.8, 0.1, 0.1], stratify_by=Y, seed=123
        )

        label_model = LabelModel(k=2, seed=123)
        label_model.train_model(Ls[0], Y_dev=Ys[1], n_epochs=500, log_train_every=25)
        Y_train_ps = label_model.predict_proba(Ls[0])

        # Creating a really large end model to use lots of memory
        end_model = EndModel([1000, 100000, 2], seed=123, device="cuda")

        # Getting initial GPU storage use
        initial_gpu_mem = GPUtil.getGPUs()[0].memoryUsed

        # Training model
        end_model.train_model(
            (Xs[0], Y_train_ps),
            valid_data=(Xs[1], Ys[1]),
            l2=0.1,
            batch_size=256,
            n_epochs=3,
            log_train_every=1,
            validation_metric="f1",
        )

        # Final GPU storage use
        final_gpu_mem = GPUtil.getGPUs()[0].memoryUsed

        # On a Titan X, this model uses ~ 3 GB of memory
        gpu_mem_difference = final_gpu_mem - initial_gpu_mem

        self.assertGreater(gpu_mem_difference, 1000) 
Developer: HazyResearch, Project: metal, Lines: 39, Source file: test_gpu.py
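The before/after pattern with GPUtil.getGPUs()[0].memoryUsed also works outside a test harness; a minimal sketch (assuming PyTorch and a single CUDA device; the numbers are illustrative):

import GPUtil
import torch

before = GPUtil.getGPUs()[0].memoryUsed      # MB, as reported by nvidia-smi

x = torch.randn(4096, 4096, device="cuda")   # ~64 MB of float32 data
torch.cuda.synchronize()

after = GPUtil.getGPUs()[0].memoryUsed
# Note: the first allocation also creates the CUDA context, which by itself
# accounts for several hundred MB of the difference.
print("GPU memory increase: %.0f MB" % (after - before))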

Example 6: _get_device_map

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def _get_device_map(self):
        self.logger.info('get devices')
        run_on_gpu = False
        device_map = [-1] * self.num_worker
        if not self.args.cpu:
            try:
                import GPUtil
                num_all_gpu = len(GPUtil.getGPUs())
                avail_gpu = GPUtil.getAvailable(order='memory', limit=min(num_all_gpu, self.num_worker),
                                                maxMemory=0.9, maxLoad=0.9)
                num_avail_gpu = len(avail_gpu)

                if num_avail_gpu >= self.num_worker:
                    run_on_gpu = True
                elif 0 < num_avail_gpu < self.num_worker:
                    self.logger.warning('only %d out of %d GPU(s) is available/free, but "-num_worker=%d"' %
                                        (num_avail_gpu, num_all_gpu, self.num_worker))
                    if not self.args.device_map:
                        self.logger.warning('multiple workers will be allocated to one GPU, '
                                            'may not scale well and may raise out-of-memory')
                    else:
                        self.logger.warning('workers will be allocated based on "-device_map=%s", '
                                            'may not scale well and may raise out-of-memory' % self.args.device_map)
                    run_on_gpu = True
                else:
                    self.logger.warning('no GPU available, fall back to CPU')

                if run_on_gpu:
                    device_map = ((self.args.device_map or avail_gpu) * self.num_worker)[: self.num_worker]
            except FileNotFoundError:
                self.logger.warning('nvidia-smi is missing, often means no gpu on this machine. '
                                    'fall back to cpu!')
        self.logger.info('device map: \n\t\t%s' % '\n\t\t'.join(
            'worker %2d -> %s' % (w_id, ('gpu %2d' % g_id) if g_id >= 0 else 'cpu') for w_id, g_id in
            enumerate(device_map)))
        return device_map 
Developer: hanxiao, Project: bert-as-service, Lines: 38, Source file: __init__.py
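GPUtil.getAvailable, used above to rank free devices, can also be called directly; a minimal sketch (the thresholds are illustrative):

import GPUtil

# IDs of GPUs under 90% load and 90% memory use, least memory-utilized first.
free_ids = GPUtil.getAvailable(order='memory', limit=4, maxLoad=0.9, maxMemory=0.9)
print("available GPU ids:", free_ids)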

Example 7: __log_gpus

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def __log_gpus(self):
        for i, gpu in enumerate(GPUtil.getGPUs()):
            if i in self.gpu_ids:
                # self.writer.add_scalar('gpus/%d/%s' % (gpu.id, 'memoryTotal'), gpu.memoryTotal, step)
                # self.writer.add_scalar('gpus/%d/%s' % (gpu.id, 'memoryUsed'), gpu.memoryUsed, step)
                # self.writer.add_scalar('gpus/%d/%s' % (gpu.id, 'memoryFree'), gpu.memoryFree, step)
                self.writer.add_scalar('gpus/%d/%s' % (gpu.id, 'memoryUtil'), gpu.memoryUtil, self.step_writer)
            self.writer.add_scalar('gpus/recentMaxUtil', self.max_recent_util, self.step_writer)
        self.step_writer += 1 
Developer: antoyang, Project: NAS-Benchmark, Lines: 11, Source file: gpu_thread.py

Example 8: __update_recent

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def __update_recent(self):
        for i, gpu in enumerate(GPUtil.getGPUs()):
            if i in self.gpu_ids:
                self.max_recent_util = max(self.max_recent_util, gpu.memoryUtil)
        self.step_recent += 1 
Developer: antoyang, Project: NAS-Benchmark, Lines: 7, Source file: gpu_thread.py

Example 9: gpu_info

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def gpu_info():
    """Return a list of namedtuples representing attributes of each GPU
    device.
    """

    GPUInfo = namedtuple('GPUInfo', ['name', 'driver', 'totalmem', 'freemem'])
    gpus = GPUtil.getGPUs()
    info = []
    for g in gpus:
        info.append(GPUInfo(g.name, g.driver, g.memoryTotal, g.memoryFree))
    return info 
Developer: bwohlberg, Project: sporco, Lines: 13, Source file: _gputil.py

Example 10: gpu_load

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def gpu_load(wproc=0.5, wmem=0.5):
    """Return a list of namedtuples representing the current load for
    each GPU device. The processor and memory loads are fractions
    between 0 and 1. The weighted load represents a weighted average
    of processor and memory loads using the parameters `wproc` and
    `wmem` respectively.
    """

    GPULoad = namedtuple('GPULoad', ['processor', 'memory', 'weighted'])
    gpus = GPUtil.getGPUs()
    load = []
    for g in gpus:
        wload = (wproc * g.load + wmem * g.memoryUtil) / (wproc + wmem)
        load.append(GPULoad(g.load, g.memoryUtil, wload))
    return load 
Developer: bwohlberg, Project: sporco, Lines: 17, Source file: _gputil.py
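A hedged usage sketch combining the two sporco helpers above (the selection logic is my own illustration, not part of sporco):

infos = gpu_info()
loads = gpu_load(wproc=0.5, wmem=0.5)

for i, (inf, ld) in enumerate(zip(infos, loads)):
    print("GPU %d: %s, %.0f/%.0f MB free, weighted load %.2f"
          % (i, inf.name, inf.freemem, inf.totalmem, ld.weighted))

if loads:
    # Pick the device with the smallest weighted load.
    best = min(range(len(loads)), key=lambda i: loads[i].weighted)
    print("least loaded GPU:", best)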

Example 11: __init__

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def __init__(self, delay):
        super(Monitor, self).__init__()
        self.stopped = False
        self.delay = delay  # Time between calls to GPUtil
        self.recorded_gpu = []
        self.recorded_cpu = []
        self.recorded_memory = []
        gpus = GPUtil.getGPUs()
        self.gpu = gpus.pop() if gpus else None  # last GPU, or None if no GPU is present
        self.start() 
Developer: microsoft, Project: OLive, Lines: 11, Source file: monitor.py

Example 12: parse

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def parse(self):
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()

        # === processing options === begin ===
        # determine which GPU to use
        # auto, throw exception when no GPU is available
        if self.opt.gpu_ids == 'auto':
            GPUtil.showUtilization()
            deviceIDs = GPUtil.getAvailable(order='first', limit=4, maxLoad=0.5, maxMemory=0.5,
                                            excludeID=[], excludeUUID=[])
            deviceID_costs = [-1*x for x in deviceIDs]
            # reorder the deviceID according to the computational capacity, i.e., total memory size
            # memory size is divided by 1000 without remainder, to avoid small fluctuation
            gpus = GPUtil.getGPUs()
            memory_size_costs = [-1*(gpu.memoryTotal//1000) for gpu in gpus if (gpu.load < 0.5 and gpu.memoryUtil < 0.5)]
            names = [gpu.name for gpu in gpus if (gpu.load < 0.5 and gpu.memoryUtil < 0.5)]
            sorted_idx = np.lexsort((deviceID_costs, memory_size_costs))

            self.opt.gpu_ids = [deviceIDs[sorted_idx[0]]]
            print('### selected GPU PCI_ID: %d, Name: %s ###' % (self.opt.gpu_ids[0], names[sorted_idx[0]]))
        else:
            # split into integer list, manual or multi-gpu
            self.opt.gpu_ids = list(map(int, self.opt.gpu_ids.split(',')))

        self.opt.device = torch.device("cuda:%d" % self.opt.gpu_ids[0] if (torch.cuda.is_available() and len(self.opt.gpu_ids) >= 1) else "cpu")
        # cuda.select_device(self.opt.gpu_ids[0])
        # torch.cuda.set_device(self.opt.gpu_ids[0])

        # set unique display_id
        self.opt.display_id = int(self.opt.display_id + 100 * self.opt.gpu_ids[0])

        # assure that 2d & 3d rot are not conflicting
        assert ((self.opt.rot_3d & self.opt.rot_horizontal) == False)
        # === processing options === end ===

        args = vars(self.opt)

        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')

        # save to the disk
        expr_dir =  os.path.join(self.opt.checkpoints_dir, self.opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
        return self.opt 
Developer: lijx10, Project: USIP, Lines: 56, Source file: options_detector.py
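The 'auto' branch above amounts to: among GPUs under 50% load and 50% memory utilization, prefer the one with the most total memory. A compact sketch of that idea (not the project's exact ordering logic):

import GPUtil

candidates = [g for g in GPUtil.getGPUs() if g.load < 0.5 and g.memoryUtil < 0.5]
if not candidates:
    raise RuntimeError("no idle GPU available")
# memoryTotal is truncated to 1000-MB steps so that small fluctuations in the
# reported size do not change the ordering.
best = max(candidates, key=lambda g: g.memoryTotal // 1000)
print("selected GPU %d: %s" % (best.id, best.name))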

Example 13: process_opts

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def process_opts(self):
        assert self.opt is not None

        # === processing options === begin ===
        # determine which GPU to use
        # auto, throw exception when no GPU is available
        if self.opt.gpu_ids == 'auto':
            GPUtil.showUtilization()
            deviceIDs = GPUtil.getAvailable(order='first', limit=4, maxLoad=0.5, maxMemory=0.5,
                                            excludeID=[], excludeUUID=[])
            deviceID_costs = [-1 * x for x in deviceIDs]
            # reorder the deviceID according to the computational capacity, i.e., total memory size
            # memory size is divided by 1000 without remainder, to avoid small fluctuation
            gpus = GPUtil.getGPUs()
            memory_size_costs = [-1 * (gpu.memoryTotal // 1000) for gpu in gpus if
                                 (gpu.load < 0.5 and gpu.memoryUtil < 0.5)]
            names = [gpu.name for gpu in gpus if (gpu.load < 0.5 and gpu.memoryUtil < 0.5)]
            sorted_idx = np.lexsort((deviceID_costs, memory_size_costs))

            self.opt.gpu_ids = [deviceIDs[sorted_idx[0]]]
            print('### selected GPU PCI_ID: %d, Name: %s ###' % (self.opt.gpu_ids[0], names[sorted_idx[0]]))
        else:
            if type(self.opt.gpu_ids) == str:
                # split into integer list, manual or multi-gpu
                self.opt.gpu_ids = list(map(int, self.opt.gpu_ids.split(',')))

        self.opt.device = torch.device(
            "cuda:%d" % self.opt.gpu_ids[0] if (torch.cuda.is_available() and len(self.opt.gpu_ids) >= 1) else "cpu")
        # cuda.select_device(self.opt.gpu_ids[0])
        # torch.cuda.set_device(self.opt.gpu_ids[0])

        # set unique display_id
        self.opt.display_id = int(self.opt.display_id + 100 * self.opt.gpu_ids[0])

        # assure that 2d & 3d rot are not conflicting
        assert ((self.opt.rot_3d & self.opt.rot_horizontal) == False)
        # === processing options === end ===

        args = vars(self.opt)

        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')

        # save to the disk
        expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n') 
Developer: lijx10, Project: USIP, Lines: 56, Source file: options_detector.py

Example 14: worker_usage

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def worker_usage(session: Session, logger):
    provider = ComputerProvider(session)
    docker_provider = DockerProvider(session)

    computer = socket.gethostname()
    docker = docker_provider.get(computer, DOCKER_IMG)
    usages = []

    count = int(10 / WORKER_USAGE_INTERVAL)
    count = max(1, count)

    for _ in range(count):
        # noinspection PyProtectedMember
        memory = dict(psutil.virtual_memory()._asdict())

        try:
            gpus = GPUtil.getGPUs()
        except ValueError as err:
            logger.info(f"Active GPUs not found: {err}")
            gpus = []

        usage = {
            'cpu': psutil.cpu_percent(),
            'disk': disk(ROOT_FOLDER)[1],
            'memory': memory['percent'],
            'gpu': [
                {
                    'memory': 0 if np.isnan(
                        g.memoryUtil) else g.memoryUtil * 100,
                    'load': 0 if np.isnan(g.load) else g.load * 100
                } for g in gpus
            ]
        }

        provider.current_usage(computer, usage)
        usages.append(usage)
        docker.last_activity = now()
        docker_provider.update()

        time.sleep(WORKER_USAGE_INTERVAL)

    usage = json.dumps({'mean': dict_func(usages, np.mean)})
    provider.add(ComputerUsage(computer=computer, usage=usage, time=now())) 
Developer: lightforever, Project: mlcomp, Lines: 45, Source file: __main__.py
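GPUtil can report NaN for load or memory utilization (for instance when nvidia-smi returns a value it cannot parse), which is why the snippet above guards with np.isnan. A minimal standalone version of that guard (the function name is my own):

import numpy as np
import GPUtil

def safe_gpu_usage():
    # Per-GPU (memory %, load %) with NaN readings mapped to 0.
    usage = []
    for g in GPUtil.getGPUs():
        usage.append({
            'memory': 0 if np.isnan(g.memoryUtil) else g.memoryUtil * 100,
            'load': 0 if np.isnan(g.load) else g.load * 100,
        })
    return usage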

Example 15: main

# Required module import: import GPUtil [as alias]
# Or: from GPUtil import getGPUs [as alias]
def main():
	args = parser.parse_args()

	try:
		with open(args.save) as fp:
			model_info = json.load(fp)
	except:
		model_info = {}

	gpu = GPUtil.getGPUs()[0]
	empty_gpu = gpu.memoryUsed
	
	for m in model_names:
		if not m in model_info.keys():
			
			# create model
			print("=> creating model '{}'".format(m))
			if args.pretrained.lower() not in ['false', 'none', 'not', 'no', '0']:
				print("=> using pre-trained parameters '{}'".format(args.pretrained))
				model = pretrainedmodels.__dict__[m](num_classes=1000,
					pretrained=args.pretrained)
			else:
				model = pretrainedmodels.__dict__[m]()

			cudnn.benchmark = True

			scale = 0.875

			print('Images transformed from size {} to {}'.format(
				int(round(max(model.input_size) / scale)),
				model.input_size))
			
			batch_sizes = [1, 2, 4, 8, 16, 32, 64]
			memory = []
			
			model = model.cuda().eval()
			for i, bs in enumerate(batch_sizes):
				with torch.no_grad():
					_ = model(torch.randn(bs, *model.input_size).cuda(non_blocking=True))
			
				gpu = GPUtil.getGPUs()[0]
				memory.append(gpu.memoryUsed - empty_gpu)

			model_info[m] = memory

			with open(args.save, 'w') as fp:
				json.dump(model_info, fp)

			del model
			torch.cuda.empty_cache() 
Developer: CeLuigi, Project: models-comparison.pytorch, Lines: 52, Source file: compute_memory_usage.py


Note: The GPUtil.getGPUs examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not reproduce this article without permission.