

Python GPUtil.getAvailable Method Code Examples

This article collects typical usage examples of Python's GPUtil.getAvailable method. If you are wondering what GPUtil.getAvailable does or how to use it in practice, the curated code examples below should help; they also show how the method fits into the broader GPUtil module.


The following presents 7 code examples of the GPUtil.getAvailable method, sorted by popularity by default.
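Before working through the examples, here is a minimal sketch of the call itself. The parameter values are illustrative, not defaults taken from any project below: getAvailable returns a list of GPU device IDs that pass the given load/memory thresholds, ordered as requested.

import GPUtil

# Ask for up to 2 GPU IDs whose load and memory utilization are both
# below 50%, preferring GPUs with the most free memory.
device_ids = GPUtil.getAvailable(order='memory', limit=2,
                                 maxLoad=0.5, maxMemory=0.5)
print(device_ids)  # e.g. [1, 0]; an empty list means no GPU qualified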

Example 1: _get_device_map

# Required module: import GPUtil [as alias]
# Or: from GPUtil import getAvailable [as alias]
def _get_device_map(self):
        self.logger.info('get devices')
        run_on_gpu = False
        device_map = [-1] * self.num_worker
        if not self.args.cpu:
            try:
                import GPUtil
                num_all_gpu = len(GPUtil.getGPUs())
                avail_gpu = GPUtil.getAvailable(order='memory', limit=min(num_all_gpu, self.num_worker),
                                                maxMemory=0.9, maxLoad=0.9)
                num_avail_gpu = len(avail_gpu)

                if num_avail_gpu >= self.num_worker:
                    run_on_gpu = True
                elif 0 < num_avail_gpu < self.num_worker:
                    self.logger.warning('only %d out of %d GPU(s) is available/free, but "-num_worker=%d"' %
                                        (num_avail_gpu, num_all_gpu, self.num_worker))
                    if not self.args.device_map:
                        self.logger.warning('multiple workers will be allocated to one GPU, '
                                            'may not scale well and may raise out-of-memory')
                    else:
                        self.logger.warning('workers will be allocated based on "-device_map=%s", '
                                            'may not scale well and may raise out-of-memory' % self.args.device_map)
                    run_on_gpu = True
                else:
                    self.logger.warning('no GPU available, fall back to CPU')

                if run_on_gpu:
                    device_map = ((self.args.device_map or avail_gpu) * self.num_worker)[: self.num_worker]
            except FileNotFoundError:
                self.logger.warning('nvidia-smi is missing, often means no gpu on this machine. '
                                    'fall back to cpu!')
        self.logger.info('device map: \n\t\t%s' % '\n\t\t'.join(
            'worker %2d -> %s' % (w_id, ('gpu %2d' % g_id) if g_id >= 0 else 'cpu') for w_id, g_id in
            enumerate(device_map)))
        return device_map 
Author: hanxiao | Project: bert-as-service | Lines: 38 | Source: __init__.py
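The allocation line ((self.args.device_map or avail_gpu) * self.num_worker)[:self.num_worker] cycles the chosen GPU IDs so that every worker receives a device even when workers outnumber GPUs. The same trick in isolation (names here are illustrative):

def round_robin(gpu_ids, num_workers):
    """Repeat gpu_ids until it covers num_workers workers, then truncate."""
    return (gpu_ids * num_workers)[:num_workers]

print(round_robin([0, 2], 5))  # [0, 2, 0, 2, 0] -- workers 0-4 share GPUs 0 and 2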

Example 2: get_gpu_info

# Required module: import GPUtil [as alias]
# Or: from GPUtil import getAvailable [as alias]
def get_gpu_info():
    return GPUtil.getAvailable(order='memory', limit=10, maxLoad=0.25,
                               maxMemory=0.25, includeNan=False,
                               excludeID=[], excludeUUID=[])
Author: BMW-InnovationLab | Project: BMW-TensorFlow-Training-GUI | Lines: 4 | Source: main.py
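This helper asks for up to 10 GPUs that are nearly idle (under 25% load and memory utilization). A caller would typically take the first ID from the returned list and fall back to CPU when the list is empty; a hypothetical usage sketch:

gpus = get_gpu_info()
if gpus:
    device = 'cuda:%d' % gpus[0]  # GPU with the most free memory, per order='memory'
else:
    device = 'cpu'                # nothing under the 25% thresholds
print('running on', device)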

Example 3: autoset_settings

# Required module: import GPUtil [as alias]
# Or: from GPUtil import getAvailable [as alias]
# This snippet also uses: import ast, os, warnings, multiprocessing
def autoset_settings(set_var):
    """Autoset GPU parameters using CUDA_VISIBLE_DEVICES variables.

    Return default config if variable not set.
    :param set_var: Variable to set. Must be of type ConfigSettings
    """
    try:
        devices = ast.literal_eval(os.environ["CUDA_VISIBLE_DEVICES"])
        if type(devices) != list and type(devices) != tuple:
            devices = [devices]
        if len(devices) != 0:
            set_var.GPU = len(devices)
            set_var.NJOBS = len(devices)
            warnings.warn("Detecting CUDA device(s) : {}".format(devices))

    except KeyError:
        try:
            set_var.GPU = len(GPUtil.getAvailable(order='first', limit=8,
                                                  maxLoad=0.5, maxMemory=0.5,
                                                  includeNan=False))

            if not set_var.GPU:
                warnings.warn("No GPU automatically detected. Setting SETTINGS.GPU to 0, " +
                              "and SETTINGS.NJOBS to cpu_count.")
                set_var.GPU = 0
                set_var.NJOBS = multiprocessing.cpu_count()
            else:
                set_var.NJOBS = set_var.GPU
                warnings.warn("Detecting {} CUDA device(s).".format(set_var.GPU))

        except ValueError:
            warnings.warn("No GPU automatically detected. Setting SETTINGS.GPU to 0, " +
                          "and SETTINGS.NJOBS to cpu_count.")
            set_var.GPU = 0
            set_var.NJOBS = multiprocessing.cpu_count()

    return set_var 
Author: FenTechSolutions | Project: CausalDiscoveryToolbox | Lines: 39 | Source: Settings.py
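The ast.literal_eval call is what makes the type check above necessary: CUDA_VISIBLE_DEVICES is a string, and its parsed form depends on its contents. A quick illustration:

import ast

for value in ['3', '0,1', '0,1,2']:
    parsed = ast.literal_eval(value)
    print(repr(value), '->', parsed, type(parsed).__name__)
# '3'     -> 3         int    (a single device; the type check wraps it into [3])
# '0,1'   -> (0, 1)    tuple
# '0,1,2' -> (0, 1, 2) tuple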

Example 4: available_gpu

# Required module: import GPUtil [as alias]
# Or: from GPUtil import getAvailable [as alias]
def available_gpu(*args, **kwargs):
    """This function is an alias for ``GPUtil.getAvailable``. If
    ``GPUtil`` is not installed, it returns [0,] as a default GPU ID."""

    return GPUtil.getAvailable(*args, **kwargs) 
Author: bwohlberg | Project: sporco | Lines: 7 | Source: _gputil.py
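The snippet shows only the GPUtil-backed branch; the fallback described in the docstring is typically arranged at module level with a guarded import. A hypothetical sketch of that pattern (not the verbatim sporco source):

try:
    import GPUtil

    def available_gpu(*args, **kwargs):
        """Alias for GPUtil.getAvailable."""
        return GPUtil.getAvailable(*args, **kwargs)

except ImportError:

    def available_gpu(*args, **kwargs):
        """GPUtil is not installed; report GPU 0 as the default."""
        return [0]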

Example 5: run

# Required module: import GPUtil [as alias]
# Or: from GPUtil import getAvailable [as alias]
# This method also relies on names from its module: zmq, uuid, datetime, jsonapi, BertWorker, ServerCommand
def run(self):
        available_gpus = range(self.num_worker)
        run_on_gpu = True
        num_req = 0
        try:
            import GPUtil
            available_gpus = GPUtil.getAvailable(limit=self.num_worker)
            if len(available_gpus) < self.num_worker:
                self.logger.warn('only %d GPU(s) is available, but ask for %d' % (len(available_gpus), self.num_worker))
        except FileNotFoundError:
            self.logger.warn('nvidia-smi is missing, often means no gpu found on this machine. '
                             'will run service on cpu instead')
            run_on_gpu = False

        # start the backend processes
        for i in available_gpus:
            process = BertWorker(i, self.args, self.addr_backend, self.addr_sink)
            self.processes.append(process)
            process.start()

        try:
            while True:
                client, msg = self.frontend.recv_multipart()
                if msg == ServerCommand.show_config:
                    self.sink.send_multipart([client, msg,
                                              jsonapi.dumps({**{'client': client.decode('ascii'),
                                                                'num_subprocess': len(self.processes),
                                                                'frontend -> backend': self.addr_backend,
                                                                'backend -> sink': self.addr_sink,
                                                                'frontend <-> sink': self.addr_front2sink,
                                                                'server_current_time': str(datetime.now()),
                                                                'run_on_gpu': run_on_gpu,
                                                                'num_request': num_req},
                                                             **self.args_dict})])
                    continue

                num_req += 1
                client = client + b'#' + str(uuid.uuid4()).encode('ascii')
                seqs = jsonapi.loads(msg)
                num_seqs = len(seqs)
                # tell sink to collect a new job
                self.sink.send_multipart([client, ServerCommand.new_job, b'%d' % num_seqs])

                if num_seqs > self.max_batch_size:
                    # divide the large batch into small batches
                    s_idx = 0
                    while s_idx < num_seqs:
                        tmp = seqs[s_idx: (s_idx + self.max_batch_size)]
                        if tmp:
                            # get the worker with minimum workload
                            client_partial_id = client + b'@%d' % s_idx
                            self.backend.send_multipart([client_partial_id, jsonapi.dumps(tmp)])
                        s_idx += len(tmp)
                else:
                    self.backend.send_multipart([client, msg])
        except zmq.error.ContextTerminated:
            self.logger.error('context is closed!') 
Author: a414351664 | Project: Bert-TextClassification | Lines: 59 | Source: server.py
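The batch-splitting branch near the end is a plain fixed-size chunking of the incoming request. The same logic in isolation (illustrative names):

def chunk(seqs, max_batch_size):
    """Yield successive slices of at most max_batch_size items."""
    for s_idx in range(0, len(seqs), max_batch_size):
        yield seqs[s_idx:s_idx + max_batch_size]

print(list(chunk(list(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]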

Example 6: parse

# Required module: import GPUtil [as alias]
# Or: from GPUtil import getAvailable [as alias]
# This method also uses: numpy as np, torch, os, and the project's util module
def parse(self):
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()

        # === processing options === begin ===
        # determine which GPU to use
        # auto, throw exception when no GPU is available
        if self.opt.gpu_ids == 'auto':
            GPUtil.showUtilization()
            deviceIDs = GPUtil.getAvailable(order='first', limit=4, maxLoad=0.5, maxMemory=0.5,
                                            excludeID=[], excludeUUID=[])
            deviceID_costs = [-1*x for x in deviceIDs]
            # reorder the device IDs by computational capacity, i.e., total memory size;
            # memory size is floor-divided by 1000 so small fluctuations do not affect the ordering
            gpus = GPUtil.getGPUs()
            memory_size_costs = [-1*(gpu.memoryTotal//1000) for gpu in gpus if (gpu.load < 0.5 and gpu.memoryUtil < 0.5)]
            names = [gpu.name for gpu in gpus if (gpu.load < 0.5 and gpu.memoryUtil < 0.5)]
            sorted_idx = np.lexsort((deviceID_costs, memory_size_costs))

            self.opt.gpu_ids = [deviceIDs[sorted_idx[0]]]
            print('### selected GPU PCI_ID: %d, Name: %s ###' % (self.opt.gpu_ids[0], names[sorted_idx[0]]))
        else:
            # split into integer list, manual or multi-gpu
            self.opt.gpu_ids = list(map(int, self.opt.gpu_ids.split(',')))

        self.opt.device = torch.device("cuda:%d" % self.opt.gpu_ids[0] if (torch.cuda.is_available() and len(self.opt.gpu_ids) >= 1) else "cpu")
        # cuda.select_device(self.opt.gpu_ids[0])
        # torch.cuda.set_device(self.opt.gpu_ids[0])

        # set unique display_id
        self.opt.display_id = int(self.opt.display_id + 100 * self.opt.gpu_ids[0])

        # assure that 2d & 3d rot are not conflicting
        assert ((self.opt.rot_3d & self.opt.rot_horizontal) == False)
        # === processing options === end ===

        args = vars(self.opt)

        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')

        # save to the disk
        expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
        return self.opt 
Author: lijx10 | Project: USIP | Lines: 56 | Source: options_detector.py
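np.lexsort sorts by its last key first, so memory_size_costs is the primary key and deviceID_costs only breaks ties; because both are negated, the GPU with the most total memory (and, among equals, the highest device ID) ends up at index 0. A small worked illustration with made-up card sizes:

import numpy as np

device_ids = [0, 1, 2]
deviceID_costs = [-x for x in device_ids]   # [0, -1, -2]
memory_size_costs = [-11, -11, -24]         # e.g. two 11 GB cards and one 24 GB card (memoryTotal // 1000)
sorted_idx = np.lexsort((deviceID_costs, memory_size_costs))
print(sorted_idx)                  # [2 1 0]: the 24 GB GPU first, then GPU 1 over GPU 0
print(device_ids[sorted_idx[0]])   # 2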

Example 7: process_opts

# Required module: import GPUtil [as alias]
# Or: from GPUtil import getAvailable [as alias]
# This method also uses: numpy as np, torch, os, and the project's util module
def process_opts(self):
        assert self.opt is not None

        # === processing options === begin ===
        # determine which GPU to use
        # auto, throw exception when no GPU is available
        if self.opt.gpu_ids == 'auto':
            GPUtil.showUtilization()
            deviceIDs = GPUtil.getAvailable(order='first', limit=4, maxLoad=0.5, maxMemory=0.5,
                                            excludeID=[], excludeUUID=[])
            deviceID_costs = [-1 * x for x in deviceIDs]
            # reorder the device IDs by computational capacity, i.e., total memory size;
            # memory size is floor-divided by 1000 so small fluctuations do not affect the ordering
            gpus = GPUtil.getGPUs()
            memory_size_costs = [-1 * (gpu.memoryTotal // 1000) for gpu in gpus if
                                 (gpu.load < 0.5 and gpu.memoryUtil < 0.5)]
            names = [gpu.name for gpu in gpus if (gpu.load < 0.5 and gpu.memoryUtil < 0.5)]
            sorted_idx = np.lexsort((deviceID_costs, memory_size_costs))

            self.opt.gpu_ids = [deviceIDs[sorted_idx[0]]]
            print('### selected GPU PCI_ID: %d, Name: %s ###' % (self.opt.gpu_ids[0], names[sorted_idx[0]]))
        else:
            if type(self.opt.gpu_ids) == str:
                # split into integer list, manual or multi-gpu
                self.opt.gpu_ids = list(map(int, self.opt.gpu_ids.split(',')))

        self.opt.device = torch.device(
            "cuda:%d" % self.opt.gpu_ids[0] if (torch.cuda.is_available() and len(self.opt.gpu_ids) >= 1) else "cpu")
        # cuda.select_device(self.opt.gpu_ids[0])
        # torch.cuda.set_device(self.opt.gpu_ids[0])

        # set unique display_id
        self.opt.display_id = int(self.opt.display_id + 100 * self.opt.gpu_ids[0])

        # assure that 2d & 3d rot are not conflicting
        assert ((self.opt.rot_3d & self.opt.rot_horizontal) == False)
        # === processing options === end ===

        args = vars(self.opt)

        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')

        # save to the disk
        expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n') 
Author: lijx10 | Project: USIP | Lines: 56 | Source: options_detector.py


Note: The GPUtil.getAvailable examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and redistribution or use should follow each project's License. Do not reproduce without permission.