

Python cfg.NUM_GPUS Attribute Code Examples

This article collects typical usage examples of the core.config.cfg.NUM_GPUS attribute in Python. If you are wondering what cfg.NUM_GPUS is for, or how to use it, the curated examples below should help. You can also explore further usage examples of core.config.cfg, the module this attribute belongs to.


The following sections present 15 code examples of the cfg.NUM_GPUS attribute, sorted by popularity by default.

Example 1: UpdateIterStats

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def UpdateIterStats(self, model_out, inner_iter=None):
        """Update tracked iteration statistics."""
        if inner_iter is not None and self.misc_args.iter_size > 1:
            # For the case of using args.iter_size > 1
            return self._UpdateIterStats_inner(model_out, inner_iter)

        # The following code is kept for compatibility with train_net.py and the iter_size == 1 case
        total_loss = 0

        for k, loss in model_out['losses'].items():
            assert loss.shape[0] == cfg.NUM_GPUS
            loss = loss.mean(dim=0, keepdim=True)
            total_loss += loss
            loss_data = loss.data[0]
            model_out['losses'][k] = loss
            self.smoothed_losses[k].AddValue(loss_data)

        model_out['total_loss'] = total_loss  # Add the total loss for back propagation
        self.smoothed_total_loss.AddValue(total_loss.data[0]) 
Author: ppengtang, Project: pcl.pytorch, Lines: 21, Source: training_stats.py
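
The reduction above assumes each loss arrives as a 1-D tensor with one entry per GPU, as gathered by the data-parallel wrapper; loss.data[0] is the pre-0.4 PyTorch way of extracting the scalar (loss.item() in later versions). A minimal, self-contained sketch of that pattern with made-up values:

import torch

num_gpus = 2                           # stand-in for cfg.NUM_GPUS
loss = torch.tensor([0.7, 0.9])        # shape [num_gpus]: one loss value per GPU
assert loss.shape[0] == num_gpus
loss = loss.mean(dim=0, keepdim=True)  # tensor([0.8000]); keepdim keeps it 1-D for summing
loss_data = loss.item()                # 0.8, the scalar fed to the smoothed logger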

Example 2: get_net

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def get_net(data_loader, name):
    logger = logging.getLogger(__name__)
    blob_names = data_loader.get_output_names()
    net = core.Net(name)
    net.type = 'dag'
    for gpu_id in range(cfg.NUM_GPUS):
        with core.NameScope('gpu_{}'.format(gpu_id)):
            with core.DeviceScope(muji.OnGPU(gpu_id)):
                for blob_name in blob_names:
                    blob = core.ScopedName(blob_name)
                    workspace.CreateBlob(blob)
                net.DequeueBlobs(
                    data_loader._blobs_queue_name, blob_names)
    logger.info("Protobuf:\n" + str(net.Proto()))

    return net 
Author: ronghanghu, Project: seg_every_thing, Lines: 18, Source: test_loader.py

Example 3: broadcast_parameters

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def broadcast_parameters(model):
    """Copy parameter blobs from GPU 0 over the corresponding parameter blobs
    on GPUs 1 through cfg.NUM_GPUS - 1.
    """
    if cfg.NUM_GPUS == 1:
        # no-op if only running on a single GPU
        return

    def _do_broadcast(all_blobs):
        assert len(all_blobs) % cfg.NUM_GPUS == 0, \
            ('Unexpected value for NUM_GPUS. Make sure you are not '
             'running single-GPU inference with NUM_GPUS > 1.')
        blobs_per_gpu = int(len(all_blobs) / cfg.NUM_GPUS)
        for i in range(blobs_per_gpu):
            blobs = [p for p in all_blobs[i::blobs_per_gpu]]
            data = workspace.FetchBlob(blobs[0])
            logger.debug('Broadcasting {} to'.format(str(blobs[0])))
            for j, p in enumerate(blobs[1:]):
                logger.debug(' |-> {}'.format(str(p)))
                with c2_utils.CudaScope(j + 1):
                    workspace.FeedBlob(p, data)

    _do_broadcast(model.params)
    _do_broadcast([b + '_momentum' for b in model.TrainableParams()]) 
Author: ronghanghu, Project: seg_every_thing, Lines: 26, Source: net.py
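
A quick illustration of the all_blobs[i::blobs_per_gpu] striding used above: parameter blobs are grouped by GPU, in the same order on every GPU, so the stride collects the replicas of one parameter across all GPUs. The blob names below are hypothetical, chosen only to show the layout:

all_blobs = ['gpu_0/conv1_w', 'gpu_0/conv1_b', 'gpu_1/conv1_w', 'gpu_1/conv1_b']
num_gpus = 2
blobs_per_gpu = len(all_blobs) // num_gpus     # 2 distinct parameter blobs per GPU
for i in range(blobs_per_gpu):
    replicas = all_blobs[i::blobs_per_gpu]     # i=0 -> ['gpu_0/conv1_w', 'gpu_1/conv1_w']
    print(replicas[0], '->', replicas[1:])     # broadcast source (GPU 0) -> destinations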

Example 4: GetStats

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def GetStats(self, cur_iter, lr):
        eta_seconds = self.iter_timer.average_time * (
            cfg.SOLVER.MAX_ITER - cur_iter
        )
        eta = str(datetime.timedelta(seconds=int(eta_seconds)))
        mem_stats = c2_py_utils.GetGPUMemoryUsageStats()
        mem_usage = np.max(mem_stats['max_by_gpu'][:cfg.NUM_GPUS])
        stats = dict(
            iter=cur_iter,
            lr=float(lr),
            time=self.iter_timer.average_time,
            loss=self.smoothed_total_loss.GetMedianValue(),
            eta=eta,
            mb_qsize=int(
                np.round(self.smoothed_mb_qsize.GetMedianValue())
            ),
            mem=int(np.ceil(mem_usage / 1024 / 1024))
        )
        for k, v in self.smoothed_losses_and_metrics.items():
            stats[k] = v.GetMedianValue()
        return stats 
Author: ronghanghu, Project: seg_every_thing, Lines: 23, Source: training_stats.py
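
For reference, a small worked example of how the eta and mem fields are derived; the timing and memory numbers are illustrative only:

import datetime
import numpy as np

average_time = 0.5                  # smoothed seconds per iteration
iters_remaining = 10000             # cfg.SOLVER.MAX_ITER - cur_iter
eta_seconds = average_time * iters_remaining
eta = str(datetime.timedelta(seconds=int(eta_seconds)))    # '1:23:20'

mem_usage = 3.2 * 1024 ** 3         # peak GPU memory usage in bytes
mem = int(np.ceil(mem_usage / 1024 / 1024))                # 3277 (MiB)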

Example 5: build_data_parallel_model

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def build_data_parallel_model(model, single_gpu_build_func):
    """Build a data parallel model given a function that builds the model on a
    single GPU.
    """
    if model.only_build_forward_pass:
        single_gpu_build_func(model)
    elif model.train:
        all_loss_gradients = _build_forward_graph(model, single_gpu_build_func)
        # Add backward pass on all GPUs
        model.AddGradientOperators(all_loss_gradients)
        if cfg.NUM_GPUS > 1:
            _add_allreduce_graph(model)
        for gpu_id in range(cfg.NUM_GPUS):
            # After allreduce, all GPUs perform SGD updates on their identical
            # params and gradients in parallel
            with c2_utils.NamedCudaScope(gpu_id):
                add_single_gpu_param_update_ops(model, gpu_id)
    else:
        # Test-time network operates on single GPU
        # Test-time parallelism is implemented through multiprocessing
        with c2_utils.NamedCudaScope(model.target_gpu_id):
            single_gpu_build_func(model) 
Author: ronghanghu, Project: seg_every_thing, Lines: 24, Source: optimizer.py

Example 6: _add_allreduce_graph

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def _add_allreduce_graph(model):
    """Construct the graph that performs Allreduce on the gradients."""
    # Need to all-reduce the per-GPU gradients if training with more than 1 GPU
    all_params = model.TrainableParams()
    assert len(all_params) % cfg.NUM_GPUS == 0
    # The model parameters are replicated on each GPU; get the number of
    # distinct parameter blobs (i.e., the number of parameter blobs on
    # each GPU)
    params_per_gpu = int(len(all_params) / cfg.NUM_GPUS)
    with c2_utils.CudaScope(0):
        # Iterate over distinct parameter blobs
        for i in range(params_per_gpu):
            # Gradients from all GPUs for this parameter blob
            gradients = [
                model.param_to_grad[p] for p in all_params[i::params_per_gpu]
            ]
            if len(gradients) > 0:
                if cfg.USE_NCCL:
                    model.net.NCCLAllreduce(gradients, gradients)
                else:
                    muji.Allreduce(model.net, gradients, reduced_affix='') 
Author: ronghanghu, Project: seg_every_thing, Lines: 23, Source: optimizer.py

Example 7: __init__

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def __init__(self, **kwargs):
        # Handle args specific to the DetectionModelHelper, others pass through
        # to CNNModelHelper
        self.train = kwargs.get('train', False)
        self.num_classes = kwargs.get('num_classes', -1)
        assert self.num_classes > 0, 'num_classes must be > 0'
        for k in ('train', 'num_classes'):
            if k in kwargs:
                del kwargs[k]
        kwargs['order'] = 'NCHW'
        # Defensively set cudnn_exhaustive_search to False in case the default
        # changes in CNNModelHelper. The detection code uses variable size
        # inputs that might not play nicely with cudnn_exhaustive_search.
        kwargs['cudnn_exhaustive_search'] = False
        super(DetectionModelHelper, self).__init__(**kwargs)
        self.roi_data_loader = None
        self.losses = []
        self.metrics = []
        self.do_not_update_params = []  # Params on this list are not updated
        self.net.Proto().type = cfg.MODEL.EXECUTION_TYPE
        self.net.Proto().num_workers = cfg.NUM_GPUS * 4
        self.prev_use_cudnn = self.use_cudnn
        self.gn_params = []  # Params on this list are GroupNorm parameters
Author: ronghanghu, Project: seg_every_thing, Lines: 25, Source: detector.py

Example 8: _CorrectMomentum

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def _CorrectMomentum(self, correction):
        """The MomentumSGDUpdate op implements the update V as

            V := mu * V + lr * grad,

        where mu is the momentum factor, lr is the learning rate, and grad is
        the stochastic gradient. Since V is not defined independently of the
        learning rate (as it should ideally be), when the learning rate is
        changed we should scale the update history V in order to make it
        compatible in scale with lr * grad.
        """
        logger.info(
            'Scaling update history by {:.6f} (new lr / old lr)'.
            format(correction))
        for i in range(cfg.NUM_GPUS):
            with c2_utils.CudaScope(i):
                for param in self.TrainableParams(gpu_id=i):
                    op = core.CreateOperator(
                        'Scale', [param + '_momentum'], [param + '_momentum'],
                        scale=correction)
                    workspace.RunOperatorOnce(op) 
Author: ronghanghu, Project: seg_every_thing, Lines: 23, Source: detector.py
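
A hedged usage sketch of the correction factor described in the docstring; the learning-rate values and the call site are illustrative, and model stands for an already-built DetectionModelHelper:

old_lr, new_lr = 0.02, 0.002
correction = new_lr / old_lr        # 0.1 at a 10x learning-rate step-down
model._CorrectMomentum(correction)  # rescale V so it stays in scale with lr * grad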

Example 9: create

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def create(model_type_func, train=False, gpu_id=0):
    """Generic model creation function that dispatches to specific model
    building functions.

    By default, this function will generate a data parallel model configured to
    run on cfg.NUM_GPUS devices. However, you can restrict it to build a model
    targeted to a specific GPU by specifying gpu_id. This is used by
    optimizer.build_data_parallel_model() during test time.
    """
    model = DetectionModelHelper(
        name=model_type_func,
        train=train,
        num_classes=cfg.MODEL.NUM_CLASSES,
        init_params=train
    )
    model.only_build_forward_pass = False
    model.target_gpu_id = gpu_id
    return get_func(model_type_func)(model) 
Author: ronghanghu, Project: seg_every_thing, Lines: 20, Source: model_builder.py
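
A hedged usage sketch of create(); the model-type name 'generalized_rcnn' follows the usual Detectron convention and is an assumption here, not taken from this snippet:

# Training model, replicated across cfg.NUM_GPUS devices by the builder pipeline.
train_model = create('generalized_rcnn', train=True)

# Test-time model pinned to one GPU; test-time parallelism is handled by
# multiprocessing rather than by a multi-GPU graph, as the docstring notes.
test_model = create('generalized_rcnn', train=False, gpu_id=0)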

Example 10: add_fast_rcnn_losses

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def add_fast_rcnn_losses(model):
    """Add losses for RoI classification and bounding box regression."""
    cls_prob, loss_cls = model.net.SoftmaxWithLoss(
        ['cls_score', 'labels_int32'], ['cls_prob', 'loss_cls'],
        scale=1. / cfg.NUM_GPUS
    )
    loss_bbox = model.net.SmoothL1Loss(
        [
            'bbox_pred', 'bbox_targets', 'bbox_inside_weights',
            'bbox_outside_weights'
        ],
        'loss_bbox',
        scale=1. / cfg.NUM_GPUS
    )
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_cls, loss_bbox])
    model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
    model.AddLosses(['loss_cls', 'loss_bbox'])
    model.AddMetrics('accuracy_cls')
    return loss_gradients


# ---------------------------------------------------------------------------- #
# Box heads
# ---------------------------------------------------------------------------- # 
Author: lvpengyuan, Project: masktextspotter.caffe2, Lines: 26, Source: fast_rcnn_heads.py
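
The scale=1. / cfg.NUM_GPUS factor assumes per-GPU gradients are summed by allreduce (see Example 6), so scaling each GPU's loss turns that sum into an average. A toy numeric check of that assumption:

num_gpus = 4
per_gpu_grads = [1.0, 2.0, 3.0, 4.0]                      # illustrative gradient values
summed_scaled = sum(g / num_gpus for g in per_gpu_grads)  # what the allreduce sum yields
assert abs(summed_scaled - sum(per_gpu_grads) / num_gpus) < 1e-12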

Example 11: __init__

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def __init__(self, **kwargs):
        # Handle args specific to the DetectionModelHelper, others pass through
        # to CNNModelHelper
        self.train = kwargs.get('train', False)
        self.num_classes = kwargs.get('num_classes', -1)
        assert self.num_classes > 0, 'num_classes must be > 0'
        for k in ('train', 'num_classes'):
            if k in kwargs:
                del kwargs[k]
        kwargs['order'] = 'NCHW'
        # Defensively set cudnn_exhaustive_search to False in case the default
        # changes in CNNModelHelper. The detection code uses variable size
        # inputs that might not play nicely with cudnn_exhaustive_search.
        kwargs['cudnn_exhaustive_search'] = False
        super(DetectionModelHelper, self).__init__(**kwargs)
        self.roi_data_loader = None
        self.losses = []
        self.metrics = []
        self.do_not_update_params = []  # Params on this list are not updated
        self.net.Proto().type = cfg.MODEL.EXECUTION_TYPE
        self.net.Proto().num_workers = cfg.NUM_GPUS * 4
        self.prev_use_cudnn = self.use_cudnn 
Author: lvpengyuan, Project: masktextspotter.caffe2, Lines: 24, Source: detector.py

Example 12: LogIterStats

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def LogIterStats(self, cur_iter, lr):
        """Log the tracked statistics."""
        if (cur_iter % self.LOG_PERIOD == 0 or
                cur_iter == cfg.SOLVER.MAX_ITER - 1):
            eta_seconds = self.iter_timer.average_time * (
                cfg.SOLVER.MAX_ITER - cur_iter
            )
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            mem_stats = c2_py_utils.GetGPUMemoryUsageStats()
            mem_usage = np.max(mem_stats['max_by_gpu'][:cfg.NUM_GPUS])
            stats = dict(
                iter=cur_iter,
                lr=float(lr),
                time=self.iter_timer.average_time,
                loss=self.smoothed_total_loss.GetMedianValue(),
                eta=eta,
                mb_qsize=int(
                    np.round(self.smoothed_mb_qsize.GetMedianValue())
                ),
                mem=int(np.ceil(mem_usage / 1024 / 1024))
            )
            for k, v in self.smoothed_losses_and_metrics.items():
                stats[k] = v.GetMedianValue()
            log_json_stats(stats) 
Author: lvpengyuan, Project: masktextspotter.caffe2, Lines: 26, Source: train_net.py

Example 13: UpdateIterStats

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def UpdateIterStats(self, model_out, inner_iter=None):
        """Update tracked iteration statistics."""
        if inner_iter is not None and self.misc_args.iter_size > 1:
            # For the case of using args.iter_size > 1
            return self._UpdateIterStats_inner(model_out, inner_iter)

        # The following code is kept for compatibility with train_net.py and the iter_size == 1 case
        total_loss = 0
        if cfg.FPN.FPN_ON:
            loss_rpn_cls_data = 0
            loss_rpn_bbox_data = 0

        for k, loss in model_out['losses'].items():
            assert loss.shape[0] == cfg.NUM_GPUS
            loss = loss.mean(dim=0, keepdim=True)
            total_loss += loss
            loss_data = loss.data[0]
            model_out['losses'][k] = loss
            if cfg.FPN.FPN_ON:
                if k.startswith('loss_rpn_cls_'):
                    loss_rpn_cls_data += loss_data
                elif k.startswith('loss_rpn_bbox_'):
                    loss_rpn_bbox_data += loss_data
            self.smoothed_losses[k].AddValue(loss_data)

        model_out['total_loss'] = total_loss  # Add the total loss for back propagation
        self.smoothed_total_loss.AddValue(total_loss.data[0])
        if cfg.FPN.FPN_ON:
            self.smoothed_losses['loss_rpn_cls'].AddValue(loss_rpn_cls_data)
            self.smoothed_losses['loss_rpn_bbox'].AddValue(loss_rpn_bbox_data)

        for k, metric in model_out['metrics'].items():
            metric = metric.mean(dim=0, keepdim=True)
            self.smoothed_metrics[k].AddValue(metric.data[0]) 
Author: roytseng-tw, Project: Detectron.pytorch, Lines: 36, Source: training_stats.py

Example 14: _UpdateIterStats_inner

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def _UpdateIterStats_inner(self, model_out, inner_iter):
        """Update tracked iteration statistics for the case of iter_size > 1"""
        assert inner_iter < self.misc_args.iter_size

        total_loss = 0

        if inner_iter == 0:
            self.inner_total_loss = []
            for k in model_out['losses']:
                self.inner_losses[k] = []

        for k, loss in model_out['losses'].items():
            assert loss.shape[0] == cfg.NUM_GPUS
            loss = loss.mean(dim=0, keepdim=True)
            total_loss += loss
            loss_data = loss.data[0]

            model_out['losses'][k] = loss

            self.inner_losses[k].append(loss_data)
            if inner_iter == (self.misc_args.iter_size - 1):
                loss_data = self._mean_and_reset_inner_list('inner_losses', k)
                self.smoothed_losses[k].AddValue(loss_data)

        model_out['total_loss'] = total_loss  # Add the total loss for back propagation
        total_loss_data = total_loss.data[0]
        self.inner_total_loss.append(total_loss_data)
        if inner_iter == (self.misc_args.iter_size - 1):
            total_loss_data = self._mean_and_reset_inner_list('inner_total_loss')
            self.smoothed_total_loss.AddValue(total_loss_data) 
Author: ppengtang, Project: pcl.pytorch, Lines: 32, Source: training_stats.py
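
The inner-iteration bookkeeping above accumulates per-step losses and, on the last inner step, averages and resets them. A standalone sketch of that accumulation pattern with made-up numbers:

iter_size = 4
inner_total_loss = []
for inner_iter, step_loss in enumerate([0.9, 0.8, 0.7, 0.6]):
    inner_total_loss.append(step_loss)
    if inner_iter == iter_size - 1:
        smoothed_value = sum(inner_total_loss) / len(inner_total_loss)  # 0.75
        inner_total_loss = []   # reset for the next accumulation window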

Example 15: UpdateIterStats

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def UpdateIterStats(self, model_out, inner_iter=None):
        """Update tracked iteration statistics."""
        if inner_iter is not None and self.misc_args.iter_size > 1:
            # For the case of using args.iter_size > 1
            return self._UpdateIterStats_inner(model_out, inner_iter)

        # The following code is kept for compatibility with train_net.py and the iter_size == 1 case
        total_loss = 0
        if cfg.FPN.FPN_ON:
            loss_rpn_cls_data = 0
            loss_rpn_bbox_data = 0

        for k, loss in model_out['losses'].items():
            assert loss.shape[0] == cfg.NUM_GPUS
            loss = loss.mean(dim=0, keepdim=True)
            total_loss += loss
            loss_data = loss.item()
            model_out['losses'][k] = loss
            if cfg.FPN.FPN_ON:
                if k.startswith('loss_rpn_cls_'):
                    loss_rpn_cls_data += loss_data
                elif k.startswith('loss_rpn_bbox_'):
                    loss_rpn_bbox_data += loss_data
            self.smoothed_losses[k].AddValue(loss_data)

        model_out['total_loss'] = total_loss  # Add the total loss for back propagation
        self.smoothed_total_loss.AddValue(total_loss.item())
        if cfg.FPN.FPN_ON:
            self.smoothed_losses['loss_rpn_cls'].AddValue(loss_rpn_cls_data)
            self.smoothed_losses['loss_rpn_bbox'].AddValue(loss_rpn_bbox_data)

        for k, metric in model_out['metrics'].items():
            metric = metric.mean(dim=0, keepdim=True)
            self.smoothed_metrics[k].AddValue(metric.item()) 
Author: ruotianluo, Project: Context-aware-ZSR, Lines: 36, Source: training_stats.py


Note: The core.config.cfg.NUM_GPUS examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; please refer to each project's license before redistributing or using the code. Do not reproduce without permission.