

Python cfg.NUM_GPUS Attribute Code Examples

This article collects typical usage examples of the core.config.cfg.NUM_GPUS attribute in Python. If you are wondering what cfg.NUM_GPUS does, how it is used, or what real code that reads it looks like, the curated examples below may help. You can also explore further usage examples from core.config.cfg, the module this attribute belongs to.


The following presents 15 code examples of the cfg.NUM_GPUS attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
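Before the examples, here is a minimal sketch of the common access pattern: cfg is the global config object exposed by core.config, and NUM_GPUS is read wherever work has to be split or replicated across devices. The helper name split_batch_per_gpu is hypothetical, added only for illustration.

from core.config import cfg

def split_batch_per_gpu(total_batch_size):
    # Hypothetical helper (not from any of the projects below):
    # divide a global batch size evenly across the configured GPUs.
    assert total_batch_size % cfg.NUM_GPUS == 0, \
        'batch size must be divisible by cfg.NUM_GPUS'
    return total_batch_size // cfg.NUM_GPUS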

Example 1: UpdateIterStats

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def UpdateIterStats(self, model_out, inner_iter=None):
        """Update tracked iteration statistics."""
        if inner_iter is not None and self.misc_args.iter_size > 1:
            # For the case of using args.iter_size > 1
            return self._UpdateIterStats_inner(model_out, inner_iter)

        # The following code is kept for compatibility with train_net.py and iter_size == 1
        total_loss = 0

        for k, loss in model_out['losses'].items():
            assert loss.shape[0] == cfg.NUM_GPUS
            loss = loss.mean(dim=0, keepdim=True)
            total_loss += loss
            loss_data = loss.data[0]
            model_out['losses'][k] = loss
            self.smoothed_losses[k].AddValue(loss_data)

        model_out['total_loss'] = total_loss  # Add the total loss for back propagation
        self.smoothed_total_loss.AddValue(total_loss.data[0]) 
Author: ppengtang, Project: pcl.pytorch, Lines: 21, Source: training_stats.py
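The assert in the loop above reflects that, under multi-GPU training, each loss arrives as a tensor with one entry per GPU, and averaging over dim 0 reduces it to a single value. A tiny illustration with hypothetical values (requires torch):

import torch

num_gpus = 2
loss = torch.tensor([0.5, 0.7])        # one loss value gathered from each GPU
assert loss.shape[0] == num_gpus
print(loss.mean(dim=0, keepdim=True))  # tensor([0.6000])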

Example 2: get_net

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def get_net(data_loader, name):
    logger = logging.getLogger(__name__)
    blob_names = data_loader.get_output_names()
    net = core.Net(name)
    net.type = 'dag'
    for gpu_id in range(cfg.NUM_GPUS):
        with core.NameScope('gpu_{}'.format(gpu_id)):
            with core.DeviceScope(muji.OnGPU(gpu_id)):
                for blob_name in blob_names:
                    blob = core.ScopedName(blob_name)
                    workspace.CreateBlob(blob)
                net.DequeueBlobs(
                    data_loader._blobs_queue_name, blob_names)
    logger.info("Protobuf:\n" + str(net.Proto()))

    return net 
Author: ronghanghu, Project: seg_every_thing, Lines: 18, Source: test_loader.py

Example 3: broadcast_parameters

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def broadcast_parameters(model):
    """Copy parameter blobs from GPU 0 over the corresponding parameter blobs
    on GPUs 1 through cfg.NUM_GPUS - 1.
    """
    if cfg.NUM_GPUS == 1:
        # no-op if only running on a single GPU
        return

    def _do_broadcast(all_blobs):
        assert len(all_blobs) % cfg.NUM_GPUS == 0, \
            ('Unexpected value for NUM_GPUS. Make sure you are not '
             'running single-GPU inference with NUM_GPUS > 1.')
        blobs_per_gpu = int(len(all_blobs) / cfg.NUM_GPUS)
        for i in range(blobs_per_gpu):
            blobs = [p for p in all_blobs[i::blobs_per_gpu]]
            data = workspace.FetchBlob(blobs[0])
            logger.debug('Broadcasting {} to'.format(str(blobs[0])))
            for i, p in enumerate(blobs[1:]):
                logger.debug(' |-> {}'.format(str(p)))
                with c2_utils.CudaScope(i + 1):
                    workspace.FeedBlob(p, data)

    _do_broadcast(model.params)
    _do_broadcast([b + '_momentum' for b in model.TrainableParams()]) 
Author: ronghanghu, Project: seg_every_thing, Lines: 26, Source: net.py

Example 4: GetStats

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def GetStats(self, cur_iter, lr):
        eta_seconds = self.iter_timer.average_time * (
            cfg.SOLVER.MAX_ITER - cur_iter
        )
        eta = str(datetime.timedelta(seconds=int(eta_seconds)))
        mem_stats = c2_py_utils.GetGPUMemoryUsageStats()
        mem_usage = np.max(mem_stats['max_by_gpu'][:cfg.NUM_GPUS])
        stats = dict(
            iter=cur_iter,
            lr=float(lr),
            time=self.iter_timer.average_time,
            loss=self.smoothed_total_loss.GetMedianValue(),
            eta=eta,
            mb_qsize=int(
                np.round(self.smoothed_mb_qsize.GetMedianValue())
            ),
            mem=int(np.ceil(mem_usage / 1024 / 1024))
        )
        for k, v in self.smoothed_losses_and_metrics.items():
            stats[k] = v.GetMedianValue()
        return stats 
Author: ronghanghu, Project: seg_every_thing, Lines: 23, Source: training_stats.py

Example 5: build_data_parallel_model

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def build_data_parallel_model(model, single_gpu_build_func):
    """Build a data parallel model given a function that builds the model on a
    single GPU.
    """
    if model.only_build_forward_pass:
        single_gpu_build_func(model)
    elif model.train:
        all_loss_gradients = _build_forward_graph(model, single_gpu_build_func)
        # Add backward pass on all GPUs
        model.AddGradientOperators(all_loss_gradients)
        if cfg.NUM_GPUS > 1:
            _add_allreduce_graph(model)
        for gpu_id in range(cfg.NUM_GPUS):
            # After allreduce, all GPUs perform SGD updates on their identical
            # params and gradients in parallel
            with c2_utils.NamedCudaScope(gpu_id):
                add_single_gpu_param_update_ops(model, gpu_id)
    else:
        # Test-time network operates on single GPU
        # Test-time parallelism is implemented through multiprocessing
        with c2_utils.NamedCudaScope(model.target_gpu_id):
            single_gpu_build_func(model) 
Author: ronghanghu, Project: seg_every_thing, Lines: 24, Source: optimizer.py

Example 6: _add_allreduce_graph

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def _add_allreduce_graph(model):
    """Construct the graph that performs Allreduce on the gradients."""
    # Need to all-reduce the per-GPU gradients if training with more than 1 GPU
    all_params = model.TrainableParams()
    assert len(all_params) % cfg.NUM_GPUS == 0
    # The model parameters are replicated on each GPU; get the number of
    # distinct parameter blobs (i.e., the number of parameter blobs on
    # each GPU)
    params_per_gpu = int(len(all_params) / cfg.NUM_GPUS)
    with c2_utils.CudaScope(0):
        # Iterate over distinct parameter blobs
        for i in range(params_per_gpu):
            # Gradients from all GPUs for this parameter blob
            gradients = [
                model.param_to_grad[p] for p in all_params[i::params_per_gpu]
            ]
            if len(gradients) > 0:
                if cfg.USE_NCCL:
                    model.net.NCCLAllreduce(gradients, gradients)
                else:
                    muji.Allreduce(model.net, gradients, reduced_affix='') 
Author: ronghanghu, Project: seg_every_thing, Lines: 23, Source: optimizer.py
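The slicing all_params[i::params_per_gpu] in the example above assumes the flat parameter list holds GPU 0's blobs first, then GPU 1's, and so on. A small, self-contained illustration of that grouping, using made-up blob names:

num_gpus = 2
params_per_gpu = 3
all_params = ['gpu_0/w1', 'gpu_0/w2', 'gpu_0/w3',
              'gpu_1/w1', 'gpu_1/w2', 'gpu_1/w3']
for i in range(params_per_gpu):
    # Each slice groups the replicas of one parameter across all GPUs
    print(all_params[i::params_per_gpu])
# ['gpu_0/w1', 'gpu_1/w1']
# ['gpu_0/w2', 'gpu_1/w2']
# ['gpu_0/w3', 'gpu_1/w3']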

Example 7: __init__

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def __init__(self, **kwargs):
        # Handle args specific to the DetectionModelHelper, others pass through
        # to CNNModelHelper
        self.train = kwargs.get('train', False)
        self.num_classes = kwargs.get('num_classes', -1)
        assert self.num_classes > 0, 'num_classes must be > 0'
        for k in ('train', 'num_classes'):
            if k in kwargs:
                del kwargs[k]
        kwargs['order'] = 'NCHW'
        # Defensively set cudnn_exhaustive_search to False in case the default
        # changes in CNNModelHelper. The detection code uses variable size
        # inputs that might not play nicely with cudnn_exhaustive_search.
        kwargs['cudnn_exhaustive_search'] = False
        super(DetectionModelHelper, self).__init__(**kwargs)
        self.roi_data_loader = None
        self.losses = []
        self.metrics = []
        self.do_not_update_params = []  # Params on this list are not updated
        self.net.Proto().type = cfg.MODEL.EXECUTION_TYPE
        self.net.Proto().num_workers = cfg.NUM_GPUS * 4
        self.prev_use_cudnn = self.use_cudnn
        self.gn_params = []  # Params on this list are GroupNorm parameters
Author: ronghanghu, Project: seg_every_thing, Lines: 25, Source: detector.py

Example 8: _CorrectMomentum

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def _CorrectMomentum(self, correction):
        """The MomentumSGDUpdate op implements the update V as

            V := mu * V + lr * grad,

        where mu is the momentum factor, lr is the learning rate, and grad is
        the stochastic gradient. Since V is not defined independently of the
        learning rate (as it should ideally be), when the learning rate is
        changed we should scale the update history V in order to make it
        compatible in scale with lr * grad.
        """
        logger.info(
            'Scaling update history by {:.6f} (new lr / old lr)'.
            format(correction))
        for i in range(cfg.NUM_GPUS):
            with c2_utils.CudaScope(i):
                for param in self.TrainableParams(gpu_id=i):
                    op = core.CreateOperator(
                        'Scale', [param + '_momentum'], [param + '_momentum'],
                        scale=correction)
                    workspace.RunOperatorOnce(op) 
Author: ronghanghu, Project: seg_every_thing, Lines: 23, Source: detector.py
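A hypothetical numeric check of the scaling logic above: V accumulates terms of size lr * grad, so when the learning rate steps down the history is rescaled by new_lr / old_lr (the correction argument) to stay on the same scale.

old_lr, new_lr = 0.01, 0.001
correction = new_lr / old_lr           # 0.1
v_history = 0.9 * 0.0 + old_lr * 1.0   # one MomentumSGD step with mu=0.9, grad=1.0
print(v_history * correction)          # 0.001, back in scale with new_lr * grad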

Example 9: create

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def create(model_type_func, train=False, gpu_id=0):
    """Generic model creation function that dispatches to specific model
    building functions.

    By default, this function will generate a data parallel model configured to
    run on cfg.NUM_GPUS devices. However, you can restrict it to build a model
    targeted to a specific GPU by specifying gpu_id. This is used by
    optimizer.build_data_parallel_model() during test time.
    """
    model = DetectionModelHelper(
        name=model_type_func,
        train=train,
        num_classes=cfg.MODEL.NUM_CLASSES,
        init_params=train
    )
    model.only_build_forward_pass = False
    model.target_gpu_id = gpu_id
    return get_func(model_type_func)(model) 
Author: ronghanghu, Project: seg_every_thing, Lines: 20, Source: model_builder.py

Example 10: add_fast_rcnn_losses

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def add_fast_rcnn_losses(model):
    """Add losses for RoI classification and bounding box regression."""
    cls_prob, loss_cls = model.net.SoftmaxWithLoss(
        ['cls_score', 'labels_int32'], ['cls_prob', 'loss_cls'],
        scale=1. / cfg.NUM_GPUS
    )
    loss_bbox = model.net.SmoothL1Loss(
        [
            'bbox_pred', 'bbox_targets', 'bbox_inside_weights',
            'bbox_outside_weights'
        ],
        'loss_bbox',
        scale=1. / cfg.NUM_GPUS
    )
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_cls, loss_bbox])
    model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
    model.AddLosses(['loss_cls', 'loss_bbox'])
    model.AddMetrics('accuracy_cls')
    return loss_gradients


# ---------------------------------------------------------------------------- #
# Box heads
# ---------------------------------------------------------------------------- # 
Author: lvpengyuan, Project: masktextspotter.caffe2, Lines: 26, Source: fast_rcnn_heads.py
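The scale=1. / cfg.NUM_GPUS factor above is what makes an allreduce-sum of per-GPU gradients behave like an average. A small numeric check with hypothetical values:

num_gpus = 4
per_gpu_grads = [1.0, 2.0, 3.0, 4.0]   # hypothetical per-GPU gradients
allreduce_sum = sum(g * (1.0 / num_gpus) for g in per_gpu_grads)  # scaled, then summed
plain_mean = sum(per_gpu_grads) / num_gpus
assert abs(allreduce_sum - plain_mean) < 1e-12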

Example 11: __init__

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def __init__(self, **kwargs):
        # Handle args specific to the DetectionModelHelper, others pass through
        # to CNNModelHelper
        self.train = kwargs.get('train', False)
        self.num_classes = kwargs.get('num_classes', -1)
        assert self.num_classes > 0, 'num_classes must be > 0'
        for k in ('train', 'num_classes'):
            if k in kwargs:
                del kwargs[k]
        kwargs['order'] = 'NCHW'
        # Defensively set cudnn_exhaustive_search to False in case the default
        # changes in CNNModelHelper. The detection code uses variable size
        # inputs that might not play nicely with cudnn_exhaustive_search.
        kwargs['cudnn_exhaustive_search'] = False
        super(DetectionModelHelper, self).__init__(**kwargs)
        self.roi_data_loader = None
        self.losses = []
        self.metrics = []
        self.do_not_update_params = []  # Params on this list are not updated
        self.net.Proto().type = cfg.MODEL.EXECUTION_TYPE
        self.net.Proto().num_workers = cfg.NUM_GPUS * 4
        self.prev_use_cudnn = self.use_cudnn 
Author: lvpengyuan, Project: masktextspotter.caffe2, Lines: 24, Source: detector.py

Example 12: LogIterStats

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def LogIterStats(self, cur_iter, lr):
        """Log the tracked statistics."""
        if (cur_iter % self.LOG_PERIOD == 0 or
                cur_iter == cfg.SOLVER.MAX_ITER - 1):
            eta_seconds = self.iter_timer.average_time * (
                cfg.SOLVER.MAX_ITER - cur_iter
            )
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            mem_stats = c2_py_utils.GetGPUMemoryUsageStats()
            mem_usage = np.max(mem_stats['max_by_gpu'][:cfg.NUM_GPUS])
            stats = dict(
                iter=cur_iter,
                lr=float(lr),
                time=self.iter_timer.average_time,
                loss=self.smoothed_total_loss.GetMedianValue(),
                eta=eta,
                mb_qsize=int(
                    np.round(self.smoothed_mb_qsize.GetMedianValue())
                ),
                mem=int(np.ceil(mem_usage / 1024 / 1024))
            )
            for k, v in self.smoothed_losses_and_metrics.items():
                stats[k] = v.GetMedianValue()
            log_json_stats(stats) 
Author: lvpengyuan, Project: masktextspotter.caffe2, Lines: 26, Source: train_net.py

Example 13: UpdateIterStats

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def UpdateIterStats(self, model_out, inner_iter=None):
        """Update tracked iteration statistics."""
        if inner_iter is not None and self.misc_args.iter_size > 1:
            # For the case of using args.iter_size > 1
            return self._UpdateIterStats_inner(model_out, inner_iter)

        # The following code is kept for compatibility with train_net.py and iter_size == 1
        total_loss = 0
        if cfg.FPN.FPN_ON:
            loss_rpn_cls_data = 0
            loss_rpn_bbox_data = 0

        for k, loss in model_out['losses'].items():
            assert loss.shape[0] == cfg.NUM_GPUS
            loss = loss.mean(dim=0, keepdim=True)
            total_loss += loss
            loss_data = loss.data[0]
            model_out['losses'][k] = loss
            if cfg.FPN.FPN_ON:
                if k.startswith('loss_rpn_cls_'):
                    loss_rpn_cls_data += loss_data
                elif k.startswith('loss_rpn_bbox_'):
                    loss_rpn_bbox_data += loss_data
            self.smoothed_losses[k].AddValue(loss_data)

        model_out['total_loss'] = total_loss  # Add the total loss for back propagation
        self.smoothed_total_loss.AddValue(total_loss.data[0])
        if cfg.FPN.FPN_ON:
            self.smoothed_losses['loss_rpn_cls'].AddValue(loss_rpn_cls_data)
            self.smoothed_losses['loss_rpn_bbox'].AddValue(loss_rpn_bbox_data)

        for k, metric in model_out['metrics'].items():
            metric = metric.mean(dim=0, keepdim=True)
            self.smoothed_metrics[k].AddValue(metric.data[0]) 
Author: roytseng-tw, Project: Detectron.pytorch, Lines: 36, Source: training_stats.py

Example 14: _UpdateIterStats_inner

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def _UpdateIterStats_inner(self, model_out, inner_iter):
        """Update tracked iteration statistics for the case of iter_size > 1"""
        assert inner_iter < self.misc_args.iter_size

        total_loss = 0

        if inner_iter == 0:
            self.inner_total_loss = []
            for k in model_out['losses']:
                self.inner_losses[k] = []

        for k, loss in model_out['losses'].items():
            assert loss.shape[0] == cfg.NUM_GPUS
            loss = loss.mean(dim=0, keepdim=True)
            total_loss += loss
            loss_data = loss.data[0]

            model_out['losses'][k] = loss

            self.inner_losses[k].append(loss_data)
            if inner_iter == (self.misc_args.iter_size - 1):
                loss_data = self._mean_and_reset_inner_list('inner_losses', k)
                self.smoothed_losses[k].AddValue(loss_data)

        model_out['total_loss'] = total_loss  # Add the total loss for back propagation
        total_loss_data = total_loss.data[0]
        self.inner_total_loss.append(total_loss_data)
        if inner_iter == (self.misc_args.iter_size - 1):
            total_loss_data = self._mean_and_reset_inner_list('inner_total_loss')
            self.smoothed_total_loss.AddValue(total_loss_data) 
Author: ppengtang, Project: pcl.pytorch, Lines: 32, Source: training_stats.py

Example 15: UpdateIterStats

# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import NUM_GPUS [as alias]
def UpdateIterStats(self, model_out, inner_iter=None):
        """Update tracked iteration statistics."""
        if inner_iter is not None and self.misc_args.iter_size > 1:
            # For the case of using args.iter_size > 1
            return self._UpdateIterStats_inner(model_out, inner_iter)

        # The following code is kept for compatibility with train_net.py and iter_size == 1
        total_loss = 0
        if cfg.FPN.FPN_ON:
            loss_rpn_cls_data = 0
            loss_rpn_bbox_data = 0

        for k, loss in model_out['losses'].items():
            assert loss.shape[0] == cfg.NUM_GPUS
            loss = loss.mean(dim=0, keepdim=True)
            total_loss += loss
            loss_data = loss.item()
            model_out['losses'][k] = loss
            if cfg.FPN.FPN_ON:
                if k.startswith('loss_rpn_cls_'):
                    loss_rpn_cls_data += loss_data
                elif k.startswith('loss_rpn_bbox_'):
                    loss_rpn_bbox_data += loss_data
            self.smoothed_losses[k].AddValue(loss_data)

        model_out['total_loss'] = total_loss  # Add the total loss for back propagation
        self.smoothed_total_loss.AddValue(total_loss.item())
        if cfg.FPN.FPN_ON:
            self.smoothed_losses['loss_rpn_cls'].AddValue(loss_rpn_cls_data)
            self.smoothed_losses['loss_rpn_bbox'].AddValue(loss_rpn_bbox_data)

        for k, metric in model_out['metrics'].items():
            metric = metric.mean(dim=0, keepdim=True)
            self.smoothed_metrics[k].AddValue(metric.item()) 
Author: ruotianluo, Project: Context-aware-ZSR, Lines: 36, Source: training_stats.py


Note: The core.config.cfg.NUM_GPUS attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and redistribution and use should follow the license of the corresponding project. Do not reproduce without permission.