

Python parallel.convert_syncbn_model Method Code Examples

This article collects typical usage examples of the Python method apex.parallel.convert_syncbn_model. If you are unsure what parallel.convert_syncbn_model does or how to use it in practice, the curated code examples below may help. You can also explore other usage examples from the apex.parallel module.


Four code examples of parallel.convert_syncbn_model are shown below, sorted by popularity by default.
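Before the examples, here is a minimal sketch of the typical call pattern (the model and setup below are illustrative, not taken from the examples that follow): convert_syncbn_model replaces the torch.nn.BatchNorm* layers in a module with apex's synchronized batch norm, and is usually applied before wrapping the network in DistributedDataParallel.

import torch
import torch.nn as nn
from apex.parallel import DistributedDataParallel, convert_syncbn_model

# Illustrative model; any nn.Module containing BatchNorm layers works.
model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.ReLU()).cuda()

# Assumes torch.distributed.init_process_group has already been called
# and the current CUDA device has been set for this process.
model = convert_syncbn_model(model)        # BatchNorm2d -> apex SyncBatchNorm
model = DistributedDataParallel(model)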

Example 1: __init__

# Required import: from apex import parallel [as alias]
# Or: from apex.parallel import convert_syncbn_model [as alias]
def __init__(self, system_config, model, distributed=False, gpu=None):
        super(NetworkFactory, self).__init__()

        self.system_config = system_config

        self.gpu = gpu
        self.model = DummyModule(model)
        self.loss = model.loss
        self.network = Network(self.model, self.loss)

        if distributed:
            from apex.parallel import DistributedDataParallel, convert_syncbn_model
            torch.cuda.set_device(gpu)
            self.network = self.network.cuda(gpu)
            self.network = convert_syncbn_model(self.network)
            self.network = DistributedDataParallel(self.network)
        else:
            self.network = DataParallel(self.network, chunk_sizes=system_config.chunk_sizes)

        total_params = 0
        for params in self.model.parameters():
            num_params = 1
            for x in params.size():
                num_params *= x
            total_params += num_params
        print("\033[0;35m " + "total parameters: {}".format(
            total_params) + "\033[0m")

        if system_config.opt_algo == "adam":
            self.optimizer = torch.optim.Adam(
                filter(lambda p: p.requires_grad, self.model.parameters())
            )
        elif system_config.opt_algo == "sgd":
            self.optimizer = torch.optim.SGD(
                filter(lambda p: p.requires_grad, self.model.parameters()),
                lr=system_config.learning_rate,
                momentum=0.9, weight_decay=0.0001
            )
        else:
            raise ValueError("unknown optimizer") 
Developer ID: DataXujing, Project: CornerNet-Lite-Pytorch, Lines: 42, Source: py_factory.py
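A note on Example 1: apex's DistributedDataParallel, and the SyncBatchNorm layers produced by convert_syncbn_model, need the default process group to be initialized before NetworkFactory is constructed with distributed=True. The per-process setup below is a hedged assumption about how the surrounding training script might do this; the rank handling and the commented NetworkFactory call are illustrative.

import torch
import torch.distributed as dist

# Hypothetical per-process setup, assuming RANK / WORLD_SIZE / MASTER_ADDR /
# MASTER_PORT are provided by the launcher (e.g. torchrun).
dist.init_process_group(backend="nccl", init_method="env://")
gpu = dist.get_rank() % torch.cuda.device_count()

# factory = NetworkFactory(system_config, model, distributed=True, gpu=gpu)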

Example 2: optimize_model

# Required import: from apex import parallel [as alias]
# Or: from apex.parallel import convert_syncbn_model [as alias]
def optimize_model(model, device, local_rank, optimizer=None, distributed=False, use_amp=None):
    """
        Wraps MultiGPU or distributed usage around a model
        No support for ONNX models

        :param model: model to optimize (e.g. trimming weights to fp16 / mixed precision)
        :type model: AdaptiveModel
        :param device: either gpu or cpu, get the device from initialize_device_settings()
        :param distributed: Whether training on distributed machines
        :param local_rank: rank of the machine in a distributed setting
        :param use_amp: Optimization level of nvidia's automatic mixed precision (AMP). The higher the level, the faster the model.
                        Options:
                        "O0" (Normal FP32 training)
                        "O1" (Mixed Precision => Recommended)
                        "O2" (Almost FP16)
                        "O3" (Pure FP16).
                        See details on: https://nvidia.github.io/apex/amp.html
        :return: model, optimizer
        """
    model, optimizer = _init_amp(model, device, optimizer, use_amp)

    if distributed:
        if APEX_PARALLEL_AVAILABLE:
            model = convert_syncbn_model(model)
            logger.info("Multi-GPU Training via DistributedDataParallel and apex.parallel")
        else:
            logger.info("Multi-GPU Training via DistributedDataParallel")

        # for some models DistributedDataParallel might complain about parameters
        # not contributing to loss. find_unused_parameters remedies that.
        model = WrappedDDP(model,
                           device_ids=[local_rank],
                           output_device=local_rank,
                           find_unused_parameters=True)

    elif torch.cuda.device_count() > 1 and device.type == "cuda":
        model = WrappedDataParallel(model)
        logger.info("Multi-GPU Training via DataParallel")

    return model, optimizer 
Developer ID: deepset-ai, Project: FARM, Lines: 42, Source: optimization.py
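The helper _init_amp is not shown in the snippet above. As a hedged sketch (an assumption, not FARM's actual implementation), it is typically a thin wrapper around apex.amp.initialize, which casts the model and patches the optimizer for the requested opt_level ("O0" through "O3"):

import torch

def _init_amp(model, device, optimizer=None, use_amp=None):
    # Sketch only: move the model to the target device and, if an AMP
    # opt_level such as "O1" was requested, let apex.amp rewrite both objects.
    model = model.to(device)
    if use_amp and optimizer is not None:
        from apex import amp  # only needed when mixed precision is requested
        model, optimizer = amp.initialize(model, optimizer, opt_level=use_amp)
    return model, optimizer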

Example 3: __init__

# Required import: from apex import parallel [as alias]
# Or: from apex.parallel import convert_syncbn_model [as alias]
def __init__(self, system_config, model, distributed=False, gpu=None):
        super(NetworkFactory, self).__init__()

        self.system_config = system_config

        self.gpu     = gpu
        self.model   = DummyModule(model)
        self.loss    = model.loss
        self.network = Network(self.model, self.loss)

        if distributed:
            from apex.parallel import DistributedDataParallel, convert_syncbn_model
            torch.cuda.set_device(gpu)
            self.network = self.network.cuda(gpu)
            self.network = convert_syncbn_model(self.network)
            self.network = DistributedDataParallel(self.network)
        else:
            self.network = DataParallel(self.network, chunk_sizes=system_config.chunk_sizes)

        total_params = 0
        for params in self.model.parameters():
            num_params = 1
            for x in params.size():
                num_params *= x
            total_params += num_params
        print("total parameters: {}".format(total_params))

        if system_config.opt_algo == "adam":
            self.optimizer = torch.optim.Adam(
                filter(lambda p: p.requires_grad, self.model.parameters())
            )
        elif system_config.opt_algo == "sgd":
            self.optimizer = torch.optim.SGD(
                filter(lambda p: p.requires_grad, self.model.parameters()),
                lr=system_config.learning_rate, 
                momentum=0.9, weight_decay=0.0001
            )
        else:
            raise ValueError("unknown optimizer") 
Developer ID: princeton-vl, Project: CornerNet-Lite, Lines: 41, Source: py_factory.py

Example 4: set_syncbn

# Required import: from apex import parallel [as alias]
# Or: from apex.parallel import convert_syncbn_model [as alias]
def set_syncbn(net):
    if has_apex:
        net = parallel.convert_syncbn_model(net)
    else:
        net = nn.SyncBatchNorm.convert_sync_batchnorm(net)
    return net 
Developer ID: CoinCheung, Project: BiSeNet, Lines: 8, Source: train.py
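The has_apex flag in Example 4 is usually set by a try/except import at module level; the snippet below is a hedged reconstruction of that pattern together with an illustrative use of set_syncbn (the model is made up for demonstration).

import torch.nn as nn

# Typical detection pattern (an assumption; not part of the original snippet).
try:
    from apex import parallel
    has_apex = True
except ImportError:
    has_apex = False

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
net = set_syncbn(net)  # apex SyncBatchNorm if available, else torch.nn.SyncBatchNorm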


Note: The apex.parallel.convert_syncbn_model examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please refer to the corresponding project's License before using or redistributing this code; do not reproduce it without permission.