

Python parallel.convert_syncbn_model Method: Code Examples

This article collects typical usage examples of the Python method apex.parallel.convert_syncbn_model. If you are wondering what parallel.convert_syncbn_model does, how to call it, or where it is used in practice, the curated examples below should help. You can also explore further usage examples from the containing module, apex.parallel.


Four code examples of the parallel.convert_syncbn_model method are shown below, sorted by popularity by default. In every case the pattern is the same: the method walks a model and replaces its BatchNorm layers with synchronized BatchNorm layers, so that batch statistics are computed across all processes during distributed training.
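
As all four examples illustrate, the conversion happens after the model has been moved to its GPU and before it is wrapped in DistributedDataParallel, so that every process shares BatchNorm statistics. A minimal standalone sketch of that ordering (not taken from the examples below; it assumes the default process group is already initialized, e.g. by torch.distributed.launch):

import torch.nn as nn
from apex.parallel import DistributedDataParallel, convert_syncbn_model

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU()).cuda()
model = convert_syncbn_model(model)     # BatchNorm2d -> apex SyncBatchNorm
model = DistributedDataParallel(model)  # gradients all-reduced across ranks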

Example 1: __init__

# Required import: from apex import parallel [as alias]
# Or: from apex.parallel import convert_syncbn_model [as alias]
def __init__(self, system_config, model, distributed=False, gpu=None):
        super(NetworkFactory, self).__init__()

        self.system_config = system_config

        self.gpu = gpu
        self.model = DummyModule(model)
        self.loss = model.loss
        self.network = Network(self.model, self.loss)

        if distributed:
            from apex.parallel import DistributedDataParallel, convert_syncbn_model
            torch.cuda.set_device(gpu)
            self.network = self.network.cuda(gpu)
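            # replace BatchNorm layers with apex SyncBatchNorm before wrapping in DDP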
            self.network = convert_syncbn_model(self.network)
            self.network = DistributedDataParallel(self.network)
        else:
            self.network = DataParallel(self.network, chunk_sizes=system_config.chunk_sizes)

        total_params = 0
        for params in self.model.parameters():
            num_params = 1
            for x in params.size():
                num_params *= x
            total_params += num_params
        print("\033[0;35m " + "total parameters: {}".format(
            total_params) + "\033[0m")

        if system_config.opt_algo == "adam":
            self.optimizer = torch.optim.Adam(
                filter(lambda p: p.requires_grad, self.model.parameters())
            )
        elif system_config.opt_algo == "sgd":
            self.optimizer = torch.optim.SGD(
                filter(lambda p: p.requires_grad, self.model.parameters()),
                lr=system_config.learning_rate,
                momentum=0.9, weight_decay=0.0001
            )
        else:
            raise ValueError("unknown optimizer") 
Author: DataXujing | Project: CornerNet-Lite-Pytorch | Lines: 42 | Source: py_factory.py
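
An aside on the parameter-counting loop above: the same count can be obtained in one line with Tensor.numel() (a drop-in equivalent, where model stands for self.model):

total_params = sum(p.numel() for p in model.parameters())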

Example 2: optimize_model

# Required import: from apex import parallel [as alias]
# Or: from apex.parallel import convert_syncbn_model [as alias]
def optimize_model(model, device, local_rank, optimizer=None, distributed=False, use_amp=None):
    """
        Wraps MultiGPU or distributed usage around a model
        No support for ONNX models

        :param model: model to optimize (e.g. trimming weights to fp16 / mixed precision)
        :type model: AdaptiveModel
        :param device: either gpu or cpu, get the device from initialize_device_settings()
        :param distributed: Whether training on distributed machines
        :param local_rank: rank of the machine in a distributed setting
        :param use_amp: Optimization level of nvidia's automatic mixed precision (AMP). The higher the level, the faster the model.
                        Options:
                        "O0" (Normal FP32 training)
                        "O1" (Mixed Precision => Recommended)
                        "O2" (Almost FP16)
                        "O3" (Pure FP16).
                        See details on: https://nvidia.github.io/apex/amp.html
        :return: model, optimizer
        """
    model, optimizer = _init_amp(model, device, optimizer, use_amp)

    if distributed:
        if APEX_PARALLEL_AVAILABLE:
            model = convert_syncbn_model(model)
            logger.info("Multi-GPU Training via DistributedDataParallel and apex.parallel")
        else:
            logger.info("Multi-GPU Training via DistributedDataParallel")

        # For some models DistributedDataParallel might complain about parameters
        # not contributing to loss. find_unused_parameters=True remedies that.
        model = WrappedDDP(model,
                           device_ids=[local_rank],
                           output_device=local_rank,
                           find_unused_parameters=True)

    elif torch.cuda.device_count() > 1 and device.type == "cuda":
        model = WrappedDataParallel(model)
        logger.info("Multi-GPU Training via DataParallel")

    return model, optimizer 
Author: deepset-ai | Project: FARM | Lines: 42 | Source: optimization.py
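
A hypothetical call sequence for optimize_model. The setup below is an assumption, not part of FARM: local_rank normally comes from the launcher (e.g. torch.distributed.launch), and model/optimizer stand in for a real AdaptiveModel and its optimizer.

import torch

local_rank = 0  # assumption: normally parsed from the launcher's --local_rank argument
torch.distributed.init_process_group(backend="nccl")  # needs MASTER_ADDR etc. set by the launcher
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)

model, optimizer = optimize_model(model, device=device, local_rank=local_rank,
                                  optimizer=optimizer, distributed=True, use_amp="O1")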

Example 3: __init__

# Required import: from apex import parallel [as alias]
# Or: from apex.parallel import convert_syncbn_model [as alias]
def __init__(self, system_config, model, distributed=False, gpu=None):
        super(NetworkFactory, self).__init__()

        self.system_config = system_config

        self.gpu     = gpu
        self.model   = DummyModule(model)
        self.loss    = model.loss
        self.network = Network(self.model, self.loss)

        if distributed:
            from apex.parallel import DistributedDataParallel, convert_syncbn_model
            torch.cuda.set_device(gpu)
            self.network = self.network.cuda(gpu)
            self.network = convert_syncbn_model(self.network)
            self.network = DistributedDataParallel(self.network)
        else:
            self.network = DataParallel(self.network, chunk_sizes=system_config.chunk_sizes)

        total_params = 0
        for params in self.model.parameters():
            num_params = 1
            for x in params.size():
                num_params *= x
            total_params += num_params
        print("total parameters: {}".format(total_params))

        if system_config.opt_algo == "adam":
            self.optimizer = torch.optim.Adam(
                filter(lambda p: p.requires_grad, self.model.parameters())
            )
        elif system_config.opt_algo == "sgd":
            self.optimizer = torch.optim.SGD(
                filter(lambda p: p.requires_grad, self.model.parameters()),
                lr=system_config.learning_rate, 
                momentum=0.9, weight_decay=0.0001
            )
        else:
            raise ValueError("unknown optimizer") 
Author: princeton-vl | Project: CornerNet-Lite | Lines: 41 | Source: py_factory.py

Example 4: set_syncbn

# Required import: from apex import parallel [as alias]
# Or: from apex.parallel import convert_syncbn_model [as alias]
def set_syncbn(net):
    if has_apex:
        net = parallel.convert_syncbn_model(net)
    else:
        net = nn.SyncBatchNorm.convert_sync_batchnorm(net)
    return net 
Author: CoinCheung | Project: BiSeNet | Lines: 8 | Source: train.py
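
The has_apex flag used above is typically set with a guarded import. A plausible definition (an assumption — the snippet does not include it): try to import apex.parallel and fall back to PyTorch's built-in nn.SyncBatchNorm.convert_sync_batchnorm when apex is not installed.

try:
    from apex import parallel  # assumption: the usual optional-dependency guard
    has_apex = True
except ImportError:
    has_apex = False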


Note: The apex.parallel.convert_syncbn_model method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code. Do not reproduce this article without permission.