This article collects typical usage examples of the Python method apex.parallel.convert_syncbn_model. If you are wondering what parallel.convert_syncbn_model does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore other usage examples from the module apex.parallel.
Four code examples of parallel.convert_syncbn_model are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
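Before the examples, here is a minimal sketch of the usual call order (assuming a multi-GPU job launched via torch.distributed; the helper name wrap_for_distributed is hypothetical): BatchNorm layers are converted with convert_syncbn_model before the model is wrapped in apex's DistributedDataParallel, so the wrapper registers the converted modules.

    import torch
    from apex.parallel import DistributedDataParallel, convert_syncbn_model

    def wrap_for_distributed(model, gpu):
        # hypothetical helper illustrating the pattern used in the examples below
        torch.cuda.set_device(gpu)          # pin this process to its GPU
        model = model.cuda(gpu)             # move the model there first
        model = convert_syncbn_model(model) # swap torch.nn.BatchNorm* for apex SyncBatchNorm
        model = DistributedDataParallel(model)  # then wrap for gradient synchronization
        return model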
Example 1: __init__
# Required import: from apex import parallel [as alias]
# Or: from apex.parallel import convert_syncbn_model [as alias]
def __init__(self, system_config, model, distributed=False, gpu=None):
    super(NetworkFactory, self).__init__()

    self.system_config = system_config
    self.gpu = gpu
    self.model = DummyModule(model)
    self.loss = model.loss
    self.network = Network(self.model, self.loss)

    if distributed:
        from apex.parallel import DistributedDataParallel, convert_syncbn_model
        torch.cuda.set_device(gpu)
        self.network = self.network.cuda(gpu)
        # convert BatchNorm layers to SyncBatchNorm before wrapping with DistributedDataParallel
        self.network = convert_syncbn_model(self.network)
        self.network = DistributedDataParallel(self.network)
    else:
        self.network = DataParallel(self.network, chunk_sizes=system_config.chunk_sizes)

    # count trainable and non-trainable parameters
    total_params = 0
    for params in self.model.parameters():
        num_params = 1
        for x in params.size():
            num_params *= x
        total_params += num_params
    print("\033[0;35m " + "total parameters: {}".format(total_params) + "\033[0m")

    if system_config.opt_algo == "adam":
        self.optimizer = torch.optim.Adam(
            filter(lambda p: p.requires_grad, self.model.parameters())
        )
    elif system_config.opt_algo == "sgd":
        self.optimizer = torch.optim.SGD(
            filter(lambda p: p.requires_grad, self.model.parameters()),
            lr=system_config.learning_rate,
            momentum=0.9, weight_decay=0.0001
        )
    else:
        raise ValueError("unknown optimizer")
Example 2: optimize_model
# Required import: from apex import parallel [as alias]
# Or: from apex.parallel import convert_syncbn_model [as alias]
def optimize_model(model, device, local_rank, optimizer=None, distributed=False, use_amp=None):
    """
    Wraps multi-GPU or distributed usage around a model.
    No support for ONNX models.

    :param model: model to optimize (e.g. trimming weights to fp16 / mixed precision)
    :type model: AdaptiveModel
    :param device: either gpu or cpu, get the device from initialize_device_settings()
    :param distributed: whether training runs on distributed machines
    :param local_rank: rank of the machine in a distributed setting
    :param use_amp: optimization level of NVIDIA's automatic mixed precision (AMP). The higher the level, the faster the model.
                    Options:
                    "O0" (normal FP32 training)
                    "O1" (mixed precision => recommended)
                    "O2" (almost FP16)
                    "O3" (pure FP16)
                    See details at: https://nvidia.github.io/apex/amp.html
    :return: model, optimizer
    """
    model, optimizer = _init_amp(model, device, optimizer, use_amp)

    if distributed:
        if APEX_PARALLEL_AVAILABLE:
            model = convert_syncbn_model(model)
            logger.info("Multi-GPU Training via DistributedDataParallel and apex.parallel")
        else:
            logger.info("Multi-GPU Training via DistributedDataParallel")

        # For some models DistributedDataParallel might complain about parameters
        # not contributing to the loss. find_unused_parameters remedies that.
        model = WrappedDDP(model,
                           device_ids=[local_rank],
                           output_device=local_rank,
                           find_unused_parameters=True)

    elif torch.cuda.device_count() > 1 and device.type == "cuda":
        model = WrappedDataParallel(model)
        logger.info("Multi-GPU Training via DataParallel")

    return model, optimizer
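The _init_amp helper called at the top of optimize_model is not shown in this snippet. A hypothetical sketch of what it might do, assuming it delegates to apex.amp.initialize when an AMP level such as "O1" is requested:

    def _init_amp(model, device, optimizer=None, use_amp=None):
        # hypothetical helper; the real implementation may differ
        model = model.to(device)
        if use_amp and optimizer is not None:
            from apex import amp
            # patch model and optimizer for mixed-precision training at the given opt_level
            model, optimizer = amp.initialize(model, optimizer, opt_level=use_amp)
        return model, optimizer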
Example 3: __init__
# Required import: from apex import parallel [as alias]
# Or: from apex.parallel import convert_syncbn_model [as alias]
def __init__(self, system_config, model, distributed=False, gpu=None):
    super(NetworkFactory, self).__init__()

    self.system_config = system_config
    self.gpu = gpu
    self.model = DummyModule(model)
    self.loss = model.loss
    self.network = Network(self.model, self.loss)

    if distributed:
        from apex.parallel import DistributedDataParallel, convert_syncbn_model
        torch.cuda.set_device(gpu)
        self.network = self.network.cuda(gpu)
        self.network = convert_syncbn_model(self.network)
        self.network = DistributedDataParallel(self.network)
    else:
        self.network = DataParallel(self.network, chunk_sizes=system_config.chunk_sizes)

    total_params = 0
    for params in self.model.parameters():
        num_params = 1
        for x in params.size():
            num_params *= x
        total_params += num_params
    print("total parameters: {}".format(total_params))

    if system_config.opt_algo == "adam":
        self.optimizer = torch.optim.Adam(
            filter(lambda p: p.requires_grad, self.model.parameters())
        )
    elif system_config.opt_algo == "sgd":
        self.optimizer = torch.optim.SGD(
            filter(lambda p: p.requires_grad, self.model.parameters()),
            lr=system_config.learning_rate,
            momentum=0.9, weight_decay=0.0001
        )
    else:
        raise ValueError("unknown optimizer")
Example 4: set_syncbn
# Required import: from apex import parallel [as alias]
# Or: from apex.parallel import convert_syncbn_model [as alias]
def set_syncbn(net):
    if has_apex:
        # apex converter: replaces BatchNorm layers with apex.parallel.SyncBatchNorm
        net = parallel.convert_syncbn_model(net)
    else:
        # fall back to PyTorch's native synchronized BatchNorm
        net = nn.SyncBatchNorm.convert_sync_batchnorm(net)
    return net
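The has_apex flag in Example 4 is not defined in the snippet. A common pattern (shown here as an assumption, not part of the original code) is to detect apex with a guarded import and then call set_syncbn on any model that contains BatchNorm layers:

    import torch.nn as nn

    try:
        from apex import parallel
        has_apex = True
    except ImportError:
        has_apex = False

    net = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.ReLU())
    net = set_syncbn(net)  # BatchNorm2d layers are swapped for a synchronized variant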