This article collects typical usage examples of the Python method torch.nn.parallel.replicate.replicate. If you are wondering how to use replicate.replicate, what it does, or what calling it looks like in practice, the curated code examples below may help. You can also explore further usage examples from the module this method belongs to, torch.nn.parallel.replicate.
The following shows 12 code examples of the replicate.replicate method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
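Before diving into the examples, here is a minimal sketch (not taken from any of the projects below) of what calling replicate directly looks like; it assumes at least two visible CUDA devices, and the module and input shapes are placeholders:

import torch
from torch.nn.parallel import replicate
from torch.nn.parallel.parallel_apply import parallel_apply

model = torch.nn.Linear(16, 4).cuda(0)   # placeholder module
device_ids = [0, 1]                      # assumes two visible GPUs

# One copy of the module per device; parameters are broadcast from GPU 0.
replicas = replicate(model, device_ids)

# Feed each replica its own shard of the data and run them in parallel.
shards = [torch.randn(8, 16, device=f'cuda:{d}') for d in device_ids]
outputs = parallel_apply(replicas, [(x,) for x in shards], devices=device_ids)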
Example 1: forward
# Required import: from torch.nn.parallel import replicate [as alias]
# Or: from torch.nn.parallel.replicate import replicate [as alias]
# Also needed: from itertools import chain
def forward(self, *inputs, **kwargs):
    if not self.device_ids:
        return self.module(*inputs, **kwargs)

    for t in chain(self.module.parameters(), self.module.buffers()):
        if t.device != self.src_device_obj:
            raise RuntimeError(
                "module must have its parameters and buffers "
                "on device {} (device_ids[0]) but found one of "
                "them on device: {}".format(
                    self.src_device_obj, t.device))

    inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
    if len(self.device_ids) == 1:
        return self.module(*inputs[0], **kwargs[0])
    replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
    outputs = self.parallel_apply(replicas, inputs, kwargs)
    return outputs
Example 2: _data_parallel_wrapper
# Required import: from torch.nn.parallel import replicate [as alias]
# Or: from torch.nn.parallel.replicate import replicate [as alias]
def _data_parallel_wrapper(func_name, device_ids, output_device):
    r"""
    Wrapper for running a method of ``network`` on multiple GPUs; modeled on nn.DataParallel's forward.

    :param str func_name: name of the method on ``network`` to run in parallel
    :param device_ids: same meaning as device_ids in nn.DataParallel
    :param output_device: same meaning as output_device in nn.DataParallel
    :return:
    """
    def wrapper(network, *inputs, **kwargs):
        inputs, kwargs = scatter_kwargs(inputs, kwargs, device_ids, dim=0)
        if len(device_ids) == 1:
            return getattr(network, func_name)(*inputs[0], **kwargs[0])
        replicas = replicate(network, device_ids[:len(inputs)])
        outputs = parallel_apply(replicas, func_name, inputs, kwargs, device_ids[:len(replicas)])
        return gather(outputs, output_device)
    return wrapper
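A minimal usage sketch: it assumes the project's own scatter_kwargs/parallel_apply/gather helpers are imported (note that the parallel_apply used here takes a method name, unlike torch's), and 'predict', the network, and the batch are placeholders:

# Hypothetical usage; 'predict' is whatever method of the network should run in parallel.
parallel_predict = _data_parallel_wrapper('predict', device_ids=[0, 1], output_device=0)
batch = torch.randn(32, 128, device='cuda:0')   # placeholder input
result = parallel_predict(network, batch)        # scattered, run on both GPUs, gathered on GPU 0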
Example 3: replicate
# Required import: from torch.nn.parallel import replicate [as alias]
# Or: from torch.nn.parallel.replicate import replicate [as alias]
def replicate(self, module, device_ids):
    return replicate(module, device_ids, not torch.is_grad_enabled())
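In the PyTorch versions these examples appear to target, the third positional argument of replicate is the detach flag, so the line above detaches the replicas from the autograd graph whenever gradients are globally disabled (e.g. inside torch.no_grad()). A hedged equivalent spelled out with the keyword:

# Assumes a PyTorch version whose signature is replicate(network, devices, detach=False).
replicas = replicate(module, device_ids, detach=not torch.is_grad_enabled())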
Example 4: forward
# Required import: from torch.nn.parallel import replicate [as alias]
# Or: from torch.nn.parallel.replicate import replicate [as alias]
def forward(self, *inputs, **kwargs):
    if not self.device_ids:
        return self.module(*inputs, **kwargs)
    # Scatter the inputs across the devices, using per-device chunk sizes
    inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
    if len(self.device_ids) == 1:
        return self.module(*inputs[0], **kwargs[0])
    # Replicate the module onto the devices that actually received inputs
    replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
    # Run the replicas in parallel to get per-device outputs
    outputs = self.parallel_apply(replicas, inputs, kwargs)
    return self.gather(outputs, self.output_device)
# The methods above are thin re-wrappings of the corresponding torch functions
Example 5: replicate
# Required import: from torch.nn.parallel import replicate [as alias]
# Or: from torch.nn.parallel.replicate import replicate [as alias]
def replicate(self, module, device_ids):
    return replicate(module, device_ids)
Example 6: data_parallel
# Required import: from torch.nn.parallel import replicate [as alias]
# Or: from torch.nn.parallel.replicate import replicate [as alias]
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
    r"""Evaluates module(input) in parallel across the GPUs given in device_ids.

    This is the functional version of the DataParallel module.

    Args:
        module: the module to evaluate in parallel
        inputs: inputs to the module
        device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output. Use -1 to indicate the CPU.
            (default: device_ids[0])

    Returns:
        a Variable containing the result of module(input) located on
        output_device
    """
    if not isinstance(inputs, tuple):
        inputs = (inputs,)

    if device_ids is None:
        device_ids = list(range(torch.cuda.device_count()))

    if output_device is None:
        output_device = device_ids[0]

    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    if len(device_ids) == 1:
        return module(*inputs[0], **module_kwargs[0])
    used_device_ids = device_ids[:len(inputs)]
    replicas = replicate(module, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim)
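A minimal usage sketch of this functional form, assuming two visible GPUs; the model and input shapes are placeholders:

model = torch.nn.Linear(128, 10).cuda(0)
x = torch.randn(64, 128, device='cuda:0')

# Splits x along dim 0, runs a replica of model on each GPU,
# and gathers the result back onto device 0.
y = data_parallel(model, x, device_ids=[0, 1], output_device=0)
print(y.shape)  # torch.Size([64, 10])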
Example 7: forward
# Required import: from torch.nn.parallel import replicate [as alias]
# Or: from torch.nn.parallel.replicate import replicate [as alias]
def forward(self, *inputs, **kwargs):
    if not self.device_ids:
        return self.module(*inputs, **kwargs)
    inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
    if len(self.device_ids) == 1:
        return self.module(*inputs[0], **kwargs[0])
    replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
    outputs = self.parallel_apply(replicas, inputs, kwargs)
    return self.gather(outputs, self.output_device)
Example 8: forward
# Required import: from torch.nn.parallel import replicate [as alias]
# Or: from torch.nn.parallel.replicate import replicate [as alias]
def forward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
    if not self.device_ids:
        return self.flow.forward(*inputs, **kwargs)
    inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
    if len(self.device_ids) == 1:
        return self.flow.forward(*inputs[0], **kwargs[0])
    replicas = self.replicate(self.flow, self.device_ids[:len(inputs)])
    outputs = self.parallel_apply(replicas, inputs, kwargs)
    return self.gather(outputs, self.output_device)
Example 9: backward
# Required import: from torch.nn.parallel import replicate [as alias]
# Or: from torch.nn.parallel.replicate import replicate [as alias]
def backward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
    if not self.device_ids:
        return self.flow.backward(*inputs, **kwargs)
    inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
    if len(self.device_ids) == 1:
        return self.flow.backward(*inputs[0], **kwargs[0])
    replicas = self.replicate(self.flow, self.device_ids[:len(inputs)])
    outputs = self.parallel_apply(replicas, inputs, kwargs, backward=True)
    return self.gather(outputs, self.output_device)
Example 10: replicate
# Required import: from torch.nn.parallel import replicate [as alias]
# Or: from torch.nn.parallel.replicate import replicate [as alias]
def replicate(self, flow, device_ids):
    return replicate(flow, device_ids)
Example 11: forward
# Required import: from torch.nn.parallel import replicate [as alias]
# Or: from torch.nn.parallel.replicate import replicate [as alias]
def forward(self, *inputs, **kwargs):
    if not self.device_ids:
        return self.module(*inputs, **kwargs)
    inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
    if len(self.device_ids) == 1:
        return self.module(*inputs[0], **kwargs[0])
    replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
    outputs = self.parallel_apply(replicas, inputs, kwargs)
    return outputs
Example 12: replicate
# Required import: from torch.nn.parallel import replicate [as alias]
# Or: from torch.nn.parallel.replicate import replicate [as alias]
def replicate(module, device_ids):
    return replicate(module, device_ids)