This article collects typical usage examples of the Python method torch.nn.parallel.parallel_apply.parallel_apply. If you have been wondering how parallel_apply.parallel_apply is used in practice, or what calling it looks like, the curated code examples below may help. You can also explore further usage examples from the module the method is defined in, torch.nn.parallel.parallel_apply.
Eight code examples of the parallel_apply.parallel_apply method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
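Before the collected examples, here is a minimal sketch of calling parallel_apply directly, assuming two visible CUDA devices; the modules, shapes, and device ids are illustrative and not taken from the examples below.

import torch
from torch import nn
from torch.nn.parallel.parallel_apply import parallel_apply

# Two modules, each already on its own GPU, each with its own input tuple.
m0 = nn.Linear(16, 4).cuda(0)
m1 = nn.Linear(16, 4).cuda(1)
x0 = torch.randn(8, 16, device='cuda:0')
x1 = torch.randn(8, 16, device='cuda:1')

# Runs m0(x0) and m1(x1) in parallel threads; each output stays on its module's device.
outputs = parallel_apply([m0, m1], [(x0,), (x1,)], devices=[0, 1])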
Example 1: forward
# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def forward(self, *inputs, **kwargs):
    if not self.device_ids:
        return self.module(*inputs, **kwargs)
    # Every parameter and buffer must live on the first device in device_ids.
    for t in chain(self.module.parameters(), self.module.buffers()):
        if t.device != self.src_device_obj:
            raise RuntimeError(
                "module must have its parameters and buffers "
                "on device {} (device_ids[0]) but found one of "
                "them on device: {}".format(
                    self.src_device_obj, t.device))
    inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
    if len(self.device_ids) == 1:
        # scatter returns one (args, kwargs) pair per device.
        return self.module(*inputs[0], **kwargs[0])
    replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
    outputs = self.parallel_apply(replicas, inputs, kwargs)
    # The per-device outputs are returned as-is, without a final gather.
    return outputs
Example 2: parallel_apply
# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def parallel_apply(self, replicas, inputs, kwargs):
    # Delegate to torch's parallel_apply, one device per replica.
    return parallel_apply(replicas, inputs, kwargs,
                          self.device_ids[:len(replicas)])
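Examples 1 and 2 belong to the same DataParallel-style wrapper; the flow they implement can be reproduced with the functional helpers, as in the sketch below (assuming two CUDA devices; the module and batch are illustrative). Like Example 1, it returns the per-device outputs without a final gather.

import torch
from torch import nn
from torch.nn.parallel import replicate
from torch.nn.parallel.scatter_gather import scatter_kwargs
from torch.nn.parallel.parallel_apply import parallel_apply

device_ids = [0, 1]
module = nn.Linear(16, 4).cuda(device_ids[0])   # parameters live on device_ids[0]
batch = torch.randn(8, 16, device='cuda:0')

# scatter: split the batch along dim 0 and move each chunk to its device
inputs, kwargs = scatter_kwargs((batch,), {}, device_ids, dim=0)
# replicate: broadcast the module's parameters and buffers to every device
replicas = replicate(module, device_ids[:len(inputs)])
# parallel_apply: run each replica on its own chunk in its own thread
outputs = parallel_apply(replicas, inputs, kwargs, device_ids[:len(replicas)])
# outputs is a list with one tensor per device; no gather is performed here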
Example 3: forward
# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def forward(self, *inputs, **kwargs):
    if not self.device_ids:
        return self.module(*inputs, **kwargs)
    # Scatter the inputs across the devices, honoring the per-device chunk sizes
    inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
    if len(self.device_ids) == 1:
        return self.module(*inputs[0], **kwargs[0])
    # Replicate the module onto each device that received an input chunk
    replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
    # Run the replicas in parallel to get the per-device outputs
    outputs = self.parallel_apply(replicas, inputs, kwargs)
    return self.gather(outputs, self.output_device)
# The method below is a thin re-wrapping of the torch function of the same name
Example 4: parallel_apply
# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def parallel_apply(self, replicas, inputs, kwargs):
    return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
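The self.scatter used in Examples 3 and 4 is a custom variant that takes per-device chunk_sizes, which stock torch.nn.DataParallel does not support (it splits the batch evenly). The uneven split it performs can be illustrated with torch.cuda.comm.scatter, which does accept chunk_sizes; the device ids and sizes below are illustrative and assume three CUDA devices.

import torch
from torch.cuda import comm

batch = torch.randn(16, 3, 224, 224, device='cuda:0')
# Split a batch of 16 unevenly: 4 samples to cuda:0, 6 to cuda:1, 6 to cuda:2.
chunks = comm.scatter(batch, devices=[0, 1, 2], chunk_sizes=[4, 6, 6], dim=0)
print([c.shape[0] for c in chunks])   # [4, 6, 6]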
Example 5: data_parallel
# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
    r"""Evaluates module(input) in parallel across the GPUs given in device_ids.

    This is the functional version of the DataParallel module.

    Args:
        module: the module to evaluate in parallel
        inputs: inputs to the module
        device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output. Use -1 to indicate the CPU.
            (default: device_ids[0])

    Returns:
        a Variable containing the result of module(input), located on
        output_device
    """
    if not isinstance(inputs, tuple):
        inputs = (inputs,)
    if device_ids is None:
        device_ids = list(range(torch.cuda.device_count()))
    if output_device is None:
        output_device = device_ids[0]
    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    if len(device_ids) == 1:
        return module(*inputs[0], **module_kwargs[0])
    used_device_ids = device_ids[:len(inputs)]
    replicas = replicate(module, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim)
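Assuming the imports Example 5 relies on (torch, plus scatter_kwargs, replicate, parallel_apply, and gather from torch.nn.parallel), the functional form can be used roughly as follows; the model, batch, and device ids are illustrative.

import torch
from torch import nn

model = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 10)).cuda(0)
batch = torch.randn(128, 32, device='cuda:0')

# Replicates model onto the listed GPUs, splits the batch along dim 0,
# runs the replicas in parallel, and gathers the results onto output_device.
logits = data_parallel(model, batch, device_ids=[0, 1], output_device=0)
print(logits.shape)   # torch.Size([128, 10])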
Example 6: forward
# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def forward(self, *inputs, **kwargs):
    if not self.device_ids:
        return self.module(*inputs, **kwargs)
    # If GPU 0 is assigned a batch size of 0, scatter the data only to the other devices
    if self.gpu0_bsz == 0:
        device_ids = self.device_ids[1:]
    else:
        device_ids = self.device_ids
    inputs, kwargs = self.scatter(inputs, kwargs, device_ids)
    if len(self.device_ids) == 1:
        return self.module(*inputs[0], **kwargs[0])
    replicas = self.replicate(self.module, self.device_ids)
    if self.gpu0_bsz == 0:
        # Drop the replica on GPU 0; it only keeps the master copy of the parameters
        replicas = replicas[1:]
    outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
    return self.gather(outputs, self.output_device)
Example 7: parallel_apply
# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def parallel_apply(self, replicas, device_ids, inputs, kwargs):
    return parallel_apply(replicas, inputs, kwargs, device_ids)
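Examples 6 and 7 implement a balanced variant where gpu0_bsz controls how much data the first GPU receives; in the gpu0_bsz == 0 branch, GPU 0 keeps the master copy of the parameters but processes no data. Below is a runnable sketch of that branch using the functional helpers, assuming three CUDA devices; the module, batch, and device ids are illustrative.

import torch
from torch import nn
from torch.nn.parallel import replicate
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from torch.nn.parallel.parallel_apply import parallel_apply

device_ids = [0, 1, 2]
module = nn.Linear(16, 4).cuda(device_ids[0])
batch = torch.randn(8, 16, device='cuda:0')

# GPU 0 holds the master parameters but receives no data chunk.
data_device_ids = device_ids[1:]
inputs, kwargs = scatter_kwargs((batch,), {}, data_device_ids, dim=0)
replicas = replicate(module, device_ids)[1:]   # drop the replica on GPU 0
outputs = parallel_apply(replicas, inputs, kwargs, data_device_ids)
result = gather(outputs, device_ids[0], dim=0)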
Example 8: forward
# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def forward(self, *inputs, **kwargs):
    if not self.device_ids:
        return self.module(*inputs, **kwargs)
    inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
    if len(self.device_ids) == 1:
        return self.module(*inputs[0], **kwargs[0])
    replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
    outputs = self.parallel_apply(replicas, inputs, kwargs)
    return self.gather(outputs, self.output_device)