This page collects typical usage examples of the Python method torch.nn.parallel.scatter_gather.scatter_kwargs. If you have been wondering what scatter_gather.scatter_kwargs does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also browse the other members of the torch.nn.parallel.scatter_gather module for further examples.
Six code examples of scatter_gather.scatter_kwargs are listed below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
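Before the examples, here is a minimal, hedged sketch of what scatter_kwargs itself does: it chunks both positional and keyword arguments along a dimension and places one chunk on each target device. The device ids, tensor shapes, and the two-GPU guard below are assumptions for illustration only.

import torch
from torch.nn.parallel.scatter_gather import scatter_kwargs

if torch.cuda.device_count() >= 2:             # scatter_kwargs copies each chunk onto a GPU
    device_ids = [0, 1]
    inputs = (torch.randn(8, 16),)              # positional args, chunked along dim 0
    kwargs = {"mask": torch.ones(8, 16)}        # keyword args, chunked the same way
    scattered_inputs, scattered_kwargs = scatter_kwargs(inputs, kwargs, device_ids, dim=0)
    # scattered_inputs[i] and scattered_kwargs[i] hold the chunk placed on device_ids[i]
    print(len(scattered_inputs), scattered_inputs[0][0].shape)   # 2 torch.Size([4, 16])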
Example 1: _data_parallel_wrapper
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import scatter_kwargs [as alias]
def _data_parallel_wrapper(func_name, device_ids, output_device):
    r"""
    Wrapper for a method of ``network`` that needs to run on multiple GPUs; modeled on
    the forward function of nn.DataParallel.

    :param str func_name: name of the method of ``network`` to run on multiple GPUs
    :param device_ids: same as ``device_ids`` in nn.DataParallel
    :param output_device: same as ``output_device`` in nn.DataParallel
    :return:
    """
    def wrapper(network, *inputs, **kwargs):
        # Split positional and keyword arguments into one chunk per device
        inputs, kwargs = scatter_kwargs(inputs, kwargs, device_ids, dim=0)
        if len(device_ids) == 1:
            return getattr(network, func_name)(*inputs[0], **kwargs[0])
        # Replicate the model onto each device and run func_name on each chunk in parallel
        replicas = replicate(network, device_ids[:len(inputs)])
        outputs = parallel_apply(replicas, func_name, inputs, kwargs, device_ids[:len(replicas)])
        # Collect the per-device results onto output_device
        return gather(outputs, output_device)
    return wrapper
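For orientation, a hedged usage sketch of the wrapper above follows. The ToyNet class, the 'predict' method name, and the device list are illustrative assumptions; the sketch also assumes the replicate, parallel_apply and gather helpers imported by the original module are in scope and that at least two GPUs are visible.

import torch
from torch import nn

if torch.cuda.device_count() >= 2:
    class ToyNet(nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(16, 4)

        def predict(self, x):
            return self.linear(x)

    net = ToyNet().cuda()                       # parameters live on device_ids[0]
    parallel_predict = _data_parallel_wrapper('predict', device_ids=[0, 1], output_device=0)
    out = parallel_predict(net, torch.randn(8, 16).cuda())   # each GPU runs predict on its chunk
    print(out.shape)                            # torch.Size([8, 4]), gathered on output_device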
Example 2: scatter
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import scatter_kwargs [as alias]
def scatter(self, inputs, kwargs, device_ids):
    return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
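This two-line override is the usual way a DataParallel subclass customizes how its inputs are split. A minimal sketch of the surrounding class such an override typically lives in (the class name is an assumption; self.dim is the attribute nn.DataParallel already provides):

import torch
from torch import nn
from torch.nn.parallel.scatter_gather import scatter_kwargs

class CustomDataParallel(nn.DataParallel):
    # Splitting behaviour identical to nn.DataParallel; shown only to place the override in context.
    def scatter(self, inputs, kwargs, device_ids):
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)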
Example 3: _data_parallel
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import scatter_kwargs [as alias]
def _data_parallel(self, batch):
    u"""
    Do the forward pass using multiple GPUs. This is a simplification
    of torch.nn.parallel.data_parallel to support the allennlp model
    interface.
    """
    # The whole batch dict is scattered as keyword arguments; positional inputs are empty
    inputs, module_kwargs = scatter_kwargs((), batch, self._cuda_devices, 0)
    used_device_ids = self._cuda_devices[:len(inputs)]
    replicas = replicate(self._model, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)

    # Only the 'loss' is needed.
    # a (num_gpu, ) tensor with loss on each GPU
    losses = gather([output[u'loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
    return {u'loss': losses.mean()}
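As a small, hedged illustration of the gather call that ends this example (device ids and loss values are assumptions): gather concatenates per-device tensors along the given dimension onto the target device, which is why each scalar loss is unsqueezed to shape (1,) first.

import torch
from torch.nn.parallel.scatter_gather import gather

if torch.cuda.device_count() >= 2:
    per_gpu_losses = [torch.tensor([0.7], device='cuda:0'),
                      torch.tensor([0.9], device='cuda:1')]
    losses = gather(per_gpu_losses, 0, dim=0)   # (num_gpu,) tensor assembled on cuda:0
    print(losses.mean().item())                 # ~0.8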
Example 4: scatter
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import scatter_kwargs [as alias]
def scatter(self, inputs, kwargs, device_ids):
    try:
        params = kwargs.pop('params')
    except KeyError:
        # No external parameters supplied: fall back to the default DataParallel scatter
        return super(DataParallel, self).scatter(inputs, kwargs, device_ids)

    inputs_, kwargs_ = scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
    # Add params argument unchanged back in kwargs
    replicas = self._replicate_params(params, inputs_, device_ids,
                                      detach=not torch.is_grad_enabled())
    kwargs_ = tuple(dict(params=replica, **kwarg)
                    for (kwarg, replica) in zip(kwargs_, replicas))
    return inputs_, kwargs_
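A tiny, runnable toy of the final zip step above, using plain Python objects in place of real parameter replicas, to show how each per-device kwargs dict receives its own params entry:

kwargs_ = ({'x': 1}, {'x': 2})                          # one kwargs dict per device
replicas = ('params_replica_0', 'params_replica_1')     # stand-ins for per-device parameter copies
kwargs_ = tuple(dict(params=replica, **kwarg)
                for (kwarg, replica) in zip(kwargs_, replicas))
print(kwargs_)
# ({'x': 1, 'params': 'params_replica_0'}, {'x': 2, 'params': 'params_replica_1'})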
Example 5: _data_parallel
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import scatter_kwargs [as alias]
def _data_parallel(self, batch):
    """
    Do the forward pass using multiple GPUs. This is a simplification
    of torch.nn.parallel.data_parallel to support the allennlp model
    interface.
    """
    inputs, module_kwargs = scatter_kwargs((), batch, self._cuda_devices, 0)
    used_device_ids = self._cuda_devices[:len(inputs)]
    replicas = replicate(self._model, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)

    # Only the 'loss' is needed.
    # a (num_gpu, ) tensor with loss on each GPU
    losses = gather([output['loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
    return {'loss': losses.mean()}
Example 6: scatter
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import scatter_kwargs [as alias]
def scatter(self, inputs, kwargs, device_ids):
    return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)