This article collects typical usage examples of the Python method torch.nn.parallel.scatter_gather.gather. If you have been wondering what exactly scatter_gather.gather does, how to call it, or what real-world usages look like, the curated code examples below should help. You can also explore the module it belongs to, torch.nn.parallel.scatter_gather, for further usage examples.
Below are 15 code examples of scatter_gather.gather, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
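Before the project-specific examples, here is a minimal, hedged sketch, not taken from any of the projects below, of what scatter and gather do on their own (it assumes at least two visible CUDA devices): scatter splits a tensor across devices along a dimension, and gather concatenates the per-device pieces back onto one target device.

import torch
from torch.nn.parallel.scatter_gather import scatter, gather

if torch.cuda.device_count() >= 2:
    x = torch.randn(8, 4)
    chunks = scatter(x, [0, 1], dim=0)   # a list of chunks, one on cuda:0 and one on cuda:1
    merged = gather(chunks, 0, dim=0)    # concatenated back together on cuda:0
    assert merged.shape == x.shape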
Example 1: _data_parallel_wrapper
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def _data_parallel_wrapper(func_name, device_ids, output_device):
    r"""
    Wrapper for running a method of ``network`` on multiple GPUs. Modeled on the
    forward pass of ``nn.DataParallel``.

    :param str func_name: name of the method on ``network`` to run across multiple GPUs
    :param device_ids: same as ``device_ids`` in ``nn.DataParallel``
    :param output_device: same as ``output_device`` in ``nn.DataParallel``
    :return:
    """
    def wrapper(network, *inputs, **kwargs):
        inputs, kwargs = scatter_kwargs(inputs, kwargs, device_ids, dim=0)
        if len(device_ids) == 1:
            return getattr(network, func_name)(*inputs[0], **kwargs[0])
        replicas = replicate(network, device_ids[:len(inputs)])
        outputs = parallel_apply(replicas, func_name, inputs, kwargs, device_ids[:len(replicas)])
        return gather(outputs, output_device)
    return wrapper
Example 2: gather
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def gather(self, outputs, output_device):
    n_returns = len(outputs[0])
    n_gpus = len(outputs)
    if n_returns == 2:
        losses = [output[0] for output in outputs]
        observation_mean = {}
        for output in outputs:
            for k, v in output[1].items():
                if v is None:
                    continue
                if k not in observation_mean.keys():
                    observation_mean[k] = v
                else:
                    observation_mean[k] += v
        observation_mean = {k: v / n_gpus for k, v in observation_mean.items()}
        return gather(losses, output_device, dim=self.dim).mean(), observation_mean
    else:
        raise ValueError(n_returns)
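The dict handling above simply sums every non-None statistic across the per-GPU outputs and divides by the number of GPUs. A hedged, CPU-only illustration of that averaging step, with made-up keys and numbers:

import torch

# Two fake "per-GPU" outputs of the form (loss, stats); the keys and values are hypothetical.
fake_outputs = [
    (torch.tensor(0.52), {'acc': torch.tensor(0.80), 'skipped': None}),
    (torch.tensor(0.48), {'acc': torch.tensor(0.60), 'skipped': None}),
]
observation_mean = {}
for _, stats in fake_outputs:
    for k, v in stats.items():
        if v is None:
            continue
        observation_mean[k] = observation_mean.get(k, 0) + v
observation_mean = {k: v / len(fake_outputs) for k, v in observation_mean.items()}
print(observation_mean)   # {'acc': tensor(0.7000)}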
Example 3: forward
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def forward(self, *inputs, **kwargs):
    if not self.device_ids:
        return self.module(*inputs, **kwargs)
    # Scatter the inputs (and kwargs) across the devices, honoring chunk_sizes
    inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
    if len(self.device_ids) == 1:
        return self.module(*inputs[0], **kwargs[0])
    # Replicate the module onto the devices that received inputs
    replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
    # Run the replicas in parallel and collect their outputs
    outputs = self.parallel_apply(replicas, inputs, kwargs)
    return self.gather(outputs, self.output_device)

# The methods below simply re-wrap the corresponding torch helper functions
Example 4: gather
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def gather(self, outputs, output_device):
    return gather(outputs, output_device, dim=self.dim)
Example 5: data_parallel
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
    r"""Evaluates module(input) in parallel across the GPUs given in device_ids.

    This is the functional version of the DataParallel module.

    Args:
        module: the module to evaluate in parallel
        inputs: inputs to the module
        device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output. Use -1 to indicate the CPU.
            (default: device_ids[0])

    Returns:
        a Variable containing the result of module(input) located on
        output_device
    """
    if not isinstance(inputs, tuple):
        inputs = (inputs,)
    if device_ids is None:
        device_ids = list(range(torch.cuda.device_count()))
    if output_device is None:
        output_device = device_ids[0]
    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    if len(device_ids) == 1:
        return module(*inputs[0], **module_kwargs[0])
    used_device_ids = device_ids[:len(inputs)]
    replicas = replicate(module, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim)
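As a hedged usage sketch (not part of the original project), the functional form above can be called directly on a module and a batch; with a single visible GPU it reduces to a plain forward call, and with several GPUs the batch is split along dim 0 and the outputs are gathered on device_ids[0]:

import torch
import torch.nn as nn

# Hypothetical usage of the data_parallel() defined above; requires at least one CUDA device.
if torch.cuda.is_available():
    net = nn.Linear(16, 4).cuda()
    x = torch.randn(32, 16).cuda()
    y = data_parallel(net, x)   # replicate across all visible GPUs, gather on GPU 0
    print(y.shape)              # torch.Size([32, 4])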
Example 6: validation
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def validation(self, epoch):
    # Fast test during the training
    def eval_batch(model, image, target):
        outputs = model(image)
        outputs = gather(outputs, 0, dim=0)
        pred = outputs[0]
        target = target.cuda()
        correct, labeled = utils.batch_pix_accuracy(pred.data, target)
        inter, union = utils.batch_intersection_union(pred.data, target, self.nclass)
        return correct, labeled, inter, union

    is_best = False
    self.model.eval()
    total_inter, total_union, total_correct, total_label = 0, 0, 0, 0
    tbar = tqdm(self.valloader, desc='\r')
    for i, (image, target) in enumerate(tbar):
        with torch.no_grad():
            correct, labeled, inter, union = eval_batch(self.model, image, target)
        total_correct += correct
        total_label += labeled
        total_inter += inter
        total_union += union
        pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
        IoU = 1.0 * total_inter / (np.spacing(1) + total_union)
        mIoU = IoU.mean()
        tbar.set_description(
            'pixAcc: %.3f, mIoU: %.3f' % (pixAcc, mIoU))
    new_pred = (pixAcc + mIoU) / 2
    if new_pred > self.best_pred:
        is_best = True
        self.best_pred = new_pred
    utils.save_checkpoint({
        'epoch': epoch + 1,
        'state_dict': self.model.module.state_dict(),
        'optimizer': self.optimizer.state_dict(),
        'best_pred': self.best_pred,
    }, self.args, is_best)
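The running metrics in the loop above reduce to simple ratios: pixel accuracy is correct pixels over labeled pixels, and mIoU is the per-class intersection over union averaged across classes. A hedged, NumPy-only illustration with invented counts:

import numpy as np

# Hypothetical accumulated counts for a 2-class problem (the numbers are made up).
total_correct, total_label = 900, 1000
total_inter = np.array([450.0, 300.0])
total_union = np.array([500.0, 400.0])

pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)       # ~0.9
mIoU = (1.0 * total_inter / (np.spacing(1) + total_union)).mean()  # (0.9 + 0.75) / 2 = 0.825
print(pixAcc, mIoU)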
Example 7: gather
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def gather(self, outputs, output_device):
    if self.gather_:
        return gather(outputs, output_device, dim=self.dim)
    return outputs
Example 8: gather
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def gather(self, outputs, target_device=None, dim=0):
    r"""
    Gathers tensors from different GPUs on a specified device
    (-1 means the CPU).
    """
    if not self.configer.get('network', 'gathered'):
        if target_device is None:
            target_device = list(range(torch.cuda.device_count()))[0]
        return torch_gather(outputs, target_device, dim=dim)
    else:
        return outputs
Example 9: forward
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def forward(self, *inputs, **kwargs):
    if not self.device_ids:
        return self.module(*inputs, **kwargs)
    inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
    if len(self.device_ids) == 1:
        return self.module(*inputs[0], **kwargs[0])
    replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
    outputs = self.parallel_apply(replicas, inputs, kwargs)
    return self.gather(outputs, self.output_device)
Example 10: _data_parallel
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def _data_parallel(self, batch):
    u"""
    Do the forward pass using multiple GPUs. This is a simplification
    of torch.nn.parallel.data_parallel to support the allennlp model
    interface.
    """
    inputs, module_kwargs = scatter_kwargs((), batch, self._cuda_devices, 0)
    used_device_ids = self._cuda_devices[:len(inputs)]
    replicas = replicate(self._model, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    # Only the 'loss' is needed.
    # a (num_gpu, ) tensor with loss on each GPU
    losses = gather([output[u'loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
    return {u'loss': losses.mean()}
Example 11: gather
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def gather(self, outputs, output_device):
    return outputs
Example 12: forward
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def forward(self, inputs, *targets, **kwargs):
    # the inputs should already be scattered;
    # scatter the targets instead
    if not self.device_ids:
        return self.module(inputs, *targets, **kwargs)
    targets, kwargs = self.scatter(targets, kwargs, self.device_ids)
    if len(self.device_ids) == 1:
        return self.module(inputs, *targets[0], **kwargs[0])
    replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
    outputs = _criterion_parallel_apply(replicas, inputs, targets, kwargs)
    #return Reduce.apply(*outputs) / len(outputs)
    #return self.gather(outputs, self.output_device).mean()
    return self.gather(outputs, self.output_device)
Example 13: forward
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def forward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
    if not self.device_ids:
        return self.flow.forward(*inputs, **kwargs)
    inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
    if len(self.device_ids) == 1:
        return self.flow.forward(*inputs[0], **kwargs[0])
    replicas = self.replicate(self.flow, self.device_ids[:len(inputs)])
    outputs = self.parallel_apply(replicas, inputs, kwargs)
    return self.gather(outputs, self.output_device)
Example 14: backward
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def backward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
    if not self.device_ids:
        return self.flow.backward(*inputs, **kwargs)
    inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
    if len(self.device_ids) == 1:
        return self.flow.backward(*inputs[0], **kwargs[0])
    replicas = self.replicate(self.flow, self.device_ids[:len(inputs)])
    outputs = self.parallel_apply(replicas, inputs, kwargs, backward=True)
    return self.gather(outputs, self.output_device)
Example 15: _data_parallel
# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def _data_parallel(self, batch):
    """
    Do the forward pass using multiple GPUs. This is a simplification
    of torch.nn.parallel.data_parallel to support the allennlp model
    interface.
    """
    inputs, module_kwargs = scatter_kwargs((), batch, self._cuda_devices, 0)
    used_device_ids = self._cuda_devices[:len(inputs)]
    replicas = replicate(self._model, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    # Only the 'loss' is needed.
    # a (num_gpu, ) tensor with loss on each GPU
    losses = gather([output['loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
    return {'loss': losses.mean()}
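As a hedged sketch of the final gather step above (assuming at least two CUDA devices): each replica returns a dict with a scalar 'loss', the scalars are unsqueezed to shape (1,), gathered onto the first device, and averaged.

import torch
from torch.nn.parallel.scatter_gather import gather

if torch.cuda.device_count() >= 2:
    # Hypothetical per-replica outputs, one dict per GPU.
    outputs = [{'loss': torch.tensor(0.4, device='cuda:0')},
               {'loss': torch.tensor(0.6, device='cuda:1')}]
    losses = gather([o['loss'].unsqueeze(0) for o in outputs], 0, 0)  # shape (2,) on cuda:0
    print(losses.mean())  # tensor(0.5000, device='cuda:0')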