

Python scatter_gather.gather Method Code Examples

This article collects typical usage examples of the Python method torch.nn.parallel.scatter_gather.gather. If you are wondering what scatter_gather.gather does, how to call it, or what real-world usages look like, the curated code examples below may help. You can also explore further usage examples of the module it belongs to, torch.nn.parallel.scatter_gather.


The following presents 15 code examples of the scatter_gather.gather method, sorted by popularity by default.
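
Before the examples, a minimal sketch of what gather itself does may be useful: it collects outputs that live on different GPUs onto a single target device, concatenating tensors along the given dimension. The tensor shapes and device ids below are made up for illustration and assume at least two CUDA devices are available.

import torch
from torch.nn.parallel.scatter_gather import gather

# Hypothetical per-GPU outputs, e.g. as produced by parallel_apply on two devices.
outputs = [torch.randn(4, 10, device='cuda:0'),
           torch.randn(4, 10, device='cuda:1')]

# Collect both chunks onto cuda:0, concatenating along the batch dimension (dim=0).
merged = gather(outputs, target_device=0, dim=0)
print(merged.shape)  # torch.Size([8, 10])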

Example 1: _data_parallel_wrapper

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def _data_parallel_wrapper(func_name, device_ids, output_device):
    r"""
    這個函數是用於對需要多卡執行的函數的wrapper函數。參考的nn.DataParallel的forward函數

    :param str, func_name: 對network中的這個函數進行多卡運行
    :param device_ids: nn.DataParallel中的device_ids
    :param output_device: nn.DataParallel中的output_device
    :return:
    """
    
    def wrapper(network, *inputs, **kwargs):
        inputs, kwargs = scatter_kwargs(inputs, kwargs, device_ids, dim=0)
        if len(device_ids) == 1:
            return getattr(network, func_name)(*inputs[0], **kwargs[0])
        replicas = replicate(network, device_ids[:len(inputs)])
        outputs = parallel_apply(replicas, func_name, inputs, kwargs, device_ids[:len(replicas)])
        return gather(outputs, output_device)
    
    return wrapper 
Developer ID: fastnlp, Project: fastNLP, Lines: 21, Source: _parallel_utils.py
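
A rough usage sketch for this wrapper (the model, method name, and batch below are hypothetical, not part of fastNLP): it builds a callable that scatters the inputs, runs the named method on each replica, and gathers the results on output_device.

# Hypothetical usage: run model.predict on GPUs 0 and 1, gathering results on GPU 0.
# Assumes `model` is an nn.Module that defines a `predict(x)` method and
# `batch` is a tensor whose first dimension is the batch dimension.
parallel_predict = _data_parallel_wrapper('predict', device_ids=[0, 1], output_device=0)
output = parallel_predict(model, batch)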

Example 2: gather

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def gather(self, outputs, output_device):
        n_returns = len(outputs[0])
        n_gpus = len(outputs)
        if n_returns == 2:
            losses = [output[0] for output in outputs]
            observation_mean = {}
            for output in outputs:
                for k, v in output[1].items():
                    if v is None:
                        continue
                    if k not in observation_mean.keys():
                        observation_mean[k] = v
                    else:
                        observation_mean[k] += v
            # Average the accumulated observations over the GPUs once, after the loop.
            observation_mean = {k: v / n_gpus for k, v in observation_mean.items()}
            return gather(losses, output_device, dim=self.dim).mean(), observation_mean
        else:
            raise ValueError(n_returns) 
Developer ID: hirofumi0810, Project: neural_sp, Lines: 20, Source: data_parallel.py

Example 3: forward

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        # Scatter the inputs across the devices
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        # Replicate the module onto each device
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        # Run the replicas in parallel to get the outputs
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    # The methods above re-wrap the corresponding functions from torch.nn.parallel
Developer ID: DataXujing, Project: CornerNet-Lite-Pytorch, Lines: 16, Source: data_parallel.py

Example 4: gather

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def gather(self, outputs, output_device):
        return gather(outputs, output_device, dim=self.dim) 
Developer ID: DataXujing, Project: CornerNet-Lite-Pytorch, Lines: 4, Source: data_parallel.py

Example 5: data_parallel

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
    r"""Evaluates module(input) in parallel across the GPUs given in device_ids.

    This is the functional version of the DataParallel module.

    Args:
        module: the module to evaluate in parallel
        inputs: inputs to the module
        device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output. Use -1 to indicate the CPU.
            (default: device_ids[0])
    Returns:
        a Variable containing the result of module(input) located on
        output_device
    """
    if not isinstance(inputs, tuple):
        inputs = (inputs,)

    if device_ids is None:
        device_ids = list(range(torch.cuda.device_count()))

    if output_device is None:
        output_device = device_ids[0]

    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    if len(device_ids) == 1:
        return module(*inputs[0], **module_kwargs[0])
    used_device_ids = device_ids[:len(inputs)]
    replicas = replicate(module, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim) 
Developer ID: DataXujing, Project: CornerNet-Lite-Pytorch, Lines: 33, Source: data_parallel.py
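
A minimal usage sketch of this functional form (the module and input shapes are made up for illustration and assume two CUDA devices):

import torch
import torch.nn as nn

# Hypothetical module and input; replicate across GPUs 0 and 1, gather the result on GPU 0.
module = nn.Linear(16, 4).cuda(0)
x = torch.randn(8, 16, device='cuda:0')
out = data_parallel(module, x, device_ids=[0, 1], output_device=0)
print(out.shape)  # torch.Size([8, 4])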

Example 6: validation

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def validation(self, epoch):
        # Fast test during the training
        def eval_batch(model, image, target):
            outputs = model(image)
            outputs = gather(outputs, 0, dim=0)
            pred = outputs[0]
            target = target.cuda()
            correct, labeled = utils.batch_pix_accuracy(pred.data, target)
            inter, union = utils.batch_intersection_union(pred.data, target, self.nclass)
            return correct, labeled, inter, union

        is_best = False
        self.model.eval()
        total_inter, total_union, total_correct, total_label = 0, 0, 0, 0
        tbar = tqdm(self.valloader, desc='\r')
        for i, (image, target) in enumerate(tbar):
            with torch.no_grad():
                correct, labeled, inter, union = eval_batch(self.model, image, target)

            total_correct += correct
            total_label += labeled
            total_inter += inter
            total_union += union
            pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
            IoU = 1.0 * total_inter / (np.spacing(1) + total_union)
            mIoU = IoU.mean()
            tbar.set_description(
                'pixAcc: %.3f, mIoU: %.3f' % (pixAcc, mIoU))

        new_pred = (pixAcc + mIoU)/2
        if new_pred > self.best_pred:
            is_best = True
            self.best_pred = new_pred
        utils.save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': self.model.module.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'best_pred': self.best_pred,
        }, self.args, is_best) 
Developer ID: zhanghang1989, Project: PyTorch-Encoding, Lines: 41, Source: train.py

Example 7: gather

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def gather(self, outputs, output_device):
        if self.gather_:
            return gather(outputs, output_device, dim=self.dim)

        return outputs 
Developer ID: openseg-group, Project: openseg.pytorch, Lines: 7, Source: data_parallel.py

Example 8: gather

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def gather(self, outputs, target_device=None, dim=0):
        r"""
        Gathers tensors from different GPUs on a specified device
          (-1 means the CPU).
        """
        if not self.configer.get('network', 'gathered'):
            if target_device is None:
                target_device = list(range(torch.cuda.device_count()))[0]

            return torch_gather(outputs, target_device, dim=dim)

        else:
            return outputs 
Developer ID: openseg-group, Project: openseg.pytorch, Lines: 15, Source: module_runner.py

Example 9: forward

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device) 
Developer ID: xingyizhou, Project: ExtremeNet, Lines: 11, Source: data_parallel.py

Example 10: _data_parallel

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def _data_parallel(self, batch):
        u"""
        Do the forward pass using multiple GPUs.  This is a simplification
        of torch.nn.parallel.data_parallel to support the allennlp model
        interface.
        """
        inputs, module_kwargs = scatter_kwargs((), batch, self._cuda_devices, 0)
        used_device_ids = self._cuda_devices[:len(inputs)]
        replicas = replicate(self._model, used_device_ids)
        outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)

        # Only the 'loss' is needed.
        # a (num_gpu, ) tensor with loss on each GPU
        losses = gather([output[u'loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
        return {u'loss': losses.mean()} 
Developer ID: plasticityai, Project: magnitude, Lines: 17, Source: trainer.py

Example 11: gather

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def gather(self, outputs, output_device):
        return outputs 
Developer ID: INK-USC, Project: KagNet, Lines: 4, Source: parallel.py

Example 12: forward

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def forward(self, inputs, *targets, **kwargs):
        # input should already be scattered
        # scattering the targets instead
        if not self.device_ids:
            return self.module(inputs, *targets, **kwargs)
        targets, kwargs = self.scatter(targets, kwargs, self.device_ids)
        if len(self.device_ids) == 1:
            return self.module(inputs, *targets[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = _criterion_parallel_apply(replicas, inputs, targets, kwargs)
        #return Reduce.apply(*outputs) / len(outputs)
        #return self.gather(outputs, self.output_device).mean()
        return self.gather(outputs, self.output_device) 
Developer ID: INK-USC, Project: KagNet, Lines: 15, Source: parallel.py

Example 13: forward

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def forward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        if not self.device_ids:
            return self.flow.forward(*inputs, **kwargs)
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        if len(self.device_ids) == 1:
            return self.flow.forward(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.flow, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device) 
Developer ID: XuezheMax, Project: flowseq, Lines: 11, Source: data_parallel.py

Example 14: backward

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def backward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        if not self.device_ids:
            return self.flow.backward(*inputs, **kwargs)
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        if len(self.device_ids) == 1:
            return self.flow.backward(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.flow, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs, backward=True)
        return self.gather(outputs, self.output_device) 
Developer ID: XuezheMax, Project: flowseq, Lines: 11, Source: data_parallel.py

Example 15: _data_parallel

# Required import: from torch.nn.parallel import scatter_gather [as alias]
# Or: from torch.nn.parallel.scatter_gather import gather [as alias]
def _data_parallel(self, batch):
        """
        Do the forward pass using multiple GPUs.  This is a simplification
        of torch.nn.parallel.data_parallel to support the allennlp model
        interface.
        """
        inputs, module_kwargs = scatter_kwargs((), batch, self._cuda_devices, 0)
        used_device_ids = self._cuda_devices[:len(inputs)]
        replicas = replicate(self._model, used_device_ids)
        outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)

        # Only the 'loss' is needed.
        # a (num_gpu, ) tensor with loss on each GPU
        losses = gather([output['loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
        return {'loss': losses.mean()} 
Developer ID: allenai, Project: scicite, Lines: 17, Source: multitask_trainer.py


Note: The torch.nn.parallel.scatter_gather.gather method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.