

Python parallel_apply.parallel_apply method code examples

This article collects typical usage examples of the parallel_apply.parallel_apply method from torch.nn.parallel.parallel_apply in Python. If you are unsure what the Python parallel_apply.parallel_apply method does, or how to use it, the selected code examples below may help. You can also explore further usage examples from the module it belongs to, torch.nn.parallel.parallel_apply.


The following presents 8 code examples of the parallel_apply.parallel_apply method, sorted by popularity by default.
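Before the compiled examples, here is a minimal direct-use sketch (not taken from the projects below) of what parallel_apply does: it runs one module replica per device on one input chunk per device, in parallel. It assumes at least two CUDA devices are available.

# Minimal direct-use sketch (assumption: two CUDA devices, ids 0 and 1).
import torch
from torch import nn
from torch.nn.parallel import replicate, parallel_apply, gather

module = nn.Linear(16, 4).cuda(0)            # master copy on device_ids[0]
device_ids = [0, 1]
replicas = replicate(module, device_ids)     # one replica per device

batch = torch.randn(8, 16)
inputs = [(batch[:4].cuda(0),), (batch[4:].cuda(1),)]   # one input tuple per replica
outputs = parallel_apply(replicas, inputs, None, device_ids)
result = gather(outputs, 0)                  # collect the outputs back on GPU 0
print(result.shape)                          # torch.Size([8, 4])

The DataParallel-style classes in the examples below wrap exactly this scatter, replicate, parallel_apply, gather sequence.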

Example 1: forward

# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
# This example also uses: from itertools import chain
def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)

        for t in chain(self.module.parameters(), self.module.buffers()):
            if t.device != self.src_device_obj:
                raise RuntimeError(
                    "module must have its parameters and buffers "
                    "on device {} (device_ids[0]) but found one of "
                    "them on device: {}".format(
                        self.src_device_obj, t.device))
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        if len(self.device_ids) == 1:
            return self.module(*inputs, **kwargs)
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return outputs 
Developer ID: PistonY, Project: torch-toolbox, Lines of code: 19, Source file: EncodingDataParallel.py
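This forward mirrors the checks in torch.nn.DataParallel.forward: before scattering, it verifies that every parameter and buffer of the wrapped module already lives on self.src_device_obj (device_ids[0]), and it falls back to a plain single-device call when no device ids or only one device id are configured.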

Example 2: parallel_apply

# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def parallel_apply(self, replicas, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs,
                              self.device_ids[:len(replicas)]) 
Developer ID: PistonY, Project: torch-toolbox, Lines of code: 5, Source file: EncodingDataParallel.py

Example 3: forward

# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        # Scatter the inputs across the devices
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        # Replicate the module onto each device
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        # Run the replicas in parallel to get the outputs
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    # The methods below re-wrap the corresponding torch functions
Developer ID: DataXujing, Project: CornerNet-Lite-Pytorch, Lines of code: 16, Source file: data_parallel.py
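Unlike Example 1, this forward passes self.chunk_sizes to scatter, so the batch can be split unevenly across the devices rather than into equal chunks. The project's scatter implementation is not reproduced here; as a rough sketch of the idea only (torch.split stands in for the actual chunk_sizes-aware scatter, which is an assumption about its behaviour):

import torch

batch = torch.randn(10, 3, 224, 224)
chunk_sizes = [4, 6]                              # e.g. a smaller chunk for GPU 0
chunks = torch.split(batch, chunk_sizes, dim=0)   # uneven split along the batch dim
print([c.shape[0] for c in chunks])               # [4, 6]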

Example 4: parallel_apply

# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def parallel_apply(self, replicas, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)]) 
Developer ID: DataXujing, Project: CornerNet-Lite-Pytorch, Lines of code: 4, Source file: data_parallel.py

Example 5: data_parallel

# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
    r"""Evaluates module(input) in parallel across the GPUs given in device_ids.

    This is the functional version of the DataParallel module.

    Args:
        module: the module to evaluate in parallel
        inputs: inputs to the module
        device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output. Use -1 to indicate the CPU.
            (default: device_ids[0])
        dim: dimension along which to scatter the inputs (default: 0)
        module_kwargs: keyword arguments passed to the module (default: None)
    Returns:
        a Variable containing the result of module(input) located on
        output_device
    """
    if not isinstance(inputs, tuple):
        inputs = (inputs,)

    if device_ids is None:
        device_ids = list(range(torch.cuda.device_count()))

    if output_device is None:
        output_device = device_ids[0]

    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    if len(device_ids) == 1:
        return module(*inputs[0], **module_kwargs[0])
    used_device_ids = device_ids[:len(inputs)]
    replicas = replicate(module, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim) 
Developer ID: DataXujing, Project: CornerNet-Lite-Pytorch, Lines of code: 33, Source file: data_parallel.py
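A minimal usage sketch for this functional data_parallel (assuming two CUDA devices, and that the helpers it relies on — scatter_kwargs, replicate, parallel_apply, gather — are imported in the same file, as the import comment above indicates for parallel_apply):

import torch
from torch import nn

model = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 10)).cuda(0)
x = torch.randn(16, 32).cuda(0)

# Replicates `model` onto GPUs 0 and 1, splits `x` along dim 0,
# runs both halves in parallel, and gathers the result onto GPU 0.
out = data_parallel(model, x, device_ids=[0, 1], output_device=0)
print(out.shape)   # torch.Size([16, 10])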

Example 6: forward

# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        if self.gpu0_bsz == 0:
            device_ids = self.device_ids[1:]
        else:
            device_ids = self.device_ids
        inputs, kwargs = self.scatter(inputs, kwargs, device_ids)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids)
        if self.gpu0_bsz == 0:
            replicas = replicas[1:]
        outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
        return self.gather(outputs, self.output_device) 
Developer ID: Sha-Lab, Project: FEAT, Lines of code: 17, Source file: data_parallel.py
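The gpu0_bsz attribute is not defined in this snippet (presumably it is set in the class constructor), but the forward logic shows its role: when gpu0_bsz is 0, device_ids[0] is dropped from both the scatter and the list of replicas passed to parallel_apply, so GPU 0 keeps only the master copy of the module. This is a common way to reserve memory on GPU 0 for gathering the outputs and computing the loss.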

Example 7: parallel_apply

# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def parallel_apply(self, replicas, device_ids, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, device_ids) 
Developer ID: Sha-Lab, Project: FEAT, Lines of code: 4, Source file: data_parallel.py

Example 8: forward

# Required import: from torch.nn.parallel import parallel_apply [as alias]
# Or: from torch.nn.parallel.parallel_apply import parallel_apply [as alias]
def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device) 
Developer ID: xingyizhou, Project: ExtremeNet, Lines of code: 11, Source file: data_parallel.py


Note: The torch.nn.parallel.parallel_apply.parallel_apply method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and the copyright of the source code remains with the original authors. Please follow the corresponding project licenses when distributing or using the code; do not reproduce this article without permission.