

Python replicate.replicate Method: Code Examples

This article collects typical usage examples of the replicate.replicate method from Python's torch.nn.parallel.replicate module. If you are wondering what the replicate.replicate method does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the containing module, torch.nn.parallel.replicate.


Twelve code examples of the replicate.replicate method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
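
Before diving into the project examples, a minimal sketch of what replicate itself does may help: it copies a module onto several CUDA devices and returns one replica per device. This snippet is not taken from any of the projects below and assumes at least two GPUs are available.

from torch import nn
from torch.nn.parallel.replicate import replicate

# A small module living on GPU 0
module = nn.Linear(4, 2).cuda(0)

# Copy the module onto GPUs 0 and 1; the result is a list with one replica per device
replicas = replicate(module, [0, 1])
print(len(replicas))  # 2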

Example 1: forward

# Required import: from torch.nn.parallel import replicate [as alias]
# Or alternatively: from torch.nn.parallel.replicate import replicate [as alias]
def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)

        for t in chain(self.module.parameters(), self.module.buffers()):
            if t.device != self.src_device_obj:
                raise RuntimeError(
                    "module must have its parameters and buffers "
                    "on device {} (device_ids[0]) but found one of "
                    "them on device: {}".format(
                        self.src_device_obj, t.device))
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return outputs 
Author: PistonY, Project: torch-toolbox, Lines of code: 19, Source file: EncodingDataParallel.py
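
Note that, unlike torch.nn.DataParallel, this forward returns the per-device outputs as-is instead of gathering them onto a single device, presumably so that a parallelized criterion can consume one output per GPU.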

Example 2: _data_parallel_wrapper

# Required import: from torch.nn.parallel import replicate [as alias]
# Or alternatively: from torch.nn.parallel.replicate import replicate [as alias]
def _data_parallel_wrapper(func_name, device_ids, output_device):
    r"""
    这个函数是用于对需要多卡执行的函数的wrapper函数。参考的nn.DataParallel的forward函数

    :param str, func_name: 对network中的这个函数进行多卡运行
    :param device_ids: nn.DataParallel中的device_ids
    :param output_device: nn.DataParallel中的output_device
    :return:
    """
    
    def wrapper(network, *inputs, **kwargs):
        inputs, kwargs = scatter_kwargs(inputs, kwargs, device_ids, dim=0)
        if len(device_ids) == 1:
            return getattr(network, func_name)(*inputs[0], **kwargs[0])
        replicas = replicate(network, device_ids[:len(inputs)])
        outputs = parallel_apply(replicas, func_name, inputs, kwargs, device_ids[:len(replicas)])
        return gather(outputs, output_device)
    
    return wrapper 
Author: fastnlp, Project: fastNLP, Lines of code: 21, Source file: _parallel_utils.py
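
As a rough illustration of how this wrapper might be used (the toy model, its predict method and the input data are hypothetical, not from fastNLP, and the snippet assumes fastNLP's scatter_kwargs/parallel_apply helpers are importable along with at least two GPUs):

import torch
from torch import nn

class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(8, 2)

    def predict(self, x):
        return self.fc(x)

model = ToyModel().cuda(0)
# Build a callable that runs model.predict on GPUs 0 and 1 and gathers the results on GPU 0
parallel_predict = _data_parallel_wrapper('predict', device_ids=[0, 1], output_device=0)
output = parallel_predict(model, torch.randn(4, 8).cuda(0))
print(output.shape)  # torch.Size([4, 2])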

Example 3: replicate

# Required import: from torch.nn.parallel import replicate [as alias]
# Or alternatively: from torch.nn.parallel.replicate import replicate [as alias]
def replicate(self, module, device_ids):
        return replicate(module, device_ids, not torch.is_grad_enabled()) 
Author: PistonY, Project: torch-toolbox, Lines of code: 4, Source file: EncodingDataParallel.py
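
Passing not torch.is_grad_enabled() as the third positional argument corresponds to replicate's detach flag (replicate(network, devices, detach=False) in recent PyTorch versions), so the replicas are created detached from the autograd graph whenever gradient tracking is disabled, e.g. under torch.no_grad() during inference.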

Example 4: forward

# Required import: from torch.nn.parallel import replicate [as alias]
# Or alternatively: from torch.nn.parallel.replicate import replicate [as alias]
def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        # Scatter the inputs across the devices (using the configured chunk sizes)
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        # Replicate the module onto the devices that received inputs
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        # Run the replicas in parallel to obtain per-device outputs
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    # The methods below re-wrap the corresponding torch functions
Author: DataXujing, Project: CornerNet-Lite-Pytorch, Lines of code: 16, Source file: data_parallel.py
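
Compared with the stock nn.DataParallel.forward, the scatter call here also receives self.chunk_sizes, which lets the caller split a batch unevenly across the GPUs, e.g. when the devices have different memory budgets.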

Example 5: replicate

# Required import: from torch.nn.parallel import replicate [as alias]
# Or alternatively: from torch.nn.parallel.replicate import replicate [as alias]
def replicate(self, module, device_ids):
        return replicate(module, device_ids) 
Author: DataXujing, Project: CornerNet-Lite-Pytorch, Lines of code: 4, Source file: data_parallel.py

Example 6: data_parallel

# Required import: from torch.nn.parallel import replicate [as alias]
# Or alternatively: from torch.nn.parallel.replicate import replicate [as alias]
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
    r"""Evaluates module(input) in parallel across the GPUs given in device_ids.

    This is the functional version of the DataParallel module.

    Args:
        module: the module to evaluate in parallel
        inputs: inputs to the module
        device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output. Use -1 to indicate the CPU.
            (default: device_ids[0])
    Returns:
        a Variable containing the result of module(input) located on
        output_device
    """
    if not isinstance(inputs, tuple):
        inputs = (inputs,)

    if device_ids is None:
        device_ids = list(range(torch.cuda.device_count()))

    if output_device is None:
        output_device = device_ids[0]

    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    if len(device_ids) == 1:
        return module(*inputs[0], **module_kwargs[0])
    used_device_ids = device_ids[:len(inputs)]
    replicas = replicate(module, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim) 
Author: DataXujing, Project: CornerNet-Lite-Pytorch, Lines of code: 33, Source file: data_parallel.py
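
A minimal usage sketch of this functional form (assuming at least two CUDA GPUs; the behavior mirrors torch.nn.parallel.data_parallel):

import torch
from torch import nn

net = nn.Linear(16, 4).cuda(0)
x = torch.randn(8, 16).cuda(0)

# The batch is split along dim 0 (rows 0-3 on GPU 0, rows 4-7 on GPU 1),
# each chunk runs on its own replica, and the results are gathered on GPU 0.
out = data_parallel(net, x, device_ids=[0, 1], output_device=0)
print(out.shape)  # torch.Size([8, 4])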

Example 7: forward

# Required import: from torch.nn.parallel import replicate [as alias]
# Or alternatively: from torch.nn.parallel.replicate import replicate [as alias]
def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device) 
Author: xingyizhou, Project: ExtremeNet, Lines of code: 11, Source file: data_parallel.py

Example 8: forward

# Required import: from torch.nn.parallel import replicate [as alias]
# Or alternatively: from torch.nn.parallel.replicate import replicate [as alias]
def forward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        if not self.device_ids:
            return self.flow.forward(*inputs, **kwargs)
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        if len(self.device_ids) == 1:
            return self.flow.forward(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.flow, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device) 
Author: XuezheMax, Project: flowseq, Lines of code: 11, Source file: data_parallel.py

Example 9: backward

# Required import: from torch.nn.parallel import replicate [as alias]
# Or alternatively: from torch.nn.parallel.replicate import replicate [as alias]
def backward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        if not self.device_ids:
            return self.flow.backward(*inputs, **kwargs)
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        if len(self.device_ids) == 1:
            return self.flow.backward(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.flow, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs, backward=True)
        return self.gather(outputs, self.output_device) 
Author: XuezheMax, Project: flowseq, Lines of code: 11, Source file: data_parallel.py

Example 10: replicate

# Required import: from torch.nn.parallel import replicate [as alias]
# Or alternatively: from torch.nn.parallel.replicate import replicate [as alias]
def replicate(self, flow, device_ids):
        return replicate(flow, device_ids) 
Author: XuezheMax, Project: flowseq, Lines of code: 4, Source file: data_parallel.py

Example 11: forward

# Required import: from torch.nn.parallel import replicate [as alias]
# Or alternatively: from torch.nn.parallel.replicate import replicate [as alias]
def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids[: len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return outputs 
Author: belskikh, Project: kekas, Lines of code: 11, Source file: parallel.py

Example 12: replicate

# Required import: from torch.nn.parallel import replicate [as alias]
# Or alternatively: from torch.nn.parallel.replicate import replicate [as alias]
def replicate(self, module, device_ids):
        return replicate(module, device_ids) 
Author: belskikh, Project: kekas, Lines of code: 4, Source file: parallel.py


Note: the torch.nn.parallel.replicate.replicate examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors; please consult the corresponding project's license before distributing or using it. Do not reproduce this article without permission.