

Python Scatter.apply Method Code Examples

This article collects typical usage examples of the Python method torch.nn.parallel._functions.Scatter.apply. If you are wondering what Scatter.apply does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples of the containing class, torch.nn.parallel._functions.Scatter.


The following presents 10 code examples of Scatter.apply, ordered roughly by popularity.
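For orientation, here is a minimal sketch of calling Scatter.apply directly. It is not taken from the examples below and assumes a machine with at least two CUDA devices:

import torch
from torch.nn.parallel._functions import Scatter

# Split an 8-row batch across GPUs 0 and 1 along dim 0.
# Passing None for chunk_sizes splits the batch as evenly as possible.
batch = torch.randn(8, 3)
chunks = Scatter.apply([0, 1], None, 0, batch)
# `chunks` is a tuple with one tensor per target GPU:
# chunks[0] lives on cuda:0 and chunks[1] on cuda:1, each of shape (4, 3).
print([c.device for c in chunks])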

Example 1: scatter

# Required import: from torch.nn.parallel._functions import Scatter
# Method used: Scatter.apply
# (This example also relies on `torch` and on Variable from torch.autograd.)
def scatter(inputs, target_gpus, dim=0, chunk_sizes=None):
    r"""
    Slices variables into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not variables. Does not
    support Tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, Variable):
            return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
        assert not torch.is_tensor(obj), "Tensors not supported in scatter."
        if isinstance(obj, tuple):
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list):
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict):
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for _ in target_gpus]

    return scatter_map(inputs) 
Developer: DataXujing, Project: CornerNet-Lite-Pytorch, Lines of code: 22, Source: scatter_gather.py
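A hedged usage sketch for the wrapper above (it assumes GPUs 0 and 1 are available and a PyTorch version where Variable is an alias for Tensor; the nested structure is illustrative):

import torch
from torch.autograd import Variable

# Nested containers are scattered element-wise: each GPU receives a tuple
# holding its slice of the image batch plus a copy of the non-tensor flag.
images = Variable(torch.randn(4, 3, 32, 32))
per_gpu = scatter((images, {'flip': True}), target_gpus=[0, 1])
# per_gpu[0] -> (rows 0-1 of images on cuda:0, {'flip': True})
# per_gpu[1] -> (rows 2-3 of images on cuda:1, {'flip': True})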

Example 2: tnn_gather

# Required import: from torch.nn.parallel._functions import Scatter
# Method used: Scatter.apply
# (This particular snippet calls the companion Gather.apply; it also uses itertools, Variable, and a project-specific ScatterList type.)
def tnn_gather(outputs, target_device, dim=0):
    r"""
    Gathers variables from different GPUs on a specified device
      (-1 means the CPU).
    """
    def gather_map(outputs):
        if isinstance(outputs, Variable):
            if target_device == -1:
                return outputs.cpu()
            return outputs.cuda(target_device)

        out = outputs[0]
        if isinstance(out, Variable):
            return Gather.apply(target_device, dim, *outputs)
        if out is None:
            return None

        if isinstance(out, ScatterList):
            return tuple(map(gather_map, itertools.chain(*outputs)))

        return type(out)(map(gather_map, zip(*outputs)))
    return gather_map(outputs) 
Developer: CharlesShang, Project: Detectron-PYTORCH, Lines of code: 24, Source: data_parallel.py
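A hedged usage sketch for the helper above (assumes two CUDA devices and a PyTorch version where Variable is an alias for Tensor; the project-specific ScatterList path is not exercised here):

import torch

# One output per GPU replica; gathering concatenates them along dim 0 onto
# the requested device (pass target_device=-1 to gather onto the CPU instead).
replica_outputs = [torch.randn(2, 5, device='cuda:0'),
                   torch.randn(2, 5, device='cuda:1')]
merged = tnn_gather(replica_outputs, target_device=0)
print(merged.shape)  # torch.Size([4, 5]), on cuda:0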

Example 3: scatter

# Required import: from torch.nn.parallel._functions import Scatter
# Method used: Scatter.apply
# (Also uses PackedSequence from torch.nn.utils.rnn and a project-specific packed_sequence_scatter helper.)
def scatter(inputs, target_gpus, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.
    """

    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            return Scatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        if isinstance(obj, PackedSequence):
            return packed_sequence_scatter(obj, target_gpus)
        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None 
Developer: mapillary, Project: seamseg, Lines of code: 31, Source: scatter_gather.py

Example 4: gather

# Required import: from torch.nn.parallel._functions import Scatter
# Method used: Scatter.apply
# (This snippet calls the companion Gather.apply; it also uses PackedSequence and a project-specific packed_sequence_gather helper.)
def gather(outputs, target_device, dim=0):
    r"""
    Gathers tensors from different GPUs on a specified device
      (-1 means the CPU).
    """

    def gather_map(outputs):
        out = outputs[0]
        if isinstance(out, torch.Tensor):
            return Gather.apply(target_device, dim, *outputs)
        if out is None:
            return None
        if isinstance(out, dict):
            if not all((len(out) == len(d) for d in outputs)):
                raise ValueError('All dicts must have the same number of keys')
            return type(out)(((k, gather_map([d[k] for d in outputs]))
                              for k in out))
        if isinstance(out, PackedSequence):
            return packed_sequence_gather(outputs, target_device)
        return type(out)(map(gather_map, zip(*outputs)))

    # Recursive function calls like this create reference cycles.
    # Setting the function to None clears the refcycle.
    try:
        return gather_map(outputs)
    finally:
        gather_map = None 
Developer: mapillary, Project: seamseg, Lines of code: 29, Source: scatter_gather.py
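A hedged usage sketch for the gather function above (assumes two CUDA devices; dict outputs are merged key by key):

import torch

per_gpu_outputs = [
    {'loss': torch.tensor(0.5, device='cuda:0'), 'logits': torch.randn(2, 10, device='cuda:0')},
    {'loss': torch.tensor(0.7, device='cuda:1'), 'logits': torch.randn(2, 10, device='cuda:1')},
]
merged = gather(per_gpu_outputs, target_device=0)
# merged['loss'] becomes a 1-D tensor of length 2 and merged['logits'] has
# shape (4, 10); both end up on cuda:0.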

Example 5: scatter

# Required import: from torch.nn.parallel._functions import Scatter
# Method used: Scatter.apply
# (Here the torch Scatter is imported as OrigScatter; DataContainer comes from mmcv.parallel.)
def scatter(inputs, target_gpus, dim=0):
    """Scatter inputs to target gpus.

    The only difference from original :func:`scatter` is to add support for
    :type:`~mmcv.parallel.DataContainer`.
    """

    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            return OrigScatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, DataContainer):
            if obj.cpu_only:
                return obj.data
            else:
                # `Scatter` here is mmcv's own parallel helper, not the torch
                # class imported as OrigScatter above.
                return Scatter.forward(target_gpus, obj.data)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            out = list(map(list, zip(*map(scatter_map, obj))))
            return out
        if isinstance(obj, dict) and len(obj) > 0:
            out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
            return out

        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None 
Developer: openseg-group, Project: openseg.pytorch, Lines of code: 37, Source: scatter_gather.py

Example 6: scatter

# Required import: from torch.nn.parallel._functions import Scatter
# Method used: Scatter.apply
# (Same dependencies as Example 5: the torch Scatter is imported as OrigScatter, and DataContainer comes from mmcv.parallel.)
def scatter(inputs, target_gpus, dim=0):
    """Scatter inputs to target gpus.

    The only difference from original :func:`scatter` is to add support for
    :type:`~mmcv.parallel.DataContainer`.
    """

    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            return OrigScatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, DataContainer):
            if obj.cpu_only:
                return obj.data
            else:
                return Scatter.forward(target_gpus, obj.data)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            out = list(map(list, zip(*map(scatter_map, obj))))
            return out
        if isinstance(obj, dict) and len(obj) > 0:
            out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
            return out
        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None 
Developer: open-mmlab, Project: mmcv, Lines of code: 36, Source: scatter_gather.py

Example 7: scatter

# Required import: from torch.nn.parallel._functions import Scatter
# Method used: Scatter.apply
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            try:
                return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
            except Exception:
                # Scatter failed (typically a chunk_sizes / batch-size mismatch):
                # dump the offending shapes for debugging, then abort.
                print('obj', obj.size())
                print('dim', dim)
                print('chunk_sizes', chunk_sizes)
                quit()
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None 
Developer: Sha-Lab, Project: FEAT, Lines of code: 34, Source: data_parallel.py
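A hedged usage sketch for the explicit chunk_sizes variant above (assumes two CUDA devices; the uneven split is illustrative):

import torch

# Split an 11-sample batch unevenly, e.g. when GPU 0 also hosts extra state.
batch = torch.randn(11, 3)
per_gpu = scatter(batch, target_gpus=[0, 1], chunk_sizes=(5, 6))
# per_gpu[0] has 5 rows on cuda:0, per_gpu[1] has 6 rows on cuda:1.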

Example 8: scatter

# Required import: from torch.nn.parallel._functions import Scatter
# Method used: Scatter.apply
# (Also uses Variable, ceil from the math module, and a project-specific ScatterList type.)
def scatter(inputs, target_gpus, dim=0):
    r"""
    Slices variables into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not variables. Does not
    support Tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, Variable):
            return Scatter.apply(target_gpus, None, dim, obj)
        assert not torch.is_tensor(obj), "Tensors not supported in scatter."
        if isinstance(obj, ScatterList):
            # A ScatterList is cut into contiguous, roughly equal chunks, one
            # per target GPU; its length need not match len(target_gpus).
            chunk_size = int(ceil(float(len(obj)) / float(len(target_gpus))))
            return [obj[i * chunk_size: (i + 1) * chunk_size] for i in range(len(target_gpus))]
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for _ in target_gpus]

    return scatter_map(inputs) 
Developer: CharlesShang, Project: Detectron-PYTORCH, Lines of code: 31, Source: data_parallel.py

Example 9: scatter_imbalance

# Required import: from torch.nn.parallel._functions import Scatter
# Method used: Scatter.apply
def scatter_imbalance(inputs, target_gpus, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            if (len(target_gpus) == 4) and (obj.size(dim) == 22):
                return Scatter.apply(target_gpus, (4, 6, 6, 6), dim, obj)
            if (len(target_gpus) == 4) and (obj.size(dim) == 60):
                return Scatter.apply(target_gpus, (12, 16, 16, 16), dim, obj)
            elif (len(target_gpus) == 4) and (obj.size(dim) == 144):
                return Scatter.apply(target_gpus, (24, 40, 40, 40), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 46):
                return Scatter.apply(target_gpus, (4, 6, 6, 6, 6, 6, 6, 6), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 62):
                return Scatter.apply(target_gpus, (6, 8, 8, 8, 8, 8, 8, 8), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 94):
                return Scatter.apply(target_gpus, (10, 12, 12, 12, 12, 12, 12, 12), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 110):
                return Scatter.apply(target_gpus, (12, 14, 14, 14, 14, 14, 14, 14), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 118):
                return Scatter.apply(target_gpus, (13, 15, 15, 15, 15, 15, 15, 15), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 126):
                return Scatter.apply(target_gpus, (14, 16, 16, 16, 16, 16, 16, 16), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 134):
                return Scatter.apply(target_gpus, (15, 17, 17, 17, 17, 17, 17, 17), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 142):
                return Scatter.apply(target_gpus, (16, 18, 18, 18, 18, 18, 18, 18), dim, obj)
            elif (len(target_gpus) == 16) and (obj.size(dim) == 222):
                return Scatter.apply(target_gpus, (12, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14), dim, obj)
            return Scatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None 
Developer: microsoft, Project: unilm, Lines of code: 52, Source: data_parallel.py
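The lookup table above always gives the first GPU a smaller share, since it typically also hosts gathered outputs and optimizer state. Below is a hedged sketch of computing such chunk sizes programmatically instead of hard-coding every (GPU count, batch size) pair; the first_gpu_fraction heuristic is an assumption, not part of the original code:

from math import floor

def imbalanced_chunk_sizes(batch_size, num_gpus, first_gpu_fraction=0.75):
    """Give the first GPU roughly first_gpu_fraction of an even share and
    spread the remaining samples evenly over the other GPUs."""
    even_share = batch_size / num_gpus
    first = max(1, int(floor(even_share * first_gpu_fraction)))
    rest, leftover = divmod(batch_size - first, num_gpus - 1)
    sizes = [first] + [rest] * (num_gpus - 1)
    for i in range(leftover):  # hand any remainder to the last GPUs
        sizes[-(i + 1)] += 1
    return tuple(sizes)

# imbalanced_chunk_sizes(22, 4) -> (4, 6, 6, 6), matching the table entry above;
# the result can be passed directly as the chunk_sizes argument of Scatter.apply.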

Example 10: scatter_imbalance

# Required import: from torch.nn.parallel._functions import Scatter
# Method used: Scatter.apply
def scatter_imbalance(inputs, target_gpus, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            if (len(target_gpus) == 4) and (obj.size(dim) == 22):
                return Scatter.apply(target_gpus, (4, 6, 6, 6), dim, obj)
            if (len(target_gpus) == 4) and (obj.size(dim) == 60):
                return Scatter.apply(target_gpus, (12, 16, 16, 16), dim, obj)
            elif (len(target_gpus) == 4) and (obj.size(dim) == 144):
                return Scatter.apply(target_gpus, (24, 40, 40, 40), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 46):
                return Scatter.apply(target_gpus, (4, 6, 6, 6, 6, 6, 6, 6), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 62):
                return Scatter.apply(target_gpus, (6, 8, 8, 8, 8, 8, 8, 8), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 94):
                return Scatter.apply(target_gpus, (10, 12, 12, 12, 12, 12, 12, 12), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 110):
                return Scatter.apply(target_gpus, (12, 14, 14, 14, 14, 14, 14, 14), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 118):
                return Scatter.apply(target_gpus, (13, 15, 15, 15, 15, 15, 15, 15), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 126):
                return Scatter.apply(target_gpus, (14, 16, 16, 16, 16, 16, 16, 16), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 134):
                return Scatter.apply(target_gpus, (15, 17, 17, 17, 17, 17, 17, 17), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 142):
                return Scatter.apply(target_gpus, (16, 18, 18, 18, 18, 18, 18, 18), dim, obj)
            return Scatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None 
Developer: LuoweiZhou, Project: VLP, Lines of code: 50, Source: data_parallel.py


Note: the torch.nn.parallel._functions.Scatter.apply examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution must follow the corresponding project's license. Please do not repost without permission.