

Python Scatter.apply Method: Code Examples

This article collects typical usage examples of the Python method torch.nn.parallel._functions.Scatter.apply. If you have been wondering what Scatter.apply does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the containing module, torch.nn.parallel._functions.


Ten code examples of Scatter.apply are presented below, ordered by popularity.
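Before the collected examples, a minimal sketch of calling Scatter.apply directly may be useful. This snippet is not from any of the projects below; it assumes a machine with at least two CUDA devices, and the sizes and device ids are illustrative:

import torch
from torch.nn.parallel._functions import Scatter

if torch.cuda.device_count() >= 2:
    x = torch.randn(8, 3)                       # a CPU batch of 8 rows
    # Signature: Scatter.apply(target_gpus, chunk_sizes, dim, input);
    # chunk_sizes=None splits dim 0 as evenly as possible.
    chunks = Scatter.apply([0, 1], None, 0, x)
    print([(c.device, c.shape) for c in chunks])
    # e.g. [(device(type='cuda', index=0), torch.Size([4, 3])),
    #       (device(type='cuda', index=1), torch.Size([4, 3]))]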

Example 1: scatter

# Required import: from torch.nn.parallel._functions import Scatter [as alias]
# Or: from torch.nn.parallel._functions.Scatter import apply [as alias]
# Also requires: import torch; from torch.autograd import Variable
def scatter(inputs, target_gpus, dim=0, chunk_sizes=None):
    r"""
    Slices variables into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not variables. Does not
    support Tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, Variable):
            return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
        assert not torch.is_tensor(obj), "Tensors not supported in scatter."
        if isinstance(obj, tuple):
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list):
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict):
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for _ in target_gpus]

    return scatter_map(inputs) 
Developer ID: DataXujing, Project: CornerNet-Lite-Pytorch, Lines: 22, Source: scatter_gather.py
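The recursive zip calls in this example transpose a "structure of batches" into per-GPU structures. A plain-Python sketch of that transposition, with illustrative values and no GPUs involved:

# Pretend each leaf of a two-leaf input has already been split into
# two per-GPU chunks by Scatter.apply.
scattered = [("a0", "a1"), ("b0", "b1")]   # leaf -> (gpu0 chunk, gpu1 chunk)
per_gpu = list(zip(*scattered))
print(per_gpu)                             # [('a0', 'b0'), ('a1', 'b1')]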

Example 2: tnn_gather

# Required import: from torch.nn.parallel._functions import Scatter [as alias]
# Or: from torch.nn.parallel._functions.Scatter import apply [as alias]
# Also requires: import itertools; from torch.autograd import Variable;
# from torch.nn.parallel._functions import Gather (ScatterList is project-defined)
def tnn_gather(outputs, target_device, dim=0):
    r"""
    Gathers variables from different GPUs on a specified device
      (-1 means the CPU).
    """
    def gather_map(outputs):
        if isinstance(outputs, Variable):
            if target_device == -1:
                return outputs.cpu()
            return outputs.cuda(target_device)

        out = outputs[0]
        if isinstance(out, Variable):
            return Gather.apply(target_device, dim, *outputs)
        if out is None:
            return None

        if isinstance(out, ScatterList):
            return tuple(map(gather_map, itertools.chain(*outputs)))

        return type(out)(map(gather_map, zip(*outputs)))
    return gather_map(outputs) 
Developer ID: CharlesShang, Project: Detectron-PYTORCH, Lines: 24, Source: data_parallel.py
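The ScatterList branch above flattens the per-GPU sub-lists back into a single sequence before gathering each element. A pure-Python illustration of that flattening:

import itertools

outputs = [["a0", "a1"], ["b0"]]           # chunks coming back from two GPUs
flat = tuple(itertools.chain(*outputs))
print(flat)                                # ('a0', 'a1', 'b0')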

Example 3: scatter

# Required import: from torch.nn.parallel._functions import Scatter [as alias]
# Or: from torch.nn.parallel._functions.Scatter import apply [as alias]
# Also requires: import torch; from torch.nn.utils.rnn import PackedSequence
# (packed_sequence_scatter is a helper defined in the project)
def scatter(inputs, target_gpus, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.
    """

    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            return Scatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        if isinstance(obj, PackedSequence):
            return packed_sequence_scatter(obj, target_gpus)
        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None 
Developer ID: mapillary, Project: seamseg, Lines: 31, Source: scatter_gather.py
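The try/finally `scatter_map = None` idiom deserves a note: a recursive inner function references itself through its closure cell, so the cell and the function object form a reference cycle that only the cyclic garbage collector can reclaim. A minimal sketch of the same idiom outside of scatter:

def outer(n):
    def recurse(k):
        # The body refers to `recurse` via the enclosing cell, creating a
        # cell -> function -> cell cycle once the function is defined.
        return 0 if k == 0 else 1 + recurse(k - 1)
    try:
        return recurse(n)
    finally:
        recurse = None  # clear the cell so no cycle outlives the call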

Example 4: gather

# Required import: from torch.nn.parallel._functions import Scatter [as alias]
# Or: from torch.nn.parallel._functions.Scatter import apply [as alias]
# Also requires: import torch; from torch.nn.parallel._functions import Gather;
# from torch.nn.utils.rnn import PackedSequence (packed_sequence_gather is project-defined)
def gather(outputs, target_device, dim=0):
    r"""
    Gathers tensors from different GPUs on a specified device
      (-1 means the CPU).
    """

    def gather_map(outputs):
        out = outputs[0]
        if isinstance(out, torch.Tensor):
            return Gather.apply(target_device, dim, *outputs)
        if out is None:
            return None
        if isinstance(out, dict):
            if not all(len(out) == len(d) for d in outputs):
                raise ValueError('All dicts must have the same number of keys')
            return type(out)(((k, gather_map([d[k] for d in outputs]))
                              for k in out))
        if isinstance(out, PackedSequence):
            return packed_sequence_gather(outputs, target_device)
        return type(out)(map(gather_map, zip(*outputs)))

    # Recursive function calls like this create reference cycles.
    # Setting the function to None clears the refcycle.
    try:
        return gather_map(outputs)
    finally:
        gather_map = None 
Developer ID: mapillary, Project: seamseg, Lines: 29, Source: scatter_gather.py
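The dict branch merges per-GPU result dicts value-wise. The CPU-only sketch below mimics that behavior with torch.cat standing in for Gather.apply; on real hardware the tensors would live on different GPUs and Gather.apply would also move them to the target device:

import torch

outputs = [{"pred": torch.ones(2, 3)}, {"pred": torch.zeros(2, 3)}]
merged = {k: torch.cat([d[k] for d in outputs], dim=0) for k in outputs[0]}
print(merged["pred"].shape)                # torch.Size([4, 3])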

Example 5: scatter

# Required import: from torch.nn.parallel._functions import Scatter [as alias]
# Or: from torch.nn.parallel._functions.Scatter import apply [as alias]
# Also requires: import torch; from torch.nn.parallel._functions import Scatter as OrigScatter
# (DataContainer and the project's own Scatter come from the mmcv.parallel package)
def scatter(inputs, target_gpus, dim=0):
    """Scatter inputs to target gpus.

    The only difference from original :func:`scatter` is to add support for
    :type:`~mmcv.parallel.DataContainer`.
    """

    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            return OrigScatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, DataContainer):
            if obj.cpu_only:
                return obj.data
            else:
                return Scatter.forward(target_gpus, obj.data)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            out = list(map(list, zip(*map(scatter_map, obj))))
            return out
        if isinstance(obj, dict) and len(obj) > 0:
            out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
            return out

        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None 
Developer ID: openseg-group, Project: openseg.pytorch, Lines: 37, Source: scatter_gather.py

Example 6: scatter

# Required import: from torch.nn.parallel._functions import Scatter [as alias]
# Or: from torch.nn.parallel._functions.Scatter import apply [as alias]
# Also requires: import torch; from torch.nn.parallel._functions import Scatter as OrigScatter
# (DataContainer and the project's own Scatter come from the mmcv.parallel package)
def scatter(inputs, target_gpus, dim=0):
    """Scatter inputs to target gpus.

    The only difference from original :func:`scatter` is to add support for
    :type:`~mmcv.parallel.DataContainer`.
    """

    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            return OrigScatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, DataContainer):
            if obj.cpu_only:
                return obj.data
            else:
                return Scatter.forward(target_gpus, obj.data)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            out = list(map(list, zip(*map(scatter_map, obj))))
            return out
        if isinstance(obj, dict) and len(obj) > 0:
            out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
            return out
        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None 
Developer ID: open-mmlab, Project: mmcv, Lines: 36, Source: scatter_gather.py
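Hypothetical usage of the mmcv variant above, assuming mmcv is installed, at least two CUDA devices are present, and DataContainer accepts a cpu_only flag (check the API of the mmcv version you use):

import torch
from mmcv.parallel import DataContainer

batch = {
    "img": torch.randn(4, 3, 32, 32),                   # scattered across GPUs
    "meta": DataContainer([{"id": 0}], cpu_only=True),  # stays on the CPU
}
per_gpu = scatter(batch, target_gpus=[0, 1])            # one dict per GPU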

Example 7: scatter

# Required import: from torch.nn.parallel._functions import Scatter [as alias]
# Or: from torch.nn.parallel._functions.Scatter import apply [as alias]
# Also requires: import torch
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            try:
                return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
            except Exception:
                # Dump the offending shapes before aborting.
                print('obj', obj.size())
                print('dim', dim)
                print('chunk_sizes', chunk_sizes)
                quit()
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None 
Developer ID: Sha-Lab, Project: FEAT, Lines: 34, Source: data_parallel.py
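A hypothetical call of this variant, splitting a batch of 10 rows unevenly across two GPUs via the explicit chunk_sizes argument (device ids are illustrative):

import torch

batch = torch.randn(10, 5)
parts = scatter(batch, target_gpus=[0, 1], chunk_sizes=[4, 6])
# parts[0] holds 4 rows on cuda:0, parts[1] holds 6 rows on cuda:1.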

Example 8: scatter

# Required import: from torch.nn.parallel._functions import Scatter [as alias]
# Or: from torch.nn.parallel._functions.Scatter import apply [as alias]
# Also requires: import torch; from math import ceil;
# from torch.autograd import Variable (ScatterList is project-defined)
def scatter(inputs, target_gpus, dim=0):
    r"""
    Slices variables into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not variables. Does not
    support Tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, Variable):
            return Scatter.apply(target_gpus, None, dim, obj)
        assert not torch.is_tensor(obj), "Tensors not supported in scatter."
        if isinstance(obj, ScatterList):
            # Ceil-divide the list so each GPU receives one contiguous chunk.
            chunk_size = int(ceil(float(len(obj)) / float(len(target_gpus))))
            return [obj[i * chunk_size:(i + 1) * chunk_size]
                    for i in range(len(target_gpus))]
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for _ in target_gpus]

    return scatter_map(inputs) 
Developer ID: CharlesShang, Project: Detectron-PYTORCH, Lines: 31, Source: data_parallel.py
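The ScatterList branch in this variant ceil-divides the list, so earlier GPUs may receive one extra element and the last chunk can come up short. A quick check of that arithmetic:

from math import ceil

obj, n_gpus = list(range(5)), 2
chunk_size = int(ceil(float(len(obj)) / float(n_gpus)))  # 3
chunks = [obj[i * chunk_size:(i + 1) * chunk_size] for i in range(n_gpus)]
print(chunks)                                            # [[0, 1, 2], [3, 4]]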

Example 9: scatter_imbalance

# Required import: from torch.nn.parallel._functions import Scatter [as alias]
# Or: from torch.nn.parallel._functions.Scatter import apply [as alias]
# Also requires: import torch
def scatter_imbalance(inputs, target_gpus, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            if (len(target_gpus) == 4) and (obj.size(dim) == 22):
                return Scatter.apply(target_gpus, (4, 6, 6, 6), dim, obj)
            elif (len(target_gpus) == 4) and (obj.size(dim) == 60):
                return Scatter.apply(target_gpus, (12, 16, 16, 16), dim, obj)
            elif (len(target_gpus) == 4) and (obj.size(dim) == 144):
                return Scatter.apply(target_gpus, (24, 40, 40, 40), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 46):
                return Scatter.apply(target_gpus, (4, 6, 6, 6, 6, 6, 6, 6), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 62):
                return Scatter.apply(target_gpus, (6, 8, 8, 8, 8, 8, 8, 8), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 94):
                return Scatter.apply(target_gpus, (10, 12, 12, 12, 12, 12, 12, 12), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 110):
                return Scatter.apply(target_gpus, (12, 14, 14, 14, 14, 14, 14, 14), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 118):
                return Scatter.apply(target_gpus, (13, 15, 15, 15, 15, 15, 15, 15), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 126):
                return Scatter.apply(target_gpus, (14, 16, 16, 16, 16, 16, 16, 16), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 134):
                return Scatter.apply(target_gpus, (15, 17, 17, 17, 17, 17, 17, 17), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 142):
                return Scatter.apply(target_gpus, (16, 18, 18, 18, 18, 18, 18, 18), dim, obj)
            elif (len(target_gpus) == 16) and (obj.size(dim) == 222):
                return Scatter.apply(target_gpus, (12, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14), dim, obj)
            return Scatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None 
Developer ID: microsoft, Project: unilm, Lines: 52, Source: data_parallel.py
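The hard-coded tables above follow one pattern: GPU 0, which in DataParallel typically carries extra memory overhead, gets a smaller share while the remaining GPUs split the rest evenly. The sketch below reproduces most of the table entries programmatically; the heuristic and the first_share value are assumptions for illustration, not part of the project, and the 4-GPU (24, 40, 40, 40) row deviates from it:

def imbalanced_chunks(total, n_gpus, first_share=0.75):
    """Give GPU 0 roughly first_share of an even chunk; split the rest evenly."""
    rest = n_gpus - 1
    even = round(total / (rest + first_share))
    return (total - even * rest,) + (even,) * rest

print(imbalanced_chunks(22, 4))   # (4, 6, 6, 6)
print(imbalanced_chunks(142, 8))  # (16, 18, 18, 18, 18, 18, 18, 18)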

Example 10: scatter_imbalance

# Required import: from torch.nn.parallel._functions import Scatter [as alias]
# Or: from torch.nn.parallel._functions.Scatter import apply [as alias]
# Also requires: import torch
def scatter_imbalance(inputs, target_gpus, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            if (len(target_gpus) == 4) and (obj.size(dim) == 22):
                return Scatter.apply(target_gpus, (4, 6, 6, 6), dim, obj)
            elif (len(target_gpus) == 4) and (obj.size(dim) == 60):
                return Scatter.apply(target_gpus, (12, 16, 16, 16), dim, obj)
            elif (len(target_gpus) == 4) and (obj.size(dim) == 144):
                return Scatter.apply(target_gpus, (24, 40, 40, 40), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 46):
                return Scatter.apply(target_gpus, (4, 6, 6, 6, 6, 6, 6, 6), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 62):
                return Scatter.apply(target_gpus, (6, 8, 8, 8, 8, 8, 8, 8), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 94):
                return Scatter.apply(target_gpus, (10, 12, 12, 12, 12, 12, 12, 12), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 110):
                return Scatter.apply(target_gpus, (12, 14, 14, 14, 14, 14, 14, 14), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 118):
                return Scatter.apply(target_gpus, (13, 15, 15, 15, 15, 15, 15, 15), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 126):
                return Scatter.apply(target_gpus, (14, 16, 16, 16, 16, 16, 16, 16), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 134):
                return Scatter.apply(target_gpus, (15, 17, 17, 17, 17, 17, 17, 17), dim, obj)
            elif (len(target_gpus) == 8) and (obj.size(dim) == 142):
                return Scatter.apply(target_gpus, (16, 18, 18, 18, 18, 18, 18, 18), dim, obj)
            return Scatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None 
Developer ID: LuoweiZhou, Project: VLP, Lines: 50, Source: data_parallel.py


Note: the torch.nn.parallel._functions.Scatter.apply examples in this article were compiled by 純淨天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Consult each project's license before distributing or using the code. Do not reproduce without permission.