

Python distributed.reduce Method Code Examples

This article collects typical usage examples of the Python method torch.distributed.reduce. If you are wondering what distributed.reduce does, or how to call it, the selected code examples below should help. You can also explore other usage examples from the torch.distributed module.


A total of 9 code examples of distributed.reduce are shown below, sorted by popularity by default.
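Before the examples, here is a minimal, self-contained sketch of the primitive they all build on: every process contributes a tensor to dist.reduce, and only the destination rank is guaranteed to hold the combined (by default, summed) result afterwards. The two-process setup, the gloo backend, and the run_worker name below are illustrative assumptions, not taken from any of the projects listed.

import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp

def run_worker(rank, world_size):
    # every process joins the same process group; gloo works on CPU
    os.environ["MASTER_ADDR"] = "127.0.0.1"
    os.environ["MASTER_PORT"] = "29500"
    dist.init_process_group("gloo", rank=rank, world_size=world_size)

    # each rank holds a different tensor: [1,1,1] on rank 0, [2,2,2] on rank 1
    tensor = torch.ones(3) * (rank + 1)
    # sum all tensors into rank 0; tensors on other ranks should not be relied on afterwards
    dist.reduce(tensor, dst=0, op=dist.ReduceOp.SUM)
    if dist.get_rank() == 0:
        print("rank 0 result:", tensor)  # expected: tensor([3., 3., 3.]) with 2 processes
    dist.destroy_process_group()

if __name__ == "__main__":
    world_size = 2
    mp.spawn(run_worker, args=(world_size,), nprocs=world_size)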

Example 1: reduce_loss_dict

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import reduce [as alias]
def reduce_loss_dict(loss_dict):
    """
    Reduce the loss dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    loss_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        loss_names = []
        all_losses = []
        for k in sorted(loss_dict.keys()):
            loss_names.append(k)
            all_losses.append(loss_dict[k])
        all_losses = torch.stack(all_losses, dim=0)
        dist.reduce(all_losses, dst=0)
        if dist.get_rank() == 0:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            all_losses /= world_size
        reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}
    return reduced_losses 
Developer: Res2Net | Project: Res2Net-maskrcnn | Lines: 25 | Source: trainer.py
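A typical call site for a helper like this is the logging step of a training loop: backpropagation still uses the local, unreduced losses, while the reduced values are only printed on rank 0. The model, optimizer, and data_loader names below are assumed for illustration and do not come from the Res2Net trainer itself.

for images, targets in data_loader:
    loss_dict = model(images, targets)              # e.g. {"loss_cls": ..., "loss_box_reg": ...}
    losses = sum(loss for loss in loss_dict.values())

    # average each loss term across processes purely for logging
    loss_dict_reduced = reduce_loss_dict(loss_dict)
    if get_world_size() < 2 or dist.get_rank() == 0:
        print({k: v.item() for k, v in loss_dict_reduced.items()})

    optimizer.zero_grad()
    losses.backward()
    optimizer.step()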

Example 2: reduce_loss_dict

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import reduce [as alias]
def reduce_loss_dict(loss_dict):
    """
    Reduce the loss dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    loss_dict, after reduction (averaged).
    """
    world_size = get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        loss_names = []
        all_losses = []
        for k in sorted(loss_dict.keys()):
            loss_names.append(k)
            all_losses.append(loss_dict[k])
        all_losses = torch.stack(all_losses, dim=0)
        dist.reduce(all_losses, dst=0)
        if dist.get_rank() == 0:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            all_losses /= world_size
        reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}
    return reduced_losses 
Developer: DingXiaoH | Project: ACNet | Lines: 25 | Source: comm.py

Example 3: reduce

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import reduce [as alias]
def reduce(self, input, dst, op=ReduceOp.SUM, batched=False):
    """Reduces the input data across all parties."""
    assert dist.is_initialized(), "initialize the communicator first"

    if batched:
        assert isinstance(input, list), "batched reduce input must be a list"
        reqs = []
        result = [x.clone().data for x in input]
        for tensor in result:
            reqs.append(
                dist.reduce(
                    tensor, dst, op=op, group=self.main_group, async_op=True
                )
            )
        for req in reqs:
            req.wait()
    else:
        assert torch.is_tensor(
            input.data
        ), "unbatched input for reduce must be a torch tensor"
        result = input.clone()
        dist.reduce(result.data, dst, op=op, group=self.main_group)

    return result if dst == self.get_rank() else None
Developer: facebookresearch | Project: CrypTen | Lines: 26 | Source: distributed_communicator.py
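Unlike the in-place torch.distributed.reduce above, this wrapper clones the input and returns the combined tensor only on the destination rank, returning None everywhere else. A brief usage sketch, assuming comm is an already-initialized instance of the communicator class this method belongs to (the variable names are illustrative):

share = torch.tensor([1.0, 2.0, 3.0])
summed = comm.reduce(share, dst=0)     # combined tensor on rank 0, None on the other parties
if summed is not None:
    # only the destination rank reaches this branch
    print("rank 0 holds the reduced share:", summed)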

Example 4: all_reduce

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import reduce [as alias]
def all_reduce(self, input, op=ReduceOp.SUM, batched=False):
    """Reduces the input data across all parties; all get the final result."""
    assert dist.is_initialized(), "initialize the communicator first"

    if batched:
        assert isinstance(input, list), "batched reduce input must be a list"
        reqs = []
        result = [x.clone() for x in input]
        for tensor in result:
            reqs.append(
                dist.all_reduce(
                    tensor.data, op=op, group=self.main_group, async_op=True
                )
            )
        for req in reqs:
            req.wait()
    else:
        assert torch.is_tensor(
            input.data
        ), "unbatched input for reduce must be a torch tensor"
        result = input.clone()
        dist.all_reduce(result.data, op=op, group=self.main_group)
    return result
Developer: facebookresearch | Project: CrypTen | Lines: 25 | Source: distributed_communicator.py

Example 5: broadcast

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import reduce [as alias]
def broadcast(self, input, src, batched=False):
    """Broadcasts the tensor to all parties."""
    assert dist.is_initialized(), "initialize the communicator first"
    if batched:
        assert isinstance(input, list), "batched reduce input must be a list"
        reqs = []
        for tensor in input:
            reqs.append(
                dist.broadcast(tensor, src, group=self.main_group, async_op=True)
            )
        for req in reqs:
            req.wait()
    else:
        assert torch.is_tensor(
            input.data
        ), "unbatched input for reduce must be a torch tensor"
        dist.broadcast(input.data, src, group=self.main_group)
    return input
Developer: facebookresearch | Project: CrypTen | Lines: 20 | Source: distributed_communicator.py

Example 6: reduce_loss_dict

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import reduce [as alias]
def reduce_loss_dict(loss_dict):
    """
    Reduce the loss dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    loss_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        loss_names = []
        all_losses = []
        # note: unlike the other examples, keys are not sorted here, so dict
        # iteration order must already match across processes
        for k, v in loss_dict.items():
            loss_names.append(k)
            all_losses.append(v)
        all_losses = torch.stack(all_losses, dim=0)
        dist.reduce(all_losses, dst=0)
        if dist.get_rank() == 0:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            all_losses /= world_size
        reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}
    return reduced_losses 
Developer: HRNet | Project: HRNet-MaskRCNN-Benchmark | Lines: 25 | Source: trainer.py

Example 7: reduce_scalar_outputs

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import reduce [as alias]
def reduce_scalar_outputs(scalar_outputs):
    world_size = get_world_size()
    if world_size < 2:
        return scalar_outputs
    with torch.no_grad():
        names = []
        scalars = []
        for k in sorted(scalar_outputs.keys()):
            names.append(k)
            scalars.append(scalar_outputs[k])
        scalars = torch.stack(scalars, dim=0)
        dist.reduce(scalars, dst=0)
        if dist.get_rank() == 0:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            scalars /= world_size
        reduced_scalars = {k: v for k, v in zip(names, scalars)}

    return reduced_scalars 
Developer: alibaba | Project: cascade-stereo | Lines: 21 | Source: utils.py

Example 8: reduce_loss_dict

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import reduce [as alias]
def reduce_loss_dict(loss_dict):
    """
    Reduce the loss dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    loss_dict, after reduction.
    """
    world_size = dist_util.get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        loss_names = []
        all_losses = []
        for k in sorted(loss_dict.keys()):
            loss_names.append(k)
            all_losses.append(loss_dict[k])
        all_losses = torch.stack(all_losses, dim=0)
        dist.reduce(all_losses, dst=0)
        if dist.get_rank() == 0:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            all_losses /= world_size
        reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}
    return reduced_losses 
Developer: lufficc | Project: SSD | Lines: 25 | Source: trainer.py

Example 9: reduce_dict

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import reduce [as alias]
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict 
Developer: Res2Net | Project: Res2Net-maskrcnn | Lines: 29 | Source: comm.py


Note: The torch.distributed.reduce examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, who retain copyright over the source code; please consult each project's license before redistributing or using the code. Do not republish without permission.