

Python distributed.all_reduce method code examples

This article collects typical usage examples of the torch.distributed.all_reduce method in Python. If you are wondering how distributed.all_reduce is used in practice, or what working examples of it look like, the curated code examples below may help. You can also browse further usage examples from the torch.distributed module that the method belongs to.


The following presents 15 code examples of the distributed.all_reduce method, sorted by popularity by default.
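
Before diving into the examples, here is a minimal, self-contained sketch of the pattern most of them share: every rank contributes a tensor, dist.all_reduce sums it in place on all ranks, and dividing by the world size turns that sum into a mean. This sketch is not taken from any of the projects below; the function name demo_all_reduce and the gloo backend are illustrative assumptions, and the script is assumed to be launched with torchrun so that the rank and world-size environment variables are set.

import torch
import torch.distributed as dist

def demo_all_reduce():
    # Join the default process group (torchrun supplies the env:// rendezvous variables).
    dist.init_process_group(backend="gloo")
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    # Each rank contributes its own value; all_reduce sums them in place on every rank.
    t = torch.tensor([float(rank)])
    dist.all_reduce(t, op=dist.ReduceOp.SUM)
    t /= world_size  # turn the sum into a mean, as most examples below do

    print(f"rank {rank}: reduced value = {t.item()}")
    dist.destroy_process_group()

if __name__ == "__main__":
    demo_all_reduce()

Run with, for example, torchrun --nproc_per_node=2 demo.py; every rank prints the same averaged value.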

Example 1: backward

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def backward(ctx, grad_mean_out, grad_cov_out):
    in_data, mean_bn = ctx.saved_tensors

    if ctx.training:
        dist.all_reduce(grad_mean_out)
        dist.all_reduce(grad_cov_out)
        world_size = dist.get_world_size()
    else:
        world_size = 1

    grad_cov_out = (grad_cov_out + grad_cov_out.transpose(1, 2)) / 2
    grad_cov_in = 2 * torch.bmm(grad_cov_out, (in_data - mean_bn)) \
        / (ctx.NHW * world_size)   # g x c x (N x H x W)

    grad_mean_in = grad_mean_out / ctx.NHW / world_size
    inDiff = grad_mean_in + grad_cov_in
    return inDiff, None, None, None, None
Developer: XingangPan, Project: Switchable-Whitening, Lines of code: 19, Source file: sync_switchwhiten.py

Example 2: _allreduce_coalesced

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    if bucket_size_mb > 0:
        bucket_size_bytes = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bucket_size_bytes)
    else:
        buckets = OrderedDict()
        for tensor in tensors:
            tp = tensor.type()
            if tp not in buckets:
                buckets[tp] = []
            buckets[tp].append(tensor)
        buckets = buckets.values()

    for bucket in buckets:
        flat_tensors = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat_tensors)
        flat_tensors.div_(world_size)
        for tensor, synced in zip(
                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
            tensor.copy_(synced) 
Developer: open-mmlab, Project: mmdetection, Lines of code: 22, Source file: dist_utils.py

Example 3: allreduce_grads

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
    """Allreduce gradients.

    Args:
        params (list[torch.nn.Parameter]): List of parameters of a model.
        coalesce (bool, optional): Whether to allreduce the gradients in a
            single coalesced operation. Defaults to True.
        bucket_size_mb (int, optional): Size of each bucket in MB. Defaults
            to -1, which disables size-based bucketing.
    """
    grads = [
        param.grad.data for param in params
        if param.requires_grad and param.grad is not None
    ]
    world_size = dist.get_world_size()
    if coalesce:
        _allreduce_coalesced(grads, world_size, bucket_size_mb)
    else:
        for tensor in grads:
            dist.all_reduce(tensor.div_(world_size)) 
Developer: open-mmlab, Project: mmdetection, Lines of code: 22, Source file: dist_utils.py
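
As a quick illustration of where Example 3 fits in a training loop (this is not part of the mmdetection source; train_step, model, optimizer, loss_fn and the batch keys below are hypothetical placeholders), the gradients are typically averaged right after the backward pass and before the optimizer update:

def train_step(model, optimizer, loss_fn, batch):
    optimizer.zero_grad()
    loss = loss_fn(model(batch["inputs"]), batch["targets"])
    loss.backward()
    # Average gradients across ranks so every replica applies the same update.
    allreduce_grads(list(model.parameters()), coalesce=True, bucket_size_mb=-1)
    optimizer.step()
    return loss.detach()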

Example 4: _all_reduce_coalesced

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def _all_reduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    if bucket_size_mb > 0:
        bucket_size_bytes = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bucket_size_bytes)
    else:
        buckets = OrderedDict()
        for tensor in tensors:
            tp = tensor.type()
            if tp not in buckets:
                buckets[tp] = []
            buckets[tp].append(tensor)
        buckets = buckets.values()

    for bucket in buckets:
        flat_tensors = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat_tensors)
        flat_tensors.div_(world_size)
        for tensor, synced in zip(
                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
            tensor.copy_(synced) 
Developer: DeepMotionAIResearch, Project: DenseMatchingBenchmark, Lines of code: 22, Source file: dist_utils.py

Example 5: reduce_dict

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict 
Developer: lopuhin, Project: kaggle-kuzushiji-2019, Lines of code: 27, Source file: utils.py
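
A brief usage sketch for Example 5 (not from the original repository; the loss names, CPU tensors and the rank-0 printing below are illustrative assumptions, and the process group is assumed to be initialized with a backend that supports the tensors' device, e.g. gloo for CPU):

loss_dict = {
    "loss_cls": torch.tensor(0.5),                    # hypothetical per-rank loss terms
    "loss_box": torch.tensor(1.0 + dist.get_rank()),
}
reduced = reduce_dict(loss_dict, average=True)        # identical averaged values on every rank
if dist.get_rank() == 0:
    print({k: round(v.item(), 4) for k, v in reduced.items()})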

Example 6: process_generic_model

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def process_generic_model(params: List, iters: int, has_early_stop: bool = False):
    """
    Runs a mock training with zero grads. This is due to a bug where the connection gets reset with custom new groups.
    :param params: Shapes of the model's parameters; each entry is passed to torch.zeros.
    :param iters: Number of mock iterations to run.
    :param has_early_stop: Whether to mirror the extra all_reduce calls used for the early-stop signal.
    """
    # Hopefully this function can go away in newer versions.
    for i in range(iters):
        for p in params:
            z = torch.zeros(p)
            dist.all_reduce(z, op=torch.distributed.ReduceOp.SUM)

        if has_early_stop:
            dist.all_reduce(torch.tensor(0.0), op=torch.distributed.ReduceOp.SUM)
            zeros = torch.zeros(1)
            dist.all_reduce(zeros, op=torch.distributed.ReduceOp.SUM)
            if zeros.item() > 0:
                break 
Developer: dmmiller612, Project: sparktorch, Lines of code: 20, Source file: distributed.py

Example 7: average_across_processes

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def average_across_processes(t: Union[torch.Tensor, Dict[str, torch.Tensor]]):
    r"""
    Averages a tensor, or a dict of tensors across all processes in a process
    group. Objects in all processes will finally have the same mean value.

    .. note::

        Nested dicts of tensors are not supported.

    Parameters
    ----------
    t: torch.Tensor or Dict[str, torch.Tensor]
        A tensor or dict of tensors to average across processes.
    """
    if dist.is_initialized():
        if isinstance(t, torch.Tensor):
            dist.all_reduce(t, op=dist.ReduceOp.SUM)
            t /= get_world_size()
        elif isinstance(t, dict):
            for k in t:
                dist.all_reduce(t[k], op=dist.ReduceOp.SUM)
                t[k] /= dist.get_world_size() 
Developer: kdexd, Project: virtex, Lines of code: 24, Source file: distributed.py

Example 8: step

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def step(self, loss):
    self.optimizer.zero_grad()
    loss.backward()
    dist.barrier()
    handles = []
    for param in self.network.parameters():
        handles.append(dist.all_reduce(param.grad, async_op=True))
    for handle in handles:
        handle.wait()
    if self.divide_grad:
        for param in self.network.parameters():
            param.grad.mul_(1.0 / self.world_sz)
    if self.grad_norm_clip:
        nn.utils.clip_grad_norm_(
            self.network.parameters(), self.grad_norm_clip
        )
    self.optimizer.step()
Developer: heronsystems, Project: adeptRL, Lines of code: 19, Source file: distrib.py

Example 9: step

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def step(self):
    print(f"learner {self.rank} step")

    # make sure exp_handles are done
    for handle in self.exp_handles:
        handle.wait()

    # batch together exp
    time.sleep(random.randint(0, 3))

    # update with other learners
    dist.barrier(self.learner_group)
    for p in self.network_grads:
        dist.all_reduce(p, group=self.learner_group)
    print(f"learner {self.rank} shared gradients")
    return True
Developer: heronsystems, Project: adeptRL, Lines of code: 18, Source file: ray_container.py

Example 10: backward

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def backward(self, grad_output):
    norm, std, weight = self.saved_tensors
    grad_weight = torch.empty_like(weight)
    grad_bias = torch.empty_like(weight)
    grad_input = torch.empty_like(grad_output)
    grad_output3d = grad_output.view(
        grad_output.size(0), grad_output.size(1), -1)
    grad_input3d = grad_input.view_as(grad_output3d)
    ext_module.sync_bn_backward_param(grad_output3d, norm, grad_weight,
                                      grad_bias)
    # all reduce
    if self.group_size > 1:
        dist.all_reduce(grad_weight, group=self.group)
        dist.all_reduce(grad_bias, group=self.group)
        grad_weight /= self.group_size
        grad_bias /= self.group_size
    ext_module.sync_bn_backward_data(grad_output3d, weight, grad_weight,
                                     grad_bias, norm, std, grad_input3d)
    return grad_input, None, None, grad_weight, grad_bias, \
        None, None, None, None
Developer: open-mmlab, Project: mmcv, Lines of code: 22, Source file: sync_bn.py

Example 11: all_reduce

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def all_reduce(self, input, op=ReduceOp.SUM, batched=False):
    """Reduces the input data across all parties; all get the final result."""
    assert dist.is_initialized(), "initialize the communicator first"

    if batched:
        assert isinstance(input, list), "batched reduce input must be a list"
        reqs = []
        result = [x.clone() for x in input]
        for tensor in result:
            reqs.append(
                dist.all_reduce(
                    tensor.data, op=op, group=self.main_group, async_op=True
                )
            )
        for req in reqs:
            req.wait()
    else:
        assert torch.is_tensor(
            input.data
        ), "unbatched input for reduce must be a torch tensor"
        result = input.clone()
        dist.all_reduce(result.data, op=op, group=self.main_group)
    return result
Developer: facebookresearch, Project: CrypTen, Lines of code: 25, Source file: distributed_communicator.py

Example 12: reduce_mean

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def reduce_mean(tensor):
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    tensor = tensor.clone()
    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
    return tensor 
Developer: open-mmlab, Project: mmdetection, Lines of code: 8, Source file: gfl_head.py

Example 13: _parse_losses

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def _parse_losses(self, losses):
        """Parse the raw outputs (losses) of the network.

        Args:
            losses (dict): Raw output of the network, which usually contains
                losses and other necessary information.

        Returns:
            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
                which may be a weighted sum of all losses, log_vars contains
                all the variables to be sent to the logger.
        """
        log_vars = OrderedDict()
        for loss_name, loss_value in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
            else:
                raise TypeError(
                    f'{loss_name} is not a tensor or list of tensors')

        loss = sum(_value for _key, _value in log_vars.items()
                   if 'loss' in _key)

        log_vars['loss'] = loss
        for loss_name, loss_value in log_vars.items():
            # reduce loss when distributed training
            if dist.is_available() and dist.is_initialized():
                loss_value = loss_value.data.clone()
                dist.all_reduce(loss_value.div_(dist.get_world_size()))
            log_vars[loss_name] = loss_value.item()

        return loss, log_vars 
Developer: open-mmlab, Project: mmdetection, Lines of code: 36, Source file: base.py

Example 14: average_gradients

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def average_gradients(model):
    """ Gradient averaging. """
    size = float(dist.get_world_size())
    for name, param in model.named_parameters():
        try:
            dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)
            param.grad.data /= size
        except Exception as e:
            logger.error('Error when all_reduce parameter {}, size={}, grad_type={}, error message {}'.format(
                name, param.size(), param.grad.data.dtype, repr(e)
            )) 
Developer: dolphin-zs, Project: Doc2EDAG, Lines of code: 13, Source file: base_task.py

Example 15: all_reduce_tensor

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import all_reduce [as alias]
def all_reduce_tensor(tensor, op=dist.ReduceOp.SUM, world_size=1):
    tensor = tensor.clone()
    dist.all_reduce(tensor, op)
    tensor.div_(world_size)
    return tensor 
Developer: lxtGH, Project: Fast_Seg, Lines of code: 7, Source file: tools.py


Note: The torch.distributed.all_reduce examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with those authors. Please follow each project's license when using or redistributing the code; do not reproduce this article without permission.