

Python distributed.is_available Method Code Examples

This article collects typical usage examples of the Python method torch.distributed.is_available. If you are unsure what exactly distributed.is_available does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from its containing module, torch.distributed.


The following presents 12 code examples of distributed.is_available, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
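Before the examples, it helps to state the core pattern they all share: dist.is_available() only reports whether PyTorch was built with distributed support; it says nothing about whether a process group is actually running, which is what dist.is_initialized() checks. A minimal sketch of the guard the examples below keep repeating (the helper name here is ours, not from any of the listed projects):

import torch.distributed as dist

def get_world_size_safe() -> int:
    # Fall back to a single-process world when the distributed package
    # is unavailable or no process group has been initialized.
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1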

Example 1: __init__

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import is_available [as alias]
# Also required by this snippet: import math
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
    if num_replicas is None:
        if not dist.is_available():
            raise RuntimeError("Requires distributed package to be available")
        num_replicas = dist.get_world_size()
    if rank is None:
        if not dist.is_available():
            raise RuntimeError("Requires distributed package to be available")
        rank = dist.get_rank()
    self.dataset = dataset
    self.num_replicas = num_replicas
    self.rank = rank
    self.epoch = 0
    self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
    self.total_size = self.num_samples * self.num_replicas
    self.shuffle = shuffle
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines of code: 18, Source file: distributed.py
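For context, this __init__ belongs to a sampler class that mirrors torch.utils.data.distributed.DistributedSampler. A hedged usage sketch, assuming the surrounding class is named DistributedSampler as in the upstream file, with a placeholder dataset:

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(100, 3))  # placeholder dataset
sampler = DistributedSampler(dataset, num_replicas=2, rank=0)
loader = DataLoader(dataset, batch_size=8, sampler=sampler)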

Example 2: __init__

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import is_available [as alias]
# Also required by this snippet: import math
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
    import torch.distributed as dist

    super().__init__(dataset)
    if num_replicas is None:  # pragma: no cover
        if not dist.is_available():
            raise RuntimeError("Requires distributed package to be available")
        num_replicas = dist.get_world_size()
    if rank is None:  # pragma: no cover
        if not dist.is_available():
            raise RuntimeError("Requires distributed package to be available")
        rank = dist.get_rank()

    self.dataset = dataset
    self.num_replicas = num_replicas
    self.rank = rank
    self.epoch = 0
    self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
    self.total_size = self.num_samples * self.num_replicas
    self.shuffle = shuffle
Developer: mars-project, Project: mars, Lines of code: 22, Source file: sampler.py

Example 3: __init__

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import is_available [as alias]
# Also required by this snippet: import math
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
    if num_replicas is None:
        if not dist.is_available():
            raise RuntimeError("Requires distributed package to be available")
        num_replicas = dist.get_world_size()
    if rank is None:
        if not dist.is_available():
            raise RuntimeError("Requires distributed package to be available")
        rank = dist.get_rank()
    self.dataset = dataset
    self.num_replicas = num_replicas
    self.rank = rank
    self.epoch = 0
    self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
    self.total_size = self.num_samples * self.num_replicas
    self.shuffle = shuffle
Developer: clw5180, Project: remote_sensing_object_detection_2019, Lines of code: 18, Source file: distributed.py

Example 4: __init__

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import is_available [as alias]
# Also required by this snippet: import math
def __init__(self, dataset, num_replicas=None, rank=None, pad=True):
    if num_replicas is None:
        if not dist.is_available():
            raise RuntimeError("Requires distributed package to be available")
        num_replicas = dist.get_world_size()
    if rank is None:
        if not dist.is_available():
            raise RuntimeError("Requires distributed package to be available")
        rank = dist.get_rank()
    self.dataset = dataset
    self.num_replicas = num_replicas
    self.rank = rank
    self.pad = pad
    self.epoch = 0
    if self.pad:
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
    else:
        self.num_samples = int(math.ceil((len(self.dataset) - self.rank) * 1.0 / self.num_replicas))
        self.total_size = len(self.dataset)
Developer: mlperf, Project: training_results_v0.5, Lines of code: 22, Source file: sampler.py
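For intuition on the pad=False branch, a small worked example (the numbers are illustrative, not from the MLPerf code): with 10 samples across 4 replicas, ceil((len - rank) / num_replicas) hands out 3, 3, 2, 2 samples to ranks 0 through 3, which sums exactly to total_size = 10 with no padding.

import math

length, num_replicas = 10, 4
for rank in range(num_replicas):
    # rank 0 -> 3, rank 1 -> 3, rank 2 -> 2, rank 3 -> 2; total 10
    print(rank, int(math.ceil((length - rank) * 1.0 / num_replicas)))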

Example 5: setup_distributed

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import is_available [as alias]
# Also required by this snippet: import os, torch
def setup_distributed(port=29500):
    if not dist.is_available() or not torch.cuda.is_available() or torch.cuda.device_count() <= 1:
        return 0, 1

    if 'MPIR_CVAR_CH3_INTERFACE_HOSTNAME' in os.environ:
        from mpi4py import MPI
        mpi_rank = MPI.COMM_WORLD.Get_rank()
        mpi_size = MPI.COMM_WORLD.Get_size()

        os.environ["MASTER_ADDR"] = '127.0.0.1'
        os.environ["MASTER_PORT"] = str(port)

        dist.init_process_group(backend="nccl", world_size=mpi_size, rank=mpi_rank)
        return mpi_rank, mpi_size

    dist.init_process_group(backend="nccl", init_method="env://")
    return dist.get_rank(), dist.get_world_size() 
Developer: openai, Project: gpt-2-output-dataset, Lines of code: 19, Source file: train.py
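A hedged launch sketch for setup_distributed; the model is a placeholder, and we assume a single-node job where the global rank doubles as the GPU index:

import torch

rank, world_size = setup_distributed()
model = torch.nn.Linear(8, 2)  # placeholder model
if world_size > 1:
    torch.cuda.set_device(rank)  # assumes one GPU per rank on one node
    model = model.cuda()
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])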

Example 6: reduce_mean

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import is_available [as alias]
def reduce_mean(tensor):
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    tensor = tensor.clone()
    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
    return tensor 
Developer: open-mmlab, Project: mmdetection, Lines of code: 8, Source file: gfl_head.py
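reduce_mean is the standard all-reduce-and-average idiom: dividing by the world size before a SUM all_reduce yields the cross-process mean, and the is_available/is_initialized guard makes it a no-op in single-process runs. A hedged usage sketch with illustrative values:

import torch

loss = torch.tensor(0.5)
# With N initialized processes, every rank receives the mean of the N
# per-rank losses; without an initialized group, loss is returned as is.
mean_loss = reduce_mean(loss)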

Example 7: _parse_losses

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import is_available [as alias]
# Also required by this snippet: import torch; from collections import OrderedDict
def _parse_losses(self, losses):
    """Parse the raw outputs (losses) of the network.

    Args:
        losses (dict): Raw output of the network, which usually contains
            losses and other necessary information.

    Returns:
        tuple[Tensor, dict]: (loss, log_vars). loss is the loss tensor,
            which may be a weighted sum of all losses; log_vars contains
            all the variables to be sent to the logger.
    """
    log_vars = OrderedDict()
    for loss_name, loss_value in losses.items():
        if isinstance(loss_value, torch.Tensor):
            log_vars[loss_name] = loss_value.mean()
        elif isinstance(loss_value, list):
            log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
        else:
            raise TypeError(
                f'{loss_name} is not a tensor or list of tensors')

    loss = sum(_value for _key, _value in log_vars.items()
               if 'loss' in _key)

    log_vars['loss'] = loss
    for loss_name, loss_value in log_vars.items():
        # reduce loss when distributed training
        if dist.is_available() and dist.is_initialized():
            loss_value = loss_value.data.clone()
            dist.all_reduce(loss_value.div_(dist.get_world_size()))
        log_vars[loss_name] = loss_value.item()

    return loss, log_vars
Developer: open-mmlab, Project: mmdetection, Lines of code: 36, Source file: base.py
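A hedged call sketch for _parse_losses; detector stands in for any model instance exposing this method, and the loss dict shape is illustrative:

import torch

losses = {
    'loss_cls': torch.tensor(0.3),
    'loss_bbox': [torch.tensor(0.1), torch.tensor(0.2)],  # list summed to 0.3
}
loss, log_vars = detector._parse_losses(losses)
# loss == 0.6: both keys contain 'loss', so both contribute to the total.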

Example 8: get_world_size

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import is_available [as alias]
def get_world_size():
    if not dist.is_available():
        return 1
    if not dist.is_initialized():
        return 1
    return dist.get_world_size() 
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines of code: 8, Source file: comm.py

Example 9: get_rank

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import is_available [as alias]
def get_rank():
    if not dist.is_available():
        return 0
    if not dist.is_initialized():
        return 0
    return dist.get_rank() 
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines of code: 8, Source file: comm.py

Example 10: synchronize

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import is_available [as alias]
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training
    """
    if not dist.is_available():
        return
    if not dist.is_initialized():
        return
    world_size = dist.get_world_size()
    if world_size == 1:
        return
    dist.barrier() 
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines of code: 15, Source file: comm.py

Example 11: get_world_size

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import is_available [as alias]
def get_world_size() -> int:
    if not dist.is_available():
        return 1
    if not dist.is_initialized():
        return 1
    return dist.get_world_size() 
Developer: soeaver, Project: Parsing-R-CNN, Lines of code: 8, Source file: misc.py

Example 12: is_dist_avail_and_initialized

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import is_available [as alias]
def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True 
Developer: paperswithcode, Project: torchbench, Lines of code: 8, Source file: coco_eval.py
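Taken together, the helpers in Examples 8 through 12 compose into the usual main-process guard for logging or checkpointing. A minimal sketch, assuming get_rank and synchronize from the snippets above are in scope (save_on_master is our name, not from the torchbench code):

import torch

def save_on_master(state, path):
    # Only rank 0 writes the checkpoint; the barrier keeps other ranks
    # from racing ahead and reading a half-written file.
    if get_rank() == 0:
        torch.save(state, path)
    synchronize()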


Note: The torch.distributed.is_available method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not republish without permission.