

Python distributed.init_process_group Method Code Examples

This article collects typical usage examples of the Python method torch.distributed.init_process_group. If you are wondering what distributed.init_process_group actually does, how to call it, or what real-world code using it looks like, the curated examples below may help. You can also browse further usage examples from the torch.distributed module, where this method lives.


The sections below show 15 code examples of distributed.init_process_group, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Python code examples.
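Before diving into the individual examples, here is a minimal, self-contained sketch of the typical call pattern. The backend, address, and port below are placeholder choices for illustration, not values taken from any of the projects featured here:

import os
import torch.distributed as dist

def init_and_cleanup(rank, world_size):
    # The default "env://" init method reads the rendezvous address from
    # these environment variables.
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")

    # Use "nccl" for multi-GPU training, "gloo" for CPU-only or debugging runs.
    dist.init_process_group(backend="gloo", rank=rank, world_size=world_size)

    # ... build the model, wrap it in DistributedDataParallel, run training ...

    # Tear the process group down when this process is finished.
    dist.destroy_process_group()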

Example 1: setup

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def setup(rank, device_ids, args):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'

    # initialize the process group
    dist.init_process_group("gloo", rank=rank, world_size=len(device_ids))

    train_file, test_file, batch_size, epochs, gpu_mode, num_workers, retrain_model, \
    retrain_model_path, gru_layers, hidden_size, learning_rate, weight_decay, model_dir, stats_dir, total_callers, \
    train_mode = args

    # issue with semaphore lock: https://github.com/pytorch/pytorch/issues/2517
    # mp.set_start_method('spawn')

    # Explicitly setting seed to make sure that models created in two processes
    # start from same random weights and biases. https://github.com/pytorch/pytorch/issues/2517
    torch.manual_seed(42)
    train(train_file, test_file, batch_size, epochs, gpu_mode, num_workers, retrain_model, retrain_model_path,
          gru_layers, hidden_size, learning_rate, weight_decay, model_dir, stats_dir, train_mode,
          total_callers, rank, device_ids[rank])
    cleanup() 
Developer: kishwarshafin, Project: helen, Lines of code: 23, Source file: train_distributed.py
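A setup function like the one above is normally not called directly; it is handed to torch.multiprocessing.spawn, which starts one process per device and passes each process its rank as the first argument. The launcher below is a sketch under that assumption; train_args and device_ids are hypothetical placeholders, not names from the original project:

import torch.multiprocessing as mp

def launch_training(train_args, device_ids):
    # spawn() calls setup(rank, device_ids, train_args) in each of the
    # len(device_ids) child processes, with rank supplied automatically.
    mp.spawn(setup,
             args=(device_ids, train_args),
             nprocs=len(device_ids),
             join=True)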

Example 2: setup

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def setup(rank, total_callers, args, all_input_files, all_devices):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'

    # initialize the process group
    dist.init_process_group("gloo", rank=rank, world_size=total_callers)

    # expand the arguments
    output_filepath, model_path, batch_size, num_workers = args

    # call prediction function
    predict(all_input_files[rank],
            output_filepath,
            model_path,
            batch_size,
            num_workers,
            rank,
            all_devices[rank])
    cleanup() 
Developer: kishwarshafin, Project: helen, Lines of code: 21, Source file: predict_gpu.py

Example 3: setup

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def setup(rank, total_callers, args, all_input_files):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'

    # initialize the process group
    dist.init_process_group("gloo", rank=rank, world_size=total_callers)

    # expand the arguments
    output_filepath, model_path, batch_size, num_workers, threads = args

    # call prediction function
    predict(all_input_files[rank],
            output_filepath,
            model_path,
            batch_size,
            num_workers,
            rank,
            threads)
    cleanup() 
Developer: kishwarshafin, Project: helen, Lines of code: 21, Source file: predict_cpu.py

Example 4: __init__

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def __init__(self, rank, learner_ranks, worker_ranks, ip, port):
    world_size = len(learner_ranks) + len(worker_ranks)
    dist.init_process_group(
        "nccl",
        init_method="tcp://{}:{}".format(ip, port),
        rank=rank,
        world_size=world_size,
    )
    # Create a dedicated group for every (learner, worker) pair and keep
    # the ones this worker rank participates in.
    groups = {}
    for learner_rank in learner_ranks:
        for worker_rank in worker_ranks:
            g = dist.new_group([learner_rank, worker_rank])
            if worker_rank == rank:
                groups[learner_rank] = g
    dist.new_group(learner_ranks)

    self.groups = groups
    self.device = torch.device(f"cuda:{ray.get_gpu_ids()[0]}")
    self.rank = rank
    self.network = torch.zeros(3).to(self.device)
    self.exp = None
    self.network_handle = None
Developer: heronsystems, Project: adeptRL, Lines of code: 24, Source file: ray_container.py

Example 5: init_process_group

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def init_process_group(backend):

    comm = MPI.COMM_WORLD
    world_size = comm.Get_size()
    rank = comm.Get_rank()

    info = dict()
    if rank == 0:
        host = socket.gethostname()
        address = socket.gethostbyname(host)
        info.update(dict(MASTER_ADDR=address, MASTER_PORT='1234'))

    info = comm.bcast(info, root=0)
    info.update(dict(WORLD_SIZE=str(world_size), RANK=str(rank)))
    os.environ.update(info)

    distributed.init_process_group(backend=backend) 
Developer: skmhrk1209, Project: Single-Path-NAS-PyTorch, Lines of code: 19, Source file: distributed.py

Example 6: setup_distributed

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def setup_distributed(port=29500):
    if not dist.is_available() or not torch.cuda.is_available() or torch.cuda.device_count() <= 1:
        return 0, 1

    if 'MPIR_CVAR_CH3_INTERFACE_HOSTNAME' in os.environ:
        from mpi4py import MPI
        mpi_rank = MPI.COMM_WORLD.Get_rank()
        mpi_size = MPI.COMM_WORLD.Get_size()

        os.environ["MASTER_ADDR"] = '127.0.0.1'
        os.environ["MASTER_PORT"] = str(port)

        dist.init_process_group(backend="nccl", world_size=mpi_size, rank=mpi_rank)
        return mpi_rank, mpi_size

    dist.init_process_group(backend="nccl", init_method="env://")
    return dist.get_rank(), dist.get_world_size() 
Developer: openai, Project: gpt-2-output-dataset, Lines of code: 19, Source file: train.py

Example 7: spmd_main

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def spmd_main(local_world_size, local_rank):
    # These are the parameters used to initialize the process group
    env_dict = {
        key: os.environ[key]
        for key in ("MASTER_ADDR", "MASTER_PORT", "RANK", "WORLD_SIZE")
    }
    print(f"[{os.getpid()}] Initializing process group with: {env_dict}")
    dist.init_process_group(backend="nccl")
    print(
        f"[{os.getpid()}]: world_size = {dist.get_world_size()}, "
        + f"rank = {dist.get_rank()}, backend={dist.get_backend()}"
    )

    demo_basic(local_world_size, local_rank)

    # Tear down the process group
    dist.destroy_process_group() 
Developer: pytorch, Project: examples, Lines of code: 19, Source file: example.py
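For context, spmd_main expects MASTER_ADDR, MASTER_PORT, RANK, and WORLD_SIZE to already be present in the environment, which is exactly what torch.distributed.launch (or torchrun) provides when it starts the worker processes. A minimal driver could look like the sketch below; the flag names are an assumption for illustration and may differ from the original script:

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # torch.distributed.launch passes --local_rank to every worker process.
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--local_world_size", type=int, default=1)
    args = parser.parse_args()
    spmd_main(args.local_world_size, args.local_rank)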

Example 8: setup

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def setup():
    os.environ['MASTER_ADDR'] = args.master
    os.environ['MASTER_PORT'] = '29500'

    # initialize the process group
    dist.init_process_group("gloo", rank=args.rank, world_size=args.world_size)

    # Explicitly setting seed makes sure that models created in two processes
    # start from same random weights and biases. Alternatively, sync models
    # on start with the callback below.
    #torch.manual_seed(42) 
Developer: pytorchbearer, Project: torchbearer, Lines of code: 13, Source file: distributed_data_parallel.py

Example 9: _init_dist_pytorch

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def _init_dist_pytorch(backend, **kwargs):
    # TODO: use local_rank instead of rank % num_gpus
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs) 
Developer: dingjiansw101, Project: AerialDetection, Lines of code: 8, Source file: env.py

Example 10: _init_dist_slurm

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def _init_dist_slurm(backend, port=29500, **kwargs):
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(proc_id % num_gpus)
    addr = subprocess.getoutput(
        'scontrol show hostname {} | head -n1'.format(node_list))
    os.environ['MASTER_PORT'] = str(port)
    os.environ['MASTER_ADDR'] = addr
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['RANK'] = str(proc_id)
    dist.init_process_group(backend=backend) 
Developer: dingjiansw101, Project: AerialDetection, Lines of code: 15, Source file: env.py

Example 11: main

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def main(args):
    # Initialize multi-processing
    distributed.init_process_group(backend='nccl', init_method='env://')
    device_id, device = args.local_rank, torch.device(args.local_rank)
    rank, world_size = distributed.get_rank(), distributed.get_world_size()
    torch.cuda.set_device(device_id)

    # Initialize logging
    if rank == 0:
        logging.init(args.log_dir, "test")

    # Load configuration
    config = make_config(args)

    # Create dataloader
    test_dataloader = make_dataloader(args, config, rank, world_size)
    meta = load_meta(args.meta)

    # Create model
    model = make_model(config, meta["num_thing"], meta["num_stuff"])

    # Load snapshot
    log_debug("Loading snapshot from %s", args.model)
    resume_from_snapshot(model, args.model, ["body", "rpn_head", "roi_head"])

    # Init GPU stuff
    torch.backends.cudnn.benchmark = config["general"].getboolean("cudnn_benchmark")
    model = DistributedDataParallel(model.cuda(device), device_ids=[device_id], output_device=device_id)

    if args.raw:
        save_function = partial(save_prediction_raw, out_dir=args.out_dir)
    else:
        save_function = partial(
            save_prediction_image, out_dir=args.out_dir, colors=meta["palette"],
            num_stuff=meta["num_stuff"], threshold=args.threshold)
    test(model, test_dataloader, device=device, summary=None,
         log_interval=config["general"].getint("log_interval"), save_function=save_function) 
Developer: mapillary, Project: seamseg, Lines of code: 39, Source file: test_instance_seg.py

Example 12: setup

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def setup(hp, rank, world_size):
    os.environ["MASTER_ADDR"] = hp.train.dist.master_addr
    os.environ["MASTER_PORT"] = hp.train.dist.master_port

    # initialize the process group
    dist.init_process_group(hp.train.dist.mode, rank=rank, world_size=world_size) 
Developer: ryul99, Project: pytorch-project-template, Lines of code: 8, Source file: trainer.py

Example 13: main

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def main():
    import torch.nn as nn
    import torch.distributed as dist
    import torch.optim as optim
    import torch.utils.data

    dist.init_process_group(backend='gloo')
    torch.manual_seed(42)

    data = torch.rand((1000, 32), dtype=torch.float32)
    labels = torch.randint(1, (1000, 10), dtype=torch.float32)

    train_dataset = torch.utils.data.TensorDataset(data, labels)
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=32,
                                               shuffle=False,
                                               sampler=train_sampler)

    model = nn.parallel.DistributedDataParallel(get_model())
    optimizer = optim.SGD(model.parameters(),
                          lr=0.01, momentum=0.5)
    criterion = nn.BCELoss()

    for _ in range(2):
        # 2 epochs
        for _, (batch_data, batch_labels) in enumerate(train_loader):
            outputs = model(batch_data)
            loss = criterion(outputs.squeeze(), batch_labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step() 
Developer: mars-project, Project: mars, Lines of code: 34, Source file: pytorch_sample.py
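The example calls a get_model() helper that is not shown in the snippet. Given the shapes used above (32 input features, 10 targets, BCELoss on the squeezed output), a compatible definition could look like the following sketch; this is an assumption for completeness, not the project's actual model:

import torch.nn as nn

def get_model():
    # 32 input features -> 10 sigmoid outputs, so that BCELoss receives
    # probabilities matching the (batch, 10) float targets built above.
    return nn.Sequential(
        nn.Linear(32, 64),
        nn.ReLU(),
        nn.Linear(64, 10),
        nn.Sigmoid(),
    )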

Example 14: main

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def main():
    import torch.nn as nn
    import torch.distributed as dist
    import torch.optim as optim
    import torch.utils.data
    import mars.tensor as mt
    from mars.learn.contrib.pytorch import MarsDataset, MarsDistributedSampler

    dist.init_process_group(backend='gloo')
    torch.manual_seed(42)

    data = mt.named_tensor(name='data')
    labels = mt.named_tensor(name='labels')
    train_dataset = MarsDataset(data, labels)
    train_sampler = MarsDistributedSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=32,
                                               shuffle=False,
                                               sampler=train_sampler)

    model = nn.parallel.DistributedDataParallel(get_model())
    optimizer = optim.SGD(model.parameters(),
                          lr=0.01, momentum=0.5)
    criterion = nn.BCELoss()

    for _ in range(2):
        # 2 epochs
        for _, (batch_data, batch_labels) in enumerate(train_loader):
            outputs = model(batch_data)
            loss = criterion(outputs.squeeze(), batch_labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step() 
Developer: mars-project, Project: mars, Lines of code: 35, Source file: dataset_sample.py

Example 15: init_processes

# Required import: from torch import distributed [as alias]
# Alternatively: from torch.distributed import init_process_group [as alias]
def init_processes(backend, master_addr, master_port, rank, world_size,
                   rows, columns, host, num_gpus):
    # Initialize the distributed environment.
    os.environ['WORLD_SIZE'] = str(world_size)
    os.environ['RANK'] = str(rank)
    os.environ['MASTER_ADDR'] = master_addr
    os.environ['MASTER_PORT'] = master_port

    logger.info('Init process rank {} on host \'{}\''.format(rank, host))
    dist.init_process_group(backend=backend, rank=rank, world_size=world_size)
    run(backend, rank, rows, columns, num_gpus) 
Developer: aws, Project: sagemaker-pytorch-training-toolkit, Lines of code: 13, Source file: distributed_operations.py


Note: The torch.distributed.init_process_group examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code; do not republish without permission.