

Python multiprocessing.set_start_method Method Code Examples

This article collects typical usage examples of the Python method torch.multiprocessing.set_start_method. If you are unsure what set_start_method does, how to call it, or want to see how it is used in real projects, the curated examples below may help; you can also explore other usage of the torch.multiprocessing module it belongs to.


The following shows 8 code examples of multiprocessing.set_start_method, ordered by popularity by default.
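Before the project excerpts, a minimal, self-contained sketch of the method itself may help (the worker function and process count are illustrative, not taken from any project below): set_start_method selects how child processes are created, 'spawn' is required when child processes will use CUDA, and the method may be set at most once per program unless force=True is passed.

import torch.multiprocessing as mp

def worker(rank):
    # With 'spawn', this runs in a fresh interpreter rather than a fork.
    print(f"worker {rank} started")

if __name__ == '__main__':
    mp.set_start_method('spawn')  # must be called before any process is created
    procs = [mp.Process(target=worker, args=(i,)) for i in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()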

Example 1: setup

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import set_start_method [as alias]
# Also requires: os, torch, and torch.distributed as dist;
# train() and cleanup() are project-local helpers.
def setup(rank, device_ids, args):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'

    # initialize the process group
    dist.init_process_group("gloo", rank=rank, world_size=len(device_ids))

    train_file, test_file, batch_size, epochs, gpu_mode, num_workers, retrain_model, \
    retrain_model_path, gru_layers, hidden_size, learning_rate, weight_decay, model_dir, stats_dir, total_callers, \
    train_mode = args

    # issue with semaphore lock: https://github.com/pytorch/pytorch/issues/2517
    # mp.set_start_method('spawn')

    # Explicitly setting seed to make sure that models created in two processes
    # start from same random weights and biases. https://github.com/pytorch/pytorch/issues/2517
    torch.manual_seed(42)
    train(train_file, test_file, batch_size, epochs, gpu_mode, num_workers, retrain_model, retrain_model_path,
          gru_layers, hidden_size, learning_rate, weight_decay, model_dir, stats_dir, train_mode,
          total_callers, rank, device_ids[rank])
    cleanup() 
Author: kishwarshafin, Project: helen, Lines: 23, Source: train_distributed.py
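cleanup() is not included in the excerpt. In the standard PyTorch DDP setup/cleanup pattern this code follows, it simply destroys the process group; the sketch below assumes that pattern, and also shows the torch.multiprocessing.spawn call typically used to run setup once per rank (device_ids and args here are placeholders):

import torch.distributed as dist
import torch.multiprocessing as mp

def cleanup():
    # Tear down the default process group created in setup().
    dist.destroy_process_group()

if __name__ == '__main__':
    device_ids = [0, 1]  # assumed: one entry per GPU/process
    args = ...           # placeholder for the packed training arguments
    # spawn() invokes setup(rank, device_ids, args) for rank = 0..nprocs-1.
    mp.spawn(setup, args=(device_ids, args), nprocs=len(device_ids))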

Example 2: init_dist

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import set_start_method [as alias]
# _init_dist_pytorch, _init_dist_mpi, and _init_dist_slurm are project-local helpers.
def init_dist(launcher, backend='nccl', **kwargs):
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    if launcher == 'pytorch':
        _init_dist_pytorch(backend, **kwargs)
    elif launcher == 'mpi':
        _init_dist_mpi(backend, **kwargs)
    elif launcher == 'slurm':
        _init_dist_slurm(backend, **kwargs)
    else:
        raise ValueError('Invalid launcher type: {}'.format(launcher)) 
Author: dingjiansw101, Project: AerialDetection, Lines: 13, Source: env.py
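The _init_dist_pytorch, _init_dist_mpi, and _init_dist_slurm helpers are project-local and not part of the excerpt. For the 'pytorch' launcher, the body in mmcv-style projects is essentially what Examples 3 and 4 show; a sketch under that assumption:

import os

import torch
import torch.distributed as dist

def _init_dist_pytorch(backend, **kwargs):
    # RANK is exported by the launcher (e.g. python -m torch.distributed.launch).
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)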

Example 3: init_dist

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import set_start_method [as alias]
# Also requires: os, torch, and torch.distributed as dist.
def init_dist(backend='nccl', **kwargs):
    ''' initialization for distributed training'''
    # if mp.get_start_method(allow_none=True) is None:
    if mp.get_start_method(allow_none=True) != 'spawn':
        mp.set_start_method('spawn')
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs) 
Author: xinntao, Project: BasicSR, Lines: 11, Source: train.py
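Examples 3, 4, 6, and 7 all read RANK from os.environ, so they assume an external launcher (such as python -m torch.distributed.launch, or torchrun in newer PyTorch) has exported the rendezvous variables. A single-process sketch of that contract, with assumed values, using the CPU-friendly gloo backend:

import os

import torch.distributed as dist

# Variables normally exported by the launcher; the values here are assumptions.
os.environ.setdefault('MASTER_ADDR', 'localhost')
os.environ.setdefault('MASTER_PORT', '29500')
os.environ.setdefault('RANK', '0')
os.environ.setdefault('WORLD_SIZE', '1')

# With the default env:// init method, init_process_group reads the
# variables above to rendezvous.
dist.init_process_group(backend='gloo')
dist.destroy_process_group()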

Example 4: init_dist

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import set_start_method [as alias]
# Also requires: os, torch, and torch.distributed as dist.
def init_dist(backend='nccl', **kwargs):
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs) 
Author: open-mmlab, Project: mmcv, Lines: 9, Source: train_cifar10.py

Example 5: init_dist

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import set_start_method [as alias]
# _init_dist_pytorch, _init_dist_mpi, and _init_dist_slurm are project-local helpers.
def init_dist(launcher, backend='nccl', **kwargs):
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    if launcher == 'pytorch':
        _init_dist_pytorch(backend, **kwargs)
    elif launcher == 'mpi':
        _init_dist_mpi(backend, **kwargs)
    elif launcher == 'slurm':
        _init_dist_slurm(backend, **kwargs)
    else:
        raise ValueError(f'Invalid launcher type: {launcher}') 
Author: open-mmlab, Project: mmcv, Lines: 13, Source: dist_utils.py

Example 6: init_dist

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import set_start_method [as alias]
# Also requires: os, torch, and torch.distributed as dist.
def init_dist(backend='nccl', **kwargs):
    ''' initialization for distributed training'''
    # if mp.get_start_method(allow_none=True) is None:
    if mp.get_start_method(allow_none=True) != 'spawn':  # current start method, or None if unset
        mp.set_start_method('spawn', force=True)  # 'spawn' is the default on Windows
    rank = int(os.environ['RANK'])  # process rank exported by the launcher
    num_gpus = torch.cuda.device_count()  # number of GPUs visible to this process
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)  # initialize the default process group
Author: yuanjunchai, Project: IKC, Lines: 11, Source: train_SFTMD.py
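The force=True argument is what distinguishes this variant: without it, a second call to set_start_method raises RuntimeError("context has already been set"), which is exactly the error Example 8 below catches. A small demonstration ('fork' assumes a POSIX system):

import torch.multiprocessing as mp

mp.set_start_method('fork')
try:
    mp.set_start_method('spawn')  # second call raises RuntimeError
except RuntimeError as e:
    print(e)  # context has already been set
mp.set_start_method('spawn', force=True)  # force=True overrides the earlier setting
print(mp.get_start_method())  # spawn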

Example 7: init_dist

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import set_start_method [as alias]
# Also requires: os, torch, and torch.distributed as dist.
def init_dist(backend='nccl', **kwargs):
    """initialization for distributed training"""
    if mp.get_start_method(allow_none=True) != 'spawn':
        mp.set_start_method('spawn')
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs) 
Author: xinntao, Project: EDVR, Lines: 10, Source: train.py

Example 8: load_pytorch

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import set_start_method [as alias]
# Also requires: torch and a module-level logger (logging.Logger).
def load_pytorch():
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False
    logger.info("Using torch.multiprocessing.set_start_method('spawn')")
    import torch.multiprocessing as multiprocessing
    try:
        multiprocessing.set_start_method('spawn')
    except RuntimeError as e:
        logger.warning(str(e)) 
Author: eleurent, Project: rl-agents, Lines: 11, Source: utils.py


Note: The torch.multiprocessing.set_start_method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects; copyright in the source code remains with the original authors, and distribution or use should follow each project's License. Please do not republish without permission.