This article collects typical usage examples of the Python method torch.multiprocessing.set_start_method. If you are unsure what multiprocessing.set_start_method does or how to call it, the curated code examples below may help; you can also read further about the containing module, torch.multiprocessing.
Eight code examples of multiprocessing.set_start_method are shown below, sorted by popularity by default.
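Before the examples, a minimal sketch of the call itself (not taken from any of the repositories below): set_start_method is normally called once, inside the if __name__ == '__main__': guard of the main module, before any worker processes or DataLoader workers are created.

import torch.multiprocessing as mp

if __name__ == '__main__':
    # Only set the start method if it has not been fixed yet; a second call
    # without force=True raises RuntimeError.
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')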
Example 1: setup
# Required import: from torch import multiprocessing [as alias]
# or: from torch.multiprocessing import set_start_method [as alias]
def setup(rank, device_ids, args):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'
    # initialize the process group
    dist.init_process_group("gloo", rank=rank, world_size=len(device_ids))

    train_file, test_file, batch_size, epochs, gpu_mode, num_workers, retrain_model, \
        retrain_model_path, gru_layers, hidden_size, learning_rate, weight_decay, model_dir, stats_dir, total_callers, \
        train_mode = args

    # issue with semaphore lock: https://github.com/pytorch/pytorch/issues/2517
    # mp.set_start_method('spawn')
    # Explicitly setting seed to make sure that models created in two processes
    # start from same random weights and biases. https://github.com/pytorch/pytorch/issues/2517
    torch.manual_seed(42)
    train(train_file, test_file, batch_size, epochs, gpu_mode, num_workers, retrain_model, retrain_model_path,
          gru_layers, hidden_size, learning_rate, weight_decay, model_dir, stats_dir, train_mode,
          total_callers, rank, device_ids[rank])
    cleanup()
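The caller and the cleanup() function are not shown in this excerpt. A hedged sketch of how setup is typically driven, modeled on the standard PyTorch DDP recipe rather than the original repository:

import torch.distributed as dist
import torch.multiprocessing as mp

def cleanup():
    # Assumed counterpart to setup(): tear down the "gloo" process group.
    dist.destroy_process_group()

def run_training(device_ids, args):
    # mp.spawn starts len(device_ids) child processes with the 'spawn' start
    # method and passes each one its process index (the rank) as the first argument.
    mp.spawn(setup, args=(device_ids, args), nprocs=len(device_ids), join=True)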
Example 2: init_dist
# Required import: from torch import multiprocessing [as alias]
# or: from torch.multiprocessing import set_start_method [as alias]
def init_dist(launcher, backend='nccl', **kwargs):
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    if launcher == 'pytorch':
        _init_dist_pytorch(backend, **kwargs)
    elif launcher == 'mpi':
        _init_dist_mpi(backend, **kwargs)
    elif launcher == 'slurm':
        _init_dist_slurm(backend, **kwargs)
    else:
        raise ValueError('Invalid launcher type: {}'.format(launcher))
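The _init_dist_pytorch, _init_dist_mpi, and _init_dist_slurm helpers live elsewhere in the surrounding project (this dispatch pattern comes from mmcv-style codebases). A plausible sketch of the 'pytorch' branch, offered as an assumption rather than the original code:

import os
import torch
import torch.distributed as dist

def _init_dist_pytorch(backend, **kwargs):
    # The launcher (torch.distributed.launch / torchrun) exports RANK for each process;
    # bind this process to one GPU and join the default process group.
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)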
Example 3: init_dist
# Required import: from torch import multiprocessing [as alias]
# or: from torch.multiprocessing import set_start_method [as alias]
def init_dist(backend='nccl', **kwargs):
    '''initialization for distributed training'''
    # if mp.get_start_method(allow_none=True) is None:
    if mp.get_start_method(allow_none=True) != 'spawn':
        mp.set_start_method('spawn')
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)
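init_dist reads RANK from the environment and relies on dist.init_process_group's default env:// rendezvous, so the launcher must export the usual variables. For a quick single-process smoke test without a launcher, they can be set by hand (a sketch; the values below are illustrative, not from the source repository):

import os

os.environ.setdefault('RANK', '0')
os.environ.setdefault('WORLD_SIZE', '1')
os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
os.environ.setdefault('MASTER_PORT', '29500')

init_dist(backend='nccl')  # use backend='gloo' on a CPU-only machine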
Example 4: init_dist
# Required import: from torch import multiprocessing [as alias]
# or: from torch.multiprocessing import set_start_method [as alias]
def init_dist(backend='nccl', **kwargs):
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)
Example 5: init_dist
# Required import: from torch import multiprocessing [as alias]
# or: from torch.multiprocessing import set_start_method [as alias]
def init_dist(launcher, backend='nccl', **kwargs):
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    if launcher == 'pytorch':
        _init_dist_pytorch(backend, **kwargs)
    elif launcher == 'mpi':
        _init_dist_mpi(backend, **kwargs)
    elif launcher == 'slurm':
        _init_dist_slurm(backend, **kwargs)
    else:
        raise ValueError(f'Invalid launcher type: {launcher}')
Example 6: init_dist
# Required import: from torch import multiprocessing [as alias]
# or: from torch.multiprocessing import set_start_method [as alias]
def init_dist(backend='nccl', **kwargs):
    '''initialization for distributed training'''
    # if mp.get_start_method(allow_none=True) is None:
    if mp.get_start_method(allow_none=True) != 'spawn':  # get_start_method returns the name of the start method in use
        mp.set_start_method('spawn', force=True)  # 'spawn' is the default start method on Windows
    rank = int(os.environ['RANK'])  # process rank exported by the launcher
    num_gpus = torch.cuda.device_count()  # number of GPUs available on this node
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)  # initializes the default distributed process group
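The force=True flag is the difference between this variant and Example 3: once a start method has been fixed, a plain set_start_method call raises RuntimeError even if it names the same method, while force=True overrides the existing choice. A small demonstration sketch:

import torch.multiprocessing as mp

mp.set_start_method('spawn')
try:
    mp.set_start_method('spawn')          # second call without force raises
except RuntimeError as err:
    print(err)                            # "context has already been set"
mp.set_start_method('spawn', force=True)  # force=True replaces the existing context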
Example 7: init_dist
# Required import: from torch import multiprocessing [as alias]
# or: from torch.multiprocessing import set_start_method [as alias]
def init_dist(backend='nccl', **kwargs):
    """initialization for distributed training"""
    if mp.get_start_method(allow_none=True) != 'spawn':
        mp.set_start_method('spawn')
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)
Example 8: load_pytorch
# Required import: from torch import multiprocessing [as alias]
# or: from torch.multiprocessing import set_start_method [as alias]
def load_pytorch():
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False
    logger.info("Using torch.multiprocessing.set_start_method('spawn')")
    import torch.multiprocessing as multiprocessing
    try:
        multiprocessing.set_start_method('spawn')
    except RuntimeError as e:
        logger.warning(str(e))
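load_pytorch assumes a module-level torch import and a logger, neither of which appears in this excerpt. A hypothetical caller sketch showing those assumptions and why the try/except makes the function safe to call more than once:

import logging
import torch

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    load_pytorch()
    # A second call triggers RuntimeError inside set_start_method, which is
    # caught and logged as a warning instead of aborting the program.
    load_pytorch()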