This article collects typical usage examples of the torch.multiprocessing.get_start_method method in Python. If you have been wondering what multiprocessing.get_start_method does, how to call it, or what real-world uses look like, the curated examples here may help. You can also explore further usage of its containing module, torch.multiprocessing.
The following presents 6 code examples of multiprocessing.get_start_method, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code examples.
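Before diving in, note that torch.multiprocessing is a drop-in wrapper around Python's standard multiprocessing module, so get_start_method behaves the same in both. A minimal sketch of the semantics the examples below rely on:

import torch.multiprocessing as mp

# With allow_none=True, get_start_method returns None while no start method
# has been fixed yet, instead of implicitly committing to the platform
# default ('fork' on Linux, 'spawn' on Windows and macOS).
print(mp.get_start_method(allow_none=True))  # None in a fresh interpreter
mp.set_start_method('spawn')
print(mp.get_start_method())                 # 'spawn'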
Example 1: init_dist
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import get_start_method [as alias]
import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp

def init_dist(backend='nccl', **kwargs):
    """Initialization for distributed training."""
    if mp.get_start_method(allow_none=True) != 'spawn':
        # force=True avoids a RuntimeError if a different method was already set
        mp.set_start_method('spawn', force=True)
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)
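This variant assumes a launcher has already populated the rendezvous environment: with no init_method argument, init_process_group defaults to the env:// scheme. A hedged sketch of the variables a launcher such as torchrun (or the older python -m torch.distributed.launch) injects into each process; train.py is a hypothetical script that calls init_dist:

import os

# Illustrative values only; normally set per process by the launcher,
# e.g. `torchrun --nproc_per_node=4 train.py`:
os.environ.setdefault('MASTER_ADDR', '127.0.0.1')  # address of the rank-0 host
os.environ.setdefault('MASTER_PORT', '29500')      # free port on that host
os.environ.setdefault('WORLD_SIZE', '1')           # total number of processes
os.environ.setdefault('RANK', '0')                 # unique rank of this process

init_dist(backend='nccl')  # init_process_group can now rendezvous via env://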
Example 2: init_dist
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import get_start_method [as alias]
import torch.multiprocessing as mp

def init_dist(launcher, backend='nccl', **kwargs):
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    # The _init_dist_* helpers live elsewhere in the source project;
    # a sketch of the pytorch variant follows this example.
    if launcher == 'pytorch':
        _init_dist_pytorch(backend, **kwargs)
    elif launcher == 'mpi':
        _init_dist_mpi(backend, **kwargs)
    elif launcher == 'slurm':
        _init_dist_slurm(backend, **kwargs)
    else:
        raise ValueError('Invalid launcher type: {}'.format(launcher))
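The helper functions are not part of this listing. Based on Example 1 and the mmcv-style code these snippets resemble, a plausible sketch of the pytorch-launcher helper (the mpi and slurm variants differ mainly in where they read the rank from); treat the body as an assumption, not the project's exact code:

import os

import torch
import torch.distributed as dist

def _init_dist_pytorch(backend, **kwargs):
    # One process per GPU; the rank comes from the environment set by
    # torch.distributed.launch / torchrun, as in Example 1.
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)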
Example 3: init_dist
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import get_start_method [as alias]
import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp

def init_dist(backend='nccl', **kwargs):
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)
Example 4: init_dist
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import get_start_method [as alias]
import torch.multiprocessing as mp

def init_dist(launcher, backend='nccl', **kwargs):
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    # Identical to Example 2 apart from the f-string; the _init_dist_*
    # helpers are again defined elsewhere (see the sketch after Example 2).
    if launcher == 'pytorch':
        _init_dist_pytorch(backend, **kwargs)
    elif launcher == 'mpi':
        _init_dist_mpi(backend, **kwargs)
    elif launcher == 'slurm':
        _init_dist_slurm(backend, **kwargs)
    else:
        raise ValueError(f'Invalid launcher type: {launcher}')
Example 5: init_dist
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import get_start_method [as alias]
import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp

def init_dist(backend='nccl', **kwargs):
    """Initialization for distributed training."""
    # get_start_method returns the name of the method used to start
    # processes; 'spawn' is already the default on Windows.
    if mp.get_start_method(allow_none=True) != 'spawn':
        mp.set_start_method('spawn', force=True)  # force=True overrides any previously set method
    rank = int(os.environ['RANK'])  # process rank, set by the launcher
    num_gpus = torch.cuda.device_count()  # number of visible GPUs
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)  # initialize the default process group
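The force=True flag is what makes this variant robust: once a start method has been fixed, calling set_start_method again without force raises a RuntimeError. A small sketch of the failure mode it guards against (standard multiprocessing shown; torch.multiprocessing behaves identically):

import multiprocessing as mp

mp.set_start_method('fork')       # e.g. fixed earlier by another library (POSIX only)
try:
    mp.set_start_method('spawn')  # context already set -> RuntimeError
except RuntimeError as exc:
    print(exc)                    # "context has already been set"
mp.set_start_method('spawn', force=True)  # force=True overrides it safely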
Example 6: init_dist
# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import get_start_method [as alias]
import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp

def init_dist(backend='nccl', **kwargs):
    """Initialization for distributed training."""
    if mp.get_start_method(allow_none=True) != 'spawn':
        # force=True added here as well; without it this call raises a
        # RuntimeError whenever a start method was already fixed
        mp.set_start_method('spawn', force=True)
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)
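A closing note on why every example insists on 'spawn': a CUDA context cannot be safely re-initialized in a forked child process, so GPU-touching workers must be spawned. A minimal sketch using torch.multiprocessing.spawn, which uses the spawn start method by default:

import torch
import torch.multiprocessing as mp

def _worker(rank):
    # Each spawned child starts a fresh interpreter and CUDA context;
    # under 'fork' this would break once the parent had touched CUDA.
    torch.cuda.set_device(rank)
    print(f'worker {rank} is using GPU {torch.cuda.current_device()}')

if __name__ == '__main__':
    mp.spawn(_worker, nprocs=torch.cuda.device_count())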