This article collects typical usage examples of the Python method torch.distributed.destroy_process_group. Wondering what distributed.destroy_process_group does, or how to use it? The curated code examples below should help. You can also explore the containing module, torch.distributed, for further usage examples.
The following shows 15 code examples of distributed.destroy_process_group, sorted by popularity by default.
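Before the examples, a minimal sketch of the lifecycle destroy_process_group belongs to may help: initialize the default process group, run collective work, then tear it down. This sketch is illustrative only (single process, gloo backend, loopback rendezvous address chosen arbitrarily) and is not taken from any of the examples below.

import os
import torch
import torch.distributed as dist

# Illustrative single-process setup: gloo backend, world_size=1.
os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")

dist.init_process_group(backend="gloo", rank=0, world_size=1)

t = torch.ones(3)
dist.all_reduce(t)  # trivial with a single rank; shown only for shape

dist.destroy_process_group()  # tear down the default group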
Example 1: spmd_main

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def spmd_main(local_world_size, local_rank):
    # These are the parameters used to initialize the process group
    env_dict = {
        key: os.environ[key]
        for key in ("MASTER_ADDR", "MASTER_PORT", "RANK", "WORLD_SIZE")
    }
    print(f"[{os.getpid()}] Initializing process group with: {env_dict}")
    dist.init_process_group(backend="nccl")
    print(
        f"[{os.getpid()}]: world_size = {dist.get_world_size()}, "
        + f"rank = {dist.get_rank()}, backend={dist.get_backend()}"
    )

    demo_basic(local_world_size, local_rank)

    # Tear down the process group
    dist.destroy_process_group()
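A note on Example 1: dist.init_process_group(backend="nccl") with no explicit init_method defaults to env:// initialization, which reads MASTER_ADDR, MASTER_PORT, RANK, and WORLD_SIZE from the environment; that is why env_dict echoes exactly those four variables before the call.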
Example 2: _test__native_dist_model_create_from_context_no_dist

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def _test__native_dist_model_create_from_context_no_dist(true_backend, true_device):
    assert _NativeDistModel.create_from_context() is None

    dist.init_process_group(true_backend, "tcp://0.0.0.0:2222", world_size=1, rank=0)
    dist.barrier()

    _test__native_dist_model_create_from_context_no_local_rank()

    true_conf = {
        "device": true_device,
        "local_rank": 0,
        "rank": 0,
        "world_size": 1,
        "node_index": 0,
        "nnodes": 1,
        "nproc_per_node": 1,
    }

    _test__native_dist_model_create_from_context_env_local_rank(true_conf)
    _test__native_dist_model_create_from_context_set_local_rank(true_conf)

    dist.destroy_process_group()
Example 3: clean_mp

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def clean_mp(self):
    # Only tear down the group if distributed training was actually set up.
    if self.can_parallel:
        dist.destroy_process_group()
Example 4: cleanup

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def cleanup():
    dist.destroy_process_group()
Example 5: close

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def close(self):
    r"""Close the Trainer and destroy the process group."""
    dist.destroy_process_group()
Example 6: cleanup

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def cleanup():
    dist.destroy_process_group()
# pylint: disable=no-member
Example 7: shutdown

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def shutdown(cls):
    # Rank 0 tells the TTP (trusted third party) process to terminate.
    if dist.get_rank() == 0 and cls.instance.ttp_initialized:
        cls.instance.send_obj(
            "terminate", cls.instance.get_ttp_rank(), cls.instance.ttp_group
        )
    # Destroy the named subgroups first, then the default group.
    dist.destroy_process_group(cls.instance.main_group)
    dist.destroy_process_group(cls.instance.ttp_group)
    dist.destroy_process_group()
    cls.instance = None
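A usage note on the calls above: destroy_process_group takes an optional group argument, so the first two calls tear down only the named subgroups (main_group and ttp_group), while the final bare call deinitializes the default group and the distributed package as a whole. Destroying subgroups explicitly before the default group, as done here, makes the teardown order unambiguous.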
Example 8: tearDown

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def tearDown(self):
    # We need to de-initialize the distributed world, otherwise other
    # tests will think we're distributed when we're really not.
    dist.destroy_process_group()
Example 9: ray_start_2_cpus

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
@pytest.fixture  # generator fixture: everything after the yield is teardown
def ray_start_2_cpus():
    address_info = ray.init(num_cpus=2)
    yield address_info
    # The code after the yield will run as teardown code.
    ray.shutdown()
    # Ensure that tests don't ALL fail
    if dist.is_initialized():
        dist.destroy_process_group()
Example 10: ray_start_4_cpus

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
@pytest.fixture  # generator fixture: everything after the yield is teardown
def ray_start_4_cpus():
    address_info = ray.init(num_cpus=4)
    yield address_info
    # The code after the yield will run as teardown code.
    ray.shutdown()
    # Ensure that tests don't ALL fail
    if dist.is_initialized():
        dist.destroy_process_group()
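The dist.is_initialized() guard in Examples 9 and 10 is the key detail: calling destroy_process_group when no process group was ever initialized fails, so the fixture teardown only destroys the group when a test actually created one. That way, one misbehaving test cannot poison the teardown of every test that follows.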
Example 11: shutdown

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def shutdown(self):
    """Attempts to shut down the worker."""
    # However, it seems to be harmless to remove permanently
    # since the processes are shut down anyway. This comment can be
    # removed in a future release if it is still not documented in
    # the stable PyTorch docs.
    dist.destroy_process_group()
    super(DistributedTorchRunner, self).shutdown()
Example 12: run_training_teardown

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def run_training_teardown(self):
    # Guard so teardown only runs once per Trainer.
    if hasattr(self, '_teardown_already_run') and self._teardown_already_run:
        return

    self._teardown_already_run = True

    # Train end events
    with self.profiler.profile('on_train_end'):
        # callbacks
        self.on_train_end()
        # model hooks
        if self.is_function_implemented('on_train_end'):
            self.get_model().on_train_end()

    if self.logger is not None:
        self.logger.finalize("success")

    # summarize profile results
    if self.global_rank == 0:
        self.profiler.describe()

    if self.global_rank == 0:
        for proc in self.interactive_ddp_procs:
            subprocess.Popen.kill(proc)

    # clean up dist group
    if self.use_ddp or self.use_ddp2:
        torch_distrib.destroy_process_group()
Example 13: test_sync_reduce_ddp

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def test_sync_reduce_ddp():
    """Make sure sync-reduce works with DDP"""
    tutils.reset_seed()
    tutils.set_random_master_port()

    worldsize = 2
    mp.spawn(_ddp_test_fn, args=(worldsize,), nprocs=worldsize)

    # dist.destroy_process_group()
Example 14: test_tensor_metric_ddp

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def test_tensor_metric_ddp():
    tutils.reset_seed()
    tutils.set_random_master_port()

    world_size = 2
    mp.spawn(_ddp_test_tensor_metric, args=(world_size,), nprocs=world_size)

    # dist.destroy_process_group()
Example 15: test_numpy_metric_ddp

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def test_numpy_metric_ddp():
    tutils.reset_seed()
    tutils.set_random_master_port()

    world_size = 2
    mp.spawn(_ddp_test_numpy_metric, args=(world_size,), nprocs=world_size)

    # dist.destroy_process_group()
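In Examples 13 through 15 the destroy_process_group call in the parent is commented out for a reason: the process group lives inside the workers spawned by mp.spawn, so the parent never initialized one and has nothing to destroy. The bodies of _ddp_test_fn and the other worker functions are not shown here; the following is a hypothetical worker of the same shape, where each spawned rank initializes and destroys its own group.

import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp

def _worker(rank, world_size):
    # Each spawned process joins the group itself...
    os.environ["MASTER_ADDR"] = "127.0.0.1"
    os.environ["MASTER_PORT"] = "29501"
    dist.init_process_group("gloo", rank=rank, world_size=world_size)

    t = torch.tensor([float(rank)])
    dist.all_reduce(t)  # sums rank values across all ranks
    assert t.item() == sum(range(world_size))

    # ...and tears it down itself; the parent never owns a group.
    dist.destroy_process_group()

if __name__ == "__main__":
    world_size = 2
    mp.spawn(_worker, args=(world_size,), nprocs=world_size)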