

Python distributed.destroy_process_group Method: Code Examples

This article collects typical usage examples of Python's torch.distributed.destroy_process_group method. If you are wondering what distributed.destroy_process_group does or how to use it, the curated examples below may help. You can also explore further usage examples for torch.distributed, the module this method belongs to.


The following presents 15 code examples of distributed.destroy_process_group, sorted by popularity by default.
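Before the project-specific examples, here is a minimal, self-contained sketch (not drawn from any project below) of the usual pairing: every init_process_group should eventually be matched by a destroy_process_group. The gloo backend, address, port, and worker name are assumptions for illustration.

# Minimal hedged sketch of the init/destroy pairing; the backend, address,
# port, and function names are illustrative assumptions.
import os
import torch.distributed as dist
import torch.multiprocessing as mp

def worker(rank, world_size):
    os.environ["MASTER_ADDR"] = "127.0.0.1"
    os.environ["MASTER_PORT"] = "29500"
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
    # ... collectives / training would run here ...
    dist.destroy_process_group()  # tear down the default group when done

if __name__ == "__main__":
    world_size = 2
    mp.spawn(worker, args=(world_size,), nprocs=world_size)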

Example 1: spmd_main

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def spmd_main(local_world_size, local_rank):
    # These are the parameters used to initialize the process group
    env_dict = {
        key: os.environ[key]
        for key in ("MASTER_ADDR", "MASTER_PORT", "RANK", "WORLD_SIZE")
    }
    print(f"[{os.getpid()}] Initializing process group with: {env_dict}")
    dist.init_process_group(backend="nccl")
    print(
        f"[{os.getpid()}]: world_size = {dist.get_world_size()}, "
        + f"rank = {dist.get_rank()}, backend={dist.get_backend()}"
    )

    demo_basic(local_world_size, local_rank)

    # Tear down the process group
    dist.destroy_process_group() 
Developer: pytorch, Project: examples, Lines: 19, Source: example.py
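Note that spmd_main above calls a demo_basic that is not shown in the snippet. A hedged stand-in, assuming it wraps a small model in DistributedDataParallel on the process's local GPU (the model, tensor shapes, and device handling are invented for illustration):

# Hedged stand-in for the demo_basic referenced in Example 1; the model,
# shapes, and device handling are assumptions.
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

def demo_basic(local_world_size, local_rank):
    # In this sketch each process drives exactly one GPU.
    model = torch.nn.Linear(10, 10).to(local_rank)
    ddp_model = DDP(model, device_ids=[local_rank])
    outputs = ddp_model(torch.randn(20, 10).to(local_rank))
    print(f"[rank {dist.get_rank()}] output shape: {outputs.shape}")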

Example 2: _test__native_dist_model_create_from_context_no_dist

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def _test__native_dist_model_create_from_context_no_dist(true_backend, true_device):

    assert _NativeDistModel.create_from_context() is None

    dist.init_process_group(true_backend, "tcp://0.0.0.0:2222", world_size=1, rank=0)
    dist.barrier()

    _test__native_dist_model_create_from_context_no_local_rank()

    true_conf = {
        "device": true_device,
        "local_rank": 0,
        "rank": 0,
        "world_size": 1,
        "node_index": 0,
        "nnodes": 1,
        "nproc_per_node": 1,
    }

    _test__native_dist_model_create_from_context_env_local_rank(true_conf)
    _test__native_dist_model_create_from_context_set_local_rank(true_conf)

    dist.destroy_process_group() 
Developer: pytorch, Project: ignite, Lines: 25, Source: test_native.py

Example 3: clean_mp

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def clean_mp(self):
        if self.can_parallel:
            dist.destroy_process_group() 
Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines: 5, Source: i3d_learner.py

Example 4: cleanup

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def cleanup():
    dist.destroy_process_group() 
Developer: ryul99, Project: pytorch-project-template, Lines: 4, Source: trainer.py
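A cleanup() like this follows the setup/cleanup pattern from the PyTorch DDP tutorial. A hedged sketch of the setup() it usually pairs with (the address, port, and backend choice are assumptions):

# Hedged sketch of the setup() that typically pairs with cleanup();
# the address, port, and backend are assumptions.
import os
import torch.distributed as dist

def setup(rank, world_size):
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "12355"
    dist.init_process_group("gloo", rank=rank, world_size=world_size)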

Example 5: close

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def close(self):
        r"""关闭Trainer,销毁进程"""
        dist.destroy_process_group() 
Developer: fastnlp, Project: fastNLP, Lines: 5, Source: dist_trainer.py

Example 6: cleanup

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def cleanup():
    dist.destroy_process_group()

# pylint: disable=no-member 
Developer: maxjiang93, Project: space_time_pde, Lines: 6, Source: train_ddp.py

Example 7: shutdown

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def shutdown(cls):
        if dist.get_rank() == 0 and cls.instance.ttp_initialized:
            cls.instance.send_obj(
                "terminate", cls.instance.get_ttp_rank(), cls.instance.ttp_group
            )
        dist.destroy_process_group(cls.instance.main_group)
        dist.destroy_process_group(cls.instance.ttp_group)
        dist.destroy_process_group()
        cls.instance = None 
Developer: facebookresearch, Project: CrypTen, Lines: 11, Source: distributed_communicator.py
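Example 7 shows that destroy_process_group accepts an optional group argument: passing a subgroup destroys only that group, while calling it with no argument tears down the default group. A hedged sketch using dist.new_group (assumes a default group is already initialized):

# Hedged sketch of scoped teardown; assumes init_process_group was
# already called and that ranks 0 and 1 exist.
import torch.distributed as dist

subgroup = dist.new_group(ranks=[0, 1])  # collective: every rank must call this
# ... run collectives scoped to the subgroup here ...
dist.destroy_process_group(subgroup)     # destroy only this subgroup
dist.destroy_process_group()             # finally, the default group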

Example 8: tearDown

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def tearDown(self):
        # we need to de-initialize the distributed world, otherwise other
        # tests will think we're distributed when we're really not.
        dist.destroy_process_group() 
Developer: facebookresearch, Project: ParlAI, Lines: 6, Source: test_distributed.py

Example 9: ray_start_2_cpus

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def ray_start_2_cpus():
    address_info = ray.init(num_cpus=2)
    yield address_info
    # The code after the yield will run as teardown code.
    ray.shutdown()
    # Ensure that tests don't ALL fail
    if dist.is_initialized():
        dist.destroy_process_group() 
Developer: ray-project, Project: ray, Lines: 10, Source: test_torch.py

Example 10: ray_start_4_cpus

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def ray_start_4_cpus():
    address_info = ray.init(num_cpus=4)
    yield address_info
    # The code after the yield will run as teardown code.
    ray.shutdown()
    # Ensure that tests don't ALL fail
    if dist.is_initialized():
        dist.destroy_process_group() 
Developer: ray-project, Project: ray, Lines: 10, Source: test_torch.py
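Examples 9 and 10 are yield-style pytest fixtures; the @pytest.fixture decorator was presumably dropped by the snippet extraction. A hedged sketch of how such a fixture is declared and consumed (the names and bodies here are hypothetical):

# Hedged usage sketch: a yield-style fixture is registered with
# @pytest.fixture and injected into tests by parameter name.
import pytest

@pytest.fixture
def cluster():  # hypothetical stand-in for ray_start_2_cpus
    yield "address-info"
    # teardown (ray.shutdown / destroy_process_group) would run here

def test_uses_cluster(cluster):
    assert cluster == "address-info"  # fixture value from the yield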

Example 11: shutdown

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def shutdown(self):
        """Attempts to shut down the worker."""
        # destroy_process_group is not yet documented in the stable
        # PyTorch docs, but calling it here appears harmless since the
        # worker processes are shut down anyway.
        dist.destroy_process_group()
        super(DistributedTorchRunner, self).shutdown() 
Developer: ray-project, Project: ray, Lines: 10, Source: distributed_torch_runner.py

Example 12: run_training_teardown

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def run_training_teardown(self):
        if hasattr(self, '_teardown_already_run') and self._teardown_already_run:
            return

        self._teardown_already_run = True

        # Train end events
        with self.profiler.profile('on_train_end'):
            # callbacks
            self.on_train_end()
            # model hooks
            if self.is_function_implemented('on_train_end'):
                self.get_model().on_train_end()

        if self.logger is not None:
            self.logger.finalize("success")

        # summarize profile results
        if self.global_rank == 0:
            self.profiler.describe()

        if self.global_rank == 0:
            for proc in self.interactive_ddp_procs:
                subprocess.Popen.kill(proc)

        # clean up dist group
        if self.use_ddp or self.use_ddp2:
            torch_distrib.destroy_process_group() 
Developer: PyTorchLightning, Project: pytorch-lightning, Lines: 30, Source: training_loop.py

Example 13: test_sync_reduce_ddp

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def test_sync_reduce_ddp():
    """Make sure sync-reduce works with DDP"""
    tutils.reset_seed()
    tutils.set_random_master_port()

    worldsize = 2
    mp.spawn(_ddp_test_fn, args=(worldsize,), nprocs=worldsize)

    # dist.destroy_process_group() 
Developer: PyTorchLightning, Project: pytorch-lightning, Lines: 11, Source: test_converters.py

Example 14: test_tensor_metric_ddp

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def test_tensor_metric_ddp():
    tutils.reset_seed()
    tutils.set_random_master_port()

    world_size = 2
    mp.spawn(_ddp_test_tensor_metric, args=(world_size,), nprocs=world_size)
    # dist.destroy_process_group() 
Developer: PyTorchLightning, Project: pytorch-lightning, Lines: 9, Source: test_converters.py

Example 15: test_numpy_metric_ddp

# Required import: from torch import distributed [as alias]
# Or: from torch.distributed import destroy_process_group [as alias]
def test_numpy_metric_ddp():
    tutils.reset_seed()
    tutils.set_random_master_port()
    world_size = 2
    mp.spawn(_ddp_test_numpy_metric, args=(world_size,), nprocs=world_size)
    # dist.destroy_process_group() 
Developer: PyTorchLightning, Project: pytorch-lightning, Lines: 8, Source: test_converters.py
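In Examples 13-15 the destroy_process_group call in the parent is commented out: the process group is created inside each spawned worker, so teardown belongs inside the worker as well. A hedged sketch of what a worker like _ddp_test_fn plausibly does (the all_reduce body is an assumption):

# Hedged sketch of a spawned worker such as _ddp_test_fn; the reduction
# body is an assumption, but the init/destroy placement mirrors the tests.
import os
import torch
import torch.distributed as dist

def _ddp_test_fn(rank, worldsize):
    # In the real tests, MASTER_ADDR/MASTER_PORT are set by tutils helpers.
    os.environ.setdefault("MASTER_ADDR", "localhost")
    os.environ.setdefault("MASTER_PORT", "29501")
    dist.init_process_group("gloo", rank=rank, world_size=worldsize)
    t = torch.tensor([float(rank)])
    dist.all_reduce(t)  # sums across ranks by default
    assert t.item() == sum(range(worldsize))
    dist.destroy_process_group()  # teardown happens in the worker, not the parent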


Note: the torch.distributed.destroy_process_group examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors; consult each project's license before distributing or using the code. Do not reproduce this article without permission.