

Python chainermn.create_communicator Method Code Examples

This article collects typical usage examples of the chainermn.create_communicator method in Python. If you are wondering how chainermn.create_communicator is used in practice, or are looking for concrete examples of calling it, the curated snippets below may help. You can also explore further usage examples from the chainermn module, where this method is defined.


The following presents 15 code examples of chainermn.create_communicator, sorted by popularity by default.
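Before diving into the collected examples, here is a minimal usage sketch that is not taken from any of the projects below. It assumes ChainerMN is installed with a working MPI backend and that the script is launched with more than one process (for example via mpiexec); it creates a communicator, queries the rank and size, broadcasts an object from rank 0, and finalizes, mirroring the patterns that recur in the examples that follow.

import chainermn
import numpy as np

# Create a CPU ('naive') communicator; 'flat' and 'pure_nccl' are the
# GPU-oriented communicator names used in several examples below.
comm = chainermn.create_communicator('naive')

# Each MPI process has its own rank; size is the total number of processes.
print('rank {} of {}'.format(comm.rank, comm.size))

# Broadcast a picklable Python object from rank 0 to all other ranks,
# as the chainercv test fixtures below do with bcast_obj.
data = np.arange(10, dtype=np.float32) if comm.rank == 0 else None
data = comm.bcast_obj(data)

# Release the communicator's MPI resources when done.
comm.finalize()

Run it under MPI, e.g. mpiexec -n 2 python example.py; with a single process comm.size is 1, which is why most of the multi-node tests below call pytest.skip when comm.size < 2.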

Example 1: setUp

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setUp(self):
        if self.iterator_class == chainer.iterators.MultiprocessIterator and \
                int(platform.python_version_tuple()[0]) < 3:
            pytest.skip('This test requires Python version >= 3')
        self.communicator = chainermn.create_communicator('naive')

        if self.communicator.size < 2:
            pytest.skip('This test is for multinode only')

        self.N = 100
        if self.paired_dataset:
            self.dataset = list(zip(
                np.arange(self.N).astype(np.float32),
                np.arange(self.N).astype(np.float32)))
        else:
            self.dataset = np.arange(self.N).astype(np.float32) 
Developer: chainer, Project: chainer, Lines of code: 18, Source: test_multi_node_iterator.py

Example 2: check_send_recv

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def check_send_recv(param, use_gpu, use_chx=False):
    communicator = create_communicator(param, use_gpu, use_chx)

    assert mpi_comm.Get_rank() == communicator.rank
    assert mpi_comm.Get_size() == communicator.size

    check_send_and_recv(communicator, 50)
    check_send_and_recv(communicator, 50, 20)

    check_send_and_recv(communicator, 50, 20, 5)
    check_send_and_recv(communicator, 50, 20, 5, 3)

    data = [np.ones((50)).astype(np.float32)]
    check_send_and_recv_tuple(communicator, data)

    data = [
        np.ones((50)).astype(np.float32),
        np.ones((50, 20)).astype(np.float32),
        np.ones((50, 20, 5)).astype(np.float32)]
    check_send_and_recv_tuple(communicator, data)

    communicator.finalize() 
Developer: chainer, Project: chainer, Lines of code: 24, Source: test_communicator.py

Example 3: setup

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setup(self, gpu):
        if gpu:
            self.communicator = chainermn.create_communicator('flat')
            self.device = self.communicator.intra_rank
            chainer.cuda.get_device_from_id(self.device).use()
        else:
            self.communicator = chainermn.create_communicator('naive')
            self.device = -1

        if self.communicator.size != 2:
            pytest.skip('This test is for two processes')

        # dtypes to be tested
        # DO NOT USE chainer.testing.parameterize
        # (because running order of generated test cases is not deterministic)
        self.dtypes = [np.int32, np.int64, np.float32, np.float64] 
Developer: chainer, Project: chainer, Lines of code: 18, Source: test_communicator.py

Example 4: test_smoke_wrapper

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def test_smoke_wrapper():
    rs = [[0, 1], ]
    comm = create_communicator('naive')
    if comm.size < 2:
        pytest.skip()

    snapshot = extensions.snapshot()
    filename = '{}.{}'.format(snapshot.filename, comm.rank)

    replica_sets = rs
    mn_snapshot = multi_node_snapshot(comm, snapshot, replica_sets)
    if comm.rank == 0:
        assert mn_snapshot.is_master
        assert filename == mn_snapshot.snapshot.filename
    elif comm.rank == 1:
        assert not mn_snapshot.is_master
    elif comm.rank == 2:
        assert mn_snapshot.is_master
        assert filename == mn_snapshot.snapshot.filename
    else:
        assert not mn_snapshot.is_master

    comm.finalize() 
Developer: chainer, Project: chainer, Lines of code: 25, Source: test_multi_node_snapshot.py

Example 5: setup_communicator

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setup_communicator(gpu):
    if gpu:
        communicator = chainermn.create_communicator('flat')
        chainer.backends.cuda.get_device_from_id(
            communicator.intra_rank).use()
    else:
        communicator = chainermn.create_communicator('naive')

    if communicator.size < 2:
        pytest.skip('This test is for multinode only')

    rank_next = communicator.rank + 1
    rank_prev = communicator.rank - 1

    if rank_prev < 0:
        rank_prev = None

    if rank_next >= communicator.size:
        rank_next = None

    return communicator, rank_prev, rank_next 
Developer: chainer, Project: chainer, Lines of code: 23, Source: test_n_step_rnn.py

Example 6: check_crossing_model

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def check_crossing_model(gpu, param):
    communicator, rank_next, rank_prev = create_communicator(gpu)

    n, d = 100, 10
    X = np.random.randn(n, d).astype(param.dtype)
    Y = (np.random.rand(n) * 2).astype(np.int32)

    with chainer.using_config('dtype', param.dtype):
        if communicator.rank == 0:
            model = L.Classifier(Cross0(
                d, communicator, rank_next, rank_prev))
        else:
            model = L.Classifier(Cross1(
                d, communicator, rank_next, rank_prev))

        if gpu:
            model.to_device(cupy.cuda.Device())
            X = chainer.backends.cuda.to_gpu(X)
            Y = chainer.backends.cuda.to_gpu(Y)

        for i in range(n):
            err = model(X[i:i + 1], Y[i:i + 1])
            err.backward() 
Developer: chainer, Project: chainer, Lines of code: 25, Source: test_multi_node_chain_list.py

Example 7: setup_gpu

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setup_gpu(self, use_chx=False):
        self.comm = chainermn.create_communicator('flat')
        self.target = ExampleModel()
        self.device = chainermn.testing.get_device(self.comm.intra_rank,
                                                   use_chx)
        chainer.cuda.get_device_from_id(self.comm.intra_rank).use()
        self.target.to_device(self.device)

        self.target.a.W.data[:] = self.comm.rank
        self.target.b.W.data[:] = self.comm.rank + 1
        self.target.c.W.data[:] = self.comm.rank + 2
        self.target.a.W.grad[:] = 0
        self.target.b.W.grad[:] = 0
        self.target.c.W.grad[:] = 0
        self.actual_optimizer = chainer.GradientMethod()
        self.actual_optimizer.create_update_rule = mock.MagicMock 
Developer: chainer, Project: chainer, Lines of code: 18, Source: test_multi_node_optimizer.py

Example 8: setup

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setup(self, batched_copy):
        if nccl.get_build_version() < 2000:
            pytest.skip('This test requires NCCL version >= 2.0')
        self.comm = chainermn.create_communicator('pure_nccl',
                                                  batched_copy=batched_copy)
        device = self.comm.intra_rank
        chainer.cuda.get_device_from_id(device).use()
        self.target = ExampleModel()
        self.target.to_device(cupy.cuda.Device())
        self.target.a.W.data[:] = self.comm.rank
        self.target.b.W.data[:] = self.comm.rank + 1
        self.target.c.W.data[:] = self.comm.rank + 2
        self.target.a.W.grad[:] = 0
        self.target.b.W.grad[:] = 0
        self.target.c.W.grad[:] = 0
        self.actual_optimizer = chainer.GradientMethod()
        self.actual_optimizer.create_update_rule = mock.MagicMock 
Developer: chainer, Project: chainer, Lines of code: 19, Source: test_double_buffering_optimizer.py

Example 9: setUp

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setUp(self):
        self.comm = create_communicator('naive')

        batchsize_per_process = 5
        batchsize = batchsize_per_process * self.comm.size
        if self.comm.rank == 0:
            masks = [np.random.uniform(size=(5, 32, 48)) > 0.5
                     for _ in range(10)]
            labels = [np.random.choice(np.arange(3, dtype=np.int32), size=(5,))
                      for _ in range(10)]
        else:
            masks = None
            labels = None
        initial_count = self.comm.rank * batchsize_per_process

        masks = self.comm.bcast_obj(masks)
        labels = self.comm.bcast_obj(labels)
        self.masks = masks
        self.labels = labels

        self.dataset = TupleDataset(
            np.random.uniform(size=(10, 3, 32, 48)),
            masks, labels)
        self.initial_count = initial_count
        self.batchsize = batchsize 
Developer: chainer, Project: chainercv, Lines of code: 27, Source: test_instance_segmentation_voc_evaluator.py

Example 10: setUp

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setUp(self):
        self.comm = create_communicator('naive')

        batchsize_per_process = 5
        batchsize = batchsize_per_process * self.comm.size
        if self.comm.rank == 0:
            labels = [np.random.choice(
                np.arange(3, dtype=np.int32), size=(32, 48))
                for _ in range(10)]
        else:
            labels = None
        initial_count = self.comm.rank * batchsize_per_process

        labels = self.comm.bcast_obj(labels)
        self.labels = labels

        self.dataset = TupleDataset(
            np.random.uniform(size=(10, 3, 32, 48)),
            labels)
        self.initial_count = initial_count
        self.batchsize = batchsize 
Developer: chainer, Project: chainercv, Lines of code: 23, Source: test_semantic_segmentation_evaluator.py

Example 11: setUp

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setUp(self):
        self.x = np.random.uniform(
            -1, 1, (5, self.in_channels, 5, 5)).astype(np.float32)
        self.gy = np.random.uniform(
            -1, 1, (5, self.out_channels, 5, 5)).astype(np.float32)

        # Convolution is the identity function.
        initialW = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                            dtype=np.float32).reshape((1, 1, 3, 3))
        bn_kwargs = {'decay': 0.8, 'comm': create_communicator('naive')}
        initial_bias = 0
        activ = relu
        self.l = Conv2DBNActiv(
            self.in_channels, self.out_channels, self.ksize, self.stride,
            self.pad, self.dilate, initialW=initialW,
            initial_bias=initial_bias, activ=activ, bn_kwargs=bn_kwargs) 
Developer: chainer, Project: chainercv, Lines of code: 18, Source: test_conv_2d_bn_activ.py

Example 12: setUp

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setUp(self):
        self.x = np.random.uniform(
            -1, 1, (5, self.in_channels, 5, 5)).astype(np.float32)
        self.gy = np.random.uniform(
            -1, 1, (5, self.out_channels, 5, 5)).astype(np.float32)

        # Convolution is the identity function.
        initialW = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                            dtype=np.float32).reshape((1, 1, 3, 3))
        bn_kwargs = {'decay': 0.8, 'comm': create_communicator('naive')}
        initial_bias = 0
        activ = relu
        self.l = TFConv2DBNActiv(
            self.in_channels, self.out_channels, self.ksize, self.stride,
            self.pad, self.dilate, initialW=initialW,
            initial_bias=initial_bias, activ=activ, bn_kwargs=bn_kwargs) 
Developer: chainer, Project: chainercv, Lines of code: 18, Source: test_tf_conv_2d_bn_activ.py

Example 13: __init__

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def __init__(self, use_mpi):
        self.use_mpi = use_mpi

        # Setup
        if self.use_mpi:
            if not mpi_available:
                raise RuntimeError('ChainerMN required for MPI but cannot be imported. Abort.')
            comm = chainermn.create_communicator(FLAGS.comm_name)
            if comm.mpi_comm.rank == 0:
                print('==========================================')
                print('Num process (COMM_WORLD): {}'.format(MPI.COMM_WORLD.Get_size()))
                print('Communicator name: {}'.format(FLAGS.comm_name))
                print('==========================================')
            fleet_size = MPI.COMM_WORLD.Get_size()
            device = comm.intra_rank
        else:
            fleet_size = 1
            comm = None
            device = FLAGS.gpu

        self.fleet_size, self.comm, self.device = fleet_size, comm, device

        self.is_master = is_master = not self.use_mpi or (self.use_mpi and comm.rank == 0)

        # Early works
        if is_master:
            record_setting(FLAGS.out)

        # Show effective hps
        effective_hps = {
            'is_master': self.is_master,
            'stage_interval': self.stage_interval,
            'dynamic_batch_size': self.dynamic_batch_size
        }
        self.print_log('Effective hps: {}'.format(effective_hps)) 
Developer: pfnet-research, Project: chainer-stylegan, Lines of code: 37, Source: train.py

Example 14: setUp

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setUp(self):
        self.communicator = chainermn.create_communicator('naive')

        if self.communicator.size < 2:
            pytest.skip("This test is for multinode only")

        N = 100
        self.dataset = np.arange(N).astype(np.float32) 
Developer: chainer, Project: chainer, Lines of code: 10, Source: test_synchronized_iterator.py

Example 15: setUp

# Required import: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setUp(self):
        if self.iterator_class == chainer.iterators.MultiprocessIterator and \
                int(platform.python_version_tuple()[0]) < 3:
            pytest.skip('This test requires Python version >= 3')
        self.communicator = chainermn.create_communicator('naive')

        if self.communicator.size < 2:
            pytest.skip('This test is for multinode only')

        self.N = 6
        self.dataset = numpy.arange(self.N).astype(numpy.float32)
        self.bs = 2 
Developer: chainer, Project: chainer, Lines of code: 14, Source: test_iterator_compatibility.py


Note: The chainermn.create_communicator examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; when redistributing or using the code, please follow the license of the corresponding project. Do not reproduce this compilation without permission.