This page collects typical usage examples of the Python method chainermn.create_communicator, gathered from open-source projects. If you are unsure what chainermn.create_communicator does, how to call it, or where it is typically used, the curated examples below may help. You can also explore further usage examples of the chainermn module that this method belongs to.
The following shows 15 code examples of chainermn.create_communicator.
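Before diving into the examples, here is a minimal sketch of the method itself (not taken from any of the projects below): chainermn.create_communicator takes a communicator name such as 'naive', 'flat', or 'pure_nccl' and returns a communicator object exposing rank, size, and intra_rank. The script must be launched through MPI.

# Minimal usage sketch; run under MPI, e.g. `mpiexec -n 2 python demo.py`.
# 'naive' is the CPU-only communicator; GPU setups usually pick 'flat',
# 'hierarchical', or 'pure_nccl'.
import chainermn

comm = chainermn.create_communicator('naive')
print('rank {} of {} (intra_rank {})'.format(
    comm.rank, comm.size, comm.intra_rank))
comm.finalize()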
Example 1: setUp
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setUp(self):
    if self.iterator_class == chainer.iterators.MultiprocessIterator and \
            int(platform.python_version_tuple()[0]) < 3:
        pytest.skip('This test requires Python version >= 3')

    self.communicator = chainermn.create_communicator('naive')
    if self.communicator.size < 2:
        pytest.skip('This test is for multinode only')

    self.N = 100
    if self.paired_dataset:
        self.dataset = list(zip(
            np.arange(self.N).astype(np.float32),
            np.arange(self.N).astype(np.float32)))
    else:
        self.dataset = np.arange(self.N).astype(np.float32)
Example 2: check_send_recv
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def check_send_recv(param, use_gpu, use_chx=False):
    communicator = create_communicator(param, use_gpu, use_chx)
    assert mpi_comm.Get_rank() == communicator.rank
    assert mpi_comm.Get_size() == communicator.size

    check_send_and_recv(communicator, 50)
    check_send_and_recv(communicator, 50, 20)
    check_send_and_recv(communicator, 50, 20, 5)
    check_send_and_recv(communicator, 50, 20, 5, 3)

    data = [np.ones((50)).astype(np.float32)]
    check_send_and_recv_tuple(communicator, data)

    data = [
        np.ones((50)).astype(np.float32),
        np.ones((50, 20)).astype(np.float32),
        np.ones((50, 20, 5)).astype(np.float32)]
    check_send_and_recv_tuple(communicator, data)

    communicator.finalize()
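Note that create_communicator in Example 2 is a local test helper wrapping chainermn.create_communicator, not the library function itself. A hypothetical sketch of such a helper, purely for illustration (the attribute param.communicator_name and the device handling are assumptions, not taken from the original project):

# Hypothetical helper in the spirit of Example 2; the test parameter is
# assumed to carry the communicator name, and use_gpu binds the local GPU.
# use_chx would select a ChainerX device; it is ignored in this sketch.
import chainer
import chainermn

def create_communicator(param, use_gpu, use_chx=False):
    communicator = chainermn.create_communicator(param.communicator_name)
    if use_gpu:
        chainer.cuda.get_device_from_id(communicator.intra_rank).use()
    return communicator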
Example 3: setup
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setup(self, gpu):
    if gpu:
        self.communicator = chainermn.create_communicator('flat')
        self.device = self.communicator.intra_rank
        chainer.cuda.get_device_from_id(self.device).use()
    else:
        self.communicator = chainermn.create_communicator('naive')
        self.device = -1

    if self.communicator.size != 2:
        pytest.skip('This test is for two processes')

    # dtypes to be tested
    # DO NOT USE chainer.testing.parameterize
    # (because running order of generated test cases is not deterministic)
    self.dtypes = [np.int32, np.int64, np.float32, np.float64]
Example 4: test_smoke_wrapper
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def test_smoke_wrapper():
    rs = [[0, 1], ]
    comm = create_communicator('naive')
    if comm.size < 2:
        pytest.skip()

    snapshot = extensions.snapshot()
    filename = '{}.{}'.format(snapshot.filename, comm.rank)
    replica_sets = rs
    mn_snapshot = multi_node_snapshot(comm, snapshot, replica_sets)
    if comm.rank == 0:
        assert mn_snapshot.is_master
        assert filename == mn_snapshot.snapshot.filename
    elif comm.rank == 1:
        assert not mn_snapshot.is_master
    elif comm.rank == 2:
        assert mn_snapshot.is_master
        assert filename == mn_snapshot.snapshot.filename
    else:
        assert not mn_snapshot.is_master

    comm.finalize()
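For context, replica_sets = [[0, 1]] groups ranks 0 and 1 into one replica set, and the remaining ranks fall into an implicit trailing set; the lowest rank of each set becomes its master, which is consistent with the assertions above (ranks 0 and 2 are masters). In a real training script the wrapper is registered like any other extension; a minimal hedged sketch (trainer is an assumed, already-built chainer.training.Trainer):

# Hedged sketch: only the master rank of each replica set writes snapshots.
from chainer.training import extensions
import chainermn
from chainermn.extensions import multi_node_snapshot

comm = chainermn.create_communicator('naive')
mn_snapshot = multi_node_snapshot(comm, extensions.snapshot(),
                                  replica_sets=[[0, 1]])
trainer.extend(mn_snapshot, trigger=(1, 'epoch'))  # `trainer` assumed built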
Example 5: setup_communicator
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setup_communicator(gpu):
    if gpu:
        communicator = chainermn.create_communicator('flat')
        chainer.backends.cuda.get_device_from_id(
            communicator.intra_rank).use()
    else:
        communicator = chainermn.create_communicator('naive')

    if communicator.size < 2:
        pytest.skip('This test is for multinode only')

    rank_next = communicator.rank + 1
    rank_prev = communicator.rank - 1
    if rank_prev < 0:
        rank_prev = None
    if rank_next >= communicator.size:
        rank_next = None
    return communicator, rank_prev, rank_next
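Example 5 (and Example 6 below) wires the processes into a line: each rank knows its rank_prev and rank_next neighbour, with None at the endpoints. A minimal sketch of passing data along that line with the communicator's point-to-point send/recv (the relay ordering is an illustrative assumption, not taken from the original tests):

# Hedged sketch: relay an array from rank 0 down the line built by
# setup_communicator; each rank receives from its predecessor first,
# then forwards to its successor, so the pipeline cannot deadlock.
import numpy as np

communicator, rank_prev, rank_next = setup_communicator(gpu=False)
x = np.arange(10, dtype=np.float32)  # payload originates on rank 0
if rank_prev is not None:
    x = communicator.recv(source=rank_prev, tag=0)
if rank_next is not None:
    communicator.send(x, dest=rank_next, tag=0)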
Example 6: check_crossing_model
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def check_crossing_model(gpu, param):
    communicator, rank_next, rank_prev = create_communicator(gpu)
    n, d = 100, 10
    X = np.random.randn(n, d).astype(param.dtype)
    Y = (np.random.rand(n) * 2).astype(np.int32)

    with chainer.using_config('dtype', param.dtype):
        if communicator.rank == 0:
            model = L.Classifier(Cross0(
                d, communicator, rank_next, rank_prev))
        else:
            model = L.Classifier(Cross1(
                d, communicator, rank_next, rank_prev))

        if gpu:
            model.to_device(cupy.cuda.Device())
            X = chainer.backends.cuda.to_gpu(X)
            Y = chainer.backends.cuda.to_gpu(Y)

        for i in range(n):
            err = model(X[i:i + 1], Y[i:i + 1])
            err.backward()
Example 7: setup_gpu
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setup_gpu(self, use_chx=False):
    self.comm = chainermn.create_communicator('flat')
    self.target = ExampleModel()
    self.device = chainermn.testing.get_device(self.comm.intra_rank,
                                               use_chx)
    chainer.cuda.get_device_from_id(self.comm.intra_rank).use()
    self.target.to_device(self.device)
    self.target.a.W.data[:] = self.comm.rank
    self.target.b.W.data[:] = self.comm.rank + 1
    self.target.c.W.data[:] = self.comm.rank + 2
    self.target.a.W.grad[:] = 0
    self.target.b.W.grad[:] = 0
    self.target.c.W.grad[:] = 0
    self.actual_optimizer = chainer.GradientMethod()
    self.actual_optimizer.create_update_rule = mock.MagicMock
Example 8: setup
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setup(self, batched_copy):
    if nccl.get_build_version() < 2000:
        pytest.skip('This test requires NCCL version >= 2.0')
    self.comm = chainermn.create_communicator('pure_nccl',
                                              batched_copy=batched_copy)
    device = self.comm.intra_rank
    chainer.cuda.get_device_from_id(device).use()
    self.target = ExampleModel()
    self.target.to_device(cupy.cuda.Device())
    self.target.a.W.data[:] = self.comm.rank
    self.target.b.W.data[:] = self.comm.rank + 1
    self.target.c.W.data[:] = self.comm.rank + 2
    self.target.a.W.grad[:] = 0
    self.target.b.W.grad[:] = 0
    self.target.c.W.grad[:] = 0
    self.actual_optimizer = chainer.GradientMethod()
    self.actual_optimizer.create_update_rule = mock.MagicMock
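Examples 7 and 8 prepare a model and a bare GradientMethod for testing multi-node optimizers. Outside of tests, the corresponding production pattern wraps an ordinary Chainer optimizer so that gradients are all-reduced before each update; a minimal sketch (ExampleModel stands in for any chainer.Chain):

# Hedged sketch: wrap a standard optimizer with ChainerMN so that every
# update first averages gradients across all workers.
import chainer
import chainermn

comm = chainermn.create_communicator('pure_nccl')
chainer.cuda.get_device_from_id(comm.intra_rank).use()
model = ExampleModel()  # any chainer.Chain
optimizer = chainermn.create_multi_node_optimizer(
    chainer.optimizers.Adam(), comm)
optimizer.setup(model)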
Example 9: setUp
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setUp(self):
    self.comm = create_communicator('naive')
    batchsize_per_process = 5
    batchsize = batchsize_per_process * self.comm.size

    if self.comm.rank == 0:
        masks = [np.random.uniform(size=(5, 32, 48)) > 0.5
                 for _ in range(10)]
        labels = [np.random.choice(np.arange(3, dtype=np.int32), size=(5,))
                  for _ in range(10)]
    else:
        masks = None
        labels = None

    initial_count = self.comm.rank * batchsize_per_process

    masks = self.comm.bcast_obj(masks)
    labels = self.comm.bcast_obj(labels)
    self.masks = masks
    self.labels = labels

    self.dataset = TupleDataset(
        np.random.uniform(size=(10, 3, 32, 48)),
        masks, labels)
    self.initial_count = initial_count
    self.batchsize = batchsize
Example 10: setUp
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setUp(self):
    self.comm = create_communicator('naive')
    batchsize_per_process = 5
    batchsize = batchsize_per_process * self.comm.size

    if self.comm.rank == 0:
        labels = [np.random.choice(
            np.arange(3, dtype=np.int32), size=(32, 48))
            for _ in range(10)]
    else:
        labels = None

    initial_count = self.comm.rank * batchsize_per_process

    labels = self.comm.bcast_obj(labels)
    self.labels = labels

    self.dataset = TupleDataset(
        np.random.uniform(size=(10, 3, 32, 48)),
        labels)
    self.initial_count = initial_count
    self.batchsize = batchsize
Example 11: setUp
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setUp(self):
    self.x = np.random.uniform(
        -1, 1, (5, self.in_channels, 5, 5)).astype(np.float32)
    self.gy = np.random.uniform(
        -1, 1, (5, self.out_channels, 5, 5)).astype(np.float32)

    # Convolution is the identity function.
    initialW = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                        dtype=np.float32).reshape((1, 1, 3, 3))
    bn_kwargs = {'decay': 0.8, 'comm': create_communicator('naive')}
    initial_bias = 0
    activ = relu
    self.l = Conv2DBNActiv(
        self.in_channels, self.out_channels, self.ksize, self.stride,
        self.pad, self.dilate, initialW=initialW,
        initial_bias=initial_bias, activ=activ, bn_kwargs=bn_kwargs)
Example 12: setUp
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setUp(self):
    self.x = np.random.uniform(
        -1, 1, (5, self.in_channels, 5, 5)).astype(np.float32)
    self.gy = np.random.uniform(
        -1, 1, (5, self.out_channels, 5, 5)).astype(np.float32)

    # Convolution is the identity function.
    initialW = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                        dtype=np.float32).reshape((1, 1, 3, 3))
    bn_kwargs = {'decay': 0.8, 'comm': create_communicator('naive')}
    initial_bias = 0
    activ = relu
    self.l = TFConv2DBNActiv(
        self.in_channels, self.out_channels, self.ksize, self.stride,
        self.pad, self.dilate, initialW=initialW,
        initial_bias=initial_bias, activ=activ, bn_kwargs=bn_kwargs)
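In Examples 11 and 12, passing a communicator through bn_kwargs makes the batch-normalization layer inside the block aggregate statistics across all processes rather than per GPU. The underlying link is chainermn's multi-node batch normalization; a minimal hedged sketch (the channel count 16 is an arbitrary placeholder):

# Hedged sketch: batch statistics (mean/variance) are all-reduced over
# `comm`, so small per-process batches still normalize stably.
import chainermn
import chainermn.links

comm = chainermn.create_communicator('naive')
bn = chainermn.links.MultiNodeBatchNormalization(size=16, comm=comm,
                                                 decay=0.8)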
Example 13: __init__
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def __init__(self, use_mpi):
    self.use_mpi = use_mpi

    # Setup
    if self.use_mpi:
        if not mpi_available:
            raise RuntimeError(
                'ChainerMN required for MPI but cannot be imported. Abort.')
        comm = chainermn.create_communicator(FLAGS.comm_name)
        if comm.mpi_comm.rank == 0:
            print('==========================================')
            print('Num process (COMM_WORLD): {}'.format(
                MPI.COMM_WORLD.Get_size()))
            print('Communicator name: {}'.format(FLAGS.comm_name))
            print('==========================================')
        fleet_size = MPI.COMM_WORLD.Get_size()
        device = comm.intra_rank
    else:
        fleet_size = 1
        comm = None
        device = FLAGS.gpu
    self.fleet_size, self.comm, self.device = fleet_size, comm, device
    self.is_master = is_master = not self.use_mpi or comm.rank == 0

    # Early works
    if is_master:
        record_setting(FLAGS.out)

    # Show effective hps
    effective_hps = {
        'is_master': self.is_master,
        'stage_interval': self.stage_interval,
        'dynamic_batch_size': self.dynamic_batch_size
    }
    self.print_log('Effective hps: {}'.format(effective_hps))
Example 14: setUp
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setUp(self):
    self.communicator = chainermn.create_communicator('naive')
    if self.communicator.size < 2:
        pytest.skip('This test is for multinode only')

    N = 100
    self.dataset = np.arange(N).astype(np.float32)
Example 15: setUp
# Required module: import chainermn [as alias]
# Or: from chainermn import create_communicator [as alias]
def setUp(self):
    if self.iterator_class == chainer.iterators.MultiprocessIterator and \
            int(platform.python_version_tuple()[0]) < 3:
        pytest.skip('This test requires Python version >= 3')

    self.communicator = chainermn.create_communicator('naive')
    if self.communicator.size < 2:
        pytest.skip('This test is for multinode only')

    self.N = 6
    self.dataset = numpy.arange(self.N).astype(numpy.float32)
    self.bs = 2
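Finally, datasets like those built in Examples 1, 14, and 15 are typically distributed with chainermn.scatter_dataset, which splits the data held on rank 0 across all workers; a minimal hedged sketch:

# Hedged sketch: rank 0's dataset is partitioned and scattered; each
# worker then iterates over its own shard.
import numpy as np
import chainer
import chainermn

comm = chainermn.create_communicator('naive')
dataset = np.arange(100).astype(np.float32)
sub_dataset = chainermn.scatter_dataset(dataset, comm, shuffle=True)
iterator = chainer.iterators.SerialIterator(sub_dataset, batch_size=2)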