本文整理匯總了Python中chainermn.create_multi_node_optimizer方法的典型用法代碼示例。如果您正苦於以下問題:Python chainermn.create_multi_node_optimizer方法的具體用法?Python chainermn.create_multi_node_optimizer怎麽用?Python chainermn.create_multi_node_optimizer使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類chainermn
的用法示例。
在下文中一共展示了chainermn.create_multi_node_optimizer方法的12個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: __init__
# 需要導入模塊: import chainermn [as 別名]
# 或者: from chainermn import create_multi_node_optimizer [as 別名]
def __init__(
        self,
        model_parameters,
        # Learning rate annealed from initial_lr to final_lr over
        # annealing_steps training steps.
        initial_lr=1e-4,
        final_lr=1e-5,
        annealing_steps=1600000,
        # Exponential decay rates for Adam's moment estimates.
        # (The original comment mislabeled these as a "learning rate".)
        beta_1=0.9,
        beta_2=0.99,
        # Adam regularisation parameter
        eps=1e-8,
        initial_training_step=0,
        communicator=None):
    """Set up an annealed Adam optimizer, optionally wrapped for multi-node use.

    Args:
        model_parameters: Chainer link whose parameters will be optimized.
        initial_lr: Learning rate at training step 0.
        final_lr: Learning rate reached after ``annealing_steps`` steps.
        annealing_steps: Number of steps over which the rate is annealed.
        beta_1: Adam first-moment decay rate.
        beta_2: Adam second-moment decay rate.
        eps: Adam numerical-stability constant.
        initial_training_step: Step index used to compute the starting rate.
        communicator: Optional ChainerMN communicator. When given, the Adam
            optimizer is additionally wrapped by
            ``chainermn.create_multi_node_optimizer`` and exposed as
            ``self.multi_node_optimizer``; otherwise that attribute is None.
    """
    self.initial_lr = initial_lr
    self.final_lr = final_lr
    self.annealing_steps = annealing_steps
    self.beta_1 = beta_1
    self.beta_2 = beta_2
    self.eps = eps
    # compute_lr_at_step is defined elsewhere on this class; presumably it
    # maps a step index to the annealed learning rate -- confirm there.
    lr = self.compute_lr_at_step(initial_training_step)
    self.optimizer = optimizers.Adam(
        lr, beta1=beta_1, beta2=beta_2, eps=eps)
    self.optimizer.setup(model_parameters)
    self.multi_node_optimizer = None
    if communicator:
        self.multi_node_optimizer = chainermn.create_multi_node_optimizer(
            self.optimizer, communicator)
示例2: make_optimizer
# 需要導入模塊: import chainermn [as 別名]
# 或者: from chainermn import create_multi_node_optimizer [as 別名]
def make_optimizer(self, model, alpha, beta1, beta2):
    """Create an Adam optimizer for ``model``, MPI-wrapped when enabled.

    Args:
        model: Chainer link to optimize.
        alpha: Adam step-size parameter.
        beta1: Adam first-moment decay rate.
        beta2: Adam second-moment decay rate.

    Returns:
        The configured optimizer (a multi-node optimizer when
        ``self.use_mpi`` is true).
    """
    # Fix: the log message previously misspelled "alpha" as "alpah".
    self.print_log('Use Adam Optimizer with alpha = {}, beta1 = {}, beta2 = {}'.format(alpha, beta1, beta2))
    optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1, beta2=beta2)
    if self.use_mpi:
        self.print_log('Use Optimizer with MPI')
        optimizer = chainermn.create_multi_node_optimizer(optimizer, self.comm)
    optimizer.setup(model)
    return optimizer
示例3: make_adam
# 需要導入模塊: import chainermn [as 別名]
# 或者: from chainermn import create_multi_node_optimizer [as 別名]
def make_adam(model, lr=0.0002, beta1=0.9, beta2=0.999):
    """Return an Adam optimizer set up on ``model``.

    When the ``chainer.config.using_chainermn`` flag is set, the optimizer
    is wrapped with ChainerMN using ``chainer.config.communicator`` so that
    gradients are synchronized across workers.
    """
    adam = chainer.optimizers.Adam(alpha=lr, beta1=beta1, beta2=beta2)
    # Distributed training is toggled through chainer's config object.
    if chainer.config.using_chainermn:
        adam = chainermn.create_multi_node_optimizer(adam, chainer.config.communicator)
    adam.setup(model)
    return adam
示例4: make_optim_adam
# 需要導入模塊: import chainermn [as 別名]
# 或者: from chainermn import create_multi_node_optimizer [as 別名]
def make_optim_adam(model, lr=0.0002, beta1=0.9, beta2=0.999):
    """Return an ``OptimAdam`` optimizer set up on ``model``.

    Mirrors ``make_adam`` but uses the project's ``OptimAdam`` class; the
    ChainerMN wrapping is driven by the same config flags.
    """
    adam = OptimAdam(alpha=lr, beta1=beta1, beta2=beta2)
    # Distributed training is toggled through chainer's config object.
    if chainer.config.using_chainermn:
        adam = chainermn.create_multi_node_optimizer(adam, chainer.config.communicator)
    adam.setup(model)
    return adam
示例5: _prepare_multinode_snapshot
# 需要導入模塊: import chainermn [as 別名]
# 或者: from chainermn import create_multi_node_optimizer [as 別名]
def _prepare_multinode_snapshot(n, result):
    """Build a small MNIST trainer with a multi-node snapshot, run ``n`` updates.

    Returns:
        Tuple of ``(updater, mn_snapshot, trainer)`` after ``n`` update steps.
    """
    hidden_units = 100
    minibatch = 10
    comm = create_communicator('naive')
    model = L.Classifier(MLP(hidden_units, 10))
    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.Adam(), comm)
    optimizer.setup(model)
    # Only rank 0 loads the dataset; scatter_dataset then hands every rank
    # its shard.
    if comm.rank == 0:
        train, _ = chainer.datasets.get_mnist()
    else:
        train = None
    train = chainermn.scatter_dataset(train, comm, shuffle=True)
    train_iter = chainer.iterators.SerialIterator(train, minibatch)
    updater = StandardUpdater(train_iter, optimizer)
    trainer = Trainer(updater, out=result)
    snapshot = extensions.snapshot(target=updater, autoload=True)
    # No replica sets: every rank snapshots independently.
    mn_snapshot = multi_node_snapshot(comm, snapshot, [])
    mn_snapshot.initialize(trainer)
    for _ in range(n):
        updater.update()
    return updater, mn_snapshot, trainer
示例6: setup_mnist_trainer
# 需要導入模塊: import chainermn [as 別名]
# 或者: from chainermn import create_multi_node_optimizer [as 別名]
def setup_mnist_trainer(self, display_log=False, use_chx=False):
    """Build the pieces of a distributed MNIST training run.

    Returns:
        Tuple of ``(updater, optimizer, train_iter, test_iter, model)``.
    """
    minibatch = 100
    hidden_units = 100
    comm = self.communicator
    model = L.Classifier(MLP(hidden_units, 10))
    model.to_device(get_device(None, use_chx))
    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.Adam(), comm)
    optimizer.setup(model)
    # Rank 0 loads the full dataset; all other ranks receive their shard
    # via scatter_dataset.
    if comm.rank == 0:
        train, test = chainer.datasets.get_mnist()
    else:
        train, test = None, None
    train = chainermn.scatter_dataset(train, comm, shuffle=True)
    test = chainermn.scatter_dataset(test, comm, shuffle=True)
    train_iter = chainer.iterators.SerialIterator(train, minibatch)
    test_iter = chainer.iterators.SerialIterator(
        test, minibatch, repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer)
    return updater, optimizer, train_iter, test_iter, model
示例7: test_update_with_cpu
# 需要導入模塊: import chainermn [as 別名]
# 或者: from chainermn import create_multi_node_optimizer [as 別名]
def test_update_with_cpu(self):
    """On CPU, all-reduced gradients must equal the rank-averaged values."""
    self.setup_cpu()
    self.optimizer = chainermn.create_multi_node_optimizer(
        self.actual_optimizer, self.comm)
    configured = self.optimizer.setup(self.target)
    assert configured is self.optimizer
    # With no gradients assigned, update() must not advance the step count.
    self.optimizer.update()
    assert self.actual_optimizer.t == 0
    links = (self.optimizer.target.a,
             self.optimizer.target.b,
             self.optimizer.target.c)
    shapes = ((3, 2), (4, 3), (5, 4))
    # Give each link a rank-dependent gradient so the averaged result is
    # predictable.
    for offset, link in enumerate(links):
        link.W.grad[:] = self.comm.rank + offset
    self.optimizer.update()
    assert self.actual_optimizer.t == 1
    for link in links:
        link.W.update_rule.update.assert_called_once_with(link.W)
    # Mean of ranks 0..size-1.
    mean_rank = (self.comm.size - 1.0) / 2
    for offset, (link, shape) in enumerate(zip(links, shapes)):
        chainer.testing.assert_allclose(
            link.W.grad, (mean_rank + offset) * np.ones(shape))
示例8: test_update_with_gpu
# 需要導入模塊: import chainermn [as 別名]
# 或者: from chainermn import create_multi_node_optimizer [as 別名]
def test_update_with_gpu(self, use_chx):
    """On GPU, all-reduced gradients must equal the rank-averaged values."""
    self.setup_gpu(use_chx)
    self.optimizer = chainermn.create_multi_node_optimizer(
        self.actual_optimizer, self.comm)
    configured = self.optimizer.setup(self.target)
    assert configured is self.optimizer
    # With no gradients assigned, update() must not advance the step count.
    self.optimizer.update()
    assert self.actual_optimizer.t == 0
    links = (self.optimizer.target.a,
             self.optimizer.target.b,
             self.optimizer.target.c)
    shapes = ((3, 2), (4, 3), (5, 4))
    # Give each link a rank-dependent gradient so the averaged result is
    # predictable.
    for offset, link in enumerate(links):
        link.W.grad[:] = self.comm.rank + offset
    self.optimizer.update()
    assert self.actual_optimizer.t == 1
    for link in links:
        link.W.update_rule.update.assert_called_once_with(link.W)
    # Mean of ranks 0..size-1.
    mean_rank = (self.comm.size - 1.0) / 2
    for offset, (link, shape) in enumerate(zip(links, shapes)):
        chainer.testing.assert_allclose(
            link.W.grad, (mean_rank + offset) * np.ones(shape))
示例9: run_test_observation_aggregator
# 需要導入模塊: import chainermn [as 別名]
# 或者: from chainermn import create_multi_node_optimizer [as 別名]
def run_test_observation_aggregator(comm, xp,
                                    use_chainer_variable,
                                    communicate_interval,
                                    use_gpu):
    """Train one epoch while aggregating a per-rank observation, and verify
    that the aggregated value equals the mean rank across workers."""
    model = DummyChain()
    if use_gpu:
        # Use CuPy's Device class to force call cudaSetDevice()
        chainer.cuda.get_device_from_id(comm.intra_rank).use()
    device = get_device(comm.intra_rank if use_gpu else None, xp == chainerx)
    # For chainerx, generate the data with NumPy and convert; other
    # backends draw directly from their own random module.
    if xp == chainerx:
        train = xp.array(np.random.rand(10, 1).astype(np.float32))
    else:
        train = xp.random.rand(10, 1).astype(np.float32)
    model.to_device(device)
    train_iter = chainer.iterators.SerialIterator(
        train, batch_size=1, repeat=True, shuffle=True)
    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.Adam(), comm)
    optimizer.setup(model)
    updater = chainer.training.StandardUpdater(
        train_iter, optimizer, device=device)
    trainer = chainer.training.Trainer(updater, (1, 'epoch'))

    @extension.make_extension(
        trigger=(1, 'iteration'), priority=extension.PRIORITY_WRITER)
    def rank_reporter(trainer_):
        # Report this worker's rank so the aggregator can average it.
        value = xp.asarray(comm.rank, dtype=np.float32)
        if use_chainer_variable:
            value = chainer.Variable(value)
        trainer_.observation['rank'] = value

    @extension.make_extension(
        trigger=(communicate_interval, 'iteration'),
        priority=extension.PRIORITY_READER)
    def aggregated_rank_checker(trainer_):
        observed = trainer_.observation['rank-aggregated']
        if use_chainer_variable:
            observed = observed.data
        # Average of ranks 0..size-1.
        expected = (comm.size - 1) / 2
        chainer.testing.assert_allclose(observed, expected)

    trainer.extend(rank_reporter)
    trainer.extend(ObservationAggregator(
        comm, 'rank', 'rank-aggregated',
        comm_trigger=(communicate_interval, 'iteration')))
    trainer.extend(aggregated_rank_checker)
    trainer.run()
示例10: check_update
# 需要導入模塊: import chainermn [as 別名]
# 或者: from chainermn import create_multi_node_optimizer [as 別名]
def check_update(self, batched_copy):
    """With double buffering, verify parameter updates lag communication by
    one step and that communicated gradients are the rank averages."""
    self.setup(batched_copy)
    self.optimizer = chainermn.create_multi_node_optimizer(
        self.actual_optimizer, self.comm, double_buffering=True)
    configured = self.optimizer.setup(self.target)
    assert configured is self.optimizer
    # No gradients yet: the first update must not advance the step count.
    self.optimizer.update()
    self.assertEqual(self.actual_optimizer.t, 0)
    links = (self.optimizer.target.a,
             self.optimizer.target.b,
             self.optimizer.target.c)
    shapes = ((3, 2), (4, 3), (5, 4))
    # Average of ranks 0..size-1.
    mean_rank = (self.comm.size - 1.0) / 2
    # Round 1: rank-dependent gradients. Because of double buffering the
    # step counter still reads 0 even after this update completes.
    for offset, link in enumerate(links):
        link.W.grad[:] = self.comm.rank + offset
    self.optimizer.update()
    self.optimizer.wait()
    self.assertEqual(self.actual_optimizer.t, 0)
    for offset, name in enumerate('abc'):
        comm_link = getattr(self.optimizer.communicated_target, name)
        chainer.testing.assert_allclose(
            comm_link.W.grad, (mean_rank + offset) * np.ones(shapes[offset]))
    # Round 2: fresh gradients (offset by 3). Now the buffered step from
    # round 1 is applied exactly once per parameter.
    for offset, link in enumerate(links):
        link.W.grad[:] = self.comm.rank + offset + 3
    self.optimizer.update()
    self.optimizer.wait()
    self.assertEqual(self.actual_optimizer.t, 1)
    for link in links:
        link.W.update_rule.update.assert_called_once_with(link.W)
    for offset, name in enumerate('abc'):
        comm_link = getattr(self.optimizer.communicated_target, name)
        chainer.testing.assert_allclose(
            comm_link.W.grad,
            (mean_rank + offset + 3) * np.ones(shapes[offset]))
    self.comm.finalize()
示例11: objective
# 需要導入模塊: import chainermn [as 別名]
# 或者: from chainermn import create_multi_node_optimizer [as 別名]
def objective(trial, comm):
    """Optuna objective: train a trial-sampled MNIST model across MPI workers
    with pruning, and return the final validation accuracy."""
    # Sample an architecture for this trial.
    model = L.Classifier(create_model(trial))
    # Momentum SGD, wrapped so gradients are all-reduced across workers.
    optimizer = chainer.optimizers.MomentumSGD()
    optimizer.setup(model)
    optimizer = chainermn.create_multi_node_optimizer(optimizer, comm)
    # Only worker 0 loads MNIST; fixed-seed subsets are then scattered so
    # every rank works on an even share of the data.
    if comm.rank == 0:
        train, valid = chainer.datasets.get_mnist()
        rng = np.random.RandomState(0)
        train = chainer.datasets.SubDataset(
            train, 0, N_TRAIN_EXAMPLES, order=rng.permutation(len(train)))
        valid = chainer.datasets.SubDataset(
            valid, 0, N_VALID_EXAMPLES, order=rng.permutation(len(valid)))
    else:
        train, valid = None, None
    train = chainermn.scatter_dataset(train, comm, shuffle=True)
    valid = chainermn.scatter_dataset(valid, comm)
    train_iter = chainer.iterators.SerialIterator(train, BATCHSIZE, shuffle=True)
    valid_iter = chainer.iterators.SerialIterator(
        valid, BATCHSIZE, repeat=False, shuffle=False)
    # Trainer with the Optuna pruning extension.
    updater = chainer.training.StandardUpdater(train_iter, optimizer)
    trainer = chainer.training.Trainer(updater, (EPOCH, "epoch"))
    trainer.extend(
        optuna.integration.ChainerPruningExtension(
            trial, "validation/main/accuracy", (PRUNER_INTERVAL, "epoch")))
    evaluator = chainer.training.extensions.Evaluator(valid_iter, model)
    trainer.extend(chainermn.create_multi_node_evaluator(evaluator, comm))
    log_report_extension = chainer.training.extensions.LogReport(log_name=None)
    trainer.extend(log_report_extension)
    if comm.rank == 0:
        trainer.extend(chainer.training.extensions.ProgressBar())
    # ChainerPruningExtension stops training by raising TrialPruned, and the
    # trainer would otherwise print a message each time it sees one --
    # show_loop_exception_msg=False silences that.
    trainer.run(show_loop_exception_msg=False)
    # Final evaluation with a fresh multi-node evaluator.
    evaluator = chainer.training.extensions.Evaluator(valid_iter, model)
    evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
    report = evaluator()
    return report["main/accuracy"]
示例12: objective
# 需要導入模塊: import chainermn [as 別名]
# 或者: from chainermn import create_multi_node_optimizer [as 別名]
def objective(trial, comm):
    """Optuna objective: train a trial-sampled MNIST model across MPI workers
    and return the final validation accuracy (no pruning in this variant)."""
    # Sample an architecture for this trial.
    model = L.Classifier(create_model(trial))
    # Momentum SGD, wrapped so gradients are all-reduced across workers.
    optimizer = chainer.optimizers.MomentumSGD()
    optimizer.setup(model)
    optimizer = chainermn.create_multi_node_optimizer(optimizer, comm)
    # Only worker 0 loads MNIST; fixed-seed subsets are then scattered so
    # every rank works on an even share of the data.
    if comm.rank == 0:
        train, valid = chainer.datasets.get_mnist()
        rng = np.random.RandomState(0)
        train = chainer.datasets.SubDataset(
            train, 0, N_TRAIN_EXAMPLES, order=rng.permutation(len(train)))
        valid = chainer.datasets.SubDataset(
            valid, 0, N_VALID_EXAMPLES, order=rng.permutation(len(valid)))
    else:
        train, valid = None, None
    train = chainermn.scatter_dataset(train, comm, shuffle=True)
    valid = chainermn.scatter_dataset(valid, comm)
    train_iter = chainer.iterators.SerialIterator(train, BATCHSIZE, shuffle=True)
    valid_iter = chainer.iterators.SerialIterator(
        valid, BATCHSIZE, repeat=False, shuffle=False)
    updater = chainer.training.StandardUpdater(train_iter, optimizer)
    trainer = chainer.training.Trainer(updater, (EPOCH, "epoch"))
    if comm.rank == 0:
        trainer.extend(chainer.training.extensions.ProgressBar())
    trainer.run()
    # Evaluate with a multi-node evaluator so all ranks contribute.
    evaluator = chainer.training.extensions.Evaluator(valid_iter, model)
    evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
    report = evaluator()
    return report["main/accuracy"]