This article collects typical usage examples of the Python method chainermn.create_multi_node_evaluator. If you are wondering what exactly chainermn.create_multi_node_evaluator does, or how to use it in practice, the curated code examples below may help. You can also explore further usage examples from the chainermn module that this method belongs to.
Three code examples of chainermn.create_multi_node_evaluator are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
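Before the full examples, here is a minimal sketch of the typical call pattern. It assumes the script is launched under MPI, and that model, valid_iter, and trainer are placeholders for an ordinary Chainer training setup (they are not defined here):

# Minimal sketch; model, valid_iter, and trainer are assumed to come from a
# standard Chainer training script.
import chainermn
from chainer.training import extensions

comm = chainermn.create_communicator()

# Wrap a regular Evaluator so the validation workload is split across workers
# and the evaluation results are aggregated over all processes.
evaluator = extensions.Evaluator(valid_iter, model)
evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
trainer.extend(evaluator)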
Example 1: get_example
# Required module: import chainermn [as alias]
# Or: from chainermn import create_multi_node_evaluator [as alias]
def get_example(self, i):
    # Reads the i-th image/label pair and returns a preprocessed image.
    # The following preprocessing steps are applied:
    #     - Cropping (random or center rectangular)
    #     - Random flip
    #     - Scaling to [0, 1] values
    crop_size = self.crop_size

    image, label = self.base[i]
    _, h, w = image.shape

    if self.random:
        # Randomly crop a region and flip the image
        top = random.randint(0, h - crop_size - 1)
        left = random.randint(0, w - crop_size - 1)
        if random.randint(0, 1):
            image = image[:, :, ::-1]
    else:
        # Crop the center
        top = (h - crop_size) // 2
        left = (w - crop_size) // 2
    bottom = top + crop_size
    right = left + crop_size

    image = image[:, top:bottom, left:right]
    image -= self.mean[:, top:bottom, left:right]
    image *= (1.0 / 255.0)  # Scale to [0, 1]
    return image, label
# chainermn.create_multi_node_evaluator can also be used with user-customized
# evaluator classes that inherit chainer.training.extensions.Evaluator.
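To illustrate the comment above, here is a minimal sketch of wrapping a user-defined evaluator. The MyEvaluator class and its derived "main/error" metric are illustrative assumptions, not part of the original example; valid_iter and model are assumed to be set up as in the other examples on this page.

import chainermn
from chainer.training import extensions


class MyEvaluator(extensions.Evaluator):
    # Hypothetical custom evaluator: reuse the default evaluation loop and
    # add a derived metric on top of the returned result dictionary.
    def evaluate(self):
        result = super(MyEvaluator, self).evaluate()
        if 'main/accuracy' in result:
            result['main/error'] = 1.0 - result['main/accuracy']
        return result


comm = chainermn.create_communicator()
# valid_iter and model are assumed to exist, as in the surrounding examples.
evaluator = chainermn.create_multi_node_evaluator(MyEvaluator(valid_iter, model), comm)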
Example 2: objective
# Required module: import chainermn [as alias]
# Or: from chainermn import create_multi_node_evaluator [as alias]
def objective(trial, comm):
    # Sample an architecture.
    model = L.Classifier(create_model(trial))

    # Setup optimizer.
    optimizer = chainer.optimizers.MomentumSGD()
    optimizer.setup(model)
    optimizer = chainermn.create_multi_node_optimizer(optimizer, comm)

    # Setup dataset and iterator. Only worker 0 loads the whole dataset.
    # The dataset of worker 0 is evenly split and distributed to all workers.
    if comm.rank == 0:
        train, valid = chainer.datasets.get_mnist()
        rng = np.random.RandomState(0)
        train = chainer.datasets.SubDataset(
            train, 0, N_TRAIN_EXAMPLES, order=rng.permutation(len(train))
        )
        valid = chainer.datasets.SubDataset(
            valid, 0, N_VALID_EXAMPLES, order=rng.permutation(len(valid))
        )
    else:
        train, valid = None, None
    train = chainermn.scatter_dataset(train, comm, shuffle=True)
    valid = chainermn.scatter_dataset(valid, comm)

    train_iter = chainer.iterators.SerialIterator(train, BATCHSIZE, shuffle=True)
    valid_iter = chainer.iterators.SerialIterator(valid, BATCHSIZE, repeat=False, shuffle=False)

    # Setup trainer.
    updater = chainer.training.StandardUpdater(train_iter, optimizer)
    trainer = chainer.training.Trainer(updater, (EPOCH, "epoch"))

    # Add Chainer extension for pruners.
    trainer.extend(
        optuna.integration.ChainerPruningExtension(
            trial, "validation/main/accuracy", (PRUNER_INTERVAL, "epoch")
        )
    )
    evaluator = chainer.training.extensions.Evaluator(valid_iter, model)
    trainer.extend(chainermn.create_multi_node_evaluator(evaluator, comm))
    log_report_extension = chainer.training.extensions.LogReport(log_name=None)
    trainer.extend(log_report_extension)

    if comm.rank == 0:
        trainer.extend(chainer.training.extensions.ProgressBar())

    # Run training.
    # Set show_loop_exception_msg=False to suppress messages about the TrialPruned
    # exception: ChainerPruningExtension raises TrialPruned to stop training, and
    # the trainer prints a message every time it receives one.
    trainer.run(show_loop_exception_msg=False)

    # Evaluate.
    evaluator = chainer.training.extensions.Evaluator(valid_iter, model)
    evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
    report = evaluator()

    return report["main/accuracy"]
Example 3: objective
# Required module: import chainermn [as alias]
# Or: from chainermn import create_multi_node_evaluator [as alias]
def objective(trial, comm):
    # Sample an architecture.
    model = L.Classifier(create_model(trial))

    # Setup optimizer.
    optimizer = chainer.optimizers.MomentumSGD()
    optimizer.setup(model)
    optimizer = chainermn.create_multi_node_optimizer(optimizer, comm)

    # Setup dataset and iterator. Only worker 0 loads the whole dataset.
    # The dataset of worker 0 is evenly split and distributed to all workers.
    if comm.rank == 0:
        train, valid = chainer.datasets.get_mnist()
        rng = np.random.RandomState(0)
        train = chainer.datasets.SubDataset(
            train, 0, N_TRAIN_EXAMPLES, order=rng.permutation(len(train))
        )
        valid = chainer.datasets.SubDataset(
            valid, 0, N_VALID_EXAMPLES, order=rng.permutation(len(valid))
        )
    else:
        train, valid = None, None
    train = chainermn.scatter_dataset(train, comm, shuffle=True)
    valid = chainermn.scatter_dataset(valid, comm)

    train_iter = chainer.iterators.SerialIterator(train, BATCHSIZE, shuffle=True)
    valid_iter = chainer.iterators.SerialIterator(valid, BATCHSIZE, repeat=False, shuffle=False)

    # Setup trainer.
    updater = chainer.training.StandardUpdater(train_iter, optimizer)
    trainer = chainer.training.Trainer(updater, (EPOCH, "epoch"))

    if comm.rank == 0:
        trainer.extend(chainer.training.extensions.ProgressBar())

    # Run training.
    trainer.run()

    # Evaluate.
    evaluator = chainer.training.extensions.Evaluator(valid_iter, model)
    evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
    report = evaluator()

    return report["main/accuracy"]