本文整理汇总了Python中chainer.training.extension.PRIORITY_WRITER属性的典型用法代码示例。如果您正苦于以下问题:Python extension.PRIORITY_WRITER属性的具体用法?Python extension.PRIORITY_WRITER怎么用?Python extension.PRIORITY_WRITER使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类chainer.training.extension的用法示例。
在下文中一共展示了extension.PRIORITY_WRITER属性的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: observe_value
# 需要导入模块: from chainer.training import extension [as 别名]
# 或者: from chainer.training.extension import PRIORITY_WRITER [as 别名]
def observe_value(observation_key, target_func):
    """Build a trainer extension that keeps recording one observed value.

    Args:
        observation_key (str): Key under which the value is stored in
            ``trainer.observation``.
        target_func (function): Callable computing the value to record.
            Receives the :class:`~chainer.training.Trainer` instance.

    Returns:
        The extension function, triggered once per epoch by default.
        Pass a ``trigger`` argument to
        :meth:`Trainer.extend() <chainer.training.Trainer.extend>` to
        change the schedule.
    """
    def _record(trainer):
        # PRIORITY_WRITER: runs before reader-priority extensions that
        # consume trainer.observation in the same iteration.
        trainer.observation[observation_key] = target_func(trainer)

    decorate = extension.make_extension(
        trigger=(1, 'epoch'), priority=extension.PRIORITY_WRITER)
    return decorate(_record)
示例2: Monitor
# 需要导入模块: from chainer.training import extension [as 别名]
# 或者: from chainer.training.extension import PRIORITY_WRITER [as 别名]
def Monitor(base_name="main"):
    """Build a trainer extension that calls a model's ``monitor`` method.

    Each epoch, the predictor attached to the optimizer registered under
    ``base_name`` has its ``monitor()`` method invoked.

    Note:
        Not used. Here for reference.
    """
    def _monitor_model(trainer):
        optimizer = trainer.updater.get_all_optimizers()[base_name]
        optimizer.target.predictor.monitor()

    decorate = extension.make_extension(
        trigger=(1, 'epoch'), priority=extension.PRIORITY_WRITER)
    return decorate(_monitor_model)
示例3: run_test_observation_aggregator
# 需要导入模块: from chainer.training import extension [as 别名]
# 或者: from chainer.training.extension import PRIORITY_WRITER [as 别名]
def run_test_observation_aggregator(comm, xp,
                                    use_chainer_variable,
                                    communicate_interval,
                                    use_gpu):
    """Exercise ObservationAggregator: each rank writes its own rank id
    into trainer.observation, and after aggregation every rank must see
    the mean rank, i.e. (comm.size - 1) / 2."""
    chain = DummyChain()
    if use_gpu:
        # Use CuPy's Device class to force call cudaSetDevice()
        chainer.cuda.get_device_from_id(comm.intra_rank).use()
    dev = get_device(comm.intra_rank if use_gpu else None, xp == chainerx)

    # chainerx arrays are built from a NumPy buffer; other backends
    # generate the random data directly.
    if xp == chainerx:
        dataset = xp.array(np.random.rand(10, 1).astype(np.float32))
    else:
        dataset = xp.random.rand(10, 1).astype(np.float32)
    chain.to_device(dev)

    data_iter = chainer.iterators.SerialIterator(dataset,
                                                 batch_size=1,
                                                 repeat=True,
                                                 shuffle=True)
    opt = chainermn.create_multi_node_optimizer(
        chainer.optimizers.Adam(), comm)
    opt.setup(chain)
    updater = chainer.training.StandardUpdater(data_iter, opt, device=dev)
    trainer = chainer.training.Trainer(updater, (1, 'epoch'))

    @extension.make_extension(
        trigger=(1, 'iteration'), priority=extension.PRIORITY_WRITER)
    def rank_reporter(trainer_):
        # Publish this process's rank each iteration; optionally wrap in
        # a Variable to cover both observation value types.
        value = xp.asarray(comm.rank, dtype=np.float32)
        if use_chainer_variable:
            value = chainer.Variable(value)
        trainer_.observation['rank'] = value

    @extension.make_extension(
        trigger=(communicate_interval, 'iteration'),
        priority=extension.PRIORITY_READER)
    def aggregated_rank_checker(trainer_):
        # The aggregate of ranks 0..size-1 must equal their mean.
        observed = trainer_.observation['rank-aggregated']
        if use_chainer_variable:
            observed = observed.data
        chainer.testing.assert_allclose(observed, (comm.size - 1) / 2)

    trainer.extend(rank_reporter)
    trainer.extend(ObservationAggregator(
        comm, 'rank', 'rank-aggregated',
        comm_trigger=(communicate_interval, 'iteration')))
    trainer.extend(aggregated_rank_checker)
    trainer.run()