本文整理汇总了Python中chainer.serializers.save_npz方法的典型用法代码示例。如果您正苦于以下问题:Python serializers.save_npz方法的具体用法?Python serializers.save_npz怎么用?Python serializers.save_npz使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类chainer.serializers的用法示例。
在下文中一共展示了serializers.save_npz方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __save
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def __save(self, dirname, ancestors):
    """Recursively persist this object's ``saved_attributes`` under ``dirname``.

    Attributes that are themselves ``AttributeSavingMixin`` instances are
    saved into a subdirectory of the same name; any other non-None attribute
    is serialized to ``<name>.npz``. ``ancestors`` tracks the objects on the
    current recursion path so that a cyclic attribute graph is rejected
    instead of recursing forever.
    """
    os.makedirs(dirname, exist_ok=True)
    ancestors.append(self)
    for name in self.saved_attributes:
        assert hasattr(self, name)
        value = getattr(self, name)
        if value is None:
            continue
        if isinstance(value, AttributeSavingMixin):
            # A value already on the recursion path means a cycle.
            assert all(
                value is not seen for seen in ancestors
            ), "Avoid an infinite loop"
            value.__save(os.path.join(dirname, name), ancestors)
        else:
            target = os.path.join(dirname, '{}.npz'.format(name))
            serializers.save_npz(target, value)
    ancestors.pop()
示例2: test_resumed_trigger
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def test_resumed_trigger(self):
    """ManualScheduleTrigger state must survive a save_npz/load_npz round trip."""
    trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
    with tempfile.NamedTemporaryFile(delete=False) as f:
        trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
        # Drive the first part of the schedule, then snapshot the trigger.
        for want, done in zip(self.expected[:self.resume],
                              self.finished[:self.resume]):
            trainer.updater.update()
            self.assertEqual(trigger(trainer), want)
            self.assertEqual(trigger.finished, done)
        serializers.save_npz(f.name, trigger)
        # Restore into a fresh trigger and verify the remaining schedule.
        trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
        serializers.load_npz(f.name, trigger)
        for want, done in zip(self.expected[self.resume:],
                              self.finished[self.resume:]):
            trainer.updater.update()
            self.assertEqual(trigger(trainer), want)
            self.assertEqual(trigger.finished, done)
示例3: test_resumed_trigger_sparse_call
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def test_resumed_trigger_sparse_call(self):
    """IntervalTrigger must accumulate correctly when invoked only sporadically."""
    trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
    pending = False  # OR of expected results since the last real call
    with tempfile.NamedTemporaryFile(delete=False) as f:
        trigger = training.triggers.IntervalTrigger(*self.interval)
        for want in self.expected[:self.resume]:
            trainer.updater.update()
            pending = pending or want
            # Randomly skip calls; the trigger must fold skipped iterations in.
            if random.randrange(2):
                self.assertEqual(trigger(trainer), pending)
                pending = False
        serializers.save_npz(f.name, trigger)
        trigger = training.triggers.IntervalTrigger(*self.interval)
        serializers.load_npz(f.name, trigger)
        for want in self.expected[self.resume:]:
            trainer.updater.update()
            pending = pending or want
            if random.randrange(2):
                self.assertEqual(trigger(trainer), pending)
                pending = False
示例4: main
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def main():
    """Convert a Caffe PSPNet-101 (cityscapes) model to a Chainer npz file.

    Reads the binary caffemodel given on the command line, merges it with
    the bundled prototxt, transfers the weights into a freshly built
    PSPNetResNet101, and saves the result with ``serializers.save_npz``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('caffemodel')
    parser.add_argument('output')
    args = parser.parse_args()

    proto_path = 'weights/pspnet101_cityscapes_713.prototxt'
    n_class = 19
    input_size = (713, 713)

    model = PSPNetResNet101(
        n_class, None, input_size)
    # One dummy forward pass so all lazily-initialized parameters exist
    # before the weight transfer.
    model(np.random.uniform(size=(1, 3) + input_size).astype(np.float32))

    caffe_param = caffe_pb2.NetParameter()
    # BUG FIX: the original used open(...).read() twice without ever
    # closing the file handles; use context managers instead.
    with open(args.caffemodel, 'rb') as f:
        caffe_param.MergeFromString(f.read())
    with open(proto_path) as f:
        caffe_net = text_format.Merge(f.read(), caffe_pb2.NetParameter())

    transfer(model, caffe_param, caffe_net)
    serializers.save_npz(args.output, model)
示例5: train
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def train(epoch=10, batch_size=32, gpu=False):
    """Train the Alex classifier on the labeled image dataset.

    Args:
        epoch: number of passes over the training data.
        batch_size: minibatch size fed to the optimizer.
        gpu: when True, run on the GPU via cupy (checked up front).
    """
    if gpu:
        cuda.check_cuda_available()
    xp = cuda.cupy if gpu else np

    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, image_property=IMAGE_PROP)

    # Build the mean image once and cache it on disk for later runs.
    if os.path.isfile(MEAN_IMAGE_FILE):
        td.mean_image_file = MEAN_IMAGE_FILE
    else:
        print("make mean image...")
        td.make_mean_image(MEAN_IMAGE_FILE)

    # Model and optimizer setup.
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    optimizer.setup(model)

    print("Now our model is {0} classification task.".format(len(label_def)))
    print("begin training the model. epoch:{0} batch size:{1}.".format(epoch, batch_size))
    if gpu:
        model.to_gpu()

    for i in range(epoch):
        print("epoch {0}/{1}: (learning rate={2})".format(i + 1, epoch, optimizer.lr))
        td.shuffle(overwrite=True)
        for x_batch, y_batch in td.generate_batches(batch_size):
            x = chainer.Variable(xp.asarray(x_batch))
            t = chainer.Variable(xp.asarray(y_batch))
            optimizer.update(model, x, t)
            print("loss: {0}, accuracy: {1}".format(
                float(model.loss.data), float(model.accuracy.data)))
        # Checkpoint after every epoch, then decay the learning rate.
        serializers.save_npz(MODEL_FILE, model)
        optimizer.lr *= 0.97
示例6: __call__
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def __call__(self, trainer):
    """Trainer extension: evaluate dev loss and checkpoint on improvement.

    Computes the loss of the "main" optimizer's target over ``self.data``,
    reports it under ``self.observation_name``, and — when it beats
    ``self.best_loss`` — saves the encoder/decoder to
    ``self.save_best_model_to`` plus an optional ``.config`` sidecar file
    describing the checkpoint (loss, iteration, filename).
    """
    encdec = trainer.updater.get_optimizer("main").target
    log.info("computing %s" % self.observation_name)
    dev_loss = compute_loss_all(encdec, self.data, self.eos_idx, self.mb_size,
                                gpu=self.gpu,
                                reverse_src=self.reverse_src, reverse_tgt=self.reverse_tgt,
                                use_chainerx=self.use_chainerx)
    log.info("%s: %f (current best: %r)" % (self.observation_name, dev_loss, self.best_loss))
    chainer.reporter.report({self.observation_name: dev_loss})
    # best_loss is None on the first evaluation, so the first result always wins.
    if self.best_loss is None or self.best_loss > dev_loss:
        log.info("loss (%s) improvement: %r -> %r" % (self.observation_name,
                                                      self.best_loss, dev_loss))
        self.best_loss = dev_loss
        if self.save_best_model_to is not None:
            log.info("saving best loss (%s) model to %s" % (self.observation_name, self.save_best_model_to,))
            serializers.save_npz(self.save_best_model_to, encdec)
            if self.config_training is not None:
                # Write a ".config" sidecar recording checkpoint metadata so
                # the saved model can be reloaded with its training context.
                config_session = self.config_training.copy(readonly=False)
                config_session.add_section("model_parameters", keep_at_bottom="metadata")
                config_session["model_parameters"]["filename"] = self.save_best_model_to
                config_session["model_parameters"]["type"] = "model"
                config_session["model_parameters"]["description"] = "best_loss"
                config_session["model_parameters"]["infos"] = argument_parsing_tools.OrderedNamespace()
                config_session["model_parameters"]["infos"]["loss"] = float(dev_loss)
                config_session["model_parameters"]["infos"]["iteration"] = trainer.updater.iteration
                config_session.set_metadata_modified_time()
                config_session.save_to(self.save_best_model_to + ".config")
                # json.dump(config_session, open(self.save_best_model_to + ".config", "w"), indent=2, separators=(',', ': '))
示例7: test_resumed_trigger_sparse_call
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def test_resumed_trigger_sparse_call(self):
    """ManualScheduleTrigger must resume correctly under sporadic invocation."""
    trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
    pending = False  # OR of expected results since the last real call
    with tempfile.NamedTemporaryFile(delete=False) as f:
        trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
        for want, done in zip(self.expected[:self.resume],
                              self.finished[:self.resume]):
            trainer.updater.update()
            pending = pending or want
            # Randomly skip calls; skipped results must be folded in.
            if random.randrange(2):
                self.assertEqual(trigger(trainer), pending)
                self.assertEqual(trigger.finished, done)
                pending = False
        serializers.save_npz(f.name, trigger)
        trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
        serializers.load_npz(f.name, trigger)
        for want, done in zip(self.expected[self.resume:],
                              self.finished[self.resume:]):
            trainer.updater.update()
            pending = pending or want
            if random.randrange(2):
                self.assertEqual(trigger(trainer), pending)
                self.assertEqual(trigger.finished, done)
                pending = False
示例8: test_resumed_trigger
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def test_resumed_trigger(self):
    """IntervalTrigger state must survive a save_npz/load_npz round trip."""
    trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
    with tempfile.NamedTemporaryFile(delete=False) as f:
        trigger = training.triggers.IntervalTrigger(*self.interval)
        # First leg: exercise the trigger, then snapshot it.
        for want in self.expected[:self.resume]:
            trainer.updater.update()
            self.assertEqual(trigger(trainer), want)
        serializers.save_npz(f.name, trigger)
        # Second leg: a fresh trigger restored from the snapshot.
        trigger = training.triggers.IntervalTrigger(*self.interval)
        serializers.load_npz(f.name, trigger)
        for want in self.expected[self.resume:]:
            trainer.updater.update()
            self.assertEqual(trigger(trainer), want)
示例9: _test_trigger
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def _test_trigger(self, trigger, key, accuracies, expected,
                  resume=None, save=None):
    """Run a mock trainer fed with scripted ``accuracies`` and assert that
    ``trigger`` fires exactly at the iterations listed in ``expected``.

    Args:
        trigger: the trigger under test.
        key: observation key the trigger watches.
        accuracies: scripted per-iteration observation values.
        expected: iteration numbers at which the trigger should fire.
        resume: optional npz path to restore the trainer from first.
        save: optional npz path to snapshot the trainer into afterwards.
    """
    trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=(len(accuracies), 'iteration'),
        iter_per_epoch=self.iter_per_epoch)
    updater = trainer.updater

    def _roundtrip_updater(serializer):
        # Minimal serialize hook so the mock updater survives save/load.
        updater.iteration = serializer('iteration', updater.iteration)
        updater.epoch = serializer('epoch', updater.epoch)
        updater.is_new_epoch = serializer(
            'is_new_epoch', updater.is_new_epoch)

    trainer.updater.serialize = _roundtrip_updater

    def _feed_observation(t):
        # Publish the scripted accuracy for the current iteration.
        t.observation = {key: accuracies[t.updater.iteration - 1]}

    trainer.extend(_feed_observation, name='set_observation',
                   trigger=(1, 'iteration'), priority=2)

    fired_at = []

    def _record(t):
        fired_at.append(t.updater.iteration)

    trainer.extend(_record, name='record', trigger=trigger, priority=1)

    if resume is not None:
        serializers.load_npz(resume, trainer)
    trainer.run()
    self.assertEqual(fired_at, expected)
    if save is not None:
        serializers.save_npz(save, trainer)
示例10: test_resumed_trigger_sparse_call
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def test_resumed_trigger_sparse_call(self):
    """OnceTrigger must resume correctly when invoked only sporadically."""
    trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
    pending = False       # OR of expected results since the last real call
    pending_done = True   # AND of expected `finished` flags since then
    with tempfile.NamedTemporaryFile(delete=False) as f:
        trigger = training.triggers.OnceTrigger(self.call_on_resume)
        for want, done in zip(self.resumed_expected[:self.resume],
                              self.resumed_finished[:self.resume]):
            trainer.updater.update()
            pending = pending or want
            pending_done = pending_done and done
            # Randomly skip calls; accumulated state must still match.
            if random.randrange(2):
                self.assertEqual(trigger.finished, pending_done)
                self.assertEqual(trigger(trainer), pending)
                pending = False
                pending_done = True
        serializers.save_npz(f.name, trigger)
        trigger = training.triggers.OnceTrigger(self.call_on_resume)
        serializers.load_npz(f.name, trigger)
        for want, done in zip(self.resumed_expected[self.resume:],
                              self.resumed_finished[self.resume:]):
            trainer.updater.update()
            pending = pending or want
            pending_done = pending_done and done
            if random.randrange(2):
                self.assertEqual(trigger.finished, pending_done)
                self.assertEqual(trigger(trainer), pending)
                pending = False
                pending_done = True
示例11: check_serialization
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def check_serialization(self, backend_config):
    """Round-trip a hooked layer through save_npz/load_npz and verify that
    the hook's parameters and the layer's output are preserved.

    Args:
        backend_config: chainer testing backend configuration providing the
            target device and array conversion for the input.
    """
    with utils.tempdir() as root:
        filename = os.path.join(root, 'tmp.npz')
        # First layer: hooked, moved to the target device, and exercised
        # once in train mode so the hook initializes its state.
        layer1 = self.layer.copy('copy')
        hook1 = copy.deepcopy(self.hook)
        layer1.add_hook(hook1)
        layer1.to_device(backend_config.device)
        x = backend_config.get_array(self.x)
        with backend_config:
            layer1(x)
            with chainer.using_config('train', False):
                y1 = layer1(x)
        serializers.save_npz(filename, layer1)
        # Second layer: same structure and hook, restored from the file.
        layer2 = self.layer.copy('copy')
        hook2 = copy.deepcopy(self.hook)
        layer2.add_hook(hook2)
        # Test loading is nice.
        msg = None
        try:
            serializers.load_npz(filename, layer2)
        except Exception as e:
            msg = e
        assert msg is None
        with chainer.using_config('train', False):
            y2 = layer2(self.x.copy())
        # Test attributes are the same.
        orig_weight = _cpu._to_cpu(
            getattr(layer1, hook1.weight_name).array)
        orig_vector = _cpu._to_cpu(getattr(layer1, hook1.vector_name))
        numpy.testing.assert_array_equal(
            orig_weight, getattr(layer2, hook2.weight_name).array)
        numpy.testing.assert_array_equal(
            orig_vector, getattr(layer2, hook2.vector_name))
        testing.assert_allclose(y1.array, y2.array)
示例12: save_and_load_npz
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def save_and_load_npz(src, dst):
    """Saves ``src`` to an NPZ file and loads it to ``dst``.

    This is a short cut of :func:`save_and_load` using NPZ de/serializers.

    Args:
        src: An object to save.
        dst: An object to load to.
    """
    save_and_load(
        src, dst, 'tmp.npz', serializers.save_npz, serializers.load_npz)
示例13: caffe_to_chainermodel
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def caffe_to_chainermodel(model, caffe_prototxt, caffemodel_path,
                          chainermodel_path):
    """Copy weights from a Caffe network into ``model`` and save them as npz.

    Layers present in the Caffe net but missing from ``model`` are skipped
    with a notice; weight and bias shapes must match exactly (asserted).

    Args:
        model: chainer chain whose attributes mirror the Caffe layer names.
        caffe_prototxt: path to the network definition (cwd is changed to
            its directory before loading).
        caffemodel_path: path to the binary caffemodel weights.
        chainermodel_path: destination npz file.
    """
    os.chdir(osp.dirname(caffe_prototxt))
    net = caffe.Net(caffe_prototxt, caffemodel_path, caffe.TEST)
    # BUG FIX: dict.iteritems() exists only in Python 2; items() works in
    # both Python 2 and 3.
    for name, param in net.params.items():
        try:
            layer = getattr(model, name)
        except AttributeError:
            print('Skipping caffe layer: %s' % name)
            continue
        # Single-blob layers carry only weights, no bias.
        has_bias = True
        if len(param) == 1:
            has_bias = False
        print('{0}:'.format(name))
        # weight
        print(' - W: %s %s' % (param[0].data.shape, layer.W.data.shape))
        assert param[0].data.shape == layer.W.data.shape
        layer.W.data = param[0].data
        # bias
        if has_bias:
            print(' - b: %s %s' % (param[1].data.shape, layer.b.data.shape))
            assert param[1].data.shape == layer.b.data.shape
            layer.b.data = param[1].data
    S.save_npz(chainermodel_path, model)
示例14: __init__
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def __init__(self, directory: str, savefun=None, fs=None):
    """Create a writer rooted at ``directory`` (created if missing).

    Args:
        directory: output directory for serialized snapshots.
        savefun: serializer callable; defaults to ``save_npz``.
        fs: filesystem handler — ``None`` for the default ``pfio`` module,
            a handler name (str) resolved via ``pfio.create_handler``, or
            an already-constructed handler object.
    """
    assert directory is not None
    self.directory = directory
    self.savefun = savefun if savefun is not None else save_npz
    if fs is None:
        self.fs = pfio
    elif isinstance(fs, str):
        self.fs = pfio.create_handler(fs)
    else:
        self.fs = fs
    if not self.fs.exists(self.directory):
        self.fs.makedirs(self.directory)
示例15: on_epoch_done
# 需要导入模块: from chainer import serializers [as 别名]
# 或者: from chainer.serializers import save_npz [as 别名]
def on_epoch_done(epoch, n, o, loss, acc, valid_loss, valid_acc, test_loss, test_acc):
    """Per-epoch callback: log metrics, checkpoint the model, adjust the LR.

    Args:
        epoch: zero-based index of the epoch that just finished.
        n: the network being trained.
        o: the optimizer driving ``n``.
        loss, acc: training loss/accuracy for this epoch.
        valid_loss, valid_acc: validation loss/accuracy.
        test_loss, test_acc: test loss/accuracy.

    Relies on the enclosing scope for ``state``, ``model_prefix``, ``args``
    and ``log_file_path``.
    """
    error = 100 * (1 - acc)
    valid_error = 100 * (1 - valid_acc)
    test_error = 100 * (1 - test_acc)
    print('epoch {} done'.format(epoch))
    print('train loss: {} error: {}'.format(loss, error))
    print('valid loss: {} error: {}'.format(valid_loss, valid_error))
    print('test loss: {} error: {}'.format(test_loss, test_error))
    # Keep the best model/optimizer snapshot by validation error.
    if valid_error < state['best_valid_error']:
        serializers.save_npz('{}.model'.format(model_prefix), n)
        serializers.save_npz('{}.state'.format(model_prefix), o)
        state['best_valid_error'] = valid_error
        state['best_test_error'] = test_error
    # Periodic checkpoints, independent of validation quality.
    if args.save_iter > 0 and (epoch + 1) % args.save_iter == 0:
        serializers.save_npz('{}_{}.model'.format(model_prefix, epoch + 1), n)
        serializers.save_npz('{}_{}.state'.format(model_prefix, epoch + 1), o)
    # prevent divergence when using identity mapping model
    if args.model == 'identity_mapping' and epoch < 9:
        o.lr = 0.01 + 0.01 * (epoch + 1)
    # if len(lr_decay_iter) == 1 and (epoch + 1) % lr_decay_iter[0] == 0 or epoch + 1 in lr_decay_iter:
    # Note, "lr_decay_iter" should be a list object to store a training schedule,
    # However, to keep up with the Python3.5, I changed to an integer value...
    if (epoch + 1) % args.lr_decay_iter == 0 and epoch > 1:
        # BUG FIX: the original tested hasattr(optimizer, 'alpha') against a
        # global name instead of the optimizer `o` actually updated below.
        if hasattr(o, 'alpha'):
            o.alpha *= 0.1  # Adam-style optimizers expose `alpha`
        else:
            o.lr *= 0.1
    # NOTE(review): time.clock() was removed in Python 3.8; migrate to
    # time.perf_counter() together with wherever state['clock'] is seeded.
    clock = time.clock()
    print('elapsed time: {}'.format(clock - state['clock']))
    state['clock'] = clock
    with open(log_file_path, 'a') as f:
        f.write('{},{},{},{},{},{},{}\n'.format(epoch + 1, loss, error, valid_loss, valid_error, test_loss, test_error))