This article collects typical usage examples of the Python method chainer.optimizers.MomentumSGD. If you are unsure what optimizers.MomentumSGD does or how to call it, the curated examples below may help; you can also explore the containing module, chainer.optimizers, for related functionality.
The following presents 9 code examples of optimizers.MomentumSGD, ordered by popularity by default.
Example 1: __init__
# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import MomentumSGD [as alias]
def __init__(self, model=None, lr=0.1, momentum=0.9, schedule=(250, 375), weight_decay=1.0e-4):
    super(OptimizerStochasticDepth, self).__init__(model)
    self.lr = lr
    self.momentum = momentum
    self.schedule = schedule
    self.weight_decay = weight_decay
    all_links = OptimizerStochasticDepth._find(model)
    optimizer_set = []
    for link in all_links:
        # One MomentumSGD per link, each with its own WeightDecay hook
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay = chainer.optimizer.WeightDecay(self.weight_decay)
        optimizer.setup(link[0])
        optimizer.add_hook(weight_decay)
        optimizer_set.append(optimizer)
    self.optimizer_set = optimizer_set
    self.all_links = all_links
Author: nutszebra | Project: neural_architecture_search_with_reinforcement_learning_appendix_a | Lines: 18 | Source file: nutszebra_optimizer.py
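This per-link pattern gives each sub-link its own MomentumSGD instance with its own WeightDecay hook, so learning rates can later be adjusted link by link. Below is a minimal, self-contained sketch of the same idea; the two-layer chain and the decay_lr helper are illustrative assumptions, not code from the original repository.

import chainer
import chainer.links as L
from chainer import optimizers

# Stand-in model: any Chain whose links should be optimized separately.
model = chainer.Chain()
with model.init_scope():
    model.fc1 = L.Linear(8, 8)
    model.fc2 = L.Linear(8, 2)

# One MomentumSGD (plus WeightDecay hook) per link, as in Example 1.
optimizer_set = []
for link in [model.fc1, model.fc2]:
    opt = optimizers.MomentumSGD(lr=0.1, momentum=0.9)
    opt.setup(link)
    opt.add_hook(chainer.optimizer.WeightDecay(1.0e-4))
    optimizer_set.append(opt)

# Decay every per-link learning rate at a schedule boundary
# (epochs 250 and 375 in the snippet above).
def decay_lr(optimizer_set, factor=0.1):
    for opt in optimizer_set:
        opt.lr *= factor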
Example 2: test_remove_link
# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import MomentumSGD [as alias]
def test_remove_link(self):
    opt = optimizers.MomentumSGD(lr=0.01)
    # Update each depth
    for depth in six.moves.range(1, self.n_encdec + 1):
        model = segnet.SegNet(self.n_encdec, self.n_classes,
                              self.x_shape[1], self.n_mid)
        model = segnet.SegNetLoss(
            model, class_weight=None, train_depth=depth)
        opt.setup(model)
        # Deregister non-target links from opt
        if depth > 1:
            model.predictor.remove_link('conv_cls')
        for d in range(1, self.n_encdec + 1):
            if d != depth:
                model.predictor.remove_link('encdec{}'.format(d))
        for name, link in model.namedparams():
            if depth > 1:
                self.assertTrue(
                    'encdec{}'.format(depth) in name)
            else:
                self.assertTrue(
                    'encdec{}'.format(depth) in name or 'conv_cls' in name)
Example 3: get_optimizer
# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import MomentumSGD [as alias]
def get_optimizer(opt, lr=None, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')
    if opt == 'MomentumSGD':
        # Only the decay rate is stored here; a WeightDecay hook is
        # presumably registered elsewhere (compare Example 9).
        optimizer.decay = weight_decay
    return optimizer
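A hedged usage sketch of this helper (the argument values are illustrative). Note that this variant only stores the decay rate as an attribute; unlike Example 9 it registers no WeightDecay hook, and the returned optimizer still needs optimizer.setup(model) before use.

optimizer = get_optimizer('MomentumSGD', lr=0.01, weight_decay=1.0e-4)
print(optimizer.lr, optimizer.decay)  # 0.01 0.0001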
Example 4: train
# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import MomentumSGD [as alias]
def train(epoch=10, batch_size=32, gpu=False):
    if gpu:
        cuda.check_cuda_available()
    xp = cuda.cupy if gpu else np
    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, image_property=IMAGE_PROP)
    # make the mean image if it does not exist yet
    if not os.path.isfile(MEAN_IMAGE_FILE):
        print("make mean image...")
        td.make_mean_image(MEAN_IMAGE_FILE)
    else:
        td.mean_image_file = MEAN_IMAGE_FILE
    # train model
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    optimizer.setup(model)
    print("Now our model is {0} classification task.".format(len(label_def)))
    print("begin training the model. epoch:{0} batch size:{1}.".format(epoch, batch_size))
    if gpu:
        model.to_gpu()
    for i in range(epoch):
        print("epoch {0}/{1}: (learning rate={2})".format(i + 1, epoch, optimizer.lr))
        td.shuffle(overwrite=True)
        for x_batch, y_batch in td.generate_batches(batch_size):
            x = chainer.Variable(xp.asarray(x_batch))
            t = chainer.Variable(xp.asarray(y_batch))
            # optimizer.update computes the loss and applies the gradients
            optimizer.update(model, x, t)
            print("loss: {0}, accuracy: {1}".format(float(model.loss.data), float(model.accuracy.data)))
        serializers.save_npz(MODEL_FILE, model)
        optimizer.lr *= 0.97  # exponential per-epoch learning-rate decay
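The manual optimizer.lr *= 0.97 at the end of each epoch implements a simple exponential decay. With Chainer's Trainer machinery the same schedule can be expressed declaratively; a sketch assuming a trainer object has been built around the same optimizer:

from chainer.training import extensions

# Multiply the optimizer's `lr` attribute by 0.97 once per epoch,
# matching the hand-rolled decay in the training loop above.
trainer.extend(extensions.ExponentialShift('lr', 0.97),
               trigger=(1, 'epoch'))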
Example 5: prepare_optimizer
# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import MomentumSGD [as alias]
def prepare_optimizer(self):
    if self.args.opt == 'MomentumSGD':
        self.opt = optimizers.MomentumSGD(momentum=0.9)
    elif self.args.opt == 'Adam':
        self.opt = optimizers.Adam(alpha=self.args.adam_alpha)
        print('Adam alpha=', self.args.adam_alpha)
    else:
        raise ValueError('Opt should be MomentumSGD or Adam.')
    self.opt.setup(self.x_link)
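Note that the MomentumSGD branch above passes no learning rate, so it relies on Chainer's default of lr=0.01; the rate can still be changed after construction. A short sketch:

from chainer import optimizers

opt = optimizers.MomentumSGD(momentum=0.9)
print(opt.lr)  # 0.01 (Chainer's default)
opt.lr = 0.1   # adjust after construction if needed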
Example 6: get_model_optimizer
# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import MomentumSGD [as alias]
def get_model_optimizer(args):
    model = get_model(args)
    if 'opt' in args:
        # prepare optimizer
        if args.opt == 'MomentumSGD':
            optimizer = optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
        elif args.opt == 'Adam':
            optimizer = optimizers.Adam(alpha=args.alpha)
        elif args.opt == 'AdaGrad':
            optimizer = optimizers.AdaGrad(lr=args.lr)
        else:
            raise Exception('No optimizer is selected')
        optimizer.setup(model)
        if args.opt == 'MomentumSGD':
            optimizer.add_hook(
                chainer.optimizer.WeightDecay(args.weight_decay))
        if args.resume_opt is not None:
            serializers.load_hdf5(args.resume_opt, optimizer)
            args.epoch_offset = int(
                re.search('epoch-([0-9]+)', args.resume_opt).groups()[0])
        return model, optimizer
    else:
        print('No optimizer generated.')
        return model
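A hedged usage sketch; the attribute names are taken from the snippet above, while get_model() and the concrete values are assumptions.

import argparse

args = argparse.Namespace(opt='MomentumSGD', lr=0.01,
                          weight_decay=1.0e-4, resume_opt=None)
# argparse.Namespace supports `in`, so the `'opt' in args` check works.
model, optimizer = get_model_optimizer(args)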
Example 7: __init__
# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import MomentumSGD [as alias]
def __init__(self, model=None,
             schedule=(int(32000. / (50000. / 128)),
                       int(48000. / (50000. / 128))),
             lr=0.1, momentum=0.9, weight_decay=1.0e-4, warm_up_lr=0.01):
    # The default schedule converts the classic 32k/48k-iteration
    # milestones into epochs, assuming 50000 training images and a
    # batch size of 128.
    super(OptimizerResnet, self).__init__(model)
    optimizer = optimizers.MomentumSGD(warm_up_lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.schedule = schedule
    self.lr = lr
    self.warmup_lr = warm_up_lr
    self.momentum = momentum
    self.weight_decay = weight_decay
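The stored lr, warmup_lr, and schedule suggest a warm-up phase followed by step decays. A purely illustrative helper (not from the original repository) showing how such a schedule is typically evaluated; the defaults of (81, 122) are the epoch equivalents of the 32k/48k milestones above:

def lr_at(epoch, schedule=(81, 122), lr=0.1, warm_up_lr=0.01,
          warm_up_epochs=1):
    # Warm-up rate first, then the base rate, divided by 10 at
    # each schedule boundary that has been passed.
    if epoch < warm_up_epochs:
        return warm_up_lr
    rate = lr
    for boundary in schedule:
        if epoch >= boundary:
            rate *= 0.1
    return rate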
Example 8: create
# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import MomentumSGD [as alias]
def create(self):
    return optimizers.MomentumSGD(0.1)
Example 9: get_optimizer
# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import MomentumSGD [as alias]
def get_optimizer(model, opt, lr, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None,
                  resume_opt=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')
    # The first model as the master model
    optimizer.setup(model)
    if opt == 'MomentumSGD':
        optimizer.add_hook(
            chainer.optimizer.WeightDecay(weight_decay))
    if resume_opt is not None:
        serializers.load_npz(resume_opt, optimizer)
    return optimizer
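A hedged end-to-end sketch of Example 9's helper: a toy classifier, the MomentumSGD branch with its WeightDecay hook, and an optimizer checkpoint round-trip via resume_opt. The model and file name are assumptions.

import chainer.links as L
from chainer import serializers

model = L.Classifier(L.Linear(784, 10))
optimizer = get_optimizer(model, 'MomentumSGD', lr=0.01,
                          weight_decay=1.0e-4)
# Save the optimizer state, then rebuild and resume from it.
serializers.save_npz('optimizer_snapshot.npz', optimizer)
optimizer = get_optimizer(model, 'MomentumSGD', lr=0.01,
                          weight_decay=1.0e-4,
                          resume_opt='optimizer_snapshot.npz')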