本文整理汇总了Python中chainer.optimizers.RMSprop方法的典型用法代码示例。如果您正苦于以下问题:Python optimizers.RMSprop方法的具体用法?Python optimizers.RMSprop怎么用?Python optimizers.RMSprop使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类chainer.optimizers
的用法示例。
在下文中一共展示了optimizers.RMSprop方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_optimizer
# 需要导入模块: from chainer import optimizers [as 别名]
# 或者: from chainer.optimizers import RMSprop [as 别名]
def get_optimizer(opt, lr=None, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None):
    """Build a Chainer optimizer selected by name.

    Args:
        opt (str): One of ``'MomentumSGD'``, ``'Adam'``, ``'AdaGrad'``
            or ``'RMSprop'``.
        lr (float): Learning rate for the non-Adam optimizers.
        adam_alpha, adam_beta1, adam_beta2, adam_eps: Adam hyper-parameters.
        weight_decay (float): Stored on the MomentumSGD optimizer only.

    Returns:
        The configured optimizer (not yet set up with a model).

    Raises:
        Exception: If ``opt`` does not name a supported optimizer.
    """
    factories = {
        'MomentumSGD': lambda: optimizers.MomentumSGD(lr=lr, momentum=0.9),
        'Adam': lambda: optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps),
        'AdaGrad': lambda: optimizers.AdaGrad(lr=lr),
        'RMSprop': lambda: optimizers.RMSprop(lr=lr),
    }
    if opt not in factories:
        raise Exception('No optimizer is selected')
    optimizer = factories[opt]()
    # The first model as the master model.
    # NOTE(review): this stores ``weight_decay`` as a plain attribute on the
    # optimizer; the sibling get_optimizer in this file installs a
    # chainer.optimizer.WeightDecay hook instead — confirm which is intended.
    if opt == 'MomentumSGD':
        optimizer.decay = weight_decay
    return optimizer
示例2: test_share_states
# 需要导入模块: from chainer import optimizers [as 别名]
# 或者: from chainer.optimizers import RMSprop [as 别名]
def test_share_states(self):
    """States exported from one optimizer must alias into the others."""
    model = L.Linear(2, 2)

    opt_a = optimizers.RMSprop()
    opt_a.setup(model)
    arrays = async_.share_states_as_shared_arrays(opt_a)

    # In Chainer v2, a model cannot be set up by two optimizers or more,
    # so each additional optimizer gets its own deep copy of the model.
    opt_b = optimizers.RMSprop()
    opt_b.setup(copy.deepcopy(model))
    opt_c = optimizers.RMSprop()
    opt_c.setup(copy.deepcopy(model))

    # (Tests by assert_different_pointers were removed; they are trivial now.)
    async_.set_shared_states(opt_b, arrays)
    async_.set_shared_states(opt_c, arrays)

    def assert_same_pointers(lhs, rhs):
        lhs_params = dict(lhs.target.namedparams())
        rhs_params = dict(rhs.target.namedparams())
        for name, lhs_param in lhs_params.items():
            lhs_state = lhs_param.update_rule.state
            rhs_state = rhs_params[name].update_rule.state
            self.assertTrue(lhs_state)
            self.assertTrue(rhs_state)
            for key, lhs_val in lhs_state.items():
                rhs_val = rhs_state[key]
                self.assertTrue(isinstance(lhs_val, np.ndarray))
                self.assertTrue(isinstance(rhs_val, np.ndarray))
                # Identical data pointers prove the buffers are shared,
                # not merely equal.
                self.assertEqual(lhs_val.ctypes.data, rhs_val.ctypes.data)

    assert_same_pointers(opt_a, opt_b)
    assert_same_pointers(opt_a, opt_c)
示例3: create
# 需要导入模块: from chainer import optimizers [as 别名]
# 或者: from chainer.optimizers import RMSprop [as 别名]
def create(self):
    """Instantiate the RMSprop optimizer under test.

    For float16 parameters a larger ``eps`` is supplied; the default would
    presumably underflow at half precision — TODO confirm.
    """
    extra = dict(eps_inside_sqrt=self.eps_inside_sqrt)
    if self.dtype == numpy.float16:
        extra['eps'] = 1e-6
    return optimizers.RMSprop(0.1, **extra)
示例4: set_params
# 需要导入模块: from chainer import optimizers [as 别名]
# 或者: from chainer.optimizers import RMSprop [as 别名]
def set_params(self, params):
    """Configure learner hyper-parameters and build the gradient optimizer.

    Args:
        params (dict): Optional keys: ``gpu``, ``learning_rate``,
            ``decay_rate``, ``discount``, ``clip_err``,
            ``target_net_update``, ``double_DQN`` and ``optim_name``
            (one of ``'RMSprop'``, ``'ADADELTA'``, ``'ADAM'``, ``'SGD'``;
            defaults to ``'ADAM'``). Missing keys fall back to defaults.
    """
    self.gpu = params.get('gpu', False)
    self.learning_rate = params.get('learning_rate', 0.00025)
    self.decay_rate = params.get('decay_rate', 0.95)
    self.discount = params.get('discount', 0.95)
    self.clip_err = params.get('clip_err', False)
    self.target_net_update = params.get('target_net_update', 10000)
    self.double_DQN = params.get('double_DQN', False)

    # setting up various possible gradient update algorithms
    opt = params.get('optim_name', 'ADAM')
    if opt == 'RMSprop':
        self.optimizer = optimizers.RMSprop(lr=self.learning_rate, alpha=self.decay_rate)
    elif opt == 'ADADELTA':
        print("Supplied learning rate not used with ADADELTA gradient update method")
        self.optimizer = optimizers.AdaDelta()
    elif opt == 'ADAM':
        self.optimizer = optimizers.Adam(alpha=self.learning_rate)
    elif opt == 'SGD':
        self.optimizer = optimizers.SGD(lr=self.learning_rate)
    else:
        print('The requested optimizer is not supported!!!')
        exit()

    # clip_err may legitimately be 0, so compare against False explicitly.
    if self.clip_err is not False:
        self.optimizer.add_hook(chainer.optimizer.GradientClipping(self.clip_err))

    # BUG FIX: previously this read params['optim_name'] directly, raising
    # KeyError whenever the caller relied on the 'ADAM' default chosen above.
    self.optim_name = opt
示例5: __init__
# 需要导入模块: from chainer import optimizers [as 别名]
# 或者: from chainer.optimizers import RMSprop [as 别名]
def __init__(self, model=None, lr=0.045, decay=0.9, eps=1.0, weight_decay=4.0e-5, clip=2.0):
    """Set up RMSprop with weight decay and gradient clipping.

    The defaults (lr=0.045, decay=0.9, eps=1.0) presumably follow the
    Inception-v3 training recipe — confirm against the paper.
    """
    super(OptimizerGooglenetV3, self).__init__(model)
    rmsprop = optimizers.RMSprop(lr, decay, eps)
    rmsprop.setup(self.model)
    # Hooks: L2 weight decay plus gradient-norm clipping.
    rmsprop.add_hook(chainer.optimizer.WeightDecay(weight_decay))
    rmsprop.add_hook(chainer.optimizer.GradientClipping(clip))
    self.optimizer = rmsprop
开发者ID:nutszebra,项目名称:neural_architecture_search_with_reinforcement_learning_appendix_a,代码行数:11,代码来源:nutszebra_optimizer.py
示例6: get_optimizer
# 需要导入模块: from chainer import optimizers [as 别名]
# 或者: from chainer.optimizers import RMSprop [as 别名]
def get_optimizer(model, opt, lr, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None,
                  resume_opt=None):
    """Create, set up, and optionally resume an optimizer for ``model``.

    Args:
        model: The chainer model the optimizer is set up with.
        opt (str): ``'MomentumSGD'``, ``'Adam'``, ``'AdaGrad'`` or ``'RMSprop'``.
        lr (float): Learning rate for the non-Adam optimizers.
        adam_alpha, adam_beta1, adam_beta2, adam_eps: Adam hyper-parameters.
        weight_decay (float): WeightDecay hook rate; used for MomentumSGD only.
        resume_opt (str): Optional path to a saved optimizer state (npz).

    Returns:
        The optimizer, already set up with ``model``.

    Raises:
        Exception: If ``opt`` does not name a supported optimizer.
    """
    if opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')

    # The first model as the master model
    optimizer.setup(model)
    if opt == 'MomentumSGD':
        optimizer.add_hook(chainer.optimizer.WeightDecay(weight_decay))
    if resume_opt is not None:
        serializers.load_npz(resume_opt, optimizer)
    return optimizer
示例7: train
# 需要导入模块: from chainer import optimizers [as 别名]
# 或者: from chainer.optimizers import RMSprop [as 别名]
def train(args):
    """Train a WGAN (critic + generator) on unlabeled CIFAR-10.

    Args:
        args: Parsed CLI namespace with ``nz`` (noise dim), ``batch_size``,
            ``epochs`` and ``gpu`` (device id).
    """
    # scale=2 yields pixels in [0, 2]; shifting by 1 gives [-1, 1],
    # matching the tanh output range of the generator.
    train_data, _ = datasets.get_cifar10(withlabel=False, ndim=3, scale=2)
    train_data -= 1.0

    train_iter = iterators.SerialIterator(train_data, args.batch_size)
    z_iter = RandomNoiseIterator(
        GaussianNoiseGenerator(0, 1, args.nz), args.batch_size)

    # WGAN recipe: RMSprop with a small, equal learning rate on both nets.
    optimizer_generator = optimizers.RMSprop(lr=0.00005)
    optimizer_critic = optimizers.RMSprop(lr=0.00005)
    optimizer_generator.setup(Generator())
    optimizer_critic.setup(Critic())

    updater = WassersteinGANUpdater(
        iterator=train_iter,
        noise_iterator=z_iter,
        optimizer_generator=optimizer_generator,
        optimizer_critic=optimizer_critic,
        device=args.gpu)

    trainer = training.Trainer(updater, stop_trigger=(args.epochs, 'epoch'))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.LogReport(trigger=(1, 'iteration')))
    trainer.extend(GeneratorSample(), trigger=(1, 'epoch'))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'iteration', 'critic/loss',
         'critic/loss/real', 'critic/loss/fake', 'generator/loss']))
    trainer.run()