

Python optimizers.RMSprop Method Code Examples

This article collects and summarizes typical usage examples of the chainer.optimizers.RMSprop method in Python. If you are wondering what optimizers.RMSprop does, how to use it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples from the chainer.optimizers module where this method lives.


Seven code examples of the optimizers.RMSprop method are shown below, sorted by popularity by default.
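
Before the project-specific examples, here is a minimal, self-contained sketch of the typical optimizers.RMSprop workflow in Chainer: construct the optimizer, attach it to a model with setup(), compute gradients, and call update(). The toy model and random data are illustrative assumptions, not taken from any of the projects listed below.

import numpy as np
import chainer.functions as F
import chainer.links as L
from chainer import optimizers

# A toy linear regression model (illustrative only).
model = L.Linear(3, 1)

# RMSprop with Chainer's default hyperparameters (lr=0.01, alpha=0.99, eps=1e-8).
optimizer = optimizers.RMSprop(lr=0.01)
optimizer.setup(model)

# One manual training step on random data.
x = np.random.rand(8, 3).astype(np.float32)
t = np.random.rand(8, 1).astype(np.float32)

model.cleargrads()                        # reset accumulated gradients
loss = F.mean_squared_error(model(x), t)  # forward pass
loss.backward()                           # backpropagate
optimizer.update()                        # apply the RMSprop update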

Example 1: get_optimizer

# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import RMSprop [as alias]
def get_optimizer(opt, lr=None, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')

    # Note: this only stores weight_decay on the optimizer object; in Chainer,
    # weight decay is normally applied via a chainer.optimizer.WeightDecay hook
    # (compare Example 6 below).
    if opt == 'MomentumSGD':
        optimizer.decay = weight_decay

    return optimizer 
Developer: pfnet-research, Project: chainer-segnet, Lines: 22, Source: train_utils.py

Example 2: test_share_states

# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import RMSprop [as alias]
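# This snippet (from chainerrl's test suite) also assumes, beyond the imports above:
# import copy; import numpy as np; import chainer.links as L;
# plus the chainerrl module that provides share_states_as_shared_arrays /
# set_shared_states, imported here as async_.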
def test_share_states(self):

        model = L.Linear(2, 2)
        opt_a = optimizers.RMSprop()
        opt_a.setup(model)
        arrays = async_.share_states_as_shared_arrays(opt_a)
        opt_b = optimizers.RMSprop()
        opt_b.setup(copy.deepcopy(model))
        # In Chainer v2, a model cannot be set up by two optimizers or more.

        opt_c = optimizers.RMSprop()
        opt_c.setup(copy.deepcopy(model))

        """
        Removed the tests by assert_different_pointers
        since they are trivial now.
        """

        async_.set_shared_states(opt_b, arrays)
        async_.set_shared_states(opt_c, arrays)

        def assert_same_pointers(a, b):
            a = a.target
            b = b.target
            for param_name, param_a in a.namedparams():
                param_b = dict(b.namedparams())[param_name]
                state_a = param_a.update_rule.state
                state_b = param_b.update_rule.state
                self.assertTrue(state_a)
                self.assertTrue(state_b)
                for state_name, state_val_a in state_a.items():
                    state_val_b = state_b[state_name]
                    self.assertTrue(isinstance(
                        state_val_a, np.ndarray))
                    self.assertTrue(isinstance(
                        state_val_b, np.ndarray))
                    self.assertEqual(state_val_a.ctypes.data,
                                     state_val_b.ctypes.data)

        assert_same_pointers(opt_a, opt_b)
        assert_same_pointers(opt_a, opt_c) 
Developer: chainer, Project: chainerrl, Lines: 43, Source: test_async.py

Example 3: create

# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import RMSprop [as alias]
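# This snippet also assumes: import numpy (for numpy.float16); self.dtype and
# self.eps_inside_sqrt are attributes of the surrounding test class.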
def create(self):
        kwargs = {'eps_inside_sqrt': self.eps_inside_sqrt}
        if self.dtype == numpy.float16:
            kwargs['eps'] = 1e-6
        return optimizers.RMSprop(0.1, **kwargs) 
Developer: chainer, Project: chainer, Lines: 7, Source: test_optimizers_by_linear_model.py

Example 4: set_params

# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import RMSprop [as alias]
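# This snippet also assumes: import chainer (for chainer.optimizer.GradientClipping).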
def set_params(self, params):

        self.gpu = params.get('gpu', False)
        self.learning_rate = params.get('learning_rate', 0.00025)
        self.decay_rate = params.get('decay_rate', 0.95)
        self.discount = params.get('discount', 0.95)
        self.clip_err = params.get('clip_err', False)
        self.target_net_update = params.get('target_net_update', 10000)
        self.double_DQN = params.get('double_DQN', False)

        # setting up various possible gradient update algorithms
        opt = params.get('optim_name', 'ADAM')
        if opt == 'RMSprop':
            self.optimizer = optimizers.RMSprop(lr=self.learning_rate, alpha=self.decay_rate)

        elif opt == 'ADADELTA':
            print("Supplied learning rate not used with ADADELTA gradient update method")
            self.optimizer = optimizers.AdaDelta()

        elif opt == 'ADAM':
            self.optimizer = optimizers.Adam(alpha=self.learning_rate)

        elif opt == 'SGD':
            self.optimizer = optimizers.SGD(lr=self.learning_rate)

        else:
            print('The requested optimizer is not supported!!!')
            exit()

        if self.clip_err is not False:
            self.optimizer.add_hook(chainer.optimizer.GradientClipping(self.clip_err))

        self.optim_name = opt  # use the resolved name so the 'ADAM' default above still applies
Developer: sisl, Project: Chimp, Lines: 35, Source: chainer_backend.py

Example 5: __init__

# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import RMSprop [as alias]
def __init__(self, model=None, lr=0.045, decay=0.9, eps=1.0, weight_decay=4.0e-5, clip=2.0):
        super(OptimizerGooglenetV3, self).__init__(model)
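        # Positional args map to RMSprop(lr, alpha, eps); 'decay' is passed as alpha.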
        optimizer = optimizers.RMSprop(lr, decay, eps)
        weight_decay = chainer.optimizer.WeightDecay(weight_decay)
        clip = chainer.optimizer.GradientClipping(clip)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay)
        optimizer.add_hook(clip)
        self.optimizer = optimizer 
Developer: nutszebra, Project: neural_architecture_search_with_reinforcement_learning_appendix_a, Lines: 11, Source: nutszebra_optimizer.py

Example 6: get_optimizer

# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import RMSprop [as alias]
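# This snippet also assumes: import chainer; from chainer import serializers.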
def get_optimizer(model, opt, lr, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None,
                  resume_opt=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')

    # The first model as the master model
    optimizer.setup(model)

    if opt == 'MomentumSGD':
        optimizer.add_hook(
            chainer.optimizer.WeightDecay(weight_decay))

    if resume_opt is not None:
        serializers.load_npz(resume_opt, optimizer)

    return optimizer 
Developer: mitmul, Project: deeppose, Lines: 29, Source: train.py

Example 7: train

# Required import: from chainer import optimizers [as alias]
# Or: from chainer.optimizers import RMSprop [as alias]
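# This snippet also assumes the Chainer imports datasets, iterators, and training
# (with extensions from chainer.training), plus the project's own modules providing
# RandomNoiseIterator, GaussianNoiseGenerator, Generator, Critic,
# WassersteinGANUpdater, and GeneratorSample.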
def train(args):
    nz = args.nz
    batch_size = args.batch_size
    epochs = args.epochs
    gpu = args.gpu

    # CIFAR-10 images in range [-1, 1] (tanh generator outputs)
    train, _ = datasets.get_cifar10(withlabel=False, ndim=3, scale=2)
    train -= 1.0
    train_iter = iterators.SerialIterator(train, batch_size)

    z_iter = RandomNoiseIterator(GaussianNoiseGenerator(0, 1, args.nz),
                                 batch_size)

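    # lr=5e-5 follows the RMSProp setting recommended in the original WGAN
    # paper (Arjovsky et al., 2017).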
    optimizer_generator = optimizers.RMSprop(lr=0.00005)
    optimizer_critic = optimizers.RMSprop(lr=0.00005)
    optimizer_generator.setup(Generator())
    optimizer_critic.setup(Critic())

    updater = WassersteinGANUpdater(
        iterator=train_iter,
        noise_iterator=z_iter,
        optimizer_generator=optimizer_generator,
        optimizer_critic=optimizer_critic,
        device=gpu)

    trainer = training.Trainer(updater, stop_trigger=(epochs, 'epoch'))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.LogReport(trigger=(1, 'iteration')))
    trainer.extend(GeneratorSample(), trigger=(1, 'epoch'))
    trainer.extend(extensions.PrintReport(['epoch', 'iteration', 'critic/loss',
            'critic/loss/real', 'critic/loss/fake', 'generator/loss']))
    trainer.run() 
Developer: hvy, Project: chainer-wasserstein-gan, Lines: 35, Source: train.py


Note: The chainer.optimizers.RMSprop method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not republish without permission.