Python optimizers.Adam code examples

This article collects typical usage examples of chainer.optimizers.Adam in Python. If you are wondering how optimizers.Adam is used in practice, the curated examples below should help. You can also explore further usage examples from the chainer.optimizers module.


The following presents 15 code examples of optimizers.Adam, sorted by popularity by default.
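All of the examples below share the same basic pattern: construct the optimizer, attach it to a model with setup(), and call update() after backpropagating a loss. Here is a minimal, self-contained sketch of that pattern (the toy L.Linear model and random data are illustrative only, not taken from any of the projects cited below):

import numpy as np
import chainer.functions as F
import chainer.links as L
from chainer import optimizers

# A tiny linear model whose parameters Adam will update.
model = L.Linear(3, 2)

# alpha is Adam's step size (learning rate).
optimizer = optimizers.Adam(alpha=1e-3)
optimizer.setup(model)

# One manual training step: clear gradients, compute a loss,
# backpropagate, then apply the Adam update rule.
x = np.random.rand(4, 3).astype(np.float32)
t = np.random.rand(4, 2).astype(np.float32)
model.cleargrads()
loss = F.mean_squared_error(model(x), t)
loss.backward()
optimizer.update()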

Example 1: make_agent

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def make_agent(self, env, gpu):
        model = self.make_model(env)
        policy = model['policy']
        q_func = model['q_function']

        actor_opt = optimizers.Adam(alpha=1e-4)
        actor_opt.setup(policy)

        critic_opt = optimizers.Adam(alpha=1e-3)
        critic_opt.setup(q_func)

        explorer = self.make_explorer(env)
        rbuf = self.make_replay_buffer(env)
        return self.make_pgt_agent(env=env, model=model,
                                   actor_opt=actor_opt, critic_opt=critic_opt,
                                   explorer=explorer, rbuf=rbuf, gpu=gpu) 
Author: chainer, Project: chainerrl, Lines: 18, Source: basetest_pgt.py

Example 2: make_agent

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def make_agent(self, env, gpu):
        model = self.make_model(env)
        policy = model['policy']
        q_func = model['q_function']

        actor_opt = optimizers.Adam(alpha=1e-4)
        actor_opt.setup(policy)

        critic_opt = optimizers.Adam(alpha=1e-3)
        critic_opt.setup(q_func)

        explorer = self.make_explorer(env)
        rbuf = self.make_replay_buffer(env)
        return self.make_ddpg_agent(env=env, model=model,
                                    actor_opt=actor_opt, critic_opt=critic_opt,
                                    explorer=explorer, rbuf=rbuf, gpu=gpu) 
Author: chainer, Project: chainerrl, Lines: 18, Source: basetest_ddpg.py

Example 3: _test_load_rainbow

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def _test_load_rainbow(self, gpu):
        q_func = DistributionalDuelingDQN(4, 51, -10, 10)
        links.to_factorized_noisy(q_func, sigma_scale=0.5)
        explorer = explorers.Greedy()
        opt = chainer.optimizers.Adam(6.25e-5, eps=1.5 * 10 ** -4)
        opt.setup(q_func)
        rbuf = replay_buffer.ReplayBuffer(100)
        agent = agents.CategoricalDoubleDQN(
            q_func, opt, rbuf, gpu=gpu, gamma=0.99,
            explorer=explorer, minibatch_size=32,
            replay_start_size=50,
            target_update_interval=32000,
            update_interval=4,
            batch_accumulator='mean',
            phi=lambda x: x,
        )

        model, exists = download_model("Rainbow", "BreakoutNoFrameskip-v4",
                                       model_type=self.pretrained_type)
        agent.load(model)
        if os.environ.get('CHAINERRL_ASSERT_DOWNLOADED_MODEL_IS_CACHED'):
            assert exists 
Author: chainer, Project: chainerrl, Lines: 24, Source: test_pretrained_models.py
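Note that the positional argument 6.25e-5 passed to optimizers.Adam is its first parameter, alpha (the learning rate), while eps is raised from the default 1e-8 to 1.5e-4; both values match the hyperparameters reported for Rainbow (Hessel et al., 2017).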

Example 4: test_adam_w

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def test_adam_w(self, backend_config):
        xp = backend_config.xp
        device = backend_config.device

        link = chainer.Link(x=(1,))
        link.to_device(device)

        opt = optimizers.Adam(eta=0.5, weight_decay_rate=0.1)
        opt.setup(link)

        link.x.data.fill(1)
        link.x.grad = device.send(xp.ones_like(link.x.data))

        opt.update()

        # compare against the value computed with v5 impl
        testing.assert_allclose(link.x.data, np.array([0.9495]),
                                atol=1e-7, rtol=1e-7) 
Author: chainer, Project: chainer, Lines: 20, Source: test_optimizers.py
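The expected value 0.9495 can be checked by hand, assuming Chainer's default Adam hyperparameters (alpha=0.001, beta1=0.9, beta2=0.999, eps=1e-8): on the first update with a gradient of 1, the bias-corrected moments are m_hat = v_hat = 1, so the decoupled AdamW step is eta * (alpha * m_hat / (sqrt(v_hat) + eps) + weight_decay_rate * x) = 0.5 * (0.001 + 0.1) = 0.0505, and 1 - 0.0505 = 0.9495.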

Example 5: backprop_check

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def backprop_check():
    xp = cuda.cupy if config.use_gpu else np
    duel = DDQN()

    shape = (2, config.rl_agent_history_length * config.ale_screen_channels,
             config.ale_scaled_screen_size[1], config.ale_scaled_screen_size[0])
    state = xp.random.uniform(-1.0, 1.0, shape).astype(xp.float32)
    reward = [1, 0]
    action = [3, 4]
    episode_ends = [0, 0]
    next_state = xp.random.uniform(-1.0, 1.0, shape).astype(xp.float32)

    optimizer_conv = optimizers.Adam(alpha=config.rl_learning_rate,
                                     beta1=config.rl_gradient_momentum)
    optimizer_conv.setup(duel.conv)
    optimizer_fc = optimizers.Adam(alpha=config.rl_learning_rate,
                                   beta1=config.rl_gradient_momentum)
    optimizer_fc.setup(duel.fc)

    for i in range(10000):
        # zero_grads() is deprecated in newer Chainer; cleargrads()
        # is the modern equivalent.
        optimizer_conv.zero_grads()
        optimizer_fc.zero_grads()
        loss, _ = duel.forward_one_step(state, action, reward, next_state,
                                        episode_ends)
        loss.backward()
        optimizer_conv.update()
        optimizer_fc.update()
        print(loss.data,
              duel.conv.layer_2.W.data[0, 0, 0, 0],
              duel.fc.layer_2.W.data[0, 0])
Author: musyoku, Project: double-dqn, Lines: 27, Source: grad_check.py

Example 6: train

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def train(network, loss, X_tr, Y_tr, X_te, Y_te, n_epochs=30, gamma=1):
    model = Objective(network, loss=loss, gamma=gamma)

    # optimizer = optimizers.SGD()
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    train = tuple_dataset.TupleDataset(X_tr, Y_tr)
    test = tuple_dataset.TupleDataset(X_te, Y_te)

    train_iter = iterators.SerialIterator(train, batch_size=1, shuffle=True)
    test_iter = iterators.SerialIterator(test, batch_size=1, repeat=False,
                                         shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (n_epochs, 'epoch'))

    trainer.run() 
Author: mblondel, Project: soft-dtw, Lines: 19, Source: plot_chainer_MLP.py

Example 7: get_optimizer

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def get_optimizer(opt, lr=None, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')

    # Weight decay is only configured when MomentumSGD is selected
    if opt == 'MomentumSGD':
        optimizer.decay = weight_decay

    return optimizer 
Author: pfnet-research, Project: chainer-segnet, Lines: 22, Source: train_utils.py
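For instance, a hypothetical call that builds (but does not yet set up) an Adam optimizer through this helper, with illustrative hyperparameter values:

# Illustrative values only; get_optimizer returns the optimizer,
# which still needs optimizer.setup(model) afterwards.
opt = get_optimizer('Adam', adam_alpha=1e-3, adam_beta1=0.9,
                    adam_beta2=0.999, adam_eps=1e-8)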

Example 8: __init__

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def __init__(
            self,
            model_parameters,
            # Learning rate at training step s with annealing
            initial_lr=1e-4,
            final_lr=1e-5,
            annealing_steps=1600000,
            # Exponential decay rates for Adam's moment estimates
            beta_1=0.9,
            beta_2=0.99,
            # Small constant for numerical stability
            eps=1e-8,
            initial_training_step=0,
            communicator=None):
        self.initial_lr = initial_lr
        self.final_lr = final_lr
        self.annealing_steps = annealing_steps
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.eps = eps

        lr = self.compute_lr_at_step(initial_training_step)
        self.optimizer = optimizers.Adam(
            lr, beta1=beta_1, beta2=beta_2, eps=eps)
        self.optimizer.setup(model_parameters)

        self.multi_node_optimizer = None
        if communicator:
            self.multi_node_optimizer = chainermn.create_multi_node_optimizer(
                self.optimizer, communicator) 
Author: musyoku, Project: chainer-gqn, Lines: 32, Source: optimizer.py
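The snippet calls self.compute_lr_at_step, which is not shown here. A plausible sketch, assuming the learning rate is annealed linearly from initial_lr to final_lr over annealing_steps and then held constant (a hypothetical reconstruction, not the project's verbatim code):

def compute_lr_at_step(self, training_step):
    # Hypothetical: linear annealing from initial_lr to final_lr,
    # clamped once annealing_steps have elapsed.
    t = min(training_step / self.annealing_steps, 1.0)
    return self.initial_lr + (self.final_lr - self.initial_lr) * t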

Example 9: make_agent

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def make_agent(self, env, gpu):
        model = self.make_model(env)

        opt = optimizers.Adam(alpha=3e-4)
        opt.setup(model)

        return self.make_a2c_agent(env=env, model=model, opt=opt, gpu=gpu,
                                   num_processes=self.num_processes) 
Author: chainer, Project: chainerrl, Lines: 10, Source: test_a2c.py

Example 10: make_agent

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def make_agent(self, env, gpu):
        policy, vf = self.make_model(env)

        if gpu >= 0:
            chainer.cuda.get_device_from_id(gpu).use()
            policy.to_gpu(gpu)
            vf.to_gpu(gpu)

        vf_opt = optimizers.Adam(alpha=1e-2)
        vf_opt.setup(vf)
        vf_opt.add_hook(chainer.optimizer_hooks.GradientClipping(1))

        if self.standardize_obs:
            obs_normalizer = chainerrl.links.EmpiricalNormalization(
                env.observation_space.low.size)
            if gpu >= 0:
                obs_normalizer.to_gpu(gpu)
        else:
            obs_normalizer = None

        agent = chainerrl.agents.TRPO(
            policy=policy,
            vf=vf,
            vf_optimizer=vf_opt,
            obs_normalizer=obs_normalizer,
            gamma=0.5,
            lambd=self.lambd,
            entropy_coef=self.entropy_coef,
            standardize_advantages=self.standardize_advantages,
            update_interval=64,
            vf_batch_size=32,
            act_deterministically=True,
            recurrent=self.recurrent,
        )

        return agent 
Author: chainer, Project: chainerrl, Lines: 38, Source: test_trpo.py

Example 11: create_agent

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def create_agent(self, env):
        model = agents.a3c.A3CSeparateModel(
            pi=create_stochastic_policy_for_env(env),
            v=create_v_function_for_env(env))
        opt = optimizers.Adam()
        opt.setup(model)
        return agents.A3C(model, opt, t_max=1, gamma=0.99) 
Author: chainer, Project: chainerrl, Lines: 9, Source: test_agents.py

Example 12: make_optimizer

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def make_optimizer(self, env, q_func):
        opt = optimizers.Adam(1e-2)
        opt.setup(q_func)
        return opt 
Author: chainer, Project: chainerrl, Lines: 6, Source: basetest_dqn_like.py

Example 13: get_args

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=1701)
    parser.add_argument('--model', type=str)
    parser.add_argument('--param', type=str)
    parser.add_argument('--layer', type=str, default='conv1')
    parser.add_argument('--img_fn', type=str)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--opt', type=str, default='Adam')
    parser.add_argument('--in_size', type=int, default=64)
    parser.add_argument('--x0_sigma', type=str, default='data/x0_sigma.txt')
    parser.add_argument('--lambda_tv', type=float, default=0.5)  # 0.5
    parser.add_argument('--lambda_lp', type=float, default=4e-10)  # 4e-10
    parser.add_argument('--beta', type=float, default=2)
    parser.add_argument('--p', type=float, default=6)
    parser.add_argument('--adam_alpha', type=float, default=0.1)
    parser.add_argument('--channels', type=int, default=-1)
    parser.add_argument('--max_iter', type=int, default=10000)
    args = parser.parse_args()

    for line in open(args.x0_sigma):
        args.x0_sigma = float(line.strip())
        break

    np.random.seed(args.seed)

    return args 
Author: mitmul, Project: ssai-cnn, Lines: 29, Source: invert.py

Example 14: prepare_optimizer

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def prepare_optimizer(self):
        if self.args.opt == 'MomentumSGD':
            self.opt = optimizers.MomentumSGD(momentum=0.9)
        elif self.args.opt == 'Adam':
            self.opt = optimizers.Adam(alpha=self.args.adam_alpha)
            print('Adam alpha=', self.args.adam_alpha)
        else:
            raise ValueError('Opt should be MomentumSGD or Adam.')
        self.opt.setup(self.x_link) 
Author: mitmul, Project: ssai-cnn, Lines: 11, Source: invert.py

Example 15: get_model_optimizer

# Required imports: from chainer import optimizers
# Alternatively: from chainer.optimizers import Adam
def get_model_optimizer(args):
    model = get_model(args)

    if 'opt' in args:
        # prepare optimizer
        if args.opt == 'MomentumSGD':
            optimizer = optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
        elif args.opt == 'Adam':
            optimizer = optimizers.Adam(alpha=args.alpha)
        elif args.opt == 'AdaGrad':
            optimizer = optimizers.AdaGrad(lr=args.lr)
        else:
            raise Exception('No optimizer is selected')

        optimizer.setup(model)

        if args.opt == 'MomentumSGD':
            optimizer.add_hook(
                chainer.optimizer.WeightDecay(args.weight_decay))

        if args.resume_opt is not None:
            serializers.load_hdf5(args.resume_opt, optimizer)
            args.epoch_offset = int(
                re.search('epoch-([0-9]+)', args.resume_opt).groups()[0])

        return model, optimizer
    else:
        print('No optimizer generated.')
        return model 
Author: mitmul, Project: ssai-cnn, Lines: 31, Source: train.py


Note: the chainer.optimizers.Adam examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs, and similar platforms. Copyright in each snippet remains with its original authors; refer to the corresponding project's License before redistributing or reusing the code.