

Python agents.DDPGAgent Code Examples

This article collects typical usage examples of rl.agents.DDPGAgent in Python. If you are unsure what agents.DDPGAgent does, how to use it, or what working code looks like, the curated examples below should help. You can also explore further usage examples from the rl.agents module.


The following presents 2 code examples of agents.DDPGAgent, ordered by popularity by default.

Example 1: test_ddpg

# Required imports (aliased in the original listing as: from rl import agents):
import random

import gym
import numpy as np
from tensorflow.keras.layers import Activation, Concatenate, Dense, Flatten, Input
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam

from rl.agents import DDPGAgent
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess
def test_ddpg():
    # TODO: replace this with a simpler environment where we can actually test if it finds a solution
    env = gym.make('Pendulum-v0')
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.shape[0]

    # Actor: maps the (window, observation) input to a continuous action.
    actor = Sequential()
    actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    actor.add(Dense(16))
    actor.add(Activation('relu'))
    actor.add(Dense(nb_actions))
    actor.add(Activation('linear'))

    # Critic: scores (action, observation) pairs with a single Q-value.
    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
    flattened_observation = Flatten()(observation_input)
    x = Concatenate()([action_input, flattened_observation])
    x = Dense(16)(x)
    x = Activation('relu')(x)
    x = Dense(1)(x)
    x = Activation('linear')(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)
    
    # Replay buffer and Ornstein-Uhlenbeck exploration noise (size defaults
    # to 1, matching Pendulum's single action dimension).
    memory = SequentialMemory(limit=1000, window_length=1)
    random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.3)
    agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
                      memory=memory, nb_steps_warmup_critic=50, nb_steps_warmup_actor=50,
                      random_process=random_process, gamma=.99, target_model_update=1e-3)
    # compile() takes one optimizer per model: [actor_optimizer, critic_optimizer].
    agent.compile([Adam(lr=1e-3), Adam(lr=1e-3)])

    agent.fit(env, nb_steps=400, visualize=False, verbose=0, nb_max_episode_steps=100)
    h = agent.test(env, nb_episodes=2, visualize=False, nb_max_episode_steps=100)
    # TODO: evaluate history 
Developer ID: wau, Project: keras-rl2, Line count: 37, Source file: test_continuous.py
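
The test() call above returns a Keras History object, which is one way to resolve the final TODO. A minimal sketch, assuming keras-rl's convention of logging each episode's total reward under the 'episode_reward' key:

# Hedged sketch: evaluate the history returned by agent.test() above.
# Assumes keras-rl records per-episode totals under 'episode_reward'.
rewards = h.history['episode_reward']
print('mean reward over {} test episodes: {:.2f}'.format(len(rewards), np.mean(rewards)))
assert len(rewards) == 2  # one entry per episode requested in agent.test()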

Example 2: __init__

# Required imports (aliased in the original listing as: from rl import agents):
from keras.layers import Activation, Concatenate, Dense, Flatten, Input, Lambda
from keras.models import Model, Sequential
from keras.optimizers import Adam

from rl.agents import DDPGAgent
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess
def __init__(self, env, *args, **kwargs):
    super(KerasDDPGAgent, self).__init__(*args, **kwargs)
    self.env = env

    # assert len(env.action_space.shape) == 1
    # TODO: is there a way to output a tuple (6,1)
    # Count the truthy entries of a sampled action to recover the action dimension.
    nb_actions = sum(sum(1 for i in row if i) for row in self.env.action_space.sample())

    # TODO: terminology? feature or observation?
    observation = env.reset()

    print(">>>>>>>>>>>>>>>>>>>", observation.shape)

    # TODO: find a way to customize network
    # Actor: three hidden layers; tanh output scaled from [-1, 1] to roughly [-pi, pi].
    actor = Sequential()
    actor.add(Flatten(input_shape=(1,) + observation.shape))
    actor.add(Dense(16))
    actor.add(Activation('relu'))
    actor.add(Dense(16))
    actor.add(Activation('relu'))
    actor.add(Dense(16))
    actor.add(Activation('relu'))
    actor.add(Dense(nb_actions))
    actor.add(Activation('tanh'))
    actor.add(Lambda(lambda x: x * 3.14159))

    print(actor.summary())

    # Critic: takes both the action and the observation and outputs a single Q-value.
    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(1,) + observation.shape, name='observation_input')
    flattened_observation = Flatten()(observation_input)
    # Keras 2 API; the original used the deprecated merge(..., mode='concat').
    x = Concatenate()([action_input, flattened_observation])
    x = Dense(32)(x)
    x = Activation('relu')(x)
    x = Dense(32)(x)
    x = Activation('relu')(x)
    x = Dense(32)(x)
    x = Activation('relu')(x)
    x = Dense(1)(x)
    x = Activation('linear')(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)
    print(critic.summary())

    memory = SequentialMemory(limit=500000, window_length=1)
    random_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3)
    self.agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
                           memory=memory, nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,
                           random_process=random_process, gamma=.99, target_model_update=1e-3)
    # A single optimizer may be passed instead of an [actor, critic] pair.
    self.agent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae'])
Developer ID: jhu-lcsr, Project: costar_plan, Line count: 54, Source file: keras_ddpg.py
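
For context, here is a hypothetical sketch of how the wrapper above might be driven after construction. The surrounding KerasDDPGAgent API is not shown in the excerpt, so the constructor call is an assumption; fit(), save_weights(), and test() are standard keras-rl agent methods:

# Hypothetical usage; KerasDDPGAgent's base-class arguments are assumed optional.
wrapper = KerasDDPGAgent(env)
wrapper.agent.fit(env, nb_steps=50000, visualize=False, verbose=1)
wrapper.agent.save_weights('ddpg_weights.h5f', overwrite=True)
wrapper.agent.test(env, nb_episodes=5, visualize=False)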


Note: The rl.agents.DDPGAgent examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors, and copyright remains with the original authors; refer to each project's license before distributing or using the code. Do not reproduce without permission.