This article collects typical usage examples of the Python method baselines.common.atari_wrappers_deprecated.wrap_dqn. If you are looking for concrete examples of how to call atari_wrappers_deprecated.wrap_dqn, the curated code samples below should help; you can also explore the other usage examples of the module baselines.common.atari_wrappers_deprecated.
The section below shows 11 code examples of atari_wrappers_deprecated.wrap_dqn, ordered by popularity by default.
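For orientation, here is a minimal sketch of how wrap_dqn is typically applied. The exact chain of wrappers it composes (no-op resets, frame skipping, 84x84 grayscale frames, a 4-frame stack, reward clipping) is an assumption based on the deprecated baselines module and may vary between versions.

import gym
from baselines.common.atari_wrappers_deprecated import wrap_dqn, ScaledFloatFrame

# wrap_dqn expects a NoFrameskip environment because it performs its own frame skipping
env = gym.make("PongNoFrameskip-v4")
env = wrap_dqn(env)                  # preprocessed, frame-stacked observations
env = ScaledFloatFrame(env)          # optional: rescale pixel values to [0, 1]

print(env.observation_space.shape)   # expected to be (84, 84, 4) after wrapping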
Example 1: main
# Required import: from baselines.common import atari_wrappers_deprecated
# Or: from baselines.common.atari_wrappers_deprecated import wrap_dqn
def main():
    env = gym.make("PongNoFrameskip-v4")
    env = ScaledFloatFrame(wrap_dqn(env))
    # convs is a list of (num_filters, kernel_size, stride) tuples for the convolutional layers
    model = deepq.models.cnn_to_mlp(
        convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
        hiddens=[256],
        dueling=True
    )
    act = deepq.learn(
        env,
        q_func=model,
        lr=1e-4,
        max_timesteps=2000000,
        buffer_size=10000,
        exploration_fraction=0.1,
        exploration_final_eps=0.01,
        train_freq=4,
        learning_starts=10000,
        target_network_update_freq=1000,
        gamma=0.99,
        prioritized_replay=True
    )
    act.save("pong_model.pkl")
    env.close()
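As a possible extension of Example 1, deepq.learn in the same era of baselines also accepts a callback that is invoked every step with the local and global scopes and stops training when it returns True. The sketch below assumes that convention; the reward threshold of 18 is an illustrative value for Pong, not a setting from the original code.

def callback(lcl, _glb):
    # stop once the mean reward of the last 100 completed episodes reaches the assumed threshold
    return lcl['t'] > 100 and sum(lcl['episode_rewards'][-101:-1]) / 100 >= 18

# act = deepq.learn(env, q_func=model, ..., callback=callback)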
Example 2: main
# Required import: from baselines.common import atari_wrappers_deprecated
# Or: from baselines.common.atari_wrappers_deprecated import wrap_dqn
def main():
    if False:
        # the Deterministic-v4 variant uses a fixed frame skip of 4 and no repeat-action probability
        environment = gym.make('BreakoutDeterministic-v4')
        environment = TerminateOnEndOfLifeWrapper(environment)
        environment = ReshapeWrapper(environment)
        environment = ClipRewardWrapper(environment)
        environment = RepeatWrapper(environment, frames=4)
    else:
        # use the environment wrappers from openai baselines
        environment = gym.make('BreakoutNoFrameskip-v4')
        environment = wrap_dqn(environment)
        environment = DimShuffleWrapper(environment)

    # TODO: perhaps these should be defined in the environment itself
    state_axes = ng.make_axes([
        ng.make_axis(environment.observation_space.shape[0], name='C'),
        ng.make_axis(environment.observation_space.shape[1], name='H'),
        ng.make_axis(environment.observation_space.shape[2], name='W'),
    ])

    agent = dqn.Agent(
        state_axes,
        environment.action_space,
        model=model,
        epsilon=dqn.linear_generator(start=1.0, end=0.1, steps=1000000),
        gamma=0.99,
        learning_rate=0.00025,
        memory=dqn.Memory(maxlen=1000000),
        target_network_update_frequency=1000,
        learning_starts=10000,
    )

    rl_loop.rl_loop_train(environment, agent, episodes=200000)
Example 3: make_env
# Required import: from baselines.common import atari_wrappers_deprecated
# Or: from baselines.common.atari_wrappers_deprecated import wrap_dqn
def make_env(game_name):
    env = gym.make(game_name + "NoFrameskip-v4")
    env_monitored = SimpleMonitor(env)
    env = wrap_dqn(env_monitored)
    return env_monitored, env
Example 4: make_env
# Required import: from baselines.common import atari_wrappers_deprecated
# Or: from baselines.common.atari_wrappers_deprecated import wrap_dqn
def make_env(game_name):
    env = gym.make(game_name + "NoFrameskip-v4")
    env = SimpleMonitor(env)
    env = wrap_dqn(env)
    return env
Example 5: make_env
# Required import: from baselines.common import atari_wrappers_deprecated
# Or: from baselines.common.atari_wrappers_deprecated import wrap_dqn
def make_env(game_name):
    env = gym.make(game_name + "NoFrameskip-v4")
    # SimpleMonitor records rewards and step counts in info before the environment is wrapped
    monitored_env = SimpleMonitor(env)
    # wrap_dqn applies a set of modifications that simplify the observation space (downsampling, grayscale)
    env = wrap_dqn(monitored_env)
    return env, monitored_env
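The pair returned by Example 5 is typically consumed by stepping the fully wrapped env while reading episode statistics that SimpleMonitor injects into the info dict. The keys used below ("rewards", "steps") are an assumption based on the old baselines deepq Atari experiments and may differ in other versions.

env, monitored_env = make_env("Pong")
obs = env.reset()
for _ in range(1000):
    # random actions, purely for illustration
    obs, rew, done, info = env.step(env.action_space.sample())
    if done:
        # "rewards" is assumed to hold the undiscounted return of each finished episode
        print("episodes finished:", len(info.get("rewards", [])))
        obs = env.reset()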
Example 6: main
# Required import: from baselines.common import atari_wrappers_deprecated
# Or: from baselines.common.atari_wrappers_deprecated import wrap_dqn
def main():
    env = gym.make("PongNoFrameskip-v4")
    env = ScaledFloatFrame(wrap_dqn(env))
    act = deepq.load("pong_model.pkl")

    while True:
        obs, done = env.reset(), False
        episode_rew = 0
        while not done:
            env.render()
            # obs[None] adds a batch dimension; act returns a batch of actions, so take the first one
            obs, rew, done, _ = env.step(act(obs[None])[0])
            episode_rew += rew
        print("Episode reward", episode_rew)
Example 7: make_env
# Required import: from baselines.common import atari_wrappers_deprecated
# Or: from baselines.common.atari_wrappers_deprecated import wrap_dqn
def make_env(game_name):
    env = gym.make(game_name + "NoFrameskip-v4")
    env_monitored = bench.Monitor(env, None)
    env = wrap_dqn(env_monitored)
    return env_monitored, env
Example 8: make_env
# Required import: from baselines.common import atari_wrappers_deprecated
# Or: from baselines.common.atari_wrappers_deprecated import wrap_dqn
def make_env(game_name):
    env = gym.make(game_name + "NoFrameskip-v4")
    env = bench.Monitor(env, None)
    env = wrap_dqn(env)
    return env
Example 9: make_env
# Required import: from baselines.common import atari_wrappers_deprecated
# Or: from baselines.common.atari_wrappers_deprecated import wrap_dqn
def make_env(game_name):
    env = gym.make(game_name + "NoFrameskip-v4")
    # SimpleMonitor records rewards and step counts in info before the environment is wrapped
    monitored_env = bench.SimpleMonitor(env, logger.get_dir())
    # wrap_dqn applies a set of modifications that simplify the observation space (downsampling, grayscale)
    env = wrap_dqn(monitored_env)
    return env, monitored_env
Example 10: make_env
# Required import: from baselines.common import atari_wrappers_deprecated
# Or: from baselines.common.atari_wrappers_deprecated import wrap_dqn
def make_env(game_name):
    env = gym.make(game_name + "NoFrameskip-v4")
    # SimpleMonitor records rewards and step counts in info before the environment is wrapped
    monitored_env = bench.SimpleMonitor(env)
    env = wrap_dqn(monitored_env)
    return env, monitored_env
Example 11: make_env
# Required import: from baselines.common import atari_wrappers_deprecated
# Or: from baselines.common.atari_wrappers_deprecated import wrap_dqn
def make_env(game_name):
    env = gym.make(game_name + "NoFrameskip-v4")
    # Monitor records rewards and step counts in info before the environment is wrapped
    monitored_env = bench.Monitor(env, logger.get_dir())
    # wrap_dqn applies a set of modifications that simplify the observation space (downsampling, grayscale)
    env = wrap_dqn(monitored_env)
    return env, monitored_env
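A minimal usage sketch for the bench.Monitor / logger variants (Examples 7 to 11), assuming the standard baselines logger module: logger.configure sets the directory that logger.get_dir() later returns, so it should run before make_env. The output directory is hypothetical.

import gym
from baselines import bench, logger
from baselines.common.atari_wrappers_deprecated import wrap_dqn

logger.configure(dir="/tmp/pong_dqn")   # hypothetical log directory
env, monitored_env = make_env("Pong")   # make_env as defined in Example 11
print(env.observation_space, env.action_space)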