This article collects typical usage examples of the Python method baselines.deepq.learn. If you have been wondering how deepq.learn works, how to call it, or what real usage looks like, the curated code samples below may help. You can also explore further usage examples of the module this method belongs to, baselines.deepq.
The following shows 12 code examples of the deepq.learn method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: main
# Required import: from baselines import deepq [as alias]
# Or: from baselines.deepq import learn [as alias]
def main():
    env = RacecarGymEnv(renders=False, isDiscrete=True)
    model = deepq.models.mlp([64])
    act = deepq.learn(
        env,
        q_func=model,
        lr=1e-3,
        max_timesteps=10000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
        callback=callback
    )
    print("Saving model to racecar_model.pkl")
    act.save("racecar_model.pkl")
Example 2: main
# Required import: from baselines import deepq [as alias]
# Or: from baselines.deepq import learn [as alias]
def main():
    env = RacecarZEDGymEnv(renders=False, isDiscrete=True)
    model = deepq.models.cnn_to_mlp(
        convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
        hiddens=[256],
        dueling=False
    )
    act = deepq.learn(
        env,
        q_func=model,
        lr=1e-3,
        max_timesteps=10000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
        callback=callback
    )
    print("Saving model to racecar_zed_model.pkl")
    act.save("racecar_zed_model.pkl")
Example 3: main
# Required import: from baselines import deepq [as alias]
# Or: from baselines.deepq import learn [as alias]
def main():
    env = KukaCamGymEnv(renders=False, isDiscrete=True)
    model = deepq.models.cnn_to_mlp(
        convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
        hiddens=[256],
        dueling=False
    )
    act = deepq.learn(
        env,
        q_func=model,
        lr=1e-3,
        max_timesteps=10000000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
        callback=callback
    )
    print("Saving model to kuka_cam_model.pkl")
    act.save("kuka_cam_model.pkl")
Example 4: main
# Required import: from baselines import deepq [as alias]
# Or: from baselines.deepq import learn [as alias]
def main():
    env = CartPoleBulletEnv(renders=False)
    model = deepq.models.mlp([64])
    act = deepq.learn(
        env,
        q_func=model,
        lr=1e-3,
        max_timesteps=100000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
        callback=callback
    )
    print("Saving model to cartpole_model.pkl")
    act.save("cartpole_model.pkl")
Example 5: main
# Required import: from baselines import deepq [as alias]
# Or: from baselines.deepq import learn [as alias]
def main():
    env = KukaGymEnv(renders=False, isDiscrete=True)
    model = deepq.models.mlp([64])
    act = deepq.learn(
        env,
        q_func=model,
        lr=1e-3,
        max_timesteps=10000000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
        callback=callback
    )
    print("Saving model to kuka_model.pkl")
    act.save("kuka_model.pkl")
Example 6: main
# Required import: from baselines import deepq [as alias]
# Or: from baselines.deepq import learn [as alias]
def main():
    env = gym.make("MountainCar-v0")
    # Enabling layer_norm here is important for parameter space noise!
    model = deepq.models.mlp([64], layer_norm=True)
    act = deepq.learn(
        env,
        q_func=model,
        lr=1e-3,
        max_timesteps=100000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.1,
        print_freq=10,
        param_noise=True
    )
    print("Saving model to mountaincar_model.pkl")
    act.save("mountaincar_model.pkl")
Example 7: main
# Required import: from baselines import deepq [as alias]
# Or: from baselines.deepq import learn [as alias]
def main():
    env = gym.make("CartPole-v0")
    model = deepq.models.mlp([64])
    act = deepq.learn(
        env,
        q_func=model,
        lr=1e-3,
        max_timesteps=100000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
        callback=callback
    )
    print("Saving model to cartpole_model.pkl")
    act.save("cartpole_model.pkl")
Example 8: main
# Required import: from baselines import deepq [as alias]
# Or: from baselines.deepq import learn [as alias]
def main():
    env = gym.make("PongNoFrameskip-v4")
    env = ScaledFloatFrame(wrap_dqn(env))
    model = deepq.models.cnn_to_mlp(
        convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
        hiddens=[256],
        dueling=True
    )
    act = deepq.learn(
        env,
        q_func=model,
        lr=1e-4,
        max_timesteps=2000000,
        buffer_size=10000,
        exploration_fraction=0.1,
        exploration_final_eps=0.01,
        train_freq=4,
        learning_starts=10000,
        target_network_update_freq=1000,
        gamma=0.99,
        prioritized_replay=True
    )
    act.save("pong_model.pkl")
    env.close()
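Example 8 relies on the wrap_dqn and ScaledFloatFrame Atari wrappers without showing where they come from. In older baselines releases they lived under baselines.common.atari_wrappers_deprecated; the module path has moved between versions, so treat the imports below as a best guess rather than the exact header of the original script.

# Assumed imports for Example 8 (older baselines releases; newer versions expose
# similar wrappers under baselines.common.atari_wrappers instead):
import gym
from baselines import deepq
from baselines.common.atari_wrappers_deprecated import wrap_dqn, ScaledFloatFrame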
Example 9: main
# Required import: from baselines import deepq [as alias]
# Or: from baselines.deepq import learn [as alias]
def main():
    env = gym.make("MountainCar-v0")
    # Enabling layer_norm here is important for parameter space noise!
    act = deepq.learn(
        env,
        network=models.mlp(num_hidden=64, num_layers=1),
        lr=1e-3,
        total_timesteps=100000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.1,
        print_freq=10,
        param_noise=True
    )
    print("Saving model to mountaincar_model.pkl")
    act.save("mountaincar_model.pkl")
Example 10: main
# Required import: from baselines import deepq [as alias]
# Or: from baselines.deepq import learn [as alias]
def main():
    env = gym.make("MountainCar-v0")
    act = deepq.learn(
        env,
        network=models.mlp(num_layers=1, num_hidden=64),
        total_timesteps=0,
        load_path='mountaincar_model.pkl'
    )
    while True:
        obs, done = env.reset(), False
        episode_rew = 0
        while not done:
            env.render()
            obs, rew, done, _ = env.step(act(obs[None])[0])
            episode_rew += rew
        print("Episode reward", episode_rew)
Example 11: main
# Required import: from baselines import deepq [as alias]
# Or: from baselines.deepq import learn [as alias]
def main():
    logger.configure()
    env = make_atari('PongNoFrameskip-v4')
    env = bench.Monitor(env, logger.get_dir())
    env = deepq.wrap_atari_dqn(env)
    model = deepq.learn(
        env,
        "conv_only",
        convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
        hiddens=[256],
        dueling=True,
        lr=1e-4,
        total_timesteps=int(1e7),
        buffer_size=10000,
        exploration_fraction=0.1,
        exploration_final_eps=0.01,
        train_freq=4,
        learning_starts=10000,
        target_network_update_freq=1000,
        gamma=0.99,
    )
    model.save('pong_model.pkl')
    env.close()
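Note that examples 9 through 11 use the newer deepq.learn signature (a network argument plus total_timesteps) rather than the older q_func/max_timesteps interface used elsewhere on this page. Following the pattern of example 10, a model saved this way could presumably be restored for evaluation by calling learn again with total_timesteps=0 and load_path; the snippet below is only a sketch under that assumption, reusing the network settings from example 11.

# Hypothetical evaluation-time reload (same env wrappers as in Example 11 assumed):
model = deepq.learn(
    env,
    "conv_only",
    convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
    hiddens=[256],
    dueling=True,
    total_timesteps=0,          # no further training
    load_path='pong_model.pkl', # weights saved by model.save(...) above
)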
Example 12: main
# Required import: from baselines import deepq [as alias]
# Or: from baselines.deepq import learn [as alias]
def main():
    env = gym.make("CartPole-v1")
    model = deepq.models.mlp([64])
    act = deepq.learn(
        env,
        q_func=model,
        lr=1e-3,
        max_timesteps=100000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
        callback=callback
    )
    print("Saving model to final_models/cartpole_model.pkl")
    act.save("final_models/cartpole_model.pkl")