This page collects typical usage examples of the Python method ray.rllib.evaluation.policy_evaluator.PolicyEvaluator.sample. If you are wondering what PolicyEvaluator.sample does, how to call it, or what working examples look like, the curated code examples below may help. You can also browse further usage examples of the containing class, ray.rllib.evaluation.policy_evaluator.PolicyEvaluator.
The following shows 15 code examples of the PolicyEvaluator.sample method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
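Before the individual tests, here is a minimal usage sketch of the pattern they all share: construct a PolicyEvaluator from an env_creator and a policy graph class, then call sample() to collect a batch of experience. This sketch is not one of the 15 examples below; the RandomPolicyGraph class is purely illustrative, and the PolicyGraph import path is an assumption based on the RLlib version these tests target (around Ray 0.6, where PolicyEvaluator still exists).

import gym
import ray
from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator
from ray.rllib.evaluation.policy_graph import PolicyGraph  # assumed import path


class RandomPolicyGraph(PolicyGraph):
    """Illustrative stand-in policy that picks random actions."""

    def compute_actions(self, obs_batch, state_batches=None, **kwargs):
        # Return (actions, RNN state outputs, extra fetches).
        return [self.action_space.sample() for _ in obs_batch], [], {}


ray.init()
ev = PolicyEvaluator(
    env_creator=lambda _: gym.make("CartPole-v0"),
    policy_graph=RandomPolicyGraph,
    batch_steps=100,                   # target env steps per sample() call
    batch_mode="truncate_episodes")    # cut episodes at the batch boundary
batch = ev.sample()                    # returns a SampleBatch of experience
print(batch.count, batch["obs"].shape)

Each test below varies this same construction, changing the environment, the policy graph, batch_steps, or batch_mode, and then asserts properties of the batch returned by sample().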
Example 1: testFilterSync
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testFilterSync(self):
    ev = PolicyEvaluator(
        env_creator=lambda _: gym.make("CartPole-v0"),
        policy_graph=MockPolicyGraph,
        sample_async=True,
        observation_filter="ConcurrentMeanStdFilter")
    time.sleep(2)
    ev.sample()
    filters = ev.get_filters(flush_after=True)
    obs_f = filters["default"]
    self.assertNotEqual(obs_f.rs.n, 0)
    self.assertNotEqual(obs_f.buffer.n, 0)
Example 2: testMetrics
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testMetrics(self):
    ev = PolicyEvaluator(
        env_creator=lambda _: MockEnv(episode_length=10),
        policy_graph=MockPolicyGraph,
        batch_mode="complete_episodes")
    remote_ev = PolicyEvaluator.as_remote().remote(
        env_creator=lambda _: MockEnv(episode_length=10),
        policy_graph=MockPolicyGraph,
        batch_mode="complete_episodes")
    ev.sample()
    ray.get(remote_ev.sample.remote())
    result = collect_metrics(ev, [remote_ev])
    self.assertEqual(result["episodes_this_iter"], 20)
    self.assertEqual(result["episode_reward_mean"], 10)
Example 3: testBatchesLargerWhenVectorized
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testBatchesLargerWhenVectorized(self):
    ev = PolicyEvaluator(
        env_creator=lambda _: MockEnv(episode_length=8),
        policy_graph=MockPolicyGraph,
        batch_mode="truncate_episodes",
        batch_steps=4,
        num_envs=4)
    batch = ev.sample()
    self.assertEqual(batch.count, 16)
    result = collect_metrics(ev, [])
    self.assertEqual(result["episodes_this_iter"], 0)
    batch = ev.sample()
    result = collect_metrics(ev, [])
    self.assertEqual(result["episodes_this_iter"], 4)
Example 4: testMultiAgentSampleRoundRobin
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testMultiAgentSampleRoundRobin(self):
    act_space = gym.spaces.Discrete(2)
    obs_space = gym.spaces.Discrete(10)
    ev = PolicyEvaluator(
        env_creator=lambda _: RoundRobinMultiAgent(5, increment_obs=True),
        policy_graph={
            "p0": (MockPolicyGraph, obs_space, act_space, {}),
        },
        policy_mapping_fn=lambda agent_id: "p0",
        batch_steps=50)
    batch = ev.sample()
    self.assertEqual(batch.count, 50)
    # since we round robin introduce agents into the env, some of the env
    # steps don't count as proper transitions
    self.assertEqual(batch.policy_batches["p0"].count, 42)
    self.assertEqual(batch.policy_batches["p0"]["obs"].tolist()[:10], [
        one_hot(0, 10),
        one_hot(1, 10),
        one_hot(2, 10),
        one_hot(3, 10),
        one_hot(4, 10),
    ] * 2)
    self.assertEqual(batch.policy_batches["p0"]["new_obs"].tolist()[:10], [
        one_hot(1, 10),
        one_hot(2, 10),
        one_hot(3, 10),
        one_hot(4, 10),
        one_hot(5, 10),
    ] * 2)
    self.assertEqual(batch.policy_batches["p0"]["rewards"].tolist()[:10],
                     [100, 100, 100, 100, 0] * 2)
    self.assertEqual(batch.policy_batches["p0"]["dones"].tolist()[:10],
                     [False, False, False, False, True] * 2)
    self.assertEqual(batch.policy_batches["p0"]["t"].tolist()[:10],
                     [4, 9, 14, 19, 24, 5, 10, 15, 20, 25])
Example 5: testCustomRNNStateValues
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testCustomRNNStateValues(self):
    h = {"some": {"arbitrary": "structure", "here": [1, 2, 3]}}

    class StatefulPolicyGraph(PolicyGraph):
        def compute_actions(self,
                            obs_batch,
                            state_batches,
                            prev_action_batch=None,
                            prev_reward_batch=None,
                            episodes=None,
                            **kwargs):
            return [0] * len(obs_batch), [[h] * len(obs_batch)], {}

        def get_initial_state(self):
            return [{}]  # empty dict

    ev = PolicyEvaluator(
        env_creator=lambda _: gym.make("CartPole-v0"),
        policy_graph=StatefulPolicyGraph,
        batch_steps=5)
    batch = ev.sample()
    self.assertEqual(batch.count, 5)
    self.assertEqual(batch["state_in_0"][0], {})
    self.assertEqual(batch["state_out_0"][0], h)
    self.assertEqual(batch["state_in_0"][1], h)
    self.assertEqual(batch["state_out_0"][1], h)
Example 6: testVectorEnvSupport
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testVectorEnvSupport(self):
    ev = PolicyEvaluator(
        env_creator=lambda _: MockVectorEnv(episode_length=20, num_envs=8),
        policy_graph=MockPolicyGraph,
        batch_mode="truncate_episodes",
        batch_steps=10)
    for _ in range(8):
        batch = ev.sample()
        self.assertEqual(batch.count, 10)
    result = collect_metrics(ev, [])
    self.assertEqual(result["episodes_this_iter"], 0)
    for _ in range(8):
        batch = ev.sample()
        self.assertEqual(batch.count, 10)
    result = collect_metrics(ev, [])
    self.assertEqual(result["episodes_this_iter"], 8)
Example 7: testCompleteEpisodes
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testCompleteEpisodes(self):
    ev = PolicyEvaluator(
        env_creator=lambda _: MockEnv(10),
        policy_graph=MockPolicyGraph,
        batch_steps=5,
        batch_mode="complete_episodes")
    batch = ev.sample()
    self.assertEqual(batch.count, 10)
Example 8: testExternalEnvHorizonNotSupported
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testExternalEnvHorizonNotSupported(self):
    ev = PolicyEvaluator(
        env_creator=lambda _: SimpleServing(MockEnv(25)),
        policy_graph=MockPolicyGraph,
        episode_horizon=20,
        batch_steps=10,
        batch_mode="complete_episodes")
    self.assertRaises(ValueError, lambda: ev.sample())
Example 9: testExternalEnvBadActions
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testExternalEnvBadActions(self):
    ev = PolicyEvaluator(
        env_creator=lambda _: SimpleServing(MockEnv(25)),
        policy_graph=BadPolicyGraph,
        sample_async=True,
        batch_steps=40,
        batch_mode="truncate_episodes")
    self.assertRaises(Exception, lambda: ev.sample())
Example 10: testAsync
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testAsync(self):
    ev = PolicyEvaluator(
        env_creator=lambda _: gym.make("CartPole-v0"),
        sample_async=True,
        policy_graph=MockPolicyGraph)
    batch = ev.sample()
    for key in ["obs", "actions", "rewards", "dones", "advantages"]:
        self.assertIn(key, batch)
    self.assertGreater(batch["advantages"][0], 1)
Example 11: testExternalEnvTruncateEpisodes
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testExternalEnvTruncateEpisodes(self):
    ev = PolicyEvaluator(
        env_creator=lambda _: SimpleServing(MockEnv(25)),
        policy_graph=MockPolicyGraph,
        batch_steps=40,
        batch_mode="truncate_episodes")
    for _ in range(3):
        batch = ev.sample()
        self.assertEqual(batch.count, 40)
Example 12: testRewardClipping
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testRewardClipping(self):
    # clipping on
    ev = PolicyEvaluator(
        env_creator=lambda _: MockEnv2(episode_length=10),
        policy_graph=MockPolicyGraph,
        clip_rewards=True,
        batch_mode="complete_episodes")
    self.assertEqual(max(ev.sample()["rewards"]), 1)
    result = collect_metrics(ev, [])
    self.assertEqual(result["episode_reward_mean"], 1000)

    # clipping off
    ev2 = PolicyEvaluator(
        env_creator=lambda _: MockEnv2(episode_length=10),
        policy_graph=MockPolicyGraph,
        clip_rewards=False,
        batch_mode="complete_episodes")
    self.assertEqual(max(ev2.sample()["rewards"]), 100)
    result2 = collect_metrics(ev2, [])
    self.assertEqual(result2["episode_reward_mean"], 1000)
Example 13: testCompleteEpisodesPacking
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testCompleteEpisodesPacking(self):
    ev = PolicyEvaluator(
        env_creator=lambda _: MockEnv(10),
        policy_graph=MockPolicyGraph,
        batch_steps=15,
        batch_mode="complete_episodes")
    batch = ev.sample()
    self.assertEqual(batch.count, 20)
    self.assertEqual(
        batch["t"].tolist(),
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Example 14: testExternalEnvOffPolicy
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testExternalEnvOffPolicy(self):
    ev = PolicyEvaluator(
        env_creator=lambda _: SimpleOffPolicyServing(MockEnv(25), 42),
        policy_graph=MockPolicyGraph,
        batch_steps=40,
        batch_mode="complete_episodes")
    for _ in range(3):
        batch = ev.sample()
        self.assertEqual(batch.count, 50)
        self.assertEqual(batch["actions"][0], 42)
        self.assertEqual(batch["actions"][-1], 42)
Example 15: testBaselinePerformance
# Required import: from ray.rllib.evaluation.policy_evaluator import PolicyEvaluator [as alias]
# Or: from ray.rllib.evaluation.policy_evaluator.PolicyEvaluator import sample [as alias]
def testBaselinePerformance(self):
    ev = PolicyEvaluator(
        env_creator=lambda _: gym.make("CartPole-v0"),
        policy_graph=MockPolicyGraph,
        batch_steps=100)
    start = time.time()
    count = 0
    while time.time() - start < 1:
        count += ev.sample().count
    print()
    print("Samples per second {}".format(count / (time.time() - start)))
    print()