This article collects typical usage examples of the Python class pysc2.env.sc2_env.SC2Env. If you have been wondering how exactly sc2_env.SC2Env is used, or are looking for working examples of it, the curated code samples here should help. You can also read more about the containing module, pysc2.env.sc2_env.
Below are 15 code examples of sc2_env.SC2Env, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
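Before diving into the examples, here is a minimal sketch of the construction pattern most of them share: create the environment as a context manager, then reset and step it. The map name, race, and interface dimensions below are illustrative choices for this sketch, not values taken from any particular example.

# Minimal sketch (not from the examples below): create an SC2Env,
# reset it, and issue no-op actions until the first episode ends.
from pysc2.env import sc2_env
from pysc2.lib import actions

with sc2_env.SC2Env(
        map_name="MoveToBeacon",                      # illustrative map choice
        players=[sc2_env.Agent(sc2_env.Race.terran)],
        agent_interface_format=sc2_env.AgentInterfaceFormat(
            feature_dimensions=sc2_env.Dimensions(screen=84, minimap=64)),
        step_mul=8) as env:
    timesteps = env.reset()
    while not timesteps[0].last():
        timesteps = env.step([actions.FUNCTIONS.no_op()])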
Example 1: __init__
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def __init__(self, name, trainer, model_path, global_episodes):
    self.name = "worker_" + str(name)
    self.number = name
    self.model_path = model_path
    self.trainer = trainer
    self.global_episodes = global_episodes
    self.increment = self.global_episodes.assign_add(1)
    self.episode_rewards = []
    self.episode_lengths = []
    self.episode_mean_values = []
    self.summary_writer = tf.summary.FileWriter("train_" + str(self.number))
    # Create the local copy of the network and the tensorflow op to copy global parameters to the local network
    self.local_AC = AC_Network(self.name, trainer)
    self.update_local_ops = update_target_graph('global', self.name)
    self.env = sc2_env.SC2Env(map_name="DefeatRoaches")
Example 2: __init__
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def __init__(self, name, trainer, model_path, global_episodes, map_name, action_spec, observation_spec):
    self.name = "worker_" + str(name)
    self.number = name
    self.model_path = model_path
    self.trainer = trainer
    self.global_episodes = global_episodes
    self.increment = self.global_episodes.assign_add(1)
    self.episode_rewards = []
    self.episode_lengths = []
    self.episode_mean_values = []
    self.summary_writer = tf.summary.FileWriter("train_" + str(self.number))
    # Create the local copy of the network and the tensorflow op to copy global parameters to the local network
    self.local_AC = AC_Network(self.name, trainer, action_spec, observation_spec)
    self.update_local_ops = update_target_graph('global', self.name)
    print('Initializing environment #{}...'.format(self.number))
    self.env = sc2_env.SC2Env(map_name=map_name)
    self.action_spec = action_spec
    self.observation_spec = observation_spec
Example 3: __init__
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def __init__(self, name, trainer, model_path, global_episodes, global_steps, map_name, action_spec, observation_spec):
    self.name = "worker_" + str(name)
    self.number = name
    self.model_path = model_path
    self.trainer = trainer
    self.global_episodes = global_episodes
    self.increment_global_episodes = self.global_episodes.assign_add(1)
    self.global_steps = global_steps
    self.increment_global_steps = self.global_steps.assign_add(1)
    self.episode_rewards = []
    self.episode_lengths = []
    self.episode_mean_values = []
    self.summary_writer = tf.summary.FileWriter("train_" + str(self.number))
    self.action_spec = action_spec
    self.observation_spec = observation_spec
    # Create the local copy of the network and the tensorflow op to copy global parameters to the local network
    self.local_AC = AC_Network(self.name, trainer, action_spec, observation_spec)
    self.update_local_ops = update_target_graph('global', self.name)
    print('Initializing environment #{}...'.format(self.number))
    self.env = sc2_env.SC2Env(map_name=map_name)
Example 4: run_thread
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def run_thread(agent_classes, players, map_name, visualize):
    """Run one thread worth of the environment with agents."""
    with sc2_env.SC2Env(
            map_name=map_name,
            battle_net_map=FLAGS.battle_net_map,
            players=players,
            agent_interface_format=sc2_env.parse_agent_interface_format(
                feature_screen=FLAGS.feature_screen_size,
                feature_minimap=FLAGS.feature_minimap_size,
                rgb_screen=FLAGS.rgb_screen_size,
                rgb_minimap=FLAGS.rgb_minimap_size,
                action_space=FLAGS.action_space,
                use_feature_units=FLAGS.use_feature_units,
                use_raw_units=FLAGS.use_raw_units),
            step_mul=FLAGS.step_mul,
            game_steps_per_episode=FLAGS.game_steps_per_episode,
            disable_fog=FLAGS.disable_fog,
            visualize=visualize) as env:
        env = available_actions_printer.AvailableActionsPrinter(env)
        agents = [agent_cls() for agent_cls in agent_classes]
        run_loop.run_loop(agents, env, FLAGS.max_agent_steps, FLAGS.max_episodes)
        if FLAGS.save_replay:
            env.save_replay(agent_classes[0].__name__)
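Note that run_thread reads a number of absl flags (FLAGS.step_mul, FLAGS.feature_screen_size, and so on) that must be defined by the launcher script. As a hedged sketch, the definitions it expects might look like the following; the flag names come from the snippet above, but the default values and help strings are assumptions.

# Illustrative absl flag definitions assumed by run_thread above;
# defaults and help texts are guesses, only the names are from the snippet.
from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_bool("battle_net_map", False, "Use the battle.net map version.")
flags.DEFINE_integer("feature_screen_size", 84, "Resolution of the feature screen.")
flags.DEFINE_integer("feature_minimap_size", 64, "Resolution of the feature minimap.")
flags.DEFINE_integer("rgb_screen_size", None, "Resolution of the rendered screen.")
flags.DEFINE_integer("rgb_minimap_size", None, "Resolution of the rendered minimap.")
flags.DEFINE_enum("action_space", None, ["FEATURES", "RGB", "RAW"], "Action space to use.")
flags.DEFINE_bool("use_feature_units", False, "Include feature-unit observations.")
flags.DEFINE_bool("use_raw_units", False, "Include raw-unit observations.")
flags.DEFINE_integer("step_mul", 8, "Game steps per agent step.")
flags.DEFINE_integer("game_steps_per_episode", None, "Game steps per episode (None for map default).")
flags.DEFINE_bool("disable_fog", False, "Whether to disable fog of war.")
flags.DEFINE_integer("max_agent_steps", 0, "Total agent steps (0 for unlimited).")
flags.DEFINE_integer("max_episodes", 0, "Total episodes (0 for unlimited).")
flags.DEFINE_bool("save_replay", True, "Whether to save a replay at the end.")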
Example 5: test_move_to_beacon
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def test_move_to_beacon(self):
    with sc2_env.SC2Env(
            map_name="MoveToBeacon",
            players=[sc2_env.Agent(sc2_env.Race.terran)],
            agent_interface_format=sc2_env.AgentInterfaceFormat(
                feature_dimensions=sc2_env.Dimensions(
                    screen=84,
                    minimap=64)),
            step_mul=self.step_mul,
            game_steps_per_episode=self.steps * self.step_mul) as env:
        agent = scripted_agent.MoveToBeacon()
        run_loop.run_loop([agent], env, self.steps)

    # Get some points
    self.assertLessEqual(agent.episodes, agent.reward)
    self.assertEqual(agent.steps, self.steps)
Example 6: test_collect_mineral_shards_feature_units
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def test_collect_mineral_shards_feature_units(self):
    with sc2_env.SC2Env(
            map_name="CollectMineralShards",
            players=[sc2_env.Agent(sc2_env.Race.terran)],
            agent_interface_format=sc2_env.AgentInterfaceFormat(
                feature_dimensions=sc2_env.Dimensions(
                    screen=84,
                    minimap=64),
                use_feature_units=True),
            step_mul=self.step_mul,
            game_steps_per_episode=self.steps * self.step_mul) as env:
        agent = scripted_agent.CollectMineralShardsFeatureUnits()
        run_loop.run_loop([agent], env, self.steps)

    # Get some points
    self.assertLessEqual(agent.episodes, agent.reward)
    self.assertEqual(agent.steps, self.steps)
Example 7: test_defeat_roaches
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def test_defeat_roaches(self):
    with sc2_env.SC2Env(
            map_name="DefeatRoaches",
            players=[sc2_env.Agent(sc2_env.Race.terran)],
            agent_interface_format=sc2_env.AgentInterfaceFormat(
                feature_dimensions=sc2_env.Dimensions(
                    screen=84,
                    minimap=64)),
            step_mul=self.step_mul,
            game_steps_per_episode=self.steps * self.step_mul) as env:
        agent = scripted_agent.DefeatRoaches()
        run_loop.run_loop([agent], env, self.steps)

    # Get some points
    self.assertLessEqual(agent.episodes, agent.reward)
    self.assertEqual(agent.steps, self.steps)
Example 8: test_respects_override
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def test_respects_override(self):
    with sc2_env.SC2Env(
            map_name="DefeatRoaches",
            players=[sc2_env.Agent(sc2_env.Race.random)],
            step_mul=1,
            agent_interface_format=AGENT_INTERFACE_FORMAT) as env:
        expected_game_loop = 0
        for delta in range(10):
            timestep = env.step(
                actions=[actions.FUNCTIONS.no_op()],
                step_mul=delta)
            expected_game_loop += delta
            self.assertEqual(
                timestep[0].observation.game_loop[0],
                expected_game_loop)
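What this test demonstrates: env.step is given an explicit step_mul argument, which overrides the step_mul=1 passed to the constructor for that particular call, so the game loop advances by exactly delta game steps on each iteration.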
Example 9: test_random_agent
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def test_random_agent(self, agent_interface_format):
    steps = 250
    step_mul = 8
    with sc2_env.SC2Env(
            map_name=["Simple64", "Simple96"],
            players=[sc2_env.Agent([sc2_env.Race.random, sc2_env.Race.terran]),
                     sc2_env.Bot([sc2_env.Race.zerg, sc2_env.Race.protoss],
                                 sc2_env.Difficulty.easy,
                                 [sc2_env.BotBuild.rush, sc2_env.BotBuild.timing])],
            agent_interface_format=agent_interface_format,
            step_mul=step_mul,
            game_steps_per_episode=steps * step_mul // 3) as env:
        agent = random_agent.RandomAgent()
        run_loop.run_loop([agent], env, steps)

    self.assertEqual(agent.steps, steps)
Example 10: test_defeat_zerglings
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def test_defeat_zerglings(self):
    FLAGS(sys.argv)
    with sc2_env.SC2Env(
            "DefeatZerglingsAndBanelings",
            step_mul=self.step_mul,
            visualize=True,
            game_steps_per_episode=self.steps * self.step_mul) as env:
        obs = env.step(actions=[sc2_actions.FunctionCall(_NO_OP, [])])
        player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE]
        # Break Point!!
        print(player_relative)
        agent = random_agent.RandomAgent()
        run_loop.run_loop([agent], env, self.steps)

    self.assertEqual(agent.steps, self.steps)
Example 11: main
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def main(unused_argv):
    agent = RawAgent()
    try:
        while True:
            with sc2_env.SC2Env(
                    map_name="Simple64",
                    players=[sc2_env.Agent(sc2_env.Race.protoss),
                             sc2_env.Bot(sc2_env.Race.protoss,
                                         sc2_env.Difficulty.very_easy)],
                    agent_interface_format=features.AgentInterfaceFormat(
                        action_space=actions.ActionSpace.RAW,
                        use_raw_units=True,
                        raw_resolution=64,
                    ),
            ) as env:
                run_loop.run_loop([agent], env)
    except KeyboardInterrupt:
        pass
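RawAgent is defined elsewhere in the original script. For context, a minimal stand-in compatible with the raw interface configured above might look like the sketch below; the class body is an assumption for illustration, not the original agent.

# Hypothetical minimal RawAgent; the real agent in the source project
# presumably does more than issue no-ops.
from pysc2.agents import base_agent
from pysc2.lib import actions

class RawAgent(base_agent.BaseAgent):
    def step(self, obs):
        super(RawAgent, self).step(obs)
        # With use_raw_units=True, units are available in obs.observation.raw_units.
        return actions.RAW_FUNCTIONS.no_op()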
Example 12: main
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def main(unused_argv):
    agent1 = SmartAgent()
    agent2 = RandomAgent()
    try:
        with sc2_env.SC2Env(
                map_name="Simple64",
                players=[sc2_env.Agent(sc2_env.Race.terran),
                         sc2_env.Agent(sc2_env.Race.terran)],
                agent_interface_format=features.AgentInterfaceFormat(
                    action_space=actions.ActionSpace.RAW,
                    use_raw_units=True,
                    raw_resolution=64,
                ),
                step_mul=48,
                disable_fog=True,
        ) as env:
            run_loop.run_loop([agent1, agent2], env, max_episodes=1000)
    except KeyboardInterrupt:
        pass
Example 13: main
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def main(unused_argv):
    bm = BattleManager()
    agent1 = PredictorAgent(bm)
    agent2 = EnemyAgent(bm)
    try:
        with sc2_env.SC2Env(
                map_name="Flat128",
                players=[sc2_env.Agent(sc2_env.Race.terran),
                         sc2_env.Agent(sc2_env.Race.terran)],
                agent_interface_format=features.AgentInterfaceFormat(
                    action_space=actions.ActionSpace.RAW,
                    use_raw_units=True,
                    raw_resolution=64,
                ),
                step_mul=128,
                disable_fog=True,
        ) as env:
            run_loop.run_loop([agent1, agent2], env, max_episodes=20)
    except KeyboardInterrupt:
        pass
Example 14: start
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def start(self):
    # importing here to lazy-load
    from pysc2.env import sc2_env

    # fail-safe if not executed as an absl app
    if not flags.FLAGS.is_parsed():
        flags.FLAGS(sys.argv)

    self._env = sc2_env.SC2Env(
        map_name=self.id,
        visualize=self.render,
        agent_interface_format=[features.parse_agent_interface_format(
            feature_screen=self.spatial_dim,
            feature_minimap=self.spatial_dim,
            rgb_screen=None,
            rgb_minimap=None
        )],
        step_mul=self.step_mul,
        players=[sc2_env.Agent(sc2_env.Race.terran)])
Example 15: test_defeat_zerglings
# Required module: from pysc2.env import sc2_env [as alias]
# Or: from pysc2.env.sc2_env import SC2Env [as alias]
def test_defeat_zerglings(self):
    agent_format = sc2_env.AgentInterfaceFormat(
        feature_dimensions=sc2_env.Dimensions(
            screen=(32, 32),
            minimap=(32, 32),
        )
    )
    with sc2_env.SC2Env(
            map_name="DefeatZerglingsAndBanelings",
            step_mul=self.step_mul,
            visualize=True,
            agent_interface_format=[agent_format],
            game_steps_per_episode=self.steps * self.step_mul) as env:
        obs = env.step(actions=[sc2_actions.FunctionCall(_NO_OP, [])])
        player_relative = obs[0].observation["feature_screen"][_PLAYER_RELATIVE]
        # Break Point!!
        print(player_relative)
        agent = random_agent.RandomAgent()
        run_loop.run_loop([agent], env, self.steps)

    self.assertEqual(agent.steps, self.steps)