本文整理汇总了Python中state.State.from_game方法的典型用法代码示例。如果您正苦于以下问题:Python State.from_game方法的具体用法?Python State.from_game怎么用?Python State.from_game使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类state.State
的用法示例。
在下文中一共展示了State.from_game方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: delta_callback
# 需要导入模块: from state import State [as 别名]
# 或者: from state.State import from_game [as 别名]
def delta_callback(self, delta, new_gamestate):
    """Q-learning update hook run after a turn's deltas are known.

    For every robot this agent controls, look up the action it last took,
    find its entry in ``delta``, build the resulting state from the new
    game info, and feed (current_state, future_state, action, reward) to
    the learner.

    Args:
        delta: iterable of per-robot delta records for the finished turn.
        new_gamestate: game-state object exposing ``get_game_info``.
    """
    future_game = new_gamestate.get_game_info(self.player_id)
    # BUG FIX: this line was a Python 2 print statement ('print "..."') in a
    # file that otherwise uses the print() function, which is a SyntaxError
    # on Python 3; also fixes the "calle" typo in the message.
    print("delta_callback called")
    print("Size of Q: " + str(len(self.qlearning.q.hash_matrix)))
    for (robot_loc, robot) in self.game.robots.items():
        # Only learn for robots we control; spawned/foreign robots may lack
        # a robot_id attribute entirely.
        if hasattr(robot, 'robot_id') and robot.robot_id in self.robot_ids:
            action = self.last_action[robot.robot_id]
            for delta_me in delta:
                if delta_me['loc'] == robot_loc:
                    # NOTE(review): delta entries are read both as a mapping
                    # (delta_me['loc']) and via attributes (delta_me.loc_end);
                    # assumes an attr-dict-like record type — confirm.
                    future_state = State.from_game(future_game,
                                                   delta_me.loc_end,
                                                   self.player_id)
                    reward = self.reward(delta_me)
                    self.qlearning.learn(self.current_state, future_state,
                                         action, reward)
示例2: _delta_callback
# 需要导入模块: from state import State [as 别名]
# 或者: from state.State import from_game [as 别名]
def _delta_callback(self, deltas, actions, new_state):
    """Record observed per-robot rewards in the Q table for a new turn.

    Once the turn counter has advanced past the last turn we processed
    (and a prior state exists), every non-spawn delta is turned into a
    (state, action) key whose value is the reward observed for that robot.

    NOTE(review): the original comment here said "detect new games", but
    the condition actually gates on turn progress — confirm intent.
    """
    if new_state.turn > self.last_turn and self.last_state is not None:
        game_info = new_state.get_game_info(self.player_id)
        state_template = State.from_game(game_info, self.player_id)
        for delta in deltas:
            # Freshly spawned robots report 0 hp and have no entry in
            # `actions`; skip them entirely.
            if delta.hp == 0:
                continue
            assert delta.loc in actions
            hashable_action = q_learning.QLearning.to_hashable_action(
                actions[delta.loc])
            # Each robot gets its own copy of the shared state template,
            # differing only in its own location.
            per_robot_state = copy.deepcopy(state_template)
            per_robot_state.robot_loc = delta.loc
            self.q[(per_robot_state, hashable_action)] = qrobot.Robot.reward(delta)
    self.last_turn = new_state.turn
    self.last_state = new_state
示例3: act
# 需要导入模块: from state import State [as 别名]
# 或者: from state.State import from_game [as 别名]
def act(self, game):
    """Choose an action for this robot using epsilon-greedy Q-learning.

    Builds the current State from the game, explores with a random action
    roughly 25% of the time, otherwise exploits the learner's prediction,
    records the chosen state/action for the later learning callback, and
    returns the action mapped to this robot's location.

    Args:
        game: the per-turn game object passed by the engine.

    Returns:
        The result of ``State.map_action(action, self.location)``.
    """
    # BUG FIX: the original computed `new_robot = self.robot_id in
    # self.robot_ids` BEFORE the add and never used the result (the name
    # suggests `not in` was intended) — dead code removed.
    self.robot_ids.add(self.robot_id)
    self.current_state = State.from_game(game, self.player_id,
                                         robot_loc=self.location)
    self.game = game
    # Epsilon-greedy exploration: randint(0, 3) < 1 fires with p = 1/4.
    if random.randint(0, 3) < 1:
        print("[Bot " + str(self.robot_id) + "] random action")
        action = self.get_random_action()
    else:
        action = self.qlearning.predict(self.current_state)
    self.last_states[self.robot_id] = self.current_state
    self.last_action[self.robot_id] = action
    return State.map_action(action, self.location)
示例4: test_loc_to_field_mapping
# 需要导入模块: from state import State [as 别名]
# 或者: from state.State import from_game [as 别名]
def test_loc_to_field_mapping(self):
    """Fields at robot locations map to FRIEND/ENEMY by owner id."""
    gstate = gamestate.GameState()
    my_id = 1
    opponent_id = 2
    my_locs = [(1, 8), (3, 3), (17, 9), (9, 1), (17, 11)]
    opponent_locs = [(1, 9), (4, 3), (16, 9), (10, 1)]
    robots = my_locs + opponent_locs  # kept from original (unused)
    # Populate the board with both players' robots.
    for loc in my_locs:
        gstate.add_robot(loc, my_id)
    for loc in opponent_locs:
        gstate.add_robot(loc, opponent_id)
    game = gstate.get_game_info(my_id)
    state = State.from_game(game, my_locs[0], my_id)
    # Every friendly location must read FRIEND, every hostile one ENEMY.
    for loc in my_locs:
        self.assertEqual(state.field(loc), State.FRIEND)
    for loc in opponent_locs:
        self.assertEqual(state.field(loc), State.ENEMY)