This article collects typical usage examples of the Python OpenNero module. If you have been wondering what OpenNero is for or how to use it in Python, the hand-picked snippets below should help.
The following 15 code examples of OpenNero are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: is_episode_over
def is_episode_over(self, agent):
    """
    is the current episode over for the agent?
    """
    if agent.group == 'Turret':
        return False
    team = agent.get_team()
    state = self.get_state(agent)
    dead = self.hitpoints > 0 and state.total_damage >= self.hitpoints
    old = self.lifetime > 0 and agent.step > 0 and 0 == agent.step % self.lifetime
    if agent.ai == 'qlearning':
        if dead or old:
            # simulate a respawn by moving this agent towards the spawn location.
            state.total_damage = 0
            state.randomize()
            agent.state.position = copy.copy(state.initial_position)
            agent.state.rotation = copy.copy(state.initial_rotation)
            agent.teleport()
        return False
    rtneat = OpenNero.get_ai("rtneat-%s" % team)
    if not rtneat:
        rtneat = OpenNero.get_ai("rtneatq-%s" % team)
    orphaned = rtneat and not rtneat.has_organism(agent)
    return orphaned or dead or old
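The rtneat / rtneatq lookup at the end of this example reappears in Examples 9 and 10 below. A minimal helper that factors out that pattern might look like the following sketch; the name get_team_ai is hypothetical, and only OpenNero.get_ai from the examples on this page is assumed.

import OpenNero

def get_team_ai(team):
    """Return the rtNEAT (or rtNEAT-Q) AI registered for a team, or None."""
    # try the plain rtNEAT key first, then fall back to the Q-learning hybrid
    ai = OpenNero.get_ai("rtneat-%s" % team)
    if not ai:
        ai = OpenNero.get_ai("rtneatq-%s" % team)
    return ai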
Example 2: load_team
def load_team(self, location, team=constants.OBJECT_TYPE_TEAM_0):
    NERO.module.NeroModule.load_team(self, location, team)
    rtneat = OpenNero.get_ai('rtneat-%s' % team)
    if rtneat:
        rtneat.set_lifetime(sys.maxint)
        rtneat.disable_evolution()
    OpenNero.disable_ai() # don't run until the start button is pressed
Example 3: setup_sandbox
def setup_sandbox(self):
    """
    setup the sandbox environment
    """
    OpenNero.getSimContext().delay = 0.0
    self.environment = RoombaEnvironment(constants.XDIM, constants.YDIM)
    OpenNero.set_environment(self.environment)
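The same create-and-register sequence appears whenever a mod boots: build the environment, hand it to the engine, spawn an agent, then enable AI. Below is a sketch of that bootstrap, assuming only calls that appear on this page (OpenNero.getSimContext, OpenNero.set_environment, common.addObject, OpenNero.enable_ai); the agent XML path is a placeholder.

import OpenNero
import common

def setup_and_run(environment, agent_xml="data/shapes/character/ExampleAgent.xml"):
    # run the simulation with no artificial frame delay
    OpenNero.getSimContext().delay = 0.0
    # register the environment with the engine before spawning agents
    OpenNero.set_environment(environment)
    # spawn a single agent near the origin and start the AI loop
    common.addObject(agent_xml,
                     OpenNero.Vector3f(0, 0, 2),
                     OpenNero.Vector3f(0, 0, 0),
                     scale=OpenNero.Vector3f(1, 1, 1))
    OpenNero.enable_ai()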
Example 4: Match
def Match(team0, team1):
    '''Run a single battle between two population files.'''
    mod = module.getMod()
    mod.load_team(team0, constants.OBJECT_TYPE_TEAM_0)
    mod.load_team(team1, constants.OBJECT_TYPE_TEAM_1)
    mod.set_speedup(100)
    OpenNero.enable_ai()
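Calling Match only takes the two saved population files; a minimal usage sketch follows (the file names are placeholders, and in practice the OpenNero engine, not a standalone script, drives this code).

# hypothetical population files saved from earlier training runs
team0 = 'pops/team_a.pop'
team1 = 'pops/team_b.pop'
Match(team0, team1)  # loads both teams, sets the speedup, and enables AI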
Example 5: set_environment
def set_environment(self, env):
    self.environment = env
    for id in self.wall_ids: # delete the walls
        common.removeObject(id)
    del self.wall_ids[:] # clear the ids
    OpenNero.set_environment(env)
    common.addObject(
        "data/shapes/cube/WhiteCube.xml",
        OpenNero.Vector3f(1 * constants.GRID_DX, 2 * constants.GRID_DY, 0 * constants.GRID_DZ),
        OpenNero.Vector3f(0, 0, 0),
        scale=OpenNero.Vector3f(0.25, 0.25, 4),
    )
    common.addObject(
        "data/shapes/cube/WhiteCube.xml",
        OpenNero.Vector3f(2 * constants.GRID_DX, 2 * constants.GRID_DY, 0 * constants.GRID_DZ),
        OpenNero.Vector3f(0, 0, 0),
        scale=OpenNero.Vector3f(0.25, 0.25, 4),
    )
    common.addObject(
        "data/shapes/cube/WhiteCube.xml",
        OpenNero.Vector3f(3 * constants.GRID_DX, 2 * constants.GRID_DY, 0 * constants.GRID_DZ),
        OpenNero.Vector3f(0, 0, 0),
        scale=OpenNero.Vector3f(0.25, 0.25, 4),
    )
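The three wall segments differ only in their x grid coordinate, so the repeated common.addObject calls can be collapsed into a loop. Below is a behavior-preserving sketch of that part of the method; if the returned ids need to be tracked for later removal, they could also be appended to self.wall_ids (an assumption, since the excerpt does not show it).

    for x in (1, 2, 3):
        common.addObject(
            "data/shapes/cube/WhiteCube.xml",
            OpenNero.Vector3f(x * constants.GRID_DX, 2 * constants.GRID_DY, 0 * constants.GRID_DZ),
            OpenNero.Vector3f(0, 0, 0),
            scale=OpenNero.Vector3f(0.25, 0.25, 4),
        )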
Example 6: snapshot
def snapshot(self):
    print 'snapshot was called'
    if os.access('Hw5/snapshots/color/', os.W_OK):
        filename = 'Hw5/snapshots/color/' + str(time.time()*100)[:-2] + '.png'
        OpenNero.getSimContext().getActiveCamera().snapshot(filename)
        # Launch python script to show this image
        os.system('python Hw5/show_image.py "' + filename + '"')
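A variant of the same idea that creates the snapshot directory on demand and passes the file name to the viewer script as a separate argument instead of building a shell string; only getSimContext().getActiveCamera().snapshot from the example above is assumed, and the viewer script path is the one used there.

import os
import time
import subprocess
import OpenNero

def snapshot_to(directory='Hw5/snapshots/color'):
    if not os.path.isdir(directory):
        os.makedirs(directory)  # create the snapshot folder on first use
    filename = os.path.join(directory, '%d.png' % int(time.time() * 100))
    OpenNero.getSimContext().getActiveCamera().snapshot(filename)
    # hand the file name to the viewer as its own argument (no shell quoting issues)
    subprocess.Popen(['python', 'Hw5/show_image.py', filename])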
Example 7: load_rtneat
def load_rtneat(self, location, pop, team=constants.OBJECT_TYPE_TEAM_0):
    location = os.path.relpath("/") + location
    if os.path.exists(location):
        OpenNero.set_ai("rtneat-%s" % team, OpenNero.RTNEAT(
            str(location), "data/ai/neat-params.dat",
            constants.pop_size,
            OpenNero.get_environment().agent_info.reward))
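Once load_rtneat has registered the AI under "rtneat-%s" % team, the same object can be fetched back and configured, which is exactly what Examples 2 and 9 do. A short sketch; the lifetime value is an arbitrary placeholder.

team = constants.OBJECT_TYPE_TEAM_0
rtneat = OpenNero.get_ai("rtneat-%s" % team)  # fetch what load_rtneat registered
if rtneat:
    rtneat.set_lifetime(300)  # hypothetical lifetime (in steps); see Example 9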
Example 8: set_display_hint
def set_display_hint(self):
    """
    set the display hint above the agent's head (toggled with F2)
    """
    display_hint = constants.getDisplayHint()
    if display_hint:
        if display_hint == 'fitness':
            self.state.label = '%.2f' % self.org.fitness
        elif display_hint == 'time alive':
            self.state.label = str(self.org.time_alive)
        elif display_hint == 'hit points':
            self.state.label = ''.join('.' for i in range(int(5*OpenNero.get_environment().get_hitpoints(self))))
        elif display_hint == 'id':
            self.state.label = str(self.org.genome.id)
        elif display_hint == 'champion':
            if self.org.champion:
                self.state.label = 'champ!'
            else:
                self.state.label = ''
        elif display_hint == 'rank':
            self.state.label = str(self.org.rank)
        elif display_hint == 'debug':
            self.state.label = str(OpenNero.get_environment().get_state(self))
        else:
            self.state.label = '?'
    else:
        # the first time we switch away from displaying stuff,
        # change the window caption
        if self.state.label:
            self.state.label = ""
Example 9: ltChange
def ltChange(self, value):
    self.environment.lifetime = value
    for team in constants.TEAMS:
        rtneat = OpenNero.get_ai("rtneat-%s" % team)
        if not rtneat:
            rtneat = OpenNero.get_ai("rtneatq-%s" % team)
        if rtneat:
            rtneat.set_lifetime(value)
Example 10: set_weight
def set_weight(self, key, value):
    self.reward_weights[key] = value
    for team in self.teams:
        rtneat = OpenNero.get_ai("rtneat-%s" % team)
        if not rtneat:
            rtneat = OpenNero.get_ai("rtneatq-%s" % team)
        if rtneat:
            rtneat.set_weight(constants.FITNESS_INDEX[key], value)
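With the get_team_ai helper sketched after Example 1, both this method and ltChange in Example 9 reduce to a single lookup per team; for instance:

def set_weight(self, key, value):
    self.reward_weights[key] = value
    for team in self.teams:
        rtneat = get_team_ai(team)  # helper sketched after Example 1
        if rtneat:
            rtneat.set_weight(constants.FITNESS_INDEX[key], value)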
Example 11: toggle_ai_callback
def toggle_ai_callback():
    global ai_state
    OpenNero.toggle_ai()
    if not ai_state:
        module.getMod().start_rtneat()
        ai_state = "Started"
    elif ai_state == "Started":
        ai_state = "Paused"
    elif ai_state == "Paused":
        ai_state = "Started"
Example 12: start_nlp_extended
def start_nlp_extended(): # Natural Language Processing
    """ start the tower demo """
    getMod().num_disks = 3
    OpenNero.disable_ai()
    getMod().stop_agent()
    env = TowerEnvironment()
    env.initialize_blocks()
    getMod().set_environment(env)
    getMod().agent_id = common.addObject(
        "data/shapes/character/MyNLPRobot.xml",
        OpenNero.Vector3f(TowerofHanoi.constants.GRID_DX, TowerofHanoi.constants.GRID_DY, 2),
        type=TowerofHanoi.constants.AGENT_MASK,
        scale=OpenNero.Vector3f(3, 3, 3))
    OpenNero.enable_ai()
Example 13: start_tower1
def start_tower1(self): # Problem reduction
    """ start the tower demo """
    self.num_disks = 3
    OpenNero.disable_ai()
    self.stop_agent()
    env = TowerEnvironment()
    env.initialize_blocks()
    self.set_environment(env)
    self.agent_id = common.addObject(
        "data/shapes/character/BlocksRobot.xml",
        OpenNero.Vector3f(constants.GRID_DX, constants.GRID_DY, 2),
        type=constants.AGENT_MASK,
        scale=OpenNero.Vector3f(3, 3, 3))
    OpenNero.enable_ai()
Example 14: start_my_planner_2_disk
def start_my_planner_2_disk():
    """ start the tower demo """
    getMod().num_disks = 2
    OpenNero.disable_ai()
    getMod().stop_agent()
    env = TowerEnvironment()
    env.initialize_blocks()
    getMod().set_environment(env)
    getMod().agent_id = common.addObject(
        "data/shapes/character/MyPlanningRobot2.xml",
        OpenNero.Vector3f(TowerofHanoi.constants.GRID_DX, TowerofHanoi.constants.GRID_DY, 2),
        type=TowerofHanoi.constants.AGENT_MASK,
        scale=OpenNero.Vector3f(3, 3, 3))
    OpenNero.enable_ai()
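Examples 12 through 14 differ only in the robot XML file and the number of disks, so the shared steps could be factored into a single starter. A sketch under that assumption; the function name and its defaults are hypothetical.

def start_tower_demo(robot_xml, num_disks=3):
    """Start a Tower of Hanoi demo with the given robot and disk count."""
    getMod().num_disks = num_disks
    OpenNero.disable_ai()  # pause the AI while the world is rebuilt
    getMod().stop_agent()
    env = TowerEnvironment()
    env.initialize_blocks()
    getMod().set_environment(env)
    getMod().agent_id = common.addObject(
        robot_xml,
        OpenNero.Vector3f(TowerofHanoi.constants.GRID_DX, TowerofHanoi.constants.GRID_DY, 2),
        type=TowerofHanoi.constants.AGENT_MASK,
        scale=OpenNero.Vector3f(3, 3, 3))
    OpenNero.enable_ai()

# the demos above then become one-liners, e.g.:
# start_tower_demo("data/shapes/character/MyNLPRobot.xml", 3)
# start_tower_demo("data/shapes/character/MyPlanningRobot2.xml", 2)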
Example 15: start_fps
def start_fps(self):
    print 'start_fps was called'
    if self.first_person_agent is None:
        print 'adding first person agent!'
        self.spawnAgent(agent_xml='data/shapes/character/FirstPersonAgent.xml')
        OpenNero.enable_ai()
    else:
        print 'removing first person agent!'
        common.removeObject(self.first_person_agent)
        self.first_person_agent = None
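start_fps acts as a toggle: the first call spawns the first-person agent and enables AI, the next call removes it again. A usage sketch, assuming getMod() returns the module object that owns start_fps (as in the other examples on this page):

mod = getMod()   # module instance exposing start_fps (assumed accessor)
mod.start_fps()  # spawns FirstPersonAgent.xml and enables AI
mod.start_fps()  # called again: removes the agent and clears the handle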