

Python OpenNero.get_environment Method Code Examples

This article collects typical usage examples of the Python method OpenNero.get_environment. If you are wondering what OpenNero.get_environment does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples from the OpenNero module.


Six code examples of OpenNero.get_environment are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
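In every example below the pattern is the same: OpenNero.get_environment() returns the currently active environment object (or None when no environment has been loaded, as the destroy example checks), and the caller then invokes environment-specific methods on it. A minimal sketch of that pattern; the printed attribute is only illustrative:

import OpenNero

env = OpenNero.get_environment()
if env is not None:
    # the returned object exposes whatever the current mod's environment class
    # defines, e.g. sense(), act(), remove_agent() in the examples below
    print(env.__class__.__name__)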

Example 1: set_display_hint

# Required module: import OpenNero [as alias]
# Or: from OpenNero import get_environment [as alias]
 def set_display_hint(self):
     """
     set the display hint above the agent's head (toggled with F2)
     """
     display_hint = constants.getDisplayHint()
     if display_hint:
         if display_hint == 'fitness':
             self.state.label = '%.2f' % self.org.fitness
         elif display_hint == 'time alive':
             self.state.label = str(self.org.time_alive)
         elif display_hint == 'hit points':
             self.state.label = ''.join('.' for i in range(int(5*OpenNero.get_environment().get_hitpoints(self))))
         elif display_hint == 'id':
             self.state.label = str(self.org.genome.id)
         elif display_hint == 'champion':
             if self.org.champion:
                 self.state.label = 'champ!'
             else:
                 self.state.label = ''
         elif display_hint == 'rank':
             self.state.label = str(self.org.rank)
         elif display_hint == 'debug':
             self.state.label = str(OpenNero.get_environment().get_state(self))
         else:
             self.state.label = '?'
     else:
         # the first time we switch away from displaying hints,
         # clear the label above the agent's head
         if self.state.label:
             self.state.label = ""
Developer: baviera08, Project: opennero, Lines: 32, Source: agent.py
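The 'hit points' branch turns the agent's remaining health into a short row of dots: assuming get_hitpoints returns a fraction in [0, 1], scaling by 5 and truncating gives zero to five dots. A standalone sketch of the same mapping (the 0.6 value is made up for illustration):

hitpoint_fraction = 0.6  # hypothetical value standing in for get_hitpoints(self)
label = ''.join('.' for i in range(int(5 * hitpoint_fraction)))
print(label)  # '...' -- three dots for 60% health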

Example 2: parseInputCommand

# Required module: import OpenNero [as alias]
# Or: from OpenNero import get_environment [as alias]
def parseInputCommand(content):
    """
    Parse commands from training window
    """
    mod = getMod()
    command, arg = content.attrib['command'], content.attrib['arg']
    # command selects the action; arg carries a numeric value or a filename
    if command.isupper():
        vali = int(arg)
    if command == "LT": mod.ltChange(vali)
    if command == "EE": mod.eeChange(vali)
    if command == "HP": mod.hpChange(vali)
    if command == "SP": mod.set_speedup(vali)
    if command == "save1": mod.save_team(arg, constants.OBJECT_TYPE_TEAM_0)
    if command == "load1": mod.load_team(arg, constants.OBJECT_TYPE_TEAM_0)
    if command == "rtneat": mod.deploy('rtneat')
    if command == "qlearning": mod.deploy('qlearning')
    if command == "pause": OpenNero.disable_ai()
    if command == "resume": OpenNero.enable_ai()
    if command == "example":
        print 'command: example'
        if arg == "start":
            print 'command: example start'
            mod.start_demonstration()
        elif arg == "cancel":
            print 'command: example cancel'
            OpenNero.get_environment().cancel_demonstration()
        elif arg == "confirm":
            print 'command: example confirm'
            OpenNero.get_environment().use_demonstration()
Developer: gjacobrobertson, Project: opennero-394n, Lines: 32, Source: module.py
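parseInputCommand expects an element whose command and arg attributes are filled in by the training window. A hypothetical invocation, assuming the element is built with xml.etree.ElementTree (the command and value shown are made up):

import xml.etree.ElementTree as ET

# simulate the training window requesting a speed-up of 50
content = ET.Element('message', attrib={'command': 'SP', 'arg': '50'})
parseInputCommand(content)  # dispatches to mod.set_speedup(50)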

Example 3: load_rtneat

# Required module: import OpenNero [as alias]
# Or: from OpenNero import get_environment [as alias]
 def load_rtneat(self, location, pop, team=constants.OBJECT_TYPE_TEAM_0):
     location = os.path.relpath("/") + location
     if os.path.exists(location):
         OpenNero.set_ai("rtneat-%s" % team, OpenNero.RTNEAT(
                 str(location), "data/ai/neat-params.dat",
                 constants.pop_size,
                 OpenNero.get_environment().agent_info.reward))
Developer: chyt, Project: CS343-Hw5, Lines: 9, Source: module.py
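Note that the pop argument is not used in this snippet: the population size always comes from constants.pop_size, and the reward specification is pulled from the live environment via OpenNero.get_environment().agent_info.reward. A hypothetical call restoring team 0 from a previously saved population file (assuming the surrounding module exposes the same getMod() helper seen in Example 2; the path is made up):

mod = getMod()
mod.load_rtneat('data/rtneat_population.txt', None, team=constants.OBJECT_TYPE_TEAM_0)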

Example 4: evaluate_trace

# Required module: import OpenNero [as alias]
# Or: from OpenNero import get_environment [as alias]
    def evaluate_trace(self):
        """
        evaluate agent and compute fitness based on trace information
        """

        # flush network from previous activations
        org = self.get_org()
        org.net.flush()
        
        environment = OpenNero.get_environment()
        trace = environment.trace

        # place the agent at the beginning of the trace
        self.state.position = OpenNero.Vector3f(
            trace.position[0].x, trace.position[0].y, trace.position[0].z)
        self.state.rotation = OpenNero.Vector3f(
            trace.rotation[0].x, trace.rotation[0].y, trace.rotation[0].z)
        current_step = trace.initial_step
        j = 0  # trace index at which error is calculated
        while j < len(trace.position)-1 and current_step < environment.STEPS_PER_EPISODE:
            # read the agent's current pose; `position` feeds the trace-error
            # computation below (both names were undefined as extracted)
            position = self.state.position
            rotation = self.state.rotation
            sensors = environment.sense(self)
            actions = self.network_action(sensors)

            # error based on position - find index in trace where error based on
            # current position starts to increase, i.e. starting with the current
            # trace index, we find position in trace that is closest to the current
            # state position.
            error1 = trace.position[j].getDistanceFrom(position)
            error2 = trace.position[j+1].getDistanceFrom(position)
            while error1 >= error2 and j < len(trace.position)-2:
                j += 1
                error1 = error2
                error2 = trace.position[j+1].getDistanceFrom(position)

            if error1 > self.ERROR_THRESHOLD:
                break

            # calculate new position, orientation, and velocity
            environment.act(self, actions)
            current_step += 1

        self.passed_steps = j
        return float(j)/len(trace.position)
Developer: DavidDeAngelo, Project: opennero, Lines: 47, Source: agent.py
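The inner while loop is a simple nearest-point search along the demonstration trace: starting at index j, it advances while the next trace point is at least as close to the agent as the current one, so j tracks how far along the trace the agent has progressed. A standalone sketch of that search over plain 2-D points (the distance helper and sample data are made up):

import math

def distance(a, b):
    return math.hypot(a[0] - b[0], a[1] - b[1])

trace = [(0, 0), (1, 0), (2, 0), (3, 0)]   # hypothetical demonstration positions
agent_position = (2.2, 0.1)                # hypothetical current agent position

j = 0
error1 = distance(trace[j], agent_position)
error2 = distance(trace[j + 1], agent_position)
while error1 >= error2 and j < len(trace) - 2:
    j += 1
    error1 = error2
    error2 = distance(trace[j + 1], agent_position)
print(j)  # 2: the agent is closest to the third trace point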

Example 5: normalize_reward

# Required module: import OpenNero [as alias]
# Or: from OpenNero import get_environment [as alias]
 def normalize_reward(self, reward):
     """
     Combine reward vector into a single value in the range [0,1]
     """
     weighted_sum = 0
     min_sum = 0
     max_sum = 0
     environment = OpenNero.get_environment()
     for i, f in enumerate(constants.FITNESS_DIMENSIONS):
         weight = environment.reward_weights[f]
         weighted_sum += weight * reward[i]
         min_sum += abs(weight) * -1.0
         max_sum += abs(weight)
     normalized_reward = weighted_sum
     if max_sum > min_sum:  # normalize the weighted sum to [0, 1]
         d = max_sum - min_sum
         normalized_reward = (normalized_reward - min_sum) / d
     return normalized_reward
Developer: gjacobrobertson, Project: opennero-394n, Lines: 20, Source: agent.py
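Because the per-dimension extremes are taken as plus or minus the absolute weight, the normalization effectively assumes each raw reward component lies in [-1, 1]. A standalone numeric sketch of the same arithmetic with two hypothetical fitness dimensions and made-up weights:

# hypothetical fitness dimensions, weights, and raw reward components
fitness_dimensions = ['approach_enemy', 'avoid_fire']
reward_weights = {'approach_enemy': 2.0, 'avoid_fire': -1.0}
reward = [0.5, 1.0]

weighted_sum = 0.0
min_sum = 0.0
max_sum = 0.0
for i, f in enumerate(fitness_dimensions):
    weight = reward_weights[f]
    weighted_sum += weight * reward[i]   # 2.0*0.5 + (-1.0)*1.0 = 0.0
    min_sum += abs(weight) * -1.0        # -3.0
    max_sum += abs(weight)               #  3.0

normalized = (weighted_sum - min_sum) / (max_sum - min_sum)
print(normalized)  # 0.5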

Example 6: destroy

# Required module: import OpenNero [as alias]
# Or: from OpenNero import get_environment [as alias]
 def destroy(self):
     env = OpenNero.get_environment()
     if env is not None:
         env.remove_agent(self)
     return True
Developer: DavidDeAngelo, Project: opennero, Lines: 7, Source: agent.py


Note: The OpenNero.get_environment examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not republish without permission.