当前位置: 首页>>代码示例>>Python>>正文


Python Terrain.get_goal_position方法代码示例

本文整理汇总了Python中terrain.Terrain.get_goal_position方法的典型用法代码示例。如果您正苦于以下问题:Python Terrain.get_goal_position方法的具体用法?Python Terrain.get_goal_position怎么用?Python Terrain.get_goal_position使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在terrain.Terrain的用法示例。


在下文中一共展示了Terrain.get_goal_position方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: Agent

# 需要导入模块: from terrain import Terrain [as 别名]
# 或者: from terrain.Terrain import get_goal_position [as 别名]
class Agent(object):
    """
        Class to handle decisions about getting to the goal.

        An A* searcher over a Terrain grid: it expands nodes from a
        SearchHeap ordered by g + h until the goal tile is reached.

        :param terrain: raw terrain data used to construct the Terrain
            the agent navigates
        :param heuristic: the heuristic function (difficulty level)
            supplied to the search heap
    """
    def __init__(self, terrain, heuristic):
        """Set up the terrain, the search heap, and the action-cost table.

        :param terrain: raw terrain data used to build a Terrain instance
        :param heuristic: heuristic function handed to the search heap
        """
        self.terrain = Terrain(terrain)
        # Unit step for each compass facing as (dx, dy); y grows southward.
        self.directions = {'N': (0, -1),
                           'E': (1, 0),
                           'S': (0, 1),
                           'W': (-1, 0)}
        self.start_node = Node(self.terrain.get_start_position(), 'N', 0)
        # Push our start position onto the heap
        self.search_heap = SearchHeap(initial=[self.start_node],
                                      g_func=lambda node: node.g,
                                      h_func=heuristic,
                                      goal=self.terrain.get_goal_position())
        self.visited = []
        self.position = list(self.terrain.get_start_position())
        self.facing = 'N'
        # Each action maps a tile cost to the (negative) g-score delta it
        # contributes; turning costs ceil(tile_cost / 3), bash a flat 3.
        self.action_costs = {'forward': lambda cost: -cost,
                             'bash': lambda cost: -3,
                             'turn': lambda cost: -ceil(float(cost)/float(3)),
                             'demolish': lambda cost: -4}
        # Parenthesized single-argument print works on Python 2 and 3 alike;
        # the original bare print statements are a SyntaxError on Python 3.
        print("goal position:")
        print(self.terrain.get_goal_position())

    def a_star_search(self):
        """A* search for the goal.

        Repeatedly pops the best-scored node from the search heap,
        records it as visited, and stops when the goal node is popped;
        otherwise pushes every unvisited, in-bounds neighbor (with its
        g cost assigned) back onto the heap.
        """
        while self.search_heap.is_not_empty():
            node = self.search_heap.pop()
            # add the node to self.visited to show we visited it
            self.visited.append(node)
            if self.terrain.is_goal_node(node):
                # TODO: make it return the path to the goal
                # as a sequence of nodes
                # Parenthesized print is valid on both Python 2 and 3;
                # the original bare print statements are Python-2-only.
                print("Score of the path:")
                print(node.g + 100)
                print("Number of actions required to reach the goal:")
                print(node.depth)
                print("Number of nodes expanded:")
                print(len(self.visited))
                break

            # items() iterates the same on Python 2 and 3; the original
            # iteritems() exists only on Python 2.
            for action, neighbor in self.get_search_neighbors(node).items():
                last_time_visited = self.has_been_visited_already(neighbor)
                if last_time_visited is None and self.terrain.node_inside_terrain(neighbor):
                    neighbor.g = self.assign_g_cost(neighbor, node, self.terrain, action)
                    self.search_heap.push(neighbor)

    def get_search_neighbors(self, node):
        """Build the child nodes reachable from *node* in one action.

        :param node: the node to expand
        :return: dict mapping each action name ('turn_left', 'turn_right',
            'move_forward', 'bash_and_forward') to the resulting Node
        """
        child_depth = node.depth + 1
        # The (position, direction) pair each action produces; turns stay
        # in place, forward/bash keep the current facing.
        outcomes = {
            'turn_left': (node.position, self.turn_left(node)),
            'turn_right': (node.position, self.turn_right(node)),
            'move_forward': (self.forward(node), node.direction),
            'bash_and_forward': (self.bash_and_forward(node), node.direction),
        }
        return {action: Node(position=pos, direction=facing, depth=child_depth)
                for action, (pos, facing) in outcomes.items()}

    def assign_g_cost(self, node, parent, terrain, action):
        """Return the child node's g score for taking *action* from *parent*.

        :param node: the child node whose g score is being computed
        :param parent: the node the action was taken from
        :param terrain: the Terrain supplying per-tile movement costs
        :param action: one of 'turn_left', 'turn_right', 'move_forward',
            'bash_and_forward'
        :return: parent.g plus the (negative) cost delta of the action
        """
        # Use the terrain the caller passed in; the original read
        # self.terrain and left this parameter dead. The only caller
        # passes self.terrain, so behavior is unchanged.
        tile_cost = terrain.get_cost_from_tuple(node.position)
        if 'turn' in action:
            return parent.g + self.action_costs['turn'](tile_cost)
        elif action == 'move_forward':
            return parent.g + self.action_costs['forward'](tile_cost)
        else:
            # bash (flat cost, argument ignored) then move onto the tile
            return (parent.g + self.action_costs['bash'](0) +
                    self.action_costs['forward'](tile_cost))

    def forward(self, node):
        """The rules to move forward"""
        new_pos = (node.position[0] + self.directions[node.direction][0],
#.........这里部分代码省略.........
开发者ID:Rdbaker,项目名称:AI-HW-1,代码行数:103,代码来源:agent.py


注:本文中的terrain.Terrain.get_goal_position方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。