本文整理汇总了Python中game.Actions.directionToVector方法的典型用法代码示例。如果您正苦于以下问题:Python Actions.directionToVector方法的具体用法?Python Actions.directionToVector怎么用?Python Actions.directionToVector使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类game.Actions
的用法示例。
在下文中一共展示了Actions.directionToVector方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: applyAction
# 需要导入模块: from game import Actions [as 别名]
# 或者: from game.Actions import directionToVector [as 别名]
def applyAction( state, action ):
    """
    Apply `action` to `state`, mutating it in place.

    Advances Pacman's configuration along the action's movement vector
    and, when the resulting position is close enough to a grid point,
    consumes whatever sits there.
    """
    if action not in PacmanRules.getLegalActions( state ):
        raise Exception("Illegal action " + str(action))

    agent = state.data.agentStates[0]

    # Advance Pacman's configuration by the action's displacement vector.
    step = Actions.directionToVector( action, PacmanRules.PACMAN_SPEED )
    agent.configuration = agent.configuration.generateSuccessor( step )

    # Eat: snap to the nearest grid point and consume when close enough.
    position = agent.configuration.getPosition()
    gridPoint = nearestPoint( position )
    if manhattanDistance( gridPoint, position ) <= 0.5:
        # Remove food
        PacmanRules.consume( gridPoint, state )
示例2: applyAction
# 需要导入模块: from game import Actions [as 别名]
# 或者: from game.Actions import directionToVector [as 别名]
def applyAction( state, action ):
    """
    Edit `state` in place to reflect the consequences of `action`.

    Raises an Exception when the action is not legal in this state.
    """
    legalMoves = PacmanRules.getLegalActions( state )
    if action not in legalMoves:
        raise Exception("Illegal action " + str(action))

    pacman = state.data.agentStates[0]

    # Update the agent configuration along the movement vector.
    moveVector = Actions.directionToVector( action, PacmanRules.PACMAN_SPEED )
    pacman.configuration = pacman.configuration.generateSuccessor( moveVector )

    # Eat: consume the cell Pacman has moved more than halfway into.
    newPos = pacman.configuration.getPosition()
    cell = nearestPoint( newPos )
    if manhattanDistance( cell, newPos ) <= 0.5:
        # Remove food
        PacmanRules.consume( cell, state )
示例3: getSuccessors
# 需要导入模块: from game import Actions [as 别名]
# 或者: from game.Actions import directionToVector [as 别名]
def getSuccessors(self, state):
    """
    Returns successor states, the actions they require, and a cost of 1.
    As noted in search.py:
    For a given state, this should return a list of triples, (successor,
    action, stepCost), where 'successor' is a successor to the current
    state, 'action' is the action required to get there, and 'stepCost'
    is the incremental cost of expanding to that successor

    NOTE(review): this is an unimplemented exercise stub — as written it
    always returns an empty list; the loop body is a placeholder.
    """
    successors = []
    for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
        # Add a successor state to the successor list if the action is legal
        # Here's a code snippet for figuring out whether a new position hits a wall:
        # x,y = currentPosition
        # dx, dy = Actions.directionToVector(action)
        # nextx, nexty = int(x + dx), int(y + dy)
        # hitsWall = self.walls[nextx][nexty]
        "*** YOUR CODE HERE ***"
    # Expansion counter used by the autograder/display bookkeeping.
    self._expanded += 1 # DO NOT CHANGE
    return successors
示例4: getDistribution
# 需要导入模块: from game import Actions [as 别名]
# 或者: from game.Actions import directionToVector [as 别名]
def getDistribution( self, state ):
    """
    Return a Counter over legal actions: the ghost mostly chases Pacman
    (or flees while scared), with the remaining probability mass spread
    uniformly over all legal actions.
    """
    # Read variables from state
    ghostState = state.getGhostState( self.index )
    legalActions = state.getLegalActions( self.index )
    currentPos = state.getGhostPosition( self.index )

    scared = ghostState.scaredTimer > 0
    # Scared ghosts move at half speed.
    stepSize = 0.5 if scared else 1

    vectors = [Actions.directionToVector( a, stepSize ) for a in legalActions]
    candidatePositions = [(currentPos[0] + dx, currentPos[1] + dy) for dx, dy in vectors]
    pacmanPos = state.getPacmanPosition()

    # Distance to Pacman from each post-action position.
    distances = [manhattanDistance( p, pacmanPos ) for p in candidatePositions]
    if scared:
        # Flee: prefer positions farthest from Pacman.
        targetScore, weight = max( distances ), self.prob_scaredFlee
    else:
        # Attack: prefer positions closest to Pacman.
        targetScore, weight = min( distances ), self.prob_attack

    preferred = [a for a, d in zip( legalActions, distances ) if d == targetScore]

    # Construct distribution: `weight` mass over best actions, rest uniform.
    dist = util.Counter()
    for a in preferred:
        dist[a] = weight / len(preferred)
    for a in legalActions:
        dist[a] += (1 - weight) / len(legalActions)
    dist.normalize()
    return dist
示例5: applyAction
# 需要导入模块: from game import Actions [as 别名]
# 或者: from game.Actions import directionToVector [as 别名]
def applyAction(state, action):
    """
    Edit the state in place to reflect the result of `action`.

    Raises:
        Exception: if `action` is not legal in `state`.
    """
    legal = TargetRules.getLegalActions(state)
    if action not in legal:
        # Fixed: separating space so the message reads "Illegal action X",
        # consistent with the PacmanRules version of this method.
        raise Exception("Illegal action " + str(action))
    targetState = state.data.agentStates[0]
    # Update configuration. NOTE(review): unlike PacmanRules.applyAction,
    # generateSuccessor here is passed the action itself rather than a
    # direction vector — presumably the target's configuration expects
    # that; confirm against the Configuration class.
    targetState.configuration = targetState.configuration.generateSuccessor(action)
示例6: getDistribution
# 需要导入模块: from game import Actions [as 别名]
# 或者: from game.Actions import directionToVector [as 别名]
def getDistribution( self, state ):
    """
    Distribution over this ghost's legal actions: mostly move toward
    Pacman (or away while scared), plus some uniform randomness.
    """
    # Read variables from state
    ghost = state.getGhostState( self.index )
    actions = state.getLegalActions( self.index )
    pos = state.getGhostPosition( self.index )
    pacman = state.getPacmanPosition()

    isScared = ghost.scaredTimer > 0
    speed = 1
    if isScared:
        speed = 0.5  # scared ghosts move at half speed

    # Distance from each post-action position to Pacman.
    dists = []
    for a in actions:
        dx, dy = Actions.directionToVector( a, speed )
        dists.append( manhattanDistance( (pos[0] + dx, pos[1] + dy), pacman ) )

    # Select the best score given the ghost's mood.
    if isScared:
        best = max( dists )            # maximize distance when fleeing
        prob = self.prob_scaredFlee
    else:
        best = min( dists )            # minimize distance when attacking
        prob = self.prob_attack

    chosen = [a for a, d in zip( actions, dists ) if d == best]

    # Construct distribution
    dist = util.Counter()
    for a in chosen:
        dist[a] = prob / len(chosen)
    for a in actions:
        dist[a] += (1 - prob) / len(actions)
    dist.normalize()
    return dist
示例7: getSuccessors
# 需要导入模块: from game import Actions [as 别名]
# 或者: from game.Actions import directionToVector [as 别名]
def getSuccessors(self, state):
    """
    Return [(successor, action, stepCost), ...] for every legal move
    from `state`, where `state` is an (x, y) grid position and a move
    is legal when the destination cell is not a wall.
    """
    x, y = state
    results = []
    for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
        dx, dy = Actions.directionToVector(direction)
        nx, ny = int(x + dx), int(y + dy)
        if self.walls[nx][ny]:
            continue  # blocked by a wall
        successor = (nx, ny)
        results.append((successor, direction, self.costFn(successor)))

    # Bookkeeping for display purposes
    self._expanded += 1
    if state not in self._visited:
        self._visited[state] = True
        self._visitedlist.append(state)
    return results
示例8: getCostOfActions
# 需要导入模块: from game import Actions [as 别名]
# 或者: from game.Actions import directionToVector [as 别名]
def getCostOfActions(self, actions):
    """
    Return the total cost of a particular sequence of actions.

    Returns the sentinel 999999 when `actions` is None or when any
    action would move into a wall (an illegal plan).
    """
    if actions is None:  # `is None`, not `== None` (PEP 8)
        return 999999
    x, y = self.getStartState()
    cost = 0
    for action in actions:
        # Simulate the step and check whether the next state is legal.
        dx, dy = Actions.directionToVector(action)
        x, y = int(x + dx), int(y + dy)
        if self.walls[x][y]:
            return 999999
        cost += self.costFn((x, y))
    return cost
示例9: getStatesFromPath
# 需要导入模块: from game import Actions [as 别名]
# 或者: from game.Actions import directionToVector [as 别名]
def getStatesFromPath(start, path):
    """Return the list of states visited along `path`, beginning at `start`."""
    visited = [start]
    position = start
    for action in path:
        # Step the position by the action's displacement vector.
        dx, dy = Actions.directionToVector(action)
        position = (int(position[0] + dx), int(position[1] + dy))
        visited.append(position)
    return visited
示例10: getSuccessors
# 需要导入模块: from game import Actions [as 别名]
# 或者: from game.Actions import directionToVector [as 别名]
def getSuccessors(self, state):
    """
    Returns successor states, the actions they require, and a cost of 1.

    For a given (x, y) state this returns a list of triples
    (successor, action, stepCost), one per cardinal direction whose
    destination cell is not a wall.
    """
    candidates = [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]
    successors = []
    for action in candidates:
        x, y = state
        dx, dy = Actions.directionToVector(action)
        nextPos = (int(x + dx), int(y + dy))
        if not self.walls[nextPos[0]][nextPos[1]]:
            successors.append((nextPos, action, self.costFn(nextPos)))

    # Bookkeeping for display purposes
    self._expanded += 1
    if state not in self._visited:
        self._visited[state] = True
        self._visitedlist.append(state)
    return successors
示例11: getCostOfActions
# 需要导入模块: from game import Actions [as 别名]
# 或者: from game.Actions import directionToVector [as 别名]
def getCostOfActions(self, actions):
    """
    Return the cost of a particular sequence of actions (one per step).

    If `actions` is None or any action would move into a wall, return
    the sentinel 999999. This is implemented for you.
    """
    if actions is None:  # `is None` is the idiomatic None test (PEP 8)
        return 999999
    x, y = self.startingPosition
    for action in actions:
        # Simulate the step and check whether it walks into a wall.
        dx, dy = Actions.directionToVector(action)
        x, y = int(x + dx), int(y + dy)
        if self.walls[x][y]:
            return 999999
    # Unit cost per step.
    return len(actions)
示例12: getFeatures
# 需要导入模块: from game import Actions [as 别名]
# 或者: from game.Actions import directionToVector [as 别名]
def getFeatures(self, state, action):
    """
    Feature extractor for approximate Q-learning: a bias term, the count
    of ghosts one step away from Pacman's next position, a food-eating
    indicator, and a scaled distance to the closest food.
    """
    # Extract the grid of food and wall locations and the ghost positions.
    foodGrid = state.getFood()
    wallGrid = state.getWalls()
    ghostPositions = state.getGhostPositions()

    features = util.Counter()
    features["bias"] = 1.0

    # Pacman's position after taking `action`.
    px, py = state.getPacmanPosition()
    dx, dy = Actions.directionToVector(action)
    nx, ny = int(px + dx), int(py + dy)

    # How many ghosts could reach the new position in one step?
    features["#-of-ghosts-1-step-away"] = sum(
        (nx, ny) in Actions.getLegalNeighbors(g, wallGrid) for g in ghostPositions
    )

    # Only reward eating food when no ghost is adjacent.
    if not features["#-of-ghosts-1-step-away"] and foodGrid[nx][ny]:
        features["eats-food"] = 1.0

    foodDistance = closestFood((nx, ny), foodGrid, wallGrid)
    if foodDistance is not None:
        # Keep the distance below one so the weight update doesn't diverge.
        features["closest-food"] = float(foodDistance) / (wallGrid.width * wallGrid.height)

    features.divideAll(10.0)
    return features
示例13: getSuccessors
# 需要导入模块: from game import Actions [as 别名]
# 或者: from game.Actions import directionToVector [as 别名]
def getSuccessors(self, state):
    """
    Expand `state` (an (x, y) position) into its legal neighbors.

    Returns a list of (successor, action, stepCost) triples, one per
    cardinal direction that is not blocked by a wall.
    """
    successors = []
    x, y = state
    for move in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
        offsetX, offsetY = Actions.directionToVector(move)
        cell = (int(x + offsetX), int(y + offsetY))
        if not self.walls[cell[0]][cell[1]]:
            successors.append((cell, move, self.costFn(cell)))

    # Bookkeeping for display purposes
    self._expanded += 1 # DO NOT CHANGE
    if state not in self._visited:
        self._visited[state] = True
        self._visitedlist.append(state)
    return successors
示例14: getCostOfActions
# 需要导入模块: from game import Actions [as 别名]
# 或者: from game.Actions import directionToVector [as 别名]
def getCostOfActions(self, actions):
    """
    Return the summed cost of following `actions` from the start state.

    Returns the sentinel 999999 when `actions` is None or when any
    action would move into a wall.
    """
    if actions is None:  # prefer identity comparison with None (PEP 8)
        return 999999
    x, y = self.getStartState()
    total = 0
    for action in actions:
        # Figure out the next state and check whether it is legal.
        dx, dy = Actions.directionToVector(action)
        x, y = int(x + dx), int(y + dy)
        if self.walls[x][y]:
            return 999999
        total += self.costFn((x, y))
    return total