This article collects typical usage examples of the Python method agent.Agent.setLocation. If you are unsure what Agent.setLocation does, how to call it, or want to see concrete examples, the curated code samples below may help. You can also explore the enclosing class agent.Agent for further usage examples.
Three code examples of Agent.setLocation are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
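Before the full examples, here is a minimal sketch of the call pattern the samples below rely on; the two-argument constructor and the (x, y) tuple passed to setLocation are inferred from these examples rather than from the library's documentation:

from agent import Agent

agent = Agent(0, 0)        # predator starts in the top-left corner
agent.setLocation((3, 4))  # relocate the predator to grid cell (3, 4)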
Example 1: valueIteration
# Required import: from agent import Agent [as alias]
# Alternatively: from agent.Agent import setLocation [as alias]
# (the examples below also assume: from prey import Prey and import numpy as np)
def valueIteration(discountFactor):
    # all locations on the 11x11 grid
    alllocations = [(x, y) for x in range(11) for y in range(11)]
    # initialize the value of every state to zero
    values = {}
    bestMoves = {}
    for predloc in alllocations:
        for preyloc in alllocations:
            if preyloc != predloc:
                values[(predloc, preyloc)] = 0
    agent = Agent(0, 0)
    deltas = []
    epsilon = 0.01
    delta = 1
    numIt = 0
    # perform value iteration according to the pseudocode
    while delta > epsilon:
        delta = 0
        newValues = {}
        # loop over all states
        for predloc in alllocations:
            for preyloc in alllocations:
                if predloc == preyloc:
                    continue
                agent.setLocation(predloc)
                prey = Prey(*preyloc)
                temp = values[(predloc, preyloc)]
                # find the best action under the current value estimates
                bestVal = 0
                bestMove = (0, 0)
                for prob, predMove in agent.getMoveList():
                    preySum = 0
                    newPredloc = ((predloc[0] + predMove[0]) % 11,
                                  (predloc[1] + predMove[1]) % 11)
                    if newPredloc == preyloc:
                        # catching the prey yields the immediate reward
                        preySum += 10.0
                    else:
                        for preyProb, newPreyloc in prey.expand(newPredloc):
                            preySum += preyProb * discountFactor * values[(newPredloc, newPreyloc)]
                    if bestVal <= preySum:
                        bestVal = preySum
                        bestMove = predMove
                newValues[(predloc, preyloc)] = bestVal
                bestMoves[(predloc, preyloc)] = bestMove
                delta = max(delta, np.abs(bestVal - temp))
        values = newValues
        deltas.append(delta)
        numIt += 1
    # greedy policy with respect to the optimal values computed above
    def policy(state):
        predloc, preyloc = state
        agent.setLocation(predloc)
        prey = Prey(*preyloc)
        return bestMoves[(predloc, preyloc)]
    return numIt, values, policy
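A possible way to invoke this example, assuming the agent and prey modules from the same repository are importable; the state layout ((predX, predY), (preyX, preyY)) follows the code above:

numIt, values, policy = valueIteration(0.8)
state = ((0, 0), (5, 5))   # predator in the top-left, prey in the centre
print(numIt, values[state], policy(state))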
Example 2: valueIteration
# Required import: from agent import Agent [as alias]
# Alternatively: from agent.Agent import setLocation [as alias]
def valueIteration():
    # the relative positions vary from -5 up to 5 in both dimensions
    alldiffs = [(x, y) for x in range(-5, 6) for y in range(-5, 6)]
    alldiffs.remove((0, 0))
    values = {}
    for x in range(-5, 6):
        for y in range(-5, 6):
            values[(x, y)] = 0
    bestMoves = {}
    agent = Agent(0, 0)
    deltas = []
    discountFactor = 0.8
    epsilon = 0.01
    delta = 1
    while delta > epsilon:
        delta = 0
        newValues = {}
        for diff in alldiffs:
            # place the predator in the middle of the world; this is allowed
            # because the positions are encoded relative to each other
            predloc = (5, 5)
            preyloc = (predloc[0] + diff[0], predloc[1] + diff[1])
            curKey = rewriteStates(predloc, preyloc)
            agent.setLocation(predloc)
            prey = Prey(*preyloc)
            temp = values[curKey]
            bestVal = 0
            bestMove = (0, 0)
            for prob, predMove in agent.getMoveList():
                preySum = 0
                newPredloc = agent.locAfterMove(predMove)
                if newPredloc == preyloc:
                    preySum += 10.0
                else:
                    for preyProb, newPreyloc in prey.expand(newPredloc):
                        # rewriteStates converts absolute into relative positions
                        preySum += preyProb * discountFactor * values[rewriteStates(newPredloc, newPreyloc)]
                if bestVal <= preySum:
                    bestVal = preySum
                    bestMove = predMove
            newValues[curKey] = bestVal
            bestMoves[curKey] = bestMove
            delta = max(delta, np.abs(bestVal - temp))
        values = newValues
        deltas.append(delta)
    def policy(state):
        predloc, preyloc = state
        agent.setLocation(predloc)
        prey = Prey(*preyloc)
        return bestMoves[rewriteStates(predloc, preyloc)]
    return policy
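The helper rewriteStates is used above but not shown. A plausible reconstruction, assuming it maps a (predator, prey) position pair to the prey's relative offset on the 11x11 toroidal grid (a guess consistent with the comments, not the repository's actual code):

def rewriteStates(predloc, preyloc):
    # hypothetical helper: relative (dx, dy) offset on the torus,
    # wrapped into the range -5..5 in both dimensions
    dx = (preyloc[0] - predloc[0] + 5) % 11 - 5
    dy = (preyloc[1] - predloc[1] + 5) % 11 - 5
    return (dx, dy)

This reduction shrinks the state space from roughly 11^4 position pairs to the 120 nonzero offsets, which is why this variant needs far fewer updates per sweep.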
Example 3: valueFunction
# Required import: from agent import Agent [as alias]
# Alternatively: from agent.Agent import setLocation [as alias]
def valueFunction():
    # all locations on the grid
    alllocations = [(x, y) for x in range(11) for y in range(11)]
    # initialize the value function
    values = {}
    for predloc in alllocations:
        for preyloc in alllocations:
            if preyloc != predloc:
                values[(predloc, preyloc)] = 0
    # predator which is placed in the top-left corner
    agent = Agent(0, 0)
    discountFactor = 0.8
    epsilon = 0.01
    delta = 1
    numIt = 0
    while delta > epsilon:
        delta = 0
        newValues = {}
        # sweep over all possible states
        for predloc in alllocations:
            for preyloc in alllocations:
                if predloc == preyloc:
                    continue
                # place predator and prey at their locations
                agent.setLocation(predloc)
                prey = Prey(*preyloc)
                # temp is the previous value of this state
                temp = values[(predloc, preyloc)]
                moveSum = 0
                # iterate over each action the agent can take and the
                # probability of that action under the policy
                for prob, newPredloc in agent.expand():
                    preySum = 0
                    # absorbing state: the predator catches the prey
                    if newPredloc == preyloc:
                        preySum += 10.0
                    else:
                        # iterate over the (stochastic) successor states of
                        # the prey and their probabilities
                        for preyProb, newPreyloc in prey.expand(newPredloc):
                            # part of the update rule (sum over s')
                            preySum += preyProb * discountFactor * values[(newPredloc, newPreyloc)]
                    # part of the update rule (sum over a)
                    moveSum += prob * preySum
                # policy-evaluation update
                newValues[(predloc, preyloc)] = moveSum
                delta = max(delta, np.abs(moveSum - temp))
        values = newValues
        numIt += 1
    return values, numIt
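Unlike Examples 1 and 2, this routine performs policy evaluation: it averages over the agent's actions weighted by their policy probabilities instead of maximizing. A sketch of calling it, under the same assumptions about the agent and prey modules:

values, numIt = valueFunction()
print("converged after", numIt, "sweeps")
print("value of ((0, 0), (5, 5)):", values[((0, 0), (5, 5))])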