This article collects typical usage examples of the Python util.lookup function. If you have been struggling with questions like what exactly util.lookup does, how to call it, or where it is typically used, then congratulations: the curated code examples here may help.
Below are 15 code examples of the lookup function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
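For orientation before the examples: util.lookup is not part of the Python standard library. In the projects most of these snippets come from (notably the UC Berkeley Pacman AI framework), it resolves a class or function from its string name within a namespace, usually the caller's globals(). A minimal sketch of that behavior, covering only the unqualified-name case (the real helper also handles dotted module paths):

def lookup(name, namespace):
    # Resolve a class or function by its string name. `namespace` is
    # usually the caller's globals(); a dotted name like 'pkg.mod.Cls'
    # would additionally require an import, omitted here for brevity.
    if name in namespace:
        return namespace[name]
    raise Exception('%s not found as a method or class' % name)

With such a helper, util.lookup('ExactInference', globals()) returns the ExactInference class object itself, which the examples below then instantiate.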
Example 1: __init__
def __init__(self, index=0, inference="ExactInference", ghostAgents=None,
             observeEnable=True, elapseTimeEnable=True):
    inferenceType = util.lookup(inference, globals())
    self.inferenceModules = [inferenceType(a) for a in ghostAgents]
    self.observeEnable = observeEnable
    self.elapseTimeEnable = elapseTimeEnable
Example 2: __init__
def __init__(self, extractor='IdentityExtractor', **args):
    self.featExtractor = util.lookup(extractor, globals())()
    PacmanQAgent.__init__(self, **args)
    #self.epsilon = 0.05
    #self.gamma = 0.8
    #self.alpha = 0.2
    # You might want to initialize weights here.
    self.all_directions = [Directions.NORTH,
                           Directions.SOUTH,
                           Directions.EAST,
                           Directions.WEST,
                           Directions.STOP]
    self.nb_states = 0
    self.closest_food = None
    self.featureQ = [self.ghost_west, self.ghost_east, self.ghost_north, self.ghost_south,
                     self.no_wall_west, self.no_wall_east, self.no_wall_north, self.no_wall_south,
                     #self.posx, self.posy, self.biais,
                     self.close_dot_west, self.close_dot_east, self.close_dot_north, self.close_dot_south]
    # Start every feature weight at 1.0, one weight vector per action.
    self.w = {}
    for action in self.all_directions:
        self.w[action] = [1.0] * len(self.featureQ)
Example 3: __init__
def __init__(self, extractor='IdentityExtractor', **args):
    self.featExtractor = util.lookup(extractor, globals())()
    PacmanQAgent.__init__(self, **args)
    # You might want to initialize weights here.
    "*** YOUR CODE HERE ***"
    self.weights = util.Counter()
    self.globalvar = 1
    # Define the NN: 5 inputs -> three 300-unit ReLU layers -> 5 linear
    # outputs, one Q-value per action. Note the Keras 1.x API: `init`
    # became `kernel_initializer` in Keras 2.
    self.model = Sequential()
    self.model.add(Dense(300, init='lecun_uniform', input_shape=(5,)))
    self.model.add(Activation('relu'))
    self.model.add(Dense(300, init='lecun_uniform'))
    self.model.add(Activation('relu'))
    self.model.add(Dense(300, init='lecun_uniform'))
    self.model.add(Activation('relu'))
    self.model.add(Dense(5, init='lecun_uniform'))
    self.model.add(Activation('linear'))
    rms = RMSprop()
    self.model.compile(loss='mse', optimizer=rms)
    self.atoi = {'North': 0, 'South': 1, 'East': 2, 'West': 3, 'Stop': 4}
Example 4: __init__
def __init__(self, extractor='IdentityExtractor', **args):
    self.featExtractor = util.lookup(extractor, globals())()
    PacmanQAgent.__init__(self, **args)
    # You might want to initialize weights here.
    "*** YOUR CODE HERE ***"
    self.weights = util.Counter()
Example 5: registerInitialState
def registerInitialState(self, gameState):
    """
    This method handles the initial setup of the agent to populate
    useful fields (such as what team we're on).
    A distanceCalculator instance caches the maze distances between
    each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)
    """
    self.red = gameState.isOnRedTeam(self.index)
    self.distancer = distanceCalculator.Distancer(gameState.data.layout)
    # Comment this out to forgo maze distance computation and use
    # Manhattan distances instead.
    self.distancer.getMazeDistances()
    import __main__
    if '_display' in dir(__main__):
        self.display = __main__._display
    inferenceType = util.lookup('ParticleFilter', globals())
    self.inferenceModules = [inferenceType(o, self.index) for o in self.getOpponents(gameState)]
    for inference in self.inferenceModules:
        inference.initialize(gameState)
    self.ghostBeliefs = [inf.getBeliefDistribution() for inf in self.inferenceModules]
    self.firstMove = True
Example 6: __init__
def __init__(self, index, fnStrategy='defaultstrategy'):
    self.index = index
    strategies = fnStrategy.split(';')
    try:
        self.strategy = util.lookup(strategies[index % len(strategies)], globals())
    except Exception:
        print("Function " + strategies[index % len(strategies)] + " not defined!")
        print("Loading defaultstrategy...")
        self.strategy = defaultstrategy
Example 7: getattr
def getattr(self, name):
    ret = None
    if self.path is not None:
        ret = findOnPath(name, self.path)
    if ret is not None:
        ret.name = self.name + '.' + ret.name
        return ret
    if self.mod is None:
        self.mod = lookup(self.name)
    return Namespace.getattr(self, name)
Example 8: _process_batch
def _process_batch(session, posts, dests, modes, **kwargs):
    """
    Request commute distances and durations for a batch of posts.
    """
    posts_latlon = [(_.latitude, _.longitude) for _ in posts]
    for mode in modes:
        response = gmaps.distance_matrix(posts_latlon, dests, mode, **kwargs)
        for post, row in zip(posts, response['rows']):
            post.commutes.extend([
                Commute(dest=lookup(session, Destination, dest),
                        mode=lookup(session, Mode, mode),
                        distance=e['distance']['value'],
                        duration=e['duration']['value'])
                for dest, e in zip(dests, row['elements'])
                if e['status'] == 'OK'
            ])
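Note that this example comes from a different codebase than the Pacman snippets: here lookup(session, model, key) appears to be a database helper that resolves a key to an ORM row, not the name-resolution util.lookup shown above. A hedged usage sketch, in which the googlemaps client, the SQLAlchemy session, and the Post/Destination/Mode models are all assumptions inferred from the snippet:

import datetime

# Everything below is illustrative: `gmaps`, `session`, and the Post
# model are assumed to be configured elsewhere in the codebase.
dests = ['Union Square, San Francisco, CA']
posts = session.query(Post).limit(25).all()  # Distance Matrix requests are size-limited
_process_batch(session, posts, dests, modes=['driving', 'transit'],
               departure_time=datetime.datetime.now())

The departure_time keyword is forwarded through **kwargs to gmaps.distance_matrix, which accepts it for time-dependent transit and traffic estimates.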
Example 9: __init__
def __init__(self, evalFn='scoreEvaluationFunction', depth='2', a=0, b=0, c=0, d=0, e=0, f=0):
    self.index = 0  # Pacman is always agent index 0
    self.depth = int(depth)
    storeConstants(a, b, c, d, e, f)
    # self.a = a
    # print "a: ", a
    # b = b
    # c = c
    # d = d
    # e = e
    # f = f
    self.evaluationFunction = util.lookup(evalFn, globals())
Example 10: __init__
def __init__(self, extractor='IdentityExtractor', **args):
    if 'extractor' in args:
        extractor = args['extractor']
    self.featExtractor = util.lookup(extractor, globals())()
    SarsaLambdaAgent.__init__(self, **args)
    # You might want to initialize weights here.
    "*** YOUR CODE HERE ***"
    # We are not using values.
    self.weights = util.Counter()
    self.workingWeights = util.Counter()
    self.times = 0
Example 11: __init__
def __init__(self, index, fnStrategy='defaultstrategy'):
    self.index = index
    self.xCapsule = self.yCapsule = -1
    strategies = fnStrategy.split(';')
    global ghostN
    ghostN = "all" if len(strategies) > 1 else strategies[0]
    try:
        self.strategy = util.lookup(strategies[index % len(strategies)], globals())
    except Exception:
        print("Function " + strategies[index % len(strategies)] + " not defined!")
        self.strategy = default
Example 12: addPackage
def addPackage(self, package, skiplist=[]):
    pkg = lookup(package)
    base = package.replace(DOT, SLASH)
    for cl in listAllClasses(pkg):
        name = package + "." + cl
        if name in skiplist:
            # print 'skipping', name  # ?? dbg
            continue
        entryname = base + "/" + cl + ".class"
        self.zipfile.putNextEntry(ZipEntry(entryname))
        instream = openResource(entryname)
        copy(instream, self.zipfile)
        instream.close()
Example 13: __init__
def __init__(self, extractor='FeatureLearner', processFreq=processingFrequency, **args):
    self.featExtractor = util.lookup(extractor, globals())()
    PacmanQAgent.__init__(self, **args)
    # Weight initialization
    self.featuresWeights = util.Counter()
    self.rewardCounter = util.Counter()
    self.rewardDict = util.Counter()
    # We use the processingFrequency only if the featExtractor is a
    # FeatureLearner (i.e. we have a deep learning agent).
    self.processingFrequency = processingFrequency  # if self.featExtractor == FeatureLearner else 10e9
Example 14: __init__
def __init__(self, numDecks=1, qvalues=util.Counter(), epsilon=0.05, gamma=0.8,
             alpha=0.2, numTraining=1000, extractor='SimpleExtractor'):
    """
    Init appropriated from qlearningAgents.py from the
    Pacman reinforcement learning assignment.
    """
    self.epsilon = epsilon
    self.discount = gamma
    self.alpha = alpha
    self.numTraining = numTraining
    # True = silent mode; the agent must be modified later to actually
    # accept a dealer.
    self.dealer = Dealer.Dealer(numDecks, True)
    self.qvalues = qvalues
    self.weights = util.Counter()
    self.featExtractor = util.lookup(extractor, globals())()
Example 15: __init__
def __init__(self, extractor='IdentityExtractor', **args):
    if 'extractor' in args:
        extractor = args['extractor']
    self.featExtractor = util.lookup(extractor, globals())()
    # You might want to initialize weights here.
    "*** YOUR CODE HERE ***"
    self.mdp = args['mdp']
    self.discount = args['gamma']
    self.iterations = args['iterations']
    self.alpha = args['alpha']
    self.weights = util.Counter()
    self.times = 0
    if False:  # extractor == 'BairdsExtractor':
        # doing evil thing here
        self.weights[0] = 1
        self.weights[1] = 1
        self.weights[2] = 1
        self.weights[3] = 1
        self.weights[4] = 1
        self.weights[5] = 1
        self.weights[6] = 1
    # Do the update with a full backup (sweep every state).
    for time in range(self.iterations):
        for state in self.mdp.getStates():
            if not self.mdp.isTerminal(state):
                # Find the best action.
                maxValue = None
                bestAction = None
                for action in self.mdp.getPossibleActions(state):
                    thisValue = self.getQValue(state, action)
                    if bestAction is None or thisValue > maxValue:
                        maxValue = thisValue
                        bestAction = action
                for nextState, prob in self.mdp.getTransitionStatesAndProbs(state, bestAction):
                    self.update(state, bestAction, nextState,
                                self.mdp.getReward(state, bestAction, nextState), prob)
        self.outputWeights(time)
        self.outputValues(time)
        self.outputMSE(time)