This article collects typical code examples of the Python utils.argmax function. If you have been wondering what utils.argmax does, how to call it, or what real-world usage looks like, the hand-picked examples below may help.
Fifteen argmax code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
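Note that the examples come from different projects, so utils.argmax does not always mean the same thing: Examples 1 and 8 pass a plain list and treat the return value as an index, while the AIMA-derived examples pass an iterable plus a key function and use the returned element directly. The following is only a minimal sketch of those two conventions, inferred from the call sites; it is not the actual utils module of any of the projects quoted here.

# Minimal sketch (assumption): the two shapes of argmax seen in the examples below.

def argmax_index(seq):
    # Index of the largest element (how Examples 1 and 8 use utils.argmax).
    return max(range(len(seq)), key=lambda i: seq[i])

def argmax(seq, key=lambda x: x):
    # Element of seq that maximizes key (AIMA-style, as in Examples 2, 3, 7, 9, 12 and 13).
    return max(seq, key=key)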
Example 1: _add_to_cluster
def _add_to_cluster(self, cluster, _doc):
    super(WavgNetCONFIRM, self)._add_to_cluster(cluster, _doc)
    # competitive stage
    similarities = self._cluster_sim_scores(_doc)
    idx = utils.argmax(similarities)
    del similarities[idx]
    if similarities:
        idx2 = utils.argmax(similarities)
        if idx2 <= idx:
            idx2 += 1
        sim_vec2 = self.clusters[idx2].center.similarity_vector(_doc)
        self.clusters[idx2].network.learn(sim_vec2, 0.2)
Example 2: best_policy
def best_policy(mdp, U):
    """Given an MDP and a utility function U, determine the best policy,
    as a mapping from state to action. (Equation 17.4)"""
    pi = {}
    for s in mdp.states:
        pi[s] = argmax(mdp.actions(s), key=lambda a: expected_utility(a, s, U, mdp))
    return pi
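best_policy relies on an expected_utility helper that is not shown above. A minimal sketch consistent with how it is called here, assuming the AIMA convention that mdp.T(s, a) returns (probability, next_state) pairs:

def expected_utility(a, s, U, mdp):
    # Expected utility of doing action a in state s, under utility function U (sketch).
    return sum(p * U[s1] for (p, s1) in mdp.T(s, a))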
Example 3: actions
def actions(self, state):
    search_list = [c for c in self.decoder.chardomain if c not in state]
    target_list = [c for c in alphabet if c not in state.values()]
    # Find the best character to replace
    plainchar = argmax(search_list, key=lambda c: self.decoder.P1[c])
    for cipherchar in target_list:
        yield (plainchar, cipherchar)
Example 4: determine_optimum_variants
def determine_optimum_variants(unit1, unit2):
    """Determines the optimum variants between two units."""
    # TODO - improve performance by considering variants (1,1) (1, 2) and (2,1)
    # as equivalent.
    outcomes = defaultdict(dict)
    for v1 in MeleeRangedStrategy.VARIANTS:
        if not MeleeRangedStrategy.is_compatible(unit1, v1):
            continue
        unit1.strategy = MeleeRangedStrategy(unit1, v1)
        for v2 in MeleeRangedStrategy.VARIANTS:
            if not MeleeRangedStrategy.is_compatible(unit2, v2):
                continue
            unit2.strategy = MeleeRangedStrategy(unit2, v2)
            turn_order = (unit1, unit2)
            game_state = AveragingVersusGameState(turn_order, verbosity=0)
            game_state.run_combat()
            outcomes[v1][v2] = game_state.hp_delta
    # What's your best strategy?
    unit_1_strategies = {v1: min(outcomes[v1].values()) for v1 in outcomes}
    unit1_strategy = utils.argmax(unit_1_strategies)
    unit2_strategy = utils.argmin(outcomes[unit1_strategy])
    # for v1 in outcomes:
    #     for v2, hp_delta in sorted(outcomes[v1].items()):
    #         print '(%d, %d) => %+.2f' % (v1, v2, hp_delta)
    # print '%s\'s strategy: %s' % (unit1, unit1_strategy)
    # print '%s\'s strategy: %s' % (unit2, unit2_strategy)
    return (unit1_strategy, unit2_strategy)
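Here utils.argmax and utils.argmin are applied to dictionaries and the result is used as a key (a strategy variant), which suggests a third convention: return the key whose value is largest or smallest. A sketch under that assumption, not the project's actual utils module:

def argmax(d):
    # Key of the dict entry with the largest value (assumed behavior).
    return max(d, key=d.get)

def argmin(d):
    # Key of the dict entry with the smallest value (assumed behavior).
    return min(d, key=d.get)

Read that way, the function computes a maximin choice: unit1 picks the variant with the best worst-case hp_delta, and unit2 then picks the variant that minimizes hp_delta against it.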
Example 5: genetic_algorithm
def genetic_algorithm(problem, population, fitness_fn, ngen=1000, pmut=0.1):
    "[Fig. 4.8]"
    # MAX = 0
    for i in range(ngen):
        new_population = []
        '''
        print i, '------------'
        print ' ', MAX
        for p in population:
            print problem.value(p)
            if problem.value(p) > MAX:
                MAX = problem.value(p)
        '''
        for p in population:
            fitnesses = map(fitness_fn, population)
            s1, s2 = weighted_sample_with_replacement(population, fitnesses, 2)
            p1 = copy.copy(problem)
            p1.set_state(s1)
            p2 = copy.copy(problem)
            p2.set_state(s2)
            child = p1.mate(p2)
            child.mutate(pmut)
            new_population.append(child.initial)
        population = new_population
    return utils.argmax(population, fitness_fn)
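weighted_sample_with_replacement is the selection step: it draws individuals with probability proportional to fitness. The snippet above is Python-2-era code (note the bare map call and commented-out print statements); a Python 3 sketch that matches the (seq, weights, n) argument order used above, assumed rather than taken from the original utils module:

import random

def weighted_sample_with_replacement(seq, weights, n):
    # Draw n items from seq with replacement, probability proportional to the given weights (sketch).
    return random.choices(seq, weights=list(weights), k=n)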
Example 6: genetic_algorithm_stepwise
def genetic_algorithm_stepwise(population):
    root.title('Genetic Algorithm')
    for generation in range(ngen):
        # generate a new population by selecting, recombining and mutating the existing population
        population = [search.mutate(search.recombine(*search.select(2, population, fitness_fn)), gene_pool, mutation_rate) for i in range(len(population))]
        # genome with the highest fitness in the current generation
        current_best = ''.join(argmax(population, key=fitness_fn))
        # collect the first few examples from the current population
        members = [''.join(x) for x in population][:48]
        # clear the canvas
        canvas.delete('all')
        # display the current best on top of the screen
        canvas.create_text(canvas_width / 2, 40, fill=p_blue, font='Consolas 46 bold', text=current_best)
        # display part of the population on the screen
        for i in range(len(members) // 3):
            canvas.create_text((canvas_width * .175), (canvas_height * .25 + (25 * i)), fill=lp_blue, font='Consolas 16', text=members[3 * i])
            canvas.create_text((canvas_width * .500), (canvas_height * .25 + (25 * i)), fill=lp_blue, font='Consolas 16', text=members[3 * i + 1])
            canvas.create_text((canvas_width * .825), (canvas_height * .25 + (25 * i)), fill=lp_blue, font='Consolas 16', text=members[3 * i + 2])
        # display the current generation number
        canvas.create_text((canvas_width * .5), (canvas_height * 0.95), fill=p_blue, font='Consolas 18 bold', text=f'Generation {generation}')
        # display a blue bar that indicates current maximum fitness relative to the maximum possible fitness
        scaling_factor = fitness_fn(current_best) / len(target)
        canvas.create_rectangle(canvas_width * 0.1, 90, canvas_width * 0.9, 100, outline=p_blue)
        canvas.create_rectangle(canvas_width * 0.1, 90, canvas_width * 0.1 + scaling_factor * canvas_width * 0.8, 100, fill=lp_blue)
        canvas.update()
        # check for completion
        fittest_individual = search.fitness_threshold(fitness_fn, f_thres, population)
        if fittest_individual:
            break
Example 7: WalkSAT
def WalkSAT(clauses, p=0.5, max_flips=10000):
    """Checks for satisfiability of all clauses by randomly flipping values of variables."""
    # Set of all symbols in all clauses
    symbols = set(sym for clause in clauses for sym in prop_symbols(clause))
    # model is a random assignment of true/false to the symbols in clauses
    model = {s: random.choice([True, False]) for s in symbols}
    for i in range(max_flips):
        satisfied, unsatisfied = [], []
        for clause in clauses:
            (satisfied if pl_true(clause, model) else unsatisfied).append(clause)
        if not unsatisfied:  # if model satisfies all the clauses
            return model
        clause = random.choice(unsatisfied)
        if probability(p):
            sym = random.choice(prop_symbols(clause))
        else:
            # Flip the symbol in clause that maximizes number of sat. clauses
            def sat_count(sym):
                # Return the number of clauses satisfied after flipping the symbol.
                model[sym] = not model[sym]
                count = len([clause for clause in clauses if pl_true(clause, model)])
                model[sym] = not model[sym]
                return count
            sym = argmax(prop_symbols(clause), key=sat_count)
        model[sym] = not model[sym]
    # If no solution is found within the flip limit, we return failure
    return None
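A hypothetical call, assuming the surrounding AIMA logic module provides expr() for building propositional sentences:

clauses = [expr('A | B'), expr('~A | C'), expr('~C')]
model = WalkSAT(clauses, p=0.5, max_flips=1000)
# model maps each symbol to True/False (e.g. A=False, B=True, C=False here),
# or is None if no satisfying assignment was found within max_flips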
Example 8: update
def update(self, x, y):
    """
    updates the ORT
      - x : list of k covariates (k x 1)
      - y : response (scalar)
    usage:
      ort.update(x, y)
    """
    k = self.__poisson(1)
    if k == 0:
        self.__updateOOBE(x, y)
    else:
        for u in xrange(k):
            self.__age += 1
            (j, depth) = self.__findLeaf(x, self.tree)
            j.elem.update(x, y)
            # if j.elem.numSamplesSeen > self.minSamples and depth < self.maxDepth:  # FIXME: which is the correct approach?
            if j.elem.stats.n > self.minSamples and depth < self.maxDepth:
                g = self.__gains(j.elem)
                if any([gg >= self.minGain for gg in g]):
                    bestTest = j.elem.tests[argmax(g)]
                    j.elem.updateSplit(bestTest.dim, bestTest.loc)
                    j.updateChildren(Tree(Elem(self.param)), Tree(Elem(self.param)))
                    j.left.elem.stats = bestTest.statsL
                    j.right.elem.stats = bestTest.statsR
                    j.elem.reset()
Example 9: minimax_decision
def minimax_decision(state, game):
    """Given a state in a game, calculate the best move by searching
    forward all the way to the terminal states. [Figure 5.3]"""
    player = game.to_move(state)

    def max_value(state):
        if game.terminal_test(state):
            return game.utility(state, player)
        v = -infinity
        for a in game.actions(state):
            v = max(v, min_value(game.result(state, a)))
        return v

    def min_value(state):
        if game.terminal_test(state):
            return game.utility(state, player)
        v = infinity
        for a in game.actions(state):
            v = min(v, max_value(game.result(state, a)))
        return v

    # Body of minimax_decision:
    return argmax(game.actions(state),
                  key=lambda a: min_value(game.result(state, a)))
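A hypothetical usage, assuming the AIMA games module with its TicTacToe game is available:

game = TicTacToe()
move = minimax_decision(game.initial, game)
# exhaustive search to the leaves: fine for tic-tac-toe, far too slow for larger games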
Example 10: minimax_decision
def minimax_decision(state, game):
    """Given a state in a game, calculate the best move by searching
    forward all the way to the terminal states. [Fig. 6.4]"""
    player = game.to_move(state)

    def max_value(state):
        if game.terminal_test(state):
            return game.utility(state, player)
        v = -infinity
        for (_, s) in game.successors(state):
            v = max(v, min_value(s))
        return v

    def min_value(state):
        if game.terminal_test(state):
            return game.utility(state, player)
        v = infinity
        for (_, s) in game.successors(state):
            v = min(v, max_value(s))
        return v

    # Body of minimax_decision starts here:
    action, state = argmax(game.successors(state), lambda ((a, s)): min_value(s))
    return action
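Note that lambda ((a, s)): min_value(s) uses tuple-parameter unpacking, which is Python 2 only; a Python 3 equivalent would be lambda a_s: min_value(a_s[1]). The same applies to Example 11 below.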
Example 11: alphabeta_full_search
def alphabeta_full_search(state, game):
    """Search game to determine best action; use alpha-beta pruning.
    As in [Fig. 6.7], this version searches all the way to the leaves."""
    player = game.to_move(state)

    def max_value(state, alpha, beta):
        if game.terminal_test(state):
            return game.utility(state, player)
        v = -infinity
        for (a, s) in game.successors(state):
            v = max(v, min_value(s, alpha, beta))
            if v >= beta:
                return v
            alpha = max(alpha, v)
        return v

    def min_value(state, alpha, beta):
        if game.terminal_test(state):
            return game.utility(state, player)
        v = infinity
        for (a, s) in game.successors(state):
            v = min(v, max_value(s, alpha, beta))
            if v <= alpha:
                return v
            beta = min(beta, v)
        return v

    # Body of alphabeta_search starts here:
    action, state = argmax(game.successors(state),
                           lambda ((a, s)): min_value(s, -infinity, infinity))
    return action
Example 12: predict
def predict(example):
    """Predict the target value for example. Consider each possible value,
    and pick the most likely by looking at each attribute independently."""
    def class_probability(targetval):
        return (target_dist[targetval] *
                product(attr_dists[targetval, attr][example[attr]]
                        for attr in dataset.inputs))
    return argmax(targetvals, key=class_probability)
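The product helper used in this and the next naive Bayes predictor is typically just a multiplicative reduce; a sketch, assumed rather than taken from the original utils module:

def product(numbers):
    # Multiply the elements of an iterable together (sketch).
    result = 1
    for x in numbers:
        result *= x
    return result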
Example 13: predict
def predict(example):
    """Predict the target value for example. Calculate probabilities for each
    class and pick the max."""
    def class_probability(targetval):
        attr_dist = attr_dists[targetval]
        return target_dist[targetval] * product(attr_dist[a] for a in example)
    return argmax(target_dist.keys(), key=class_probability)
Example 14: genetic_algorithm
def genetic_algorithm(self, problem, map_canvas):
    """ Genetic Algorithm modified for the given problem """

    def init_population(pop_number, gene_pool, state_length):
        """ initialize population """
        population = []
        for i in range(pop_number):
            population.append(utils.shuffled(gene_pool))
        return population

    def recombine(state_a, state_b):
        """ recombine two problem states """
        start = random.randint(0, len(state_a) - 1)
        end = random.randint(start + 1, len(state_a))
        new_state = state_a[start:end]
        for city in state_b:
            if city not in new_state:
                new_state.append(city)
        return new_state

    def mutate(state, mutation_rate):
        """ mutate problem states """
        if random.uniform(0, 1) < mutation_rate:
            sample = random.sample(range(len(state)), 2)
            state[sample[0]], state[sample[1]] = state[sample[1]], state[sample[0]]
        return state

    def fitness_fn(state):
        """ calculate fitness of a particular state """
        fitness = problem.value(state)
        return int((5600 + fitness) ** 2)

    current = Node(problem.initial)
    population = init_population(100, current.state, len(current.state))
    all_time_best = current.state
    while True:
        population = [mutate(recombine(*select(2, population, fitness_fn)), self.mutation_rate.get()) for i in range(len(population))]
        current_best = utils.argmax(population, key=fitness_fn)
        if fitness_fn(current_best) > fitness_fn(all_time_best):
            all_time_best = current_best
            self.cost.set("Cost = " + str('%0.3f' % (-1 * problem.value(all_time_best))))
        map_canvas.delete('poly')
        points = []
        for city in current_best:
            points.append(self.frame_locations[city][0])
            points.append(self.frame_locations[city][1])
        map_canvas.create_polygon(points, outline='red', width=1, fill='', tag='poly')
        best_points = []
        for city in all_time_best:
            best_points.append(self.frame_locations[city][0])
            best_points.append(self.frame_locations[city][1])
        map_canvas.create_polygon(best_points, outline='red', width=3, fill='', tag='poly')
        map_canvas.update()
        map_canvas.after(self.speed.get())
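select is called inside the loop but not defined in this snippet; it is the fitness-proportional selection step. A sketch matching the select(2, population, fitness_fn) call above, assumed rather than copied from the original search module:

import random

def select(r, population, fitness_fn):
    # Sample r individuals with replacement, probability proportional to fitness (sketch).
    fitnesses = [fitness_fn(p) for p in population]
    return random.choices(population, weights=fitnesses, k=r)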
Example 15: _update
def _update(self, action, reward):
    """Update Q according to reward received."""
    Q = self.Q
    maxaction = argmax(range(self.env.action_space.n),
                       lambda a: Q[self.current_state][a] - Q[self.previous_state][action])
    maxactiondiff = Q[self.current_state][maxaction] - Q[self.previous_state][action]
    Q[self.previous_state][action] += self.alpha * (self.A * reward + self.B * self.gamma * maxactiondiff)