This article collects and summarizes typical usage examples of the numpy.argmax function in Python. If you have been wondering how exactly to use argmax, what it is for, and what it looks like in practice, the curated code examples below may help.
The following shows 15 code examples of the argmax function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
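Before diving into the examples, a quick refresher on what np.argmax does: it returns the index of the largest element, either in the flattened array or along a chosen axis.

import numpy as np

a = np.array([[1, 5, 2],
              [8, 3, 4]])
print(np.argmax(a))          # 3: position of the max in the flattened array
print(np.argmax(a, axis=0))  # [1 0 1]: row index of the max in each column
print(np.argmax(a, axis=1))  # [1 0]: column index of the max in each row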
Example 1: predict

def predict(self, X):
    """Predict class labels for X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Matrix of training samples.

    Returns
    -------
    maj_vote : array-like, shape = [n_samples]
        Predicted class labels.
    """
    if self.vote == 'probability':
        maj_vote = np.argmax(self.predict_proba(X), axis=1)
    else:  # 'classlabel' vote
        # Collect results from clf.predict calls
        predictions = np.asarray([clf.predict(X)
                                  for clf in self.classifiers_]).T
        # weighted majority vote over the collected predictions
        maj_vote = np.apply_along_axis(
            lambda x: np.argmax(np.bincount(x, weights=self.weights)),
            axis=1,
            arr=predictions)
    maj_vote = self.lablenc_.inverse_transform(maj_vote)
    return maj_vote
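The heart of the 'classlabel' branch is np.argmax(np.bincount(...)), which turns one row of integer votes into a single winning label. A minimal standalone sketch of just that pattern (the votes and weights below are invented):

import numpy as np

votes = np.array([0, 1, 1, 2])             # labels predicted by four classifiers (hypothetical)
weights = np.array([0.5, 1.0, 1.0, 2.5])   # per-classifier weights (hypothetical)
# bincount sums the weights for each label; argmax picks the heaviest label
print(np.argmax(np.bincount(votes, weights=weights)))  # 2 (weight 2.5 beats 1.0 + 1.0 for label 1)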
Example 2: get_batch

def get_batch(self, model, batch_size):
    len_memory = len(self.memory)
    num_actions = 6
    encouraged_actions = np.zeros(num_actions, dtype=int)
    predicted_actions = np.zeros(num_actions, dtype=int)
    inputs = np.zeros((min(len_memory, batch_size), 4, 80, 74))
    targets = np.zeros((inputs.shape[0], num_actions))
    q_list = np.zeros(inputs.shape[0])
    for i, idx in enumerate(np.random.randint(0, len_memory, size=inputs.shape[0])):
        input_t, action_t, reward_t, input_tp1 = self.memory[idx][0]
        terminal = self.memory[idx][1]
        inputs[i] = input_t
        targets[i] = model.predict(input_t.reshape(1, 4, 80, 74))[0]
        q_next = np.max(model.predict(input_tp1.reshape(1, 4, 80, 74))[0])
        q_list[i] = np.max(targets[i])
        predicted_actions[np.argmax(targets[i])] += 1
        # one-step Q-learning target: reward plus discounted max future Q,
        # with the bootstrap term zeroed on terminal transitions
        targets[i, action_t] = (1. - terminal) * self.discount * q_next + reward_t
        if reward_t > 0. or terminal:
            print("Action %d rewarded with %f (sample #%d)" % (action_t, targets[i, action_t], idx))
        encouraged_actions[np.argmax(targets[i])] += 1
    return inputs, targets, encouraged_actions, predicted_actions, np.average(q_list)
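The line that overwrites targets[i, action_t] is the one-step Q-learning target r + γ·max Q(s', ·), with the (1. - terminal) factor zeroing the bootstrap term at episode ends. A toy illustration with invented numbers:

import numpy as np

discount = 0.9
q_next = np.array([0.2, 1.5, 0.7])   # hypothetical Q(s', a') estimates
reward, terminal = 1.0, 0            # a non-terminal transition
target = (1. - terminal) * discount * np.max(q_next) + reward
print(target)  # 1.0 + 0.9 * 1.5 = 2.35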
Example 3: choose_action

def choose_action(self, planner_type=1):
    """Select an action using one of several selection methods, depending on
    the planner_type parameter.

    Parameters
    ----------
    planner_type:
        1 .. planner that chooses a random action
        2 .. greedy planner
        3 .. randomized (epsilon-greedy) planner
        4 .. naive reward-matrix planner (decide optimally if all rewards are
             known at the given state, explore unexplored actions otherwise)
    """
    if planner_type == 1:
        return random.choice(self.actions)
    elif planner_type == 2:
        return self.actions[np.argmax(self.q[self.state_index(self.state)])]
    elif planner_type == 3:
        # greedy with probability 0.9, random exploration otherwise
        return (self.actions[np.argmax(self.q[self.state_index(self.state)])]
                if random.random() > 0.1 else random.choice(self.actions))
    elif planner_type == 4:
        # all actions explored (assumes a zero reward does not occur)
        if np.count_nonzero(self.rewards[self.state_index(self.state)]) == len(self.actions):
            # print('I have learned all rewards in this state')
            return self.actions[np.argmax(self.rewards[self.state_index(self.state)])]
        else:
            # identify the first unexplored action and try it
            for i in range(len(self.actions)):
                if self.rewards[self.state_index(self.state), i] == 0:
                    return self.actions[i]
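Planner 3 is a standard epsilon-greedy rule with ε = 0.1: exploit the argmax action most of the time, explore at random otherwise. A self-contained sketch (the action list and Q-values are made up):

import random
import numpy as np

actions = ['left', 'right', 'jump']   # hypothetical action set
q_row = np.array([0.1, 0.8, 0.3])     # hypothetical Q-values for the current state
epsilon = 0.1
action = (actions[np.argmax(q_row)]
          if random.random() > epsilon else random.choice(actions))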
Example 4: predict

def predict(self, X):
    """Predict class labels for X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    Returns
    -------
    maj : array-like, shape = [n_samples]
        Predicted class labels.
    """
    if self.voting == "soft":
        maj = np.argmax(self.predict_proba(X), axis=1)
    else:  # 'hard' voting
        predictions = self._predict(X)
        maj = np.apply_along_axis(
            lambda x: np.argmax(np.bincount(x, weights=self.weights)),
            axis=1, arr=predictions)
    maj = self.le_.inverse_transform(maj)
    return maj
Example 5: KTCheckOverValidLoop

def KTCheckOverValidLoop(cc):
    #
    # Check-over-Valid Loop CONDITION
    #
    if cc.mCVI + cc.kCB <= 2500:
        #
        # Check-over-Valid Loop BODY
        #
        # Socket, Rq#, B, first, last, sizeIn, sizeOut, x128+s0, x128+s1, maxT, maxR, minS, maxS
        X, Y = KNFetchImgs(cc.sock, 1, cc.kCB, 22500+cc.mCVI, 22500+cc.mCVI+cc.kCB, 256, 192, 1, 1, 0, 0, 1.0, 1.0)
        YEst = cc.model.predict({"input": X})["output"]
        # a sample is in error when the predicted argmax class differs from the label's
        yDiff = np.argmax(Y, axis=1) != np.argmax(YEst, axis=1)
        cc.mCVI += cc.kCB
        cc.mCVErrCnt += int(np.sum(yDiff))
        sys.stdout.write("\rChecking... {:5d} valid set errors on {:5d} checked ({:7.3f}%)".format(
            cc.mCVErrCnt, cc.mCVI, 100.0*float(cc.mCVErrCnt)/cc.mCVI))
        sys.stdout.flush()
        return cc.invoke(KTCheckOverValidLoop, snap=cc.shouldCVSnap)
    else:
        #
        # Check-over-Valid Loop EPILOGUE
        #
        cc.log({"validErr": float(cc.mCVErrCnt)/cc.mCVI})
        sys.stdout.write("\n")
        sys.stdout.flush()
        return cc.invoke(KTEpochLoopEnd, snap=False)
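The yDiff line is the usual way to score one-hot labels against network outputs: argmax along axis 1 reduces both to class indices, and the inequality marks the errors. In isolation, with made-up arrays:

import numpy as np

Y = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])   # one-hot ground truth
YEst = np.array([[0.7, 0.2, 0.1],                  # hypothetical predictions
                 [0.1, 0.3, 0.6],
                 [0.2, 0.1, 0.7]])
yDiff = np.argmax(Y, axis=1) != np.argmax(YEst, axis=1)
print(int(np.sum(yDiff)))  # 1: only the second sample is misclassified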
Example 6: compare_subcarrier_location

def compare_subcarrier_location(alpha, M, K, overlap, oversampling_factor):
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    goofy_ordering = False
    taps = gfdm_filter_taps('rrc', alpha, M, K, oversampling_factor)
    A0 = gfdm_modulation_matrix(taps, M, K, oversampling_factor, group_by_subcarrier=goofy_ordering)
    n = np.arange(M * K * oversampling_factor, dtype=complex)
    colors = iter(cm.rainbow(np.linspace(0, 1, K)))
    for k in range(K):
        color = next(colors)
        f = np.exp(1j * 2 * np.pi * (float(k) / (K * oversampling_factor)) * n)
        F = abs(np.fft.fft(f))
        # peak position of the FFT magnitude locates the subcarrier
        fm = np.argmax(F) / M
        plt.plot(F, '-.', label=k, color=color)
        data = get_zero_f_data(k, K, M)
        x0 = gfdm_gr_modulator(data, 'rrc', alpha, M, K, overlap, compat_mode=goofy_ordering) * (2. / K)
        f0 = 1. * np.argmax(abs(np.fft.fft(x0))) / M
        plt.plot(abs(np.fft.fft(x0)), label='FFT' + str(k), color=color)
        xA = A0.dot(get_data_matrix(data, K, group_by_subcarrier=goofy_ordering).flatten()) * (1. / K)
        fA = np.argmax(abs(np.fft.fft(xA))) / M
        plt.plot(abs(np.fft.fft(xA)), '-', label='matrix' + str(k), color=color)
        print(fm, fA, f0)
    plt.legend()
    plt.show()
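Here np.argmax locates the peak bin of an FFT magnitude spectrum, i.e. the dominant frequency of each subcarrier. The same pattern on its own (signal parameters invented):

import numpy as np

fs, n = 1000, 1000                        # 1 kHz sample rate, 1 s of samples (hypothetical)
t = np.arange(n) / fs
signal = np.sin(2 * np.pi * 50 * t)       # a 50 Hz tone
spectrum = np.abs(np.fft.fft(signal))
peak_bin = np.argmax(spectrum[:n // 2])   # search positive frequencies only
print(peak_bin * fs / n)                  # 50.0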
Example 7: test_decision_function_shape

def test_decision_function_shape():
    # check that decision_function_shape='ovr' gives
    # correct shape and is consistent with predict
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(iris.data, iris.target)
    dec = clf.decision_function(iris.data)
    assert_equal(dec.shape, (len(iris.data), 3))
    assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
    # with five classes:
    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(X_train, y_train)
    dec = clf.decision_function(X_test)
    assert_equal(dec.shape, (len(X_test), 5))
    assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape of decision_function_shape='ovo'
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(X_train, y_train)
    dec = clf.decision_function(X_train)
    assert_equal(dec.shape, (len(X_train), 10))
    # check deprecation warning
    clf = svm.SVC(kernel='linear', C=0.1).fit(X_train, y_train)
    msg = "change the shape of the decision function"
    dec = assert_warns_message(ChangedBehaviorWarning, msg,
                               clf.decision_function, X_train)
    assert_equal(dec.shape, (len(X_train), 10))
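The assertions rest on the fact that with a one-vs-rest decision function, the predicted class is simply the column with the largest score. A short sketch of that relationship using scikit-learn (dataset choice arbitrary):

import numpy as np
from sklearn import datasets, svm

iris = datasets.load_iris()
clf = svm.SVC(kernel='linear', decision_function_shape='ovr').fit(iris.data, iris.target)
scores = clf.decision_function(iris.data)  # shape (n_samples, n_classes)
# predict agrees with the argmax over per-class scores
assert np.array_equal(clf.predict(iris.data), np.argmax(scores, axis=1))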
Example 8: _findUSpace

def _findUSpace(self):
    """Find independent U components with respect to invariant
    rotations.
    """
    n = len(self.invariants)
    R6zall = numpy.tile(-numpy.identity(6, dtype=float), (n, 1))
    R6zall_iter = numpy.split(R6zall, n, axis=0)
    i6kl = ((0, (0, 0)), (1, (1, 1)), (2, (2, 2)),
            (3, (0, 1)), (4, (0, 2)), (5, (1, 2)))
    for op, R6z in zip(self.invariants, R6zall_iter):
        R = op.R
        for j, Ucj in enumerate(self.Ucomponents):
            Ucj2 = numpy.dot(R, numpy.dot(Ucj, R.T))
            for i, kl in i6kl:
                R6z[i, j] += Ucj2[kl]
    Usp6 = nullSpace(R6zall)
    # normalize Usp6 by its maximum component
    mxcols = numpy.argmax(numpy.fabs(Usp6), axis=1)
    mxrows = numpy.arange(len(mxcols))
    Usp6 /= Usp6[mxrows, mxcols].reshape(-1, 1)
    Usp6 = numpy.around(Usp6, 2)
    # normalize again after rounding to get correct signs
    mxcols = numpy.argmax(numpy.fabs(Usp6), axis=1)
    Usp6 /= Usp6[mxrows, mxcols].reshape(-1, 1)
    self.Uspace = numpy.tensordot(Usp6, self.Ucomponents, axes=(1, 0))
    self.Uisotropy = (len(self.Uspace) == 1)
    return
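The two normalization passes use numpy.argmax on absolute values to find each row's dominant component, then divide the row by it so that the component becomes exactly 1. The pattern in isolation:

import numpy

A = numpy.array([[0.5, -2.0, 1.0],
                 [3.0, 0.3, -0.6]])
mxcols = numpy.argmax(numpy.fabs(A), axis=1)   # column of the largest |entry| per row
mxrows = numpy.arange(len(mxcols))
A /= A[mxrows, mxcols].reshape(-1, 1)
print(A)  # row 0 -> [-0.25, 1.0, -0.5], row 1 -> [1.0, 0.1, -0.2]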
Example 9: _best_path

def _best_path(self, unlabeled_sequence):
    T = len(unlabeled_sequence)
    N = len(self._states)
    self._create_cache()
    self._update_cache(unlabeled_sequence)
    P, O, X, S = self._cache
    V = np.zeros((T, N), np.float32)
    B = -np.ones((T, N), int)
    V[0] = P + O[:, S[unlabeled_sequence[0]]]
    for t in range(1, T):
        for j in range(N):
            vs = V[t-1, :] + X[:, j]
            best = np.argmax(vs)
            V[t, j] = vs[best] + O[j, S[unlabeled_sequence[t]]]
            B[t, j] = best
    current = np.argmax(V[T-1, :])
    sequence = [current]
    for t in range(T-1, 0, -1):
        last = B[t, current]
        sequence.append(last)
        current = last
    sequence.reverse()
    return list(map(self._states.__getitem__, sequence))
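This is the Viterbi algorithm: at each time step, argmax selects the best predecessor state, and the backpointer table B recovers the most likely path at the end. A compact standalone sketch with invented (log-)probabilities:

import numpy as np

log_trans = np.log(np.array([[0.7, 0.3], [0.4, 0.6]]))  # hypothetical transitions
log_emit = np.log(np.array([[0.9, 0.1], [0.2, 0.8]]))   # hypothetical emissions
obs = [0, 1, 1]

V = log_emit[:, obs[0]].copy()               # uniform prior assumed for simplicity
B = np.zeros((len(obs), 2), dtype=int)
for t in range(1, len(obs)):
    scores = V[:, None] + log_trans          # score of every (prev, next) state pair
    B[t] = np.argmax(scores, axis=0)         # best predecessor for each next state
    V = np.max(scores, axis=0) + log_emit[:, obs[t]]
path = [int(np.argmax(V))]
for t in range(len(obs) - 1, 0, -1):
    path.append(int(B[t, path[-1]]))
path.reverse()
print(path)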
Example 10: action_callback

def action_callback(self, state):
    '''
    Implement this function to learn things and take actions.
    Return 0 if you don't want to jump and 1 if you do.
    '''
    # You might do some learning here based on the current state and the last state.
    # You'll need to select an action and return it.
    # Return 0 to swing and 1 to jump.
    new_state = state
    self.flag += 1
    if self.last_state is None:
        self.flag = 1
        return 0
    if self.flag == 2:
        self.gravity = state['monkey']['vel']
    # if self.epsilon < random.random():
    index = self.find_index(state)
    old_q = self.Q[self.find_index(self.last_state)][self.last_action]
    # Q-learning update for the last state/action pair; the bootstrap term uses
    # np.max (the best next Q-value), not np.argmax (which would be an index)
    self.Q[self.find_index(self.last_state)][self.last_action] = \
        old_q + self.alpha * (self.last_reward + self.gamma * np.max(self.Q[index]) - old_q)
    # act greedily with respect to the learned Q-values
    self.last_action = np.argmax(self.Q[index])
    # else:
    #     self.last_action = random.randrange(0, 2)
    self.last_state = new_state
    # print([self.Q[index][0], self.Q[index][1]])
    self.time += 0.1
    self.epsilon = 1 / self.time
    return self.last_action
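The update above is tabular Q-learning, Q(s,a) ← Q(s,a) + α·(r + γ·max Q(s',·) − Q(s,a)), with np.argmax then choosing the greedy action in the new state. A toy version with invented numbers:

import numpy as np

alpha, gamma = 0.5, 0.9
Q = np.zeros((3, 2))                  # hypothetical table: 3 states x 2 actions
s, a, r, s_next = 0, 1, 1.0, 2
Q[s, a] += alpha * (r + gamma * np.max(Q[s_next]) - Q[s, a])
greedy_action = np.argmax(Q[s_next])  # act greedily in the new state
print(Q[s, a], greedy_action)         # 0.5 0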
Example 11: forwardProp

def forwardProp(self, node, correct, guess):
    cost = total = 0.0
    if node.isLeaf:
        node.fprop = True
        node.hActs1 = self.L[:, node.word]
        node.probs = softmax(self.Ws.dot(node.hActs1) + self.bs)
        p = node.probs * make_onehot(node.label, len(self.bs))
        cost = -np.log(np.sum(p))
        correct.append(node.label)
        guess.append(np.argmax(node.probs))
        return cost, 1
    c1, t1 = self.forwardProp(node.left, correct, guess)
    c2, t2 = self.forwardProp(node.right, correct, guess)
    if node.left.fprop and node.right.fprop:
        node.fprop = True
        h = np.hstack([node.left.hActs1, node.right.hActs1])
        tmp = np.zeros(len(node.left.hActs1))
        for i in range(len(tmp)):
            tmp[i] = h.dot(self.V[i]).dot(h)
        node.hActs1 = self.ReLU(self.W.dot(h) + self.b + tmp)
        node.probs = softmax(self.Ws.dot(node.hActs1) + self.bs)
        p = node.probs * make_onehot(node.label, len(self.bs))
        cost = -np.log(np.sum(p))
        correct.append(node.label)
        guess.append(np.argmax(node.probs))
    cost += c1
    cost += c2
    total += t1
    total += t2
    return cost, total + 1
Example 12: predict

def predict(self, X):
    """Predict class for X.

    The predicted class of an input sample is computed as the majority
    prediction of the trees in the forest.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    y : array of shape = [n_samples] or [n_samples, n_outputs]
        The predicted classes.
    """
    n_samples = len(X)
    proba = self.predict_proba(X)
    if self.n_outputs_ == 1:
        return self.classes_.take(np.argmax(proba, axis=1), axis=0)
    else:
        predictions = np.zeros((n_samples, self.n_outputs_))
        for k in range(self.n_outputs_):
            predictions[:, k] = self.classes_[k].take(np.argmax(proba[k], axis=1), axis=0)
        return predictions
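classes_.take(np.argmax(proba, axis=1)) maps column indices of the probability matrix back to the original class labels, which need not be 0..n_classes-1. In isolation, with made-up labels:

import numpy as np

classes = np.array(['bird', 'cat', 'dog'])   # labels in training order (hypothetical)
proba = np.array([[0.2, 0.7, 0.1],           # per-class probabilities (hypothetical)
                  [0.1, 0.3, 0.6]])
print(classes.take(np.argmax(proba, axis=1), axis=0))  # ['cat' 'dog']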
Example 13: get_next_list_tensor

def get_next_list_tensor(self, inc=None):
    '''Returns the next batch in a list, where each element of the list
    corresponds to a single sequence.
    '''
    batch = list()
    # define the increment of the cursor between calls
    if inc is None:
        inc = self.window_size
    for b in range(self.batch_size):  # each element of the batch has a cursor
        # confirm that the current window stays in the page
        while (self.cursor[b] + self.window_size) > (self.cumlength[self.cursor_page[b]]):
            # self.cursor[b] = (self.cursor[b] + self.window_size) % self.length
            self.cursor[b] = (self.cursor[b] + inc) % self.length
            # argmax on a boolean array returns the first True, i.e. the first
            # page whose cumulative length exceeds the cursor
            self.cursor_page[b] = np.argmax(self.cursor[b] < self.cumlength)
        # get window for current cursor
        start_idx = self.cursor[b] - (self.cumlength[self.cursor_page[b]] - self.page_len[self.cursor_page[b]])
        batch.append(self.data[self.cursor_page[b]][start_idx:(start_idx + self.window_size)])
        # update cursor
        # self.cursor[b] = (self.cursor[b] + self.window_size) % self.length
        self.cursor[b] = (self.cursor[b] + inc) % self.length
        self.cursor_page[b] = np.argmax(self.cursor[b] < self.cumlength)
    return batch
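This example leans on a handy idiom: applied to a boolean array, np.argmax returns the index of the first True, since True > False and argmax reports the first maximum. For instance:

import numpy as np

cumlength = np.array([100, 250, 400])   # hypothetical cumulative page lengths
cursor = 180
print(np.argmax(cursor < cumlength))    # 1: first page whose cumulative length exceeds the cursor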
Example 14: minimize_energy

def minimize_energy(S_init=None, R_init=None):
    en = 0.
    curen = -np.inf
    i = 0
    en_tot = -np.inf
    curS = S_init
    curR = R_init
    while np.abs(en - curen) > eps:
        i += 1
        if curR is not None:
            # optimize topics given regions
            ans = np.zeros((segment_num, topics_num))
            for v, l in struct2.items():
                ans[v, :] -= binary_prescale * w2[:, curR[l]].sum(axis=1).flatten()
            curS = np.argmax(ans, axis=1).flatten()
        # optimize regions given topics
        # unary energy
        ans = alpha * alpha_prescale * np.log(pR)
        # binary energy
        for v, l in struct1.items():
            ans[v, :] -= binary_prescale * w2[curS[l], :].sum(axis=0).flatten()
        curR = np.argmax(ans, axis=1).flatten()
        curen, en = np.sum(np.max(ans, axis=1)), curen
    en_un = alpha_prescale * alpha * np.log(pR)
    en_un = en_un[range(len(curR)), curR].sum()
    if curen > en_tot:
        S = curS
        R = curR
        en_tot = curen
    return R, S, en_tot
Example 15: ApproxCharacteristicMatrix

def ApproxCharacteristicMatrix(self, B, c):
    if B <= 3:
        print("Parameter B should be greater than 3!")
        return None
    if c < 1:
        print("Parameter c should be at least 1!")
        return None
    Ixy = float('-inf')
    for y in range(2, B // 2 + 1):
        x = B // y
        I = ApproxMaxMI(self.D, x, y, c * x)
        IPerp = ApproxMaxMI(self.DPerp, x, y, c * x)
        maxI_index = np.argmax(I)
        maxIPerp_index = np.argmax(IPerp)
        if I[maxI_index] > IPerp[maxIPerp_index]:
            tempMaxI = I[maxI_index]
            max_x = maxI_index
        else:
            tempMaxI = IPerp[maxIPerp_index]
            max_x = maxIPerp_index
        if tempMaxI > Ixy:
            Ixy = tempMaxI
            Mxy = Ixy / np.log(min(max_x, y))
    return Mxy