This article collects typical usage examples of the theano.tensor.argmax function in Python. If you have been wondering how exactly to use argmax, or what it is good for, the curated code examples below should help.
The following shows 15 code examples of the argmax function, ordered by popularity.
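Before the excerpts, here is a minimal self-contained sketch of the core behaviour (the variable names are illustrative, not taken from any of the projects below): T.argmax reduces a tensor to the indices of its largest entries along a given axis.

import numpy as np
import theano
import theano.tensor as T

# A matrix of class scores: one row per sample, one column per class.
scores = T.matrix('scores')

# argmax along axis=1 picks, for each row, the column index of the
# largest entry -- i.e. the predicted class label for that sample.
predicted = T.argmax(scores, axis=1)

predict = theano.function([scores], predicted)
print(predict(np.array([[0.1, 0.7, 0.2],
                        [0.5, 0.2, 0.3]], dtype=theano.config.floatX)))
# prints [1 0]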
Example 1: __init__
def __init__(self, input, input_dim, hidden_dim, output_dim,
             activation=T.tanh, init='uniform', inner_init='orthonormal',
             mini_batch=False, params=None):
    self.activation = activation
    self.mini_batch = mini_batch
    if mini_batch:
        input = input.dimshuffle(1, 0, 2)

    if params is None:
        self.W = theano.shared(value=get(identifier=init, shape=(input_dim, hidden_dim)),
                               name='W',
                               borrow=True)
        self.U = theano.shared(value=get(identifier=inner_init, shape=(hidden_dim, hidden_dim)),
                               name='U',
                               borrow=True)
        self.V = theano.shared(value=get(identifier=init, shape=(hidden_dim, output_dim)),
                               name='V',
                               borrow=True)
        self.bh = theano.shared(value=get(identifier='zero', shape=(hidden_dim,)),
                                name='bh',
                                borrow=True)
        self.by = theano.shared(value=get(identifier='zero', shape=(output_dim,)),
                                name='by',
                                borrow=True)
    else:
        self.W, self.U, self.V, self.bh, self.by = params

    self.h0 = theano.shared(value=get(identifier='zero', shape=(hidden_dim,)), name='h0', borrow=True)
    self.params = [self.W, self.U, self.V, self.bh, self.by]

    if mini_batch:
        def recurrence(x_t, h_tm_prev):
            h_t = activation(T.dot(x_t, self.W) +
                             T.dot(h_tm_prev, self.U) + self.bh)
            y_t = T.nnet.softmax(T.dot(h_t, self.V) + self.by)
            return h_t, y_t

        [self.h_t, self.y_t], _ = theano.scan(
            recurrence,
            sequences=input,
            outputs_info=[T.alloc(self.h0, input.shape[1], hidden_dim), None]
        )

        self.h_t = self.h_t.dimshuffle(1, 0, 2)
        self.y_t = self.y_t.dimshuffle(1, 0, 2)
        self.y = T.argmax(self.y_t, axis=2)
    else:
        def recurrence(x_t, h_tm_prev):
            h_t = activation(T.dot(x_t, self.W) +
                             T.dot(h_tm_prev, self.U) + self.bh)
            y_t = T.nnet.softmax(T.dot(h_t, self.V) + self.by)
            return h_t, y_t[0]

        [self.h_t, self.y_t], _ = theano.scan(
            recurrence,
            sequences=input,
            outputs_info=[self.h0, None]
        )
        self.y = T.argmax(self.y_t, axis=1)
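A note on the two branches above: the axis handed to T.argmax is simply wherever the class scores live. In the mini-batch branch the scan output is dimshuffled back to (batch, time, class), so predictions are taken over axis=2; in the single-sequence branch y_t has shape (time, class), hence axis=1.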
Example 2: get_monitoring_channels
def get_monitoring_channels(self, model, X, Y=None):
    rval = OrderedDict()

    history = model.mf(X, return_history=True)
    q = history[-1]

    if self.supervised:
        assert Y is not None
        Y_hat = q[-1]
        true = T.argmax(Y, axis=1)
        pred = T.argmax(Y_hat, axis=1)

        #true = Print('true')(true)
        #pred = Print('pred')(pred)

        wrong = T.neq(true, pred)
        err = T.cast(wrong.mean(), X.dtype)
        rval['misclass'] = err

        if len(model.hidden_layers) > 1:
            q = model.mf(X, Y=Y)
            pen = model.hidden_layers[-2].upward_state(q[-2])
            Y_recons = model.hidden_layers[-1].mf_update(state_below=pen)
            pred = T.argmax(Y_recons, axis=1)
            wrong = T.neq(true, pred)
            rval['recons_misclass'] = T.cast(wrong.mean(), X.dtype)

    return rval
Example 3: compile
def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
    self.optimizer = optimizers.get(optimizer)

    self.loss = objectives.get(loss)
    weighted_loss = weighted_objective(objectives.get(loss))

    # input of model
    self.X_train = self.get_input(train=True)
    self.X_test = self.get_input(train=False)

    self.y_train = self.get_output(train=True)
    self.y_test = self.get_output(train=False)

    # target of model
    self.y = T.zeros_like(self.y_train)
    self.weights = T.ones_like(self.y_train)

    train_loss = weighted_loss(self.y, self.y_train, self.weights)
    test_loss = weighted_loss(self.y, self.y_test, self.weights)

    train_loss.name = 'train_loss'
    test_loss.name = 'test_loss'
    self.y.name = 'y'

    if class_mode == "categorical":
        train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
        test_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_test, axis=-1)))
    elif class_mode == "binary":
        train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
        test_accuracy = T.mean(T.eq(self.y, T.round(self.y_test)))
    else:
        raise Exception("Invalid class mode:" + str(class_mode))
    self.class_mode = class_mode
    self.theano_mode = theano_mode

    for r in self.regularizers:
        train_loss = r(train_loss)
    updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)

    if type(self.X_train) == list:
        train_ins = self.X_train + [self.y, self.weights]
        test_ins = self.X_test + [self.y, self.weights]
        predict_ins = self.X_test
    else:
        train_ins = [self.X_train, self.y, self.weights]
        test_ins = [self.X_test, self.y, self.weights]
        predict_ins = [self.X_test]

    self._train = theano.function(train_ins, train_loss,
                                  updates=updates, allow_input_downcast=True, mode=theano_mode)
    self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
                                           updates=updates, allow_input_downcast=True, mode=theano_mode)
    self._predict = theano.function(predict_ins, self.y_test,
                                    allow_input_downcast=True, mode=theano_mode)
    self._test = theano.function(test_ins, test_loss,
                                 allow_input_downcast=True, mode=theano_mode)
    self._test_with_acc = theano.function(test_ins, [test_loss, test_accuracy],
                                          allow_input_downcast=True, mode=theano_mode)
Example 4: __call__
def __call__(self, model, X, Y):
    y_hat = model.fprop(X)
    y_hat = T.argmax(y_hat, axis=1)
    y = T.argmax(Y, axis=1)
    misclass = T.neq(y, y_hat).mean()
    misclass = T.cast(misclass, config.floatX)
    return misclass
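The pattern in Examples 2 and 4 -- argmax over both the one-hot targets and the model output, then T.neq(...).mean() -- is the standard way to compute a misclassification rate. A minimal self-contained sketch (names illustrative):

import numpy as np
import theano
import theano.tensor as T

Y = T.matrix('Y')          # one-hot targets, shape (n_samples, n_classes)
Y_hat = T.matrix('Y_hat')  # predicted class probabilities, same shape

# Reduce both to integer class indices, then count disagreements.
misclass = T.neq(T.argmax(Y, axis=1), T.argmax(Y_hat, axis=1)).mean()

f = theano.function([Y, Y_hat], misclass)
print(f(np.eye(3, dtype=theano.config.floatX),              # true labels 0, 1, 2
        np.eye(3, dtype=theano.config.floatX)[[0, 2, 1]]))  # predictions 0, 2, 1
# prints 0.666..., since two of the three samples are wrong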
Example 5: get_classification_accuracy
def get_classification_accuracy(self, model, minibatch, target):
    patches = []
    patches.append(minibatch[:, :42, :42])
    patches.append(minibatch[:, 6:, :42])
    patches.append(minibatch[:, 6:, 6:])
    patches.append(minibatch[:, :42, 6:])
    patches.append(minibatch[:, 3:45, 3:45])
    """for i in xrange(5):
        mirror_patch = []
        for j in xrange(42):
            mirror_patch.append(patches[i][:,:,42-(j+1):42-j])
        patches.append(T.concatenate(mirror_patch,axis=2))"""
    """for patch in patches:
        Y_list.append(model.fprop(patch, apply_dropout=False))
    Y = T.mean(T.stack(Y_list), axis=(1,2))"""
    Y = model.fprop(patches[-1], apply_dropout=False)
    i = 1
    for patch in patches[:-1]:
        Y = Y + model.fprop(patch, apply_dropout=False)
        i += 1
    print i
    Y = Y / float(i)
    return T.mean(T.cast(T.eq(T.argmax(Y, axis=1),
                              T.argmax(target, axis=1)), dtype='int32'),
                  dtype=config.floatX)
Example 6: accuracy_metric
def accuracy_metric(y_pred, y_true, void_labels, one_hot=False):
    assert (y_pred.ndim == 2) or (y_pred.ndim == 1)

    # y_pred to indices
    if y_pred.ndim == 2:
        y_pred = T.argmax(y_pred, axis=1)
    if one_hot:
        y_true = T.argmax(y_true, axis=1)

    # Compute accuracy
    acc = T.eq(y_pred, y_true).astype(_FLOATX)

    # Create mask
    mask = T.ones_like(y_true, dtype=_FLOATX)
    for el in void_labels:
        indices = T.eq(y_true, el).nonzero()
        if any(indices):
            mask = T.set_subtensor(mask[indices], 0.)

    # Apply mask
    acc *= mask
    acc = T.sum(acc) / T.sum(mask)
    return acc
Example 7: get_monitoring_channels
def get_monitoring_channels(self, model, data, **kwargs):
    X_pure, Y_pure = data
    X_pure.tag.test_value = numpy.random.random(size=[5, 784]).astype('float32')
    Y_pure.tag.test_value = numpy.random.randint(10, size=[5, 1]).astype('int64')
    rval = OrderedDict()

    g = model.compressor
    d = model.discriminator

    yhat_pure = T.argmax(d.fprop(X_pure), axis=1).dimshuffle(0, 'x')
    yhat_reconstructed = T.argmax(d.fprop(g.reconstruct(X_pure)), axis=1).dimshuffle(0, 'x')

    rval['conviction_pure'] = T.cast(T.eq(yhat_pure, 10).mean(), 'float32')
    rval['accuracy_pure'] = T.cast(T.eq(yhat_pure, Y_pure).mean(), 'float32')
    rval['inaccuracy_pure'] = 1 - rval['conviction_pure'] - rval['accuracy_pure']

    rval['conviction_fake'] = T.cast(T.eq(yhat_reconstructed, 10).mean(), 'float32')
    rval['accuracy_fake'] = T.cast(T.eq(yhat_reconstructed, Y_pure).mean(), 'float32')
    rval['inaccuracy_fake'] = 1 - rval['conviction_fake'] - rval['accuracy_fake']

    rval['discernment_pure'] = rval['accuracy_pure'] + rval['inaccuracy_pure']
    rval['discernment_fake'] = rval['conviction_fake']
    rval['discernment'] = 0.5 * (rval['discernment_pure'] + rval['discernment_fake'])

    # y = T.alloc(0., m, 1)

    d_obj, g_obj = self.get_objectives(model, data)
    rval['objective_d'] = d_obj
    rval['objective_g'] = g_obj

    # monitor probability of true
    # rval['now_train_compressor'] = self.now_train_compressor
    return rval
Example 8: init_model
def init_model(self):
    print('Initializing model...')
    ra_input_var = T.tensor3('raw_audio_input')
    mc_input_var = T.tensor3('melody_contour_input')
    target_var = T.imatrix('targets')
    network = self.build_network(ra_input_var, mc_input_var)

    prediction = layers.get_output(network)
    prediction = T.clip(prediction, 1e-7, 1.0 - 1e-7)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.sgd(loss, params, learning_rate=0.02)

    test_prediction = layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
    test_loss = test_loss.mean()
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), T.argmax(target_var, axis=1)),
                      dtype=theano.config.floatX)

    print('Building functions...')
    self.train_fn = theano.function([ra_input_var, mc_input_var, target_var],
                                    [loss, prediction],
                                    updates=updates,
                                    on_unused_input='ignore')
    self.val_fn = theano.function([ra_input_var, mc_input_var, target_var],
                                  [test_loss, test_acc, test_prediction],
                                  on_unused_input='ignore')
    self.run_fn = theano.function([ra_input_var, mc_input_var],
                                  [prediction],
                                  on_unused_input='ignore')
Example 9: construct_common_graph
def construct_common_graph(situation, args, outputs, dummy_states, Wy, by, y):
    ytilde = T.dot(outputs["h"], Wy) + by
    yhat = softmax_lastaxis(ytilde)

    errors = T.neq(T.argmax(y, axis=y.ndim - 1),
                   T.argmax(yhat, axis=yhat.ndim - 1))
    cross_entropies = crossentropy_lastaxes(yhat, y)

    error_rate = errors.mean().copy(name="error_rate")
    cross_entropy = cross_entropies.mean().copy(name="cross_entropy")
    cost = cross_entropy.copy(name="cost")

    graph = ComputationGraph([cost, cross_entropy, error_rate])

    state_grads = dict((k, T.grad(cost, v))
                       for k, v in dummy_states.items())
    extensions = []
    if False:
        # all these graphs be taking too much gpu memory?
        extensions.append(
            DumpVariables("%s_hiddens" % situation, graph.inputs,
                          [v.copy(name="%s%s" % (k, suffix))
                           for suffix, things in [("", outputs), ("_grad", state_grads)]
                           for k, v in things.items()],
                          batch=next(get_stream(which_set="train",
                                                batch_size=args.batch_size,
                                                num_examples=args.batch_size,
                                                length=args.length)
                                     .get_epoch_iterator(as_dict=True)),
                          before_training=True, every_n_epochs=10))

    return graph, extensions
Example 10: get_cost_test
def get_cost_test(self, inputs):
    image_input, label_input = inputs
    prob_ys_given_x = self.classifier.get_output_for(self.classifier_helper.get_output_for(image_input))
    cost_test = objectives.categorical_crossentropy(prob_ys_given_x, label_input)
    cost_acc = T.eq(T.argmax(prob_ys_given_x, axis=1), T.argmax(label_input, axis=1))
    return cost_test.mean(), cost_acc.mean()
Example 11: __theano__softmax
def __theano__softmax(self, inp, dim=None, predict=False, issequence=False):
    if dim is None:
        assert issequence, "Data dimensionality could not be parsed."
        dim = 2

    # FFD for dimensions 1 and 2
    if dim == 1 or dim == 2:
        # Using the numerically stable implementation (along the channel axis):
        ex = T.exp(inp - T.max(inp, axis=1, keepdims=True))
        y = ex / T.sum(ex, axis=1, keepdims=True)

        # One hot encoding for prediction
        if predict:
            y = T.argmax(y, axis=1)

    elif dim == 3:
        # Stable implementation again, this time along axis = 2 (channel axis)
        ex = T.exp(inp - T.max(inp, axis=2, keepdims=True))
        y = ex / T.sum(ex, axis=2, keepdims=True)

        # One hot encoding for prediction
        if predict:
            y = T.argmax(y, axis=2)

    else:
        raise NotImplementedError("Softmax is implemented in 2D, 3D and 1D.")

    return y
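One remark on this example: softmax is monotonic, so the exponentiation and normalization change nothing about where the maximum sits. For pure prediction, taking T.argmax directly over the raw scores would in principle give the same indices:

# assuming inp holds the raw pre-softmax scores along axis 1
y = T.argmax(inp, axis=1)  # same indices as argmax of the softmax output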
Example 12: train_model
def train_model(model, dataset):
    # train the lstm on our dataset!
    # let's monitor the error %
    # output is in shape (n_timesteps, n_sequences, data_dim)
    # calculate the mean prediction error over timesteps and batches
    predictions = T.argmax(model.get_outputs(), axis=2)
    actual = T.argmax(model.get_targets()[0].dimshuffle(1, 0, 2), axis=2)
    char_error = T.mean(T.neq(predictions, actual))

    # optimizer - RMSProp generally good for recurrent nets, lr taken from Karpathy's char-rnn project.
    # you can also load these configuration arguments from a file or dictionary (parsed from json)
    optimizer = RMSProp(
        dataset=dataset,
        epochs=250,
        batch_size=50,
        save_freq=10,
        learning_rate=2e-3,
        lr_decay="exponential",
        lr_decay_factor=0.97,
        decay=0.95,
        grad_clip=None,
        hard_clip=False
    )

    # monitors
    char_errors = Monitor(name='char_error', expression=char_error, train=True, valid=True, test=True)

    model.train(optimizer=optimizer, monitor_channels=[char_errors])
Example 13: compile
def compile(self, optimizer, loss, class_mode='categorical'):
    self.optimizer = optimizer
    self.loss = objectives.get(loss)

    self.X_train = self.get_input()  # symbolic variable
    self.y_train = self.get_output()  # symbolic variable
    self.y = T.zeros_like(self.y_train)  # symbolic variable

    train_loss = self.loss(self.y, self.y_train)

    if class_mode == 'categorical':
        train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
    elif class_mode == 'binary':
        train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
    else:
        raise Exception("Invalid class mode: " + str(class_mode))
    self.class_mode = class_mode

    #updates = self.optimizer.get_updates(train_loss, self.params)
    self.grad = T.grad(cost=train_loss, wrt=self.params, disconnected_inputs='raise')
    updates = []
    for p, g in zip(self.params, self.grad):
        updates.append((p, p - random.uniform(-0.3, 1)))

    if type(self.X_train) == list:
        train_ins = self.X_train + [self.y]
    else:
        train_ins = [self.X_train, self.y]

    self._train = theano.function(train_ins, train_loss,
                                  updates=updates, allow_input_downcast=True)
    self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
                                           updates=updates, allow_input_downcast=True)
Example 14: create_iter_functions
def create_iter_functions(data, output_layer):
    X_batch = T.matrix('x')
    Y_batch = T.ivector('y')
    trans = T.matrix('trans')
    transmap = T.ivector('transmap')

    objective = lasagne.objectives.Objective(output_layer,
                                             loss_function=lasagne.objectives.categorical_crossentropy)
    all_params = lasagne.layers.get_all_params(output_layer)
    loss_train = objective.get_loss(X_batch, target=Y_batch)

    pred48 = T.argmax(T.dot(lasagne.layers.get_output(output_layer, X_batch, deterministic=True), trans), axis=1)
    pred1943 = T.argmax(lasagne.layers.get_output(output_layer, X_batch, deterministic=True), axis=1)
    accuracy48 = T.mean(T.eq(pred48, transmap[Y_batch]), dtype=theano.config.floatX)
    accuracy1943 = T.mean(T.eq(pred1943, Y_batch), dtype=theano.config.floatX)

    updates = lasagne.updates.rmsprop(loss_train, all_params, LEARNING_RATE)

    iter_train = theano.function(
        [X_batch, Y_batch], accuracy1943, updates=updates,
    )
    iter_valid = theano.function(
        [X_batch, Y_batch], accuracy48,
        givens={
            trans: data['trans'],
            transmap: data['transmap']
        }
    )

    return {"train": iter_train, "valid": iter_valid}
Example 15: learningstep_m1
def learningstep_m1(self, Y, L, M, W, epsilon):
    """Perform a single learning step.

    This is a faster learning step for the case of
    mini-batch-size = 1.

    Keyword arguments:
    the keyword arguments must be the same as given in
    self.input_parameters(mode) for mode='train'.
    """
    # Input integration:
    I = T.dot(T.log(W), Y)

    # recurrent term:
    vM = theano.ifelse.ifelse(
        T.eq(L, -1),  # if no label is provided
        T.sum(M, axis=0),
        M[L, :]
    )

    # numeric trick to prevent overflow in the exp-function:
    max_exponent = 88. - T.log(I.shape[0]).astype('float32')
    scale = theano.ifelse.ifelse(T.gt(I[T.argmax(I)], max_exponent),
                                 I[T.argmax(I)] - max_exponent, 0.)

    # activation: recurrent softmax with overflow protection
    s = vM * T.exp(I - scale) / T.sum(vM * T.exp(I - scale))
    s.name = 's_%d.%d[t]' % (self._nmultilayer, self._nlayer)

    # weight update
    W_new = W + epsilon * (T.outer(s, Y) - s[:, np.newaxis] * W)
    W_new.name = 'W_%d.%d[t]' % (self._nmultilayer, self._nlayer)

    return s, W_new
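An aside on the overflow trick above: I[T.argmax(I)] indexes the 1-D tensor I at the position of its maximum, so it simply selects the largest entry; the same scale could be written more directly with T.max(I).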