This article collects typical usage examples of the theano.tensor.eq function in Python. If you are wondering what eq does or how to use it, the curated code examples below should help.
The following presents 15 code examples of the eq function, drawn from real projects and sorted by popularity by default.
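Before the project examples, here is a minimal sketch of T.eq itself (assuming a working Theano installation): it compares two tensors elementwise and returns an int8 tensor of 0s and 1s.

import numpy as np
import theano
import theano.tensor as T

x = T.ivector('x')
y = T.ivector('y')
eq_fn = theano.function([x, y], T.eq(x, y))  # elementwise equality, int8 output

print(eq_fn(np.array([1, 2, 3], dtype='int32'),
            np.array([1, 0, 3], dtype='int32')))  # -> [1 0 1]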
Example 1: compile
def compile(self, optimizer, loss, class_mode='categorical'):
    self.optimizer = optimizer
    self.loss = objectives.get(loss)

    self.X_train = self.get_input()      # symbolic variable
    self.y_train = self.get_output()     # symbolic variable
    self.y = T.zeros_like(self.y_train)  # symbolic variable

    train_loss = self.loss(self.y, self.y_train)

    if class_mode == 'categorical':
        train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
    elif class_mode == 'binary':
        train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
    else:
        raise Exception("Invalid class mode: " + str(class_mode))
    self.class_mode = class_mode

    #updates = self.optimizer.get_updates(train_loss, self.params)
    self.grad = T.grad(cost=train_loss, wrt=self.params, disconnected_inputs='raise')
    updates = []
    for p, g in zip(self.params, self.grad):
        updates.append((p, p - random.uniform(-0.3, 1)))

    if type(self.X_train) == list:
        train_ins = self.X_train + [self.y]
    else:
        train_ins = [self.X_train, self.y]

    self._train = theano.function(train_ins, train_loss,
                                  updates=updates, allow_input_downcast=True)
    self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
                                           updates=updates, allow_input_downcast=True)
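In this example T.eq feeds the accuracy metric by comparing argmax indices of the target and the prediction. A standalone sketch of that pattern (variable names here are illustrative, not from the project):

import theano
import theano.tensor as T

y_true = T.matrix('y_true')  # one-hot targets
y_prob = T.matrix('y_prob')  # predicted class probabilities

acc = T.mean(T.eq(T.argmax(y_true, axis=-1), T.argmax(y_prob, axis=-1)))
acc_fn = theano.function([y_true, y_prob], acc, allow_input_downcast=True)

# acc_fn([[1, 0], [0, 1]], [[0.9, 0.1], [0.8, 0.2]]) -> 0.5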
Example 2: AdaMaxAvg2
def AdaMaxAvg2(ws, objective, alpha=.01, beta1=.1, beta2=.001, beta3=0.01, n_accum=1):
    if n_accum == 1:
        return AdaMaxAvg(ws, objective, alpha, beta1, beta2, beta3)
    print 'AdaMax_Avg2', 'alpha:', alpha, 'beta1:', beta1, 'beta2:', beta2, 'beta3:', beta3, 'n_accum:', n_accum

    gs = G.ndict.T_grad(objective.sum(), ws, disconnected_inputs='raise')

    new = OrderedDict()

    from theano.ifelse import ifelse
    it = G.sharedf(0.)
    new[it] = it + 1
    reset = T.eq(T.mod(it, n_accum), 0)
    update = T.eq(T.mod(it, n_accum), n_accum - 1)

    ws_avg = []
    for j in range(len(ws)):
        w_avg = {}
        for i in ws[j]:
            _w = ws[j][i]
            _g = gs[j][i]
            #_g = T.switch(T.isnan(_g), T.zeros_like(_g), _g)  # remove NaN's
            mom1 = G.sharedf(_w.get_value() * 0.)
            _max = G.sharedf(_w.get_value() * 0.)
            w_avg[i] = G.sharedf(_w.get_value())
            g_sum = G.sharedf(_w.get_value() * 0.)

            new[g_sum] = ifelse(reset, _g, g_sum + _g)
            new[mom1] = ifelse(update, (1 - beta1) * mom1 + beta1 * new[g_sum], mom1)
            new[_max] = ifelse(update, T.maximum((1 - beta2) * _max, abs(new[g_sum]) + 1e-8), _max)
            new[_w] = ifelse(update, _w + alpha * new[mom1] / new[_max], _w)
            new[w_avg[i]] = ifelse(update, beta3 * new[_w] + (1. - beta3) * w_avg[i], w_avg[i])
        ws_avg += [w_avg]
    return new, ws_avg
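Here T.eq applied to T.mod of an iteration counter produces the reset/update flags that gate gradient accumulation. A minimal sketch of that cycling pattern in isolation (names are illustrative; assumes standard Theano):

import theano
import theano.tensor as T
from theano.ifelse import ifelse

n_accum = 4
it = theano.shared(0.0)   # iteration counter, a float as in the example above
acc = theano.shared(0.0)  # running gradient accumulator

reset = T.eq(T.mod(it, n_accum), 0)             # 1 on the first step of each cycle
update = T.eq(T.mod(it, n_accum), n_accum - 1)  # 1 on the last step of each cycle

g = T.dscalar('g')
new_acc = ifelse(reset, g, acc + g)  # restart or extend the running sum
step = theano.function([g], [reset, update],
                       updates=[(it, it + 1), (acc, new_acc)])
# reset is 1 whenever it % 4 == 0; update is 1 whenever it % 4 == 3.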
Example 3: compile
def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
    self.optimizer = optimizers.get(optimizer)
    self.loss = objectives.get(loss)
    weighted_loss = weighted_objective(objectives.get(loss))

    # input of model
    self.X_train = self.get_input(train=True)
    self.X_test = self.get_input(train=False)

    self.y_train = self.get_output(train=True)
    self.y_test = self.get_output(train=False)

    # target of model
    self.y = T.zeros_like(self.y_train)
    self.weights = T.ones_like(self.y_train)

    train_loss = weighted_loss(self.y, self.y_train, self.weights)
    test_loss = weighted_loss(self.y, self.y_test, self.weights)

    train_loss.name = 'train_loss'
    test_loss.name = 'test_loss'
    self.y.name = 'y'

    if class_mode == "categorical":
        train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
        test_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_test, axis=-1)))
    elif class_mode == "binary":
        train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
        test_accuracy = T.mean(T.eq(self.y, T.round(self.y_test)))
    else:
        raise Exception("Invalid class mode:" + str(class_mode))
    self.class_mode = class_mode
    self.theano_mode = theano_mode

    for r in self.regularizers:
        train_loss = r(train_loss)
    updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)

    if type(self.X_train) == list:
        train_ins = self.X_train + [self.y, self.weights]
        test_ins = self.X_test + [self.y, self.weights]
        predict_ins = self.X_test
    else:
        train_ins = [self.X_train, self.y, self.weights]
        test_ins = [self.X_test, self.y, self.weights]
        predict_ins = [self.X_test]

    self._train = theano.function(train_ins, train_loss,
                                  updates=updates, allow_input_downcast=True, mode=theano_mode)
    self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
                                           updates=updates, allow_input_downcast=True, mode=theano_mode)
    self._predict = theano.function(predict_ins, self.y_test,
                                    allow_input_downcast=True, mode=theano_mode)
    self._test = theano.function(test_ins, test_loss,
                                 allow_input_downcast=True, mode=theano_mode)
    self._test_with_acc = theano.function(test_ins, [test_loss, test_accuracy],
                                          allow_input_downcast=True, mode=theano_mode)
Example 4: get_action_results
def get_action_results(self, last_states, actions, time_i):
    # state is a boolean vector: whether or not the i-th action
    # was tried already during this session
    # last output[:, end_code] always remains 1 after first being triggered
    last_state = check_list(last_states)[0]
    action = check_list(actions)[0]

    batch_range = T.arange(action.shape[0])

    session_active = T.eq(last_state[:, self.end_action_id], 0)

    state_after_action = T.set_subtensor(last_state[batch_range, action], 1)

    new_state = T.switch(
        session_active.reshape([-1, 1]),
        state_after_action,
        last_state
    )

    session_terminated = T.eq(new_state[:, self.end_action_id], 1)

    observation = T.concatenate([
        self.joint_data[batch_range, action, None],  # uint8[batch, 1]
        session_terminated.reshape([-1, 1]),         # whether the session has terminated by now
        T.extra_ops.to_one_hot(action, self.joint_data.shape[1]),
    ], axis=1)

    return new_state, observation
Example 5: getRpRnTpTnForTrain0OrVal1
def getRpRnTpTnForTrain0OrVal1(self, y, training0OrValidation1):
    # The returned list has (numberOfClasses)x4 integers:
    # >numberOfRealPositives, numberOfRealNegatives, numberOfTruePredictedPositives, numberOfTruePredictedNegatives<
    # for each class (incl background).
    # Order in the list is the natural order of the classes
    # (ie class-0 RP,RN,TPP,TPN, class-1 RP,RN,TPP,TPN, class-2 RP,RN,TPP,TPN ...)
    # param y: y = T.itensor4('y'). Dimensions [batchSize, r, c, z]
    yPredToUse = self.y_pred_train if training0OrValidation1 == 0 else self.y_pred_val
    checkDimsOfYpredAndYEqual(y, yPredToUse, "training" if training0OrValidation1 == 0 else "validation")

    returnedListWithNumberOfRpRnTpTnForEachClass = []

    for class_i in xrange(0, self._numberOfOutputClasses):
        # Number of Real Positives, Real Negatives, True Predicted Positives and
        # True Predicted Negatives are reported PER CLASS (first for WHOLE).
        tensorOneAtRealPos = T.eq(y, class_i)
        tensorOneAtRealNeg = T.neq(y, class_i)
        tensorOneAtPredictedPos = T.eq(yPredToUse, class_i)
        tensorOneAtPredictedNeg = T.neq(yPredToUse, class_i)
        tensorOneAtTruePos = T.and_(tensorOneAtRealPos, tensorOneAtPredictedPos)
        tensorOneAtTrueNeg = T.and_(tensorOneAtRealNeg, tensorOneAtPredictedNeg)

        returnedListWithNumberOfRpRnTpTnForEachClass.append(T.sum(tensorOneAtRealPos))
        returnedListWithNumberOfRpRnTpTnForEachClass.append(T.sum(tensorOneAtRealNeg))
        returnedListWithNumberOfRpRnTpTnForEachClass.append(T.sum(tensorOneAtTruePos))
        returnedListWithNumberOfRpRnTpTnForEachClass.append(T.sum(tensorOneAtTrueNeg))
    return returnedListWithNumberOfRpRnTpTnForEachClass
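The masks above are plain 0/1 tensors, so combining them with T.and_ and T.sum yields confusion counts directly. A standalone sketch of the per-class counting (variable names are illustrative):

import theano
import theano.tensor as T

y_true = T.ivector('y_true')
y_pred = T.ivector('y_pred')
class_i = T.iscalar('class_i')

is_pos = T.eq(y_true, class_i)               # 1 where the true label is class_i
pred_pos = T.eq(y_pred, class_i)             # 1 where the prediction is class_i
true_pos = T.sum(T.and_(is_pos, pred_pos))   # count of true positives

tp_fn = theano.function([y_true, y_pred, class_i], true_pos,
                        allow_input_downcast=True)
# tp_fn([0, 1, 1, 2], [0, 1, 2, 1], 1) -> 1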
Example 6: __call__
def __call__(self, input_):
    m = input_.mean()
    v = input_.std()

    new_m = T.switch(T.eq(self.m, 0.),
                     m,
                     (np.float32(1.) - self.rate) * self.m + self.rate * m)
    new_var = T.switch(T.eq(self.var, 0.),
                       v,
                       (np.float32(1.) - self.rate) * self.var + self.rate * v)

    updates = [(self.m, new_m), (self.var, new_var)]

    input_centered = (
        (input_ - new_m) / T.maximum(1., T.sqrt(new_var)))

    input_ = T.zeros_like(input_) + input_

    outs = OrderedDict(
        x=input_,
        x_centered=input_centered,
        m=new_m,
        var=new_var
    )

    return outs, updates
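Example 6 uses T.eq against zero as an "uninitialized" flag: until the running statistic has been set once, the fresh batch value is taken verbatim. A condensed sketch of that switch (names are illustrative):

import numpy as np
import theano
import theano.tensor as T

rate = np.float32(0.1)
m = theano.shared(np.float32(0.), name='running_mean')  # 0. means "not yet set"

x = T.fscalar('x')
new_m = T.switch(T.eq(m, 0.),
                 x,                                       # first value seen
                 (np.float32(1.) - rate) * m + rate * x)  # exponential moving average
update_mean = theano.function([x], new_m, updates=[(m, new_m)])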
Example 7: test_tt
def test_tt(self):
    sample, updates = rejection_sample(
        [self.fair_coin, ],
        tensor.eq(tensor.sum(tensor.eq(self.coin, self.data)), 5))
    sampler = theano.function([], sample, updates=updates)

    # TODO: this is super-slow, how can bher do this fast?
    for i in range(100):
        print sampler()
Example 8: functions
def functions(network):
    # Symbolic variables
    X = T.tensor4()
    Y = T.ivector()

    # Non-deterministic training
    parameters = nn.layers.get_all_params(layer=network, trainable=True)
    output = nn.layers.get_output(layer_or_layers=network, inputs=X,
                                  deterministic=False)
    prediction = output.argmax(-1)
    loss = T.mean(nn.objectives.categorical_crossentropy(
        predictions=output, targets=Y))
    accuracy = T.mean(T.eq(prediction, Y))
    gradient = T.grad(cost=loss, wrt=parameters)
    update = nn.updates.nesterov_momentum(loss_or_grads=gradient,
                                          params=parameters,
                                          learning_rate=0.001, momentum=0.9)
    training_function = theano.function(
        inputs=[X, Y], outputs=[loss, accuracy], updates=update)

    # Non-deterministic testing
    test_function = theano.function(
        inputs=[X], outputs=prediction)

    # Deterministic validation
    det_output = nn.layers.get_output(layer_or_layers=network, inputs=X,
                                      deterministic=True)
    det_prediction = det_output.argmax(-1)
    det_loss = T.mean(nn.objectives.categorical_crossentropy(
        predictions=det_output, targets=Y))
    det_accuracy = T.mean(T.eq(det_prediction, Y))
    validation_function = theano.function(
        inputs=[X, Y], outputs=[det_loss, det_accuracy])

    return training_function, validation_function, test_function
Example 9: custom_svrg1
def custom_svrg1(loss, params, m=100, learning_rate=0.01):
    grads = theano.grad(loss, params)

    updates = OrderedDict()
    it_num = theano.shared(np.cast['int16'](0.))
    it = it_num + 1

    for param, grad in zip(params, grads):
        value = param.get_value(borrow=True)
        mu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                           broadcastable=param.broadcastable)
        grad_w_tilde = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                                     broadcastable=param.broadcastable)
        new_grad_w_tilde = theano.ifelse.ifelse(T.eq(it, m), grad, grad_w_tilde)
        mu_acc = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                               broadcastable=param.broadcastable)

        updates[param] = param - learning_rate * (grad - grad_w_tilde + mu)
        updates[grad_w_tilde] = new_grad_w_tilde
        updates[mu] = theano.ifelse.ifelse(T.eq(T.mod(it, m), 0), mu_acc, mu)
        updates[mu_acc] = theano.ifelse.ifelse(T.eq(T.mod(it, m), 0), 0 * mu_acc, mu_acc + grad)
        updates[it_num] = theano.ifelse.ifelse(T.eq(it, m), np.cast['int16'](1), np.cast['int16'](m))

    return updates
Example 10: multiclassRealPosAndNegAndTruePredPosNegTraining0OrValidation1
def multiclassRealPosAndNegAndTruePredPosNegTraining0OrValidation1(self, y, training0OrValidation1):
    """
    The returned list has (numberOfClasses)x4 integers:
    >numberOfRealPositives, numberOfRealNegatives, numberOfTruePredictedPositives, numberOfTruePredictedNegatives<
    for each class (incl background).
    Order in the list is the natural order of the classes
    (ie class-0 RP,RN,TPP,TPN, class-1 RP,RN,TPP,TPN, class-2 RP,RN,TPP,TPN ...)
    """
    returnedListWithNumberOfRpRnPpPnForEachClass = []

    for class_i in xrange(0, self.numberOfOutputClasses):
        # Number of Real Positives, Real Negatives, True Predicted Positives and
        # True Predicted Negatives are reported PER CLASS (first for WHOLE).
        vectorOneAtRealPositives = T.eq(y, class_i)
        vectorOneAtRealNegatives = T.neq(y, class_i)

        if training0OrValidation1 == 0:  # training
            yPredToUse = self.y_pred
        else:  # validation
            yPredToUse = self.y_pred_inference

        vectorOneAtPredictedPositives = T.eq(yPredToUse, class_i)
        vectorOneAtPredictedNegatives = T.neq(yPredToUse, class_i)
        vectorOneAtTruePredictedPositives = T.and_(vectorOneAtRealPositives, vectorOneAtPredictedPositives)
        vectorOneAtTruePredictedNegatives = T.and_(vectorOneAtRealNegatives, vectorOneAtPredictedNegatives)

        returnedListWithNumberOfRpRnPpPnForEachClass.append(T.sum(vectorOneAtRealPositives))
        returnedListWithNumberOfRpRnPpPnForEachClass.append(T.sum(vectorOneAtRealNegatives))
        returnedListWithNumberOfRpRnPpPnForEachClass.append(T.sum(vectorOneAtTruePredictedPositives))
        returnedListWithNumberOfRpRnPpPnForEachClass.append(T.sum(vectorOneAtTruePredictedNegatives))
    return returnedListWithNumberOfRpRnPpPnForEachClass
Example 11: each_loss
def each_loss(outpt, inpt):
    # inpt is the label sequence (y) already padded with blanks
    blank = 26
    y_nblank = T.neq(inpt, blank)
    n = T.dot(y_nblank, y_nblank)  # true number of characters (non-blank)
    N = 2 * n + 1  # padded length, excluding extra trailing padding
    labels = inpt[:N]
    labels2 = T.concatenate((labels, [blank, blank]))
    sec_diag = T.neq(labels2[:-2], labels2[2:]) * T.eq(labels2[1:-1], blank)
    recurrence_relation = \
        T.eye(N) + \
        T.eye(N, k=1) + \
        T.eye(N, k=2) * sec_diag.dimshuffle((0, 'x'))

    pred_y = outpt[:, labels]

    fwd_pbblts, _ = theano.scan(
        lambda curr, accum: T.switch(T.eq(curr * T.dot(accum, recurrence_relation), 0.0),
                                     T.dot(accum, recurrence_relation),
                                     curr * T.dot(accum, recurrence_relation)),
        sequences=[pred_y],
        outputs_info=[T.eye(N)[0]]
    )
    #return fwd_pbblts
    #likelihood = fwd_pbblts[0, 0]
    likelihood = fwd_pbblts[-1, -1] + fwd_pbblts[-1, -2]
    #likelihood = T.switch(T.lt(likelihood, 1e-35), 1e-35, likelihood)
    #loss = -T.log(T.cast(likelihood, "float32"))
    #loss = 10 * (likelihood - 1) * (likelihood - 100)
    loss = (T.le(likelihood, 1.0) * (10 * (likelihood - 1) * (likelihood - 100))) \
        + (T.gt(likelihood, 1.0) * (-T.log(T.cast(likelihood, "float32"))))
    return loss
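To see what recurrence_relation encodes, here is a NumPy check on a toy padded label sequence (values are illustrative): state i may stay, advance one step, or skip over a blank at position i+1 when the labels on either side differ.

import numpy as np

blank = 26
labels = np.array([0, blank, 1, blank, 0])  # padded sequence: a, -, b, -, a
labels2 = np.concatenate([labels, [blank, blank]])
sec_diag = (labels2[:-2] != labels2[2:]) * (labels2[1:-1] == blank)
N = len(labels)
R = np.eye(N) + np.eye(N, k=1) + np.eye(N, k=2) * sec_diag[:, None]
# R[i, i+2] is 1 only where the skip transition is legal (here rows 0 and 2).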
Example 12: chi2_test_statistic
def chi2_test_statistic(M, Obs, K, num_M, num_Obs):
    # Getting frequencies from observations
    Ns = T.dot(Obs, T.ones((K, 1)))
    p = Obs / Ns

    # Find the zeros so we can deal with them later
    pZEROs = T.eq(p, 0)
    mZEROs = T.eq(M, 0)

    # log probabilities, with -INF as log(0)
    lnM = T.log(M + mZEROs) - INF * mZEROs
    lnp = T.log(p + pZEROs) - INF * pZEROs

    # Using Kronecker products so every row of M hits every row of p
    # in the difference klnP - klnM
    O_ones = T.ones((num_Obs, 1))
    M_ones = T.ones((num_M, 1))

    klnM = kron(lnM, O_ones)
    klnP = kron(M_ones, lnp)
    klnP_M = klnP - klnM
    kObs = kron(M_ones, Obs)

    G = 2.0 * T.dot(klnP_M, kObs.T)
    G = G * T.identity_like(G)
    G = T.dot(G, T.ones((num_M * num_Obs, 1)))
    G = T.reshape(G, (num_M, num_Obs))

    # The following quotient improves the convergence to chi^2 by an order of magnitude
    # source: http://en.wikipedia.org/wiki/Multinomial_test
    #numerator = T.dot(- 1.0/(M + 0.01), T.ones((K, 1))) - T.ones((num_M, 1))
    #q1 = T.ones((num_M, num_Obs)) + T.dot(numerator, 1.0/Ns.T/6.0)/(K - 1.0)
    return G  #/q1
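The pZEROs/mZEROs masks implement a reusable safe-log trick: T.eq(x, 0) marks the zeros, the addition keeps T.log finite, and subtracting INF at those positions restores log(0) = -INF. A condensed sketch (INF here stands in for the large constant the original snippet assumes):

import theano.tensor as T

INF = 1e30  # stand-in for the INF constant the example assumes

def safe_log(x):
    zeros = T.eq(x, 0)  # 1 exactly where x is zero
    return T.log(x + zeros) - INF * zeros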
Example 13: compute_cost_log_in_parallel
def compute_cost_log_in_parallel(original_rnn_outputs, labels, func, x_ends, y_ends):
    mask = T.log(1 - T.or_(T.eq(labels, T.zeros_like(labels)),
                           T.eq(labels, shift_matrix(labels, 2))))

    initial_state = T.log(T.zeros_like(labels))
    initial_state = T.set_subtensor(initial_state[:, 0], 0)

    def select_probabilities(rnn_outputs, label):
        return rnn_outputs[:, label]

    rnn_outputs, _ = theano.map(select_probabilities, [original_rnn_outputs, labels])
    rnn_outputs = T.log(rnn_outputs.dimshuffle((1, 0, 2)))

    def forward_step(probabilities, last_probabilities):
        all_forward_probabilities = T.stack(
            last_probabilities + probabilities,
            log_shift_matrix(last_probabilities, 1) + probabilities,
            log_shift_matrix(last_probabilities, 2) + probabilities + mask,
        )
        result = func(all_forward_probabilities, 0)
        return result

    forward_probabilities, _ = theano.scan(fn=forward_step,
                                           sequences=rnn_outputs,
                                           outputs_info=initial_state)
    forward_probabilities = forward_probabilities.dimshuffle((1, 0, 2))

    def compute_cost(forward_probabilities, x_end, y_end):
        return -func(forward_probabilities[x_end - 1, y_end - 2:y_end])

    return theano.map(compute_cost, [forward_probabilities, x_ends, y_ends])[0]
Example 14: form_dataset
def form_dataset(doc, n_in):
    """
    Given a document and the number of input units, return the vector form of the
    document segmented into units of length (n_in + 1).
    :param doc: String : Location of doc.
    :param n_in: Number of input units of the TreeLSTM.
    :return: the vector form of the document segmented into units of length (n_in + 1)
    """
    print 'Calling form_dataset()..'
    doc_obj = open(doc)
    data = tokenize(doc_obj.read().lower())
    data = data[:int(len(data)/(n_in+1)) * (n_in+1)]
    n_sen = len(data)/(n_in+1)
    data_x, data_y = np.asarray(data).reshape((n_sen, (n_in+1)))[:, :n_in], \
                     np.asarray(data).reshape((n_sen, (n_in+1)))[:, -1]
    data_x_vec = np.asarray([sentence_vec(data_x[i], word_vecs) for i in range(len(data_x))],
                            dtype=theano.config.floatX)
    shared_x = theano.shared(np.concatenate(data_x_vec, axis=1), name='vec_data_x', borrow=True)
    shared_x_ = assert_op(shared_x, T.eq(shared_x.get_value().shape[0], vec_dims),
                          T.eq(shared_x.get_value().shape[1], n_sen*n_in))
    shared_y = theano.shared(np.asarray(sentence_vec(data_y, word_vecs),
                                        dtype=theano.config.floatX), name='vec_data_y', borrow=True)
    shared_y_ = assert_op(shared_y, T.eq(shared_y.get_value().shape[0], vec_dims),
                          T.eq(shared_y.get_value().shape[1], n_sen))
    doc_obj.close()
    # Shape(vec_data_y) reshaped from (number of sentences, vector dims, 1)
    # to (number of sentences, vector dims)
    return shared_x_, shared_y_
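The assert_op used above is not defined in the snippet; it is presumably an instance of Theano's Assert op, which passes its first argument through while checking the remaining conditions at runtime. A minimal sketch of that shape-guard pattern with T.eq:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.opt import Assert

assert_shape = Assert('unexpected vector length')

x = T.vector('x')
# Passes x through unchanged, but raises at runtime unless the T.eq condition holds.
x_checked = assert_shape(x, T.eq(x.shape[0], 3))
f = theano.function([x], x_checked)

f(np.ones(3, dtype=theano.config.floatX))    # fine
# f(np.ones(4, dtype=theano.config.floatX)) would raise an AssertionError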
Example 15: pp_errors
def pp_errors(self, y, prob, ioi):
    """Return a float representing the number of errors in the minibatch
    over the total number of examples of the minibatch; zero-one
    loss over the size of the minibatch

    :type y: theano.tensor.TensorType
    :param y: corresponds to a vector that gives for each example the
              correct label
    :param ioi: the index that you are interested in
    :param prob: the probability threshold, applied to p_y_given_x
    """
    #prob = 0.5
    #ioi = 1
    # check if y has same dimension of y_pred
    if y.ndim != self.y_pred.ndim:
        raise TypeError('y should have the same shape as self.y_pred',
                        ('y', y.type, 'y_pred', self.y_pred.type))
    # check if y is of the correct datatype
    if y.dtype.startswith('int'):
        # the T.neq operator returns a vector of 0s and 1s, where 1
        # represents a mistake in prediction
        #return T.mean(T.neq(self.y_pred, y))
        inprob = self.p_y_given_x[:, ioi]
        pt1 = T.gt(inprob, prob)
        pt2 = T.eq(self.y_pred, ioi)
        pt3 = T.eq(y, ioi)
        ppn = T.sum(pt1 & pt2 & pt3)
        predn = T.sum(pt1 & pt2)
        #return (predn, ppn)
        #return T.sum(T.eq(self.y_pred, y))
        return (ppn, predn)
    else:
        raise NotImplementedError()