This article collects and summarizes typical usage examples of the theano.tensor.neq function in Python. If you are struggling with questions such as what exactly neq does, how to call it, or what real-world uses of it look like, the curated code examples here may help.
Below are 15 code examples of the neq function, sorted by popularity by default.
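Before the examples, here is a minimal sketch (not taken from any of the examples below) of what T.neq computes: an elementwise "not equal" comparison that returns a tensor of 0s and 1s, with 1 marking the positions where the two inputs differ.

import numpy as np
import theano
import theano.tensor as T

a = T.ivector('a')
b = T.ivector('b')
neq_fn = theano.function([a, b], T.neq(a, b))
print(neq_fn(np.array([1, 2, 3], dtype='int32'),
             np.array([1, 0, 3], dtype='int32')))  # -> [0 1 0]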
Example 1: errors
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError(
'y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type)
)
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
    else:
        # For non-integer label types we fall back to the same zero-one
        # loss (the reference tutorial raises NotImplementedError here).
        return T.mean(T.neq(self.y_pred, y))
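A hedged usage sketch for an errors method like the one above; the tensors and values below are invented for illustration. Taking the mean of T.neq(y_pred, y) gives the zero-one error rate over the minibatch.

import numpy as np
import theano
import theano.tensor as T

y = T.ivector('y')            # true labels
y_pred = T.ivector('y_pred')  # predicted labels
error_rate = theano.function([y_pred, y], T.mean(T.neq(y_pred, y)))
print(error_rate(np.array([0, 1, 1, 2], dtype='int32'),
                 np.array([0, 1, 0, 2], dtype='int32')))  # -> 0.25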
Example 2: step
def step(self, y_m, yb_m, hf, cf, hb, cb):
    # y_m/yb_m should have shape (batch_size,) (or batch_size x 1)
    # one-hot encode y, yb (the previous values must be saved for masking!)
y = to_one_hot(y_m, self.bs, self.K)
yb = to_one_hot(yb_m, self.bs, self.K)
# get forward and backward inputs values
y_f_in = self.forward_in.run(y)
y_b_in = self.backward_in.run(yb)
# run forward and backward LSTMs
hf_t,cf_t = self.forward_lstm.run(y_f_in, hf, cf)
hb_t,cb_t = self.backward_lstm.run(y_b_in, hb, cb)
# but only if y/yb is not 0 (apply mask)
    mask_y = y_m.reshape((self.bs, 1))    # .repeat(self.m//2, axis=1) shouldn't be needed here
    mask_yb = yb_m.reshape((self.bs, 1))
hf = T.switch(T.neq(mask_y, 0), hf_t, hf)
cf = T.switch(T.neq(mask_y, 0), cf_t, cf)
# and backward
hb = T.switch(T.neq(mask_yb, 0), hb_t, hb)
cb = T.switch(T.neq(mask_yb, 0), cb_t, cb)
# return the new values
return hf,cf,hb,cb
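The masking idiom used above, T.switch(T.neq(mask, 0), new, old), keeps the old state wherever the mask is zero and is a common way to handle padded positions in batched sequence models. A minimal standalone sketch with toy shapes; the names are assumptions, not part of the example:

import numpy as np
import theano
import theano.tensor as T

mask = T.col('mask')       # (batch, 1); the column dimension is broadcastable
h_old = T.matrix('h_old')  # previous hidden state, (batch, dim)
h_new = T.matrix('h_new')  # candidate hidden state, (batch, dim)
step = theano.function([mask, h_new, h_old],
                       T.switch(T.neq(mask, 0), h_new, h_old))
print(step(np.array([[1.], [0.]], dtype=theano.config.floatX),
           np.ones((2, 3), dtype=theano.config.floatX),
           np.zeros((2, 3), dtype=theano.config.floatX)))
# row 0 takes the new state (ones); row 1 keeps the old state (zeros)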
Example 3: errors
def errors(self, y, mean = False):
if not self.CONNECTED:
raise RuntimeError("Asked to compute errors, but I'm not connected atm")
if mean:
return T.mean(T.neq(self.y_pred, y))
else:
return T.neq(self.y_pred, y)
Example 4: matrix_weight_grad_calculator
def matrix_weight_grad_calculator(xs, es, kp_x, kd_x, kp_e, kd_e, shapes, epsilon=1e-7):
"""
:param xs:
:param es:
:param kp_x:
:param kd_x:
:param kp_e:
:param kd_e:
:param shapes:
:param epsilon:
:return:
"""
kp_x, kd_x, kp_e, kd_e = [as_floatx(k) for k in (kp_x, kd_x, kp_e, kd_e)]
n_samples, n_in, n_out = shapes
v1 = create_shared_variable(np.zeros((n_samples, n_in, n_out)))
rx = kd_x/(kp_x+kd_x)
re = kd_e/(kp_e+kd_e)
xr = create_shared_variable(np.zeros((n_samples, n_in)))
er = create_shared_variable(np.zeros((n_samples, n_out)))
x_spikes = tt.neq(xs, 0)
e_spikes = tt.neq(es, 0)
xr_decayed = xr*rx
er_decayed = er*re
spikes = tt.bitwise_or(x_spikes[:, :, None], e_spikes[:, None, :])
v2 = xr_decayed[:, :, None]*er_decayed[:, None, :]
dws = (spikes*(v2-v1))/(rx*re-1)
new_xr = xr_decayed + xs/(kp_x+kd_x)
new_er = er_decayed + es/(kp_e+kd_e)
add_update(v1, tt.switch(spikes, new_xr[:, :, None]*new_er[:, None, :], v1))
add_update(xr, new_xr)
add_update(er, new_er)
return dws.sum(axis=0)
Example 5: ber
def ber(y, pred):
    # Balanced error rate for binary labels, where 1 is the positive class
    a = (tensor.neq(y, 1) * tensor.neq(pred, 1)).sum()  # true negatives
    b = (tensor.neq(y, 1) * tensor.eq(pred, 1)).sum()   # false positives
    c = (tensor.eq(y, 1) * tensor.neq(pred, 1)).sum()   # false negatives
    d = (tensor.eq(y, 1) * tensor.eq(pred, 1)).sum()    # true positives
    [a, b, c, d] = [tensor.cast(x, dtype=theano.config.floatX) for x in [a, b, c, d]]
    return (b / (a + b) + c / (c + d)) / numpy.float32(2)
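As a sanity check, a hedged toy evaluation, assuming the ber function above is in scope (the input values are invented): a counts true negatives, b false positives, c false negatives and d true positives, so the result is the mean of the false-positive and false-negative rates, i.e. the balanced error rate.

import numpy
import theano
from theano import tensor

y = tensor.ivector('y')
pred = tensor.ivector('pred')
ber_fn = theano.function([y, pred], ber(y, pred))
# 4 negatives, one misclassified (FPR = 1/4); 2 positives, one
# misclassified (FNR = 1/2); BER = (0.25 + 0.5) / 2 = 0.375
print(ber_fn(numpy.array([0, 0, 0, 0, 1, 1], dtype='int32'),
             numpy.array([0, 0, 0, 1, 1, 0], dtype='int32')))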
Example 6: get_tagging_channels_from_state
def get_tagging_channels_from_state(self, state, target):
missingValuesFilter = T.neq(target, -1)
rval = OrderedDict()
y_hat = state > 0.5
y = target > 0.5
wrong_bit = T.cast(T.neq(y, y_hat), state.dtype) * missingValuesFilter
rval['mistagging'] = T.cast(wrong_bit.sum() / missingValuesFilter.sum(),
state.dtype)
y = T.cast(y, state.dtype)
y_hat = T.cast(y_hat, state.dtype)
tp = (y * y_hat * missingValuesFilter).sum()
fp = ((1-y) * y_hat * missingValuesFilter).sum()
precision = tp / T.maximum(1., tp + fp)
recall = tp / T.maximum(1., (y * missingValuesFilter).sum())
rval['precision'] = precision
rval['recall'] = recall
rval['f1'] = 2. * precision * recall / T.maximum(1, precision + recall)
tp = (y * y_hat * missingValuesFilter).sum(axis=0)
fp = ((1-y) * y_hat * missingValuesFilter).sum(axis=0)
precision = tp / T.maximum(1., tp + fp)
rval['per_output_precision.max'] = precision.max()
rval['per_output_precision.mean'] = precision.mean()
rval['per_output_precision.min'] = precision.min()
recall = tp / T.maximum(1., (y * missingValuesFilter).sum(axis=0))
rval['per_output_recall.max'] = recall.max()
rval['per_output_recall.mean'] = recall.mean()
rval['per_output_recall.min'] = recall.min()
f1 = 2. * precision * recall / T.maximum(1, precision + recall)
rval['per_output_f1.max'] = f1.max()
rval['per_output_f1.mean'] = f1.mean()
rval['per_output_f1.min'] = f1.min()
    # Add computation of the mean average precision
from pylearn2_ECCV2014 import meanAvgPrec
(rval['min_avg_prec'],
rval['mean_avg_prec'],
rval['max_avg_prec'],
rval['mean_avg_prec_AnswerPhone'],
rval['mean_avg_prec_DriveCar'],
rval['mean_avg_prec_Eat'],
rval['mean_avg_prec_FightPerson'],
rval['mean_avg_prec_GetOutCar'],
rval['mean_avg_prec_HandShake'],
rval['mean_avg_prec_HugPerson'],
rval['mean_avg_prec_Kiss'],
rval['mean_avg_prec_Run'],
rval['mean_avg_prec_SitDown'],
rval['mean_avg_prec_SitUp'],
rval['mean_avg_prec_StandUp']) = meanAvgPrec.meanAveragePrecisionTheano(target, state)
return rval
Example 7: theano_metrics
def theano_metrics(y_pred, y_true, n_classes, void_labels):
"""
Returns the intersection I and union U (to compute the jaccard I/U) and the accuracy.
:param y_pred: tensor of predictions. shape (b*0*1, c) with c = n_classes
:param y_true: groundtruth, shape (b,0,1) or (b,c,0,1) with c=1
:param n_classes: int
:param void_labels: list of indexes of void labels
:return: return tensors I and U of size (n_classes), and scalar acc
"""
# Put y_pred and y_true under the same shape
y_true = T.flatten(y_true)
y_pred = T.argmax(y_pred, axis=1)
# We use not_void in case the prediction falls in the void class of the groundtruth
for i in range(len(void_labels)):
if i == 0:
not_void = T.neq(y_true, void_labels[i])
else:
not_void = not_void * T.neq(y_true, void_labels[i])
I = T.zeros(n_classes)
U = T.zeros(n_classes)
for i in range(n_classes):
y_true_i = T.eq(y_true, i)
y_pred_i = T.eq(y_pred, i)
I = T.set_subtensor(I[i], T.sum(y_true_i * y_pred_i))
U = T.set_subtensor(U[i], T.sum(T.or_(y_true_i, y_pred_i) * not_void))
accuracy = T.sum(I) / T.sum(not_void)
return I, U, accuracy
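A hedged usage sketch, assuming the theano_metrics function above is in scope; the toy probabilities below are invented. The per-class Jaccard index is I / U, guarded against empty unions.

import numpy as np
import theano
import theano.tensor as T

y_true = T.ivector('y_true')      # flattened ground truth
y_pred_probs = T.matrix('probs')  # (pixels, n_classes)
I, U, acc = theano_metrics(y_pred_probs, y_true, n_classes=3, void_labels=[2])
jaccard = I / T.maximum(U, 1.0)   # avoid dividing by an empty union
f = theano.function([y_pred_probs, y_true], [jaccard, acc])
probs = np.array([[0.9, 0.1, 0.0],   # predicted class 0
                  [0.2, 0.8, 0.0],   # predicted class 1
                  [0.6, 0.4, 0.0]],  # predicted class 0 (ground truth is void)
                 dtype=theano.config.floatX)
print(f(probs, np.array([0, 1, 2], dtype='int32')))
# per-class Jaccard [1., 1., 0.] and accuracy 1.0 on the non-void pixels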
Example 8: trainer
def trainer(X,Y,alpha,lr,predictions,updates,data,labels):
data = U.create_shared(data, dtype=np.int8)
labels = U.create_shared(labels,dtype=np.int8)
index_start = T.lscalar('start')
index_end = T.lscalar('end')
print "Compiling function..."
train_model = theano.function(
inputs = [index_start,index_end,alpha,lr],
outputs = T.mean(T.neq(T.argmax(predictions, axis=1), Y)),
updates = updates,
givens = {
X: data[index_start:index_end],
Y: labels[index_start:index_end]
}
)
test_model = theano.function(
inputs = [index_start,index_end],
outputs = T.mean(T.neq(T.argmax(predictions, axis=1), Y)),
givens = {
X: data[index_start:index_end],
Y: labels[index_start:index_end]
}
)
print "Done."
return train_model,test_model
Example 9: __init__
def __init__(self, rng, batchsize, epochs=100, alpha=0.001, beta1=0.9, beta2=0.999, eps=1e-08, l1_weight=0.0, l2_weight=0.1, cost='mse'):
self.alpha = alpha
self.beta1 = beta1
self.beta2 = beta2
self.eps = eps
self.l1_weight = l1_weight
self.l2_weight = l2_weight
self.rng = rng
self.theano_rng = RandomStreams(rng.randint(2 ** 30))
self.epochs = epochs
self.batchsize = batchsize
    # `cost` is always the quantity minimised during supervised training;
    # the T.nonzero terms ensure that the cost is only computed for
    # examples that carry a label.
    #
    # Convention: unlabelled examples are marked with a vector of zeros
    # in lieu of a one-hot vector.
if cost == 'mse':
self.y_pred = lambda network, x: network(x)
self.error = lambda network, y_pred, y: T.zeros((1,))
        self.cost = lambda network, x, y: T.mean((network(x)[T.nonzero(y)] - y[T.nonzero(y)])**2)
elif cost == 'binary_cross_entropy':
self.y_pred = lambda network, x: network(x)
self.cost = lambda network, y_pred, y: T.nnet.binary_crossentropy(y_pred[T.nonzero(y)], y[T.nonzero(y)]).mean()
# classification error
self.error = lambda network, y_pred, y: T.mean(T.neq(T.argmax(y_pred, axis=1), T.argmax(y, axis=1)))
elif cost == 'cross_entropy':
self.y_pred = lambda network, x: network(x)
self.cost = lambda network, y_pred, y: T.nnet.categorical_crossentropy(y_pred[T.nonzero(y)], y[T.nonzero(y)]).mean()
# classification error
self.error = lambda network, y_pred, y: T.mean(T.neq(T.argmax(y_pred, axis=1), T.argmax(y, axis=1)))
else:
self.y_pred = lambda network, x: network(x)
self.error = lambda network, y_pred, y: T.zeros((1,))
self.cost = cost
Example 10: get_output_for
def get_output_for(self, input, **kwargs):
    '''
    The input is a batch of matrices of word vectors.
    The output is the sum of the word embeddings divided by the number of
    non-zero word embeddings in the input.
    The idea behind the normalisers is the same as in the normal averageLayer.
    '''
# Sums of word embeddings (so the zero embeddings don't matter here)
sums = input.sum(axis=2)
    # Can we do this more cheaply (i.e., more efficiently)?
    # NOTE that we explicitly cast the output of the last sum() to floatX,
    # as otherwise Theano would cast the result of 'sums / normalisers' to
    # float64
    normalisers = T.neq(
        (T.neq(input, 0.0)).sum(axis=3, dtype='int32'), 0.0
    ).sum(axis=2, dtype='floatX').reshape((-1, self.iNrOfSentences, 1))
averages = sums / normalisers
if self.fGradientClippingBound is not None:
averages = theano.gradient.grad_clip(averages,
- self.fGradientClippingBound,
self.fGradientClippingBound)
return averages
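The nested T.neq calls above implement a counting idiom: first flag the non-zero components of each word vector, then flag the words whose component count is non-zero. A hedged toy sketch of the same idiom on a 3-D input (the sentence dimension of the example is dropped for brevity):

import numpy as np
import theano
import theano.tensor as T

emb = T.tensor3('emb')  # (batch, words, dim)
# 1 for every word whose embedding is not all-zero, counted per batch row
n_nonzero = T.neq(T.neq(emb, 0.0).sum(axis=2), 0).sum(axis=1)
f = theano.function([emb], n_nonzero)
x = np.zeros((1, 3, 4), dtype=theano.config.floatX)
x[0, 0] = 1.0  # only the first of the three words is non-zero
print(f(x))    # -> [1]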
Example 11: nll_simple
def nll_simple(Y, Y_hat,
cost_mask=None,
cost_ent_mask=None,
cost_ent_desc_mask=None):
probs = Y_hat
pred = TT.argmax(probs, axis=1).reshape(Y.shape)
errors = TT.neq(pred, Y)
ent_errors = None
if cost_ent_mask is not None:
pred_ent = TT.argmax(probs * cost_ent_mask.dimshuffle('x', 0),
axis=1).reshape(Y.shape)
ent_errors = TT.neq(pred_ent, Y).mean()
ent_desc_errors = None
if cost_ent_desc_mask is not None:
pred_desc_ent = TT.argmax(probs * cost_ent_desc_mask,
axis=1).reshape(Y.shape)
ent_desc_errors = TT.neq(pred_desc_ent, Y).mean()
LL = TT.log(_grab_probs(probs, Y) + 1e-8).reshape(Y.shape)
if cost_mask is not None:
total = cost_mask * LL
errors = cost_mask * errors
ncosts = TT.sum(cost_mask)
mean_errors = TT.sum(errors) / (ncosts)
ave = -TT.sum(total) / Y.shape[1]
else:
mean_errors = TT.mean(errors)
ave = -TT.sum(LL) / Y.shape[0]
return ave, mean_errors, ent_errors, ent_desc_errors
Example 12: errors
def errors(self, y):
if y.dtype.startswith('int') and y.ndim == 3:
mask = T.neq(y, -1)
total = T.sum(mask, dtype='float32')
return T.sum(T.neq(self.y_pred, y)*mask)/total
else:
raise NotImplementedError()
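Here the label -1 marks padded or unlabelled positions: the mask built with T.neq(y, -1) removes their contribution, and the error count is normalised by the number of real labels. A hedged standalone sketch of the same computation, in 2-D for brevity (the names are assumptions):

import numpy as np
import theano
import theano.tensor as T

y = T.imatrix('y')            # (batch, time); -1 marks padding
y_pred = T.imatrix('y_pred')
mask = T.neq(y, -1)
total = T.sum(mask, dtype='float32')
masked_error = T.sum(T.neq(y_pred, y) * mask) / total
f = theano.function([y_pred, y], masked_error)
print(f(np.array([[0, 1, 2]], dtype='int32'),
        np.array([[0, 2, -1]], dtype='int32')))
# one error over two real labels -> 0.5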
Example 13: getRpRnTpTnForTrain0OrVal1
def getRpRnTpTnForTrain0OrVal1(self, y, training0OrValidation1):
    # The returned list has numberOfClasses*4 integers: the number of real
    # positives, real negatives, true predicted positives and true predicted
    # negatives for each class (incl. background), in the natural order of the
    # classes (class-0 RP, RN, TPP, TPN, class-1 RP, RN, TPP, TPN, ...).
    # param y: y = T.itensor4('y'). Dimensions [batchSize, r, c, z]
yPredToUse = self.y_pred_train if training0OrValidation1 == 0 else self.y_pred_val
checkDimsOfYpredAndYEqual(y, yPredToUse, "training" if training0OrValidation1 == 0 else "validation")
returnedListWithNumberOfRpRnTpTnForEachClass = []
for class_i in xrange(0, self._numberOfOutputClasses) :
#Number of Real Positive, Real Negatives, True Predicted Positives and True Predicted Negatives are reported PER CLASS (first for WHOLE).
tensorOneAtRealPos = T.eq(y, class_i)
tensorOneAtRealNeg = T.neq(y, class_i)
tensorOneAtPredictedPos = T.eq(yPredToUse, class_i)
tensorOneAtPredictedNeg = T.neq(yPredToUse, class_i)
tensorOneAtTruePos = T.and_(tensorOneAtRealPos,tensorOneAtPredictedPos)
tensorOneAtTrueNeg = T.and_(tensorOneAtRealNeg,tensorOneAtPredictedNeg)
returnedListWithNumberOfRpRnTpTnForEachClass.append( T.sum(tensorOneAtRealPos) )
returnedListWithNumberOfRpRnTpTnForEachClass.append( T.sum(tensorOneAtRealNeg) )
returnedListWithNumberOfRpRnTpTnForEachClass.append( T.sum(tensorOneAtTruePos) )
returnedListWithNumberOfRpRnTpTnForEachClass.append( T.sum(tensorOneAtTrueNeg) )
return returnedListWithNumberOfRpRnTpTnForEachClass
Example 14: f1_score
def f1_score(self, y, labels=[0, 2]):
"""
Mean F1 score between two classes (positive and negative as specified by the labels array).
"""
y_tr = y
y_pr = self.y_pred
correct = T.eq(y_tr, y_pr)
wrong = T.neq(y_tr, y_pr)
label = labels[0]
tp_neg = T.sum(correct * T.eq(y_tr, label))
fp_neg = T.sum(wrong * T.eq(y_pr, label))
fn_neg = T.sum(T.eq(y_tr, label) * T.neq(y_pr, label))
tp_neg = T.cast(tp_neg, theano.config.floatX)
prec_neg = tp_neg / T.maximum(1, tp_neg + fp_neg)
recall_neg = tp_neg / T.maximum(1, tp_neg + fn_neg)
f1_neg = 2. * prec_neg * recall_neg / T.maximum(1, prec_neg + recall_neg)
label = labels[1]
tp_pos = T.sum(correct * T.eq(y_tr, label))
fp_pos = T.sum(wrong * T.eq(y_pr, label))
fn_pos = T.sum(T.eq(y_tr, label) * T.neq(y_pr, label))
tp_pos = T.cast(tp_pos, theano.config.floatX)
prec_pos = tp_pos / T.maximum(1, tp_pos + fp_pos)
recall_pos = tp_pos / T.maximum(1, tp_pos + fn_pos)
f1_pos = 2. * prec_pos * recall_pos / T.maximum(1, prec_pos + recall_pos)
return 0.5 * (f1_pos + f1_neg) * 100
Example 15: each_loss
def each_loss(outpt, inpt):
    # y is the answer (label) sequence after being padded with blanks
blank = 26
y_nblank = T.neq(inpt, blank)
    n = T.dot(y_nblank, y_nblank)  # true label length (number of non-blank symbols)
    N = 2 * n + 1  # padded label length, without the extra trailing padding
labels = inpt[:N]
labels2 = T.concatenate((labels, [blank, blank]))
sec_diag = T.neq(labels2[:-2], labels2[2:]) * T.eq(labels2[1:-1], blank)
recurrence_relation = \
T.eye(N) + \
T.eye(N, k=1) + \
T.eye(N, k=2) * sec_diag.dimshuffle((0, 'x'))
pred_y = outpt[:, labels]
    fwd_pbblts, _ = theano.scan(
        lambda curr, accum: T.switch(
            T.eq(curr * T.dot(accum, recurrence_relation), 0.0),
            T.dot(accum, recurrence_relation),
            curr * T.dot(accum, recurrence_relation)),
        sequences=[pred_y],
        outputs_info=[T.eye(N)[0]]
    )
    likelihood = fwd_pbblts[-1, -1] + fwd_pbblts[-1, -2]
    # Piecewise loss: quadratic in the likelihood while it is <= 1,
    # the usual negative log-likelihood once it exceeds 1.
    loss = (T.le(likelihood, 1.0) * (10 * (likelihood - 1) * (likelihood - 100))) \
         + (T.gt(likelihood, 1.0) * (-T.log(T.cast(likelihood, "float32"))))
return loss