This article collects typical usage examples of the Python function theano.tensor.nonzero. If you are wondering how the Python nonzero function is used in practice, what it does, or what real-world examples look like, the curated code samples below may help.
The following presents 15 code examples of the nonzero function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
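Before the examples, a minimal sketch of what theano.tensor.nonzero returns may be helpful: it yields a tuple of index vectors, one per dimension, which can be used directly for advanced indexing. This sketch is illustrative and not taken from any of the examples below; the variable names are arbitrary.

import numpy as np
import theano
import theano.tensor as T

y = T.matrix('y')                       # e.g. a batch of binary label rows
rows, cols = T.nonzero(y)               # one index vector per axis
picked = T.log(y + 1e-8)[T.nonzero(y)]  # advanced indexing keeps only the non-zero entries

f = theano.function([y], [rows, cols, picked])
print(f(np.array([[0., 1.], [1., 0.]], dtype=theano.config.floatX)))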
Example 1: nll2
def nll2(self, y):
    # Negative log-likelihood for predicting whether a course is taken.
    # T.nonzero(y) selects the entries where y is 1, T.nonzero(1 - y) the
    # entries where y is 0, so each term averages over one class only.
    return -T.mean(
        T.log(self.output)[T.nonzero(y)]
    ) - T.mean(
        T.log(1 - self.output)[T.nonzero(1 - y)]
    )
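Usage note for Example 1: because T.nonzero returns index tuples, the two T.mean terms average log-probabilities over only the positive and only the negative entries of y. A small, self-contained sketch of the same masked cost; the tensor names here are assumptions, not the original class attributes.

import numpy as np
import theano
import theano.tensor as T

output = T.matrix('output')   # predicted probabilities, same shape as y
y = T.matrix('y')             # binary indicators of whether a course is taken

nll = -T.mean(T.log(output)[T.nonzero(y)]) - T.mean(T.log(1 - output)[T.nonzero(1 - y)])
nll_fn = theano.function([output, y], nll)

probs = np.array([[0.9, 0.2], [0.3, 0.8]], dtype=theano.config.floatX)
labels = np.array([[1., 0.], [0., 1.]], dtype=theano.config.floatX)
print(nll_fn(probs, labels))  # -(log 0.9 + log 0.8)/2 - (log 0.8 + log 0.7)/2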
Example 2: __init__
def __init__(self, rng, batchsize, epochs=100, alpha=0.001, beta1=0.9, beta2=0.999,
             eps=1e-08, l1_weight=0.0, l2_weight=0.1, cost='mse'):
    self.alpha = alpha
    self.beta1 = beta1
    self.beta2 = beta2
    self.eps = eps
    self.l1_weight = l1_weight
    self.l2_weight = l2_weight
    self.rng = rng
    self.theano_rng = RandomStreams(rng.randint(2 ** 30))
    self.epochs = epochs
    self.batchsize = batchsize

    # `cost` is always the quantity minimised during supervised training.
    # The T.nonzero terms ensure that the cost is only computed for examples
    # that carry a label.
    #
    # Convention: unlabelled examples are marked with an all-zero vector in
    # lieu of a one-hot vector, so T.nonzero(y) skips them entirely.
    if cost == 'mse':
        self.y_pred = lambda network, x: network(x)
        self.error = lambda network, y_pred, y: T.zeros((1,))
        # Mean squared error over the labelled entries only.
        self.cost = lambda network, x, y: T.mean((network(x)[T.nonzero(y)] - y[T.nonzero(y)])**2)
    elif cost == 'binary_cross_entropy':
        self.y_pred = lambda network, x: network(x)
        self.cost = lambda network, y_pred, y: T.nnet.binary_crossentropy(y_pred[T.nonzero(y)], y[T.nonzero(y)]).mean()
        # classification error
        self.error = lambda network, y_pred, y: T.mean(T.neq(T.argmax(y_pred, axis=1), T.argmax(y, axis=1)))
    elif cost == 'cross_entropy':
        self.y_pred = lambda network, x: network(x)
        self.cost = lambda network, y_pred, y: T.nnet.categorical_crossentropy(y_pred[T.nonzero(y)], y[T.nonzero(y)]).mean()
        # classification error
        self.error = lambda network, y_pred, y: T.mean(T.neq(T.argmax(y_pred, axis=1), T.argmax(y, axis=1)))
    else:
        self.y_pred = lambda network, x: network(x)
        self.error = lambda network, y_pred, y: T.zeros((1,))
        self.cost = cost
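As a sanity check on the all-zero-row convention above, here is a small sketch, not taken from the original code, showing that T.nonzero skips unlabelled rows entirely, so they contribute nothing to the masked cost:

import numpy as np
import theano
import theano.tensor as T

y = T.matrix('y')
y_pred = T.matrix('y_pred')
# Only entries where the target vector is non-zero enter the cost.
masked_bce = T.nnet.binary_crossentropy(y_pred[T.nonzero(y)], y[T.nonzero(y)]).mean()
f = theano.function([y_pred, y], masked_bce)

targets = np.array([[0., 1.],    # labelled example
                    [0., 0.]],   # unlabelled example (all-zero row) is ignored
                   dtype=theano.config.floatX)
preds = np.array([[0.2, 0.9], [0.5, 0.5]], dtype=theano.config.floatX)
print(f(preds, targets))  # only the (0, 1) entry contributes: -log(0.9)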
Example 3: categorical_crossentropy_segm
def categorical_crossentropy_segm(prediction_proba, targets):
    '''
    MODIFICATIONS:
        - reshape from image-size to array and back
    '''
    shape = T.shape(prediction_proba)
    pred_mod1 = T.transpose(prediction_proba, (0, 2, 3, 1))
    pred_mod = T.reshape(pred_mod1, (-1, shape[1]))
    if prediction_proba.ndim == targets.ndim:
        targ_mod1 = T.transpose(targets, (0, 2, 3, 1))
        targ_mod = T.reshape(targ_mod1, (-1, shape[1]))
    else:
        targ_mod = T.reshape(targets, (-1,))
    results = categorical_crossentropy(pred_mod, targ_mod)
    results = T.reshape(results, (shape[0], shape[2], shape[3]))

    # QUICK IMPLEMENTATION FOR TWO SPECIFIC CLASSES. NEEDS GENERALIZATION
    # Weights depending on class frequency:
    weights = (1.02275, 44.9647)
    cars_indx, not_cars_indx = T.nonzero(targets), T.nonzero(T.eq(targets, 0))
    # set_subtensor is not in-place, so keep the returned tensor.
    results = T.set_subtensor(results[cars_indx], results[cars_indx] * float32(weights[1]))
    results = T.set_subtensor(results[not_cars_indx], results[not_cars_indx] * float32(weights[0]))
    return T.sum(results, axis=(1, 2))
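The hard-coded weights above compensate for class imbalance between background and car pixels. One common choice that is numerically consistent with those constants is inverse class frequency; whether the original values were actually computed this way is an assumption, and the frequencies below are purely illustrative.

import numpy as np

# Hypothetical pixel frequencies: roughly 2.2% of pixels belong to the "car" class.
freq = np.array([0.97775, 0.02225])  # background, cars (illustrative)
weights = 1.0 / freq                 # inverse-frequency weighting
print(weights)                       # approx. [1.0228, 44.94], close to (1.02275, 44.9647)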
Example 4: unet_crossentropy_loss_sampled
def unet_crossentropy_loss_sampled(y_true, y_pred):
    print 'unet_crossentropy_loss_sampled'
    epsilon = 1.0e-4
    y_pred_clipped = T.flatten(T.clip(y_pred, epsilon, 1.0-epsilon))
    y_true = T.flatten(y_true)
    # Filter the right indices. T.nonzero returns a tuple with one index
    # vector per dimension, hence the [0] on the flattened tensors.
    indPos = T.nonzero(y_true)[0]
    indNeg = T.nonzero(1-y_true)[0]
    # shuffle
    n = indPos.shape[0]
    indPos = indPos[srng.permutation(n=n)]
    n = indNeg.shape[0]
    indNeg = indNeg[srng.permutation(n=n)]
    # take an equal number of samples from whichever class has fewer
    n_samples = T.cast(T.min([T.sum(y_true), T.sum(1-y_true)]), dtype='int64')
    indPos = indPos[:n_samples]
    indNeg = indNeg[:n_samples]
    loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(1-y_pred_clipped[indNeg]))
    average_loss = T.mean(loss_vector)
    print 'average_loss:', average_loss
    return average_loss
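Both sampled-loss examples (this one and Example 12) reference a module-level srng that is not shown in the snippet. A minimal setup under that assumption; the seed value is illustrative:

from theano.tensor.shared_randomstreams import RandomStreams

# Shared random stream used by srng.permutation(...) in the loss above.
srng = RandomStreams(seed=1234)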
Example 5: prepare_loss
def prepare_loss(inputlayer, outlayer, pairs, types, loss_function,
                 entropy_penalty=0, V=None, lamb=-1, train_pass=False):
    # reshape to 2d before sending through the network,
    # after which the original shape is recovered
    output = outlayer.output(
        {inputlayer: pairs.reshape((-1, pairs.shape[-1]))},
        train_pass=train_pass).reshape((pairs.shape[0], 2, -1))
    x1, x2 = output[:, 0], output[:, 1]

    cost = loss_function(x1, x2, types)
    same_loss = cost[T.nonzero(types)].mean()
    diff_loss = cost[T.nonzero(1 - types)].mean()

    if lamb >= 0:
        cost = 1 / (lamb + 1) * same_loss + lamb / (lamb + 1) * diff_loss
    else:
        cost = cost.mean()

    ent = entropy_loss(x1, x2)
    total_cost = cost + entropy_penalty * ent

    if V is not None:
        return total_cost, cost, same_loss, diff_loss, ent, calculate_spread(V)
    else:
        return total_cost, cost, same_loss, diff_loss, ent
Example 6: past_weight_grad_step
def past_weight_grad_step(xs, es, kp_x, kd_x, kp_e, kd_e, shape, dws=None):
    """
    Do an efficient update of the weights given the two spike-updates.
    (This still runs FING SLOWLY!)

    :param xs: An (n_in) vector
    :param es: An (n_out) vector
    :param kp_x:
    :param kd_x:
    :param kp_e:
    :param kd_e:
    :param shape: (n_in, n_out)
    :return:
    """
    kp_x, kd_x, kp_e, kd_e = [as_floatx(k) for k in (kp_x, kd_x, kp_e, kd_e)]
    n_in, n_out = shape
    rx = kd_x/(kp_x+kd_x)
    re = kd_e/(kp_e+kd_e)

    tx_last = create_shared_variable(np.zeros(n_in)+1)
    te_last = create_shared_variable(np.zeros(n_out)+1)
    x_last = create_shared_variable(np.zeros(n_in))
    e_last = create_shared_variable(np.zeros(n_out))

    x_spikes = tt.neq(xs, 0)
    e_spikes = tt.neq(es, 0)
    # tt.nonzero returns a tuple of index vectors; unpack the single axis.
    x_spike_ixs, = tt.nonzero(x_spikes)
    e_spike_ixs, = tt.nonzero(e_spikes)

    if dws is None:
        dws = tt.zeros(shape)

    t_last = tt.minimum(tx_last[x_spike_ixs, None], te_last)  # (n_x_spikes, n_out)
    dws = tt.inc_subtensor(dws[x_spike_ixs, :], x_last[x_spike_ixs, None]*e_last
        * rx**(tx_last[x_spike_ixs, None]-t_last)
        * re**(te_last[None, :]-t_last)
        * geoseries_sum(re*rx, t_end=t_last, t_start=1)
        )

    new_x_last = tt.set_subtensor(x_last[x_spike_ixs], x_last[x_spike_ixs]*rx**tx_last[x_spike_ixs] + xs[x_spike_ixs]/as_floatx(kd_x))
    new_tx_last = tt.switch(x_spikes, 0, tx_last)

    t_last = tt.minimum(new_tx_last[:, None], te_last[e_spike_ixs])  # (n_in, n_e_spikes)
    dws = tt.inc_subtensor(dws[:, e_spike_ixs], new_x_last[:, None]*e_last[e_spike_ixs]
        * rx**(new_tx_last[:, None]-t_last)
        * re**(te_last[None, e_spike_ixs]-t_last)
        * geoseries_sum(re*rx, t_end=t_last, t_start=1)
        )

    add_update(x_last, new_x_last)
    add_update(e_last, tt.set_subtensor(e_last[e_spike_ixs], e_last[e_spike_ixs]*re**te_last[e_spike_ixs] + es[e_spike_ixs]/as_floatx(kd_e)))
    add_update(tx_last, new_tx_last+1)
    add_update(te_last, tt.switch(e_spikes, 1, te_last+1))
    return dws
Example 7: logp_theano_comorbidities
def logp_theano_comorbidities(logLike, nObs, B0, B, X, S, T):
    logLike = 0.0

    # Unwrap t=0 points for B0
    zeroIndices = np.roll(T.cumsum(), 1)
    zeroIndices[0] = 0
    zeroIndices = zeroIndices.astype('int32')

    # Likelihood from B0 for X=1 and X=0 cases
    logLike += (X[zeroIndices]*TT.log(B0[:,S[zeroIndices]]).T).sum()
    logLike += ((1-X[zeroIndices])*TT.log(1.-B0[:,S[zeroIndices]]).T).sum()

    stateChange = S[1:]-S[:-1]
    # Don't consider t=0 points
    stateChange = TT.set_subtensor(stateChange[zeroIndices[1:]-1], 0)
    # TT.nonzero gives the positions where the state changed; shift by one
    # to index the time step after the change.
    changed = TT.nonzero(stateChange)[0]+1

    # A change can only happen from 0 to 1 given our assumptions
    logLike += ((X[changed]-X[changed-1])*TT.log(B[:,S[changed]]).T).sum()
    logLike += (((1-X[changed])*(1-X[changed-1]))*TT.log(1.-B[:,S[changed]]).T).sum()

    return logLike
Example 8: train_batch
def train_batch(self, batch_size):
    # Note: T is rebound here to the autoencoder's sparse ratings matrix,
    # so .nonzero() below is the scipy.sparse method, not theano.tensor.nonzero.
    T = self.AE.T
    T = T.tocsr()
    nonzero_indices = T.nonzero()
    n_users = len(np.unique(nonzero_indices[0]))
    indices = np.unique(nonzero_indices[0])
    for epoch in xrange(self.epochs):
        l = []
        for ind, i in enumerate(xrange(0, n_users, batch_size)):
            # CHECK : SEEMS BUGGY.
            ratings = T[indices[i:(i + batch_size)], :].toarray().astype(np.float32)
            loss = self.AE.ae_batch(ratings)
            l.append(loss)
        m = np.mean(np.array(l))
        print("mean Loss for epoch %d batch %d is %f" % (epoch, ind, m))
        rmse = self.RMSE_sparse()
        print("RMSE after one epoch is %f" % (rmse))
        f.write(str(rmse) + '\n')
Example 9: exp
def exp(self, X, U):
    norm_U = tensor.sqrt(tensor.sum((U ** 2), axis=0)).reshape((1, self._n))
    Y = X * tensor.cos(norm_U) + U * (tensor.sin(norm_U) / norm_U)
    # For those columns where the step is too small, use a retraction.
    exclude = tensor.nonzero(norm_U <= 4.5e-8)[-1]
    # Symbolic tensors do not support item assignment, so write the excluded
    # columns back with set_subtensor.
    Y = tensor.set_subtensor(Y[:, exclude],
                             self._normalize_columns(X[:, exclude] + U[:, exclude]))
    return Y
Example 10: MASK_blanking
def MASK_blanking(x_i):
    # Positions of non-zero values in x_i; [[1, -1]] picks the entries at
    # index 1 and at the last index of that position vector
    idxs = T.nonzero(x_i)[0][[1, -1]]
    # Difference = number of positions spanned by the non-zero values
    no_values = idxs[1] - idxs[0]
    # Move each index inwards by a proportion of that span
    idxs0 = T.cast(T.floor(idxs[0] + no_values * blank_proportion), 'int32')
    idxs1 = T.cast(T.floor(idxs[1] - no_values * blank_proportion), 'int32')
    # Return a vector that has a tighter mask than x_i
    return T.set_subtensor(T.zeros_like(x_i)[idxs0:idxs1], T.alloc(1., idxs1-idxs0))
Example 11: add_synap_post_inp
def add_synap_post_inp(i, po, p, s, q):
    # i  :: sequence
    # po :: post
    # p  :: pre
    # s  :: dA
    # q  :: W
    index = T.nonzero(q[:self.Ne, i])
    npo = T.inc_subtensor(po[index, i], s)
    nw = T.inc_subtensor(q[:, i], p[:, i])
    nw = T.clip(nw, 0, self.wmax)
    return {po: npo, q: nw}
Example 12: unet_crossentropy_loss_sampled
def unet_crossentropy_loss_sampled(y_true, y_pred):
    epsilon = 1.0e-4
    y_pred_clipped = T.flatten(T.clip(y_pred, epsilon, 1.0-epsilon))
    y_true = T.flatten(y_true)
    # Filter the right indices. T.nonzero returns a tuple with one index
    # vector per dimension, hence the [0] on the flattened tensors.
    indPos = T.nonzero(y_true)[0]
    indNeg = T.nonzero(1-y_true)[0]
    # shuffle
    n = indPos.shape[0]
    indPos = indPos[srng.permutation(n=n)]
    n = indNeg.shape[0]
    indNeg = indNeg[srng.permutation(n=n)]
    # subset, assuming each class has at least 200 samples present
    indPos = indPos[:200]
    indNeg = indNeg[:200]
    loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(1-y_pred_clipped[indNeg]))
    average_loss = T.mean(loss_vector)
    return average_loss
Example 13: fprop_step
def fprop_step(state_below, index, state_before, W, U, b):
    state_now = state_before.copy()
    index = self.num_modules - \
        tensor.nonzero(tensor.mod(index+1, self.M))[0].shape[0]
    this_range = index * self.module_dim
    z = tensor.dot(state_below, W[:, :this_range]) + \
        tensor.dot(state_before, U[:, :this_range]) + \
        b[:this_range]
    z = tensor.tanh(z)
    state_now = tensor.set_subtensor(state_now[:, :this_range], z)
    return state_now
Example 14: add_synap_pre_inp
def add_synap_pre_inp(i, p, po, s, q):
    # i :: sequence
    # p :: pre | post
    # s :: dApre | dApost
    # q :: W
    index = T.nonzero(q[i, :self.Ne])
    np = T.inc_subtensor(p[i, index], s)
    nw = T.inc_subtensor(q[i, :], po[i, :])
    nw = T.clip(nw, 0, self.wmax)
    return {p: np, q: nw}
Example 15: train_batch
def train_batch(self, batch_size):
    # As in Example 8, T is rebound to a scipy sparse matrix here, so
    # .nonzero() is the sparse-matrix method rather than theano.tensor.nonzero.
    T = self.AE.T
    T = T.tocsr()
    nonzero_indices = T.nonzero()
    n_users = len(np.unique(nonzero_indices[0]))
    indices = np.unique(nonzero_indices[0])
    for epoch in xrange(self.epochs):
        for ind, i in enumerate(xrange(0, n_users, batch_size)):
            ratings = T[indices[i:(i + batch_size)], :].toarray().astype(np.float32)
            loss = self.AE.ae_batch(ratings)
            print loss