This article collects typical usage examples of the theano.tensor.lt function in Python. If you are wondering how exactly the Python lt function works, how to call it, or what real-world uses look like, the hand-picked code samples below may help.
A total of 15 code examples of the lt function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
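Before the collected examples, here is a minimal sketch (not taken from any of the projects below) of what T.lt itself does: it builds a symbolic, elementwise "less than" comparison that evaluates to an int8 tensor of 0s and 1s, and it is typically combined with T.switch for elementwise selection.

import numpy as np
import theano
import theano.tensor as T

a = T.vector("a")
b = T.vector("b")
is_less = T.lt(a, b)              # symbolic elementwise a < b, evaluates to int8 0/1
larger = T.switch(is_less, b, a)  # elementwise max(a, b) built from lt + switch

f = theano.function([a, b], [is_less, larger])
xs = np.asarray([1., 5., 3.], dtype=theano.config.floatX)
ys = np.asarray([2., 2., 3.], dtype=theano.config.floatX)
mask, mx = f(xs, ys)
print(mask)  # -> [1 0 0]
print(mx)    # -> [ 2.  5.  3.]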
Example 1: irprop_minus_updates
def irprop_minus_updates(params, grads):

    # IRPROP- parameters
    updates = []
    deltas = 0.1 * numpy.ones(len(params))
    last_params = params

    positiveStep = 1.2
    negativeStep = 0.5
    maxStep = 50.
    minStep = math.exp(-6)

    for param, gparam, delta, last_gparam in zip(params, grads, deltas, last_params):
        # calculate change
        change = T.sgn(gparam * last_gparam)
        if T.gt(change, 0):
            delta = T.minimum(delta * positiveStep, maxStep)
            if T.lt(delta, minStep):
                delta = minStep
        elif T.lt(change, 0):
            delta = T.maximum(delta * negativeStep, minStep)
            if T.gt(delta, maxStep):
                delta = maxStep
            last_gparam = 0

        # update the weights
        updates.append((param, param - T.sgn(gparam) * delta))
        # store old change
        last_gparam = gparam

    return updates
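Since T.gt and T.lt return symbolic expressions rather than Python booleans, elementwise step-size clamping like the above is usually written with T.switch instead of a Python if. A minimal sketch of that pattern, reusing the same (hypothetical) delta / minStep / maxStep names:

delta = T.minimum(delta * positiveStep, maxStep)        # clamp from above
delta = T.switch(T.lt(delta, minStep), minStep, delta)  # clamp from below, elementwise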
Example 2: _backward_negative_z
def _backward_negative_z(inputs, weights, normed_relevances, bias=None):
    inputs_plus = inputs * T.gt(inputs, 0)
    weights_plus = weights * T.gt(weights, 0)
    inputs_minus = inputs * T.lt(inputs, 0)
    weights_minus = weights * T.lt(weights, 0)

    # Compute weights+ * inputs- and weights- * inputs+
    negative_part_a = conv2d(
        normed_relevances,
        weights_plus.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1],
        border_mode="full",
    )
    negative_part_a *= inputs_minus

    negative_part_b = conv2d(
        normed_relevances,
        weights_minus.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1],
        border_mode="full",
    )
    negative_part_b *= inputs_plus

    together = negative_part_a + negative_part_b

    if bias is not None:
        bias_negative = bias * T.lt(bias, 0)
        bias_relevance = bias_negative.dimshuffle("x", 0, "x", "x") * normed_relevances
        # Divide bias by weight size before convolving back
        # mean across channel, 0, 1 dims (hope this is correct?)
        fraction_bias = bias_relevance / T.prod(weights.shape[1:]).astype(theano.config.floatX)
        bias_rel_in = conv2d(
            fraction_bias,
            T.ones_like(weights).dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1],
            border_mode="full",
        )
        together += bias_rel_in

    return together
Example 3: build_update
def build_update(self, alpha=0.01, beta=0.0):
    W = self.W
    lambda_mult = self.lambda_mult
    y = self.y
    C = self.C
    lower_bound = theano.shared(np.float32(0.0))

    updates = build_gradDescent_step(W, lambda_mult, alpha, beta)
    updatelambda_mult = updates[1]  # <<===>> \lambda_i'(t+1)
    updatelambda_mult = updatelambda_mult - T.dot(y, updatelambda_mult) / T.dot(y, y) * y  # <<===>> \lambda_i''(t+1)

    # use theano.tensor.switch because we need an elementwise comparison
    # if \lambda_i''(t+1) > C, use C
    updatelambda_mult = T.switch(T.lt(C, updatelambda_mult), C, updatelambda_mult)
    updatelambda_mult = T.switch(T.lt(updatelambda_mult, lower_bound), lower_bound, updatelambda_mult)
    updatelambda_mult = sandbox.cuda.basic_ops.gpu_from_host(updatelambda_mult)

    updatefunction = theano.function(inputs=[],
                                     outputs=W,
                                     updates=[(lambda_mult, updatelambda_mult)])

    self._update_lambda_mult_graph = updatelambda_mult
    self.update_function = updatefunction

    return updatelambda_mult, updatefunction
Example 4: theano_digitize
def theano_digitize(x, bins):
    """
    Equivalent to numpy digitize.

    Parameters
    ----------
    x : Theano tensor or array_like
        The array or matrix to be digitized
    bins : array_like
        The bins with which x should be digitized

    Returns
    -------
    A Theano tensor
        The indices of the bins to which each value in the input array belongs.
    """
    binned = T.zeros_like(x) + len(bins)
    for i in range(len(bins)):
        bin = bins[i]
        if i == 0:
            binned = T.switch(T.lt(x, bin), i, binned)
        else:
            ineq = T.and_(T.ge(x, bins[i - 1]), T.lt(x, bin))
            binned = T.switch(ineq, i, binned)
    binned = T.switch(T.isnan(x), len(bins), binned)
    return binned
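A quick, hypothetical sanity check of theano_digitize against numpy.digitize (assuming the function above is in scope and bins is a plain NumPy array):

x = T.vector("x")
bins = np.array([0.0, 1.0, 2.0])
digitize_fn = theano.function([x], theano_digitize(x, bins))

vals = np.asarray([-0.5, 0.5, 1.5, 2.5], dtype=theano.config.floatX)
print(digitize_fn(vals))        # bin indices 0, 1, 2, 3 (returned as a float tensor)
print(np.digitize(vals, bins))  # [0 1 2 3]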
Example 5: __init__
def __init__(self, random_state=None, low=0.0, high=1.0):
    super(Uniform, self).__init__(low=low, high=high,
                                  random_state=random_state,
                                  optimizer=None)

    # pdf
    self.pdf_ = T.switch(
        T.or_(T.lt(self.X, self.low), T.ge(self.X, self.high)),
        0.,
        1. / (self.high - self.low)).ravel()
    self.make_(self.pdf_, "pdf")

    # -log pdf
    self.nnlf_ = T.switch(
        T.or_(T.lt(self.X, self.low), T.ge(self.X, self.high)),
        np.inf,
        T.log(self.high - self.low)).ravel()
    self.make_(self.nnlf_, "nnlf")

    # cdf
    self.cdf_ = T.switch(
        T.lt(self.X, self.low),
        0.,
        T.switch(
            T.lt(self.X, self.high),
            (self.X - self.low) / (self.high - self.low),
            1.)).ravel()
    self.make_(self.cdf_, "cdf")

    # ppf
    self.ppf_ = self.p * (self.high - self.low) + self.low
    self.make_(self.ppf_, "ppf", args=[self.p])
Example 6: gradients
def gradients(cost, parameters, lr=0.001):
    updates = []
    c = 0
    for param in parameters:
        update = param - lr * theano.grad(cost, param)

        if c == 1 or c == 3:
            # update = t.minimum(t.abs_(update), np.pi) * (update / abs(update))
            #
            # update = t.maximum(update, 0)
            # update = t.minimum(update, np.pi)
            update = ifelse(t.lt(update, 0), np.pi * 2 - 0.001, update)
            update = ifelse(t.gt(update, np.pi * 2), 0.001, update)
        if c == 2:
            update = ifelse(t.lt(update, 2), float(20), update)
        elif c == 5 or c == 6:
            update = t.maximum(update, -5)
            update = t.minimum(update, 5)

        updates.append((param, update))
        c += 1
    return updates
Example 7: tnormal_icdf
def tnormal_icdf(size, avg, std, lbound, ubound, theano_rng, dtype):
    """
    Alternative Method:
    sample = -Phi_inv(Phi(-lbound)*(1-u) + Phi(-ubound)*u)
    """

    def Phi(x):
        erfarg = (x - avg) / (std * SQRT2)
        rval = 0.5 * (1. + T.erf(erfarg))
        return rval.astype(dtype)

    def Phi_inv(y, eps=3e-8):
        """ eps was calibrated for cublas.erfinv using float32 """
        temp = 2. * y - 1.
        erfinv_input = T.clip(temp, -1 + eps, 1 - eps)
        rval = avg + std * SQRT2 * T.erfinv(erfinv_input)
        return rval.astype(dtype)

    # center lower and upper bounds based on mean
    u = theano_rng.uniform(size=size, dtype=dtype)

    # Inverse CDF method. When method becomes numerically unstable, we simply
    # return the bounds based on whether avg < lbound, or ubound < avg.
    cdf_range = Phi(ubound) - Phi(lbound)
    sample = T.switch(
        T.or_(
            T.lt(cdf_range, 3e-8),
            T.gt(cdf_range, 1 - 3e-8)),
        T.switch(
            T.lt(avg, lbound),
            lbound,
            ubound),
        Phi_inv(Phi(lbound) + u * cdf_range))

    return sample
Example 8: generate_subpop_input
def generate_subpop_input(r_E, r_I, n_pairs):
    c = T.scalar("c", dtype='float32')
    h = T.matrix("h", dtype='float32')

    W_EE = T.tensor3("W_EE", dtype='float32')
    W_EI = T.tensor3("W_EI", dtype='float32')
    W_IE = T.tensor3("W_IE", dtype='float32')
    W_II = T.tensor3("W_II", dtype='float32')

    r_e = T.matrix("r_e", dtype='float32')
    r_i = T.matrix("r_i", dtype='float32')

    I_E = T.matrix('I_E', dtype='float32')
    I_I = T.matrix('I_I', dtype='float32')
    I_thresh_E = T.matrix('I_thresh_E', dtype='float32')
    I_thresh_I = T.matrix('I_thresh_I', dtype='float32')

    # Compile functions:
    I_E = c*h + T.sum(T.sum(W_EE*r_e, 1), 1).reshape((n_pairs, n_pairs)).T - T.sum(T.sum(W_EI*r_i, 1), 1).reshape((n_pairs, n_pairs)).T
    I_I = c*h + T.sum(T.sum(W_IE*r_e, 1), 1).reshape((n_pairs, n_pairs)).T - T.sum(T.sum(W_II*r_i, 1), 1).reshape((n_pairs, n_pairs)).T

    I_thresh_E = T.switch(T.lt(I_E, 0), 0, I_E)
    I_thresh_I = T.switch(T.lt(I_I, 0), 0, I_I)

    inputs = theano.function(inputs=[c, h, W_EE, W_EI, W_IE, W_II],
                             outputs=[I_thresh_E, I_thresh_I],
                             givens={r_e: r_E, r_i: r_I},
                             allow_input_downcast=True)
    return inputs
Example 9: __init__
def __init__(self, x, lower, upper, *args, **kwargs):
    super(Uniform, self).__init__(*args, **kwargs)
    self._logp = T.log(T.switch(T.gt(x, upper), 0,
                                T.switch(T.lt(x, lower), 0, 1 / (upper - lower))))
    self._cdf = T.switch(T.gt(x, upper), 1,
                         T.switch(T.lt(x, lower), 0, (x - lower) / (upper - lower)))
    self._add_expr('x', x)
    self._add_expr('lower', lower)
    self._add_expr('upper', upper)
Example 10: _recursive_step
def _recursive_step(self, i, regs, tokens, seqs, back_routes, back_lens):
    seq = seqs[i]

    # Encoding
    left, right, target = seq[0], seq[1], seq[2]
    left_rep = ifelse(T.lt(left, 0), tokens[-left], regs[left])
    right_rep = ifelse(T.lt(right, 0), tokens[-right], regs[right])
    rep = self._encode_computation(left_rep, right_rep)
    if self.deep:
        inter_rep = rep
        rep = self._deep_encode(inter_rep)
    else:
        inter_rep = T.constant(0)
    new_regs = T.set_subtensor(regs[target], rep)

    back_len = back_lens[i]
    back_reps, lefts, rights = self._unfold(back_routes[i], new_regs, back_len)
    gf_W_d1, gf_W_d2, gf_B_d1, gf_B_d2, distance, rep_gradient = self._unfold_gradients(
        back_reps, lefts, rights, back_routes[i], tokens, back_len)

    return ([rep, inter_rep, left_rep, right_rep, new_regs, rep_gradient, distance],
            self.decode_optimizer.setup([self.W_d1, self.W_d2, self.B_d1, self.B_d2],
                                        [gf_W_d1, gf_W_d2, gf_B_d1, gf_B_d2],
                                        method=self.optimization, beta=self.beta))
Example 11: interval_reduction
def interval_reduction(a, b, c, d, tol):
    fc = f(c)
    fd = f(d)

    a, b, c, d = ifelse(T.lt(fc, fd),
                        [a, d, d - golden_ratio * (d - a), c],
                        [c, b, d, c + golden_ratio * (b - c)])

    stoprule = theano.scan_module.until(T.lt(T.abs_(c - d), tol))

    return [a, b, c, d], stoprule
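This step function is clearly meant to be driven by theano.scan, with the until condition ending the golden-section search early. A hypothetical wiring (assuming f, golden_ratio, tol and an initial bracket a0 < b0 are defined as Theano scalars, with c0/d0 as the usual interior points):

c0 = b0 - golden_ratio * (b0 - a0)
d0 = a0 + golden_ratio * (b0 - a0)
outputs, updates = theano.scan(interval_reduction,
                               outputs_info=[a0, b0, c0, d0],
                               non_sequences=[tol],
                               n_steps=100)  # stops earlier once the until condition holds
a_final, b_final = outputs[0][-1], outputs[1][-1]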
Example 12: rprop
def rprop(param, learning_rate, gparam, mask, updates, current_cost, previous_cost,
          eta_plus=1.2, eta_minus=0.5, max_delta=50, min_delta=10e-6):
    previous_grad = sharedX(numpy.ones(param.shape.eval()), borrow=True)
    delta = sharedX(learning_rate * numpy.ones(param.shape.eval()), borrow=True)
    previous_inc = sharedX(numpy.zeros(param.shape.eval()), borrow=True)
    zero = T.zeros_like(param)
    one = T.ones_like(param)
    change = previous_grad * gparam

    new_delta = T.clip(
        T.switch(
            T.eq(gparam, 0.),
            delta,
            T.switch(
                T.gt(change, 0.),
                delta * eta_plus,
                T.switch(
                    T.lt(change, 0.),
                    delta * eta_minus,
                    delta
                )
            )
        ),
        min_delta,
        max_delta
    )

    new_previous_grad = T.switch(
        T.eq(mask * gparam, 0.),
        previous_grad,
        T.switch(
            T.gt(change, 0.),
            gparam,
            T.switch(
                T.lt(change, 0.),
                zero,
                gparam
            )
        )
    )

    inc = T.switch(
        T.eq(mask * gparam, 0.),
        zero,
        T.switch(
            T.gt(change, 0.),
            - T.sgn(gparam) * new_delta,
            T.switch(
                T.lt(change, 0.),
                zero,
                - T.sgn(gparam) * new_delta
            )
        )
    )

    updates.append((previous_grad, new_previous_grad))
    updates.append((delta, new_delta))
    updates.append((previous_inc, inc))

    return param + inc * mask
Example 13: get_output_for
def get_output_for(self, input, deterministic=False, **kwargs):
    if deterministic or self.rate == 0:
        return input
    else:
        drop = self._srng.uniform(input.shape)
        z = T.lt(drop, 0.5 * self.rate)
        o = T.lt(T.abs_(drop - 0.75 * self.rate), 0.25 * self.rate)
        input = T.set_subtensor(input[z.nonzero()], 0.)
        input = T.set_subtensor(input[o.nonzero()], 1.)
        return input
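The two T.lt comparisons above carve the uniform draw into "set to 0", "set to 1", and "keep" regions. As a more familiar, minimal illustration of the same pattern (not this layer's exact scheme), T.lt on uniform noise yields an ordinary dropout mask:

from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=42)
x = T.matrix("x")
keep = T.lt(srng.uniform(x.shape), 0.8)  # 1 with probability 0.8, elementwise
dropped_out = T.switch(keep, x, 0.)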
Example 14: berhu
def berhu(predictions, targets, s=0.2, l=0.5, m=1.2):
    # Compute mask
    mask = T.gt(targets, l) * T.lt(targets, m)
    # Compute n of valid pixels
    n_valid = T.sum(mask)
    # Redundant mult here
    r = (predictions - targets) * mask
    c = s * T.max(T.abs_(r))
    a_r = T.abs_(r)
    b = T.switch(T.lt(a_r, c), a_r, ((r**2) + (c**2)) / (2*c))
    return T.sum(b) / n_valid
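For reference, the T.switch above implements the reverse Huber (berHu) penalty: with r the masked residual and c = s * max|r|, each element contributes b(r) = |r| when |r| < c and b(r) = (r^2 + c^2) / (2c) otherwise, and the function returns the mean contribution over the n_valid pixels selected by the mask.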
Example 15: _step
def _step(
    i,
    pkm1, pkm2, qkm1, qkm2,
    k1, k2, k3, k4, k5, k6, k7, k8, r
):
    xk = -(x * k1 * k2) / (k3 * k4)
    pk = pkm1 + pkm2 * xk
    qk = qkm1 + qkm2 * xk
    pkm2 = pkm1
    pkm1 = pk
    qkm2 = qkm1
    qkm1 = qk

    xk = (x * k5 * k6) / (k7 * k8)
    pk = pkm1 + pkm2 * xk
    qk = qkm1 + qkm2 * xk
    pkm2 = pkm1
    pkm1 = pk
    qkm2 = qkm1
    qkm1 = qk

    old_r = r
    r = tt.switch(tt.eq(qk, zero), r, pk / qk)

    k1 += one
    k2 += k26update
    k3 += two
    k4 += two
    k5 += one
    k6 -= k26update
    k7 += two
    k8 += two

    big_cond = tt.gt(tt.abs_(qk) + tt.abs_(pk), BIG)
    biginv_cond = tt.or_(
        tt.lt(tt.abs_(qk), BIGINV),
        tt.lt(tt.abs_(pk), BIGINV)
    )

    pkm2 = tt.switch(big_cond, pkm2 * BIGINV, pkm2)
    pkm1 = tt.switch(big_cond, pkm1 * BIGINV, pkm1)
    qkm2 = tt.switch(big_cond, qkm2 * BIGINV, qkm2)
    qkm1 = tt.switch(big_cond, qkm1 * BIGINV, qkm1)

    pkm2 = tt.switch(biginv_cond, pkm2 * BIG, pkm2)
    pkm1 = tt.switch(biginv_cond, pkm1 * BIG, pkm1)
    qkm2 = tt.switch(biginv_cond, qkm2 * BIG, qkm2)
    qkm1 = tt.switch(biginv_cond, qkm1 * BIG, qkm1)

    return ((pkm1, pkm2, qkm1, qkm2,
             k1, k2, k3, k4, k5, k6, k7, k8, r),
            until(tt.abs_(old_r - r) < (THRESH * tt.abs_(r))))