This article collects typical usage examples of the theano.tensor.ge function in Python. If you have been wondering what ge does, how to call it, or where to find working examples of it, the hand-picked snippets below should help.
Shown below are 15 code examples of the ge function, sorted by popularity by default. You can vote up the examples you like or find useful; your feedback helps the system recommend better Python code samples. The snippets come from different projects, so they assume the usual surrounding imports: import numpy as np, import theano, and import theano.tensor as T (some projects alias it as tensor or TT), plus project-specific helpers where noted.
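Before the project examples, a minimal standalone sketch of the function itself: T.ge(x, y) builds a symbolic elementwise x >= y comparison that returns a 0/1 integer tensor (dtype int8) once the graph is compiled and run. The variable names below are illustrative.

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
y = T.dvector('y')
ge_fn = theano.function([x, y], T.ge(x, y))  # elementwise x >= y

print(ge_fn(np.array([1., 2., 3.]), np.array([2., 2., 2.])))
# -> [0 1 1]  (dtype int8)

The standalone sketches later in this article reuse these three imports.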
Example 1: filter_boxes
def filter_boxes(boxes, min_size):
    """Remove all boxes with any side smaller than min_size."""
    ws = boxes[:, 2] - boxes[:, 0] + 1
    hs = boxes[:, 3] - boxes[:, 1] + 1
    # keep = np.where((ws >= min_size) & (hs >= min_size))[0]
    keep = (T.ge(ws, min_size) & T.ge(hs, min_size)).nonzero()[0]
    return keep
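A hedged usage sketch for the function above (the box values and min_size are made up; imports as in the first sketch): each row of boxes is [x1, y1, x2, y2], and the compiled function returns the indices of the rows that survive.

boxes_var = T.dmatrix('boxes')
keep_fn = theano.function([boxes_var], filter_boxes(boxes_var, min_size=10))

boxes = np.array([[0., 0., 20., 20.],   # 21 x 21 box: kept
                  [0., 0., 5., 20.]])   # 6 x 21 box: too narrow, dropped
print(keep_fn(boxes))  # -> [0]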
Example 2: __init__
def __init__(self, random_state=None, low=0.0, high=1.0):
    super(Uniform, self).__init__(low=low, high=high,
                                  random_state=random_state,
                                  optimizer=None)

    # pdf
    self.pdf_ = T.switch(
        T.or_(T.lt(self.X, self.low), T.ge(self.X, self.high)),
        0.,
        1. / (self.high - self.low)).ravel()
    self.make_(self.pdf_, "pdf")

    # -log pdf
    self.nnlf_ = T.switch(
        T.or_(T.lt(self.X, self.low), T.ge(self.X, self.high)),
        np.inf,
        T.log(self.high - self.low)).ravel()
    self.make_(self.nnlf_, "nnlf")

    # cdf
    self.cdf_ = T.switch(
        T.lt(self.X, self.low),
        0.,
        T.switch(
            T.lt(self.X, self.high),
            (self.X - self.low) / (self.high - self.low),
            1.)).ravel()
    self.make_(self.cdf_, "cdf")

    # ppf
    self.ppf_ = self.p * (self.high - self.low) + self.low
    self.make_(self.ppf_, "ppf", args=[self.p])
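The class above relies on helpers from its own library (self.X, self.make_ and so on), but the T.switch / T.ge pattern it uses for the density is easy to reproduce standalone. A minimal sketch with made-up bounds; note the support is closed at low and open at high because of T.ge:

X = T.dvector('X')
low, high = 0.0, 2.0
pdf = T.switch(T.or_(T.lt(X, low), T.ge(X, high)),
               0.,
               1. / (high - low))
pdf_fn = theano.function([X], pdf)
print(pdf_fn(np.array([-1.0, 0.5, 2.0])))  # -> [0.  0.5 0. ]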
Example 3: matrix_noise3d
def matrix_noise3d(input_vectors, perm, grad3, vertex_table):
    skew_factors = (input_vectors[:, 0] + input_vectors[:, 1] + input_vectors[:, 2]) * 1.0 / 3.0
    skewed_vectors = T.floor(input_vectors + skew_factors[:, np.newaxis])
    unskew_factors = (skewed_vectors[:, 0] + skewed_vectors[:, 1] + skewed_vectors[:, 2]) * 1.0 / 6.0
    offsets_0 = input_vectors - (skewed_vectors - unskew_factors[:, np.newaxis])
    vertex_table_x_index = T.ge(offsets_0[:, 0], offsets_0[:, 1])
    vertex_table_y_index = T.ge(offsets_0[:, 1], offsets_0[:, 2])
    vertex_table_z_index = T.ge(offsets_0[:, 0], offsets_0[:, 2])
    simplex_vertices = vertex_table[
        vertex_table_x_index,
        vertex_table_y_index,
        vertex_table_z_index].reshape((input_vectors.shape[0], 2, 3))
    offsets_1 = offsets_0 - simplex_vertices[:, 0] + 1.0 / 6.0
    offsets_2 = offsets_0 - simplex_vertices[:, 1] + 1.0 / 3.0
    offsets_3 = offsets_0 - 0.5
    masked_skewed_vectors = T.bitwise_and(skewed_vectors.astype('int32'), 255)
    gi0s = perm[masked_skewed_vectors[:, 0] + perm[
        masked_skewed_vectors[:, 1] + perm[
            masked_skewed_vectors[:, 2]].astype('int32')].astype('int32')] % 12
    gi1s = perm[masked_skewed_vectors[:, 0] + simplex_vertices[:, 0, 0] + perm[
        masked_skewed_vectors[:, 1] + simplex_vertices[:, 0, 1] + perm[
            masked_skewed_vectors[:, 2] + simplex_vertices[:, 0, 2]].astype('int32')].astype('int32')] % 12
    gi2s = perm[masked_skewed_vectors[:, 0] + simplex_vertices[:, 1, 0] + perm[
        masked_skewed_vectors[:, 1] + simplex_vertices[:, 1, 1] + perm[
            masked_skewed_vectors[:, 2] + simplex_vertices[:, 1, 2]].astype('int32')].astype('int32')] % 12
    gi3s = perm[masked_skewed_vectors[:, 0] + 1 + perm[
        masked_skewed_vectors[:, 1] + 1 + perm[
            masked_skewed_vectors[:, 2] + 1].astype('int32')].astype('int32')] % 12
    n0s = calculate_gradient_contribution(offsets_0, gi0s, grad3)
    n1s = calculate_gradient_contribution(offsets_1, gi1s, grad3)
    n2s = calculate_gradient_contribution(offsets_2, gi2s, grad3)
    n3s = calculate_gradient_contribution(offsets_3, gi3s, grad3)
    return 23.0 * (n0s + n1s + n2s + n3s)
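One detail worth isolating from the noise function above: the three T.ge results are 0/1 integer tensors, so they can serve directly as indices into a lookup table, which is exactly how simplex_vertices is selected. A tiny sketch of that pattern with a made-up 2 x 2 table (imports as in the first sketch):

a = T.dvector('a')
b = T.dvector('b')
table = T.constant(np.array([[10, 20],
                             [30, 40]], dtype='int32'))
pick = table[T.ge(a, b), T.ge(b, a)]  # comparison results used as indices
pick_fn = theano.function([a, b], pick)
print(pick_fn(np.array([1., 5.]), np.array([3., 2.])))  # -> [20 30]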
Example 4: _step_test
def _step_test(self,
               x_t, xi_t, xf_t, xo_t, xc_t, mask_tm1,
               pred1_tm1, pred2_tm1, pred3_tm1, pred4_tm1, h_tm1, c_tm1, ctx_tm1,
               u_i, u_f, u_o, u_c, x_encoder, attention_encoder, x_img, B_W, B_U, B_Wimg, B_Wctx):
    # outer products combine the four previous one-hot predictions into one joint distribution
    outer1 = pred1_tm1[:, :, np.newaxis] * pred2_tm1[:, np.newaxis, :]
    outer1 = outer1.reshape((outer1.shape[0], -1))
    outer2 = pred3_tm1[:, :, np.newaxis] * pred4_tm1[:, np.newaxis, :]
    outer2 = outer2.reshape((outer2.shape[0], -1))
    pred = outer1[:, :, np.newaxis] * outer2[:, np.newaxis, :]
    pred = pred.reshape((pred.shape[0], -1))
    x_t = self.W_embedding[T.argmax(pred, axis=1)] * B_W[4]

    h_mask_tm1 = mask_tm1 * h_tm1
    c_mask_tm1 = mask_tm1 * c_tm1

    attention_x = T.dot(x_t, self.W_x2a)
    attention_total = attention_x[:, None, :] + attention_encoder
    if self.prev_context:
        attention_prev = T.dot(ctx_tm1, self.W_ctx2a)
        attention_total += attention_prev[:, None, :]
    attention_activation = T.dot(T.tanh(attention_total), self.V)  # attention -> scores
    attention_alpha = T.nnet.softmax(attention_activation[:, :, 0])  # scores -> weights
    ctx_t = (x_encoder * attention_alpha[:, :, None]).sum(axis=1)  # weighted average of context vectors

    xi_t = T.dot(x_t * B_W[0], self.W_i) + self.b_i + T.dot(x_img * B_Wimg[0], self.Wimg_i) + T.dot(ctx_t * B_Wctx[0], self.Wctx_i)
    xf_t = T.dot(x_t * B_W[1], self.W_f) + self.b_f + T.dot(x_img * B_Wimg[1], self.Wimg_f) + T.dot(ctx_t * B_Wctx[1], self.Wctx_f)
    xc_t = T.dot(x_t * B_W[2], self.W_c) + self.b_c + T.dot(x_img * B_Wimg[2], self.Wimg_c) + T.dot(ctx_t * B_Wctx[2], self.Wctx_c)
    xo_t = T.dot(x_t * B_W[3], self.W_o) + self.b_o + T.dot(x_img * B_Wimg[3], self.Wimg_o) + T.dot(ctx_t * B_Wctx[3], self.Wctx_o)

    i_t = self.inner_activation(xi_t + T.dot(h_mask_tm1 * B_U[0], u_i))
    f_t = self.inner_activation(xf_t + T.dot(h_mask_tm1 * B_U[1], u_f))
    c_t = f_t * c_mask_tm1 + i_t * self.activation(xc_t + T.dot(h_mask_tm1 * B_U[2], u_c))
    o_t = self.inner_activation(xo_t + T.dot(h_mask_tm1 * B_U[3], u_o))
    h_t = o_t * self.activation(c_t)

    pred1_t = T.dot(h_t, self.U_p1) + self.b_p1
    pred1_t = T.nnet.softmax(pred1_t.reshape((-1, pred1_t.shape[-1]))).reshape(pred1_t.shape)
    pred2_t = T.dot(h_t, self.U_p2) + self.b_p2
    pred2_t = T.nnet.softmax(pred2_t.reshape((-1, pred2_t.shape[-1]))).reshape(pred2_t.shape)
    pred3_t = T.dot(h_t, self.U_p3) + self.b_p3
    pred3_t = T.nnet.softmax(pred3_t.reshape((-1, pred3_t.shape[-1]))).reshape(pred3_t.shape)
    pred4_t = T.dot(h_t, self.U_p4) + self.b_p4
    pred4_t = T.nnet.softmax(pred4_t.reshape((-1, pred4_t.shape[-1]))).reshape(pred4_t.shape)

    # harden each softmax into a one-hot vector: only entries equal to the row max survive
    pred1_t = T.ge(pred1_t, T.max(pred1_t, axis=1).reshape((pred1_t.shape[0], 1))) * 1.0
    pred2_t = T.ge(pred2_t, T.max(pred2_t, axis=1).reshape((pred2_t.shape[0], 1))) * 1.0
    pred3_t = T.ge(pred3_t, T.max(pred3_t, axis=1).reshape((pred3_t.shape[0], 1))) * 1.0
    pred4_t = T.ge(pred4_t, T.max(pred4_t, axis=1).reshape((pred4_t.shape[0], 1))) * 1.0
    return pred1_t, pred2_t, pred3_t, pred4_t, h_t, c_t, ctx_t
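The last block of _step_test is a common trick: comparing every softmax entry against its row maximum with T.ge leaves 1.0 exactly at the argmax (ties would produce several ones), turning a distribution into a hard one-hot vector without an explicit argmax-plus-scatter. A standalone sketch (imports as in the first sketch):

probs = T.dmatrix('probs')
hard = T.ge(probs, T.max(probs, axis=1).reshape((probs.shape[0], 1))) * 1.0
hard_fn = theano.function([probs], hard)
print(hard_fn(np.array([[0.1, 0.7, 0.2],
                        [0.5, 0.3, 0.2]])))
# -> [[0. 1. 0.]
#     [1. 0. 0.]]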
Example 5: innerL_
def innerL_(sS, i):
    Ei = calcEk_(sS, i)
    # use "+" instead of "or" and "*" instead of "and"
    checkUselessAlpha1 = T.ge(sS.labels[i] * Ei, -sS.tol) + T.ge(sS.alphas[i], sS.C)
    checkUselessAlpha2 = T.le(sS.labels[i] * Ei, sS.tol) + T.lt(sS.alphas[i], 0)
    isUselessAlpha = toTheanoBool(checkUselessAlpha1 * checkUselessAlpha2)
    updateL = innerL_alphaInRange_(sS, i, Ei)
    earlyret = sS.retlist(0)
    return ifelse(isUselessAlpha, earlyret, updateL)
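The "+ instead of or, * instead of and" comment works because T.ge and friends return 0/1 integers: a product is nonzero only when both conditions hold, and a sum is nonzero when at least one does (it can reach 2, so treat any nonzero value as true). A quick sketch (imports as in the first sketch):

v = T.dscalar('v')
either = T.ge(v, 5) + T.le(v, -5)  # nonzero when v >= 5 or v <= -5
both = T.ge(v, 0) * T.le(v, 1)     # 1 when 0 <= v <= 1
f = theano.function([v], [either, both])
print(f(0.5))  # -> [array(0, dtype=int8), array(1, dtype=int8)]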
Example 6: RMSprop
def RMSprop(self, cost, params, full_params, sampled_params, sidxs, epsilon=1e-6):
    grads = [T.grad(cost=cost, wrt=param) for param in params]
    sgrads = [T.grad(cost=cost, wrt=sparam) for sparam in sampled_params]
    updates = OrderedDict()
    if self.grad_cap > 0:
        norm = T.cast(T.sqrt(T.sum([T.sum([T.sum(g**2) for g in g_list]) for g_list in grads]) + T.sum([T.sum(g**2) for g in sgrads])), theano.config.floatX)
        grads = [[T.switch(T.ge(norm, self.grad_cap), g * self.grad_cap / norm, g) for g in g_list] for g_list in grads]
        sgrads = [T.switch(T.ge(norm, self.grad_cap), g * self.grad_cap / norm, g) for g in sgrads]
    for p_list, g_list in zip(params, grads):
        for p, g in zip(p_list, g_list):
            if self.adapt:
                if self.adapt == 'adagrad':
                    g = self.adagrad(p, g, updates)
                if self.adapt == 'rmsprop':
                    g = self.rmsprop(p, g, updates)
                if self.adapt == 'adadelta':
                    g = self.adadelta(p, g, updates)
                if self.adapt == 'adam':
                    g = self.adam(p, g, updates)
            if self.momentum > 0:
                velocity = theano.shared(p.get_value(borrow=False) * 0., borrow=True)
                velocity2 = self.momentum * velocity - np.float32(self.learning_rate) * (g + self.lmbd * p)
                updates[velocity] = velocity2
                updates[p] = p + velocity2
            else:
                updates[p] = p * np.float32(1.0 - self.learning_rate * self.lmbd) - np.float32(self.learning_rate) * g
    for i in range(len(sgrads)):
        g = sgrads[i]
        fullP = full_params[i]
        sample_idx = sidxs[i]
        sparam = sampled_params[i]
        if self.adapt:
            if self.adapt == 'adagrad':
                g = self.adagrad(fullP, g, updates, sample_idx)
            if self.adapt == 'rmsprop':
                g = self.rmsprop(fullP, g, updates, sample_idx)
            if self.adapt == 'adadelta':
                g = self.adadelta(fullP, g, updates, sample_idx)
            if self.adapt == 'adam':
                g = self.adam(fullP, g, updates, sample_idx)
        if self.lmbd > 0:
            delta = np.float32(self.learning_rate) * (g + self.lmbd * sparam)
        else:
            delta = np.float32(self.learning_rate) * g
        if self.momentum > 0:
            velocity = theano.shared(fullP.get_value(borrow=False) * 0., borrow=True)
            vs = velocity[sample_idx]
            velocity2 = self.momentum * vs - delta
            updates[velocity] = T.set_subtensor(vs, velocity2)
            updates[fullP] = T.inc_subtensor(sparam, velocity2)
        else:
            updates[fullP] = T.inc_subtensor(sparam, -delta)
    return updates
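The grad_cap branch above rescales every gradient when the global norm exceeds the cap, with the decision made inside the graph by T.switch(T.ge(norm, cap), ...). The same rule condensed to a single gradient (names illustrative; imports as in the first sketch):

g = T.dvector('g')
cap = 1.0
norm = T.sqrt(T.sum(g ** 2))
g_clipped = T.switch(T.ge(norm, cap), g * cap / norm, g)
clip_fn = theano.function([g], g_clipped)
print(clip_fn(np.array([3.0, 4.0])))  # norm is 5 -> rescaled to [0.6 0.8]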
Example 7: compute_nonlinearity_derivative
def compute_nonlinearity_derivative(lin, bias):
    n_h = bias.shape[0]
    lin_re = lin[:, :n_h]
    lin_im = lin[:, n_h:]
    mod = T.sqrt(lin_re**2 + lin_im**2)
    ind = T.ge(mod + bias.dimshuffle('x', 0), 0)
    opt1 = 1.
    opt2 = 1. / (1 - mod - bias.dimshuffle('x', 0))**2
    ind = T.ge(mod, 1)  # note: this reassignment discards the `ind` computed above
    dnonlindlin = T.tile(ind * opt1 + (1 - ind) * opt2, [1, 2])
    return dnonlindlin
Example 8: _decode_step
def _decode_step(self, seq, regs):
    left, right, target = seq[0], seq[1], seq[2]
    left_is_not_token = T.ge(left, 0)
    right_is_not_token = T.ge(right, 0)
    rep = regs[target]
    left_dec, right_dec = self._decode_computation(rep)
    regs = ifelse(left_is_not_token, T.set_subtensor(regs[left], left_dec), regs)
    regs = ifelse(right_is_not_token, T.set_subtensor(regs[right], right_dec), regs)
    return rep, left_dec, right_dec, regs
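Unlike T.switch, theano.ifelse.ifelse takes a single scalar condition and evaluates only the chosen branch, which is why T.ge(left, 0) works here: left and right are scalar indices, with negative values marking leaf tokens. A minimal sketch of conditionally writing one register row (all names made up; imports as in the first sketch):

from theano.ifelse import ifelse

idx = T.iscalar('idx')
regs = T.dmatrix('regs')
new_row = T.dvector('new_row')
out = ifelse(T.ge(idx, 0), T.set_subtensor(regs[idx], new_row), regs)
f = theano.function([idx, regs, new_row], out)

r = np.zeros((2, 2))
print(f(1, r, np.array([5., 6.])))   # row 1 replaced
print(f(-1, r, np.array([5., 6.])))  # regs returned unchanged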
Example 9: cubicBSpline
def cubicBSpline(self, L):
    b = T.zeros_like(L)

    idx4 = T.ge(L, 0) * T.lt(L, 1)
    idx3 = T.ge(L, 1) * T.lt(L, 2)
    idx2 = T.ge(L, 2) * T.lt(L, 3)
    idx1 = T.ge(L, 3) * T.le(L, 4)

    b = T.switch(T.eq(idx4, 1), T.pow(L, 3) / 6, b)
    b = T.switch(T.eq(idx3, 1), (-3*T.pow(L-1, 3) + 3*T.pow(L-1, 2) + 3*(L-1) + 1) / 6, b)
    b = T.switch(T.eq(idx2, 1), ( 3*T.pow(L-2, 3) - 6*T.pow(L-2, 2) + 4) / 6, b)
    b = T.switch(T.eq(idx1, 1), (- T.pow(L-3, 3) + 3*T.pow(L-3, 2) - 3*(L-3) + 1) / 6, b)

    return b.T  # b is K x K' and thus, as we multiply from the right with
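The products of T.ge and T.lt above act as interval masks: each idx variable is 1 exactly on its unit interval, and T.switch then fills in the matching cubic piece. The pattern in isolation, for the first piece only (made-up input values; imports as in the first sketch):

L = T.dvector('L')
in_first = T.ge(L, 0) * T.lt(L, 1)  # 1 exactly on [0, 1)
piece = T.switch(T.eq(in_first, 1), T.pow(L, 3) / 6, T.zeros_like(L))
f = theano.function([L], piece)
print(f(np.array([-0.5, 0.5, 1.5])))  # -> [0.         0.02083333 0.        ]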
Example 10: huber_loss
def huber_loss(y_hat, target, delta=1, center=0, std=1):
    l1_diff = abs((target - center - y_hat) / std)
    huber_loss = TT.switch(TT.ge(l1_diff, delta),
                           (2 * l1_diff - 1) * delta,
                           l1_diff**2)
    return huber_loss
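A quick check of the loss above: it is quadratic inside the delta band and linear outside, and with the default delta=1 it equals exactly twice the textbook Huber loss. The snippet uses the TT alias, so this sketch assumes import theano.tensor as TT alongside the imports from the first sketch:

y_hat = TT.dvector('y_hat')
target = TT.dvector('target')
loss_fn = theano.function([y_hat, target], huber_loss(y_hat, target))
print(loss_fn(np.array([0.0, 0.0]), np.array([0.5, 3.0])))
# -> [0.25 5.  ]  (0.5**2 inside the band, (2*3 - 1)*1 outside)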
Example 11: Adagrad
def Adagrad(tparams, cost, inps, lr, epsilon=1e-6, clip_norm=5):
    """ default: lr=0.01 """
    grads = tensor.grad(cost, tparams.values())
    norm = tensor.sqrt(sum([tensor.sum(g**2) for g in grads]))
    # `tensor.ge` builds a symbolic expression, so a Python `if` cannot branch
    # on its runtime value; clip inside the graph with a symbolic switch instead.
    grads = [tensor.switch(tensor.ge(norm, clip_norm), g * clip_norm / norm, g)
             for g in grads]

    gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
               for k, p in tparams.iteritems()]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]
    f_grad_shared = theano.function(inps, cost, updates=gsup)

    updates = []

    for p, g in zip(tparams.values(), gshared):
        acc = theano.shared(p.get_value() * 0.)
        acc_t = acc + g ** 2
        updates.append((acc, acc_t))
        p_t = p - (lr / tensor.sqrt(acc_t + epsilon)) * g
        updates.append((p, p_t))

    f_update = theano.function([lr], [], updates=updates)

    return f_grad_shared, f_update
Example 12: NAG
def NAG(tparams, cost, inps, lr, momentum=0.9, clip_norm=5):
    """ default: lr=0.01 """
    grads = tensor.grad(cost, tparams.values())
    norm = tensor.sqrt(sum([tensor.sum(g**2) for g in grads]))
    # as in Adagrad above: clip symbolically, a Python `if` cannot test the symbolic norm
    grads = [tensor.switch(tensor.ge(norm, clip_norm), g * clip_norm / norm, g)
             for g in grads]

    gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
               for k, p in tparams.iteritems()]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]
    f_grad_shared = theano.function(inps, cost, updates=gsup)

    updates = []

    for p, g in zip(tparams.values(), gshared):
        m = theano.shared(p.get_value() * 0.)
        m_new = momentum * m - lr * g
        updates.append((m, m_new))
        updated_p = p + momentum * m_new - lr * g
        updates.append((p, updated_p))

    f_update = theano.function([lr], [], updates=updates)

    return f_grad_shared, f_update
Example 13: RMSprop_v1
def RMSprop_v1(tparams, cost, inps, lr, rho=0.9, epsilon=1e-6, clip_norm=5):
    """ default: lr=0.001
    This is the implementation of the RMSprop algorithm used in
    http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf.
    """
    grads = tensor.grad(cost, tparams.values())
    norm = tensor.sqrt(sum([tensor.sum(g**2) for g in grads]))
    # as in Adagrad above: clip symbolically, a Python `if` cannot test the symbolic norm
    grads = [tensor.switch(tensor.ge(norm, clip_norm), g * clip_norm / norm, g)
             for g in grads]

    gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
               for k, p in tparams.iteritems()]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]
    f_grad_shared = theano.function(inps, cost, updates=gsup)

    updates = []

    for p, g in zip(tparams.values(), gshared):
        acc = theano.shared(p.get_value() * 0.)
        acc_new = rho * acc + (1 - rho) * g ** 2
        updates.append((acc, acc_new))
        updated_p = p - lr * (g / tensor.sqrt(acc_new + epsilon))
        updates.append((p, updated_p))

    f_update = theano.function([lr], [], updates=updates)

    return f_grad_shared, f_update
Example 14: Adadelta
def Adadelta(tparams, cost, inps, lr, rho=0.95, epsilon=1e-6, clip_norm=5):
    """ default: lr=0.5 """
    grads = tensor.grad(cost, tparams.values())
    norm = tensor.sqrt(sum([tensor.sum(g**2) for g in grads]))
    # as in Adagrad above: clip symbolically, a Python `if` cannot test the symbolic norm
    grads = [tensor.switch(tensor.ge(norm, clip_norm), g * clip_norm / norm, g)
             for g in grads]

    gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
               for k, p in tparams.iteritems()]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]
    f_grad_shared = theano.function(inps, cost, updates=gsup)

    updates = []

    for p, g in zip(tparams.values(), gshared):
        acc = theano.shared(p.get_value() * 0.)
        acc_delta = theano.shared(p.get_value() * 0.)
        acc_new = rho * acc + (1 - rho) * g ** 2
        updates.append((acc, acc_new))

        update = g * tensor.sqrt(acc_delta + epsilon) / tensor.sqrt(acc_new + epsilon)
        updated_p = p - lr * update
        updates.append((p, updated_p))

        acc_delta_new = rho * acc_delta + (1 - rho) * update ** 2
        updates.append((acc_delta, acc_delta_new))

    f_update = theano.function([lr], [], updates=updates)

    return f_grad_shared, f_update
Example 15: compute_updates
def compute_updates(self, training_cost, params):
    updates = []

    grads = T.grad(training_cost, params)
    grads = OrderedDict(zip(params, grads))

    # Clip stuff
    c = numpy.float32(self.cutoff)
    clip_grads = []

    norm_gs = T.sqrt(sum(T.sum(g ** 2) for p, g in grads.items()))
    normalization = T.switch(T.ge(norm_gs, c), c / norm_gs, numpy.float32(1.))
    notfinite = T.or_(T.isnan(norm_gs), T.isinf(norm_gs))

    for p, g in grads.items():
        clip_grads.append((p, T.switch(notfinite, numpy.float32(.1) * p, g * normalization)))

    grads = OrderedDict(clip_grads)

    if self.updater == 'adagrad':
        updates = Adagrad(grads, self.lr)
    elif self.updater == 'sgd':
        raise Exception("Sgd not implemented!")
    elif self.updater == 'adadelta':
        updates = Adadelta(grads)
    elif self.updater == 'rmsprop':
        updates = RMSProp(grads, self.lr)
    elif self.updater == 'adam':
        updates = Adam(grads)
    else:
        raise Exception("Updater not understood!")

    return updates
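Two details of compute_updates worth isolating: the scaling factor is computed once from the global norm, and a non-finite norm (NaN or Inf gradients) triggers a fallback that nudges the parameter toward zero instead of applying the bad gradient. A condensed sketch of that guard for one parameter (names illustrative; imports as in the first sketch):

p = T.dvector('p')   # stands in for a parameter
g = T.dvector('g')   # its gradient
c = np.float32(1.0)  # cutoff
norm_gs = T.sqrt(T.sum(g ** 2))
normalization = T.switch(T.ge(norm_gs, c), c / norm_gs, np.float32(1.))
notfinite = T.or_(T.isnan(norm_gs), T.isinf(norm_gs))
safe_g = T.switch(notfinite, np.float32(.1) * p, g * normalization)
f = theano.function([p, g], safe_g)
print(f(np.array([1., 1.]), np.array([np.nan, 0.])))  # -> [0.1 0.1]
print(f(np.array([1., 1.]), np.array([3., 4.])))      # -> [0.6 0.8]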