This article collects typical usage examples of the theano.tensor.ceil method in Python. If you are wondering what tensor.ceil does, how to call it, or where to find working examples, the curated code samples below may help. You can also explore the usage of other methods in the theano.tensor module.
The following presents 7 code examples of tensor.ceil, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
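Before the collected examples, here is a minimal self-contained sketch of what tensor.ceil does (the variable names are illustrative): it builds a symbolic elemwise-ceiling expression that is then compiled with theano.function.

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')                     # symbolic float64 vector
f = theano.function([x], T.ceil(x))    # compiled elemwise ceiling
print(f(np.array([0.2, 1.0, -1.7])))   # -> [ 1.  1. -1.]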
Example 1: pad_to_a_multiple
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import ceil [as alias]
from theano import tensor


def pad_to_a_multiple(tensor_, k, pad_with):
    """Pad a tensor so that its first dimension becomes a multiple of a number.

    Parameters
    ----------
    tensor_ : :class:`~theano.Variable`
        The tensor to pad.
    k : int
        The number that the length of `tensor_` is made a multiple of.
    pad_with : float or int
        The value to pad with.

    """
    new_length = (
        tensor.ceil(tensor_.shape[0].astype('float32') / k) * k).astype('int64')
    new_shape = tensor.set_subtensor(tensor_.shape[:1], new_length)
    canvas = tensor.alloc(pad_with, tensor.prod(new_shape)).reshape(
        new_shape, ndim=tensor_.ndim)
    return tensor.set_subtensor(canvas[:tensor_.shape[0]], tensor_)
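A quick usage sketch under assumed inputs (the matrix shape and pad value are illustrative): padding a 5-row matrix up to the next multiple of 4 yields 8 rows, with the new rows filled by the pad value.

import numpy as np
import theano
from theano import tensor

x = tensor.matrix('x')
f = theano.function([x], pad_to_a_multiple(x, 4, pad_with=0.0))
out = f(np.ones((5, 3), dtype=x.dtype))
print(out.shape)  # (8, 3) -- rows 5..7 filled with 0.0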
Example 2: spp_max_pool_axis_kwargs
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import ceil [as alias]
import numpy as np
import theano.tensor as T
import treeano


def spp_max_pool_axis_kwargs(in_shape, out_shape):
    symbolic = (treeano.utils.is_variable(in_shape)
                or treeano.utils.is_variable(out_shape))
    # max pooling requires a static shape, so symbolic shapes are rejected;
    # note that this makes the `if symbolic` branch below unreachable
    assert not symbolic
    if symbolic:
        int_ceil = lambda x: T.ceil(x).astype("int32")
    else:
        int_ceil = lambda x: int(np.ceil(x))
    # e.g. if the input is 5 and the output is 2, each pool size should be 3
    pool_size = int_ceil(in_shape / out_shape)
    # the stride should equal pool_size, since we want non-overlapping regions
    stride = pool_size
    # pad as much as possible, since ignore_border=True
    padding = int_ceil((pool_size * out_shape - in_shape) / 2)
    if not symbolic:
        assert padding < pool_size
    return dict(
        ds=pool_size,
        st=stride,
        padding=padding,
    )
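For a concrete call with static shapes (assuming Python 3 true division), spp_max_pool_axis_kwargs(5, 2) takes the non-symbolic path: pool_size = ceil(5 / 2) = 3, stride = 3, and padding = ceil((3 * 2 - 5) / 2) = 1, which satisfies the padding < pool_size check.

print(spp_max_pool_axis_kwargs(5, 2))  # {'ds': 3, 'st': 3, 'padding': 1}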
Example 3: compute_sub_all_scores
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import ceil [as alias]
def compute_sub_all_scores(self, start_end):
    plu = softmax(T.dot(self.trained_users[start_end], self.trained_items.T))[:, :-1]  # (n_batch, n_item)
    length = T.max(T.sum(self.tes_masks[start_end], axis=1))  # maximum valid sequence length (e.g. 253)
    cidx = T.arange(length).reshape((1, length)) + self.tra_accum_lens[start_end][:, 0].reshape((len(start_end), 1))
    cl = T.sum(self.trained_items[self.tra_context_masks[cidx]], axis=2)  # (n_batch, seq_length, n_size)
    cl = cl.dimshuffle(1, 2, 0)
    pb = self.trained_branch[self.routes]  # (n_item, 4, tree_depth, n_size)
    shp0, shp1, shp2 = self.lrs.shape
    lrs = self.lrs.reshape((shp0, shp1, shp2, 1, 1))
    pr_bc = T.dot(pb, cl)
    br = sigmoid(pr_bc * lrs) * T.ceil(abs(pr_bc))  # (n_item, 4, tree_depth, seq_length, n_batch)
    path = T.prod(br, axis=2) * self.probs.reshape((shp0, shp1, 1, 1))
    del cl, pb, br, lrs
    # paths = T.prod((T.floor(1 - path) + path), axis=1)  # (n_item, seq_length, n_batch)
    paths = T.sum(path, axis=1)
    paths = T.floor(1 - paths) + paths  # map exact zeros to 1 (see the note below)
    p = paths[:-1].T * plu.reshape((plu.shape[0], 1, plu.shape[1]))  # (n_batch, seq_length, n_item)
    # p = plu.reshape((plu.shape[0], 1, plu.shape[1])) * T.ones((plu.shape[0], length, plu.shape[1]))
    return T.reshape(p, (p.shape[0] * p.shape[1], p.shape[2])).eval()
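Two ceil/floor idioms in this example are worth unpacking. T.ceil(abs(pr_bc)) is exactly 0 where pr_bc is 0 and at least 1 elsewhere, so it masks out branches with no activation (it is only a clean 0/1 mask when |pr_bc| <= 1). And for paths in [0, 1], T.floor(1 - paths) + paths leaves values in (0, 1] unchanged but maps an exact 0 to 1, so empty paths do not annihilate the product with plu. A small numeric sketch of the second idiom (illustrative values only):

import numpy as np
import theano.tensor as T

paths = T.vector('paths')
filled = T.floor(1 - paths) + paths  # 0 -> 1, values in (0, 1] unchanged
print(filled.eval({paths: np.array([0.0, 0.25, 1.0], dtype=paths.dtype)}))
# -> [1.   0.25 1.  ]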
Example 4: ceil
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import ceil [as alias]
def ceil(x):
    """
    Elemwise ceiling of `x`.
    """
    # see decorator for function body -- in Theano's source this stub is
    # wrapped by an elemwise decorator that generates the implementation
Example 5: fast_jacobian
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import ceil [as alias]
import theano
import theano.tensor as tt
from theano import ifelse
from theano.gof import Variable


def fast_jacobian(expr, wrt, chunk_size=16, func=None):
    '''
    Computes the Jacobian by tiling the inputs.
    Copied from https://gist.github.com/aam-at/2b2bc5c35850b553d4ec
    '''
    assert isinstance(expr, Variable), \
        "tensor.jacobian expects a Variable as `expr`"
    assert expr.ndim < 2, \
        ("tensor.jacobian expects a 1-dimensional variable as "
         "`expr`. If not, use flatten to make it a vector")

    num_chunks = tt.ceil(1.0 * expr.shape[0] / chunk_size)
    num_chunks = tt.cast(num_chunks, 'int32')
    steps = tt.arange(num_chunks)
    remainder = expr.shape[0] % chunk_size

    def chunk_grad(i):
        ''' operates on a subset of the gradient variables '''
        wrt_rep = tt.tile(wrt, (chunk_size, 1))
        if func is not None:
            expr_rep = func(wrt_rep)
        else:
            expr_rep, _ = theano.scan(
                fn=lambda wrt_: theano.clone(expr, {wrt: wrt_}),
                sequences=wrt_rep)
        chunk_expr_grad = tt.roll(
            tt.identity_like(expr_rep),
            i * chunk_size,
            axis=1)
        return tt.grad(cost=None,
                       wrt=wrt_rep,
                       known_grads={
                           expr_rep: chunk_expr_grad
                       })

    grads, _ = theano.scan(chunk_grad, sequences=steps)
    grads = grads.reshape((chunk_size * grads.shape[0], wrt.shape[0]))
    jac = ifelse.ifelse(tt.eq(remainder, 0), grads, grads[:expr.shape[0], :])
    return jac
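A usage sketch under assumed shapes (the expression y = tanh(W x) and all names are illustrative, not part of the original gist): the Jacobian of tanh(W x) at x = 0 equals W, so the output shape is (3, 5).

import numpy as np
import theano
import theano.tensor as tt

x = tt.dvector('x')
W = np.random.randn(3, 5)
y = tt.tanh(tt.dot(W, x))            # 1-D expression of length 3
jac = fast_jacobian(y, x, chunk_size=2)
f = theano.function([x], jac)
print(f(np.zeros(5)).shape)          # expected: (3, 5); equals W at x = 0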
Example 6: ceil
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import ceil [as alias]
import theano.tensor as T


def ceil(x):
    return T.ceil(x)
Example 7: __theano_train__
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import ceil [as alias]
def __theano_train__(self, n_size):
    """
    Pr(l|u, C(l)) = Pr(l|u) * Pr(l|C(l))
    Pr(u, l, t) = Pr(l|u, C(l)) if C(l) exists,
                  Pr(l|u)       otherwise.
    $\Theta$ = argmax Pr(u, l, t)
    """
    tra_mask = T.ivector()
    seq_length = T.sum(tra_mask)  # effective (unmasked) length
    wl = T.concatenate((self.wl, self.wl_m))
    tidx, cidx, bidx, userid = T.ivector(), T.imatrix(), T.itensor3(), T.iscalar()
    pb = self.pb[bidx]    # (seq_length, 4, depth, n_size)
    lrs = self.lrs[tidx]  # (seq_length, 4, depth)
    # user preference
    xu = self.xu[userid]
    plu = softmax(T.dot(xu, self.wl.T))
    # geographical influence
    cl = T.sum(wl[cidx], axis=1)  # (seq_length, n_size)
    cl = cl.reshape((cl.shape[0], 1, 1, cl.shape[1]))
    br = sigmoid(T.sum(pb[:seq_length] * cl, axis=3) * lrs[:seq_length]) * T.ceil(abs(T.mean(cl, axis=3)))
    path = T.prod(br, axis=2) * self.probs[tidx][:seq_length]
    # paths = T.prod((T.floor(1 - path) + path), axis=1)
    paths = T.sum(path, axis=1)
    paths = T.floor(1 - paths) + paths  # map exact zeros to 1, as in Example 3
    # ----------------------------------------------------------------------------
    # cost, gradients, learning rate, l2 regularization
    lr, l2 = self.alpha_lambda[0], self.alpha_lambda[1]
    seq_l2_sq = T.sum([T.sum(par ** 2) for par in [xu, self.wl]])
    upq = -1 * T.sum(T.log(plu[tidx[:seq_length]] * paths)) / seq_length
    seq_costs = (
        upq +
        0.5 * l2 * seq_l2_sq)
    seq_grads = T.grad(seq_costs, self.params)
    seq_updates = [(par, par - lr * gra) for par, gra in zip(self.params, seq_grads)]
    pars_subs = [(self.xu, xu), (self.pb, pb)]
    seq_updates.extend([(par, T.set_subtensor(sub, sub - lr * T.grad(seq_costs, sub)))
                        for par, sub in pars_subs])
    # ----------------------------------------------------------------------------
    uidx = T.iscalar()  # the type of T.iscalar() is TensorType(int32, )
    self.seq_train = theano.function(
        inputs=[uidx],
        outputs=upq,
        updates=seq_updates,
        givens={
            userid: uidx,
            tidx: self.tra_target_masks[uidx],
            cidx: self.tra_context_masks[T.arange(self.tra_accum_lens[uidx][0], self.tra_accum_lens[uidx][1])],
            bidx: self.routes[self.tra_target_masks[uidx]],
            tra_mask: self.tra_masks[uidx]
            # tra_mask_cot: self.tra_masks_cot[T.arange(self.tra_accum_lens[uidx][0], self.tra_accum_lens[uidx][1])]
        })