This article collects typical usage examples of the theano.tensor.prod method in Python. If you are wondering what tensor.prod does, how to call it, or what real code using it looks like, the curated examples below should help. You can also explore further usage examples from the containing module, theano.tensor.
The following shows 15 code examples of the tensor.prod method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
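Before the project code below, here is a minimal sketch of my own (not drawn from any of the quoted projects) showing the basic behaviour of tensor.prod, which mirrors numpy.prod on the symbolic side:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                  # symbolic 2-D input
total = T.prod(x)                  # product of all elements
per_row = T.prod(x, axis=1)        # product along axis 1
f = theano.function([x], [total, per_row])

data = np.arange(1, 7, dtype=theano.config.floatX).reshape(2, 3)
print(f(data))                     # 720.0 and [6., 120.]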
Example 1: define_cost
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def define_cost(self, pred, y0, m0):
    bsize = self.bsize
    npix = int(np.prod(test_shape(y0)[1:]))
    y0_target = y0.reshape((self.bsize, npix))
    y0_mask = m0.reshape((self.bsize, npix))
    pred = pred.reshape((self.bsize, npix))

    p = pred * y0_mask
    t = y0_target * y0_mask
    d = (p - t)

    nvalid_pix = T.sum(y0_mask, axis=1)
    depth_cost = (T.sum(nvalid_pix * T.sum(d**2, axis=1))
                  - 0.5*T.sum(T.sum(d, axis=1)**2)) \
                 / T.maximum(T.sum(nvalid_pix**2), 1)

    return depth_cost
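For reference, the expression above is a masked, scale-invariant depth error. A plain-NumPy transcription of my own (the function name and signature are mine, not part of the project) can be handy for checking the symbolic version on small arrays:

import numpy as np

def scale_invariant_depth_cost(pred, target, mask):
    # pred, target, mask: (batch, npix) arrays; mask holds 0/1 validity flags
    d = (pred - target) * mask
    nvalid = mask.sum(axis=1)
    num = (nvalid * (d ** 2).sum(axis=1)).sum() - 0.5 * (d.sum(axis=1) ** 2).sum()
    return num / max((nvalid ** 2).sum(), 1)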
Example 2: compute_psi1
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def compute_psi1(lls, lsf, xmean, xvar, z):
    if xmean.ndim == 1:
        xmean = xmean[None, :]

    ls = T.exp(lls)
    sf = T.exp(lsf)
    lspxvar = ls + xvar
    constterm1 = ls / lspxvar
    constterm2 = T.prod(T.sqrt(constterm1), 1)
    r2_psi1 = T.outer(T.sum(xmean * xmean / lspxvar, 1), T.ones_like(z[:, 0:1])) \
        - np.float32(2) * T.dot(xmean / lspxvar, T.transpose(z)) \
        + T.dot(np.float32(1.0) / lspxvar, T.transpose(z)**2)
    psi1 = sf * T.outer(constterm2, T.ones_like(z[:, 0:1])) * T.exp(-np.float32(0.5) * r2_psi1)
    return psi1
Example 3: pad_to_a_multiple
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def pad_to_a_multiple(tensor_, k, pad_with):
    """Pad a tensor to make its first dimension a multiple of a number.

    Parameters
    ----------
    tensor_ : :class:`~theano.Variable`
    k : int
        The number, a multiple of which the length of the tensor is made.
    pad_with : float or int
        The value for padding.

    """
    new_length = (
        tensor.ceil(tensor_.shape[0].astype('float32') / k) * k).astype('int64')
    new_shape = tensor.set_subtensor(tensor_.shape[:1], new_length)
    canvas = tensor.alloc(pad_with, tensor.prod(new_shape)).reshape(
        new_shape, ndim=tensor_.ndim)
    return tensor.set_subtensor(canvas[:tensor_.shape[0]], tensor_)
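A small usage sketch of my own (assuming the same `from theano import tensor` import used by the example): padding a batch of five rows up to the next multiple of four.

import numpy as np
import theano
from theano import tensor

x = tensor.matrix('x')
zero = np.asarray(0, dtype=theano.config.floatX)   # match dtypes so set_subtensor accepts the fill value
padded = pad_to_a_multiple(x, 4, pad_with=zero)
f = theano.function([x], padded)

batch = np.ones((5, 3), dtype=theano.config.floatX)
print(f(batch).shape)   # (8, 3): three all-zero rows appended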
Example 4: __init__
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def __init__(self, input_layer, n_outputs, weights_std, init_bias_value, nonlinearity=rectify, dropout=0.):
    self.n_outputs = n_outputs
    self.input_layer = input_layer
    self.weights_std = np.float32(weights_std)
    self.init_bias_value = np.float32(init_bias_value)
    self.nonlinearity = nonlinearity
    self.dropout = dropout
    self.mb_size = self.input_layer.mb_size

    input_shape = self.input_layer.get_output_shape()
    self.n_inputs = int(np.prod(input_shape[1:]))
    self.flatinput_shape = (self.mb_size, self.n_inputs)

    self.W = shared_single(2)  # theano.shared(np.random.randn(self.n_inputs, n_outputs).astype(np.float32) * weights_std)
    self.b = shared_single(1)  # theano.shared(np.ones(n_outputs).astype(np.float32) * self.init_bias_value)
    self.params = [self.W, self.b]
    self.bias_params = [self.b]
    self.reset_params()
Example 5: output
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def output(self, input=None, dropout_active=True, *args, **kwargs):
    # use the 'dropout_active' keyword argument to disable dropout at test time. It is on by default.
    if input is None:
        input = self.input_layer.output(dropout_active=dropout_active, *args, **kwargs)

    if dropout_active and (self.dropout > 0.):
        retain_prob = 1 - self.dropout
        if self.dropout_tied:
            # tie the dropout masks across entire feature maps, so broadcast across the feature maps.
            mask = srng.binomial((input.shape[0], input.shape[1]), p=retain_prob, dtype='int32').astype('float32').dimshuffle(0, 1, 'x', 'x')
        else:
            mask = srng.binomial(input.shape, p=retain_prob, dtype='int32').astype('float32')
        # apply the mask and rescale the input accordingly. This way the weights need no rescaling at test time.
        input = input / retain_prob * mask

    prod = T.tensordot(input, self.W, [[1], [0]])  # this has shape (batch_size, width, height, out_maps)
    prod = prod.dimshuffle(0, 3, 1, 2)  # move the feature maps back to the 1st axis, where they were in the input
    return self.nonlinearity(prod + self.b.dimshuffle('x', 0, 'x', 'x'))
Example 6: sample
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def sample(self, shape):
    if len(shape) != 4:
        raise RuntimeError("Only shapes of length 4 are supported.")

    fan_in = int(np.prod(shape[1:]))
    flat_shape = (shape[0], fan_in)
    a = np.random.normal(0.0, 1.0, flat_shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    q = u if u.shape == flat_shape else v
    q_conv = q.reshape(shape)

    # size = np.maximum(shape[0], fan_in)
    # a = np.random.normal(0.0, 1.0, (size, size))
    # q, _ = np.linalg.qr(a)
    # q_conv = q[:shape[0], :fan_in].reshape(shape)

    return nn.utils.floatX(self.gain * q_conv)
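A standalone restatement of the same recipe (my own helper name, with nn.utils.floatX replaced by a plain float32 cast), followed by a quick orthogonality check on the flattened filters:

import numpy as np

def orthogonal_conv_init(shape, gain=1.0):
    fan_in = int(np.prod(shape[1:]))
    a = np.random.normal(0.0, 1.0, (shape[0], fan_in))
    u, _, v = np.linalg.svd(a, full_matrices=False)
    q = u if u.shape == (shape[0], fan_in) else v   # pick the factor with the right shape
    return (gain * q.reshape(shape)).astype('float32')

w = orthogonal_conv_init((16, 3, 3, 3))
flat = w.reshape(16, -1)
print(np.allclose(flat.dot(flat.T), np.eye(16), atol=1e-5))   # rows are orthonormal -> True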
Example 7: count_params
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def count_params(x):
    '''Returns the number of scalars in a tensor.

    Return: numpy integer.
    '''
    return np.prod(x.shape.eval())
Example 8: prod
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def prod(x, axis=None, keepdims=False):
    '''Multiply the values in a tensor along the specified axis.
    '''
    return T.prod(x, axis=axis, keepdims=keepdims)
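A short usage sketch of my own for this Keras-backend-style wrapper, assuming it is in scope as `prod` (as defined just above):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
row_prod = prod(x, axis=1, keepdims=True)
f = theano.function([x], row_prod)

data = np.array([[1., 2., 3.], [4., 5., 6.]], dtype=theano.config.floatX)
print(f(data))   # [[6.], [120.]] -- shape (2, 1) because keepdims=True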
Example 9: batch_flatten
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def batch_flatten(x):
    '''Turn an n-D tensor into a 2D tensor where
    the first dimension is conserved.
    '''
    # TODO: `keras_shape` inference.
    x = T.reshape(x, (x.shape[0], T.prod(x.shape) // x.shape[0]))
    return x
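For example (a sketch of my own), flattening a batch of 4-D feature maps down to a matrix while keeping the batch dimension:

import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')
flat = batch_flatten(x)   # the helper defined above
f = theano.function([x], flat)

maps = np.zeros((8, 16, 5, 5), dtype=theano.config.floatX)
print(f(maps).shape)   # (8, 400): only the leading batch dimension is preserved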
Example 10: count_params
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def count_params(x):
    '''Return the number of scalars in a tensor.

    Return: numpy integer.
    '''
    return np.prod(x.shape.eval())
Example 11: batch_flatten
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def batch_flatten(x):
    '''Turn an n-D tensor into a 2D tensor where
    the first dimension is conserved.
    '''
    x = T.reshape(x, (x.shape[0], T.prod(x.shape) // x.shape[0]))
    return x
Example 12: local_det_chol
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def local_det_chol(node):
    """
    If we have det(X) and there is already an L = cholesky(X)
    floating around, then we can use prod(diag(L))**2 to get the determinant.
    """
    if node.op == det:
        x, = node.inputs
        for (cl, xpos) in x.clients:
            if isinstance(cl.op, Cholesky):
                L = cl.outputs[0]
                return [tensor.prod(extract_diag(L) ** 2)]
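The identity this rewrite relies on can be checked numerically with NumPy (a sketch of my own, independent of the optimizer machinery above): for a symmetric positive-definite X with Cholesky factor L, det(X) equals prod(diag(L))**2.

import numpy as np

a = np.random.randn(5, 5)
x = a.dot(a.T) + 5 * np.eye(5)    # symmetric positive-definite
l = np.linalg.cholesky(x)         # x = l.dot(l.T), l lower-triangular
print(np.allclose(np.linalg.det(x), np.prod(np.diag(l)) ** 2))   # True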
Example 13: local_log_prod_sqr
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def local_log_prod_sqr(node):
    if node.op == tensor.log:
        x, = node.inputs
        if x.owner and isinstance(x.owner.op, tensor.elemwise.Prod):
            # we cannot always make this substitution because
            # the prod might include negative terms
            p = x.owner.inputs[0]

            # p is the matrix we're reducing with prod
            if is_positive(p):
                return [tensor.log(p).sum(axis=x.owner.op.axis)]

            # TODO: have a reduction like prod and sum that simply
            # returns the sign of the prod multiplication.
Example 14: compute_psi1_numpy
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def compute_psi1_numpy(lls, lsf, xmean, xvar, z):
    if xmean.ndim == 1:
        xmean = xmean[None, :]

    ls = np.exp(lls)
    sf = np.exp(lsf)
    lspxvar = ls + xvar
    constterm1 = ls / lspxvar
    constterm2 = np.prod(np.sqrt(constterm1), 1)
    r2_psi1 = np.outer(np.sum(xmean * xmean / lspxvar, 1), np.ones(z.shape[0])) \
        - 2 * np.dot(xmean / lspxvar, z.T) \
        + np.dot(1.0 / lspxvar, z.T**2)
    psi1 = sf * np.outer(constterm2, np.ones(z.shape[0])) * np.exp(-0.5 * r2_psi1)
    return psi1
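Because this NumPy version mirrors Example 2 line for line, it works well as a test oracle. A comparison sketch of my own, assuming both compute_psi1 and compute_psi1_numpy are importable side by side:

import numpy as np
import theano.tensor as T

lls = np.zeros(3)                        # log lengthscales -> unit lengthscales
lsf = np.log(2.0)                        # log signal variance
xmean = np.random.randn(5, 3)
xvar = np.random.rand(5, 3) + 0.1
z = np.random.randn(4, 3)

psi1_np = compute_psi1_numpy(lls, lsf, xmean, xvar, z)
psi1_th = compute_psi1(T.constant(lls), T.constant(lsf),
                       T.constant(xmean), T.constant(xvar),
                       T.constant(z)).eval()
print(np.allclose(psi1_np, psi1_th))     # True (up to float32 constant casts)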
Example 15: compute_psi2
# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import prod [as alias]
def compute_psi2(lls, lsf, z, input_means, input_vars):
    ls = T.exp(lls)
    sf = T.exp(lsf)
    b = ls / casting(2.0)
    term_1 = T.prod(T.sqrt(b / (b + input_vars)), 1)

    scale = T.sqrt(4 * (2 * b[None, :] + 0 * input_vars))
    scaled_z = z[None, :, :] / scale[:, None, :]
    scaled_z_minus_m = scaled_z
    r2b = T.sum(scaled_z_minus_m**2, 2)[:, None, :] + T.sum(scaled_z_minus_m**2, 2)[:, :, None] \
        - 2 * T.batched_dot(scaled_z_minus_m, np.transpose(scaled_z_minus_m, [0, 2, 1]))
    term_2 = T.exp(-r2b)

    scale = T.sqrt(4 * (2 * b[None, :] + 2 * input_vars))
    scaled_z = z[None, :, :] / scale[:, None, :]
    scaled_m = input_means / scale
    scaled_m = T.tile(scaled_m[:, None, :], [1, z.shape[0], 1])
    scaled_z_minus_m = scaled_z - scaled_m
    r2b = T.sum(scaled_z_minus_m**2, 2)[:, None, :] + T.sum(scaled_z_minus_m**2, 2)[:, :, None] \
        + 2 * T.batched_dot(scaled_z_minus_m, np.transpose(scaled_z_minus_m, [0, 2, 1]))
    term_3 = T.exp(-r2b)

    psi2_computed = sf**casting(2.0) * term_1[:, None, None] * term_2 * term_3
    return T.transpose(psi2_computed, [1, 2, 0])