This page collects typical usage examples of the Python method theano.tensor.transpose. If you are wondering what tensor.transpose does, how to use it, or where to find worked examples, the curated code samples below should help. You can also explore the usage of other methods in the theano.tensor module.
The following 13 code examples of tensor.transpose are listed, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: test_logical_shapes
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import transpose [as alias]
def test_logical_shapes(self):
# Logical shapes are not supported anymore, so we check that it
# raises an Exception.
for stride in range(1, 4):
kshp = (10, 2, 10, 10)
featshp = (3, 10, 11, 11)
a = tensor.ftensor4()
A = tensor.ftensor4()
# Need to transpose first two dimensions of kernel, and reverse
# index kernel image dims (for correlation)
kernel_rotated = tensor.transpose(A, axes=[1, 0, 2, 3])
featshp_logical = (featshp[0], featshp[1], featshp[2] * stride,
featshp[3] * stride)
kshp_rotated = (kshp[1], kshp[0], kshp[2], kshp[3])
self.assertRaises(ValueError, tensor.nnet.conv2d,
a, kernel_rotated,
border_mode='full',
image_shape=featshp,
filter_shape=kshp_rotated,
imshp_logical=featshp_logical[1:],
kshp_logical=kshp[2:])
Example 2: compute_kernel
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import transpose [as alias]
def compute_kernel(lls, lsf, x, z):
ls = T.exp(lls)
sf = T.exp(lsf)
if x.ndim == 1:
x = x[ None, : ]
if z.ndim == 1:
z = z[ None, : ]
lsre = T.outer(T.ones_like(x[ :, 0 ]), ls)
r2 = T.outer(T.sum(x * x / lsre, 1), T.ones_like(z[ : , 0 : 1 ])) - np.float32(2) * \
T.dot(x / lsre, T.transpose(z)) + T.dot(np.float32(1.0) / lsre, T.transpose(z)**2)
k = sf * T.exp(-np.float32(0.5) * r2)
return k
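compute_kernel evaluates an RBF-type kernel with per-dimension scaling from log length-scales lls and a log signal variance lsf. A minimal usage sketch, assuming the same theano/numpy conventions as the snippet (all names below are illustrative):

import numpy as np
import theano
import theano.tensor as T

x_ = T.fmatrix('x')
z_ = T.fmatrix('z')
lls_ = T.fvector('lls')  # log length-scales, one per input dimension
lsf_ = T.fscalar('lsf')  # log signal variance
kernel_fn = theano.function([lls_, lsf_, x_, z_],
                            compute_kernel(lls_, lsf_, x_, z_),
                            allow_input_downcast=True)
K = kernel_fn(np.zeros(3), 0.0, np.random.randn(5, 3), np.random.randn(4, 3))
print(K.shape)  # (5, 4): one kernel value per (x, z) pair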
Example 3: compute_psi1
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import transpose [as alias]
def compute_psi1(lls, lsf, xmean, xvar, z):
if xmean.ndim == 1:
xmean = xmean[ None, : ]
ls = T.exp(lls)
sf = T.exp(lsf)
lspxvar = ls + xvar
constterm1 = ls / lspxvar
constterm2 = T.prod(T.sqrt(constterm1), 1)
r2_psi1 = T.outer(T.sum(xmean * xmean / lspxvar, 1), T.ones_like(z[ : , 0 : 1 ])) \
- np.float32(2) * T.dot(xmean / lspxvar, T.transpose(z)) + \
T.dot(np.float32(1.0) / lspxvar, T.transpose(z)**2)
psi1 = sf * T.outer(constterm2, T.ones_like(z[ : , 0 : 1 ])) * T.exp(-np.float32(0.5) * r2_psi1)
return psi1
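compute_psi1 returns the expected kernel psi1[n, m] = E[k(x_n, z_m)] under Gaussian inputs x_n ~ N(xmean_n, diag(xvar_n)). A hedged Monte Carlo sanity check, reusing compute_kernel from Example 2 (names are illustrative):

import numpy as np
import theano
import theano.tensor as T

lls_, lsf_ = T.fvector(), T.fscalar()
xm_, xv_, z_ = T.fmatrix(), T.fmatrix(), T.fmatrix()
psi1_fn = theano.function([lls_, lsf_, xm_, xv_, z_],
                          compute_psi1(lls_, lsf_, xm_, xv_, z_),
                          allow_input_downcast=True)
k_fn = theano.function([lls_, lsf_, xm_, z_],
                       compute_kernel(lls_, lsf_, xm_, z_),
                       allow_input_downcast=True)

rng = np.random.RandomState(0)
lls_v, lsf_v = rng.randn(2), 0.3
xmean, xvar = rng.randn(1, 2), np.exp(rng.randn(1, 2))
z_v = rng.randn(4, 2)
samples = xmean + np.sqrt(xvar) * rng.randn(100000, 2)  # draws from N(xmean, diag(xvar))
mc = k_fn(lls_v, lsf_v, samples, z_v).mean(0)            # Monte Carlo estimate of psi1
exact = psi1_fn(lls_v, lsf_v, xmean, xvar, z_v)[0]
print(np.abs(exact - mc).max())  # small, on the order of 1/sqrt(#samples)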
Example 4: compute_log_ei
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import transpose [as alias]
def compute_log_ei(self, x, incumbent):
Kzz = compute_kernel(self.lls, self.lsf, self.z, self.z) + T.eye(self.z.shape[ 0 ]) * self.jitter * T.exp(self.lsf)
KzzInv = T.nlinalg.MatrixInversePSD()(Kzz)
LLt = T.dot(self.LParamPost, T.transpose(self.LParamPost))
covCavityInv = KzzInv + LLt * casting(self.n_points - self.set_for_training) / casting(self.n_points)
covCavity = T.nlinalg.MatrixInversePSD()(covCavityInv)
meanCavity = T.dot(covCavity, casting(self.n_points - self.set_for_training) / casting(self.n_points) * self.mParamPost)
KzzInvcovCavity = T.dot(KzzInv, covCavity)
KzzInvmeanCavity = T.dot(KzzInv, meanCavity)
Kxz = compute_kernel(self.lls, self.lsf, x, self.z)
B = T.dot(KzzInvcovCavity, KzzInv) - KzzInv
v_out = T.exp(self.lsf) + T.dot(Kxz * T.dot(Kxz, B), T.ones_like(self.z[ : , 0 : 1 ])) # + T.exp(self.lvar_noise)
m_out = T.dot(Kxz, KzzInvmeanCavity)
s = (incumbent - m_out) / T.sqrt(v_out)
log_ei = T.log((incumbent - m_out) * ratio(s) + T.sqrt(v_out)) + log_n_pdf(s)
return log_ei
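ratio and log_n_pdf are helpers from the same code base and are not shown on this page. The definitions below are a hedged reconstruction, consistent with the standard decomposition EI = (incumbent - m)*Phi(s) + sqrt(v)*phi(s), whose log is taken on the last line above:

import math
import theano.tensor as T

def n_pdf(x):
    # standard normal density phi(x)
    return T.exp(-0.5 * x**2) / math.sqrt(2 * math.pi)

def log_n_pdf(x):
    # log phi(x)
    return -0.5 * x**2 - 0.5 * math.log(2 * math.pi)

def n_cdf(x):
    # standard normal CDF Phi(x), via the error function
    return 0.5 * (1.0 + T.erf(x / math.sqrt(2.0)))

def ratio(x):
    # Phi(x) / phi(x); factoring phi(s) out of EI yields the log_ei expression above
    return n_cdf(x) / n_pdf(x)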
Example 5: depth_to_space
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import transpose [as alias]
def depth_to_space(input, scale, data_format=None):
"""Uses phase shift algorithm to convert
channels/depth for spatial resolution
"""
if data_format is None:
data_format = image_data_format()
data_format = data_format.lower()
input = _preprocess_conv2d_input(input, data_format)
b, k, row, col = input.shape
out_channels = k // (scale ** 2)
x = T.reshape(input, (b, scale, scale, out_channels, row, col))
x = T.transpose(x, (0, 3, 4, 1, 5, 2))
out = T.reshape(x, (b, out_channels, row * scale, col * scale))
out = _postprocess_conv2d_output(out, input, None, None, None, data_format)
return out
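A NumPy mirror of the reshape/transpose pattern above (channels_first layout, without the Keras pre/postprocessing) makes it easy to see what the phase shift does to a concrete array; this is a sketch, not part of the backend:

import numpy as np

def depth_to_space_np(x, scale):
    # x: (batch, channels, rows, cols), channels divisible by scale**2
    b, k, row, col = x.shape
    out_channels = k // (scale ** 2)
    y = x.reshape(b, scale, scale, out_channels, row, col)
    y = y.transpose(0, 3, 4, 1, 5, 2)
    return y.reshape(b, out_channels, row * scale, col * scale)

x = np.arange(16, dtype=np.float32).reshape(1, 4, 2, 2)
print(depth_to_space_np(x, 2).shape)  # (1, 1, 4, 4)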
Example 6: _format_as_impl
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import transpose [as alias]
def _format_as_impl(self, is_numeric, batch, space):
if isinstance(space, VectorSpace):
# We need to ensure that the resulting batch will always be
# the same in `space`, no matter what the axes of `self` are.
if self.axes != self.default_axes:
# The batch index goes on the first axis
assert self.default_axes[0] == 'b'
batch = batch.transpose(*[self.axes.index(axis)
for axis in self.default_axes])
result = batch.reshape((batch.shape[0],
self.get_total_dimension()))
if space.sparse:
result = _dense_to_sparse(result)
elif isinstance(space, Conv2DSpace):
result = Conv2DSpace.convert(batch, self.axes, space.axes)
else:
raise NotImplementedError("%s doesn't know how to format as %s"
% (str(self), str(space)))
return _cast(result, space.dtype)
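The transpose here reorders a batch from this space's axes to the default ('b', 0, 1, 'c') order before flattening, so each example ends up as one row. A small NumPy illustration of the same indexing trick (the axis tuples and shapes are hypothetical):

import numpy as np

axes = ('c', 0, 1, 'b')          # this space's layout
default_axes = ('b', 0, 1, 'c')  # batch index first
batch = np.zeros((3, 4, 5, 2))   # c=3, rows=4, cols=5, b=2
reordered = batch.transpose(*[axes.index(axis) for axis in default_axes])
flat = reordered.reshape(reordered.shape[0], -1)
print(flat.shape)  # (2, 60): one flattened example per row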
Example 7: get_output_for
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import transpose [as alias]
def get_output_for(self, input, deterministic=False, **kwargs):
    def _phase_shift(input, r):
        bsize, c, a, b = input.shape[0], 1, self.output_shape[2] // r, self.output_shape[3] // r
        X = T.reshape(input, (bsize, r, r, a, b))
        X = T.transpose(X, (0, 3, 4, 1, 2))  # bsize, a, b, r2, r1
        X = T.split(x=X, splits_size=[1] * a, n_splits=a, axis=1)  # a, [bsize, b, r, r]
        X = [T.reshape(x, (bsize, b, r, r)) for x in X]
        X = T.concatenate(X, axis=2)  # bsize, b, a*r, r
        X = T.split(x=X, splits_size=[1] * b, n_splits=b, axis=1)  # b, [bsize, a*r, r]
        X = [T.reshape(x, (bsize, a * r, r)) for x in X]
        X = T.concatenate(X, axis=2)  # bsize, a*r, b*r
        return X.dimshuffle(0, 'x', 1, 2)
    Xc = T.split(x=input, splits_size=[input.shape[1] // self.c] * self.c, n_splits=self.c, axis=1)
    return T.concatenate([_phase_shift(xc, self.r) for xc in Xc], axis=1)
# Multiscale Dilated Convolution Block
# This function (not a layer in and of itself, though you could make it one) returns a set of concatenated conv2d and dilatedconv2d layers.
# Each layer uses the same basic filter W, operating at a different dilation factor (or taken as the mean of W for the 1x1 conv).
# The channel-wise output of each layer is weighted by a set of coefficients, which are initialized to 1 / the total number of dilation scales,
# meaning that we're starting by taking an elementwise mean. These coefficients should be learnable parameters.
# NOTES: - I'm considering changing the variable names to be more descriptive and less like ridiculous academic code. It's on the to-do list.
#        - I keep the bias and nonlinearity out of the default definition of this layer, as I expect it to be batch-normed and nonlinearized in the model config.
# (A minimal sketch of this block follows below.)
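A minimal pure-Theano sketch of the idea in the notes above: one shared 3x3 filter bank W applied at several dilation factors, each scale weighted by learnable per-channel coefficients initialised to 1/len(dilations), so summing the weighted outputs starts out as an elementwise mean. The 1x1 mean-of-W branch is omitted for brevity, and all names are illustrative rather than the author's:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d

def mdc_block(x, W, dilations=(1, 2, 4)):
    # x: (batch, in_ch, rows, cols); W: shared variable of shape (out_ch, in_ch, 3, 3)
    out_ch = W.get_value().shape[0]
    outputs, coeffs = [], []
    for d in dilations:
        c = theano.shared(np.full(out_ch, 1.0 / len(dilations), dtype=np.float32))
        # border_mode=(d, d) keeps the spatial size for a 3x3 kernel dilated by d
        y = conv2d(x, W, border_mode=(d, d), filter_dilation=(d, d))
        outputs.append(y * c.dimshuffle('x', 0, 'x', 'x'))
        coeffs.append(c)  # learnable scale coefficients
    return sum(outputs), coeffs  # bias/nonlinearity left out, as the notes suggest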
Example 8: transpose
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import transpose [as alias]
def transpose(x):
# TODO: `keras_shape` inference.
return T.transpose(x)
Example 9: transpose
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import transpose [as alias]
def transpose(x):
return T.transpose(x)
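Examples 8 and 9 are backend-level wrappers that simply delegate to T.transpose; a minimal check:

import numpy as np
import theano
import theano.tensor as T

m = T.fmatrix('m')
f = theano.function([m], transpose(m))
print(f(np.ones((2, 3), dtype=np.float32)).shape)  # (3, 2)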
Example 10: compute_psi2
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import transpose [as alias]
def compute_psi2(lls, lsf, z, input_means, input_vars):
ls = T.exp(lls)
sf = T.exp(lsf)
b = ls / casting(2.0)
term_1 = T.prod(T.sqrt(b / (b + input_vars)), 1)
scale = T.sqrt(4 * (2 * b[ None, : ] + 0 * input_vars))
scaled_z = z[ None, : , : ] / scale[ : , None , : ]
scaled_z_minus_m = scaled_z
    r2b = T.sum(scaled_z_minus_m**2, 2)[ :, None, : ] + T.sum(scaled_z_minus_m**2, 2)[ :, : , None ] - \
        2 * T.batched_dot(scaled_z_minus_m, T.transpose(scaled_z_minus_m, [ 0, 2, 1 ]))
term_2 = T.exp(-r2b)
scale = T.sqrt(4 * (2 * b[ None, : ] + 2 * input_vars))
scaled_z = z[ None, : , : ] / scale[ : , None , : ]
scaled_m = input_means / scale
scaled_m = T.tile(scaled_m[ : , None, : ], [ 1, z.shape[ 0 ], 1])
scaled_z_minus_m = scaled_z - scaled_m
    r2b = T.sum(scaled_z_minus_m**2, 2)[ :, None, : ] + T.sum(scaled_z_minus_m**2, 2)[ :, : , None ] + \
        2 * T.batched_dot(scaled_z_minus_m, T.transpose(scaled_z_minus_m, [ 0, 2, 1 ]))
term_3 = T.exp(-r2b)
psi2_computed = sf**casting(2.0) * term_1[ :, None, None ] * term_2 * term_3
return T.transpose(psi2_computed, [ 1, 2, 0 ])
Example 11: getLogNormalizerCavity
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import transpose [as alias]
def getLogNormalizerCavity(self):
assert self.covCavity is not None and self.meanCavity is not None and self.covCavityInv is not None
return casting(0.5 * self.n_inducing_points * np.log(2 * np.pi)) + casting(0.5) * T.nlinalg.LogDetPSD()(self.covCavity) + \
casting(0.5) * T.dot(T.dot(T.transpose(self.meanCavity), self.covCavityInv), self.meanCavity)
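The returned expression is the log normalization constant of a Gaussian with mean meanCavity and covariance covCavity: 0.5*d*log(2*pi) + 0.5*log|Sigma| + 0.5*m'Sigma^{-1}m. A hedged NumPy rendering of the same formula:

import numpy as np

def log_normalizer(mean, cov):
    # log of the normalization constant of exp(eta'u - 0.5*u'Lambda*u),
    # with Lambda = inv(cov) and eta = Lambda.dot(mean)
    d = mean.shape[0]
    _, logdet = np.linalg.slogdet(cov)
    return 0.5 * d * np.log(2 * np.pi) + 0.5 * logdet + \
        0.5 * mean.dot(np.linalg.solve(cov, mean))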
Example 12: getLogNormalizerPosterior
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import transpose [as alias]
def getLogNormalizerPosterior(self):
assert self.covPosterior is not None and self.meanPosterior is not None and self.covPosteriorInv is not None
return casting(0.5 * self.n_inducing_points * np.log(2 * np.pi)) + casting(0.5) * T.nlinalg.LogDetPSD()(self.covPosterior) + \
casting(0.5) * T.dot(T.dot(T.transpose(self.meanPosterior), self.covPosteriorInv), self.meanPosterior)
##
# We return the contribution to the energy of the node
# (see the last equation of Sec. 4 in http://arxiv.org/pdf/1602.04133.pdf, v1).
#
Example 13: compute_psi2_numpy
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import transpose [as alias]
def compute_psi2_numpy(lls, lsf, z, input_means, input_vars):
ls = np.exp(lls)
sf = np.exp(lsf)
b = ls / casting(2.0)
term_1 = np.prod(np.sqrt(b / (b + input_vars)), 1)
scale = np.sqrt(4 * (2 * b[ None, : ] + 0 * input_vars))
scaled_z = z[ None, : , : ] / scale[ : , None , : ]
scaled_z_minus_m = scaled_z
r2b = np.sum(scaled_z_minus_m**2, 2)[ :, None, : ] + np.sum(scaled_z_minus_m**2, 2)[ :, : , None ] - \
2 * np.einsum('ijk,ikl->ijl', scaled_z_minus_m, np.transpose(scaled_z_minus_m, [ 0, 2, 1 ]))
term_2 = np.exp(-r2b)
scale = np.sqrt(4 * (2 * b[ None, : ] + 2 * input_vars))
scaled_z = z[ None, : , : ] / scale[ : , None , : ]
scaled_m = input_means / scale
scaled_m = np.tile(scaled_m[ : , None, : ], [ 1, z.shape[ 0 ], 1])
scaled_z_minus_m = scaled_z - scaled_m
r2b = np.sum(scaled_z_minus_m**2, 2)[ :, None, : ] + np.sum(scaled_z_minus_m**2, 2)[ :, : , None ] + \
2 * np.einsum('ijk,ikl->ijl', scaled_z_minus_m, np.transpose(scaled_z_minus_m, [ 0, 2, 1 ]))
term_3 = np.exp(-r2b)
psi2_computed = sf**casting(2.0) * term_1[ :, None, None ] * term_2 * term_3
psi2_computed = np.transpose(psi2_computed, [ 1, 2, 0 ])
return psi2_computed
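A hedged cross-check that the symbolic compute_psi2 (Example 10) and this NumPy version agree; casting is assumed to be a float32 cast, as in the source repository:

import numpy as np
import theano
import theano.tensor as T

casting = np.float32  # assumption: matches the helper used throughout these examples

rng = np.random.RandomState(0)
lls_v = rng.randn(3).astype(np.float32)
lsf_v = np.float32(0.5)
z_v = rng.randn(4, 3).astype(np.float32)
means_v = rng.randn(5, 3).astype(np.float32)
vars_v = np.exp(rng.randn(5, 3)).astype(np.float32)

args = [T.fvector(), T.fscalar(), T.fmatrix(), T.fmatrix(), T.fmatrix()]
psi2_fn = theano.function(args, compute_psi2(*args))
print(np.allclose(psi2_fn(lls_v, lsf_v, z_v, means_v, vars_v),
                  compute_psi2_numpy(lls_v, lsf_v, z_v, means_v, vars_v),
                  atol=1e-4))  # expected: True, up to float32 rounding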