This page collects typical usage examples of the Python function theano.tensor.transpose. If you are struggling with questions like: what exactly does the transpose function do, and how and where is it used, the curated code examples below may help.
The following presents 15 code examples of the transpose function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
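Before the examples, a minimal sketch of the function itself (variable names here are illustrative, not taken from any example below): T.transpose behaves like numpy.transpose, reversing all axes when called without an axes argument and permuting them when an axes tuple is given.

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')                     # symbolic 3-D tensor
y = T.transpose(x, (1, 0, 2))          # permute: swap the first two axes
f = theano.function([x], y)
a = np.zeros((2, 3, 4), dtype=theano.config.floatX)
print(f(a).shape)                      # -> (3, 2, 4)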
Example 1: gibbs
def gibbs(self, sample, countStep, function_mode, h_lid_type=0):
    # Templates of variables for calculating h_lid from its previous value
    calc_h_lid = lambda h_lid_old, sample: T.nnet.sigmoid(T.dot(sample, self.W) + self.hBiasbase)  # + T.dot(h_lid_old, self.W2.T)
    calc_hBiases = lambda h_lid: self.hBiasbase + T.dot(h_lid, self.W2.T)
    calc_vBiases = lambda h_lid: self.vBiasbase + T.dot(h_lid, self.W1.T)

    # Parameter: countGibbsStep
    def gibbsSamplingForAllTime(sample, start_h_lid):
        def gibbsSamplingForOneStepTime(sample, h_lid):
            vBias = calc_vBiases(h_lid)
            hBias = calc_hBiases(h_lid)
            res, updates = self.bm.gibbs(sample, self.W, vBias, hBias, countStep, function_mode)
            if h_lid_type == 0:
                return [res, calc_h_lid(h_lid, sample), vBias, hBias], updates
            else:
                return [res, calc_h_lid(h_lid, res), vBias, hBias], updates
        [sample_res, hLids, vBiases, hBiases], updates = theano.scan(gibbsSamplingForOneStepTime, sequences=sample, outputs_info=[None, start_h_lid, None, None])
        return sample_res, hLids, vBiases, hBiases, updates

    # Usual Gibbs sampling
    if len(sample.broadcastable) == 2:
        # A matrix: a single object (no batch axis)
        res, hLids, vBiases, hBiases, updates = gibbsSamplingForAllTime([sample], self.h_lid_0)
        hLids = T.concatenate([[self.h_lid_0], hLids[0:-1]])
        return res, hLids, updates, vBiases, hBiases
    else:
        new_dim = T.cast(sample.shape[0], 'int32')
        my_sample = T.transpose(sample, (1, 0, 2))  # batch-major -> time-major for scan
        h_lids_start = T.reshape(T.repeat(self.h_lid_0, new_dim), (self.hidden, new_dim)).T
        res, hLids, vBiases, hBiases, updates = gibbsSamplingForAllTime(my_sample, h_lids_start)
        res = T.transpose(res, (1, 0, 2))           # back to batch-major
        hLids = T.concatenate([[h_lids_start], hLids[0:-1]])
        hLids = T.transpose(hLids, (1, 0, 2))
        vBiases = T.transpose(vBiases, (1, 0, 2))
        hBiases = T.transpose(hBiases, (1, 0, 2))
        return res, hLids, updates, vBiases, hBiases
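Worth noting: theano.scan always iterates over the leading axis of its sequences, which is why the batch branch above transposes from (batch, time, features) to (time, batch, features) before scanning and transposes the results back afterwards. A minimal sketch of that convention (all names illustrative):

import numpy as np
import theano
import theano.tensor as T

batch = T.tensor3('batch')                     # (batch, time, features)
time_major = T.transpose(batch, (1, 0, 2))     # (time, batch, features)
# scan steps over axis 0: one time slice of the whole batch per step
sums, _ = theano.scan(lambda x_t: x_t.sum(axis=1), sequences=time_major)
f = theano.function([batch], T.transpose(sums, (1, 0)))
a = np.ones((2, 5, 3), dtype=theano.config.floatX)
print(f(a).shape)                              # -> (2, 5)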
Example 2: full
def full(self, X, Z=None):
    X, Xc, Z = self._common(X, Z)
    if Z is None:
        return tt.dot(Xc, tt.transpose(Xc))
    else:
        Zc = tt.sub(Z, self.c)
        return tt.dot(Xc, tt.transpose(Zc))
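The pattern tt.dot(Xc, tt.transpose(Xc)) builds the Gram matrix of the centered inputs. A minimal standalone sketch of the same idiom (names here are illustrative):

import numpy as np
import theano
import theano.tensor as tt

X = tt.matrix('X')                    # (n, d) design matrix
gram = tt.dot(X, tt.transpose(X))     # (n, n) Gram matrix X X^T
f = theano.function([X], gram)
A = np.arange(6).reshape(3, 2).astype(theano.config.floatX)
print(np.allclose(f(A), A @ A.T))     # -> True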
Example 3: theano_kernel_derivative
def theano_kernel_derivative(imshp, kshp, featshp, stride=1):
    features = T.tensor4(dtype=theano.config.floatX)
    kernel = T.tensor4(dtype=theano.config.floatX)
    image = T.tensor4(dtype=theano.config.floatX)

    # Need to transpose first two dimensions of kernel, and reverse index kernel image dims (for correlation)
    kernel_rotated = T.transpose(kernel[:, :, ::-1, ::-1], axes=[1, 0, 2, 3])

    featshp_logical = (featshp[0], featshp[1], featshp[2]*stride, featshp[3]*stride)
    kshp_rotated = (kshp[1], kshp[0], kshp[2], kshp[3])
    image_estimate = conv2d(features, kernel_rotated, border_mode='full',
                            image_shape=featshp, filter_shape=kshp_rotated,
                            imshp_logical=featshp_logical[1:], kshp_logical=kshp[2:])

    image_error = image - image_estimate

    image_error_rot = T.transpose(image_error, [1, 0, 2, 3])[:, :, ::-1, ::-1]
    imshp_rot = (imshp[1], imshp[0], imshp[2], imshp[3])
    featshp_rot = (featshp[1], featshp[0], featshp[2], featshp[3])
    features_rot = T.transpose(features, [1, 0, 2, 3])

    featshp_rot_logical = (featshp_rot[0], featshp_rot[1], featshp_rot[2]*stride, featshp_rot[3]*stride)
    kernel_grad_rot = -conv2d(image_error_rot, features_rot,
                              image_shape=imshp_rot, filter_shape=featshp_rot,
                              imshp_logical=imshp_rot[1:], kshp_logical=featshp_rot_logical[2:])
    kernel_grad = T.transpose(kernel_grad_rot, [1, 0, 2, 3])

    return function(inputs=[image, features, kernel], outputs=kernel_grad)
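The kernel_rotated line is what turns the convolution into a correlation against the transposed filter bank: the two leading axes (features, channels) are swapped and both spatial axes are reversed, i.e. each filter is rotated 180 degrees. A quick way to see the effect of just that line, here with numpy and illustrative shapes:

import numpy as np

k = np.arange(2 * 3 * 2 * 2).reshape(2, 3, 2, 2)      # (nfeat, nchan, ky, kx)
k_rot = np.transpose(k[:, :, ::-1, ::-1], (1, 0, 2, 3))
print(k_rot.shape)                                     # -> (3, 2, 2, 2)
print(k_rot[0, 0])                                     # k[0, 0] rotated by 180 degrees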
Example 4: T_subspacel1_slow_shrinkage_conv
def T_subspacel1_slow_shrinkage_conv(a, L, lam_sparse, lam_slow, imshp, kshp, featshp, stride=(1, 1), small_value=.001):
    featshp = (imshp[0], kshp[0], featshp[2], featshp[3])  # num images, features, szy, szx
    features = T.reshape(T.transpose(a), featshp, ndim=4)

    amp = T.sqrt(features[:, ::2, :, :]**2 + features[:, 1::2, :, :]**2 + small_value)
    #damp = amp[:,1:] - amp[:,:-1]

    # compose slow shrinkage with subspace l1 shrinkage

    # slow shrinkage
    div = T.zeros_like(amp)
    d1 = amp[1:, :, :, :] - amp[:-1, :, :, :]
    d2 = d1[1:, :, :, :] - d1[:-1, :, :, :]
    div = T.set_subtensor(div[1:-1, :, :, :], -d2)
    div = T.set_subtensor(div[0, :, :, :], -d1[0, :, :, :])
    div = T.set_subtensor(div[-1, :, :, :], d1[-1, :, :, :])
    slow_amp_shrinkage = 1 - (lam_slow / L) * (div / amp)
    slow_amp_value = T.switch(T.gt(slow_amp_shrinkage, 0), slow_amp_shrinkage, 0)
    slow_shrinkage_prox_a = slow_amp_value * features[:, ::2, :, :]
    slow_shrinkage_prox_b = slow_amp_value * features[:, 1::2, :, :]

    # subspace l1 shrinkage
    amp_slow_shrinkage_prox = T.sqrt(slow_shrinkage_prox_a ** 2 + slow_shrinkage_prox_b ** 2)
    #amp_shrinkage = 1. - (lam_slow*lam_sparse/L)*amp_slow_shrinkage_prox
    amp_shrinkage = 1. - (lam_sparse / L) / amp_slow_shrinkage_prox
    amp_value = T.switch(T.gt(amp_shrinkage, 0.), amp_shrinkage, 0.)
    subspacel1_prox = T.zeros_like(features)
    subspacel1_prox = T.set_subtensor(subspacel1_prox[:, ::2, :, :], amp_value * slow_shrinkage_prox_a)
    subspacel1_prox = T.set_subtensor(subspacel1_prox[:, 1::2, :, :], amp_value * slow_shrinkage_prox_b)

    reshape_subspacel1_prox = T.transpose(T.reshape(subspacel1_prox, (featshp[0], featshp[1]*featshp[2]*featshp[3]), ndim=2))
    return reshape_subspacel1_prox
Example 5: nn_param
def nn_param(params, input):
    from theano import tensor as T
    from matplotlib import pyplot as plt
    layers = len(params)
    if layers == 1:
        lnum = 0
        p = T.nnet.sigmoid(T.dot(input, params[lnum][0][1]) + params[lnum][1][1])
        y = T.nnet.sigmoid(T.dot(p, T.transpose(params[lnum][0][1])) + params[lnum][2][1])
        yval = y.eval()
        return yval
    for lnum in range(layers):
        if lnum == 0:
            p = T.nnet.sigmoid(T.dot(input, params[lnum][0][1]) + params[lnum][1][1])
            y = T.nnet.sigmoid(T.dot(p, T.transpose(params[lnum][0][1])) + params[lnum][2][1])
            yval = y.eval()
            plt.plot(yval, label='%d' % lnum)
        else:
            p = T.nnet.sigmoid(T.dot(yval, params[lnum][0][1]) + params[lnum][1][1])
            y = T.nnet.sigmoid(T.dot(p, T.transpose(params[lnum][0][1])) + params[lnum][2][1])
            yval = y.eval()
            plt.plot(yval)
    plt.legend()
    plt.show()
    return yval
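The T.dot(p, T.transpose(params[lnum][0][1])) call is the tied-weights autoencoder pattern: the decoder reuses the encoder's weight matrix transposed instead of learning a separate one. A minimal sketch of just that pattern (all names and shapes illustrative):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                                          # (n, 8) inputs
W = theano.shared(np.random.randn(8, 4).astype(theano.config.floatX), name='W')
b_h = theano.shared(np.zeros(4, dtype=theano.config.floatX), name='b_h')
b_v = theano.shared(np.zeros(8, dtype=theano.config.floatX), name='b_v')
h = T.nnet.sigmoid(T.dot(x, W) + b_h)                      # encode
x_hat = T.nnet.sigmoid(T.dot(h, T.transpose(W)) + b_v)     # decode with W^T (tied weights)
reconstruct = theano.function([x], x_hat)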
Example 6: nin
def nin(X, param):
    w1, w2, w3, b1, b2, b3 = param
    X = X.dimshuffle(0, 1, 'x', 2, 3)           # (n,32,1,r,c)
    w1 = w1.dimshuffle(0, 1, 2, 'x', 3, 4)      # (64,32,16,1,3,3)
    w2 = w2.dimshuffle(0, 1, 'x', 2, 'x', 'x')  # (64,32,1,16,1,1)
    w3 = w3.dimshuffle(0, 1, 2, 'x', 'x')       # (64,2,32,1,1)
    b1 = b1.dimshuffle(0, 1, 'x', 2, 'x', 'x')  # (64,32,1,16,1,1)
    b2 = b2.dimshuffle(0, 1, 'x', 2, 'x', 'x')  # (64,32,1,1,1,1)
    b3 = b3.dimshuffle(0, 'x', 1, 'x', 'x')     # (64,1,2,1,1)
    indexi = T.arange(w1.shape[0], dtype='int32')  # (0:64)
    indexi = T.repeat(indexi, w1.shape[1], axis=0)
    indexj = T.arange(w1.shape[1], dtype='int32')  # (0:64)
    indexj = T.tile(indexj, w1.shape[0])
    results, updates = scan(fn=metaOp1,
                            sequences=[indexi, indexj],
                            outputs_info=None,
                            non_sequences=[X, w1, w2, b1, b2],
                            strict=True)  # (64*32,n,1,r,c)
    metaShape1 = results.shape[-4], results.shape[-2], results.shape[-1]
    reshaped1 = results.reshape((w1.shape[0], w1.shape[1]) + metaShape1)  # (64,32,n,r,c)
    permuted1 = T.transpose(reshaped1, axes=(0, 2, 1, 3, 4))              # (64,n,32,r,c)
    indexi = T.arange(w1.shape[0], dtype='int32')  # (0:64)
    results, updates = scan(fn=metaOp2,
                            sequences=[indexi],
                            outputs_info=None,
                            non_sequences=[permuted1, w3, b3],
                            strict=True)  # (64,n,2,r,c)
    permuted2 = T.transpose(results, axes=(1, 0, 2, 3, 4))  # (n,64,2,r,c)
    metaShape2 = permuted2.shape[-2], permuted2.shape[-1]
    reshaped2 = permuted2.reshape((permuted2.shape[0], -1) + metaShape2)  # (n,128,r,c)
    return reshaped2
Example 7: _pooling_function
def _pooling_function(self, inputs, pool_size, strides, border_mode, dim_ordering):
    if pool_size[0] < -1:
        # k-max pooling
        input_layer = T.transpose(inputs, axes=(0, 1, 3, 2))
        sorted_values = T.argsort(input_layer, axis=3)
        topmax_indexes = sorted_values[:, :, :, -self.k:]
        # sort indexes so that we keep the correct order within the sentence
        topmax_indexes_sorted = T.sort(topmax_indexes)
        # given that topmax only gives the index of the third dimension, we need to generate the other 3 dimensions
        dim0 = T.arange(0, input_layer.shape[0]).repeat(input_layer.shape[1] * input_layer.shape[2] * self.k)
        dim1 = T.arange(0, input_layer.shape[1]).repeat(self.k * input_layer.shape[2]).reshape((1, -1)).repeat(
            input_layer.shape[0], axis=0).flatten()
        dim2 = T.arange(0, input_layer.shape[2]).repeat(self.k).reshape((1, -1)).repeat(
            input_layer.shape[0] * input_layer.shape[1], axis=0).flatten()
        dim3 = topmax_indexes_sorted.flatten()
        x = T.transpose(
            input_layer[dim0, dim1, dim2, dim3].reshape(
                (input_layer.shape[0], input_layer.shape[1], input_layer.shape[2], self.k)),
            axes=(0, 1, 3, 2))
        return x
    else:
        return super(MaxPooling2DWrapper, self)._pooling_function(inputs, pool_size, strides, border_mode, dim_ordering)
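The transpose-gather-transpose dance above keeps the k largest values along the sentence axis while preserving their original order (argsort finds the top-k positions, the second sort restores sentence order). A minimal sketch of the same trick on a 2-D input (names and shapes illustrative):

import numpy as np
import theano
import theano.tensor as T

k = 2
x = T.matrix('x')                             # (rows, time)
top = T.argsort(x, axis=1)[:, -k:]            # positions of the k largest values
idx = T.sort(top)                             # restore original (sentence) order
rows = T.arange(x.shape[0]).repeat(k).reshape((x.shape[0], k))
kmax = x[rows, idx]                           # gather, preserving order
f = theano.function([x], kmax)
a = np.array([[1., 5., 3., 4.], [9., 2., 8., 7.]], dtype=theano.config.floatX)
print(f(a))                                   # -> [[5. 4.] [9. 8.]]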
Example 8: sgru3
def sgru3(X, h, W, U, b, t):
    t = 0  # NOTE: the t argument is overridden here, so only the first parameter slice is ever used
    z_t = T.tanh(T.dot(X, W[t*2+0]) + b[t*2+0])
    r_t = (T.dot(h, U[t*2+0]) + b[t*2+1])
    z_t2 = (T.dot(X, W[t*2+1]) + b[t*2+2])
    r_t2 = T.tanh(T.dot(h, U[t*2+1]) + b[t*2+3])
    return T.tanh(T.dot(z_t*r_t, T.transpose(U[t*2+2])) + T.dot(z_t2*r_t2, T.transpose(U[t*2+3])))
Example 9: get_output_for
def get_output_for(self, input, **kwargs):
    '''
    Computes 2D FFT. Input layer must have dimension [n, 2, nx, ny]
    '''
    if self.is_3d:
        n, nc, nx, ny, nt = self.data_shape
        lin = T.transpose(input, axes=(0, 4, 1, 2, 3))
        lin = lin.reshape((-1, nc, nx, ny))
        lout, updates = theano.scan(self.transform, sequences=lin)
        lout = lout.reshape((-1, nt, nc, nx, ny))
        out = T.transpose(lout, axes=(0, 2, 3, 4, 1))
        return out

    out, updates = theano.scan(self.transform, sequences=input)
    return out
Example 10: T_l2_cost_conv
def T_l2_cost_conv(x, a, A, imshp, kshp, mask=True):
    """
    xsz*ysz*nchannels, nimages = x.shape
    xsz*ysz*nfeat, nimages = a.shape
    xsz*ysz*nchannels, nfeat = A.shape
    """
    # imshp   = num images, channels, szy, szx
    # kshp    = features, channels, szy, szx
    # featshp = num images, features, szy, szx
    featshp = (imshp[0], kshp[0], imshp[2] - kshp[2] + 1, imshp[3] - kshp[3] + 1)

    image = T.reshape(T.transpose(x), imshp)
    kernel = T.reshape(T.transpose(A), kshp)
    features = T.reshape(T.transpose(a), featshp)

    # Need to transpose first two dimensions of kernel, and reverse index kernel image dims (for correlation)
    kernel_rotated = T.transpose(kernel[:, :, ::-1, ::-1], axes=[1, 0, 2, 3])
    image_estimate = conv2d(features, kernel_rotated, border_mode='full')

    if mask:
        image_error_temp = image - image_estimate
        image_error = T.zeros_like(image_error_temp)
        image_error = T.set_subtensor(
            image_error[:, :, (kshp[2]-1):(imshp[2]-kshp[2]+1), (kshp[3]-1):(imshp[3]-kshp[3]+1)],
            image_error_temp[:, :, (kshp[2]-1):(imshp[2]-kshp[2]+1), (kshp[3]-1):(imshp[3]-kshp[3]+1)])
    else:
        image_error = image - image_estimate

    return .5 * T.sum(image_error ** 2)
Example 11: __init
def __init():
    dataset = T.matrix("dataset", dtype=config.globalFloatType())
    trans_dataset = T.transpose(dataset)
    dot_mul = T.dot(dataset, trans_dataset)        # all pairwise inner products
    l2 = T.sqrt(T.sum(T.square(dataset), axis=1))  # row-wise L2 norms
    l2_inv2 = T.inv(l2).dimshuffle(['x', 0])       # row vector of 1/||x_i||
    l2_inv1 = T.transpose(l2_inv2)                 # column vector of 1/||x_i||
    l2_inv = T.dot(l2_inv1, l2_inv2)               # outer product: 1/(||x_i|| ||x_j||)
    affinty = (T.mul(dot_mul, l2_inv) + 1) / 2     # cosine similarity rescaled to [0, 1]
    globals()['__affinty_fun'] = theano.function(
        [dataset],
        [affinty],
        allow_input_downcast=True
    )
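This builds a pairwise cosine-affinity matrix: dot_mul holds all inner products, l2_inv the outer product of inverse row norms, and (cos + 1) / 2 maps cosine values from [-1, 1] into [0, 1]. A quick numeric check of the same computation, written more directly (names illustrative):

import numpy as np
import theano
import theano.tensor as T

X = T.matrix('X')
norms = T.sqrt(T.sum(T.square(X), axis=1))
cos = T.dot(X, T.transpose(X)) / T.outer(norms, norms)
affinity = (cos + 1) / 2
f = theano.function([X], affinity, allow_input_downcast=True)
A = np.random.randn(4, 3)
expected = (A @ A.T / np.outer(np.linalg.norm(A, axis=1), np.linalg.norm(A, axis=1)) + 1) / 2
print(np.allclose(f(A), expected, atol=1e-5))  # -> True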
Example 12: kmaxpooling_output
def kmaxpooling_output(input):
    '''
    Implements k-max pooling:
        1. sort the values
        2. then take the top k of them
    :param k: keep the k highest values
    :type k: int
    :return:
    '''
    input = T.transpose(input, axes=(0, 1, 3, 2))
    sorted_values = T.argsort(input, axis=3)
    topmax_indexes = sorted_values[:, :, :, -k:]
    # sort indexes so that we keep the correct order within the sentence
    topmax_indexes_sorted = T.sort(topmax_indexes)
    # given that topmax only gives the index of the third dimension, we need to generate the other 3 dimensions
    dim0 = T.arange(0, input.shape[0]).repeat(input.shape[1] * input.shape[2] * k)
    dim1 = T.arange(0, input.shape[1]).repeat(k * input.shape[2]).reshape((1, -1)).repeat(input.shape[0],
                                                                                          axis=0).flatten()
    dim2 = T.arange(0, input.shape[2]).repeat(k).reshape((1, -1)).repeat(input.shape[0] * input.shape[1],
                                                                         axis=0).flatten()
    dim3 = topmax_indexes_sorted.flatten()
    return T.transpose(
        input[dim0, dim1, dim2, dim3].reshape((input.shape[0], input.shape[1], input.shape[2], k)),
        axes=(0, 1, 3, 2))
Example 13: categorical_crossentropy_segm
def categorical_crossentropy_segm(prediction_proba, targets):
    '''
    MODIFICATIONS:
        - reshape from image-size to array and back
    '''
    shape = T.shape(prediction_proba)
    pred_mod1 = T.transpose(prediction_proba, (0, 2, 3, 1))
    pred_mod = T.reshape(pred_mod1, (-1, shape[1]))
    if prediction_proba.ndim == targets.ndim:
        targ_mod1 = T.transpose(targets, (0, 2, 3, 1))
        targ_mod = T.reshape(targ_mod1, (-1, shape[1]))
    else:
        targ_mod = T.reshape(targets, (-1,))
    results = categorical_crossentropy(pred_mod, targ_mod)
    results = T.reshape(results, (shape[0], shape[2], shape[3]))

    # QUICK IMPLEMENTATION FOR TWO SPECIFIC CLASSES. NEEDS GENERALIZATION
    # Weights depending on class occurrence:
    weights = (1.02275, 44.9647)
    cars_indx, not_cars_indx = T.nonzero(targets), T.nonzero(T.eq(targets, 0))
    # set_subtensor returns a new variable, so its result must be reassigned
    # (float32 is assumed to be numpy.float32, imported elsewhere in the source module)
    results = T.set_subtensor(results[cars_indx], results[cars_indx] * float32(weights[1]))
    results = T.set_subtensor(results[not_cars_indx], results[not_cars_indx] * float32(weights[0]))
    return T.sum(results, axis=(1, 2))
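The transpose-then-reshape pair at the top is the standard way to turn per-pixel class scores of shape (batch, classes, H, W) into a (batch*H*W, classes) matrix that categorical_crossentropy expects. A minimal sketch of just that reshaping (illustrative names and shapes):

import numpy as np
import theano
import theano.tensor as T

p = T.tensor4('p')                                   # (n, classes, H, W)
flat = T.reshape(T.transpose(p, (0, 2, 3, 1)), (-1, p.shape[1]))
f = theano.function([p], flat)
a = np.zeros((2, 3, 4, 5), dtype=theano.config.floatX)
print(f(a).shape)                                    # -> (40, 3)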
Example 14: __init__
def __init__(self, rng, input, n_feature_maps, n_in, n_out, b_size=5, read_file=False, W=None, b=None):
    # input dim should be: batch_size x n_feature_maps x 504
    # n_in and n_out should be 504 and 40 respectively
    input = T.transpose(input, (1, 0, 2))
    self.input = input

    if read_file == False:
        W_values = np.asarray(
            rng.uniform(
                low=-np.sqrt(6. / (n_in + n_out)),
                high=np.sqrt(6. / (n_in + n_out)),
                size=(n_in, n_out)
            ),
            dtype=theano.config.floatX
        )
        W = theano.shared(value=W_values, name='W', borrow=True)
        b_values = np.zeros((n_out,), dtype=theano.config.floatX)
        b = theano.shared(value=b_values, name='b', borrow=True)

    self.W = W
    self.b = b

    embedding_list = []
    for i in range(n_feature_maps):
        embedding_list.append(T.tanh(T.dot(input[i], self.W) + self.b))
    self.output = T.concatenate(embedding_list, axis=0)
    self.output = T.reshape(self.output, (n_feature_maps, b_size, n_out))
    self.params = [self.W, self.b]

    self.input = T.transpose(self.input, (1, 0, 2))
    self.output = T.transpose(self.output, (1, 0, 2))
Example 15: _build_conditional
def _build_conditional(self, Xnew, pred_noise, diag, X, Xu, y, sigma, cov_total, mean_total):
    sigma2 = tt.square(sigma)
    Kuu = cov_total(Xu)
    Kuf = cov_total(Xu, X)
    Luu = cholesky(stabilize(Kuu))
    A = solve_lower(Luu, Kuf)
    Qffd = tt.sum(A * A, 0)
    if self.approx == "FITC":
        Kffd = cov_total(X, diag=True)
        Lamd = tt.clip(Kffd - Qffd, 0.0, np.inf) + sigma2
    else:  # VFE or DTC
        Lamd = tt.ones_like(Qffd) * sigma2
    A_l = A / Lamd
    L_B = cholesky(tt.eye(Xu.shape[0]) + tt.dot(A_l, tt.transpose(A)))
    r = y - mean_total(X)
    r_l = r / Lamd
    c = solve_lower(L_B, tt.dot(A, r_l))
    Kus = self.cov_func(Xu, Xnew)
    As = solve_lower(Luu, Kus)
    mu = self.mean_func(Xnew) + tt.dot(tt.transpose(As), solve_upper(tt.transpose(L_B), c))
    C = solve_lower(L_B, As)
    if diag:
        Kss = self.cov_func(Xnew, diag=True)
        var = Kss - tt.sum(tt.square(As), 0) + tt.sum(tt.square(C), 0)
        if pred_noise:
            var += sigma2
        return mu, var
    else:
        cov = (self.cov_func(Xnew) - tt.dot(tt.transpose(As), As) +
               tt.dot(tt.transpose(C), C))
        if pred_noise:
            cov += sigma2 * tt.identity_like(cov)
        return mu, stabilize(cov)
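In this last example every tt.transpose call is a plain matrix transpose: for a 2-D input, tt.transpose(A) is the same as A.T, so tt.dot(tt.transpose(As), As) is simply Asᵀ·As. A one-line sanity check (illustrative):

import numpy as np
import theano
import theano.tensor as tt

A = tt.matrix('A')
f = theano.function([A], tt.transpose(A))
m = np.arange(6).reshape(2, 3).astype(theano.config.floatX)
print(np.array_equal(f(m), m.T))  # -> True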