本文整理汇总了Python中theano.tensor.squeeze方法的典型用法代码示例。如果您正苦于以下问题:Python tensor.squeeze方法的具体用法?Python tensor.squeeze怎么用?Python tensor.squeeze使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块theano.tensor的用法示例。
在下文中一共展示了tensor.squeeze方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: squeeze
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import squeeze [as 别名]
def squeeze(x, axis):
    """Remove a 1-dimension from the tensor at index "axis".

    Implemented as a reshape to the original symbolic shape with the
    target axis deleted.
    """
    # TODO: `keras_shape` inference.
    new_shape = list(x.shape)
    del new_shape[axis]  # `del` handles negative axis indices like pop() did
    return T.reshape(x, tuple(new_shape))
示例2: squeeze
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import squeeze [as 别名]
def squeeze(x, axis):
    """Remove a 1-dimension from the tensor at index "axis".

    Marks the target axis as broadcastable first so that `T.squeeze`
    is able to drop it.
    """
    return T.squeeze(T.addbroadcast(x, axis))
示例3: squeeze
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import squeeze [as 别名]
def squeeze(x, axis):
    """Remove a 1-dimension from the tensor at index "axis".

    Also propagates the Keras static-shape metadata (`_keras_shape`)
    to the result when the input carries it.
    """
    symbolic_shape = list(x.shape)
    del symbolic_shape[axis]
    result = T.reshape(x, tuple(symbolic_shape))
    if hasattr(x, '_keras_shape'):
        static_shape = list(x._keras_shape)
        del static_shape[axis]
        result._keras_shape = tuple(static_shape)
    return result
示例4: squeeze
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import squeeze [as 别名]
def squeeze(x, axis):
    """Remove a 1-dimension from the tensor at index "axis".

    Uses a three-step broadcast-pattern dance: mark only the target
    axis broadcastable, squeeze it away, then restore the original
    broadcast pattern for the remaining axes.
    """
    # Broadcast pattern of the result: the input's pattern minus the axis.
    remaining_pattern = x.broadcastable[:axis] + x.broadcastable[axis+1:]
    # Force the target axis (and only it) to be broadcastable so that
    # T.squeeze will remove exactly that dimension.
    axis_only_mask = [dim == axis for dim in range(x.type.ndim)]
    out = T.patternbroadcast(x, axis_only_mask)
    out = T.squeeze(out)
    return T.patternbroadcast(out, remaining_pattern)
示例5: fwd
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import squeeze [as 别名]
def fwd(self, x, V, A, L):
    """Forward pass: build spectral descriptors and convolve them.

    Parameters
    ----------
    x : signal (per-vertex input functions; iterated column-wise below)
    V : eigenvectors (truncated to the first self.K columns)
    A : area (per-vertex weights; also defines the normalization rho)
    L : eigenvalues (truncated to the first self.K entries)

    Returns the activation of a 2D convolution over the descriptor
    stack plus bias.
    """
    # Keep only the first K spectral components.
    V = V[:,:self.K]
    L = L[:self.K]
    # Position of each eigenvalue relative to the spline evaluation
    # samples, in units of the sample spacing dEval.
    sampleLoc = (L.dimshuffle(0,'x') - self.evalSamples.dimshuffle('x',0)) / self.dEval
    basis = self.cubicBSpline(sampleLoc)
    basis = basis.dimshuffle('x',0,1)
    # Normalization factor: sqrt of the total area.
    rho = T.sqrt(T.sum(A))
    def step(f, beta, rho, A, V):
        # Spectral window: spline basis weighted by the beta coefficients.
        ghat = T.dot(basis, beta.squeeze()).flatten()
        # Windowed operator mapped back to the vertex domain.
        transl = rho * T.dot(V, ghat.dimshuffle(0,'x') * V.T)
        return rho * T.dot((V * f.dimshuffle(0,'x')).T, A.dimshuffle(0,'x') * transl) # N x K
    # Scan pairs each input function (column of x) with its beta set.
    desc, _ = theano.scan(fn=step, non_sequences=[rho,A,V],
                          sequences=[x.T,self.beta])
    desc = desc.dimshuffle(2,0,'x',1) # BC01 format : N x Q x 1 x K
    desc = T.abs_(desc)
    # 2D convolution over the descriptor stack, then bias + nonlinearity.
    return self.activation(theano.tensor.nnet.conv.conv2d(desc, self.W).flatten(2) + self.b)
示例6: fwd_old
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import squeeze [as 别名]
def fwd_old(self, x, V, A, L):
    """Older, batched variant of the forward pass (kept for reference).

    Parameters
    ----------
    x : signal
    V : eigenvectors (truncated to the first self.K columns)
    A : area (per-vertex weights; also defines the normalization rho)
    L : eigenvalues (truncated to the first self.K entries)

    Shape comments below use Q = self.nin (number of input functions),
    K = self.K (spectral components), N = number of vertices —
    assumed from the inline comments, not verifiable from here.
    """
    # Keep only the first K spectral components.
    V = V[:,:self.K]
    L = L[:self.K]
    # Position of each eigenvalue relative to the spline sample grid.
    sampleLoc = (L.dimshuffle(0,'x') - self.evalSamples.dimshuffle('x',0)) / self.dEval
    basis = self.cubicBSpline(sampleLoc)
    basis = basis.dimshuffle('x',0,1)
    # Normalization factor: sqrt of the total area.
    rho = T.sqrt(T.sum(A))
    # weight the basis columns for each input function to generate a ghat
    # Q x K, a window for each input function
    # NOTE(review): the [:,:,0] index drops the trailing axis explicitly
    # because T.squeeze reportedly did not work here — confirm why.
    ghat = T.squeeze(T.batched_dot(
        T.tile(basis, [self.nin, 1, 1]),
        self.beta)[:,:,0])
    # Q x K x N
    V_ = T.tile(V.dimshuffle('x',1,0), [self.nin, 1, 1])
    # Q x K x N
    tmp = (ghat.dimshuffle(0,'x',1) * V).dimshuffle(0,2,1)
    # Q x N x N
    transl = rho * T.batched_dot(V_.dimshuffle(0,2,1), tmp)
    transl = A.dimshuffle('x',0,'x') * transl
    # Q x K x N
    tmp = (V.dimshuffle(0,'x',1) * x.dimshuffle(0,1,'x')).dimshuffle(1,2,0)
    # Q x K x N
    desc = rho * T.batched_dot(tmp, transl)
    desc = T.abs_(desc)
    desc = desc.dimshuffle(2,0,'x',1) # BC01 format : N x Q x 1 x K
    # 2D convolution over the descriptor stack, then bias + nonlinearity.
    return self.activation(theano.tensor.nnet.conv.conv2d(desc, self.W).flatten(2) + self.b)
示例7: conv1d
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import squeeze [as 别名]
def conv1d(x, kernel, strides=1, padding='valid',
           data_format=None, dilation_rate=1):
    """1D convolution, implemented via a 2D convolution on an
    expanded tensor with a dummy spatial axis.

    # Arguments
        kernel: kernel tensor.
        strides: stride integer.
        padding: string, `"same"`, `"causal"` or `"valid"`.
        data_format: string, one of "channels_last", "channels_first"
        dilation_rate: integer.
    """
    data_format = normalize_data_format(data_format)
    kernel_shape = int_shape(kernel)
    if padding == 'causal':
        # Causal (dilated) convolution: pad on the left only, so an
        # output step never depends on future timesteps.
        if not kernel_shape:
            raise AttributeError('Causal padding requires kernel._keras_shape set.')
        left_pad = dilation_rate * (kernel_shape[0] - 1)
        x = temporal_padding(x, (left_pad, 0))
        padding = 'valid'
    shape = int_shape(x)
    # Decide where the dummy spatial axis goes and what the expanded
    # static shape looks like.
    if data_format == 'channels_last':
        # (batch, length, input_dim) -> (batch, length, 1, input_dim)
        dummy_axis = 2
        expanded = None if shape is None else (shape[0], shape[1], 1, shape[2])
    else:
        # (batch, input_dim, length) -> (batch, input_dim, length, 1)
        dummy_axis = 3
        expanded = None if shape is None else (shape[0], shape[1], shape[2], 1)
    x = expand_dims(x, dummy_axis)
    if expanded is not None:
        x._keras_shape = expanded
    # Promote the integer hyper-parameters to their 2D equivalents.
    dilation_rate = (dilation_rate, 1)
    strides = (strides, 1)
    # Kernel layout is (rows, 1, input_depth, depth) regardless of
    # data_format.
    kernel = expand_dims(kernel, 1)
    output = conv2d(x, kernel,
                    strides=strides, padding=padding,
                    data_format=data_format, dilation_rate=dilation_rate)
    # Drop the dummy axis again.
    return squeeze(output, dummy_axis)
示例8: conv1d
# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import squeeze [as 别名]
def conv1d(x, kernel, strides=1, padding='valid',
           data_format=None, dilation_rate=1):
    """1D convolution.

    # Arguments
        kernel: kernel tensor.
        strides: stride integer.
        padding: string, `"same"`, `"causal"` or `"valid"`.
        data_format: string, one of "channels_last", "channels_first"
        dilation_rate: integer.

    # Raises
        ValueError: if `data_format` is neither "channels_last" nor
            "channels_first".
        AttributeError: if causal padding is requested but the kernel
            carries no static `_keras_shape`.
    """
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        # BUG FIX: the message and the value were previously passed as two
        # separate arguments, so the exception carried a tuple instead of
        # a readable string.
        raise ValueError('Unknown data_format: ' + str(data_format))
    # getattr with a default replaces the hasattr/else chains.
    kernel_shape = getattr(kernel, '_keras_shape', None)
    if padding == 'causal':
        # causal (dilated) convolution: left-pad so each output step only
        # sees current and past inputs.
        if not kernel_shape:
            raise AttributeError('Causal padding requires kernel._keras_shape set.')
        left_pad = dilation_rate * (kernel_shape[0] - 1)
        x = temporal_padding(x, (left_pad, 0))
        padding = 'valid'
    shape = getattr(x, '_keras_shape', None)
    if data_format == 'channels_last':
        # original shape: (batch, length, input_dim)
        # add dim to x to have (batch, length, 1, input_dim)
        x = expand_dims(x, 2)
        # update x._keras_shape when the static shape is known
        if shape is not None:
            x._keras_shape = (shape[0], shape[1], 1, shape[2])
    else:
        # original shape: (batch, input_dim, length)
        # add dim to x to have (batch, input_dim, length, 1)
        x = expand_dims(x, 3)
        # update x._keras_shape when the static shape is known
        if shape is not None:
            x._keras_shape = (shape[0], shape[1], shape[2], 1)
    # update dilation rate, strides to their 2D equivalents
    dilation_rate = (dilation_rate, 1)
    strides = (strides, 1)
    # add dim to kernel (always same format independently of data_format)
    # i.e. (rows, 1, input_depth, depth)
    kernel = expand_dims(kernel, 1)
    output = conv2d(x, kernel,
                    strides=strides, padding=padding,
                    data_format=data_format, dilation_rate=dilation_rate)
    # remove the dummy dim added above
    if data_format == 'channels_last':
        output = squeeze(output, 2)
    else:
        output = squeeze(output, 3)
    return output