This article collects typical usage examples of the Python method theano.tensor.nnet.conv2d. If you are wondering how nnet.conv2d is used in practice and what it can do, the curated code examples below may help. You can also explore the containing module, theano.tensor.nnet, for further usage.
The following 15 code examples of nnet.conv2d are shown, ordered by popularity by default.
Example 1: local_mean_subtraction
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def local_mean_subtraction(input, kernel_size=5):
    input_shape = (input.shape[0], 1, input.shape[1], input.shape[2])
    input = input.reshape(input_shape).astype(floatX)
    X = T.tensor4(dtype=floatX)
    filter_shape = (1, 1, kernel_size, kernel_size)
    filters = mean_filter(kernel_size).reshape(filter_shape)
    filters = shared(_asarray(filters, dtype=floatX), borrow=True)
    mean = conv2d(input=X,
                  filters=filters,
                  input_shape=input.shape,
                  filter_shape=filter_shape,
                  border_mode='half')
    new_X = X - mean
    f = function([X], new_X)
    return f(input)
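Example 1 depends on a mean_filter helper and a few module-level names (floatX, T, shared, _asarray, function) that are not shown in the snippet. A minimal sketch of what mean_filter presumably is (a uniform averaging kernel), plus a hypothetical call, assuming the original module is available:

import numpy as np

def mean_filter(kernel_size):
    # Uniform averaging kernel: every tap weighs 1/(k*k), so convolving with it
    # produces the local mean over a kernel_size x kernel_size window.
    return np.ones((kernel_size, kernel_size), dtype='float32') / (kernel_size ** 2)

# Hypothetical usage: subtract the 5x5 local mean from a stack of grayscale images.
images = np.random.rand(10, 28, 28).astype('float32')   # (n_images, height, width)
centered = local_mean_subtraction(images, kernel_size=5)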
Example 2: get_output
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def get_output(self, input, **kwargs):
    var_shape = kwargs.get('var_shape', False)
    if var_shape:
        input_shape = None
    else:
        input_shape = self.input_shape
    lin_output = conv2d(
        input=input,
        filters=self.W,
        filter_shape=self.filter_shape,
        border_mode=self.mode,
        subsample=self.subsample,
        input_shape=input_shape
    )
    if self.batch_norm:
        lin_output = self.bn_layer.get_output(lin_output)
    elif not self.no_bias:
        lin_output += self.b.dimshuffle('x', 0, 'x', 'x')
    return self.activation(lin_output)
Example 3: predict
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def predict(self, new_data, batch_size):
    """
    Predict for new data.
    """
    img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W,
                           filter_shape=self.filter_shape, image_shape=img_shape)
    if self.non_linear == "tanh":
        conv_out_tanh = Tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = myMaxPool(conv_out_tanh, ps=self.poolsize, method=self.max_pool_method)
    elif self.non_linear == "relu":
        conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = myMaxPool(conv_out_tanh, ps=self.poolsize, method=self.max_pool_method)
    else:
        pooled_out = myMaxPool(conv_out, ps=self.poolsize, method=self.max_pool_method)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
# ========================================================================================
Example 4: __init__
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def __init__(self, input, output_maps, input_maps, filter_height, filter_width, poolsize=(2, 2)):
    self.input = input
    # Uniform initialization bound based on fan-in and (pooled) fan-out.
    self.bound = np.sqrt(6. / (input_maps * filter_height * filter_width +
                               output_maps * filter_height * filter_width // np.prod(poolsize)))
    self.w = theano.shared(np.asarray(
        np.random.uniform(low=-self.bound, high=self.bound,
                          size=(output_maps, input_maps, filter_height, filter_width)),
        dtype=input.dtype))
    self.b = theano.shared(np.asarray(
        np.random.uniform(low=-.5, high=.5, size=(output_maps,)),
        dtype=input.dtype))
    self.conv_out = conv2d(input=self.input, filters=self.w)
    self.pooled_out = downsample.max_pool_2d(self.conv_out, ds=poolsize, ignore_border=True)
    self.output = T.tanh(self.pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Example 5: __init__
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def __init__(self, input, output_maps, input_maps, filter_height, filter_width, maxpool=None):
    self.input = input
    self.w = theano.shared(self.ortho_weights(output_maps, input_maps, filter_height, filter_width), borrow=True)
    self.b = theano.shared(np.zeros((output_maps,), dtype=theano.config.floatX), borrow=True)
    self.conv_out = conv2d(input=self.input, filters=self.w, border_mode='half')
    if maxpool:
        self.conv_out = downsample.max_pool_2d(self.conv_out, ds=maxpool, ignore_border=True)
    self.output = T.nnet.elu(self.conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
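Example 5 calls a self.ortho_weights helper that is not included in the snippet. Below is a plausible sketch of such an orthogonal initializer (an assumption, not the original author's code), written as a standalone function:

import numpy as np
import theano

def ortho_weights(output_maps, input_maps, filter_height, filter_width):
    # Draw a Gaussian matrix, orthogonalize it via SVD, and reshape it into a
    # (output_maps, input_maps, filter_height, filter_width) filter bank.
    fan_in = input_maps * filter_height * filter_width
    flat = np.random.randn(output_maps, fan_in)
    u, _, vh = np.linalg.svd(flat, full_matrices=False)
    q = u if u.shape == flat.shape else vh   # pick the factor with the right shape
    q = q.reshape(output_maps, input_maps, filter_height, filter_width)
    return q.astype(theano.config.floatX)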
Example 6: conv_encoder
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def conv_encoder(tparams, state_below, options, prefix='conv_enc',
                 one_step=False, init_state=None, width=None, nkernels=None,
                 pool_window=None, pool_stride=None, **kwargs):
    # state_below : maxlen X n_samples X dim_word_src
    # mask : maxlen X n_samples
    # data = (n_samples, dim, maxlen, 1)
    # kernel = (nkernels, dim, width, 1)
    maxlen = state_below.shape[0]
    n_samples = state_below.shape[1]
    dim = state_below.shape[2]
    data = state_below.dimshuffle(1, 2, 0, 'x')
    # data : n_samples X dim X maxlen X 1
    W = tparams[_p(prefix, 'convW')]
    b = tparams[_p(prefix, 'convB')]
    #conv_out = dnn_conv(data, W, border_mode='valid', subsample=(stride,1), precision='float32')
    output = dnn_conv(data, W, border_mode='half', precision='float32')
    #conv_out = conv2d(data, W, border_mode='valid')
    #conv_out = conv2d(data, W, input_shape=(8, 256, 450, 1), filter_shape=(64, 1, 4, 1), border_mode='valid')
    if width % 2 == 0:
        # 'half' convolution with an even filter width yields one extra output
        # position, so discard the last one (see Example 7 for the same trimming).
        output = output[:, :, :-1, :]
    output = tensor.nnet.relu(output + b.dimshuffle('x', 0, 'x', 'x'))
    output = dnn_pool(output, (pool_window, 1), stride=(pool_stride, 1), mode='max', pad=(0, 0))
    #output = tensor.nnet.sigmoid(conv_out)
    # output : n_samples X nkernels X (maxlen-width+1) X 1
    #output = output.dimshuffle(2,0,1,3).squeeze()
    output = output.dimshuffle(2, 0, 1, 3)[:, :, :, 0]
    # NOTE: when we pass 1 or 2 instead of 0, we get IndexError: index out of bounds;
    # not sure why squeeze wouldn't work, though.
    # output : (maxlen-width+1) X n_samples X nkernels
    return output
# emb : maxlen X n_samples X dim_word_src
Example 7: multi_scale_conv_encoder
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def multi_scale_conv_encoder(tparams, state_below, options, prefix='conv_enc',
                             one_step=False, init_state=None, width=None, nkernels=None,
                             pool_window=None, pool_stride=None, **kwargs):
    # state_below.shape = (maxlen_x_pad + 2*pool_stride, n_samples, dim_word_src)
    # mask.shape = (maxlen_x_pad/pool_stride, n_samples)
    assert len(width) == len(nkernels)
    data = state_below.dimshuffle(1, 2, 0, 'x')
    # data.shape = (n_samples, dim_word_src, maxlen_x_pad + 2*pool_stride, 1)
    W = [tparams[_p(prefix, 'convW') + str(idx)] for idx in range(len(width))]
    b = [tparams[_p(prefix, 'convB') + str(idx)] for idx in range(len(width))]
    output = []
    for idx in range(len(width)):
        curr_width = width[idx]
        output.append(dnn_conv(data, W[idx], border_mode='half', precision='float32'))
        # output[idx].shape = (n_samples, nkernels[idx], maxlen_x_pad + 2*pool_stride, 1)
        if curr_width % 2 == 0:
            # For filters with an even width, half convolution yields an output one
            # position longer than the input, so the last position is discarded here.
            # For details, see http://deeplearning.net/software/theano/library/tensor/nnet/conv.html#theano.tensor.nnet.conv2d
            output[idx] = output[idx][:, :, :-1, :]
        output[idx] = tensor.nnet.relu(output[idx] + b[idx].dimshuffle('x', 0, 'x', 'x'))
    result = tensor.concatenate(output, axis=1)
    # result.shape = (n_samples, sum(nkernels), maxlen_x_pad + 2*pool_stride, 1)
    result = dnn_pool(result, (pool_window, 1), stride=(pool_stride, 1), mode='max', pad=(0, 0))
    # result.shape = (n_samples, sum(nkernels), maxlen_x_pad/pool_stride + 2, 1)
    result = result.dimshuffle(2, 0, 1, 3)[1:-1, :, :, 0]
    # Drop the first and last positions and shuffle back to time-major layout.
    # result.shape = (maxlen_x_pad/pool_stride, n_samples, sum(nkernels))
    return result
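As a quick check of the even-width trimming used in Examples 6 and 7: with Theano's 'half' border mode the output length along the convolved axis is in_len + 2*(w // 2) - w + 1, which equals in_len for odd w and in_len + 1 for even w, hence the trailing position is dropped. A small sketch of that arithmetic:

def half_mode_output_length(in_len, w):
    # 'half' mode pads w // 2 zeros on each side before a valid convolution.
    return in_len + 2 * (w // 2) - w + 1

for w in (3, 4, 5, 6):
    print(w, half_mode_output_length(in_len=10, w=w))
# odd widths give 10 (same length as the input); even widths give 11, one longer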
Example 8: conv1d_sc
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def conv1d_sc(input, filters, input_shape=None, filter_shape=None,
              border_mode='valid', subsample=(1,)):
    """
    Using conv2d with a single input channel.
    border_mode has to be 'valid' at the moment.
    """
    if border_mode != 'valid':
        log.error("Unsupported border_mode for conv1d_sc: "
                  "%s" % border_mode)
        raise RuntimeError("Unsupported border_mode for conv1d_sc: "
                           "%s" % border_mode)
    image_shape = input_shape
    if image_shape is None:
        image_shape_sc = None
    else:
        # (b, c, i0) to (b, 1, c, i0)
        image_shape_sc = (image_shape[0], 1, image_shape[1], image_shape[2])
    if filter_shape is None:
        filter_shape_sc = None
    else:
        filter_shape_sc = (filter_shape[0], 1, filter_shape[1],
                           filter_shape[2])
    input_sc = input.dimshuffle(0, 'x', 1, 2)
    # We need to flip the channels dimension because it will be convolved over.
    filters_sc = filters.dimshuffle(0, 'x', 1, 2)[:, :, ::-1, :]
    conved = conv2d(input_sc, filters_sc, input_shape=image_shape_sc,
                    filter_shape=filter_shape_sc,
                    subsample=(1, subsample[0]))
    return conved[:, :, 0, :]  # drop the unused dimension
Example 9: conv1d_mc0
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def conv1d_mc0(input, filters, input_shape=None, filter_shape=None,
               border_mode='valid', subsample=(1,)):
    """
    Using conv2d with width == 1.
    """
    image_shape = input_shape
    if image_shape is None:
        image_shape_mc0 = None
    else:
        # (b, c, i0) to (b, c, 1, i0)
        image_shape_mc0 = (image_shape[0], image_shape[1], 1, image_shape[2])
    if filter_shape is None:
        filter_shape_mc0 = None
    else:
        filter_shape_mc0 = (filter_shape[0], filter_shape[1], 1,
                            filter_shape[2])
    input_mc0 = input.dimshuffle(0, 1, 'x', 2)
    filters_mc0 = filters.dimshuffle(0, 1, 'x', 2)
    conved = conv2d(
        input_mc0, filters_mc0, input_shape=image_shape_mc0,
        filter_shape=filter_shape_mc0, subsample=(1, subsample[0]),
        border_mode=border_mode)
    return conved[:, :, 0, :]  # drop the unused dimension
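A hedged usage sketch for conv1d_mc0 (shapes and names chosen here for illustration, assuming Theano is installed and the function above is in scope): the output of a 'valid' convolution should have length - filter_length + 1 positions.

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')   # (batch, channels, length)
w = T.tensor3('w')   # (num_filters, channels, filter_length)
y = conv1d_mc0(x, w, border_mode='valid')
f = theano.function([x, w], y)

x_val = np.random.rand(4, 3, 20).astype(theano.config.floatX)
w_val = np.random.rand(8, 3, 5).astype(theano.config.floatX)
print(f(x_val, w_val).shape)   # expected: (4, 8, 16), i.e. 20 - 5 + 1 positions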
Example 10: conv1d_mc1
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def conv1d_mc1(input, filters, input_shape=None, filter_shape=None,
               border_mode='valid', subsample=(1,)):
    """
    Using conv2d with height == 1.
    """
    image_shape = input_shape
    if image_shape is None:
        image_shape_mc1 = None
    else:
        # (b, c, i0) to (b, c, i0, 1)
        image_shape_mc1 = (image_shape[0], image_shape[1], image_shape[2], 1)
    if filter_shape is None:
        filter_shape_mc1 = None
    else:
        filter_shape_mc1 = (filter_shape[0], filter_shape[1],
                            filter_shape[2], 1)
    input_mc1 = input.dimshuffle(0, 1, 2, 'x')
    filters_mc1 = filters.dimshuffle(0, 1, 2, 'x')
    conved = conv2d(
        input_mc1, filters_mc1, input_shape=image_shape_mc1,
        filter_shape=filter_shape_mc1, subsample=(subsample[0], 1),
        border_mode=border_mode)
    return conved[:, :, :, 0]  # drop the unused dimension
Example 11: conv1d
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def conv1d(sequences, masks, **kwargs):
    """Wraps Theano conv2d to perform 1D convolution.

    Parameters
    ----------
    sequences : :class:`~theano.Variable`
        (batch_size, length)
    masks : :class:`~theano.Variable`
        (num_filters, filter_length)
    **kwargs
        Will be passed to `conv2d`.

    Returns
    -------
    result : :class:`~theano.Variable`
        (batch_size, num_filters, position)

    """
    # For testability
    sequences = tensor.as_tensor_variable(sequences)
    masks = tensor.as_tensor_variable(masks)
    image = sequences.dimshuffle('x', 'x', 0, 1)
    filters = masks.dimshuffle(0, 'x', 'x', 1)
    result = conv2d(image, filters, **kwargs)
    # Now the number of rows is the actual batch size.
    result = result.dimshuffle(2, 1, 3, 0)
    return result.reshape(result.shape[:-1], ndim=3)
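A hedged usage sketch for the conv1d wrapper above (shapes chosen for illustration, assuming the function is in scope): convolve a batch of 1D sequences with a bank of filters and check the (batch_size, num_filters, position) output.

import numpy as np
import theano
import theano.tensor as T

seqs = T.matrix('seqs')     # (batch_size, length)
filts = T.matrix('filts')   # (num_filters, filter_length)
out = conv1d(seqs, filts, border_mode='valid')
f = theano.function([seqs, filts], out)

s = np.random.rand(6, 30).astype(theano.config.floatX)
m = np.random.rand(4, 7).astype(theano.config.floatX)
print(f(s, m).shape)        # expected: (6, 4, 24), i.e. 30 - 7 + 1 positions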
Example 12: _train_fprop
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def _train_fprop(self, state_below):
    conv_out = conv2d(state_below, self.W,
                      border_mode=self.border_mode,
                      subsample=self.stride)
    return conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
Example 13: conv
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def conv(x, w, b=None):
    s = int(np.floor(w.get_value().shape[-1] / 2.))
    # 'full' convolution followed by symmetric cropping of s positions on each side,
    # which preserves the input's spatial size for odd, square filters.
    z = conv2d(x, w, border_mode='full')[:, :, s:-s, s:-s]
    if b is not None:
        z += b.dimshuffle('x', 0, 'x', 'x')
    return z
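A quick, hedged check of the cropping logic in Example 13 (shapes below are illustrative, assuming the function above is in scope): with an odd, square filter the output keeps the input's spatial size.

import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')                                    # (batch, channels, height, width)
w = theano.shared(np.random.randn(2, 1, 3, 3).astype(theano.config.floatX))
b = theano.shared(np.zeros(2, dtype=theano.config.floatX))
f = theano.function([x], conv(x, w, b))

x_val = np.random.rand(1, 1, 8, 8).astype(theano.config.floatX)
print(f(x_val).shape)                                 # expected: (1, 2, 8, 8)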
Example 14: build
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def build(self, input_x, poolsize=(2, 2)):
    # convolve input feature maps with filters
    conv_out = tnnet.conv2d(input=input_x, filters=self.W, filter_shape=self.filter_shape)
    if self.non_linear == "tanh":
        conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output = myMaxPool(conv_out_tanh, ps=self.poolsize, method=self.max_pool_method)
    elif self.non_linear == "relu":
        conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output = myMaxPool(conv_out_tanh, ps=self.poolsize, method=self.max_pool_method)
    else:
        pooled_out = myMaxPool(conv_out, ps=self.poolsize, method=self.max_pool_method)
        self.output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
Example 15: test_conv
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import conv2d [as alias]
def test_conv(self):
    for conv_op in [conv.conv2d, conv2d]:
        for border_mode in ['valid', 'full']:
            image_shape = (2, 2, 4, 5)
            filter_shape = (2, 2, 2, 3)
            image_dim = len(image_shape)
            filter_dim = len(filter_shape)
            input = tensor.TensorType(
                theano.config.floatX,
                [False] * image_dim)(name='input')
            filters = tensor.TensorType(
                theano.config.floatX,
                [False] * filter_dim)(name='filter')
            ev_input = tensor.TensorType(
                theano.config.floatX,
                [False] * image_dim)(name='ev_input')
            ev_filters = tensor.TensorType(
                theano.config.floatX,
                [False] * filter_dim)(name='ev_filters')

            def sym_conv2d(input, filters):
                return conv_op(input, filters, border_mode=border_mode)

            output = sym_conv2d(input, filters).flatten()
            yv = tensor.Rop(output, [input, filters], [ev_input, ev_filters])
            rop_f = function([input, filters, ev_input, ev_filters],
                             yv, on_unused_input='ignore')
            sy, _ = theano.scan(
                lambda i, y, x1, x2, v1, v2:
                    (tensor.grad(y[i], x1) * v1).sum() +
                    (tensor.grad(y[i], x2) * v2).sum(),
                sequences=tensor.arange(output.shape[0]),
                non_sequences=[output, input, filters,
                               ev_input, ev_filters])
            scan_f = function([input, filters, ev_input, ev_filters], sy,
                              on_unused_input='ignore')

            dtype = theano.config.floatX
            image_data = numpy.random.random(image_shape).astype(dtype)
            filter_data = numpy.random.random(filter_shape).astype(dtype)
            ev_image_data = numpy.random.random(image_shape).astype(dtype)
            ev_filter_data = numpy.random.random(filter_shape).astype(dtype)
            v1 = rop_f(image_data, filter_data, ev_image_data, ev_filter_data)
            v2 = scan_f(image_data, filter_data, ev_image_data, ev_filter_data)
            assert numpy.allclose(v1, v2), ("Rop mismatch: %s %s" % (v1, v2))