This page collects typical usage examples of the Python method theano.sandbox.cuda.dnn.dnn_pool. If you are unsure what dnn.dnn_pool does or how to call it, the curated examples below should help; you can also explore the containing module, theano.sandbox.cuda.dnn, for more context.
The following presents 10 code examples of dnn.dnn_pool, drawn from open-source projects and ordered by popularity by default.
Example 1: local_response_normalization_2d_dnn
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_pool [as alias]
def local_response_normalization_2d_dnn(in_vw, alpha, k, beta, n):
"""
using cudnn mean pooling
"""
from theano.sandbox.cuda import dnn
assert n % 2 == 1, "n must be odd"
in_var = in_vw.variable
b, ch, r, c = in_vw.symbolic_shape()
squared = T.sqr(in_var)
reshaped = squared.reshape((b, 1, ch, r * c))
pooled = dnn.dnn_pool(img=reshaped,
ws=(n, 1),
stride=(1, 1),
pad=(n // 2, 0),
mode="average_inc_pad")
unreshaped = pooled.reshape((b, ch, r, c))
# multiply by n, since we did a mean pool instead of a sum pool
return in_var / (((alpha * n) * unreshaped + k) ** beta)
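The multiplication by n works because a sum over a window of n channels equals n times the mean that dnn_pool computes in "average_inc_pad" mode, recovering the usual LRN denominator. A minimal NumPy reference of the same cross-channel normalization (function name and layout are illustrative, not from the snippet), handy for checking the trick on CPU:

import numpy as np

def lrn_2d_reference(x, alpha, k, beta, n):
    # x: (batch, channels, rows, cols); divide each value by the summed
    # squares of the n nearest channels, zero-padded at the channel ends
    sq = np.square(x)
    padded = np.pad(sq, ((0, 0), (n // 2, n // 2), (0, 0), (0, 0)))
    summed = np.empty_like(x)
    for i in range(x.shape[1]):
        summed[:, i] = padded[:, i:i + n].sum(axis=1)
    return x / (alpha * summed + k) ** beta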
Example 2: conv_encoder
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_pool [as alias]
def conv_encoder(tparams, state_below, options, prefix='conv_enc',
one_step=False, init_state=None, width=None, nkernels=None, pool_window=None, pool_stride=None, **kwargs):
# state_below : maxlen X n_samples X dim_word_src
# mask : maxlen X n_samples
# data = (n_samples, dim, maxlen, 1)
# kernel = (nkernels, dim, width, 1)
maxlen = state_below.shape[0]
n_samples = state_below.shape[1]
dim = state_below.shape[2]
data = state_below.dimshuffle(1,2,0,'x')
# data : n_samples X dim X maxlen X 1
W = tparams[_p(prefix, 'convW')]
b = tparams[_p(prefix, 'convB')]
#conv_out = dnn_conv(data, W, border_mode='valid', subsample=(stride,1), precision='float32')
output = dnn_conv(data, W, border_mode='half', precision='float32')
#conv_out = conv2d(data, W, border_mode='valid')
#conv_out = conv2d(data, W, input_shape=(8, 256, 450, 1), filter_shape=(64, 1, 4, 1), border_mode='valid')
    if width % 2 == 0:
        # 'half' convolution adds one extra step for even widths; drop it
        # (see the shape sketch after Example 3)
        output = output[:,:,:-1,:]
output = tensor.nnet.relu(output + b.dimshuffle('x',0,'x','x'))
output = dnn_pool(output, (pool_window, 1), stride=(pool_stride, 1), mode='max', pad=(0, 0))
#output = tensor.nnet.sigmoid(conv_out)
    # output : n_samples X nkernels X pooled_len X 1,
    # where pooled_len = (maxlen - pool_window) // pool_stride + 1
    #output = output.dimshuffle(2,0,1,3).squeeze()
    output = output.dimshuffle(2,0,1,3)[:,:,:,0]
    # NOTE : indexing the last axis with 1 or 2 instead of 0 raises
    # IndexError: index out of bounds; not sure why squeeze wouldn't work, though
    # output : pooled_len X n_samples X nkernels
return output
Example 3: multi_scale_conv_encoder
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_pool [as alias]
def multi_scale_conv_encoder(tparams, state_below, options, prefix='conv_enc',
one_step=False, init_state=None, width=None, nkernels=None, pool_window=None, pool_stride=None, **kwargs):
# state_below.shape = (maxlen_x_pad + 2*pool_stride, n_samples, dim_word_src)
# mask.shape = (maxlen_x_pad/pool_stride, n_samples)
assert len(width) == len(nkernels)
data = state_below.dimshuffle(1,2,0,'x')
# data.shape = (n_samples, dim_word_src, maxlen_x_pad + 2*pool_stride, 1)
W = [tparams[_p(prefix, 'convW')+str(idx)] for idx in range(len(width))]
b = [tparams[_p(prefix, 'convB')+str(idx)] for idx in range(len(width))]
output = []
for idx in range(len(width)):
curr_width = width[idx]
output.append(dnn_conv(data, W[idx], border_mode='half', precision='float32'))
# output[idx].shape = (n_samples, nkernels[idx], (maxlen_x_pad + 2*pool_stride), 1)
        if curr_width % 2 == 0:
            # For even filter widths, 'half' convolution yields an output one step
            # longer than the input, so discard the last position. For details, see:
            # http://deeplearning.net/software/theano/library/tensor/nnet/conv.html#theano.tensor.nnet.conv2d
            output[idx] = output[idx][:,:,:-1,:]
output[idx] = tensor.nnet.relu(output[idx] + b[idx].dimshuffle('x',0,'x','x'))
result = tensor.concatenate(output, axis=1)
# result.shape = (n_samples, sum(nkernels), (maxlen_x_pad + 2*pool_stride), 1)
result = dnn_pool(result, (pool_window, 1), stride=(pool_stride, 1), mode='max', pad=(0, 0))
# result.shape = (n_samples, sum(nkernels), (maxlen_x_pad/pool_stride + 2), 1)
result = result.dimshuffle(2,0,1,3)[1:-1,:,:,0]
# We get rid of the first and the last result and shuffle.
# result.shape = (maxlen_x_pad/pool_stride, n_samples, sum(nkernels))
return result
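The even-width trim in Examples 2 and 3 follows from the 'half' convolution shape rule: Theano pads width // 2 zeros on each side, so the output length is in_len + 2*(width // 2) - width + 1, i.e. in_len for odd widths and in_len + 1 for even ones. A plain-Python check:

def half_conv_out_len(in_len, width):
    # border_mode='half' pads width // 2 on each side of the axis
    return in_len + 2 * (width // 2) - width + 1

for w in (3, 4, 5, 6):
    print(w, half_conv_out_len(100, w))  # odd w -> 100, even w -> 101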
Example 4: model
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_pool [as alias]
def model(X,
h2_u, h3_u,
h2_s, h3_s,
w, w2, g2, b2, w3, g3, b3, wy
):
h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2, u=h2_u, s=h2_s))
h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3, u=h3_u, s=h3_s))
h = T.flatten(dnn_pool(h, (4, 4), (4, 4), mode='max'), 2)
h2 = T.flatten(dnn_pool(h2, (2, 2), (2, 2), mode='max'), 2)
h3 = T.flatten(dnn_pool(h3, (1, 1), (1, 1), mode='max'), 2)
f = T.concatenate([h, h2, h3], axis=1)
return [f]
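The pool sizes (4, 4), (2, 2), (1, 1) are matched to the stride-2 convolutions so that all three feature maps are flattened over the same spatial grid before concatenation. A shape walk-through under an assumed 32x32 input (the input size is not fixed by the snippet):

# Assumed input: (batch, channels, 32, 32); each dnn_conv uses subsample=(2, 2),
# so the spatial side halves per layer: 32 -> 16 -> 8 -> 4.
def pooled_side(side, pool):
    return side // pool  # non-overlapping pool: window == stride

print(pooled_side(16, 4))  # h  -> 4
print(pooled_side(8, 2))   # h2 -> 4
print(pooled_side(4, 1))   # h3 -> 4: all three flatten over a 4x4 grid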
Example 5: compute_output
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_pool [as alias]
def compute_output(self, network, in_vw):
mode = network.find_hyperparameter(["mode"])
pool_size = network.find_hyperparameter(["pool_size"])
dim = len(pool_size)
# works for sizes 2 and 3
assert dim in [2, 3]
stride = network.find_hyperparameter(["pool_stride",
"stride"],
None)
if stride is None:
stride = pool_size
pad = network.find_hyperparameter(["pool_pad", "pad"], (0,) * dim)
assert dim == len(stride) == len(pad)
if dim == 2:
pool_axes = (2, 3)
elif dim == 3:
pool_axes = (2, 3, 4)
out_shape = downsample.pool_output_shape(
input_shape=in_vw.shape,
axes=pool_axes,
pool_shape=pool_size,
strides=stride,
pads=pad)
out_var = dnn.dnn_pool(img=in_vw.variable,
ws=pool_size,
stride=stride,
pad=pad,
mode=mode)
network.create_vw(
"default",
variable=out_var,
shape=out_shape,
tags={"output"},
)
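Here downsample.pool_output_shape precomputes the static shape stored alongside the symbolic output; on each pooled axis, dnn_pool itself follows the standard cuDNN rule. A one-line sketch of that rule (not taken from the snippet):

def pool_out_dim(in_dim, ws, stride, pad):
    # cuDNN pooling output size for one axis
    return (in_dim + 2 * pad - ws) // stride + 1

print(pool_out_dim(32, 3, 2, 1))  # -> 16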
Example 6: get_output_for
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_pool [as alias]
def get_output_for(self, input, *args, **kwargs):
if not dnn_available:
raise RuntimeError("cudnn is not available.")
return dnn.dnn_pool(input, self.ds, self.strides, self.mode)
Example 7: pool_dnn
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_pool [as alias]
def pool_dnn(bc01, pool_shape, pool_stride, mode='max'):
"""
cuDNN pooling op.
Parameters
----------
bc01 : theano tensor
Minibatch in format (batch size, channels, rows, cols).
pool_shape : tuple
Shape of the pool region (rows, cols).
pool_stride : tuple
Strides between pooling regions (row stride, col stride).
mode : str
Flag for `mean` or `max` pooling.
Returns
-------
mx : theano tensor
The output of pooling applied to `bc01`.
"""
assert mode in ['max', 'mean']
if mode == 'mean':
raise NotImplementedError('Mean pooling is not implemented '
'in Pylearn2 using cuDNN as of '
'January 19th, 2015.')
mx = dnn_pool(bc01, tuple(pool_shape), tuple(pool_stride), mode)
return mx
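A hypothetical call site for pool_dnn (this sketch assumes a CUDA-enabled Theano with cuDNN available; compiling it on a CPU-only install will fail):

import theano
import theano.tensor as T
from theano.sandbox.cuda.dnn import dnn_pool

bc01 = T.tensor4('bc01')                        # (batch, channels, rows, cols)
pooled = pool_dnn(bc01, (2, 2), (2, 2), 'max')  # non-overlapping 2x2 max pool
f = theano.function([bc01], pooled)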
Example 8: get_output_for
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_pool [as alias]
def get_output_for(self, input, *args, **kwargs):
out = dnn.dnn_pool(T.sqr(input), self.pool_size, self.stride,
'average')
return T.sqrt(out + self.epsilon)
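This is L2 (root-mean-square) pooling: the square root of an average pool over squared activations, with epsilon guarding the gradient at zero. A NumPy check for a single 2x2 window (values are illustrative):

import numpy as np

window = np.array([3.0, 4.0, 0.0, 0.0])  # one flattened 2x2 pooling window
epsilon = 1e-12
print(np.sqrt(np.mean(np.square(window)) + epsilon))  # 2.5 = sqrt(25 / 4)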
Example 9: __init__
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_pool [as alias]
def __init__(self, inputs=None, size=(1, 1), stride=None, pad=(0, 0), mode='max', ignore_border=True):
"""
Parameters
----------
inputs : tuple(shape, `Theano.TensorType`)
tuple(shape, `Theano.TensorType`) or None describing the input to use for this layer.
        `shape` is a tuple giving the known size of each dimension of the `Theano.TensorType`.
        4D image input is expected to be formatted as (batch_size, #channels, rows, cols).
size : tuple(int) or int
Downsample factor over (rows, columns). If it is an int, it will be the same size for rows and cols.
stride : tuple(int) or int
Stride size (step size), which is the number of shifts over rows/cols to get the
next pool region. If it is an int, it will be the same size for rows and cols.
pad : tuple(int) or int
(pad_h, pad_w), pad zeros to extend beyond four borders
of the images, pad_h is the size of the top and bottom margins,
and pad_w is the size of the left and right margins. If it is an int, it will be the same
size for rows and cols.
mode : 'max', 'sum', 'average_inc_pad', 'average_exc_pad'
Operation executed on each window. `max` and `sum` always exclude
the padding in the computation. `average` gives you the choice to
include or exclude it.
    ignore_border : bool
        If `size` doesn't evenly divide the input `shape`, do we include an extra row/col of
        partial downsampling (False) or ignore it (True)? When True, a (5, 5) input with size=(2, 2)
        yields a (2, 2) output; when False, (3, 3).
"""
super(Pool2D, self).__init__(inputs=inputs, size=size, stride=stride, pad=pad,
mode=mode, ignore_border=ignore_border)
input_shape, self.input = self.inputs[0]
if isinstance(size, int):
size = (size, ) * 2
if stride is None:
stride = size
if isinstance(stride, int):
stride = (stride, ) * 2
if isinstance(pad, int):
pad = (pad, ) * 2
assert len(size) == len(stride) == len(pad), "Size, stride, and pad must have the same number of dimensions."
self.output_size = tuple(_pool_out_size(imgshape=input_shape,
ds=size,
st=stride,
ignore_border=ignore_border,
padding=pad))
cudnn_modes = ['max', 'average_inc_pad', 'average_exc_pad']
if has_cudnn and mode in cudnn_modes and ignore_border and self.input.ndim == 4:
self.output = dnn_pool(img=self.input,
ws=size,
stride=stride,
mode=mode,
pad=pad)
else:
self.output = pool_2d(input=self.input,
ds=size,
st=stride,
padding=pad,
mode=mode,
ignore_border=ignore_border)
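The (5, 5)-with-size-(2, 2) example in the docstring follows from Theano's pooling output-size rule; a plain-Python sketch of that rule for one axis (helper name is illustrative):

def pool_side(n, ws, st, ignore_border):
    # n = input size, ws = window size, st = stride, for one spatial axis
    if ignore_border:
        return (n - ws) // st + 1        # partial windows are dropped
    if st >= ws:
        return (n + st - 1) // st        # ceil(n / st): every element pooled once
    return (n - ws + st - 1) // st + 1   # overlapping case keeps the partial tail

print(pool_side(5, 2, 2, ignore_border=True))   # -> 2
print(pool_side(5, 2, 2, ignore_border=False))  # -> 3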
Example 10: __init__
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_pool [as alias]
def __init__(self, rstream, index, x,
params, useRglrz, bnPhase,
poolShape, inFilters, outFilters, stride, ignore_border = False,
b=None, a=None, normParam=None, rglrzParam=None):
'''
Averaging layer + BN + noise
'''
# noise
self.paramsT2 = []
if 'addNoise' in params.rglrz and params.convLayers[index].noise:
if rglrzParam is None:
self.rglrzParam = {}
tempValue = params.rglrzInitial['addNoise'][index]
tempParam = np.asarray(tempValue, dtype=theano.config.floatX)
noizParam = theano.shared(value=tempParam, name='%s_%d' % ('addNoise', index), borrow=True)
self.rglrzParam['addNoise']=noizParam
if params.useT2 and 'addNoise' in params.rglrzTrain:
self.paramsT2 = [noizParam]
#self.output = noiseup(self.output, splitPoint, noizParam, params.noiseT1, params, index, rstream)
x = noiseup(x, useRglrz, noizParam, params.noiseT1, params, index, rstream)
# averaging
        if cudasConv:
            # dnn_pool has no ignore_border argument (cuDNN behaves as ignore_border=True)
            self.output = cudnn.dnn_pool(x, poolShape, stride=stride, mode='average_exc_pad')
        else:
            self.output = pool.pool_2d(x, ds=poolShape, st=stride,
                                       ignore_border=ignore_border, mode='average_exc_pad')
# if batch normalization
if params.batchNorm and params.convLayers[index].bn:
_, b, a = t1_shared(params=params, rng=0, index=index, nIn=0, nOut=0,
outFilters=outFilters, filterShape=0, defineW=0)
self.b = b; self.a = a
self.paramsT1 = [b]
if normParam is None:
normParam, paramsBN = bn_shared(params, outFilters, index)
self.normParam = normParam
self.paramsBN = paramsBN
self.output, updateBN = bn_layer(self.output, self.a, self.b, self.normParam, params, bnPhase)
self.updateBN = updateBN
# flattening and softmax
self.output = T.flatten(self.output, outdim = 2)
if params.convLayers[index].type == 'average+softmax':
self.output = activation(self.output, 'softmax')
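For reference, 'average_exc_pad' (used above) differs from 'average_inc_pad' only when padding is present: inc_pad divides every window sum by the full window size, padded zeros included, while exc_pad divides by the number of real cells. With the default pad=(0, 0), as here, the two modes coincide. A toy illustration for a 2x2 window at a zero-padded corner that covers a single real value of 4.0:

real_sum, window_size, real_cells = 4.0, 4, 1
print(real_sum / window_size)  # average_inc_pad -> 1.0
print(real_sum / real_cells)   # average_exc_pad -> 4.0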