This article collects typical usage examples of the dnn_pool function from theano.sandbox.cuda.dnn in Python. If you are unsure exactly what dnn_pool does or how to call it, the curated examples below should help.
The following 15 code examples of dnn_pool are drawn from open-source projects and listed roughly in order of popularity.
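For orientation, here is a minimal sketch of the basic call, assuming Theano is configured for a CUDA GPU with cuDNN available (device=gpu, floatX=float32):

import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda import dnn

x = T.ftensor4('x')                               # (batch, channels, rows, cols)
y = dnn.dnn_pool(x, ws=(2, 2), stride=(2, 2), mode='max')
pool_fn = theano.function([x], y)

img = np.random.rand(1, 3, 8, 8).astype('float32')
print(pool_fn(img).shape)                         # expected: (1, 3, 4, 4)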
Example 1: feature_extractor
def feature_extractor(input_data):
    # conv stage 0 (64x64=>32x32)
    h0_0 = dnn_conv(input_data, conv_w0_0, border_mode=(1, 1)) + conv_b0_0.dimshuffle("x", 0, "x", "x")
    h0_1 = dnn_conv(relu(h0_0), conv_w0_1, border_mode=(1, 1)) + conv_b0_1.dimshuffle("x", 0, "x", "x")
    h0 = dnn_pool(relu(h0_1), ws=(2, 2), stride=(2, 2))
    # conv stage 1 (32x32=>16x16)
    h1_0 = dnn_conv(h0, conv_w1_0, border_mode=(1, 1)) + conv_b1_0.dimshuffle("x", 0, "x", "x")
    h1_1 = dnn_conv(relu(h1_0), conv_w1_1, border_mode=(1, 1)) + conv_b1_1.dimshuffle("x", 0, "x", "x")
    h1 = dnn_pool(relu(h1_1), ws=(2, 2), stride=(2, 2))
    # conv stage 2 (16x16=>8x8)
    h2_0 = dnn_conv(h1, conv_w2_0, border_mode=(1, 1)) + conv_b2_0.dimshuffle("x", 0, "x", "x")
    h2_1 = dnn_conv(relu(h2_0), conv_w2_1, border_mode=(1, 1)) + conv_b2_1.dimshuffle("x", 0, "x", "x")
    h2_2 = dnn_conv(relu(h2_1), conv_w2_2, border_mode=(1, 1)) + conv_b2_2.dimshuffle("x", 0, "x", "x")
    h2 = dnn_pool(relu(h2_2), ws=(2, 2), stride=(2, 2))
    # conv stage 3 (8x8=>4x4)
    h3_0 = dnn_conv(h2, conv_w3_0, border_mode=(1, 1)) + conv_b3_0.dimshuffle("x", 0, "x", "x")
    h3_1 = dnn_conv(relu(h3_0), conv_w3_1, border_mode=(1, 1)) + conv_b3_1.dimshuffle("x", 0, "x", "x")
    h3_2 = dnn_conv(relu(h3_1), conv_w3_2, border_mode=(1, 1)) + conv_b3_2.dimshuffle("x", 0, "x", "x")
    h3 = dnn_pool(relu(h3_2), ws=(2, 2), stride=(2, 2))
    # conv stage 4 (4x4=>2x2)
    h4_0 = dnn_conv(h3, conv_w4_0, border_mode=(1, 1)) + conv_b4_0.dimshuffle("x", 0, "x", "x")
    h4_1 = dnn_conv(relu(h4_0), conv_w4_1, border_mode=(1, 1)) + conv_b4_1.dimshuffle("x", 0, "x", "x")
    h4_2 = dnn_conv(relu(h4_1), conv_w4_2, border_mode=(1, 1)) + conv_b4_2.dimshuffle("x", 0, "x", "x")
    h4 = dnn_pool(relu(h4_2), ws=(2, 2), stride=(2, 2))
    return T.flatten(h4, 2)
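The conv_w*/conv_b* parameters referenced above are shared variables defined elsewhere in the project. A hedged sketch of how one such pair might be created (shapes and initialization are assumptions, not taken from the original):

import numpy as np
import theano

rng = np.random.RandomState(0)
# 64 output channels, 3 input channels, 3x3 kernels; one bias per output channel.
conv_w0_0 = theano.shared(rng.normal(0, 0.02, (64, 3, 3, 3)).astype('float32'))
conv_b0_0 = theano.shared(np.zeros(64, dtype='float32'))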
Example 2: dnn_pool3d2d
def dnn_pool3d2d(inputs, pool_shape, pool_stride, image_shape, mode='max'):
    """Pool all time slices first with 2D poolings over width and height,
    then do a 1D pooling over time (implemented as a fake 2D pooling with
    pooling shape 1 for the ignored dimension)."""
    for i in xrange(3):
        assert pool_shape[i] <= image_shape[i], (
            "pool shape should be less than or equal to the image shape, "
            "{:d} > {:d} for pool_shape: {:s}, image_shape: {:s}").format(
            pool_shape[i], image_shape[i], pool_shape, image_shape)
    output_shape = [((image_shape[i] - pool_shape[i]) // pool_stride[i]) + 1
                    for i in xrange(3)]
    output2d_pooled = gpu_alloc_empty(inputs.shape[0], inputs.shape[1],
                                      output_shape[0], output_shape[1],
                                      image_shape[2])
    for z in range(image_shape[2]):
        pooled_slice = dnn_pool(inputs[:, :, :, :, z], ws=pool_shape[0:2],
                                stride=pool_stride[0:2], mode=mode)
        output2d_pooled = T.set_subtensor(output2d_pooled[:, :, :, :, z],
                                          pooled_slice)
    # now 1d-pool over the last dimension...
    # could use the first or second dimension as input of pool1d..
    # compute maximum y index after the first pooling
    output = gpu_alloc_empty(inputs.shape[0], inputs.shape[1],
                             output_shape[0], output_shape[1], output_shape[2])
    max_y = output_shape[1]
    for y in range(max_y):
        # the first dimension (index 0) was already pooled in the loop above,
        # so set its pooling shape and stride to 1 here
        final_pooled_slice = dnn_pool(output2d_pooled[:, :, :, y, :],
                                      ws=(1, pool_shape[2]),
                                      stride=(1, pool_stride[2]), mode=mode)
        output = T.set_subtensor(output[:, :, :, y, :], final_pooled_slice)
    return output
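A hedged usage sketch of dnn_pool3d2d on a 5D (batch, channels, x, y, time) tensor; gpu_alloc_empty is assumed to be imported from theano.sandbox.cuda.basic_ops, as in the module the original function comes from:

import theano.tensor as T

inputs = T.TensorType('float32', (False,) * 5)('inputs')    # (b, c, x, y, t)
pooled = dnn_pool3d2d(inputs, pool_shape=(2, 2, 2), pool_stride=(2, 2, 2),
                      image_shape=(8, 8, 4), mode='max')     # -> (b, c, 4, 4, 2)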
Example 3: __init__
def __init__(self, h_low, h_high, method="adascan_mod"):
    kern = build_filters(h_low, h_high)
    sharedKern = theano.shared(kern, name='sharedKern')
    input = theano.tensor.tensor4(name='input')
    self.conv_fun = theano.function([input], dnn_conv(input, sharedKern))
    self.down_x = theano.function([input], dnn_pool(input, (2, 1), stride=(2, 1), mode='average_inc_pad'))
    self.down_y = theano.function([input], dnn_pool(input, (1, 2), stride=(1, 2), mode='average_inc_pad'))
    self.h_low, self.h_high, self.method = h_low, h_high, method
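A standalone, hedged sketch of the one-axis average pooling compiled above (assumes a GPU with cuDNN, as in the rest of this article):

import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda import dnn

inp = T.ftensor4('input')
down_x = theano.function([inp], dnn.dnn_pool(inp, (2, 1), stride=(2, 1),
                                             mode='average_inc_pad'))
img = np.random.rand(1, 1, 6, 5).astype('float32')
print(down_x(img).shape)   # expected: (1, 1, 3, 5) -- halved along rows only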
Example 4: test_dnn_pool_desc_merge
def test_dnn_pool_desc_merge():
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    x = theano.tensor.ftensor4("x")
    y = dnn.dnn_pool(x, (2, 2))
    z = dnn.dnn_pool(x, (2, 2))
    f = theano.function([x], [y, z])
    descs = [n for n in f.maker.fgraph.apply_nodes
             if isinstance(n.op, dnn.GpuDnnPoolDesc)]
    assert len(descs) == 1, f.maker.fgraph
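The same availability check works as a guard outside of tests as well; a minimal sketch:

from theano.sandbox import cuda

if cuda.dnn.dnn_available():
    # safe to build dnn_pool / GpuDnnPoolDesc nodes here
    pass
else:
    print(cuda.dnn.dnn_available.msg)   # human-readable reason cuDNN is unavailable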
Example 5: model
def model(X,
          h2_u, h3_u,
          h2_s, h3_s,
          w, w2, g2, b2, w3, g3, b3, wy
          ):
    h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2, u=h2_u, s=h2_s))
    h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3, u=h3_u, s=h3_s))
    h = T.flatten(dnn_pool(h, (4, 4), (4, 4), mode='max'), 2)
    h2 = T.flatten(dnn_pool(h2, (2, 2), (2, 2), mode='max'), 2)
    h3 = T.flatten(dnn_pool(h3, (1, 1), (1, 1), mode='max'), 2)
    f = T.concatenate([h, h2, h3], axis=1)
    return [f]
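The three pool sizes appear to be chosen so that features from different depths reach a comparable spatial resolution before concatenation. A small sketch of the output-size arithmetic behind that kind of choice (valid pooling, no padding; the map sizes here are illustrative, not taken from the model):

def pooled_size(in_size, ws, stride):
    # output size of valid pooling: floor((in - ws) / stride) + 1
    return (in_size - ws) // stride + 1

# maps of size 32, 16 and 8 pooled with (4, 4), (2, 2) and (1, 1) all end up at 8
print(pooled_size(32, 4, 4), pooled_size(16, 2, 2), pooled_size(8, 1, 1))  # 8 8 8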
Example 6: pool2d
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    if border_mode == 'same':
        # TODO: add implementation for border_mode="same"
        raise Exception('border_mode="same" not supported with Theano.')
    elif border_mode == 'valid':
        ignore_border = True
        padding = (0, 0)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))
    if pool_mode == 'max':
        if _on_gpu() and dnn.dnn_available():
            pool_out = dnn_pool(x,
                                pool_size,
                                stride=strides,
                                mode='max')
        else:
            pool_out = downsample.max_pool_2d(x,
                                              ds=pool_size,
                                              st=strides,
                                              ignore_border=ignore_border,
                                              padding=padding,
                                              mode='max')
    elif pool_mode == 'avg':
        if _on_gpu() and dnn.dnn_available():
            pool_out = dnn_pool(x,
                                pool_size,
                                stride=strides,
                                mode='average_exc_pad')
        else:
            pool_out = downsample.max_pool_2d(x,
                                              ds=pool_size,
                                              st=strides,
                                              ignore_border=ignore_border,
                                              padding=padding,
                                              mode='average_exc_pad')
    else:
        raise Exception('Invalid pooling mode: ' + str(pool_mode))
    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
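A hedged usage sketch of this pool2d wrapper (assumes the _on_gpu helper and the dnn / downsample imports from the surrounding module):

import numpy as np
import theano
import theano.tensor as T

x = T.ftensor4('x')
y = pool2d(x, pool_size=(2, 2), strides=(2, 2), border_mode='valid',
           dim_ordering='th', pool_mode='max')
f = theano.function([x], y)
print(f(np.random.rand(2, 3, 8, 8).astype('float32')).shape)   # (2, 3, 4, 4)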
Example 7: apply
def apply(self, input_):
    """Apply the pooling (subsampling) transformation.
    """
    if self.pooling_size == (1, 1, 1):
        return input_
    # Pooling on the last two dimensions: merge the channel axis with the first
    # pooled dimension so cuDNN sees a 4D tensor.
    input_4d = input_.reshape((input_.shape[0], input_.shape[1] * input_.shape[2],
                               input_.shape[3], input_.shape[4]))
    p = dnn_pool(img=input_4d, ws=tuple(self.pooling_size[1:]), stride=tuple(self.step[1:]))
    p = p.reshape((p.shape[0], input_.shape[1], input_.shape[2], p.shape[2], p.shape[3]))
    # Pooling on the first dimension: merge the two already-pooled spatial axes.
    p_4d = p.reshape((p.shape[0], p.shape[1], p.shape[2], p.shape[3] * p.shape[4]))
    output = dnn_pool(img=p_4d, ws=(self.pooling_size[0], 1), stride=(self.step[0], 1))
    output = output.reshape((output.shape[0], output.shape[1], output.shape[2],
                             p.shape[3], p.shape[4]))
    return output
Example 8: spp_predict
def spp_predict(fmaps, pyramid):
    """ From input confidence maps, perform "SPP" prediction across a scale pyramid,
    using spatial pruning of labels and confidences.

    Arguments:
        fmaps
            theano symbolic 4D tensor with shape (nb_images, nb_labels, nb_rows, nb_cols)
        pyramid
            python list of average pooling kernel sizes, e.g. [3, 5].
    Returns:
        symbolic (nb_images, nb_labels) tensor of spatially pooled multi-scale predictions.
    """
    # Step 1: average pooling of the confidences across multiple scales, then average pooling
    # of that using spatial information to get multi-scale spatial confidences.
    pooled_maps = fmaps
    nb_images, nb_labels, nb_rows, nb_cols = fmaps.shape
    for ws in pyramid:
        pooled_maps += resize(
            dnn_pool(fmaps, (ws, ws), (1, 1), mode='average'),
            (nb_rows, nb_cols)
        )
    pooled_maps /= len(pyramid) + 1
    # Step 2: spatial max-pooling across labels.
    label_conf, label_map = T.max_and_argmax(pooled_maps, axis=1, keepdims=True)
    bcast_labels = T.addbroadcast(T.arange(nb_labels).reshape([1, nb_labels, 1, 1]), 0, 2, 3)
    label_mask = T.eq(bcast_labels, label_map)
    return T.mean(label_mask * label_conf, axis=[2, 3])
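A hedged usage sketch; the resize helper called inside spp_predict is assumed to come from the same module as the original function:

import theano.tensor as T

fmaps = T.ftensor4('fmaps')                   # (nb_images, nb_labels, rows, cols)
preds = spp_predict(fmaps, pyramid=[3, 5])    # symbolic (nb_images, nb_labels)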
Example 9: apply
def apply(self, input):
    """
    Apply this discriminator module to the given input. This produces a
    collection of filter responses for feedforward and a spatial grid of
    discriminator outputs.
    """
    bm = int((self.filt_dim - 1) / 2)  # use "same" mode convolutions
    ss = self.ds_stride                # stride for "learned downsampling"
    # apply first conv layer
    h1 = dnn_conv(input, self.w1, subsample=(1, 1), border_mode=(bm, bm))
    if self.apply_bn_1:
        h1 = batchnorm(h1, g=self.g1, b=self.b1)
    h1 = lrelu(h1)
    # apply second conv layer (may include downsampling)
    if self.use_pooling:
        h2 = dnn_conv(h1, self.w2, subsample=(1, 1), border_mode=(bm, bm))
        if self.apply_bn_2:
            h2 = batchnorm(h2, g=self.g2, b=self.b2)
        h2 = lrelu(h2)
        h2 = dnn_pool(h2, (ss, ss), stride=(ss, ss), mode='max', pad=(0, 0))
    else:
        h2 = dnn_conv(h1, self.w2, subsample=(ss, ss), border_mode=(bm, bm))
        if self.apply_bn_2:
            h2 = batchnorm(h2, g=self.g2, b=self.b2)
        h2 = lrelu(h2)
    # apply discriminator layer
    y = dnn_conv(h2, self.wd, subsample=(1, 1), border_mode=(bm, bm))
    y = sigmoid(T.flatten(y, 2))  # flatten to (batch_size, num_preds)
    return h2, y
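A quick check of the "same"-mode padding arithmetic used for bm above: with an odd filter size F and stride 1, padding (F - 1) // 2 on each side keeps the spatial size unchanged:

for filt_dim in (3, 5, 7):
    bm = (filt_dim - 1) // 2
    in_size = 16
    out_size = in_size + 2 * bm - filt_dim + 1   # valid conv on the padded input
    assert out_size == in_size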
Example 10: compute_output
def compute_output(self, network, in_vw):
    mode = network.find_hyperparameter(["mode"])
    pool_size = network.find_hyperparameter(["pool_size"])
    dim = len(pool_size)
    # works for sizes 2 and 3
    assert dim in [2, 3]
    stride = network.find_hyperparameter(["pool_stride",
                                           "stride"],
                                          None)
    if stride is None:
        stride = pool_size
    pad = network.find_hyperparameter(["pool_pad", "pad"], (0,) * dim)
    assert dim == len(stride) == len(pad)
    if dim == 2:
        pool_axes = (2, 3)
    elif dim == 3:
        pool_axes = (2, 3, 4)
    out_shape = downsample.pool_output_shape(
        input_shape=in_vw.shape,
        axes=pool_axes,
        pool_shape=pool_size,
        strides=stride,
        pads=pad)
    out_var = dnn.dnn_pool(img=in_vw.variable,
                           ws=pool_size,
                           stride=stride,
                           pad=pad,
                           mode=mode)
    network.create_vw(
        "default",
        variable=out_var,
        shape=out_shape,
        tags={"output"},
    )
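Since this node accepts 2- or 3-element pool sizes, dnn_pool here may also be driven with a 5D input. A hedged sketch of the 3D case (requires a Theano/cuDNN combination with 3D pooling support):

import theano.tensor as T
from theano.sandbox.cuda import dnn

x5 = T.TensorType('float32', (False,) * 5)('x5')   # (batch, channels, d, h, w)
y5 = dnn.dnn_pool(x5, ws=(2, 2, 2), stride=(2, 2, 2), pad=(0, 0, 0), mode='max')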
Example 11: __init__
def __init__(self, inputs=None, size=(1, 1), stride=None, pad=(0, 0), mode='max', ignore_border=True):
    """
    Parameters
    ----------
    inputs : tuple(shape, `Theano.TensorType`)
        tuple(shape, `Theano.TensorType`) or None describing the input to use for this layer.
        `shape` is a tuple of the known sizes for each dimension of the `Theano.TensorType`.
        If the input is a 4D image tensor, it is expected to be formatted as
        (batch_size, #channels, rows, cols).
    size : tuple(int) or int
        Downsample factor over (rows, columns). If it is an int, the same size is used
        for rows and cols.
    stride : tuple(int) or int
        Stride size (step size), which is the number of shifts over rows/cols to get the
        next pool region. If it is an int, the same stride is used for rows and cols.
    pad : tuple(int) or int
        (pad_h, pad_w): zero padding to extend beyond the four borders of the images;
        pad_h is the size of the top and bottom margins, and pad_w is the size of the
        left and right margins. If it is an int, the same padding is used for rows and cols.
    mode : 'max', 'sum', 'average_inc_pad', 'average_exc_pad'
        Operation executed on each window. `max` and `sum` always exclude
        the padding in the computation. `average` gives you the choice to
        include or exclude it.
    ignore_border : bool
        If `size` doesn't divide the input `shape`, do we include an extra row/col of
        partial downsampling (False) or ignore it (True)? When True, a (5, 5) input with
        size=(2, 2) will generate a (2, 2) output; (3, 3) otherwise.
    """
    super(Pool2D, self).__init__(inputs=inputs, size=size, stride=stride, pad=pad)
    input_shape, self.input = self.inputs[0]
    if isinstance(size, int):
        size = (size, ) * 2
    if stride is None:
        stride = size
    if isinstance(stride, int):
        stride = (stride, ) * 2
    if isinstance(pad, int):
        pad = (pad, ) * 2
    assert len(size) == len(stride) == len(pad), "Size, stride, and pad must have the same number of dimensions."
    self.output_size = tuple(_pool_out_size(imgshape=input_shape,
                                            ds=size,
                                            st=stride,
                                            ignore_border=ignore_border,
                                            padding=pad))
    cudnn_modes = ['max', 'average_inc_pad', 'average_exc_pad']
    if has_cudnn and mode in cudnn_modes and ignore_border and self.input.ndim == 4:
        self.output = dnn_pool(img=self.input,
                               ws=size,
                               stride=stride,
                               mode=mode,
                               pad=pad)
    else:
        self.output = max_pool_2d(input=self.input,
                                  ds=size,
                                  st=stride,
                                  padding=pad,
                                  mode=mode,
                                  ignore_border=ignore_border)
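A CPU-only check of the ignore_border note in the docstring, using the same max_pool_2d fallback as above:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal.downsample import max_pool_2d

x = T.ftensor4('x')
img = np.zeros((1, 1, 5, 5), dtype='float32')
f_ignore = theano.function([x], max_pool_2d(x, ds=(2, 2), ignore_border=True))
f_keep = theano.function([x], max_pool_2d(x, ds=(2, 2), ignore_border=False))
print(f_ignore(img).shape)   # (1, 1, 2, 2)
print(f_keep(img).shape)     # (1, 1, 3, 3)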
Example 12: apply
def apply(self, input_):
    """Apply the pooling (subsampling) transform.

    Parameters
    ----------
    input_ : :class:`~tensor.TensorVariable`
        3D tensor with axes (batch size, sequence, features)

    Returns
    -------
    output : :class:`~tensor.TensorVariable`
        3D tensor with axes (batch size, sequence, features)
    """
    shuffled = input_.dimshuffle(0, 2, 1, 'x')
    # batch_size, num_filters, x_map, 1
    if self.step is None:
        st = (1, 1)
    else:
        st = (self.step, 1)
    # output = max_pool_2d(shuffled, (self.pooling_length, 1), st=st)
    output = dnn_pool(shuffled, (self.pooling_length, 1), stride=st)
    sequence_out = output[:, :, :, 0].dimshuffle(0, 2, 1)
    return sequence_out
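A standalone, hedged sketch of the dimshuffle trick used above: max-pool over the sequence axis of a (batch, sequence, features) tensor by presenting it to cuDNN as a 4D image:

import theano.tensor as T
from theano.sandbox.cuda import dnn

seq = T.ftensor3('seq')                        # (batch, sequence, features)
shuffled = seq.dimshuffle(0, 2, 1, 'x')        # (batch, features, sequence, 1)
pooled = dnn.dnn_pool(shuffled, (4, 1), stride=(4, 1))
back = pooled[:, :, :, 0].dimshuffle(0, 2, 1)  # (batch, pooled sequence, features)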
Example 13: get_convpool
def get_convpool(self, img, kerns, conv_b, subsample, border_mode, pooling, ws=None, stride=None, normalizing=False):
    conv_out = dnn.dnn_conv(
        img=img,
        kerns=kerns,
        subsample=subsample,
        border_mode=border_mode
    )
    conv_out += conv_b.dimshuffle('x', 0, 'x', 'x')
    conv_out = T.maximum(conv_out, 0)
    if pooling:
        pool_out = dnn.dnn_pool(
            conv_out,
            ws=ws,
            stride=stride
        )
    else:
        pool_out = conv_out
    if normalizing:
        norm_out = CrossChannelNormalization()(pool_out)
    else:
        norm_out = pool_out
    return norm_out
Example 14: pool2d
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    # ====== dim ordering ====== #
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))
    # ====== border mode ====== #
    if border_mode == 'same':
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        padding = (w_pad, h_pad)
    elif border_mode == 'valid':
        padding = (0, 0)
    elif isinstance(border_mode, (tuple, list)):
        padding = tuple(border_mode)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))
    # ====== pooling ====== #
    if _on_gpu() and dnn.dnn_available():
        pool_out = dnn.dnn_pool(x, pool_size,
                                stride=strides,
                                mode=pool_mode,
                                pad=padding)
    else:  # CPU version supported by Theano
        pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                ignore_border=True,
                                padding=padding,
                                mode=pool_mode)
    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
Example 15: __init__
def __init__(self, input, image_shape, filter_shape, convstride, padsize,
             poolsize, poolstride, poolpad, W, b, lrn=False,
             lib_conv='cudnn',
             ):
    self.filter_size = filter_shape
    self.convstride = convstride
    self.padsize = padsize
    self.channel = image_shape[0]
    self.lrn = lrn
    self.lib_conv = lib_conv
    self.filter_shape = np.asarray(filter_shape)
    self.image_shape = np.asarray(image_shape)
    self.W = W  # Weight(self.filter_shape)
    self.b = b  # Weight(self.filter_shape[3])#, bias_init, std=0)
    input_shuffled = input.dimshuffle(3, 0, 1, 2)  # c01b to bc01
    # filters: in01out to outin01
    # print image_shape_shuffled
    # print filter_shape_shuffled
    W_shuffled = self.W.val.dimshuffle(3, 0, 1, 2)  # in01out to outin01
    conv_out = dnn.dnn_conv(img=input_shuffled,
                            kerns=W_shuffled,
                            subsample=(convstride, convstride),
                            border_mode=padsize,
                            )
    conv_out = conv_out + self.b.val.dimshuffle('x', 0, 'x', 'x')
    # ReLU
    self.output = T.maximum(conv_out, 0)
    # Pool
    self.poolsize = poolsize
    self.poolstride = poolstride
    self.poolpad = poolpad
    if self.poolsize != 1:
        self.output = dnn.dnn_pool(self.output,
                                   ws=(poolsize, poolsize),
                                   stride=(poolstride, poolstride),
                                   mode='max', pad=(poolpad, poolpad))
    self.output = self.output.dimshuffle(1, 2, 3, 0)  # bc01 to c01b
    # LRN
    if self.lrn:
        self.lrn_func = CrossChannelNormalization()
        # lrn_input = gpu_contiguous(self.output)
        self.output = self.lrn_func(self.output)
    self.params = [self.W.val, self.b.val]
    self.weight_type = ['W', 'b']
    print "conv ({}) layer with shape_in: {}".format(lib_conv,
                                                     str(image_shape))
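A small, hedged sketch of the c01b <-> bc01 axis reordering that brackets the cuDNN calls above:

import theano.tensor as T

x_c01b = T.ftensor4('x_c01b')            # (channels, rows, cols, batch)
x_bc01 = x_c01b.dimshuffle(3, 0, 1, 2)   # (batch, channels, rows, cols) for cuDNN
back = x_bc01.dimshuffle(1, 2, 3, 0)     # back to (channels, rows, cols, batch)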