This article collects typical usage examples of the Python function theano.tensor.signal.downsample.max_pool_2d. If you have been wondering what exactly max_pool_2d does, how to call it, or what real-world usage looks like, the curated examples below should help.
The following 15 code examples of max_pool_2d are shown, sorted by popularity by default.
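Before the examples, here is a minimal sketch of the basic call (my own illustration, not one of the collected examples; it assumes an older Theano release where theano.tensor.signal.downsample still exists, since later releases deprecated it in favor of theano.tensor.signal.pool.pool_2d):

import numpy
import theano
import theano.tensor as T
from theano.tensor.signal.downsample import max_pool_2d

x = T.tensor4('x')  # (batch, channels, rows, cols); pooling acts on the last two axes
pooled = max_pool_2d(x, ds=(2, 2), ignore_border=True)  # non-overlapping 2x2 max pooling
f = theano.function([x], pooled)
out = f(numpy.arange(16, dtype='float32').reshape(1, 1, 4, 4))
# out has shape (1, 1, 2, 2); each entry is the maximum of one 2x2 window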
Example 1: bench_ConvMed
def bench_ConvMed(batchsize):
    data_x.value = randn(n_examples, 1, 96, 96)
    w0 = shared(rand(6, 1, 7, 7) * numpy.sqrt(6 / (25.)))
    b0 = shared(zeros(6))
    w1 = shared(rand(16, 6, 7, 7) * numpy.sqrt(6 / (25.)))
    b1 = shared(zeros(16))
    vv = shared(rand(16 * 8 * 8, 120) * numpy.sqrt(6.0 / 16. / 25))
    cc = shared(zeros(120))
    v = shared(zeros(120, outputs))
    c = shared(zeros(outputs))
    params = [w0, b0, w1, b1, v, c, vv, cc]

    c0 = tanh(conv2d(sx, w0, image_shape=(batchsize, 1, 96, 96), filter_shape=(6, 1, 7, 7)) + b0.dimshuffle(0, 'x', 'x'))
    s0 = tanh(max_pool_2d(c0, (3, 3)))  # this is not the correct leNet5 model, but it's closer to
    c1 = tanh(conv2d(s0, w1, image_shape=(batchsize, 6, 30, 30), filter_shape=(16, 6, 7, 7)) + b1.dimshuffle(0, 'x', 'x'))
    s1 = tanh(max_pool_2d(c1, (3, 3)))

    p_y_given_x = softmax(dot(tanh(dot(s1.flatten(2), vv) + cc), v) + c)
    nll = -log(p_y_given_x)[arange(sy.shape[0]), sy]
    cost = nll.mean()
    gparams = grad(cost, params)
    train = function([si, nsi], cost,
                     updates=[(p, p - lr * gp) for p, gp in zip(params, gparams)])
    eval_and_report(train, "ConvMed", [batchsize], N=120)
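A quick sanity check on the hard-coded 16*8*8 input width of vv above (my own arithmetic, not part of the original benchmark): a 'valid' k x k convolution shrinks each side by k - 1, and pooling with ignore_border divides it by the pool width.

def conv_valid(size, k):  # output side length of a 'valid' convolution
    return size - k + 1

def pool(size, p):        # output side length after pooling (ignore_border semantics)
    return size // p

s = conv_valid(96, 7)     # 90
s = pool(s, 3)            # 30, matching image_shape=(batchsize, 6, 30, 30) above
s = conv_valid(s, 7)      # 24
s = pool(s, 3)            # 8, so s1.flatten(2) has 16 * 8 * 8 columns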
Example 2: get_output
def get_output(self, train):
    X = self.get_input(train)
    if theano.config.device == 'gpu':
        # max_pool_2d X and Z
        output = downsample.max_pool_2d(input=X.dimshuffle(0, 4, 2, 3, 1),
                                        ds=(self.pool_size[1], self.pool_size[2]),
                                        ignore_border=self.ignore_border)
        # max_pool_2d X and Y (with X constant)
        output = downsample.max_pool_2d(input=output.dimshuffle(0, 4, 2, 3, 1),
                                        ds=(1, self.pool_size[0]),
                                        ignore_border=self.ignore_border)
    else:  # cpu order: (batch, row, column, time, inchannel) from cpu convolution
        # max_pool_2d X and Z
        output = downsample.max_pool_2d(input=X.dimshuffle(0, 4, 1, 2, 3),
                                        ds=(self.pool_size[1], self.pool_size[2]),
                                        ignore_border=self.ignore_border)
        # max_pool_2d X and Y (with X constant)
        output = downsample.max_pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2),
                                        ds=(1, self.pool_size[0]),
                                        ignore_border=self.ignore_border)
        output = output.dimshuffle(0, 4, 3, 2, 1)
    return output
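The two pooling calls above work because max pooling is separable: pooling two axes and then the remaining axis gives the same result as a single 3-D max pool. A small numpy check of that identity (my own sketch, using non-overlapping 2x2x2 windows):

import numpy

a = numpy.random.rand(4, 4, 4)
# one direct 3-D max pool with a 2x2x2 window
direct = a.reshape(2, 2, 2, 2, 2, 2).max(axis=(1, 3, 5))
# the same result from two 2-D pools: last two axes first, then the first axis
step1 = a.reshape(4, 2, 2, 2, 2).max(axis=(2, 4))
step2 = step1.reshape(2, 2, 2, 2).max(axis=1)
assert numpy.allclose(direct, step2)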
Example 3: output
def output(self, input, mask=None):
    if mask is None:
        drop_in = input * self.drop
    else:
        drop_in = input * mask

    conv_out1 = conv.conv2d(input=drop_in, filters=self.W1, filter_shape=self.filter_shape1,
                            image_shape=self.shape_in)
    linout1 = T.nnet.relu(conv_out1 + self.b1.dimshuffle('x', 0, 'x', 'x'))
    output1 = (
        linout1 if self.activation is None
        else self.activation(linout1)
    )
    pooled_out1 = downsample.max_pool_2d(input=output1, ds=self.poolsize1, ignore_border=True)

    conv_out2 = conv.conv2d(input=drop_in, filters=self.W2, filter_shape=self.filter_shape2,
                            image_shape=self.shape_in)
    linout2 = T.nnet.relu(conv_out2 + self.b2.dimshuffle('x', 0, 'x', 'x'))
    output2 = (
        linout2 if self.activation is None
        else self.activation(linout2)
    )
    pooled_out2 = downsample.max_pool_2d(input=output2, ds=self.poolsize2, ignore_border=True)

    conv_out3 = conv.conv2d(input=drop_in, filters=self.W3, filter_shape=self.filter_shape3,
                            image_shape=self.shape_in)
    linout3 = T.nnet.relu(conv_out3 + self.b3.dimshuffle('x', 0, 'x', 'x'))
    output3 = (
        linout3 if self.activation is None
        else self.activation(linout3)
    )
    pooled_out3 = downsample.max_pool_2d(input=output3, ds=self.poolsize3, ignore_border=True)

    output = T.concatenate([pooled_out1, pooled_out2, pooled_out3], axis=1)
    return output
Example 4: bench_ConvLarge
def bench_ConvLarge(batchsize, variant=True):
    name = "ConvLarge_b" + str(GlobalBenchReporter.batch_size)
    name += "_" + config.linker
    # Image shape 256x256
    GlobalBenchReporter.batch_size = batchsize
    data_x.set_value(randn(n_examples, 1, 256, 256))
    w0 = shared(rand(6, 1, 7, 7) * numpy.sqrt(6 / (25.)))
    b0 = shared(zeros(6))
    w1 = shared(rand(16, 6, 7, 7) * numpy.sqrt(6 / (25.)))
    b1 = shared(zeros(16))
    vv = shared(rand(16 * 11 * 11, 120) * numpy.sqrt(6.0 / 16. / 25))
    cc = shared(zeros(120))
    v = shared(zeros(120, outputs))
    c = shared(zeros(outputs))
    params = [w0, b0, w1, b1, v, c, vv, cc]

    c0 = tanh(conv2d(sx, w0, image_shape=(batchsize, 1, 256, 256),
                     filter_shape=(6, 1, 7, 7)) + b0.dimshuffle(0, 'x', 'x'))
    # this is not the correct leNet5 model, but it's closer to
    s0 = tanh(max_pool_2d(c0, (5, 5)))
    c1 = tanh(conv2d(s0, w1, image_shape=(batchsize, 6, 50, 50),
                     filter_shape=(16, 6, 7, 7)) + b1.dimshuffle(0, 'x', 'x'))
    s1 = tanh(max_pool_2d(c1, (4, 4)))
    p_y_given_x = softmax(dot(tanh(dot(s1.flatten(2), vv) + cc), v) + c)
    nll = -log(p_y_given_x)[arange(sy.shape[0]), sy]
    cost = nll.mean()
    gparams = grad(cost, params)
    train = function([si, nsi], cost,
                     updates=[(p, p - lr * gp) for p, gp in zip(params, gparams)],
                     name=name)
    GlobalBenchReporter.eval_model(train, name)
    if not variant:
        return

    # Versions with no inputs
    snsi.set_value(GlobalBenchReporter.batch_size)
    c0 = tanh(conv2d(ssx, w0, image_shape=(batchsize, 1, 256, 256),
                     filter_shape=(6, 1, 7, 7)) + b0.dimshuffle(0, 'x', 'x'))
    # this is not the correct leNet5 model, but it's closer to
    s0 = tanh(max_pool_2d(c0, (5, 5)))
    c1 = tanh(conv2d(s0, w1, image_shape=(batchsize, 6, 50, 50),
                     filter_shape=(16, 6, 7, 7)) + b1.dimshuffle(0, 'x', 'x'))
    s1 = tanh(max_pool_2d(c1, (4, 4)))
    p_y_given_x = softmax(dot(tanh(dot(s1.flatten(2), vv) + cc), v) + c)
    nll = -log(p_y_given_x)[arange(ssy.shape[0]), ssy]
    cost = nll.mean()
    gparams = grad(cost, params)
    train2 = function([], cost,
                      updates=[(p, p - lr * gp) for p, gp in zip(params, gparams)] + [(ssi, ssi + snsi)],
                      name=name)
    GlobalBenchReporter.bypass_eval_model(train2, name, init_to_zero=ssi)
Example 5: __init__
def __init__(self, rng, input_A, input_B, filter_shape, image_shape, poolsize=(2, 2)):
    print(image_shape)
    print(filter_shape)
    assert image_shape[1] == filter_shape[1]
    # calc the W_bound and init the W
    fan_in = numpy.prod(filter_shape[1:])
    fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
               numpy.prod(poolsize))
    W_bound = numpy.sqrt(6. / (fan_in + fan_out))
    self.W = theano.shared(numpy.asarray(
        rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
        dtype=theano.config.floatX),
        borrow=True)
    b_value = numpy.zeros((filter_shape[0],),
                          dtype=theano.config.floatX)
    self.b = theano.shared(value=b_value, borrow=True)
    conv_out_A = conv.conv2d(input=input_A, filters=self.W,
                             filter_shape=filter_shape, image_shape=image_shape)
    conv_out_B = conv.conv2d(input=input_B, filters=self.W,
                             filter_shape=filter_shape, image_shape=image_shape)
    pooled_out_A = downsample.max_pool_2d(input=conv_out_A,
                                          ds=poolsize, ignore_border=True)
    pooled_out_B = downsample.max_pool_2d(input=conv_out_B,
                                          ds=poolsize, ignore_border=True)
    self.output_A = T.tanh(pooled_out_A + self.b.dimshuffle('x', 0, 'x', 'x'))
    self.output_B = T.tanh(pooled_out_B + self.b.dimshuffle('x', 0, 'x', 'x'))
    self.params = [self.W, self.b]
Example 6: convolutional_model
def convolutional_model(X, w_1, w_2, w_3, w_4, w_5, w_6, p_1, p_2, p_3, p_4, p_5):
    l1 = dropout(T.tanh(max_pool_2d(T.maximum(conv2d(X, w_1, border_mode='full'), 0.), (2, 2), ignore_border=True) + b_1.dimshuffle('x', 0, 'x', 'x')), p_1)
    l2 = dropout(T.tanh(max_pool_2d(T.maximum(conv2d(l1, w_2), 0.), (2, 2), ignore_border=True) + b_2.dimshuffle('x', 0, 'x', 'x')), p_2)
    # flatten to switch back to 1-D layers
    l3 = dropout(T.flatten(T.tanh(max_pool_2d(T.maximum(conv2d(l2, w_3), 0.), (2, 2), ignore_border=True) + b_3.dimshuffle('x', 0, 'x', 'x')), outdim=2), p_3)
    l4 = dropout(T.maximum(T.dot(l3, w_4), 0.), p_4)
    l5 = dropout(T.maximum(T.dot(l4, w_5), 0.), p_5)
    return T.dot(l5, w_6)
Example 7: model
def model(X, w, w2, w3, w4, w5, w_o, b_h1, b_h2, b_o, p_drop_conv, p_drop_hidden):
    # each layer blends ReLU and tanh with a learnable mixing coefficient alpha
    l1_lin = conv2d(X, w, border_mode='full') + b_c1.dimshuffle('x', 0, 'x', 'x')
    l1a = alpha_c1 * rectify(l1_lin) + (1. - alpha_c1) * T.tanh(l1_lin)
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    l2_lin = conv2d(l1, w2) + b_c2.dimshuffle('x', 0, 'x', 'x')
    l2a = alpha_c2 * rectify(l2_lin) + (1. - alpha_c2) * T.tanh(l2_lin)
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = dropout(l2, p_drop_conv)

    l3_lin = conv2d(l2, w3) + b_c3.dimshuffle('x', 0, 'x', 'x')
    l3a = alpha_c3 * rectify(l3_lin) + (1. - alpha_c3) * T.tanh(l3_lin)
    l3b = max_pool_2d(l3a, (2, 2))
    l3 = T.flatten(l3b, outdim=2)
    l3 = dropout(l3, p_drop_conv)

    l4_lin = T.dot(l3, w4) + b_h1
    l4 = alpha_h1 * rectify(l4_lin) + (1. - alpha_h1) * T.tanh(l4_lin)
    l4 = dropout(l4, p_drop_hidden)

    l5_lin = T.dot(l4, w5) + b_h2
    l5 = alpha_h2 * rectify(l5_lin) + (1. - alpha_h2) * T.tanh(l5_lin)
    l5 = dropout(l5, p_drop_hidden)

    pyx = softmax(T.dot(l5, w_o) + b_o)
    return l1, l2, l3, l4, l5, pyx
Example 8: test_pooling_opt
def test_pooling_opt():
    if not dnn.dnn_available():
        raise SkipTest(dnn.dnn_available.msg)
    x = T.fmatrix()
    f = theano.function(
        [x],
        max_pool_2d(x, ds=(2, 2), mode='average_inc_pad',
                    ignore_border=True),
        mode=mode_with_gpu)
    assert any([isinstance(n.op, dnn.GpuDnnPool)
                for n in f.maker.fgraph.toposort()])
    f(numpy.zeros((10, 10), dtype='float32'))

    f = theano.function(
        [x],
        T.grad(max_pool_2d(x, ds=(2, 2), mode='average_inc_pad',
                           ignore_border=True).sum(),
               x),
        mode=mode_with_gpu.including("cudnn"))
    assert any([isinstance(n.op, dnn.GpuDnnPoolGrad)
                for n in f.maker.fgraph.toposort()])
    f(numpy.zeros((10, 10), dtype='float32'))
Example 9: model
def model(self, X, w1, w2, w3, w4, wo, p_drop_conv, p_drop_hidden):
    # 1x1 network-in-network convolutions applied after each pooling stage
    nin1 = self.init_weights((32, 3, 1, 1))
    nin2 = self.init_weights((64, 3, 1, 1))
    nin3 = self.init_weights((128, 3, 1, 1))

    l1a = self.rectify(conv2d(X, w1, border_mode="full"))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = conv2d(l1, nin1)
    l1 = self.dropout(l1, p_drop_conv)

    l2a = self.rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = conv2d(l2, nin2)
    l2 = self.dropout(l2, p_drop_conv)

    l3a = self.rectify(conv2d(l2, w3))
    l3b = max_pool_2d(l3a, (2, 2))
    l3b = conv2d(l3b, nin3)
    l3 = T.flatten(l3b, outdim=2)
    l3 = self.dropout(l3, p_drop_conv)

    l4 = self.rectify(T.dot(l3, w4))
    l4 = self.dropout(l4, p_drop_hidden)

    pyx = self.softmax(T.dot(l4, wo))
    return l1, l2, l3, l4, pyx
Example 10: model
def model(self, X, w1, w2, w3, w4, wo, p_drop_conv, p_drop_hidden):
    # print X
    l1a = self.rectify(conv2d(X, w1, border_mode="full"))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = self.dropout(l1, p_drop_conv)
    # print np.mean(l1)
    l2a = self.rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = self.dropout(l2, p_drop_conv)
    # print np.mean(l2)
    l3a = self.rectify(conv2d(l2, w3))
    l3b = max_pool_2d(l3a, (2, 2))
    l3 = T.flatten(l3b, outdim=2)
    l3 = self.dropout(l3, p_drop_conv)
    # print np.mean(l3)
    l4 = self.rectify(T.dot(l3, w4))
    l4 = self.dropout(l4, p_drop_hidden)
    # print np.mean(l4)
    # l4 = T.dot(l4, wo)
    sig = T.dot(l4, wo)
    # pyx = self.softmax(T.dot(l4, wo))
    return l1, l2, l3, l4, sig
Example 11: __init__
def __init__(self, rng, input, filter_shape, image_shape,
             poolsize=(2, 2), poolmode="max", non_linear="tanh"):
    """
    Allocate a LeNetConvPoolLayer with shared variable internal parameters.

    :type rng: numpy.random.RandomState
    :param rng: a random number generator used to initialize weights

    :type input: theano.tensor.dtensor4
    :param input: symbolic image tensor, of shape image_shape

    :type filter_shape: tuple or list of length 4
    :param filter_shape: (number of filters, num input feature maps,
                          filter height, filter width)

    :type image_shape: tuple or list of length 4
    :param image_shape: (batch size, num input feature maps,
                         image height, image width)

    :type poolsize: tuple or list of length 2
    :param poolsize: the downsampling (pooling) factor (#rows, #cols)
    """
    assert image_shape[1] == filter_shape[1]
    self.input = input
    self.filter_shape = filter_shape
    self.image_shape = image_shape
    self.poolsize = poolsize
    self.non_linear = non_linear
    self.poolmode = poolmode
    # there are "num input feature maps * filter height * filter width"
    # inputs to each hidden unit
    fan_in = numpy.prod(filter_shape[1:])
    # each unit in the lower layer receives a gradient from:
    # "num output feature maps * filter height * filter width" / pooling size
    fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) / numpy.prod(poolsize))
    # initialize weights with random weights
    if self.non_linear == "none" or self.non_linear == "relu":
        self.W = theano.shared(numpy.asarray(rng.uniform(low=-0.01, high=0.01, size=filter_shape),
                                             dtype=theano.config.floatX),
                               borrow=True, name="W_conv")
    else:
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(numpy.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                                             dtype=theano.config.floatX),
                               borrow=True, name="W_conv")
    b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
    self.b = theano.shared(value=b_values, borrow=True, name="b_conv")
    # convolve input feature maps with filters
    conv_out = conv.conv2d(input=input, filters=self.W,
                           filter_shape=self.filter_shape, image_shape=self.image_shape)
    if self.non_linear == "tanh":
        conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize,
                                             ignore_border=True, mode=self.poolmode)
    elif self.non_linear == "relu":
        conv_out_relu = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output = downsample.max_pool_2d(input=conv_out_relu, ds=self.poolsize,
                                             ignore_border=True, mode=self.poolmode)
    else:
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize,
                                            ignore_border=True, mode=self.poolmode)
        self.output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    self.params = [self.W, self.b]
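To make the initialization bound concrete (my own worked numbers, not part of the original layer): with filter_shape=(16, 6, 7, 7) and poolsize=(2, 2), the tanh branch draws W uniformly from [-W_bound, W_bound] where:

fan_in = 6 * 7 * 7                    # 294 inputs per hidden unit
fan_out = 16 * 7 * 7 / 4.             # 196; pooling spreads each gradient over 4 units
W_bound = (6. / (294 + 196)) ** 0.5   # about 0.1107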
Example 12: model
def model(X, w, w2, w3, w4, w_o, p_drop_conv, p_drop_hidden):
    # conv + ReLU + pool
    # border_mode='full' zero-pads the input; the default is 'valid'
    l1a = rectify(conv2d(X, w, border_mode='full'))
    # pool with a 2x2 window, keeping the maximum of each window
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)
    # conv + ReLU + pool
    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = dropout(l2, p_drop_conv)
    # conv + ReLU + pool
    l3a = rectify(conv2d(l2, w3))
    l3b = max_pool_2d(l3a, (2, 2))
    # flatten to 2-D before the fully-connected layer; any dimensions
    # beyond the second are collapsed
    l3 = T.flatten(l3b, outdim=2)
    l3 = dropout(l3, p_drop_conv)
    # FC + ReLU
    l4 = rectify(T.dot(l3, w4))
    l4 = dropout(l4, p_drop_hidden)
    # output layer + softmax
    pyx = softmax(T.dot(l4, w_o))
    return l1, l2, l3, l4, pyx
Example 13: pool2d
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    if border_mode == 'same':
        # TODO: add implementation for border_mode="same"
        raise Exception('border_mode="same" not supported with Theano.')
    elif border_mode == 'valid':
        ignore_border = True
        padding = (0, 0)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))

    if pool_mode == 'max':
        pool_out = downsample.max_pool_2d(x, ds=pool_size, st=strides,
                                          ignore_border=ignore_border,
                                          padding=padding,
                                          mode='max')
    elif pool_mode == 'avg':
        pool_out = downsample.max_pool_2d(x, ds=pool_size, st=strides,
                                          ignore_border=ignore_border,
                                          padding=padding,
                                          mode='average_exc_pad')
    else:
        raise Exception('Invalid pooling mode: ' + str(pool_mode))

    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
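A minimal usage sketch for this wrapper (my own example, assuming a channels-first 'th' input):

import numpy
import theano
import theano.tensor as T

x = T.tensor4('x')
y = pool2d(x, pool_size=(2, 2), strides=(2, 2), pool_mode='max')
f = theano.function([x], y)
out = f(numpy.zeros((1, 3, 8, 8), dtype='float32'))  # result has shape (1, 3, 4, 4)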
Example 14: pool2d
def pool2d(x, pool_size, strides=(1, 1), border_mode="valid", dim_ordering="th", pool_mode="max"):
    if border_mode == "same":
        # TODO: add implementation for border_mode="same"
        raise Exception('border_mode="same" not supported with Theano.')
    elif border_mode == "valid":
        ignore_border = True
        padding = (0, 0)
    else:
        raise Exception("Invalid border mode: " + str(border_mode))

    if dim_ordering not in {"th", "tf"}:
        raise Exception("Unknown dim_ordering " + str(dim_ordering))
    if dim_ordering == "tf":
        x = x.dimshuffle((0, 3, 1, 2))

    if pool_mode == "max":
        pool_out = downsample.max_pool_2d(
            x, ds=pool_size, st=strides, ignore_border=ignore_border, padding=padding, mode="max"
        )
    elif pool_mode == "avg":
        pool_out = downsample.max_pool_2d(
            x, ds=pool_size, st=strides, ignore_border=ignore_border, padding=padding, mode="average_exc_pad"
        )
    else:
        raise Exception("Invalid pooling mode: " + str(pool_mode))

    if dim_ordering == "tf":
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
Example 15: testmodel
def testmodel(X, w, w2, w3, w_o, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w, border_mode='valid'))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    l2a = rectify(conv2d(l1, w2))
    l2b = max_pool_2d(l2a, (2, 2))
    l2 = T.flatten(l2b, outdim=2)
    l2 = dropout(l2, p_drop_conv)

    l3 = rectify(T.dot(l2, w3))
    l3 = dropout(l3, p_drop_hidden)

    pyx = softmax(T.dot(l3, w_o))

    # l3a = rectify(conv2d(l2, w3))
    # l3b = max_pool_2d(l3a, (2, 2))
    # l3 = T.flatten(l3b, outdim=2)
    # l3 = dropout(l3, p_drop_conv)
    # problem happening here
    # l4 = rectify(T.dot(l3, w4))
    # l4 = dropout(l4, p_drop_hidden)
    # pyx = softmax(T.dot(l4, w_o))
    return l1, l2, l3, pyx