This article collects typical usage examples of the pool_2d function from the Python module theano.tensor.signal.pool. If you have been wondering what pool_2d does, how to call it, and how it is used in practice, the curated examples below should help.
The sections below present 15 code examples of pool_2d, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
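Before diving into the examples, here is a minimal, self-contained sketch of a typical pool_2d call. It assumes the older Theano argument names (ds, st, padding) that most of the examples below use; newer Theano releases renamed these to ws, stride, and pad:

import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import pool

# Symbolic 4D input: (batch, channels, height, width)
x = T.ftensor4('x')
# 2x2 max pooling with stride 2; ignore_border=True drops incomplete
# windows at the edges
y = pool.pool_2d(x, ds=(2, 2), ignore_border=True, st=(2, 2), mode='max')
f = theano.function([x], y)

img = numpy.random.rand(1, 1, 4, 4).astype('float32')
print(f(img).shape)  # -> (1, 1, 2, 2)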
Example 1: test_pooling_with_tensor_vars
def test_pooling_with_tensor_vars(self):
    x = tensor.ftensor4()
    window_size = tensor.ivector()
    stride = tensor.ivector()
    padding = tensor.ivector()
    data = numpy.random.normal(0, 1, (1, 1, 5, 5)).astype('float32')

    # checking variable params vs fixed params
    for ignore_border in [True, False]:
        for mode in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:
            y = pool_2d(x, window_size, ignore_border, stride, padding,
                        mode)
            dx = theano.gradient.grad(y.sum(), x)
            var_fct = theano.function([x, window_size, stride, padding],
                                      [y, dx])
            for ws in (4, 2, 5):
                for st in (2, 3):
                    for pad in (0, 1):
                        if (pad > st or st > ws or
                                (pad != 0 and not ignore_border) or
                                (mode == 'average_exc_pad' and pad != 0)):
                            continue
                        y = pool_2d(x, (ws, ws), ignore_border, (st, st),
                                    (pad, pad), mode)
                        dx = theano.gradient.grad(y.sum(), x)
                        fix_fct = theano.function([x], [y, dx])
                        var_y, var_dx = var_fct(data, (ws, ws), (st, st),
                                                (pad, pad))
                        fix_y, fix_dx = fix_fct(data)
                        utt.assert_allclose(var_y, fix_y)
                        utt.assert_allclose(var_dx, fix_dx)
Example 2: test_pooling_opt
def test_pooling_opt():
    if not dnn.dnn_available(test_ctx_name):
        raise SkipTest(dnn.dnn_available.msg)
    x = T.fmatrix()

    f = theano.function(
        [x],
        pool_2d(x, ds=(2, 2), mode='average_inc_pad',
                ignore_border=True),
        mode=mode_with_gpu)
    assert any([isinstance(n.op, dnn.GpuDnnPool)
                for n in f.maker.fgraph.toposort()])
    f(numpy.zeros((10, 10), dtype='float32'))

    f = theano.function(
        [x],
        T.grad(pool_2d(x, ds=(2, 2), mode='average_inc_pad',
                       ignore_border=True).sum(),
               x),
        mode=mode_with_gpu.including("cudnn"))
    assert any([isinstance(n.op, dnn.GpuDnnPoolGrad)
                for n in f.maker.fgraph.toposort()])
    f(numpy.zeros((10, 10), dtype='float32'))
Example 3: pool2d
def pool2d(x, pool_size, strides=(1, 1), border_mode="valid",
           dim_ordering=_IMAGE_DIM_ORDERING, pool_mode="max"):
    if border_mode == "same":
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        padding = (w_pad, h_pad)
    elif border_mode == "valid":
        padding = (0, 0)
    else:
        raise Exception("Invalid border mode: " + str(border_mode))

    if dim_ordering not in {"th", "tf"}:
        raise Exception("Unknown dim_ordering " + str(dim_ordering))
    if dim_ordering == "tf":
        x = x.dimshuffle((0, 3, 1, 2))

    if pool_mode == "max":
        pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                ignore_border=True, padding=padding, mode="max")
    elif pool_mode == "avg":
        pool_out = pool.pool_2d(
            x, ds=pool_size, st=strides, ignore_border=True,
            padding=padding, mode="average_exc_pad"
        )
    else:
        raise Exception("Invalid pooling mode: " + str(pool_mode))

    if border_mode == "same":
        expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
        expected_height = (x.shape[3] + strides[1] - 1) // strides[1]
        pool_out = pool_out[:, :, :expected_width, :expected_height]

    if dim_ordering == "tf":
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
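To see what the "same" branch computes, take a hypothetical 5x5 input with pool_size=(3, 3) and strides=(1, 1): the window is odd, so w_pad = h_pad = 3 - 2 = 1, and the final slice crops the padded result back to ceil(5 / 1) = 5 rows and columns. A sketch of the equivalent direct pool_2d call (old Theano argument names assumed):

import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import pool

x = T.ftensor4('x')
# padding=(1, 1) because the 3x3 window is odd
y = pool.pool_2d(x, ds=(3, 3), st=(1, 1), padding=(1, 1),
                 ignore_border=True, mode='max')
y = y[:, :, :5, :5]  # crop to ceil(5 / 1) = 5, as the 'same' branch does
f = theano.function([x], y)
print(f(numpy.zeros((1, 1, 5, 5), dtype='float32')).shape)  # (1, 1, 5, 5)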
Example 4: __init__
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2), non_linear="tanh"):
    """
    Allocate a LeNetConvPoolLayer with shared variable internal parameters.

    :type rng: np.random.RandomState
    :param rng: a random number generator used to initialize weights

    :type input: theano.tensor.dtensor4
    :param input: symbolic image tensor, of shape image_shape

    :type filter_shape: tuple or list of length 4
    :param filter_shape: (number of filters, num input feature maps, filter height, filter width)

    :type image_shape: tuple or list of length 4
    :param image_shape: (batch size, num input feature maps, image height, image width)

    :type poolsize: tuple or list of length 2
    :param poolsize: the downsampling (pooling) factor (#rows, #cols)
    """
    assert image_shape[1] == filter_shape[1]
    self.input = input
    self.filter_shape = filter_shape
    self.image_shape = image_shape
    self.poolsize = poolsize
    self.non_linear = non_linear
    self.output_shape = (image_shape[0], filter_shape[0],
                         int((image_shape[2] - filter_shape[2] + 1) / poolsize[0]),
                         int((image_shape[3] - filter_shape[3] + 1) / poolsize[1]))
    # there are "num input feature maps * filter height * filter width"
    # inputs to each hidden unit
    fan_in = np.prod(filter_shape[1:])
    # each unit in the lower layer receives a gradient from:
    # "num output feature maps * filter height * filter width" / pooling size
    fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize))
    # initialize weights with random weights
    if self.non_linear == "none" or self.non_linear == "relu":
        self.W = theano.shared(np.asarray(rng.uniform(low=-0.01, high=0.01, size=filter_shape),
                                          dtype=theano.config.floatX),
                               borrow=True, name="W_conv")
    else:
        W_bound = np.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(np.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                                          dtype=theano.config.floatX),
                               borrow=True, name="W_conv")
    b_values = np.zeros((self.output_shape[1],
                         image_shape[2] - filter_shape[2] + 1,
                         image_shape[3] - filter_shape[3] + 1),
                        dtype=theano.config.floatX)
    self.b = theano.shared(value=b_values, borrow=True, name="b_conv")
    # convolve input feature maps with filters
    self.conv_out = conv.conv2d(input=input, filters=self.W,
                                filter_shape=self.filter_shape, image_shape=self.image_shape)
    if self.non_linear == "tanh":
        self.conv_out_tanh = T.tanh(self.conv_out + self.b)
        self.output = pool.pool_2d(input=self.conv_out_tanh, ds=self.poolsize, ignore_border=True)
    elif self.non_linear == "relu":
        self.conv_out_tanh = ReLU(self.conv_out + self.b)
        self.output = pool.pool_2d(input=self.conv_out_tanh, ds=self.poolsize, ignore_border=True)
    else:
        pooled_out = pool.pool_2d(input=self.conv_out, ds=self.poolsize, ignore_border=True)
        self.output = pooled_out + self.b
    self.params = [self.W, self.b]
    self.L2 = (self.W ** 2).sum()
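As a concrete shape check (illustrative numbers, not taken from the source): with image_shape=(50, 1, 28, 28), filter_shape=(20, 1, 5, 5), and poolsize=(2, 2), the valid convolution produces 24x24 feature maps (28 - 5 + 1 = 24) and pooling halves each spatial dimension, so output_shape evaluates to (50, 20, 12, 12). Note also that, unusually, this layer's bias b holds one value per output pixel of the convolution (shape (20, 24, 24) in this example) rather than one value per feature map.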
Example 5: model
def model(X, params, pDropConv, pDropHidden):
    lnum = 0  # conv: (32, 32) pool: (16, 16)
    layer = nin(X, params[lnum])
    layer = pool_2d(layer, (2, 2), st=(2, 2), ignore_border=False, mode='max')
    layer = basicUtils.dropout(layer, pDropConv)
    lnum += 1  # conv: (16, 16) pool: (8, 8)
    layer = nin(layer, params[lnum])
    layer = pool_2d(layer, (2, 2), st=(2, 2), ignore_border=False, mode='max')
    layer = basicUtils.dropout(layer, pDropConv)
    lnum += 1  # conv: (8, 8) pool: (4, 4)
    layer = nin(layer, params[lnum])
    layer = pool_2d(layer, (2, 2), st=(2, 2), ignore_border=False, mode='max')
    layer = basicUtils.dropout(layer, pDropConv)
    # fully connected layers (disabled)
    # layer = T.flatten(layer, outdim=2)
    # lnum += 1
    # layer = fc(layer, params[lnum])
    # layer = utils.dropout(layer, pDropHidden)
    # lnum += 1
    # layer = fc(layer, params[lnum])
    # global average pooling
    lnum += 1
    layer = conv1t1(layer, params[lnum])
    layer = basicUtils.dropout(layer, pDropHidden)
    lnum += 1
    layer = conv1t1(layer, params[lnum])
    layer = gap(layer)
    return softmax(layer)  # the softmax from nnet produces NaN during training
Example 6: model
def model(X, params, featMaps, pieces, pDropConv, pDropHidden):
    lnum = 0  # conv: (32, 32) pool: (16, 16)
    layer = conv2d(X, params[lnum][0], border_mode='half') + \
        params[lnum][1].dimshuffle('x', 0, 'x', 'x')
    layer = maxout(layer, featMaps[lnum], pieces[lnum])
    layer = pool_2d(layer, (2, 2), st=(2, 2), ignore_border=False, mode='max')
    layer = basicUtils.dropout(layer, pDropConv)
    lnum += 1  # conv: (16, 16) pool: (8, 8)
    layer = conv2d(layer, params[lnum][0], border_mode='half') + \
        params[lnum][1].dimshuffle('x', 0, 'x', 'x')
    layer = maxout(layer, featMaps[lnum], pieces[lnum])
    layer = pool_2d(layer, (2, 2), st=(2, 2), ignore_border=False, mode='max')
    layer = basicUtils.dropout(layer, pDropConv)
    lnum += 1  # conv: (8, 8) pool: (4, 4)
    layer = conv2d(layer, params[lnum][0], border_mode='half') + \
        params[lnum][1].dimshuffle('x', 0, 'x', 'x')
    layer = maxout(layer, featMaps[lnum], pieces[lnum])
    layer = pool_2d(layer, (2, 2), st=(2, 2), ignore_border=False, mode='max')
    layer = basicUtils.dropout(layer, pDropConv)
    lnum += 1
    layer = T.flatten(layer, outdim=2)
    layer = T.dot(layer, params[lnum][0]) + params[lnum][1].dimshuffle('x', 0)
    layer = relu(layer, alpha=0)
    layer = basicUtils.dropout(layer, pDropHidden)
    lnum += 1
    layer = T.dot(layer, params[lnum][0]) + params[lnum][1].dimshuffle('x', 0)
    layer = relu(layer, alpha=0)
    layer = basicUtils.dropout(layer, pDropHidden)
    lnum += 1
    # the softmax from nnet produces NaN during training
    return softmax(T.dot(layer, params[lnum][0]) + params[lnum][1].dimshuffle('x', 0))
Example 7: test_old_pool_interface
def test_old_pool_interface(self):
    if sys.version_info[0] != 3:
        # Only tested with python 3 because of pickling issues.
        raise SkipTest('Skip old pool interface with python 2.x')
    # 1. Load the old version
    testfile_dir = os.path.dirname(os.path.realpath(__file__))
    fname = 'old_pool_interface.pkl'
    with open(os.path.join(testfile_dir, fname), 'rb') as fp:
        try:
            old_fct = cPickle.load(fp, encoding='latin1')
        except ImportError:
            # Windows sometimes fails with nonsensical errors like:
            #   ImportError: No module named type
            #   ImportError: No module named copy_reg
            # when "type" and "copy_reg" are builtin modules.
            if sys.platform == 'win32':
                exc_type, exc_value, exc_trace = sys.exc_info()
                reraise(SkipTest, exc_value, exc_trace)
            raise
    # 2. Create the new version
    x = theano.tensor.ftensor4()
    y = pool_2d(x, (2, 2), mode='max', ignore_border=True)
    z = pool_2d(x, (2, 2), mode='average_exc_pad', ignore_border=True)
    dy_dx = theano.gradient.grad(y.sum(), x)
    dz_dx = theano.gradient.grad(z.sum(), x)
    new_fct = theano.function([x], [y, z, dy_dx, dz_dx])
    # 3. Assert that the answer is the same
    rng = numpy.random.RandomState(utt.fetch_seed())
    image_val = rng.rand(4, 6, 7, 9).astype(numpy.float32)
    old_out = old_fct(image_val)
    new_out = new_fct(image_val)
    for o, n in zip(old_out, new_out):
        utt.assert_allclose(o, n)
Example 8: CNN
def CNN(x, c_l1, c_l2, f_l1, f_l2, insize):
    print "in size ", insize
    conv1 = tensor.nnet.relu(conv2d(x, c_l1))  # default stride=1 --subsample=(1,1)
    conv1_shp = get_conv_output_shape(insize, c_l1.get_value().shape,
                                      border_mode='valid', subsample=(1, 1))
    print "conv1 size ", conv1_shp
    pool1 = pool_2d(conv1, (3, 3), st=(3, 3), ignore_border=True)  # default maxpool
    pool1_shp = get_pool_output_shape(conv1_shp, pool_size=(3, 3), st=(3, 3),
                                      ignore_border=True)
    print "pool1 size ", pool1_shp
    lrn1 = LRN(pool1, pool1_shp)
    lrn1_shp = tuple(pool1_shp)
    print "cross map norm1 size ", lrn1_shp
    conv2 = tensor.nnet.relu(conv2d(lrn1, c_l2))
    conv2_shp = get_conv_output_shape(lrn1_shp, c_l2.get_value().shape,
                                      border_mode='valid', subsample=(1, 1))
    print "conv2 size ", conv2_shp
    pool2 = pool_2d(conv2, (2, 2), st=(2, 2), ignore_border=True)
    pool2_shp = get_pool_output_shape(conv2_shp, pool_size=(2, 2), st=(2, 2),
                                      ignore_border=True)
    print "pool2 size ", pool2_shp
    lrn2 = LRN(pool2, pool2_shp)
    lrn2_shp = tuple(pool2_shp)
    print "cross map norm2 size ", lrn2_shp
    fpool2 = tensor.flatten(lrn2, outdim=2)
    full1 = tensor.nnet.relu(tensor.dot(fpool2, f_l1))
    pyx = tensor.nnet.sigmoid(tensor.dot(full1, f_l2))
    return c_l1, c_l2, f_l1, f_l2, pyx
Example 9: pool3d
def pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    if border_mode == 'same':
        # TODO: add implementation for border_mode="same"
        raise Exception('border_mode="same" not supported with Theano.')
    elif border_mode == 'valid':
        ignore_border = True
        padding = (0, 0)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 4, 1, 2, 3))

    if pool_mode == 'max':
        # pooling over conv_dim2, conv_dim1 (the last two dimensions
        # after the dimshuffle)
        output = pool.pool_2d(input=x.dimshuffle(0, 1, 4, 3, 2),
                              ds=(pool_size[1], pool_size[0]),
                              st=(strides[1], strides[0]),
                              ignore_border=ignore_border,
                              padding=padding,
                              mode='max')
        # pooling over conv_dim3
        pool_out = pool.pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2),
                                ds=(1, pool_size[2]),
                                st=(1, strides[2]),
                                ignore_border=ignore_border,
                                padding=padding,
                                mode='max')
    elif pool_mode == 'avg':
        # pooling over conv_dim2, conv_dim1 (the last two dimensions
        # after the dimshuffle)
        output = pool.pool_2d(input=x.dimshuffle(0, 1, 4, 3, 2),
                              ds=(pool_size[1], pool_size[0]),
                              st=(strides[1], strides[0]),
                              ignore_border=ignore_border,
                              padding=padding,
                              mode='average_exc_pad')
        # pooling over conv_dim3
        pool_out = pool.pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2),
                                ds=(1, pool_size[2]),
                                st=(1, strides[2]),
                                ignore_border=ignore_border,
                                padding=padding,
                                mode='average_exc_pad')
    else:
        raise Exception('Invalid pooling mode: ' + str(pool_mode))

    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 4, 1))
    return pool_out
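Since pool_2d only reduces the last two axes of its input, the function above simulates 3D pooling in two passes: the first dimshuffle(0, 1, 4, 3, 2) brings conv_dim1 and conv_dim2 into the last two positions, and the second swaps them back while exposing conv_dim3, which a (1, pool_size[2]) window then reduces. A quick shape check with illustrative values (the ftensor5 constructor is hand-built here, since classic Theano ships no 5D tensor alias):

import numpy
import theano
import theano.tensor as T

ftensor5 = T.TensorType('float32', (False,) * 5)
x = ftensor5('x')
out = pool3d(x, pool_size=(2, 2, 2), strides=(2, 2, 2))
f = theano.function([x], out)
# (2, 3, 8, 8, 8) pooled by 2 along each spatial axis -> (2, 3, 4, 4, 4)
print(f(numpy.zeros((2, 3, 8, 8, 8), dtype='float32')).shape)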
Example 10: CNN
def CNN(x, c_l1, c_l2, f_l1, f_l2):
    conv1 = tensor.nnet.relu(conv2d(x, c_l1))  # default stride=1 --subsample=(1,1)
    pool1 = pool_2d(conv1, (2, 2), st=(2, 2), ignore_border=True)  # default maxpool
    conv2 = tensor.nnet.relu(conv2d(pool1, c_l2))
    pool2 = pool_2d(conv2, (2, 2), st=(2, 2), ignore_border=True)
    fpool2 = tensor.flatten(pool2, outdim=2)
    full1 = tensor.nnet.relu(tensor.dot(fpool2, f_l1))
    pyx = tensor.nnet.sigmoid(tensor.dot(full1, f_l2))
    return c_l1, c_l2, f_l1, f_l2, pyx
Example 11: modelFlow
def modelFlow(X, params):
    lconv1 = relu(conv2d(X, params[0][0], border_mode='full') +
                  params[0][1].dimshuffle('x', 0, 'x', 'x'))
    lds1 = pool_2d(lconv1, (2, 2))
    lconv2 = relu(conv2d(lds1, params[1][0]) +
                  params[1][1].dimshuffle('x', 0, 'x', 'x'))
    lds2 = pool_2d(lconv2, (2, 2))
    lconv3 = relu(conv2d(lds2, params[2][0]) +
                  params[2][1].dimshuffle('x', 0, 'x', 'x'))
    lds3 = pool_2d(lconv3, (2, 2))
    return X, lconv1, lds1, lconv2, lds2, lconv3, lds3
Example 12: run_test
def run_test(direction='forward'):
    print('=' * 60)
    print('generate relu_pool graph before and after opt for %s pass' % direction)
    x = T.ftensor4('x')
    maxpoolshp = (2, 2)
    ignore_border = False
    mode = 'max'
    imval = np.random.rand(4, 2, 16, 16).astype(np.float32)

    reluOut = T.nnet.relu(x)
    poolOut = pool.pool_2d(reluOut, maxpoolshp, ignore_border, mode=mode)
    if direction == 'forward':
        theano.printing.pydotprint(poolOut, outfile="relu_pool_before_opt.png",
                                   var_with_name_simple=True)
        f = theano.function(inputs=[x], outputs=[poolOut])
        theano.printing.pydotprint(f, outfile="relu_pool_after_opt.png",
                                   var_with_name_simple=True)
        f(imval)
    elif direction == 'backward':
        poolSum = T.sum(poolOut)
        poolBackward = T.grad(poolSum, [x])
        theano.printing.pydotprint(poolBackward,
                                   outfile="relu_poolBackward_before_opt.png",
                                   var_with_name_simple=True)
        f = theano.function(inputs=[x], outputs=poolBackward)
        theano.printing.pydotprint(f,
                                   outfile="relu_poolBackward_after_opt.png",
                                   var_with_name_simple=True)
        f(imval)
    else:
        print("Invalid direction, only forward or backward allowed!")
Example 13: pool2d
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    # ====== dim ordering ====== #
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))
    # ====== border mode ====== #
    if border_mode == 'same':
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        padding = (w_pad, h_pad)
    elif border_mode == 'valid':
        padding = (0, 0)
    elif isinstance(border_mode, (tuple, list)):
        padding = tuple(border_mode)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))
    # ====== pooling ====== #
    if _on_gpu() and dnn.dnn_available():
        pool_out = dnn.dnn_pool(x, pool_size,
                                stride=strides,
                                mode=pool_mode,
                                pad=padding)
    else:  # CPU version supported by Theano
        pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                ignore_border=True,
                                padding=padding,
                                mode=pool_mode)
    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
Example 14: test_max_pool_2d_2D
def test_max_pool_2d_2D(self):
    rng = numpy.random.RandomState(utt.fetch_seed())
    maxpoolshps = ((1, 1), (3, 2))
    imval = rng.rand(4, 5)
    images = tensor.dmatrix()

    for maxpoolshp, ignore_border, mode in product(maxpoolshps,
                                                   [True, False],
                                                   ['max', 'sum',
                                                    'average_inc_pad',
                                                    'average_exc_pad']):
        # print 'maxpoolshp =', maxpoolshp
        # print 'ignore_border =', ignore_border
        numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp,
                                                  ignore_border,
                                                  mode=mode)
        output = pool_2d(images, maxpoolshp, ignore_border,
                         mode=mode)
        output_val = function([images], output)(imval)
        utt.assert_allclose(output_val, numpy_output_val)

        def mp(input):
            return pool_2d(input, maxpoolshp, ignore_border,
                           mode=mode)
        utt.verify_grad(mp, [imval], rng=rng)
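The self.numpy_max_pool_2d helper used above belongs to the test class and is not reproduced on this page. For readers who want a reference to compare against, here is a minimal NumPy sketch covering only the mode='max', ignore_border=True case (an illustration, not the actual Theano test helper):

import numpy

def numpy_max_pool_2d_ref(x, ws):
    # Max-pool the last two axes of x over non-overlapping ws windows,
    # dropping incomplete edge windows (the ignore_border=True behaviour
    # with the default stride st=ws).
    h_out = x.shape[-2] // ws[0]
    w_out = x.shape[-1] // ws[1]
    out = numpy.empty(x.shape[:-2] + (h_out, w_out), dtype=x.dtype)
    for i in range(h_out):
        for j in range(w_out):
            window = x[..., i * ws[0]:(i + 1) * ws[0],
                       j * ws[1]:(j + 1) * ws[1]]
            out[..., i, j] = window.max(axis=(-2, -1))
    return out

# A 4x5 input with a (3, 2) window has one complete row band and two
# complete column bands, giving a 1x2 output.
print(numpy_max_pool_2d_ref(numpy.arange(20.).reshape(4, 5), (3, 2)))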
Example 15: model
def model(X, params, pDropConv, pDropHidden):
    lnum = 0  # conv: (32, 32) pool: (16, 16)
    layer = nin(X, params[lnum])
    layer = pool_2d(layer, (2, 2), st=(2, 2), ignore_border=False, mode='max')
    layer = basicUtils.dropout(layer, pDropConv)
    lnum += 1  # conv: (16, 16) pool: (8, 8)
    layer = nin(layer, params[lnum])
    layer = pool_2d(layer, (2, 2), st=(2, 2), ignore_border=False, mode='max')
    layer = basicUtils.dropout(layer, pDropConv)
    lnum += 1  # conv: (8, 8) pool: (4, 4)
    layer = nin(layer, params[lnum])
    layer = pool_2d(layer, (2, 2), st=(2, 2), ignore_border=False, mode='max')
    layer = basicUtils.dropout(layer, pDropConv)
    lnum += 1
    layer = gap(layer, params[lnum])
    return softmax(layer)  # the softmax from nnet produces NaN during training