This article collects typical usage examples of the Python method theano.tensor.signal.downsample.DownsampleFactorMax.out_shape. If you have been struggling with questions such as what DownsampleFactorMax.out_shape does, how to use it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples of the class it belongs to, theano.tensor.signal.downsample.DownsampleFactorMax.
The following presents 13 code examples of the DownsampleFactorMax.out_shape method, sorted by popularity by default.
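Before the examples, here is a minimal sketch of the calling convention. It assumes an old Theano release that still ships theano.tensor.signal.downsample (later releases moved pooling to theano.tensor.signal.pool). out_shape is a static method, so it can be applied to plain shape tuples without building a symbolic graph:
# Minimal sketch (not from the examples below): out_shape is a static
# method, so it works on concrete shape tuples without a symbolic graph.
from theano.tensor.signal.downsample import DownsampleFactorMax

shape = DownsampleFactorMax.out_shape(
    (4, 2, 16, 16),      # imgshape: (batch, channels, rows, cols)
    (2, 2),              # ds: pooling window size
    ignore_border=True,  # drop partial pooling windows at the border
    st=(2, 2),           # st: stride; defaults to ds when omitted
)
# With ignore_border=True, each pooled dimension becomes
# (size + 2 * padding - ds) // st + 1, so shape is (4, 2, 8, 8) here.
print tuple(shape)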
Example 1: get_dim
# Module to import: from theano.tensor.signal.downsample import DownsampleFactorMax [as alias]
# Or: from theano.tensor.signal.downsample.DownsampleFactorMax import out_shape [as alias]
def get_dim(self, name):
    if name == 'input_':
        return self.input_dim
    if name == 'output':
        return tuple(DownsampleFactorMax.out_shape(self.input_dim,
                                                   self.pooling_size,
                                                   st=self.step))
Example 2: test_DownsampleFactorMaxGrad_grad_st_extra
# Module to import: from theano.tensor.signal.downsample import DownsampleFactorMax [as alias]
# Or: from theano.tensor.signal.downsample.DownsampleFactorMax import out_shape [as alias]
def test_DownsampleFactorMaxGrad_grad_st_extra(self):
    """checks the gradient of the gradient for the case that
    stride is used for extra examples"""
    rng = numpy.random.RandomState(utt.fetch_seed())
    maxpoolshps = ((5, 3), (5, 3), (5, 3), (5, 5), (3, 2), (7, 7), (9, 9))
    stridesizes = ((3, 2), (7, 5), (10, 6), (1, 1), (2, 3), (10, 10), (1, 1))
    imvsizs = ((16, 16), (16, 16), (16, 16), (8, 5), (8, 5), (8, 5), (8, 5))
    for indx in numpy.arange(len(maxpoolshps)):
        imvsize = imvsizs[indx]
        imval = rng.rand(1, 2, imvsize[0], imvsize[1])
        stride = stridesizes[indx]
        maxpoolshp = maxpoolshps[indx]
        for ignore_border in [True, False]:
            grad_shape = DownsampleFactorMax.out_shape(
                imval.shape, maxpoolshp, ignore_border=ignore_border,
                st=stride)
            grad_val = rng.rand(*grad_shape)

            def mp(input, grad):
                out = DownsampleFactorMax(
                    maxpoolshp, ignore_border=ignore_border,
                    st=stride)(input)
                grad_op = DownsampleFactorMaxGrad(
                    maxpoolshp, ignore_border=ignore_border, st=stride)
                return grad_op(input, out, grad)

            # skip the grad verification when the output is empty
            if numpy.prod(grad_shape) == 0:
                continue
            utt.verify_grad(mp, [imval, grad_val], rng=rng)
Example 3: test_AveragePoolPaddingStride_grad_grad
# Module to import: from theano.tensor.signal.downsample import DownsampleFactorMax [as alias]
# Or: from theano.tensor.signal.downsample.DownsampleFactorMax import out_shape [as alias]
def test_AveragePoolPaddingStride_grad_grad(self):
    rng = numpy.random.RandomState(utt.fetch_seed())
    imgsizes = ((10, 10), (10, 5), (5, 5))
    avgpoolsizes = ((5, 3), (3, 5), (3, 3))
    stridesizes = ((3, 2), (2, 3), (3, 3))
    paddingsizes = ((2, 2), (2, 1), (2, 2))
    for i in range(len(imgsizes)):
        imgsize = imgsizes[i]
        imval = rng.rand(1, 1, imgsize[0], imgsize[1]) * 10.0
        avgpoolsize = avgpoolsizes[i]
        stridesize = stridesizes[i]
        paddingsize = paddingsizes[i]
        # 'average_exc_pad' with non-zero padding is not implemented
        for mode in ['sum', 'average_inc_pad']:
            grad_shape = DownsampleFactorMax.out_shape(
                imval.shape, avgpoolsize, st=stridesize,
                ignore_border=True, padding=paddingsize)
            grad_val = rng.rand(*grad_shape) * 10.0

            def mp(input, grad):
                grad_op = AveragePoolGrad(avgpoolsize, ignore_border=True,
                                          st=stridesize, padding=paddingsize,
                                          mode=mode)
                return grad_op(input, grad)

            utt.verify_grad(mp, [imval, grad_val], rng=rng)
Example 4: test_DownsampleFactorMaxGrad_grad_st
# Module to import: from theano.tensor.signal.downsample import DownsampleFactorMax [as alias]
# Or: from theano.tensor.signal.downsample.DownsampleFactorMax import out_shape [as alias]
def test_DownsampleFactorMaxGrad_grad_st(self):
    """checks the gradient of the gradient for
    the case that stride is used"""
    rng = numpy.random.RandomState(utt.fetch_seed())
    maxpoolshps = ((1, 1), (3, 3), (5, 3))
    stridesizes = ((1, 1), (3, 3), (5, 7))
    imval = rng.rand(1, 2, 16, 16)
    for maxpoolshp in maxpoolshps:
        for ignore_border in [True, False]:
            for stride in stridesizes:
                grad_shape = DownsampleFactorMax.out_shape(
                    imval.shape, maxpoolshp,
                    ignore_border=ignore_border, st=stride)
                grad_val = rng.rand(*grad_shape)

                def mp(input, grad):
                    out = DownsampleFactorMax(
                        maxpoolshp, ignore_border=ignore_border,
                        st=stride)(input)
                    grad_op = MaxPoolGrad(
                        maxpoolshp, ignore_border=ignore_border,
                        st=stride)
                    return grad_op(input, out, grad)

                utt.verify_grad(mp, [imval, grad_val], rng=rng)
Example 5: get_dim
# Module to import: from theano.tensor.signal.downsample import DownsampleFactorMax [as alias]
# Or: from theano.tensor.signal.downsample.DownsampleFactorMax import out_shape [as alias]
def get_dim(self, name):
    if name == 'input_':
        return self.input_dim
    if name == 'output':
        return tuple(DownsampleFactorMax.out_shape(
            self.input_dim, self.pooling_size, st=self.step,
            ignore_border=self.ignore_border, padding=self.padding))
Example 6: test_DownsampleFactorMaxPaddingStride_grad_grad
# Module to import: from theano.tensor.signal.downsample import DownsampleFactorMax [as alias]
# Or: from theano.tensor.signal.downsample.DownsampleFactorMax import out_shape [as alias]
def test_DownsampleFactorMaxPaddingStride_grad_grad(self):
    rng = numpy.random.RandomState(utt.fetch_seed())
    imgsizes = ((10, 10), (10, 5), (5, 5))
    maxpoolsizes = ((5, 3), (3, 5), (3, 3))
    stridesizes = ((3, 2), (2, 3), (3, 3))
    paddingsizes = ((2, 2), (2, 1), (2, 2))
    for i in range(len(imgsizes)):
        imgsize = imgsizes[i]
        imval = rng.rand(1, 1, imgsize[0], imgsize[1]) * 10.0
        maxpoolsize = maxpoolsizes[i]
        stridesize = stridesizes[i]
        paddingsize = paddingsizes[i]
        grad_shape = DownsampleFactorMax.out_shape(
            imval.shape, maxpoolsize, st=stridesize,
            ignore_border=True, padding=paddingsize)
        grad_val = rng.rand(*grad_shape) * 10.0

        def mp(input, grad):
            out = DownsampleFactorMax(
                maxpoolsize, ignore_border=True,
                st=stridesize,
                padding=paddingsize,
            )(input)
            grad_op = MaxPoolGrad(maxpoolsize, ignore_border=True,
                                  st=stridesize, padding=paddingsize)
            return grad_op(input, out, grad)

        utt.verify_grad(mp, [imval, grad_val], rng=rng)
Example 7: pool_output_shape_2d
# Module to import: from theano.tensor.signal.downsample import DownsampleFactorMax [as alias]
# Or: from theano.tensor.signal.downsample.DownsampleFactorMax import out_shape [as alias]
def pool_output_shape_2d(input_shape,
                         axes,
                         pool_shape,
                         strides,
                         pads,
                         ignore_border=True):
    """
    compute output shape for a pool
    """
    return tuple(DownsampleFactorMax.out_shape(
        imgshape=input_shape,
        ds=pool_shape,
        st=strides,
        ignore_border=ignore_border,
        padding=pads,
    ))
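A hypothetical usage note for the wrapper above (the concrete shapes are made up for illustration): out_shape always pools the two trailing dimensions of imgshape, so with the default ignore_border=True a 2x2 window and stride 2 halve the spatial extent.
# Hypothetical call: pool a (batch, channels, 32, 32) shape with 2x2
# windows and stride 2; the two trailing axes are pooled.
shape = pool_output_shape_2d(input_shape=(1, 3, 32, 32),
                             axes=(2, 3),
                             pool_shape=(2, 2),
                             strides=(2, 2),
                             pads=(0, 0))
assert shape == (1, 3, 16, 16)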
Example 8: test_DownsampleFactorMaxGrad_grad
# Module to import: from theano.tensor.signal.downsample import DownsampleFactorMax [as alias]
# Or: from theano.tensor.signal.downsample.DownsampleFactorMax import out_shape [as alias]
def test_DownsampleFactorMaxGrad_grad(self):
    rng = numpy.random.RandomState(utt.fetch_seed())
    maxpoolshps = ((1, 1), (3, 2), (2, 3))
    imval = rng.rand(2, 3, 3, 4) * 10.0
    # more variance means the numeric gradient will be more accurate
    for maxpoolshp in maxpoolshps:
        for ignore_border in [True, False]:
            # The shape of the gradient will be the shape of the output
            grad_shape = DownsampleFactorMax.out_shape(
                imval.shape, maxpoolshp, ignore_border=ignore_border)
            grad_val = rng.rand(*grad_shape) * 10.0

            def mp(input, grad):
                out = DownsampleFactorMax(
                    maxpoolshp, ignore_border=ignore_border)(input)
                grad_op = DownsampleFactorMaxGrad(
                    maxpoolshp, ignore_border=ignore_border)
                return grad_op(input, out, grad)

            utt.verify_grad(mp, [imval, grad_val], rng=rng)
Example 9: test_AveragePoolGrad_grad
# Module to import: from theano.tensor.signal.downsample import DownsampleFactorMax [as alias]
# Or: from theano.tensor.signal.downsample.DownsampleFactorMax import out_shape [as alias]
def test_AveragePoolGrad_grad(self):
    rng = numpy.random.RandomState(utt.fetch_seed())
    avgpoolshps = ((1, 1), (3, 2), (2, 3))
    imval = rng.rand(2, 3, 3, 4) * 10.0
    # more variance means the numeric gradient will be more accurate
    for avgpoolshp in avgpoolshps:
        for ignore_border in [True, False]:
            for mode in ['sum', 'average_inc_pad', 'average_exc_pad']:
                # The shape of the gradient will be the shape of the output
                grad_shape = DownsampleFactorMax.out_shape(
                    imval.shape, avgpoolshp, ignore_border=ignore_border)
                grad_val = rng.rand(*grad_shape) * 10.0

                def mp(input, grad):
                    grad_op = AveragePoolGrad(
                        avgpoolshp, ignore_border=ignore_border, mode=mode)
                    return grad_op(input, grad)

                utt.verify_grad(mp, [imval, grad_val], rng=rng)
Example 10: test_AveragePoolGrad_grad_st
# Module to import: from theano.tensor.signal.downsample import DownsampleFactorMax [as alias]
# Or: from theano.tensor.signal.downsample.DownsampleFactorMax import out_shape [as alias]
def test_AveragePoolGrad_grad_st(self):
    """checks the gradient of the gradient for
    the case that stride is used"""
    rng = numpy.random.RandomState(utt.fetch_seed())
    avgpoolshps = ((1, 1), (3, 3), (5, 3))
    stridesizes = ((1, 1), (3, 3), (5, 7))
    imval = rng.rand(1, 2, 16, 16)
    for avgpoolshp in avgpoolshps:
        for ignore_border in [True, False]:
            for mode in ['sum', 'average_inc_pad', 'average_exc_pad']:
                for stride in stridesizes:
                    grad_shape = DownsampleFactorMax.out_shape(
                        imval.shape, avgpoolshp,
                        ignore_border=ignore_border, st=stride)
                    grad_val = rng.rand(*grad_shape)

                    def mp(input, grad):
                        grad_op = AveragePoolGrad(
                            avgpoolshp, ignore_border=ignore_border,
                            st=stride, mode=mode)
                        return grad_op(input, grad)

                    utt.verify_grad(mp, [imval, grad_val], rng=rng)
Example 11: test_DownsampleFactorMax
# Module to import: from theano.tensor.signal.downsample import DownsampleFactorMax [as alias]
# Or: from theano.tensor.signal.downsample.DownsampleFactorMax import out_shape [as alias]
def test_DownsampleFactorMax(self):
    rng = numpy.random.RandomState(utt.fetch_seed())
    # generate random images
    maxpoolshps = ((1, 1), (2, 2), (3, 3), (2, 3))
    imval = rng.rand(4, 2, 16, 16)
    images = tensor.dtensor4()
    for maxpoolshp, ignore_border, mode in product(maxpoolshps,
                                                   [True, False],
                                                   ['max',
                                                    'sum',
                                                    'average_inc_pad',
                                                    'average_exc_pad']):
        # Pure Numpy computation
        numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp,
                                                  ignore_border,
                                                  mode=mode)
        output = max_pool_2d(images, maxpoolshp, ignore_border,
                             mode=mode)
        f = function([images, ], [output, ])
        output_val = f(imval)
        utt.assert_allclose(output_val, numpy_output_val)

        # DownsampleFactorMax op
        maxpool_op = DownsampleFactorMax(maxpoolshp,
                                         ignore_border=ignore_border,
                                         mode=mode)(images)
        output_shape = DownsampleFactorMax.out_shape(
            imval.shape, maxpoolshp, ignore_border=ignore_border)
        utt.assert_allclose(numpy.asarray(output_shape),
                            numpy_output_val.shape)
        f = function([images], maxpool_op)
        output_val = f(imval)
        utt.assert_allclose(output_val, numpy_output_val)
Example 12: maxpool_2d
# Module to import: from theano.tensor.signal.downsample import DownsampleFactorMax [as alias]
# Or: from theano.tensor.signal.downsample.DownsampleFactorMax import out_shape [as alias]
def maxpool_2d(z, in_dim, poolsize, poolstride):
    z = max_pool_2d(z, ds=poolsize, st=poolstride)
    output_size = tuple(DownsampleFactorMax.out_shape(in_dim, poolsize,
                                                      st=poolstride))
    return z, output_size
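A brief hypothetical usage of this helper (the variable x and the concrete in_dim are illustrative assumptions): the symbolic result feeds the next layer, while the concrete shape tuple is available for sizing that layer's weights.
# Hypothetical usage: pool a symbolic batch of 24x24 feature maps while
# tracking the concrete output shape for the next layer.
import theano.tensor as T

x = T.ftensor4('x')
pooled, out_size = maxpool_2d(x, in_dim=(64, 32, 24, 24),
                              poolsize=(2, 2), poolstride=(2, 2))
# out_size == (64, 32, 12, 12) for this window/stride combination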
Example 13: init_net
# Module to import: from theano.tensor.signal.downsample import DownsampleFactorMax [as alias]
# Or: from theano.tensor.signal.downsample.DownsampleFactorMax import out_shape [as alias]
def init_net(num_of_classes, input_len, conv_params):
    """
    The main initialization of the neural net happens in this method. You can
    adjust the convolutional window size, the number of filters, and the
    cascade parameters for every layer. The network weights are also
    initialized and defined here.
    :param num_of_classes: number of classes
    :param input_len: read (sequence chunk) length
    :param conv_params: per-layer cascade parameters, passed through to model()
    :return: weights in param variable, X and Y matrices, cost function, update function and maxima prediction
    """
    cwin1 = 4 * 6  # multiples of 4 because of the data representation
    cwin2 = 3
    cwin3 = 2
    num_filters_1 = 32 / 2  # how many different filters to learn at each layer
    num_filters_2 = 48 / 2
    num_filters_3 = 64 / 2
    # size of convolution windows; different values can be used for each layer
    w = init_weights((num_filters_1, 1, 1, cwin1))  # first convolution: num_filters_1 filters, stack size 1, 1 row, cwin1 columns
    w2 = init_weights((num_filters_2, num_filters_1, 1, cwin2))  # second convolution: num_filters_2 filters, stack size num_filters_1 (one stack for each filter from the previous layer), 1 row, cwin2 columns
    w3 = init_weights((num_filters_3, num_filters_2, 1, cwin3))  # third convolution: num_filters_3 filters, stack size num_filters_2 (one stack for each filter from the previous layer), 1 row, cwin3 columns
    print "#### CONVOLUTION PARAMETERS ####"
    print "cwin1 %d" % cwin1
    print "cwin2 %d" % cwin2
    print "cwin3 %d" % cwin3
    print "num_filters_1 %d" % num_filters_1
    print "num_filters_2 %d" % num_filters_2
    print "num_filters_3 %d" % num_filters_3
    # convolution: filters are moved one position at a time, see parameter subsample=(1, 1)
    #
    # max pooling: the input is downscaled before applying the maxpool filter,
    # and the stride sets the displacement when sliding the max pool windows
    # (conv1_stride, downscale1-3 and stride1-3 are defined elsewhere in the module)
    # l1 conv:
    es = input_len
    es = (es - cwin1 + 1)
    es = es / conv1_stride
    # l1 max_pool:
    es = DownsampleFactorMax.out_shape((1, es), (1, downscale1),
                                       st=(1, stride1))[1]  # downscale for the first layer
    print "l1 es:", es
    # l2 conv:
    es = (es - cwin2 + 1)
    # l2 max_pool:
    es = DownsampleFactorMax.out_shape((1, es), (1, downscale2),
                                       st=(1, stride2))[1]  # downscale for the second layer
    print "l2 es:", es
    # l3 conv:
    es = (es - cwin3 + 1)
    # l3 max_pool:
    es = DownsampleFactorMax.out_shape((1, es), (1, downscale3),
                                       st=(1, stride3))[1]  # downscale for the third layer
    print "l3 es:", es
    # the downscaled length es sets the fan-in of the fully connected layer
    w4 = init_weights((num_filters_3 * es, 500))  # fully connected layer: connects the outputs of the last convolutional filters to 500 (arbitrary) hidden nodes, which then connect to the output nodes
    w_o = init_weights((500, num_of_classes))  # number of expected classes
    # matrix types
    X = T.ftensor4()
    Y = T.fmatrix()
    noise_l1, noise_l2, noise_l3, noise_l4, noise_py_x = model(X, w, w2, w3, w4, 0.2, 0.5, w_o, conv_params)
    l1, l2, l3, l4, py_x = model(X, w, w2, w3, w4, 0., 0., w_o, conv_params)
    y_x = T.argmax(py_x, axis=1)  # maxima predictions
    cost = T.mean(T.nnet.categorical_crossentropy(noise_py_x, Y))  # cost to optimize: maximize the probability of the true class and minimize the others
    params = [w, w2, w3, w4, w_o]
    updates = RMSprop(cost, params, lr=0.001)  # update function
    return params, X, Y, cost, updates, y_x
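The es bookkeeping in init_net combines the valid-convolution formula (output length = input length - window + 1, divided by the convolution stride) with out_shape for the pooling layers. Below is a standalone sketch of the first layer's computation, with made-up values standing in for the module-level constants (conv1_stride, downscale1 and stride1 are not defined in the snippet above):
# Hypothetical values; in the original code these live at module level.
from theano.tensor.signal.downsample import DownsampleFactorMax

input_len = 1024
cwin1, conv1_stride = 24, 1
downscale1, stride1 = 2, 2

es = (input_len - cwin1 + 1) / conv1_stride  # valid convolution: 1001
es = DownsampleFactorMax.out_shape((1, es), (1, downscale1),
                                   st=(1, stride1))[1]
print "l1 es:", es  # pooled length along the sequence axis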