This article collects and summarizes typical usage examples of the theano.tensor.nnet.conv.conv2d method in Python. If you are wondering what exactly conv.conv2d does, how to call it, or are looking for working examples, the curated code samples below may help. You can also explore further usage of theano.tensor.nnet.conv, the module in which this method is defined.
The following presents 15 code examples of conv.conv2d, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
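Before diving into the examples, here is a minimal, self-contained sketch of the basic call pattern. It is written for this page rather than taken from any project below, and it assumes a Theano version that still ships the legacy theano.tensor.nnet.conv interface; the shapes are purely illustrative.

import numpy
import theano
import theano.tensor as T
from theano.tensor.nnet import conv

x = T.tensor4('x')  # (batch, channels, rows, cols)
w = theano.shared(numpy.random.randn(2, 1, 3, 3).astype(theano.config.floatX),
                  name='w')

# the default 'valid' mode shrinks each spatial dimension by filter_size - 1
y = conv.conv2d(input=x, filters=w, border_mode='valid')

f = theano.function([x], y)
out = f(numpy.zeros((4, 1, 28, 28), dtype=theano.config.floatX))
print(out.shape)  # (4, 2, 26, 26)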
Example 1: model
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def model(X, w, w2, w3, w4, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w, border_mode='full'))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = dropout(l2, p_drop_conv)

    l3a = rectify(conv2d(l2, w3))
    l3b = max_pool_2d(l3a, (2, 2))
    l3 = T.flatten(l3b, outdim=2)
    l3 = dropout(l3, p_drop_conv)

    l4 = rectify(T.dot(l3, w4))
    l4 = dropout(l4, p_drop_hidden)

    # w_o (the output-layer weights) is taken from the enclosing scope
    pyx = softmax(T.dot(l4, w_o))
    return l1, l2, l3, l4, pyx
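For context, here is a hedged sketch of weight shapes that would be consistent with the model above on 28x28 single-channel inputs (for example MNIST). The init_weights helper and the exact layer sizes are assumptions for illustration, not taken from the surrounding file.

import numpy as np
import theano

def init_weights(shape):
    # assumed helper: small random weights wrapped in a theano shared variable
    return theano.shared(np.random.randn(*shape).astype(theano.config.floatX) * 0.01)

w = init_weights((32, 1, 3, 3))        # 'full' conv: 28x28 -> 30x30, pool -> 15x15
w2 = init_weights((64, 32, 3, 3))      # 'valid' conv: 15x15 -> 13x13, pool -> 7x7
w3 = init_weights((128, 64, 3, 3))     # 'valid' conv: 7x7 -> 5x5, pool -> 3x3
w4 = init_weights((128 * 3 * 3, 625))  # dense layer over the flattened maps
w_o = init_weights((625, 10))          # output weights consumed by the softmax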
Example 2: local_conv2d_cpu
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def local_conv2d_cpu(node):
    if not isinstance(node.op, AbstractConv2d):
        return None

    img, kern = node.inputs
    if (not isinstance(img.type, TensorType) or
            not isinstance(kern.type, TensorType)):
        return None
    if node.op.border_mode not in ['full', 'valid']:
        return None
    if not node.op.filter_flip:
        # Not tested yet
        return None

    rval = conv2d(img, kern,
                  node.op.imshp, node.op.kshp,
                  border_mode=node.op.border_mode,
                  subsample=node.op.subsample)

    copy_stack_trace(node.outputs[0], rval)
    return [rval]
Example 3: test_broadcast_grad
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def test_broadcast_grad():
    rng = numpy.random.RandomState(utt.fetch_seed())
    x1 = T.tensor4('x')
    x1_data = rng.randn(1, 1, 300, 300)
    sigma = T.scalar('sigma')
    sigma_data = 20
    window_radius = 3

    filter_1d = T.arange(-window_radius, window_radius + 1)
    filter_1d = filter_1d.astype(theano.config.floatX)
    filter_1d = T.exp(-0.5 * filter_1d ** 2 / sigma ** 2)
    filter_1d = filter_1d / filter_1d.sum()

    filter_W = filter_1d.dimshuffle(['x', 'x', 0, 'x'])

    y = theano.tensor.nnet.conv2d(x1, filter_W, border_mode='full',
                                  filter_shape=[1, 1, None, None])
    theano.grad(y.sum(), sigma)
Example 4: output
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def output(self, input=None, dropout_active=True, *args, **kwargs):
    if input is None:
        input = self.input_layer.output(dropout_active=dropout_active, *args, **kwargs)

    if dropout_active and (self.dropout > 0.):
        retain_prob = 1 - self.dropout
        if self.dropout_tied:
            # tie the dropout masks across entire feature maps, so broadcast over the spatial dimensions
            mask = srng.binomial((input.shape[0], input.shape[1]), p=retain_prob, dtype='int32').astype('float32').dimshuffle(0, 1, 'x', 'x')
        else:
            mask = srng.binomial(input.shape, p=retain_prob, dtype='int32').astype('float32')
        # apply the mask and rescale the input accordingly; this way it is no longer
        # necessary to rescale the weights at test time ("inverted" dropout)
        input = input / retain_prob * mask

    if self.border_mode in ['valid', 'full']:
        conved = conv2d(input, self.W, subsample=(1, 1), image_shape=self.input_shape, filter_shape=self.filter_shape, border_mode=self.border_mode)
    elif self.border_mode == 'same':
        # emulate 'same' padding: do a 'full' convolution and crop back to the input size
        conved = conv2d(input, self.W, subsample=(1, 1), image_shape=self.input_shape, filter_shape=self.filter_shape, border_mode='full')
        shift_x = (self.filter_width - 1) // 2
        shift_y = (self.filter_height - 1) // 2
        conved = conved[:, :, shift_x:self.input_shape[2] + shift_x, shift_y:self.input_shape[3] + shift_y]
    else:
        raise RuntimeError("Invalid border mode: '%s'" % self.border_mode)

    return self.nonlinearity(conved + self.b.dimshuffle('x', 0, 'x', 'x'))
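The input / retain_prob * mask line above is the "inverted" dropout trick: dividing by the retain probability at training time keeps the expected activation unchanged, so the weights need no rescaling at test time. A small NumPy sketch of that property (names and values are illustrative only):

import numpy as np

rng = np.random.RandomState(0)
retain_prob = 0.8                              # 1 - dropout rate
acts = rng.rand(1000, 64)                      # stand-in for feature-map activations

mask = rng.binomial(1, retain_prob, size=acts.shape)
dropped = acts / retain_prob * mask            # same rescaling as in output() above

# the expectation is preserved, so the two means are close
print(acts.mean(), dropped.mean())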
Example 5: predict
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def predict(self, new_data, batch_size):
    """
    Predict for new data.
    """
    img_shape = None  # (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
    if self.non_linear == "tanh":
        conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    elif self.non_linear == "relu":
        conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    else:
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
Example 6: predict
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def predict(self, new_data, batch_size):
    """
    Predict for new data.
    """
    img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
    if self.non_linear == "tanh":
        conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    elif self.non_linear == "relu":
        conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    else:
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
Example 7: lmul_T
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def lmul_T(self, x):
    """
    .. todo::

        WRITEME
    """
    # dot(x, A.T): the transposed operation is obtained from the gradient
    # of the forward convolution with respect to its image input
    dummy_v = tensor.tensor4()
    z_hs = conv2d(dummy_v, self._filters,
                  image_shape=self._img_shape,
                  filter_shape=self._filters_shape,
                  subsample=self._subsample,
                  border_mode=self._border_mode,
                  )
    xfilters, xdummy = z_hs.owner.op.grad((dummy_v, self._filters), (x,))
    return xfilters
Example 8: forward
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def forward(self, inputs):
    # if padding is greater than zero, we insert the inputs into
    # the center of a larger zero array, effectively adding zero
    # borders
    if self.pad > 0:
        padded_inputs = T.set_subtensor(
            T.zeros((inputs.shape[0],
                     self.inputs_shape[1],
                     self.inputs_shape[2] + 2 * self.pad,
                     self.inputs_shape[3] + 2 * self.pad),
                    dtype=inputs.dtype)[:, :, self.pad:-self.pad, self.pad:-self.pad],
            inputs)
    else:
        padded_inputs = inputs

    padded_inputs_shape = (
        None,
        self.inputs_shape[1],
        self.inputs_shape[2] + 2 * self.pad,
        self.inputs_shape[3] + 2 * self.pad)

    return conv.conv2d(
        input=padded_inputs,
        filters=self.W,
        filter_shape=self.filter_shape,
        image_shape=padded_inputs_shape)
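The set_subtensor call above is just zero padding: the inputs are written into the center of a larger array of zeros. An equivalent NumPy sketch (the shapes are made up for illustration):

import numpy as np

pad = 2
x = np.arange(2 * 3 * 4 * 4, dtype='float32').reshape(2, 3, 4, 4)

padded = np.zeros((x.shape[0], x.shape[1],
                   x.shape[2] + 2 * pad, x.shape[3] + 2 * pad), dtype=x.dtype)
padded[:, :, pad:-pad, pad:-pad] = x  # same effect as the T.set_subtensor call
print(padded.shape)  # (2, 3, 8, 8)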
Example 9: setUp
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def setUp(self):
    super(TestConv2D, self).setUp()
    self.input = T.tensor4('input', dtype=self.dtype)
    self.input.name = 'default_V'
    self.filters = T.tensor4('filters', dtype=self.dtype)
    self.filters.name = 'default_filters'
    if not conv.imported_scipy_signal and theano.config.cxx == "":
        raise SkipTest("conv2d tests need SciPy or a c++ compiler")
Example 10: speed
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def speed(self):
    n_calls = 20000
    print("n_calls", n_calls)
    for border_mode in ['valid', 'full']:
        print()
        print(border_mode)
        for openmp in [False, True]:
            print("OpenMP", openmp)
            image_shapes = [(1, 5, 6, 6),
                            (10, 5, 6, 6),
                            # (10, 10, 16, 16),
                            # (10, 10, 32, 32)
                            ]
            print("image_shape", image_shapes)
            for image_shape in image_shapes:
                filter_shapes = [(1, 5, 4, 4), (2, 5, 4, 4), (5, 5, 4, 4)]
                print("filter_shapes", filter_shapes)
                for filter_shape in filter_shapes:
                    input = theano.shared(numpy.random.random(image_shape))
                    filters = theano.shared(numpy.random.random(filter_shape))
                    output = self.conv2d(input, filters,
                                         image_shape, filter_shape,
                                         border_mode,
                                         unroll_patch=True,
                                         openmp=openmp)
                    mode = theano.Mode(linker=theano.gof.vm.VM_Linker(
                        allow_gc=False,
                        use_cloop=True))
                    theano_conv = theano.function([], output, mode=mode)
                    t1 = time.time()
                    theano_conv.fn(n_calls=n_calls)
                    t2 = time.time()
                    print(t2 - t1, end=' ')
                print()
Example 11: set_inpt
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
    self.inpt = inpt.reshape(self.image_shape)
    conv_out = conv.conv2d(
        input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
        image_shape=self.image_shape)
    pooled_out = downsample.max_pool_2d(
        input=conv_out, ds=self.poolsize, ignore_border=True)
    self.output = self.activation_fn(
        pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    self.output_dropout = self.output  # no dropout in the convolutional layers
Example 12: conv
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def conv(X, w, b=None):
    # z = dnn_conv(X, w, border_mode=int(np.floor(w.get_value().shape[-1]/2.)))
    # emulate 'same' padding: do a 'full' convolution, then crop s pixels from each border
    s = int(np.floor(w.get_value().shape[-1] / 2.))
    z = conv2d(X, w, border_mode='full')[:, :, s:-s, s:-s]
    if b is not None:
        z += b.dimshuffle('x', 0, 'x', 'x')
    return z
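The slice in conv() turns a 'full' convolution back into a 'same'-sized one: for an odd filter width k, 'full' mode adds k - 1 rows and columns, and cropping s = floor(k / 2) from each border restores the input size. A quick check of that arithmetic with illustrative numbers:

import numpy as np

k = 5                        # odd filter width
s = int(np.floor(k / 2.))    # pixels cropped from each border, as in conv() above
in_size = 32
full_size = in_size + k - 1  # spatial size after border_mode='full'
same_size = full_size - 2 * s
print(full_size, same_size)  # 36 32 -> back to the input size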
Example 13: deconv
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def deconv(X, w, b=None):
    # z = dnn_conv(X, w, direction_hint="*not* 'forward!", border_mode=int(np.floor(w.get_value().shape[-1]/2.)))
    s = int(np.floor(w.get_value().shape[-1] / 2.))
    z = conv2d(X, w, border_mode='full')[:, :, s:-s, s:-s]
    if b is not None:
        z += b.dimshuffle('x', 0, 'x', 'x')
    return z
Example 14: encoder
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def encoder(tparams, layer0_input, filter_shape, pool_size,
            prefix='cnn_encoder'):
    """ filter_shape: (number of filters, num input feature maps, filter height,
                       filter width)
        image_shape: (batch_size, num input feature maps, image height, image width)
    """
    conv_out = conv.conv2d(input=layer0_input, filters=tparams[_p(prefix, 'W')],
                           filter_shape=filter_shape)
    conv_out_tanh = tensor.tanh(conv_out + tparams[_p(prefix, 'b')].dimshuffle('x', 0, 'x', 'x'))
    output = pool.pool_2d(input=conv_out_tanh, ds=pool_size, ignore_border=True)
    return output.flatten(2)
Example 15: f_conv
# Required import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def f_conv(self, x, spec, in_dim, weight_name):
    layer_type, dims = spec
    num_filters = dims[0]
    filter_size = (dims[1], dims[1])
    stride = (dims[2], dims[2])

    bm = 'full' if 'convf' in layer_type else 'valid'

    num_channels = in_dim[0]

    W = self.weight(self.rand_init_conv(
        (num_filters, num_channels) + filter_size), weight_name)

    if stride != (1, 1):
        f = GpuCorrMM(subsample=stride, border_mode=bm, pad=(0, 0))
        y = f(gpu_contiguous(x), gpu_contiguous(W))
    else:
        assert self.p.batch_size == self.p.valid_batch_size
        y = conv2d(x, W, image_shape=(2 * self.p.batch_size, ) + in_dim,
                   filter_shape=((num_filters, num_channels) +
                                 filter_size), border_mode=bm)

    output_size = ((num_filters,) +
                   ConvOp.getOutputShape(in_dim[1:], filter_size,
                                         stride, bm))
    return y, output_size