This article collects typical usage examples of the Python method lasagne.init.GlorotUniform. If you are wondering what init.GlorotUniform does, how to use it, or what calling code looks like in practice, the curated examples below should help. You can also explore further usage examples from the containing module, lasagne.init.
The 15 code examples of init.GlorotUniform shown below are sorted by popularity by default.
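Before the examples, a minimal sketch of what the initializer itself does may be useful: every lasagne.init initializer is a callable that maps a shape tuple to a numpy array of initial weights. The shape below is an arbitrary illustration, not taken from any of the examples.

import numpy as np
from lasagne import init

# GlorotUniform samples from U(-s, s) with s = gain * sqrt(6 / (fan_in + fan_out)).
W = init.GlorotUniform()((256, 128))
print(W.shape)          # (256, 128)
print(np.abs(W).max())  # bounded by sqrt(6 / (256 + 128)) = 0.125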
Example 1: __init__
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def __init__(self, incoming, num_labels, mask_input=None, W=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
    # This layer inherits from a MergeLayer, because it can have two
    # inputs - the layer input, and the mask.
    # We will just provide the layer input as incomings, unless a mask input was provided.
    self.input_shape = incoming.output_shape
    incomings = [incoming]
    self.mask_incoming_index = -1
    if mask_input is not None:
        incomings.append(mask_input)
        self.mask_incoming_index = 1

    super(CRFLayer, self).__init__(incomings, **kwargs)
    self.num_labels = num_labels + 1
    self.pad_label_index = num_labels

    num_inputs = self.input_shape[2]
    self.W = self.add_param(W, (num_inputs, self.num_labels, self.num_labels), name="W")

    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (self.num_labels, self.num_labels), name="b", regularizable=False)
Example 2: __init__
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
    self.vertex_shape = incoming_vertex.output_shape
    self.edge_shape = incoming_edge.output_shape

    self.input_shape = incoming_vertex.output_shape
    incomings = [incoming_vertex, incoming_edge]
    self.vertex_incoming_index = 0
    self.edge_incoming_index = 1
    super(GraphConvLayer, self).__init__(incomings, **kwargs)

    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.filter_size = filter_size

    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (num_filters,), name="b", regularizable=False)
Example 3: __init__
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
             b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
    super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)

    num_inputs = int(np.prod(self.input_shape[1:]))

    self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
    if b_h is None:
        self.b_h = None
    else:
        self.b_h = self.add_param(b_h, (num_inputs,), name="b_h", regularizable=False)

    self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
    if b_t is None:
        self.b_t = None
    else:
        self.b_t = self.add_param(b_t, (num_inputs,), name="b_t", regularizable=False)
Example 4: __init__
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def __init__(self, incoming, num_labels, mask_input=None, W_h=init.GlorotUniform(), W_c=init.GlorotUniform(),
             b=init.Constant(0.), **kwargs):
    # This layer inherits from a MergeLayer, because it can have two
    # inputs - the layer input, and the mask.
    # We will just provide the layer input as incomings, unless a mask input was provided.
    self.input_shape = incoming.output_shape
    incomings = [incoming]
    self.mask_incoming_index = -1
    if mask_input is not None:
        incomings.append(mask_input)
        self.mask_incoming_index = 1

    super(DepParserLayer, self).__init__(incomings, **kwargs)
    self.num_labels = num_labels
    num_inputs = self.input_shape[2]

    # add parameters
    self.W_h = self.add_param(W_h, (num_inputs, self.num_labels), name='W_h')
    self.W_c = self.add_param(W_c, (num_inputs, self.num_labels), name='W_c')
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (self.num_labels,), name='b', regularizable=False)
Example 5: __init__
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, p=0.5, shared_axes=(), noise_samples=None,
             **kwargs):
    super(DenseDropoutLayer, self).__init__(
        incoming, num_units, W, b, nonlinearity,
        num_leading_axes, **kwargs)
    self.p = p
    self.shared_axes = shared_axes

    # init random number generator
    self._srng = RandomStreams(get_rng().randint(1, 2147462579))

    # initialize noise samples
    self.noise = self.init_noise(noise_samples)
Example 6: smart_init
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def smart_init(shape):
    if len(shape) > 1:
        return init.GlorotUniform()(shape)
    else:
        return init.Normal()(shape)
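A quick usage sketch of this helper (the shapes are illustrative assumptions): matrix-shaped parameters get Glorot scaling, while bias-like vectors fall back to a Gaussian.

W = smart_init((128, 64))  # 2-D shape -> GlorotUniform sample
b = smart_init((64,))      # 1-D shape -> Normal sample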
Example 7: __init__
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def __init__(self, W_in=init.GlorotUniform(), W_hid=init.GlorotUniform(),
             W_cell=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nonlinearities.sigmoid):
    self.W_in = W_in
    self.W_hid = W_hid
    # Don't store a cell weight vector when cell is None
    if W_cell is not None:
        self.W_cell = W_cell
    self.b = b
    # For the nonlinearity, if None is supplied, use identity
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity
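This constructor mirrors the signature of the stock lasagne.layers.Gate. A hedged sketch of how such a gate object is typically consumed (assuming the stock class and an arbitrary input shape):

import lasagne
from lasagne import init, nonlinearities

l_in = lasagne.layers.InputLayer((None, 20, 100))  # (batch, time, features)
l_lstm = lasagne.layers.LSTMLayer(
    l_in, num_units=64,
    ingate=lasagne.layers.Gate(W_in=init.GlorotUniform(),
                               nonlinearity=nonlinearities.sigmoid))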
Example 8: createCNN
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def createCNN(self):
    net = {}
    net['input'] = lasagne.layers.InputLayer(shape=(None, self.nChannels, self.imageHeight, self.imageWidth), input_var=self.data)
    print("Input shape: {0}".format(net['input'].output_shape))

    # STAGE 1
    net['s1_conv1_1'] = batch_norm(Conv2DLayer(net['input'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_conv1_2'] = batch_norm(Conv2DLayer(net['s1_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_pool1'] = lasagne.layers.Pool2DLayer(net['s1_conv1_2'], 2)

    net['s1_conv2_1'] = batch_norm(Conv2DLayer(net['s1_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv2_2'] = batch_norm(Conv2DLayer(net['s1_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool2'] = lasagne.layers.Pool2DLayer(net['s1_conv2_2'], 2)

    net['s1_conv3_1'] = batch_norm(Conv2DLayer(net['s1_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv3_2'] = batch_norm(Conv2DLayer(net['s1_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool3'] = lasagne.layers.Pool2DLayer(net['s1_conv3_2'], 2)

    net['s1_conv4_1'] = batch_norm(Conv2DLayer(net['s1_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv4_2'] = batch_norm(Conv2DLayer(net['s1_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool4'] = lasagne.layers.Pool2DLayer(net['s1_conv4_2'], 2)

    net['s1_fc1_dropout'] = lasagne.layers.DropoutLayer(net['s1_pool4'], p=0.5)
    net['s1_fc1'] = batch_norm(lasagne.layers.DenseLayer(net['s1_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))
    net['s1_output'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=136, nonlinearity=None)
    net['s1_landmarks'] = LandmarkInitLayer(net['s1_output'], self.initLandmarks)

    for i in range(1, self.nStages):
        self.addDANStage(i + 1, net)

    net['output'] = net['s' + str(self.nStages) + '_landmarks']
    return net
Example 9: createCNN
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def createCNN(self):
    net = {}
    net['input'] = lasagne.layers.InputLayer(shape=(None, self.nChannels, self.imageHeight, self.imageWidth), input_var=self.data)
    print("Input shape: {0}".format(net['input'].output_shape))

    # STAGE 1
    net['s1_conv1_1'] = batch_norm(Conv2DLayer(net['input'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_conv1_2'] = batch_norm(Conv2DLayer(net['s1_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_pool1'] = lasagne.layers.Pool2DLayer(net['s1_conv1_2'], 2)

    net['s1_conv2_1'] = batch_norm(Conv2DLayer(net['s1_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv2_2'] = batch_norm(Conv2DLayer(net['s1_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool2'] = lasagne.layers.Pool2DLayer(net['s1_conv2_2'], 2)

    net['s1_conv3_1'] = batch_norm(Conv2DLayer(net['s1_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv3_2'] = batch_norm(Conv2DLayer(net['s1_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool3'] = lasagne.layers.Pool2DLayer(net['s1_conv3_2'], 2)

    net['s1_conv4_1'] = batch_norm(Conv2DLayer(net['s1_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv4_2'] = batch_norm(Conv2DLayer(net['s1_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool4'] = lasagne.layers.Pool2DLayer(net['s1_conv4_2'], 2)

    net['s1_fc1_dropout'] = lasagne.layers.DropoutLayer(net['s1_pool4'], p=0.5)
    net['s1_fc1'] = batch_norm(lasagne.layers.DenseLayer(net['s1_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))
    net['s1_output'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=136, nonlinearity=None)
    net['s1_landmarks'] = LandmarkInitLayer(net['s1_output'], self.initLandmarks)
    if self.confidenceLayer:
        net['s1_confidence'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=2, W=GlorotUniform('relu'), nonlinearity=lasagne.nonlinearities.softmax)

    for i in range(1, self.nStages):
        self.addDANStage(i + 1, net)

    net['output'] = net['s' + str(self.nStages) + '_landmarks']
    if self.confidenceLayer:
        net['output'] = lasagne.layers.ConcatLayer([net['output'], net['s1_confidence']])

    return net
Example 10: __init__
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def __init__(self, args, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, **kwargs):
    super(DenseLayerWithReg, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)

    self.num_units = num_units

    if num_leading_axes >= len(self.input_shape):
        raise ValueError(
            "Got num_leading_axes=%d for a %d-dimensional input, "
            "leaving no trailing axes for the dot product." %
            (num_leading_axes, len(self.input_shape)))
    elif num_leading_axes < -len(self.input_shape):
        raise ValueError(
            "Got num_leading_axes=%d for a %d-dimensional input, "
            "requesting more trailing axes than there are input "
            "dimensions." % (num_leading_axes, len(self.input_shape)))
    self.num_leading_axes = num_leading_axes

    if any(s is None for s in self.input_shape[num_leading_axes:]):
        raise ValueError(
            "A DenseLayer requires a fixed input shape (except for "
            "the leading axes). Got %r for num_leading_axes=%d." %
            (self.input_shape, self.num_leading_axes))
    num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))

    self.W = self.add_param(W, (num_inputs, num_units), name="W")
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (num_units,), name="b",
                                regularizable=False)

    if args.regL1 is True:
        self.L1 = self.add_param(init.Constant(args.regInit['L1']),
                                 (num_inputs, num_units), name="L1")
    if args.regL2 is True:
        self.L2 = self.add_param(init.Constant(args.regInit['L2']),
                                 (num_inputs, num_units), name="L2")
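A hedged construction sketch for this layer (the args object and shapes are assumptions; the constructor only requires regL1/regL2 flags and a regInit dict, and the rest of the class is not shown here):

from argparse import Namespace
import lasagne

args = Namespace(regL1=True, regL2=False, regInit={'L1': 0.01, 'L2': 0.0})
l_in = lasagne.layers.InputLayer(shape=(None, 784))
l_reg = DenseLayerWithReg(args, l_in, num_units=128)  # adds a per-weight L1 parameter matrix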
Example 11: __init__
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def __init__(self, incoming, num_filters, num_rot,
             filter_size, stride=(1, 1),
             border_mode="valid", untie_biases=False,
             W=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nonlinearities.rectify,
             convolution=T.nnet.conv2d, **kwargs):
    super(RotConv, self).__init__(incoming, **kwargs)
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.num_rot = num_rot
    self.filter_size = as_tuple(filter_size, 2)
    self.stride = as_tuple(stride, 2)
    self.border_mode = border_mode
    self.untie_biases = untie_biases
    self.convolution = convolution

    if self.border_mode not in ['valid', 'full', 'same']:
        raise RuntimeError("Invalid border mode: '%s'" % self.border_mode)

    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        if self.untie_biases:
            biases_shape = (num_filters, self.output_shape[2],
                            self.output_shape[3])
        else:
            biases_shape = (num_filters,)
        self.b = self.add_param(b, biases_shape, name="b",
                                regularizable=False)
Example 12: addConvModule
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def addConvModule(nnet, num_filters, filter_size, pad='same', W_init=None, bias=True, pool_size=(2, 2),
                  use_batch_norm=False, dropout=False, p_dropout=0.5, upscale=False):
    """
    Add a convolutional module (convolutional layer + (leaky) ReLU + MaxPool) to the network.
    """
    if W_init is None:
        W = GlorotUniform(gain=(2 / (1 + 0.01 ** 2)) ** 0.5)  # gain adjusted for leaky ReLU with alpha=0.01
    else:
        W = W_init

    if bias is True:
        b = Constant(0.)
    else:
        b = None

    # build module
    if dropout:
        nnet.addDropoutLayer(p=p_dropout)

    nnet.addConvLayer(use_batch_norm=use_batch_norm,
                      num_filters=num_filters,
                      filter_size=filter_size,
                      pad=pad,
                      W=W,
                      b=b)

    if Cfg.leaky_relu:
        nnet.addLeakyReLU()
    else:
        nnet.addReLU()

    if upscale:
        nnet.addUpscale(scale_factor=pool_size)
    else:
        nnet.addMaxPool(pool_size=pool_size)
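The hard-coded gain in this helper is the leaky-ReLU instance of the usual He-style gain correction, gain = sqrt(2 / (1 + alpha^2)). A small sketch of the general formula (the helper name here is ours, for illustration):

def leaky_relu_gain(alpha):
    # He-style gain for leaky ReLU: sqrt(2 / (1 + alpha**2)).
    # alpha=0 recovers the plain-ReLU gain sqrt(2), which is what
    # GlorotUniform('relu') uses.
    return (2.0 / (1.0 + alpha ** 2)) ** 0.5

print(leaky_relu_gain(0.01))  # ~1.4141, the value hard-coded above
print(leaky_relu_gain(0.0))   # ~1.4142, i.e. sqrt(2)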
Example 13: __create_toplogy__
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def __create_toplogy__(self, input_var_first=None, input_var_second=None):
    # define network topology
    if (self.conf.rep % 2 != 0):
        raise ValueError("Representation size should be divisible by two as it's formed by combining two crossmodal translations", self.conf.rep)

    # input layers
    l_in_first = InputLayer(shape=(self.conf.batch_size, self.conf.mod1size), input_var=input_var_first)
    l_in_second = InputLayer(shape=(self.conf.batch_size, self.conf.mod2size), input_var=input_var_second)

    # first -> second
    l_hidden1_first = DenseLayer(l_in_first, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())             # enc1
    l_hidden2_first = DenseLayer(l_hidden1_first, num_units=self.conf.rep // 2, nonlinearity=self.conf.act, W=GlorotUniform())   # enc2
    l_hidden2_first_d = DropoutLayer(l_hidden2_first, p=self.conf.dropout)
    l_hidden3_first = DenseLayer(l_hidden2_first_d, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())      # dec1
    l_out_first = DenseLayer(l_hidden3_first, num_units=self.conf.mod2size, nonlinearity=self.conf.act, W=GlorotUniform())       # dec2

    # second -> first
    if self.conf.untied:
        # FREE (untied) weights
        l_hidden1_second = DenseLayer(l_in_second, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())             # enc1
        l_hidden2_second = DenseLayer(l_hidden1_second, num_units=self.conf.rep // 2, nonlinearity=self.conf.act, W=GlorotUniform())   # enc2
        l_hidden2_second_d = DropoutLayer(l_hidden2_second, p=self.conf.dropout)
        l_hidden3_second = DenseLayer(l_hidden2_second_d, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())      # dec1
        l_out_second = DenseLayer(l_hidden3_second, num_units=self.conf.mod1size, nonlinearity=self.conf.act, W=GlorotUniform())       # dec2
    else:
        # TIED middle weights
        l_hidden1_second = DenseLayer(l_in_second, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=GlorotUniform())                  # enc1
        l_hidden2_second = DenseLayer(l_hidden1_second, num_units=self.conf.rep // 2, nonlinearity=self.conf.act, W=l_hidden3_first.W.T)    # enc2 (tied to dec1 of the first branch)
        l_hidden2_second_d = DropoutLayer(l_hidden2_second, p=self.conf.dropout)
        l_hidden3_second = DenseLayer(l_hidden2_second_d, num_units=self.conf.hdn, nonlinearity=self.conf.act, W=l_hidden2_first.W.T)       # dec1 (tied to enc2 of the first branch)
        l_out_second = DenseLayer(l_hidden3_second, num_units=self.conf.mod1size, nonlinearity=self.conf.act, W=GlorotUniform())            # dec2

    l_out = concat([l_out_first, l_out_second])
    return l_out, l_hidden2_first, l_hidden2_second
Example 14: addDANStage
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def addDANStage(self, stageIdx, net):
    prevStage = 's' + str(stageIdx - 1)
    curStage = 's' + str(stageIdx)

    # CONNECTION LAYERS OF PREVIOUS STAGE
    net[prevStage + '_transform_params'] = TransformParamsLayer(net[prevStage + '_landmarks'], self.initLandmarks)
    net[prevStage + '_img_output'] = AffineTransformLayer(net['input'], net[prevStage + '_transform_params'])
    net[prevStage + '_landmarks_affine'] = LandmarkTransformLayer(net[prevStage + '_landmarks'], net[prevStage + '_transform_params'])
    net[prevStage + '_img_landmarks'] = LandmarkImageLayer(net[prevStage + '_landmarks_affine'], (self.imageHeight, self.imageWidth), self.landmarkPatchSize)

    net[prevStage + '_img_feature'] = lasagne.layers.DenseLayer(net[prevStage + '_fc1'], num_units=56 * 56, W=GlorotUniform('relu'))
    net[prevStage + '_img_feature'] = lasagne.layers.ReshapeLayer(net[prevStage + '_img_feature'], (-1, 1, 56, 56))
    net[prevStage + '_img_feature'] = lasagne.layers.Upscale2DLayer(net[prevStage + '_img_feature'], 2)

    # CURRENT STAGE
    net[curStage + '_input'] = batch_norm(lasagne.layers.ConcatLayer([net[prevStage + '_img_output'], net[prevStage + '_img_landmarks'], net[prevStage + '_img_feature']], 1))

    net[curStage + '_conv1_1'] = batch_norm(Conv2DLayer(net[curStage + '_input'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net[curStage + '_conv1_2'] = batch_norm(Conv2DLayer(net[curStage + '_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net[curStage + '_pool1'] = lasagne.layers.Pool2DLayer(net[curStage + '_conv1_2'], 2)

    net[curStage + '_conv2_1'] = batch_norm(Conv2DLayer(net[curStage + '_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_conv2_2'] = batch_norm(Conv2DLayer(net[curStage + '_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_pool2'] = lasagne.layers.Pool2DLayer(net[curStage + '_conv2_2'], 2)

    net[curStage + '_conv3_1'] = batch_norm(Conv2DLayer(net[curStage + '_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_conv3_2'] = batch_norm(Conv2DLayer(net[curStage + '_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_pool3'] = lasagne.layers.Pool2DLayer(net[curStage + '_conv3_2'], 2)

    net[curStage + '_conv4_1'] = batch_norm(Conv2DLayer(net[curStage + '_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_conv4_2'] = batch_norm(Conv2DLayer(net[curStage + '_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net[curStage + '_pool4'] = lasagne.layers.Pool2DLayer(net[curStage + '_conv4_2'], 2)
    net[curStage + '_pool4'] = lasagne.layers.FlattenLayer(net[curStage + '_pool4'])

    net[curStage + '_fc1_dropout'] = lasagne.layers.DropoutLayer(net[curStage + '_pool4'], p=0.5)
    net[curStage + '_fc1'] = batch_norm(lasagne.layers.DenseLayer(net[curStage + '_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))

    net[curStage + '_output'] = lasagne.layers.DenseLayer(net[curStage + '_fc1'], num_units=136, nonlinearity=None)
    net[curStage + '_landmarks'] = lasagne.layers.ElemwiseSumLayer([net[prevStage + '_landmarks_affine'], net[curStage + '_output']])
    net[curStage + '_landmarks'] = LandmarkTransformLayer(net[curStage + '_landmarks'], net[prevStage + '_transform_params'], True)
Example 15: __init__
# Required import: from lasagne import init [as alias]
# Or: from lasagne.init import GlorotUniform [as alias]
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
             pad=0, untie_biases=False, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             flip_filters=True, convolution=theano.tensor.nnet.conv2d,
             centered=True, **kwargs):
    """A padded convolutional layer

    Note
    ----
    If used in place of a :class:`lasagne.layers.Conv2DLayer` be
    sure to specify `flip_filters=False`, which is the default for
    that layer.

    Parameters
    ----------
    incoming : lasagne.layers.Layer
        The input layer
    num_filters : int
        The number of filters or kernels of the convolution
    filter_size : int or iterable of int
        The size of the filters
    stride : int or iterable of int
        The stride or subsampling of the convolution
    pad : int, iterable of int, ``full``, ``same`` or ``valid``
        **Ignored!** Kept for compatibility with
        :class:`lasagne.layers.Conv2DLayer`
    untie_biases : bool
        See :class:`lasagne.layers.Conv2DLayer`
    W : Theano shared variable, expression, numpy array or callable
        See :class:`lasagne.layers.Conv2DLayer`
    b : Theano shared variable, expression, numpy array, callable or None
        See :class:`lasagne.layers.Conv2DLayer`
    nonlinearity : callable or None
        See :class:`lasagne.layers.Conv2DLayer`
    flip_filters : bool
        See :class:`lasagne.layers.Conv2DLayer`
    convolution : callable
        See :class:`lasagne.layers.Conv2DLayer`
    centered : bool
        If True, the padding will be added on both sides. If False
        the zero padding will be applied on the upper left side.
    **kwargs
        Any additional keyword arguments are passed to the
        :class:`lasagne.layers.Layer` superclass
    """
    self.centered = centered
    if pad not in [0, (0, 0), [0, 0]]:
        warnings.warn('The specified padding will be ignored',
                      RuntimeWarning)
    super(PaddedConv2DLayer, self).__init__(incoming, num_filters,
                                            filter_size, stride, pad,
                                            untie_biases, W, b,
                                            nonlinearity, flip_filters,
                                            **kwargs)
    if self.input_shape[2:] != (None, None):
        warnings.warn('This layer should only be used when the size of '
                      'the image is not known', RuntimeWarning)