This article compiles typical usage examples of the Python method lasagne.layers.dnn.Conv2DDNNLayer. If you are wondering what dnn.Conv2DDNNLayer does, how to use it, or what it looks like in practice, the curated examples below should help. You can also explore other uses of its enclosing module, lasagne.layers.dnn.
The following shows 15 code examples of dnn.Conv2DDNNLayer, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
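Before the examples, here is a minimal, self-contained sketch of constructing and running a Conv2DDNNLayer on its own. It assumes a Theano installation with cuDNN available (which the lasagne.layers.dnn module requires); the shapes and filter counts are purely illustrative:

import numpy as np
import theano
import theano.tensor as T
from lasagne.layers import InputLayer, get_output
from lasagne.layers.dnn import Conv2DDNNLayer

x = T.tensor4('x')
l_in = InputLayer((None, 3, 32, 32), input_var=x)
# pad=1 with a 3x3 filter preserves the spatial dimensions
l_conv = Conv2DDNNLayer(l_in, num_filters=16, filter_size=(3, 3), pad=1)
f = theano.function([x], get_output(l_conv))
print(f(np.zeros((2, 3, 32, 32), dtype='float32')).shape)  # (2, 16, 32, 32)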
Example 1: __init__
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def __init__(self, incoming, radii, normalise=True,
             stride=(1, 1), pad=0, **kwargs):
    # Use predetermined Hough filters
    W = _create_hough_filters(2 * np.max(radii) + 1, radii, normalise=normalise)
    # Transform to the correct shape and dtype
    W = W[:, np.newaxis, :, :].astype('float32')
    # Remove biases and nonlinearities
    b = None
    untie_biases = False
    nonlinearity = None
    flip_filters = True  # doesn't matter here
    super(Conv2DDNNLayer, self).__init__(incoming, W.shape[0],
                                         W.shape[-2:], stride, pad,
                                         untie_biases, W, b, nonlinearity,
                                         flip_filters, n=2, **kwargs)
    # Remove the 'trainable' tag for W so the Hough filters stay fixed
    self.params[self.W] = self.params[self.W].difference(set(['trainable']))
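Example 1 subclasses Conv2DDNNLayer to run a fixed, non-trainable filter bank (_create_hough_filters and the class itself are defined elsewhere in that project). The parameter-freezing trick at the end works on any Lasagne layer; a minimal sketch of the same pattern on a plain Conv2DDNNLayer, with a random placeholder standing in for the Hough filters:

import numpy as np
import lasagne
from lasagne.layers import InputLayer
from lasagne.layers.dnn import Conv2DDNNLayer

l_in = InputLayer((None, 1, 64, 64))
fixed_W = np.random.randn(8, 1, 5, 5).astype('float32')  # placeholder filter bank
l_conv = Conv2DDNNLayer(l_in, num_filters=8, filter_size=(5, 5),
                        W=fixed_W, b=None, nonlinearity=None)
# Removing the 'trainable' tag makes get_all_params(..., trainable=True) skip W
l_conv.params[l_conv.W] = l_conv.params[l_conv.W].difference({'trainable'})
assert l_conv.W not in lasagne.layers.get_all_params(l_conv, trainable=True)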
Example 2: build_inception_module
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def build_inception_module(name, input_layer, nfilters):
    # nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)
    net = {}
    net['pool'] = PoolLayerDNN(input_layer, pool_size=3, stride=1, pad=1)
    net['pool_proj'] = ConvLayer(
        net['pool'], nfilters[0], 1, flip_filters=False)
    net['1x1'] = ConvLayer(input_layer, nfilters[1], 1, flip_filters=False)
    net['3x3_reduce'] = ConvLayer(
        input_layer, nfilters[2], 1, flip_filters=False)
    net['3x3'] = ConvLayer(
        net['3x3_reduce'], nfilters[3], 3, pad=1, flip_filters=False)
    net['5x5_reduce'] = ConvLayer(
        input_layer, nfilters[4], 1, flip_filters=False)
    net['5x5'] = ConvLayer(
        net['5x5_reduce'], nfilters[5], 5, pad=2, flip_filters=False)
    net['output'] = ConcatLayer([
        net['1x1'],
        net['3x3'],
        net['5x5'],
        net['pool_proj'],
    ])
    return {'{}/{}'.format(name, k): v for k, v in net.items()}
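A usage sketch for build_inception_module, assuming the aliases the snippet implies: ConvLayer for Conv2DDNNLayer and PoolLayerDNN for MaxPool2DDNNLayer (this matches the Lasagne Recipes GoogLeNet definition; Example 12 below feeds it the same nfilters lists):

from lasagne.layers import InputLayer, ConcatLayer
from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
from lasagne.layers.dnn import MaxPool2DDNNLayer as PoolLayerDNN

l_in = InputLayer((None, 192, 28, 28))
module = build_inception_module('inception_3a', l_in, [32, 64, 96, 128, 16, 32])
# 64 + 128 + 32 + 32 channels from the four concatenated branches
print(module['inception_3a/output'].output_shape)  # (None, 256, 28, 28)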
Example 3: create_network
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units=2, nonlinearity=softmax)
    return network

# random search to initialize the weights
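The function above only builds the graph; a sketch of the usual Lasagne training setup around it (the Adam optimizer and cross-entropy loss are illustrative choices, not taken from the original project):

import theano
import theano.tensor as T
import lasagne

network = create_network()
# The InputLayer was built without an explicit input_var, so fetch the auto-created one
input_var = lasagne.layers.get_all_layers(network)[0].input_var
target_var = T.ivector('targets')
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=1e-4)
train_fn = theano.function([input_var, target_var], loss, updates=updates)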
Example 4: create_network
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    # layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer15, num_units=256)
    network = DenseLayer(layer17, num_units=1, nonlinearity=None)
    return network

# random search to initialize the weights
Example 5: residual_block
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def residual_block(l, increase_dim=False, projection=True, first=False):
    """
    Create a residual learning building block with two stacked 3x3 conv layers, as in
    'Identity Mappings in Deep Residual Networks', Kaiming He et al. 2016
    (https://arxiv.org/abs/1603.05027).
    """
    input_num_filters = l.output_shape[1]
    if increase_dim:
        first_stride = (2, 2)
        out_num_filters = input_num_filters * 2
    else:
        first_stride = (1, 1)
        out_num_filters = input_num_filters
    if first:
        # hacky solution to keep layers correct
        bn_pre_relu = l
    else:
        # contains the BN -> ReLU portion, steps 1 to 2
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)
    # contains the weight -> BN -> ReLU portion, steps 3 to 5
    conv_1 = batch_norm(ConvLayer(bn_pre_relu, num_filters=out_num_filters, filter_size=(3, 3),
                                  stride=first_stride, nonlinearity=rectify, pad='same', W=he_norm))
    # contains the last weight portion, step 6
    conv_2 = ConvLayer(conv_1, num_filters=out_num_filters, filter_size=(3, 3), stride=(1, 1),
                       nonlinearity=None, pad='same', W=he_norm)
    # add shortcut connections
    if increase_dim:
        # projection shortcut, as option B in the paper
        projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1, 1), stride=(2, 2),
                               nonlinearity=None, pad='same', b=None)
        block = ElemwiseSumLayer([conv_2, projection])
    else:
        block = ElemwiseSumLayer([conv_2, l])
    return block
Example 6: ResNet_FullPreActivation
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def ResNet_FullPreActivation(input_shape=(None, 3, PIXELS, PIXELS), input_var=None, n_classes=10, n=18):
    """
    Adapted from https://github.com/Lasagne/Recipes/tree/master/papers/deep_residual_learning,
    tweaked to be consistent with 'Identity Mappings in Deep Residual Networks',
    Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027).
    Formula to figure out depth: 6n + 2.
    """
    # Building the network
    l_in = InputLayer(shape=input_shape, input_var=input_var)
    # first layer, output is 16 x 32 x 32
    l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3, 3), stride=(1, 1),
                             nonlinearity=rectify, pad='same', W=he_norm))
    # first stack of residual blocks, output is 16 x 32 x 32
    l = residual_block(l, first=True)
    for _ in range(1, n):
        l = residual_block(l)
    # second stack of residual blocks, output is 32 x 16 x 16
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)
    # third stack of residual blocks, output is 64 x 8 x 8
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)
    bn_post_conv = BatchNormLayer(l)
    bn_post_relu = NonlinearityLayer(bn_post_conv, rectify)
    # average pooling
    avg_pool = GlobalPoolLayer(bn_post_relu)
    # fully connected layer
    network = DenseLayer(avg_pool, num_units=n_classes, W=HeNormal(), nonlinearity=softmax)
    return network
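For inference with the returned network, a minimal sketch assuming CIFAR-10-sized inputs (PIXELS = 32; n=5 gives a 32-layer net by the 6n + 2 formula):

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
network = ResNet_FullPreActivation(input_shape=(None, 3, 32, 32),
                                   input_var=input_var, n_classes=10, n=5)
# deterministic=True disables stochastic behaviour such as dropout at test time
probs = lasagne.layers.get_output(network, deterministic=True)
predict_fn = theano.function([input_var], T.argmax(probs, axis=1))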
Example 7: build_model
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def build_model(input_var):
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224), input_var=input_var)
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net
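The layer names here follow the Lasagne Recipes VGG16 definition, so pretrained Caffe-converted weights can usually be dropped in. A sketch assuming the vgg16.pkl file from the Recipes model zoo (the 'param values' key is that file's convention, not part of the code above):

import pickle
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
net = build_model(input_var)
with open('vgg16.pkl', 'rb') as f:
    model = pickle.load(f, encoding='latin-1')  # encoding= is needed on Python 3
lasagne.layers.set_all_param_values(net['prob'], model['param values'])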
Example 8: pd
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def pd(num_layers=2, num_filters=32, filter_size=(3, 3), pad=1, stride=(1, 1),
       nonlinearity=elu, style='convolutional', bnorm=1, **kwargs):
    input_args = locals()
    input_args.pop('num_layers')
    # Broadcast every non-list argument into a per-layer list of length num_layers
    return {key: entry if type(entry) is list else [entry] * num_layers
            for key, entry in input_args.items()}
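pd simply broadcasts each argument into a per-layer list, which is convenient for loops that build num_layers similar layers; for example:

cfg = pd(num_layers=3, num_filters=64)
# cfg['num_filters'] == [64, 64, 64]
# cfg['filter_size'] == [(3, 3), (3, 3), (3, 3)]  (non-list defaults are broadcast too)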
# Possible Conv2DDNN convenience function. Remember to delete the C2D import at the top if you use this.
# def C2D(incoming=None, num_filters=32, filter_size=[3, 3], pad='same', stride=[1, 1],
#         W=initmethod('relu'), nonlinearity=elu, name=None):
#     return lasagne.layers.dnn.Conv2DDNNLayer(incoming, num_filters, filter_size, stride, pad,
#                                              False, W, None, nonlinearity, False)
# Shape-preserving Gaussian sample layer for latent vectors with spatial dimensions.
# This is a holdover from an "old" (i.e. I abandoned it last month) idea.
Example 9: initialize_network
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def initialize_network(width):
    # initialize network - VGG19 style
    net = {}
    net['input'] = InputLayer((1, 3, width, width))
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
    net['pool1'] = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
    net['pool2'] = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
    net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1)
    net['pool3'] = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
    net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1)
    net['pool4'] = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
    net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1)
    net['pool5'] = PoolLayer(net['conv5_4'], 2, mode='average_exc_pad')
    return net
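This VGG19-style trunk (convolutions plus average pooling, no classifier head) is the usual feature extractor for neural style transfer. A sketch of pulling features from several layers at once; the layer selection here is illustrative:

import theano
import theano.tensor as T
import lasagne

net = initialize_network(width=512)
x = T.tensor4('x')
layers = [net[k] for k in ('conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1')]
features = lasagne.layers.get_output(layers, x)  # one expression per requested layer
feature_fn = theano.function([x], features)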
Example 10: build_model
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 32, 32))
    net['conv1'] = ConvLayer(net['input'],
                             num_filters=192,
                             filter_size=5,
                             pad=2,
                             flip_filters=False)
    net['cccp1'] = ConvLayer(
        net['conv1'], num_filters=160, filter_size=1, flip_filters=False)
    net['cccp2'] = ConvLayer(
        net['cccp1'], num_filters=96, filter_size=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['cccp2'],
                             pool_size=3,
                             stride=2,
                             mode='max',
                             ignore_border=False)
    net['drop3'] = DropoutLayer(net['pool1'], p=0.5)
    net['conv2'] = ConvLayer(net['drop3'],
                             num_filters=192,
                             filter_size=5,
                             pad=2,
                             flip_filters=False)
    net['cccp3'] = ConvLayer(
        net['conv2'], num_filters=192, filter_size=1, flip_filters=False)
    net['cccp4'] = ConvLayer(
        net['cccp3'], num_filters=192, filter_size=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['cccp4'],
                             pool_size=3,
                             stride=2,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['drop6'] = DropoutLayer(net['pool2'], p=0.5)
    net['conv3'] = ConvLayer(net['drop6'],
                             num_filters=192,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['cccp5'] = ConvLayer(
        net['conv3'], num_filters=192, filter_size=1, flip_filters=False)
    net['cccp6'] = ConvLayer(
        net['cccp5'], num_filters=10, filter_size=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['cccp6'],
                             pool_size=8,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['output'] = FlattenLayer(net['pool3'])
    return net
Example 11: build_model
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1'] = ConvLayer(net['input'],
                             num_filters=96,
                             filter_size=7,
                             stride=2,
                             flip_filters=False)
    # caffe has alpha = alpha * pool_size
    net['norm1'] = NormLayer(net['conv1'], alpha=0.0001)
    net['pool1'] = PoolLayer(net['norm1'],
                             pool_size=3,
                             stride=3,
                             ignore_border=False)
    net['conv2'] = ConvLayer(net['pool1'],
                             num_filters=256,
                             filter_size=5,
                             flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2'],
                             pool_size=2,
                             stride=2,
                             ignore_border=False)
    net['conv3'] = ConvLayer(net['pool2'],
                             num_filters=512,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['conv4'] = ConvLayer(net['conv3'],
                             num_filters=512,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['conv5'] = ConvLayer(net['conv4'],
                             num_filters=512,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5'],
                             pool_size=3,
                             stride=3,
                             ignore_border=False)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['drop6'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['drop6'], num_units=4096)
    net['drop7'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['drop7'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net
Example 12: build_model
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, None, None))
    net['conv1/7x7_s2'] = ConvLayer(
        net['input'], 64, 7, stride=2, pad=3, flip_filters=False)
    net['pool1/3x3_s2'] = PoolLayer(
        net['conv1/7x7_s2'], pool_size=3, stride=2, ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(
        net['pool1/norm1'], 64, 1, flip_filters=False)
    net['conv2/3x3'] = ConvLayer(
        net['conv2/3x3_reduce'], 192, 3, pad=1, flip_filters=False)
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(
        net['conv2/norm2'], pool_size=3, stride=2, ignore_border=False)
    net.update(build_inception_module('inception_3a',
                                      net['pool2/3x3_s2'],
                                      [32, 64, 96, 128, 16, 32]))
    net.update(build_inception_module('inception_3b',
                                      net['inception_3a/output'],
                                      [64, 128, 128, 192, 32, 96]))
    net['pool3/3x3_s2'] = PoolLayer(
        net['inception_3b/output'], pool_size=3, stride=2, ignore_border=False)
    net.update(build_inception_module('inception_4a',
                                      net['pool3/3x3_s2'],
                                      [64, 192, 96, 208, 16, 48]))
    net.update(build_inception_module('inception_4b',
                                      net['inception_4a/output'],
                                      [64, 160, 112, 224, 24, 64]))
    net.update(build_inception_module('inception_4c',
                                      net['inception_4b/output'],
                                      [64, 128, 128, 256, 24, 64]))
    net.update(build_inception_module('inception_4d',
                                      net['inception_4c/output'],
                                      [64, 112, 144, 288, 32, 64]))
    net.update(build_inception_module('inception_4e',
                                      net['inception_4d/output'],
                                      [128, 256, 160, 320, 32, 128]))
    net['pool4/3x3_s2'] = PoolLayer(
        net['inception_4e/output'], pool_size=3, stride=2, ignore_border=False)
    net.update(build_inception_module('inception_5a',
                                      net['pool4/3x3_s2'],
                                      [128, 256, 160, 320, 32, 128]))
    net.update(build_inception_module('inception_5b',
                                      net['inception_5a/output'],
                                      [128, 384, 192, 384, 48, 128]))
    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1'],
                                         num_units=1000,
                                         nonlinearity=linear)
    net['prob'] = NonlinearityLayer(net['loss3/classifier'],
                                    nonlinearity=softmax)
    return net
Example 13: build_model
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1_1'] = ConvLayer(
        net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(
        net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(
        net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(
        net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(
        net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(
        net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(
        net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(
        net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(
        net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(
        net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(
        net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(
        net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(
        net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(
        net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net
Example 14: build_model
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    net['conv1_1'] = ConvLayer(
        net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(
        net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(
        net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(
        net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(
        net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(
        net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(
        net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_4'] = ConvLayer(
        net['conv3_3'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_4'], 2)
    net['conv4_1'] = ConvLayer(
        net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(
        net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(
        net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['conv4_4'] = ConvLayer(
        net['conv4_3'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_4'], 2)
    net['conv5_1'] = ConvLayer(
        net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(
        net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(
        net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['conv5_4'] = ConvLayer(
        net['conv5_3'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_4'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(
        net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)
    return net
Example 15: residual_bottleneck_block
# Required import: from lasagne.layers import dnn [as alias]
# Or: from lasagne.layers.dnn import Conv2DDNNLayer [as alias]
def residual_bottleneck_block(l, increase_dim=False, first=False):
    """
    Create a residual learning bottleneck block (a 1x1 -> 3x3 -> 1x1 conv stack), as in
    'Identity Mappings in Deep Residual Networks', Kaiming He et al. 2016
    (https://arxiv.org/abs/1603.05027).
    """
    input_num_filters = l.output_shape[1]
    if increase_dim:
        first_stride = (2, 2)
        out_num_filters = input_num_filters * 2
    else:
        first_stride = (1, 1)
        out_num_filters = input_num_filters
    if first:
        # hacky solution to keep layers correct
        bn_pre_relu = l
        out_num_filters = out_num_filters * 4
    else:
        # contains the BN -> ReLU portion, steps 1 to 2
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)
    bottleneck_filters = out_num_filters // 4  # integer division; '/' would give a float on Python 3
    # contains the weight -> BN -> ReLU portion, steps 3 to 5
    conv_1 = batch_norm(
        ConvLayer(bn_pre_relu, num_filters=bottleneck_filters, filter_size=(1, 1), stride=(1, 1),
                  nonlinearity=rectify, pad='same', W=he_norm))
    conv_2 = batch_norm(
        ConvLayer(conv_1, num_filters=bottleneck_filters, filter_size=(3, 3), stride=first_stride,
                  nonlinearity=rectify, pad='same', W=he_norm))
    # contains the last weight portion, step 6
    conv_3 = ConvLayer(conv_2, num_filters=out_num_filters, filter_size=(1, 1), stride=(1, 1),
                       nonlinearity=None, pad='same', W=he_norm)
    if increase_dim:
        # projection shortcut, as option B in the paper
        projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1, 1), stride=(2, 2),
                               nonlinearity=None, pad='same', b=None)
        block = ElemwiseSumLayer([conv_3, projection])
    elif first:
        # projection shortcut, as option B in the paper
        projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1, 1), stride=(1, 1),
                               nonlinearity=None, pad='same', b=None)
        block = ElemwiseSumLayer([conv_3, projection])
    else:
        block = ElemwiseSumLayer([conv_3, l])
    return block