This article collects typical usage examples of the Python method lasagne.layers.batch_norm. If you have been asking yourself what layers.batch_norm does, how to call it, and where it is used, the curated example code below may help. You can also browse further usage examples from its containing module, lasagne.layers.
The following shows 15 code examples of layers.batch_norm, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
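As a quick orientation before the examples, here is a minimal sketch following the Lasagne documentation: batch_norm is a convenience function that rewires an existing layer so that batch normalization is applied between the layer's linear transformation and its nonlinearity, removing the layer's now-redundant bias in the process.

from lasagne.layers import InputLayer, DenseLayer, batch_norm
from lasagne.nonlinearities import rectify

l_in = InputLayer(shape=(None, 100))
# Equivalent to: DenseLayer without bias -> BatchNormLayer -> rectify
l_hid = batch_norm(DenseLayer(l_in, num_units=256, nonlinearity=rectify))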
Example 1: build_discriminator_toy
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def build_discriminator_toy(image=None, nd=512, GP_norm=None):
    Input = InputLayer(shape=(None, 2), input_var=image)
    print("Dis input:", Input.output_shape)
    dis0 = DenseLayer(Input, nd, W=Normal(0.02), nonlinearity=relu)
    print("Dis fc0:", dis0.output_shape)
    if GP_norm is True:
        # with a gradient penalty, batch norm is omitted in the discriminator
        dis1 = DenseLayer(dis0, nd, W=Normal(0.02), nonlinearity=relu)
    else:
        dis1 = batch_norm(DenseLayer(dis0, nd, W=Normal(0.02), nonlinearity=relu))
    print("Dis fc1:", dis1.output_shape)
    if GP_norm is True:
        dis2 = DenseLayer(dis1, nd, W=Normal(0.02), nonlinearity=relu)
    else:
        dis2 = batch_norm(DenseLayer(dis1, nd, W=Normal(0.02), nonlinearity=relu))
    print("Dis fc2:", dis2.output_shape)
    disout = DenseLayer(dis2, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print("Dis output:", disout.output_shape)
    return disout
Example 2: build_discriminator_32
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def build_discriminator_32(image=None, ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 32, 32), input_var=image)
    print("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg, ndf, (4, 4), (2, 2), pad=1, W=Normal(0.02), nonlinearity=lrelu)
    print("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(Conv2DLayer(dis1, ndf*2, (4, 4), (2, 2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(Conv2DLayer(dis2, ndf*4, (4, 4), (2, 2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print("Dis conv3:", dis3.output_shape)
    # Dense output layer
    dis4 = DenseLayer(dis3, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print("Dis output:", dis4.output_shape)
    return dis4
Example 3: build_discriminator_64
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def build_discriminator_64(image=None, ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 64, 64), input_var=image)
    print("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg, ndf, (4, 4), (2, 2), pad=1, W=Normal(0.02), nonlinearity=lrelu)
    print("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(Conv2DLayer(dis1, ndf*2, (4, 4), (2, 2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(Conv2DLayer(dis2, ndf*4, (4, 4), (2, 2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print("Dis conv3:", dis3.output_shape)
    # Conv Layer
    dis4 = batch_norm(Conv2DLayer(dis3, ndf*8, (4, 4), (2, 2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print("Dis conv4:", dis4.output_shape)
    # Dense output layer
    dis5 = DenseLayer(dis4, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print("Dis output:", dis5.output_shape)
    return dis5
Example 4: build_discriminator_128
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def build_discriminator_128(image=None, ndf=128):
    lrelu = LeakyRectify(0.2)
    # input: images
    InputImg = InputLayer(shape=(None, 3, 128, 128), input_var=image)
    print("Dis Img_input:", InputImg.output_shape)
    # Conv Layer
    dis1 = Conv2DLayer(InputImg, ndf, (4, 4), (2, 2), pad=1, W=Normal(0.02), nonlinearity=lrelu)
    print("Dis conv1:", dis1.output_shape)
    # Conv Layer
    dis2 = batch_norm(Conv2DLayer(dis1, ndf*2, (4, 4), (2, 2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print("Dis conv2:", dis2.output_shape)
    # Conv Layer
    dis3 = batch_norm(Conv2DLayer(dis2, ndf*4, (4, 4), (2, 2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print("Dis conv3:", dis3.output_shape)
    # Conv Layer
    dis4 = batch_norm(Conv2DLayer(dis3, ndf*8, (4, 4), (2, 2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print("Dis conv4:", dis4.output_shape)
    # Conv Layer
    dis5 = batch_norm(Conv2DLayer(dis4, ndf*16, (4, 4), (2, 2), pad=1, W=Normal(0.02), nonlinearity=lrelu))
    print("Dis conv5:", dis5.output_shape)
    # Dense output layer
    dis6 = DenseLayer(dis5, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print("Dis output:", dis6.output_shape)
    return dis6
Example 5: instance_norm
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def instance_norm(layer, **kwargs):
    """
    The equivalent of Lasagne's `batch_norm()` convenience function, but for instance normalization.
    See: http://lasagne.readthedocs.io/en/latest/modules/layers/normalization.html#lasagne.layers.batch_norm
    """
    nonlinearity = getattr(layer, 'nonlinearity', None)
    if nonlinearity is not None:
        # defer the nonlinearity until after normalization
        layer.nonlinearity = identity
    if hasattr(layer, 'b') and layer.b is not None:
        # the normalization makes the layer's bias redundant
        del layer.params[layer.b]
        layer.b = None
    bn_name = (kwargs.pop('name', None) or
               (getattr(layer, 'name', None) and layer.name + '_bn'))
    layer = InstanceNormLayer(layer, name=bn_name, **kwargs)
    if nonlinearity is not None:
        nonlin_name = bn_name and bn_name + '_nonlin'
        layer = NonlinearityLayer(layer, nonlinearity, name=nonlin_name)
    return layer

# TODO: Add normalization
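Note that this snippet relies on an InstanceNormLayer class defined elsewhere in its project; Lasagne itself does not ship one. A minimal sketch of what such a layer could look like, assuming 4D (batch, channel, row, column) input:

import theano.tensor as T
from lasagne.layers import Layer

class InstanceNormLayer(Layer):
    # Hypothetical minimal stand-in for the project's InstanceNormLayer.
    def __init__(self, incoming, epsilon=1e-4, **kwargs):
        super(InstanceNormLayer, self).__init__(incoming, **kwargs)
        self.epsilon = epsilon

    def get_output_for(self, input, **kwargs):
        # normalize over the spatial axes, independently per sample and channel
        mean = input.mean(axis=(2, 3), keepdims=True)
        std = T.sqrt(input.var(axis=(2, 3), keepdims=True) + self.epsilon)
        return (input - mean) / std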
Example 6: setup_discriminator
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def setup_discriminator(self):
    c = args.discriminator_size
    self.make_layer('disc1.1', batch_norm(self.network['conv1_2']), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
    self.make_layer('disc1.2', self.last_layer(), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
    self.make_layer('disc2', batch_norm(self.network['conv2_2']), 2*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
    self.make_layer('disc3', batch_norm(self.network['conv3_2']), 3*c, filter_size=(3,3), stride=(1,1), pad=(1,1))
    hypercolumn = ConcatLayer([self.network['disc1.2>'], self.network['disc2>'], self.network['disc3>']])
    self.make_layer('disc4', hypercolumn, 4*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
    self.make_layer('disc5', self.last_layer(), 3*c, filter_size=(3,3), stride=(2,2))
    self.make_layer('disc6', self.last_layer(), 2*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
    self.network['disc'] = batch_norm(ConvLayer(self.last_layer(), 1, filter_size=(1,1),
                                                nonlinearity=lasagne.nonlinearities.linear))

#------------------------------------------------------------------------------------------------------------------
# Input / Output
#------------------------------------------------------------------------------------------------------------------
Example 7: build_critic
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    # prefer the cuDNN-backed batch norm when available
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions
    layer = batch_norm(Conv2DLayer(layer, 64, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear)
    layer = DenseLayer(layer, 1, nonlinearity=None)
    print("critic output:", layer.output_shape)
    return layer
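A brief usage sketch (with a hypothetical symbolic input variable), showing how the returned layer is typically turned into Theano expressions:

import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
critic = build_critic(input_var)
# training-mode output (batch statistics) vs. deterministic output (running averages)
score = lasagne.layers.get_output(critic)
score_det = lasagne.layers.get_output(critic, deterministic=True)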
Example 8: build_critic
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def build_critic(input_var=None):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    # prefer the cuDNN-backed batch norm when available
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two convolutions
    layer = batch_norm(Conv2DLayer(layer, 64, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear and without bias)
    layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
    print("critic output:", layer.output_shape)
    return layer
Example 9: residual_block
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def residual_block(l, increase_dim=False, projection=True, first=False):
    """
    Create a residual learning building block with two stacked 3x3 conv layers, as in the paper
    'Identity Mappings in Deep Residual Networks', Kaiming He et al., 2016 (https://arxiv.org/abs/1603.05027)
    """
    input_num_filters = l.output_shape[1]
    if increase_dim:
        first_stride = (2, 2)
        out_num_filters = input_num_filters * 2
    else:
        first_stride = (1, 1)
        out_num_filters = input_num_filters
    if first:
        # hacky solution to keep layers correct
        bn_pre_relu = l
    else:
        # contains the BN -> ReLU portion, steps 1 to 2
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)
    # contains the weight -> BN -> ReLU portion, steps 3 to 5
    conv_1 = batch_norm(ConvLayer(bn_pre_relu, num_filters=out_num_filters, filter_size=(3, 3), stride=first_stride,
                                  nonlinearity=rectify, pad='same', W=he_norm))
    # contains the last weight portion, step 6
    conv_2 = ConvLayer(conv_1, num_filters=out_num_filters, filter_size=(3, 3), stride=(1, 1), nonlinearity=None,
                       pad='same', W=he_norm)
    # add shortcut connections
    if increase_dim:
        # projection shortcut, as option B in the paper (note: this rebinds the `projection` argument)
        projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1, 1), stride=(2, 2), nonlinearity=None,
                               pad='same', b=None)
        block = ElemwiseSumLayer([conv_2, projection])
    else:
        block = ElemwiseSumLayer([conv_2, l])
    return block
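The residual_block and ResNet_FullPreActivation snippets assume a number of names bound at module level. A plausible set of imports (an assumption, since the original file is not shown) would be:

from lasagne.layers import (InputLayer, BatchNormLayer, NonlinearityLayer,
                            ElemwiseSumLayer, GlobalPoolLayer, DenseLayer, batch_norm)
from lasagne.layers import Conv2DLayer as ConvLayer
from lasagne.nonlinearities import rectify, softmax
from lasagne.init import HeNormal

he_norm = HeNormal(gain='relu')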
Example 10: ResNet_FullPreActivation
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def ResNet_FullPreActivation(input_shape=(None, 3, PIXELS, PIXELS), input_var=None, n_classes=10, n=18):
    """
    Adapted from https://github.com/Lasagne/Recipes/tree/master/papers/deep_residual_learning.
    Tweaked to be consistent with 'Identity Mappings in Deep Residual Networks', Kaiming He et al., 2016 (https://arxiv.org/abs/1603.05027).
    Formula to figure out the depth: 6n + 2.
    """
    # Building the network
    l_in = InputLayer(shape=input_shape, input_var=input_var)
    # first layer, output is 16 x 32 x 32
    l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3, 3), stride=(1, 1), nonlinearity=rectify, pad='same', W=he_norm))
    # first stack of residual blocks, output is 16 x 32 x 32
    l = residual_block(l, first=True)
    for _ in range(1, n):
        l = residual_block(l)
    # second stack of residual blocks, output is 32 x 16 x 16
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)
    # third stack of residual blocks, output is 64 x 8 x 8
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)
    # final BN -> ReLU before pooling
    bn_post_conv = BatchNormLayer(l)
    bn_post_relu = NonlinearityLayer(bn_post_conv, rectify)
    # average pooling
    avg_pool = GlobalPoolLayer(bn_post_relu)
    # fully connected layer
    network = DenseLayer(avg_pool, num_units=n_classes, W=HeNormal(), nonlinearity=softmax)
    return network
Example 11: createCNN
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def createCNN(self):
    net = {}
    net['input'] = lasagne.layers.InputLayer(shape=(None, self.nChannels, self.imageHeight, self.imageWidth), input_var=self.data)
    print("Input shape: {0}".format(net['input'].output_shape))
    # STAGE 1
    net['s1_conv1_1'] = batch_norm(Conv2DLayer(net['input'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_conv1_2'] = batch_norm(Conv2DLayer(net['s1_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_pool1'] = lasagne.layers.Pool2DLayer(net['s1_conv1_2'], 2)
    net['s1_conv2_1'] = batch_norm(Conv2DLayer(net['s1_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv2_2'] = batch_norm(Conv2DLayer(net['s1_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool2'] = lasagne.layers.Pool2DLayer(net['s1_conv2_2'], 2)
    net['s1_conv3_1'] = batch_norm(Conv2DLayer(net['s1_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv3_2'] = batch_norm(Conv2DLayer(net['s1_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool3'] = lasagne.layers.Pool2DLayer(net['s1_conv3_2'], 2)
    net['s1_conv4_1'] = batch_norm(Conv2DLayer(net['s1_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv4_2'] = batch_norm(Conv2DLayer(net['s1_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool4'] = lasagne.layers.Pool2DLayer(net['s1_conv4_2'], 2)
    net['s1_fc1_dropout'] = lasagne.layers.DropoutLayer(net['s1_pool4'], p=0.5)
    net['s1_fc1'] = batch_norm(lasagne.layers.DenseLayer(net['s1_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))
    net['s1_output'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=136, nonlinearity=None)
    net['s1_landmarks'] = LandmarkInitLayer(net['s1_output'], self.initLandmarks)
    for i in range(1, self.nStages):
        self.addDANStage(i + 1, net)
    net['output'] = net['s' + str(self.nStages) + '_landmarks']
    return net
Example 12: createCNN
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def createCNN(self):
    net = {}
    net['input'] = lasagne.layers.InputLayer(shape=(None, self.nChannels, self.imageHeight, self.imageWidth), input_var=self.data)
    print("Input shape: {0}".format(net['input'].output_shape))
    # STAGE 1
    net['s1_conv1_1'] = batch_norm(Conv2DLayer(net['input'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_conv1_2'] = batch_norm(Conv2DLayer(net['s1_conv1_1'], 64, 3, pad='same', W=GlorotUniform('relu')))
    net['s1_pool1'] = lasagne.layers.Pool2DLayer(net['s1_conv1_2'], 2)
    net['s1_conv2_1'] = batch_norm(Conv2DLayer(net['s1_pool1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv2_2'] = batch_norm(Conv2DLayer(net['s1_conv2_1'], 128, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool2'] = lasagne.layers.Pool2DLayer(net['s1_conv2_2'], 2)
    net['s1_conv3_1'] = batch_norm(Conv2DLayer(net['s1_pool2'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv3_2'] = batch_norm(Conv2DLayer(net['s1_conv3_1'], 256, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool3'] = lasagne.layers.Pool2DLayer(net['s1_conv3_2'], 2)
    net['s1_conv4_1'] = batch_norm(Conv2DLayer(net['s1_pool3'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_conv4_2'] = batch_norm(Conv2DLayer(net['s1_conv4_1'], 512, 3, pad=1, W=GlorotUniform('relu')))
    net['s1_pool4'] = lasagne.layers.Pool2DLayer(net['s1_conv4_2'], 2)
    net['s1_fc1_dropout'] = lasagne.layers.DropoutLayer(net['s1_pool4'], p=0.5)
    net['s1_fc1'] = batch_norm(lasagne.layers.DenseLayer(net['s1_fc1_dropout'], num_units=256, W=GlorotUniform('relu')))
    net['s1_output'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=136, nonlinearity=None)
    net['s1_landmarks'] = LandmarkInitLayer(net['s1_output'], self.initLandmarks)
    if self.confidenceLayer:
        net['s1_confidence'] = lasagne.layers.DenseLayer(net['s1_fc1'], num_units=2, W=GlorotUniform('relu'), nonlinearity=lasagne.nonlinearities.softmax)
    for i in range(1, self.nStages):
        self.addDANStage(i + 1, net)
    net['output'] = net['s' + str(self.nStages) + '_landmarks']
    if self.confidenceLayer:
        net['output'] = lasagne.layers.ConcatLayer([net['output'], net['s1_confidence']])
    return net
Example 13: batch_norm
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def batch_norm(layer):
    # apply batch normalization only when enabled in the config
    if cfg.BATCH_NORM:
        return l_batch_norm(layer)
    else:
        return layer
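This wrapper assumes a project config object and an aliased import of the real Lasagne function, along the lines of the following (hypothetical names, inferred from the snippet's usage):

from lasagne.layers import batch_norm as l_batch_norm
import config as cfg  # expected to define a boolean cfg.BATCH_NORM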
Example 14: classificationBranch
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def classificationBranch(net, kernel_size):
    # Post Convolution
    branch = l.batch_norm(l.Conv2DLayer(net,
                                        num_filters=int(FILTERS[-1] * RESNET_K),
                                        filter_size=kernel_size,
                                        nonlinearity=nl.rectify))
    #log.p(("\t\tPOST CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))
    # Dropout Layer
    branch = l.DropoutLayer(branch)
    # Dense Convolution
    branch = l.batch_norm(l.Conv2DLayer(branch,
                                        num_filters=int(FILTERS[-1] * RESNET_K * 2),
                                        filter_size=1,
                                        nonlinearity=nl.rectify))
    #log.p(("\t\tDENSE CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))
    # Dropout Layer
    branch = l.DropoutLayer(branch)
    # Class Convolution
    branch = l.Conv2DLayer(branch,
                           num_filters=len(cfg.CLASSES),
                           filter_size=1,
                           nonlinearity=None)
    return branch
Example 15: create_model
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import batch_norm [as alias]
def create_model(self, X, Z, n_dim, n_out, n_chan=1):
    # params
    n_lat = 100       # number of latent variables
    n_g_hid1 = 1024   # size of hidden layer in generator layer 1
    n_g_hid2 = 128    # size of hidden layer in generator layer 2
    n_out = n_dim * n_dim * n_chan  # total dimensionality of output
    if self.model == 'gaussian':
        raise Exception('Gaussian variables are currently not supported in GAN')
    # create the generator network
    l_g_in = lasagne.layers.InputLayer(shape=(None, n_lat), input_var=Z)
    l_g_hid1 = batch_norm(lasagne.layers.DenseLayer(l_g_in, n_g_hid1))
    l_g_hid2 = batch_norm(lasagne.layers.DenseLayer(l_g_hid1, n_g_hid2*7*7))
    l_g_hid2 = lasagne.layers.ReshapeLayer(l_g_hid2, ([0], n_g_hid2, 7, 7))
    l_g_dc1 = batch_norm(Deconv2DLayer(l_g_hid2, 64, 5, stride=2, pad=2))
    l_g = Deconv2DLayer(l_g_dc1, n_chan, 5, stride=2, pad=2,
                        nonlinearity=lasagne.nonlinearities.sigmoid)
    print("Generator output:", l_g.output_shape)
    # create the discriminator network
    lrelu = lasagne.nonlinearities.LeakyRectify(0.2)
    l_d_in = lasagne.layers.InputLayer(shape=(None, n_chan, n_dim, n_dim),
                                       input_var=X)
    l_d_hid1 = batch_norm(lasagne.layers.Conv2DLayer(
        l_d_in, num_filters=64, filter_size=5, stride=2, pad=2,
        nonlinearity=lrelu, name='l_d_hid1'))
    l_d_hid2 = batch_norm(lasagne.layers.Conv2DLayer(
        l_d_hid1, num_filters=128, filter_size=5, stride=2, pad=2,
        nonlinearity=lrelu, name='l_d_hid2'))
    l_d_hid3 = batch_norm(lasagne.layers.DenseLayer(l_d_hid2, 1024, nonlinearity=lrelu))
    l_d = lasagne.layers.DenseLayer(l_d_hid3, 1, nonlinearity=lasagne.nonlinearities.sigmoid)
    print("Discriminator output:", l_d.output_shape)
    return l_g, l_d