本文整理汇总了Python中lasagne.layers.NonlinearityLayer方法的典型用法代码示例。如果您正苦于以下问题:Python layers.NonlinearityLayer方法的具体用法?Python layers.NonlinearityLayer怎么用?Python layers.NonlinearityLayer使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类lasagne.layers
的用法示例。
在下文中一共展示了layers.NonlinearityLayer方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: instance_norm
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def instance_norm(layer, **kwargs):
    """
    Insert an ``InstanceNormLayer`` behind ``layer``, mirroring Lasagne's
    ``batch_norm()`` convenience helper, and re-apply the layer's original
    nonlinearity on top of the normalization.
    Refer: http://lasagne.readthedocs.io/en/latest/modules/layers/normalization.html#lasagne.layers.batch_norm
    """
    saved_nonlin = getattr(layer, 'nonlinearity', None)
    if saved_nonlin is not None:
        # Strip the nonlinearity (and the now-redundant bias) off the wrapped
        # layer; the nonlinearity is re-applied after normalization.
        layer.nonlinearity = identity
        if getattr(layer, 'b', None) is not None:
            del layer.params[layer.b]
            layer.b = None
    norm_name = kwargs.pop('name', None)
    if not norm_name:
        # Derive a name from the wrapped layer when one exists.
        norm_name = getattr(layer, 'name', None) and layer.name + '_bn'
    result = InstanceNormLayer(layer, name=norm_name, **kwargs)
    if saved_nonlin is not None:
        result = NonlinearityLayer(result, saved_nonlin,
                                   name=norm_name and norm_name + '_nonlin')
    return result
# TODO: Add normalization
示例2: residual_block
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def residual_block(l, increase_dim=False, projection=True, first=False):
    """
    Create a residual learning building block with two stacked 3x3 conv layers,
    in the full pre-activation style of 'Identity Mappings in Deep Residual
    Networks', Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027).

    Parameters
    ----------
    l : Layer
        Incoming layer; channel count is read from ``l.output_shape[1]``.
    increase_dim : bool
        If True, halve the spatial resolution (stride 2) and double the
        number of filters; the shortcut becomes a strided 1x1 projection.
    projection : bool
        NOTE(review): currently ignored — when ``increase_dim`` is True the
        1x1 projection shortcut (option B in the paper) is always used. The
        original code also shadowed this parameter with a local variable;
        the local has been renamed to ``shortcut`` to remove the shadowing.
    first : bool
        If True, skip the leading BN->ReLU (the stem conv already applied it).
    """
    input_num_filters = l.output_shape[1]
    if increase_dim:
        first_stride = (2, 2)
        out_num_filters = input_num_filters * 2
    else:
        first_stride = (1, 1)
        out_num_filters = input_num_filters
    if first:
        # hacky solution to keep layers correct: input already ends in BN->ReLU
        bn_pre_relu = l
    else:
        # contains the BN -> ReLU portion, steps 1 to 2
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)
    # contains the weight -> BN -> ReLU portion, steps 3 to 5
    conv_1 = batch_norm(ConvLayer(bn_pre_relu, num_filters=out_num_filters, filter_size=(3, 3), stride=first_stride,
                                  nonlinearity=rectify, pad='same', W=he_norm))
    # contains the last weight portion, step 6
    conv_2 = ConvLayer(conv_1, num_filters=out_num_filters, filter_size=(3, 3), stride=(1, 1), nonlinearity=None,
                       pad='same', W=he_norm)
    # add shortcut connections
    if increase_dim:
        # projection shortcut, as option B in paper (renamed from `projection`
        # so it no longer shadows the parameter of the same name)
        shortcut = ConvLayer(l, num_filters=out_num_filters, filter_size=(1, 1), stride=(2, 2), nonlinearity=None,
                             pad='same', b=None)
        block = ElemwiseSumLayer([conv_2, shortcut])
    else:
        # identity shortcut
        block = ElemwiseSumLayer([conv_2, l])
    return block
示例3: ResNet_FullPreActivation
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def ResNet_FullPreActivation(input_shape=(None, 3, PIXELS, PIXELS), input_var=None, n_classes=10, n=18):
    """
    Adapted from https://github.com/Lasagne/Recipes/tree/master/papers/deep_residual_learning.
    Tweaked to be consistent with 'Identity Mappings in Deep Residual Networks',
    Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027).
    Formula to figure out depth: 6n + 2
    """
    network = InputLayer(shape=input_shape, input_var=input_var)
    # Stem convolution, output is 16 x 32 x 32.
    network = batch_norm(ConvLayer(network, num_filters=16, filter_size=(3, 3),
                                   stride=(1, 1), nonlinearity=rectify,
                                   pad='same', W=he_norm))
    # First stack of n residual blocks keeps 16 x 32 x 32.
    network = residual_block(network, first=True)
    for _ in range(n - 1):
        network = residual_block(network)
    # Two more stacks; each doubles the filters and halves the resolution
    # (32 x 16 x 16, then 64 x 8 x 8).
    for _stage in range(2):
        network = residual_block(network, increase_dim=True)
        for _ in range(n - 1):
            network = residual_block(network)
    # Final BN -> ReLU before pooling (full pre-activation style).
    network = NonlinearityLayer(BatchNormLayer(network), rectify)
    # Global average pooling followed by the softmax classifier.
    network = GlobalPoolLayer(network)
    return DenseLayer(network, num_units=n_classes, W=HeNormal(), nonlinearity=softmax)
示例4: ResLayer
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def ResLayer(incoming, IB):
    """Residual join: sum the inner block IB with its input, then apply ELU."""
    residual_sum = ESL([IB, incoming])
    return NL(residual_sum, elu)
示例5: get_output_for
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def get_output_for(self, input, deterministic=False, **kwargs):
    """Stochastic-depth gating: at test time scale the input by the survival
    probability; at train time keep or drop the whole input at random.

    NOTE(review): assumes ``self.p`` is the survival probability and
    ``self._srng`` is a theano RandomStreams — confirm against ``__init__``.
    """
    if deterministic:
        # Test time: return the expected value of the Bernoulli gate, p * input.
        return self.p*input
    else:
        # Train time: draw one uniform sample; if it falls below p the input
        # survives unchanged, otherwise the whole tensor is zeroed.
        # theano.ifelse evaluates only the chosen branch symbolically.
        return theano.ifelse.ifelse(
            T.lt(self._srng.uniform( (1,), 0, 1)[0], self.p),
            input,
            T.zeros(input.shape)
        )
# def ResDrop(incoming, IB, p):
# return NL(ESL([IfElseDropLayer(IB,survival_p=p),incoming]),elu)
示例6: ResDropNoPre
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def ResDropNoPre(incoming, IB, p):
    """Stochastic-depth residual join (no pre-activation): the inner block IB
    survives with probability p; its output is summed with the shortcut and
    passed through an ELU."""
    gated = IfElseDropLayer(IB, survival_p=p)
    return NL(ESL([gated, incoming]), elu)
示例7: ResDrop
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def ResDrop(incoming, IB, p):
    """Residual connection with stochastic depth: randomly drop the inner
    block IB (survival probability p), add the identity shortcut, then ELU."""
    dropped_block = IfElseDropLayer(IB, survival_p=p)
    merged = ESL([dropped_block, incoming])
    return NL(merged, elu)
示例8: ResLayer
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def ResLayer(incoming, IB):
    """Wrap the inner block IB in a residual connection followed by an ELU."""
    summed = ESL([IB, incoming])
    return NL(summed, elu)
# If-else Drop Layer, adopted from Christopher Beckham's recipe:
# https://github.com/Lasagne/Recipes/pull/67
示例9: batch_nmsp
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def batch_nmsp(layer, beta=init.Constant(-3.0), **kwargs):
    """
    Insert a ``BatchNormSparseLayer`` behind ``layer``, analogous to
    Lasagne's ``batch_norm()`` helper: the wrapped layer's nonlinearity and
    bias are stripped (the normalization makes the bias redundant) and the
    nonlinearity is re-applied on top of the normalization layer.
    """
    saved_nonlin = getattr(layer, 'nonlinearity', None)
    if saved_nonlin is not None:
        layer.nonlinearity = nonlinearities.identity
        if getattr(layer, 'b', None) is not None:
            # Drop the now-redundant bias parameter entirely.
            del layer.params[layer.b]
            layer.b = None
    result = BatchNormSparseLayer(layer, beta=beta, **kwargs)
    if saved_nonlin is not None:
        from lasagne.layers import NonlinearityLayer
        result = NonlinearityLayer(result, saved_nonlin)
    return result
示例10: build_model
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def build_model(input_var):
    """Build the VGG-16 graph as a dict of layers; output layer is 'prob'."""
    net = {}
    net['input'] = prev = InputLayer((None, 3, 224, 224), input_var=input_var)
    # Five conv stages: (filters, number of 3x3 convs), each ending in a pool.
    for stage, (n_filters, n_convs) in enumerate(
            [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)], start=1):
        for c in range(1, n_convs + 1):
            net['conv%d_%d' % (stage, c)] = prev = ConvLayer(
                prev, n_filters, 3, pad=1, flip_filters=False)
        net['pool%d' % stage] = prev = PoolLayer(prev, 2)
    # Classifier head: two 4096-unit FC layers with dropout, then softmax.
    net['fc6'] = prev = DenseLayer(prev, num_units=4096)
    net['fc6_dropout'] = prev = DropoutLayer(prev, p=0.5)
    net['fc7'] = prev = DenseLayer(prev, num_units=4096)
    net['fc7_dropout'] = prev = DropoutLayer(prev, p=0.5)
    net['fc8'] = prev = DenseLayer(prev, num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(prev, softmax)
    return net
示例11: MDBLOCK
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def MDBLOCK(incoming, num_filters, scales, name, nonlinearity):
    """Multiscale residual block: two BN -> nonlinearity -> MDCL stages,
    summed with the input, then a final BN -> nonlinearity."""
    # First BN -> nonlinearity -> multiscale conv.
    pre = NL(BN(incoming, name=name + 'bnorm0'), nonlinearity)
    first = MDCL(pre, num_filters, scales, name)
    # Second BN -> nonlinearity -> multiscale conv.
    mid = NL(BN(first, name=name + 'bnorm1'), nonlinearity)
    second = MDCL(mid, num_filters, scales, name + '2')
    # Residual sum with the block input, then the closing BN -> nonlinearity.
    summed = ESL([incoming, second])
    return NL(BN(summed, name=name + 'bnorm2'), nonlinearity)
# Gaussian Sample Layer for VAE from Tencia Lee
示例12: InceptionUpscaleLayer
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def InceptionUpscaleLayer(incoming,param_dict,block_name):
    """Build an Inception-style upscaling block: one parallel branch per entry
    of ``param_dict``, each branch a stack of either transposed convolutions
    ('convolutional' style) or Upscale2D + pooling sublayers, with optional
    per-sublayer BatchNorm; all branches are concatenated at the end.

    NOTE(review): each ``param_dict`` entry is assumed to hold per-sublayer
    lists under keys 'style', 'num_filters', 'filter_size', 'stride',
    'nonlinearity', 'bnorm' (and 'pad'/'mode' for pooling) — confirm against
    the param-dict generator mentioned below. Indentation was reconstructed
    from an unindented source. The loop variable ``dict`` shadows the builtin.
    """
    branch = [0]*len(param_dict)
    # Loop across branches
    for i,dict in enumerate(param_dict):
        for j,style in enumerate(dict['style']): # Loop up branch
            # First sublayer (j == 0) consumes `incoming`; later sublayers
            # consume the branch built so far. Transposed conv vs
            # upscale+pool is selected per sublayer by `style`.
            branch[i] = TC2D(
                incoming = branch[i] if j else incoming,
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                crop = dict['pad'][j] if 'pad' in dict else None,
                stride = dict['stride'][j],
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='convolutional'\
            else NL(
                incoming = lasagne.layers.dnn.Pool2DDNNLayer(
                    incoming = lasagne.layers.Upscale2DLayer(
                        incoming=incoming if j == 0 else branch[i],
                        scale_factor = dict['stride'][j]),
                    pool_size = dict['filter_size'][j],
                    stride = [1,1],
                    mode = dict['mode'][j],
                    pad = dict['pad'][j],
                    name = block_name+'_'+str(i)+'_'+str(j)),
                nonlinearity = dict['nonlinearity'][j])
            # Apply Batchnorm
            branch[i] = BN(branch[i],name = block_name+'_bnorm_'+str(i)+'_'+str(j)) if dict['bnorm'][j] else branch[i]
    # Concatenate Sublayers
    return CL(incomings=branch,name=block_name)
# Convenience function to efficiently generate param dictionaries for use with InceptioNlayer
示例13: ResLayer
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def ResLayer(incoming, IB, nonlinearity):
    """Residual join: sum of inner block IB and the shortcut input, followed
    by the supplied nonlinearity."""
    merged = ESL([IB, incoming])
    return NL(merged, nonlinearity)
# Inverse autoregressive flow layer
示例14: build_model
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def build_model():
    '''
    Builds C3D model
    Returns
    -------
    dict
        A dictionary containing the network layers, where the output layer is at key 'prob'
    '''
    relu = lasagne.nonlinearities.rectify
    net = {}
    net['input'] = prev = InputLayer((None, 3, 16, 112, 112))
    # ----------- 1st layer group ---------------
    # First pool keeps the temporal dimension (stride 1 over time).
    net['conv1a'] = prev = Conv3DDNNLayer(prev, 64, (3,3,3), pad=1, nonlinearity=relu, flip_filters=False)
    net['pool1'] = prev = MaxPool3DDNNLayer(prev, pool_size=(1,2,2), stride=(1,2,2))
    # ------------- 2nd layer group --------------
    net['conv2a'] = prev = Conv3DDNNLayer(prev, 128, (3,3,3), pad=1, nonlinearity=relu)
    net['pool2'] = prev = MaxPool3DDNNLayer(prev, pool_size=(2,2,2), stride=(2,2,2))
    # ----------------- 3rd layer group --------------
    net['conv3a'] = prev = Conv3DDNNLayer(prev, 256, (3,3,3), pad=1, nonlinearity=relu)
    net['conv3b'] = prev = Conv3DDNNLayer(prev, 256, (3,3,3), pad=1, nonlinearity=relu)
    net['pool3'] = prev = MaxPool3DDNNLayer(prev, pool_size=(2,2,2), stride=(2,2,2))
    # ----------------- 4th layer group --------------
    net['conv4a'] = prev = Conv3DDNNLayer(prev, 512, (3,3,3), pad=1, nonlinearity=relu)
    net['conv4b'] = prev = Conv3DDNNLayer(prev, 512, (3,3,3), pad=1, nonlinearity=relu)
    net['pool4'] = prev = MaxPool3DDNNLayer(prev, pool_size=(2,2,2), stride=(2,2,2))
    # ----------------- 5th layer group --------------
    net['conv5a'] = prev = Conv3DDNNLayer(prev, 512, (3,3,3), pad=1, nonlinearity=relu)
    net['conv5b'] = prev = Conv3DDNNLayer(prev, 512, (3,3,3), pad=1, nonlinearity=relu)
    # We need a padding layer, as C3D only pads on the right, which cannot be done with a theano pooling layer
    net['pad'] = prev = PadLayer(prev, width=[(0,1),(0,1)], batch_ndim=3)
    net['pool5'] = prev = MaxPool3DDNNLayer(prev, pool_size=(2,2,2), pad=(0,0,0), stride=(2,2,2))
    net['fc6-1'] = prev = DenseLayer(prev, num_units=4096, nonlinearity=relu)
    net['fc7-1'] = prev = DenseLayer(prev, num_units=4096, nonlinearity=relu)
    net['fc8-1'] = prev = DenseLayer(prev, num_units=487, nonlinearity=None)
    net['prob'] = NonlinearityLayer(prev, softmax)
    return net
示例15: build_model
# 需要导入模块: from lasagne import layers [as 别名]
# 或者: from lasagne.layers import NonlinearityLayer [as 别名]
def build_model():
    """Build the VGG CNN-S style network as a dict of layers; output is 'prob'."""
    net = {}
    net['input'] = prev = InputLayer((None, 3, 224, 224))
    net['conv1'] = prev = ConvLayer(prev, num_filters=96, filter_size=7,
                                    stride=2, flip_filters=False)
    # caffe has alpha = alpha * pool_size
    net['norm1'] = prev = NormLayer(prev, alpha=0.0001)
    net['pool1'] = prev = PoolLayer(prev, pool_size=3, stride=3,
                                    ignore_border=False)
    net['conv2'] = prev = ConvLayer(prev, num_filters=256, filter_size=5,
                                    flip_filters=False)
    net['pool2'] = prev = PoolLayer(prev, pool_size=2, stride=2,
                                    ignore_border=False)
    # conv3-conv5 share the same configuration: 512 filters, 3x3, pad 1.
    for idx in (3, 4, 5):
        net['conv%d' % idx] = prev = ConvLayer(prev, num_filters=512,
                                               filter_size=3, pad=1,
                                               flip_filters=False)
    net['pool5'] = prev = PoolLayer(prev, pool_size=3, stride=3,
                                    ignore_border=False)
    # Classifier head: two 4096-unit FC layers with dropout, then softmax.
    net['fc6'] = prev = DenseLayer(prev, num_units=4096)
    net['drop6'] = prev = DropoutLayer(prev, p=0.5)
    net['fc7'] = prev = DenseLayer(prev, num_units=4096)
    net['drop7'] = prev = DropoutLayer(prev, p=0.5)
    net['fc8'] = prev = DenseLayer(prev, num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(prev, softmax)
    return net