

Python layers.BatchNormLayer Method Code Examples

This article collects typical usage examples of the Python method lasagne.layers.BatchNormLayer. If you are unsure what layers.BatchNormLayer does or how to use it, the hand-picked code examples below should help. You can also explore the other usage examples available for the lasagne.layers module.


Below are 11 code examples of the layers.BatchNormLayer method, sorted by popularity by default.
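Before diving into the examples, a minimal, self-contained sketch of the two standard ways to use BatchNormLayer may be helpful (shapes and layer names here are illustrative, not taken from any example below): either insert it manually between a layer stripped of its bias and nonlinearity and a following NonlinearityLayer, or let the lasagne.layers.batch_norm() convenience wrapper do the rewiring.

import lasagne
from lasagne.layers import (InputLayer, Conv2DLayer, BatchNormLayer,
                            NonlinearityLayer)
from lasagne.nonlinearities import rectify

l_in = InputLayer((None, 3, 32, 32))

# manual insertion: convolution without bias/nonlinearity, then BN, then ReLU
l = Conv2DLayer(l_in, num_filters=16, filter_size=3, pad='same',
                b=None, nonlinearity=None)
l = BatchNormLayer(l)  # normalizes over all axes except the channel axis
l = NonlinearityLayer(l, nonlinearity=rectify)

# equivalent convenience wrapper: removes the bias, splices in a
# BatchNormLayer, and re-applies the nonlinearity after normalization
l2 = lasagne.layers.batch_norm(
    Conv2DLayer(l_in, num_filters=16, filter_size=3, pad='same',
                nonlinearity=rectify))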

Example 1: dense_block

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import BatchNormLayer [as alias]
def dense_block(network, num_layers, growth_rate, dropout, name_prefix):
    # concatenated 3x3 convolutions
    for n in range(num_layers):
        conv = affine_relu_conv(network, channels=growth_rate,
                                filter_size=3, dropout=dropout,
                                name_prefix=name_prefix + '_l%02d' % (n + 1))
        conv = BatchNormLayer(conv, name=name_prefix + '_l%02dbn' % (n + 1),
                              beta=None, gamma=None)
        network = ConcatLayer([network, conv], axis=1,
                              name=name_prefix + '_l%02d_join' % (n + 1))
    return network 
Author: Lasagne | Project: Recipes | Lines: 13 | Source: densenet_fast.py
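dense_block calls an affine_relu_conv helper defined elsewhere in densenet_fast.py. For context, a sketch of what that helper looks like (reconstructed from the same Recipes file, so treat the details as an assumption): since the BatchNormLayer above is created with beta=None and gamma=None, the learnable shift and scale reappear here as separate BiasLayer and ScaleLayer instances.

import lasagne
from lasagne.layers import (ScaleLayer, BiasLayer, NonlinearityLayer,
                            Conv2DLayer, DropoutLayer)
from lasagne.nonlinearities import rectify

def affine_relu_conv(network, channels, filter_size, dropout, name_prefix):
    # ScaleLayer/BiasLayer supply the affine transform that the preceding
    # BatchNormLayer (beta=None, gamma=None) deliberately left out
    network = ScaleLayer(network, name=name_prefix + '_scale')
    network = BiasLayer(network, name=name_prefix + '_shift')
    network = NonlinearityLayer(network, nonlinearity=rectify,
                                name=name_prefix + '_relu')
    network = Conv2DLayer(network, channels, filter_size, pad='same',
                          W=lasagne.init.HeNormal(gain='relu'),
                          b=None, nonlinearity=None,
                          name=name_prefix + '_conv')
    if dropout:
        network = DropoutLayer(network, dropout)
    return network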

Example 2: transition

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import BatchNormLayer [as alias]
def transition(network, dropout, name_prefix):
    # a transition 1x1 convolution followed by avg-pooling
    network = affine_relu_conv(network, channels=network.output_shape[1],
                               filter_size=1, dropout=dropout,
                               name_prefix=name_prefix)
    network = Pool2DLayer(network, 2, mode='average_inc_pad',
                          name=name_prefix + '_pool')
    network = BatchNormLayer(network, name=name_prefix + '_bn',
                             beta=None, gamma=None)
    return network 
Author: Lasagne | Project: Recipes | Lines: 12 | Source: densenet_fast.py
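dense_block and transition compose into the body of a DenseNet: blocks grow the channel count by concatenation, and transitions compress the spatial size between them. A hedged sketch of how a builder might chain them (block count, growth rate, and names are illustrative; the real builder in densenet_fast.py differs in details):

import lasagne
from lasagne.layers import InputLayer, Conv2DLayer

l_in = InputLayer((None, 3, 32, 32))
# initial convolution; no bias/nonlinearity, normalization happens in the blocks
network = Conv2DLayer(l_in, num_filters=16, filter_size=3, pad='same',
                      W=lasagne.init.HeNormal(gain='relu'),
                      b=None, nonlinearity=None, name='pre_conv')

# three dense blocks separated by two transitions, as in the DenseNet paper
for b in range(3):
    network = dense_block(network, num_layers=12, growth_rate=12,
                          dropout=0, name_prefix='block%d' % (b + 1))
    if b < 2:
        network = transition(network, dropout=0,
                             name_prefix='block%d' % (b + 1))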

Example 3: bn_relu_conv

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import BatchNormLayer [as alias]
def bn_relu_conv(network, channels, filter_size, dropout, name_prefix):
    network = BatchNormLayer(network, name=name_prefix + '_bn')
    network = NonlinearityLayer(network, nonlinearity=rectify,
                                name=name_prefix + '_relu')
    network = Conv2DLayer(network, channels, filter_size, pad='same',
                          W=lasagne.init.HeNormal(gain='relu'),
                          b=None, nonlinearity=None,
                          name=name_prefix + '_conv')
    if dropout:
        network = DropoutLayer(network, dropout)
    return network 
Author: Lasagne | Project: Recipes | Lines: 13 | Source: densenet.py
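A quick usage sketch of the resulting BN -> ReLU -> Conv ("full pre-activation") ordering; the parameter values are illustrative:

# network is any existing Lasagne layer; one pre-activation unit appends
# batch norm, ReLU, a bias-free 3x3 convolution, and dropout
network = bn_relu_conv(network, channels=12, filter_size=3,
                       dropout=0.2, name_prefix='block1_l01')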

Example 4: residual_block

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import BatchNormLayer [as alias]
def residual_block(l, increase_dim=False, projection=True, first=False):
    """
    Create a residual learning building block with two stacked 3x3 conv layers, as in the paper
    'Identity Mappings in Deep Residual Networks', Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027)
    """
    input_num_filters = l.output_shape[1]
    if increase_dim:
        first_stride = (2, 2)
        out_num_filters = input_num_filters * 2
    else:
        first_stride = (1, 1)
        out_num_filters = input_num_filters

    if first:
        # hacky solution to keep layers correct
        bn_pre_relu = l
    else:
        # contains the BN -> ReLU portion, steps 1 to 2
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)

    # contains the weight -> BN -> ReLU portion, steps 3 to 5
    conv_1 = batch_norm(ConvLayer(bn_pre_relu, num_filters=out_num_filters, filter_size=(3, 3), stride=first_stride,
                                  nonlinearity=rectify, pad='same', W=he_norm))

    # contains the last weight portion, step 6
    conv_2 = ConvLayer(conv_1, num_filters=out_num_filters, filter_size=(3, 3), stride=(1, 1), nonlinearity=None,
                       pad='same', W=he_norm)

    # add shortcut connections
    if increase_dim:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1, 1), stride=(2, 2), nonlinearity=None,
                               pad='same', b=None)
        block = ElemwiseSumLayer([conv_2, projection])
    else:
        block = ElemwiseSumLayer([conv_2, l])

    return block 
Author: CPJKU | Project: dcase_task2 | Lines: 41 | Source: res_net_blocks.py
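This example and the ones below rely on aliases defined at the top of res_net_blocks.py. A plausible reconstruction of that preamble (the exact names are assumptions inferred from how they are used):

from lasagne.layers import (InputLayer, DenseLayer, NonlinearityLayer,
                            BatchNormLayer, ElemwiseSumLayer,
                            GlobalPoolLayer, DropoutLayer, batch_norm)
from lasagne.layers import Conv2DLayer as ConvLayer
from lasagne.nonlinearities import rectify, softmax
from lasagne.init import HeNormal

PIXELS = 32                      # CIFAR-style input resolution (assumption)
he_norm = HeNormal(gain='relu')  # shared He initializer for the conv layers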

Example 5: ResNet_FullPreActivation

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import BatchNormLayer [as alias]
def ResNet_FullPreActivation(input_shape=(None, 3, PIXELS, PIXELS), input_var=None, n_classes=10, n=18):
    """
    Adapted from https://github.com/Lasagne/Recipes/tree/master/papers/deep_residual_learning.
    Tweaked to be consistent with 'Identity Mappings in Deep Residual Networks', Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027)

    Formula to figure out depth: 6n + 2
    """

    # Building the network
    l_in = InputLayer(shape=input_shape, input_var=input_var)

    # first layer, output is 16 x 32 x 32
    l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3, 3), stride=(1, 1), nonlinearity=rectify, pad='same', W=he_norm))

    # first stack of residual blocks, output is 16 x 32 x 32
    l = residual_block(l, first=True)
    for _ in range(1, n):
        l = residual_block(l)

    # second stack of residual blocks, output is 32 x 16 x 16
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)

    # third stack of residual blocks, output is 64 x 8 x 8
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)

    bn_post_conv = BatchNormLayer(l)
    bn_post_relu = NonlinearityLayer(bn_post_conv, rectify)

    # average pooling
    avg_pool = GlobalPoolLayer(bn_post_relu)

    # fully connected layer
    network = DenseLayer(avg_pool, num_units=n_classes, W=HeNormal(), nonlinearity=softmax)

    return network 
Author: CPJKU | Project: dcase_task2 | Lines: 41 | Source: res_net_blocks.py
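With the 6n + 2 formula, the default n=18 builds a 110-layer network. A hypothetical invocation for a smaller instance:

import theano.tensor as T

input_var = T.tensor4('inputs')
# n=5 gives a 6*5 + 2 = 32-layer full pre-activation ResNet
network = ResNet_FullPreActivation(input_shape=(None, 3, 32, 32),
                                   input_var=input_var, n_classes=10, n=5)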

Example 6: get_output_for

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import BatchNormLayer [as alias]
def get_output_for(self, input, deterministic=False,
                       batch_norm_use_averages=None,
                       batch_norm_update_averages=None, **kwargs):
        # If the BN variables shall be updated as before, delegate to the
        # parent implementation.
        if not isinstance(batch_norm_update_averages, dict):
            return super(BatchNormLayer, self).get_output_for(
                input, deterministic, batch_norm_use_averages,
                batch_norm_update_averages, **kwargs)
        else:
            input_mean = input.mean(self.axes)
            input_inv_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))

            # Decide whether to use the stored averages or mini-batch statistics
            if batch_norm_use_averages is None:
                batch_norm_use_averages = deterministic
            use_averages = batch_norm_use_averages

            if use_averages:
                mean = self.mean
                inv_std = self.inv_std
            else:
                mean = input_mean
                inv_std = input_inv_std

            # Instead of automatically updating the averages, we add the update
            # ops to a dictionary.
            update_averages = batch_norm_update_averages
            if isinstance(update_averages, dict):
                update_averages[self.mean] = ((1 - self.alpha) * self.mean +
                                              self.alpha * input_mean)
                update_averages[self.inv_std] = ((1 - self.alpha) *
                                                 self.inv_std + self.alpha *
                                                 input_inv_std)

            # prepare dimshuffle pattern inserting broadcastable axes as needed
            param_axes = iter(range(input.ndim - len(self.axes)))
            pattern = ['x' if input_axis in self.axes
                       else next(param_axes)
                       for input_axis in range(input.ndim)]

            # apply dimshuffle pattern to all parameters
            beta = 0 if self.beta is None else self.beta.dimshuffle(pattern)
            gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern)
            mean = mean.dimshuffle(pattern)
            inv_std = inv_std.dimshuffle(pattern)

            # normalize
            normalized = (input - mean) * (gamma * inv_std) + beta
            return normalized 
Author: TobyPDE | Project: FRRN | Lines: 52 | Source: layers.py
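The point of this override is that the running-average updates are collected into a caller-supplied dictionary rather than attached to the graph as Theano default updates, which helps when the same network is compiled into several functions. A hedged usage sketch (variable names are illustrative; network and input_var are assumed to come from the surrounding model code):

import theano
import lasagne

bn_updates = {}  # will be filled with {shared_variable: update_expression}
output = lasagne.layers.get_output(
    network, deterministic=False,
    batch_norm_update_averages=bn_updates)  # forwarded to get_output_for

loss = output.mean()  # placeholder loss, for illustration only
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(loss, params)
updates.update(bn_updates)  # apply the BN average updates alongside Adam's

train_fn = theano.function([input_var], loss, updates=updates)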

Example 7: residual_bottleneck_block

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import BatchNormLayer [as alias]
def residual_bottleneck_block(l, increase_dim=False, first=False):
    """
    Create a residual learning building block with a 1x1 -> 3x3 -> 1x1 bottleneck, as in the paper
    'Identity Mappings in Deep Residual Networks', Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027)
    """
    input_num_filters = l.output_shape[1]

    if increase_dim:
        first_stride = (2, 2)
        out_num_filters = input_num_filters * 2
    else:
        first_stride = (1, 1)
        out_num_filters = input_num_filters

    if first:
        # hacky solution to keep layers correct
        bn_pre_relu = l
        out_num_filters = out_num_filters * 4
    else:
        # contains the BN -> ReLU portion, steps 1 to 2
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)

    bottleneck_filters = out_num_filters // 4  # integer division; '/' would yield a float on Python 3

    # contains the weight -> BN -> ReLU portion, steps 3 to 5
    conv_1 = batch_norm(
        ConvLayer(bn_pre_relu, num_filters=bottleneck_filters, filter_size=(1, 1), stride=(1, 1), nonlinearity=rectify,
                  pad='same', W=he_norm))

    conv_2 = batch_norm(
        ConvLayer(conv_1, num_filters=bottleneck_filters, filter_size=(3, 3), stride=first_stride, nonlinearity=rectify,
                  pad='same', W=he_norm))

    # contains the last weight portion, step 6
    conv_3 = ConvLayer(conv_2, num_filters=out_num_filters, filter_size=(1, 1), stride=(1, 1), nonlinearity=None,
                       pad='same', W=he_norm)

    if increase_dim:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1, 1), stride=(2, 2), nonlinearity=None,
                               pad='same', b=None)
        block = ElemwiseSumLayer([conv_3, projection])

    elif first:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1, 1), stride=(1, 1), nonlinearity=None,
                               pad='same', b=None)
        block = ElemwiseSumLayer([conv_3, projection])

    else:
        block = ElemwiseSumLayer([conv_3, l])

    return block 
Author: CPJKU | Project: dcase_task2 | Lines: 56 | Source: res_net_blocks.py

Example 8: residual_wide_block

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import BatchNormLayer [as alias]
def residual_wide_block(l, increase_dim=False, projection=True, first=False, filters=16):
    """ Create a residual learning building block with two stacked 3x3 conv layers as in paper """
    if increase_dim:
        first_stride = (2, 2)
    else:
        first_stride = (1, 1)

    if first:
        # hacky solution to keep layers correct
        bn_pre_relu = l
    else:
        # contains the BN -> ReLU portion, steps 1 to 2
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)

    # contains the weight -> BN -> ReLU portion, steps 3 to 5
    conv_1 = batch_norm(
        ConvLayer(bn_pre_relu, num_filters=filters, filter_size=(3, 3), stride=first_stride, nonlinearity=rectify,
                  pad='same', W=he_norm))

    dropout = DropoutLayer(conv_1, p=0.3)

    # contains the last weight portion, step 6
    conv_2 = ConvLayer(dropout, num_filters=filters, filter_size=(3, 3), stride=(1, 1), nonlinearity=None, pad='same',
                       W=he_norm)

    # add shortcut connections
    if increase_dim:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l, num_filters=filters, filter_size=(1, 1), stride=(2, 2), nonlinearity=None, pad='same',
                               b=None)
        block = ElemwiseSumLayer([conv_2, projection])

    elif first:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l, num_filters=filters, filter_size=(1, 1), stride=(1, 1), nonlinearity=None, pad='same',
                               b=None)
        block = ElemwiseSumLayer([conv_2, projection])

    else:
        block = ElemwiseSumLayer([conv_2, l])

    return block 
Author: CPJKU | Project: dcase_task2 | Lines: 45 | Source: res_net_blocks.py
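A sketch of how these wide blocks are typically stacked, widening each stage's filter count by a factor k (the values follow the Wide Residual Networks paper but are assumptions here):

k = 4  # widening factor
n = 4  # blocks per stage
# l is the stem produced by an initial batch-normalized convolution

l = residual_wide_block(l, first=True, filters=16 * k)
for _ in range(1, n):
    l = residual_wide_block(l, filters=16 * k)

l = residual_wide_block(l, increase_dim=True, filters=32 * k)
for _ in range(1, n):
    l = residual_wide_block(l, filters=32 * k)

l = residual_wide_block(l, increase_dim=True, filters=64 * k)
for _ in range(1, n):
    l = residual_wide_block(l, filters=64 * k)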

Example 9: ResNet_BottleNeck_FullPreActivation

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import BatchNormLayer [as alias]
def ResNet_BottleNeck_FullPreActivation(input_shape=(None, 3, PIXELS, PIXELS), input_var=None, n_classes=10, n=18):
    '''
    Adapted from https://github.com/Lasagne/Recipes/tree/master/papers/deep_residual_learning.
    Tweaked to be consistent with 'Identity Mappings in Deep Residual Networks', Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027)

    Judging from https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua,
    the number of filters goes 16 -> 64 -> 128 -> 256.

    Formula to figure out depth: 9n + 2
    '''

    # Building the network
    l_in = InputLayer(shape=input_shape, input_var=input_var)

    # first layer, output is 16x16x16
    l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3, 3), stride=(1, 1), nonlinearity=rectify, pad='same', W=he_norm))

    # first stack of residual blocks, output is 64x16x16
    l = residual_bottleneck_block(l, first=True)
    for _ in range(1, n):
        l = residual_bottleneck_block(l)

    # second stack of residual blocks, output is 128x8x8
    l = residual_bottleneck_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_bottleneck_block(l)

    # third stack of residual blocks, output is 256x4x4
    l = residual_bottleneck_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_bottleneck_block(l)

    bn_post_conv = BatchNormLayer(l)
    bn_post_relu = NonlinearityLayer(bn_post_conv, rectify)

    # average pooling
    avg_pool = GlobalPoolLayer(bn_post_relu)

    # fully connected layer
    network = DenseLayer(avg_pool, num_units=n_classes, W=HeNormal(), nonlinearity=softmax)

    return network 
Author: CPJKU | Project: dcase_task2 | Lines: 44 | Source: res_net_blocks.py
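As with the basic variant, a hypothetical invocation; the default n=18 yields a 9*18 + 2 = 164-layer network (the ResNet-164 configuration). input_var is assumed to be a Theano tensor4 as in the previous sketch:

network = ResNet_BottleNeck_FullPreActivation(
    input_shape=(None, 3, 32, 32), input_var=input_var,
    n_classes=10, n=18)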

Example 10: build_model

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import BatchNormLayer [as alias]
def build_model():

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:singleslice:difference"]

    l0 = InputLayer(input_size)
    # add channel layer
    # l0r = reshape(l0, (-1, 1, ) + input_size[1:])

    # (batch, channel, time, x, y)

    l = ConvolutionOver2DAxisLayer(l0, num_filters=40, filter_size=(5, 5),
                                     axis=(2,3), channel=1,
                                     W=lasagne.init.Orthogonal(),
                                     b=lasagne.init.Constant(0.1),
                                     nonlinearity=lasagne.nonlinearities.identity
                                     )

    l = BatchNormLayer(l, gamma=None)
    l = lasagne.layers.NonlinearityLayer(l, nonlinearity=lasagne.nonlinearities.rectify)
    l = MaxPoolOver2DAxisLayer(l, pool_size=(2, 2), axis=(2,3), stride=(2,2))

    l = ConvolutionOver2DAxisLayer(l, num_filters=40, filter_size=(3, 3),
                                     axis=(2,3), channel=1,
                                     W=lasagne.init.Orthogonal(),
                                     b=lasagne.init.Constant(0.1),
                                     nonlinearity=lasagne.nonlinearities.identity
                                     )
    l = BatchNormLayer(l, gamma=None)
    l = lasagne.layers.NonlinearityLayer(l, nonlinearity=lasagne.nonlinearities.rectify)
    l = MaxPoolOver2DAxisLayer(l, pool_size=(2, 2), axis=(2,3), stride=(2,2))
    l_systole = lasagne.layers.DenseLayer(lasagne.layers.DropoutLayer(l),
                              num_units=600,
                              nonlinearity=lasagne.nonlinearities.softmax)

    l_diastole = lasagne.layers.DenseLayer(lasagne.layers.DropoutLayer(l),
                              num_units=600,
                              nonlinearity=lasagne.nonlinearities.softmax)

    return {
        "inputs":{
            "sliced:data:singleslice:difference": l0
        },
        "outputs": {
            "systole:onehot": l_systole,
            "diastole:onehot": l_diastole,
        }
    } 
Author: 317070 | Project: kaggle-heart | Lines: 52 | Source: j0_mxnet.py
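Note the gamma=None pattern used in this model: a ReLU follows immediately, positive scaling commutes with ReLU, and the next layer's weights can absorb any remaining scale, so the learned per-channel scale is dropped to save parameters. The same idea with standard Lasagne layers (a hedged sketch, not taken from the kaggle-heart code):

from lasagne.layers import (InputLayer, Conv2DLayer, BatchNormLayer,
                            NonlinearityLayer)
from lasagne.nonlinearities import rectify

l_in = InputLayer((None, 1, 64, 64))  # illustrative input shape
l = Conv2DLayer(l_in, num_filters=40, filter_size=5,
                b=None, nonlinearity=None)
l = BatchNormLayer(l, gamma=None)  # no learned scale; the shift (beta) is kept
l = NonlinearityLayer(l, nonlinearity=rectify)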

Example 11: build_model

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import BatchNormLayer [as alias]
def build_model():

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:singleslice:difference:middle"]

    l0 = InputLayer(input_size)
    # add channel layer
    # l0r = reshape(l0, (-1, 1, ) + input_size[1:])

    # (batch, channel, time, x, y)

    l = ConvolutionOver2DAxisLayer(l0, num_filters=40, filter_size=(5, 5),
                                     axis=(2,3), channel=1,
                                     W=lasagne.init.Orthogonal(),
                                     b=lasagne.init.Constant(0.1),
                                     nonlinearity=lasagne.nonlinearities.identity
                                     )

    l = BatchNormLayer(l, gamma=None)
    l = lasagne.layers.NonlinearityLayer(l, nonlinearity=lasagne.nonlinearities.rectify)
    l = MaxPoolOver2DAxisLayer(l, pool_size=(2, 2), axis=(2,3), stride=(2,2))

    l = ConvolutionOver2DAxisLayer(l, num_filters=40, filter_size=(3, 3),
                                     axis=(2,3), channel=1,
                                     W=lasagne.init.Orthogonal(),
                                     b=lasagne.init.Constant(0.1),
                                     nonlinearity=lasagne.nonlinearities.identity
                                     )
    l = BatchNormLayer(l, gamma=None)
    l = lasagne.layers.NonlinearityLayer(l, nonlinearity=lasagne.nonlinearities.rectify)
    l = MaxPoolOver2DAxisLayer(l, pool_size=(2, 2), axis=(2,3), stride=(2,2))
    l_systole = lasagne.layers.DenseLayer(lasagne.layers.DropoutLayer(l),
                              num_units=600,
                              nonlinearity=lasagne.nonlinearities.softmax)

    l_diastole = lasagne.layers.DenseLayer(lasagne.layers.DropoutLayer(l),
                              num_units=600,
                              nonlinearity=lasagne.nonlinearities.softmax)

    return {
        "inputs":{
            "sliced:data:singleslice:difference": l0
        },
        "outputs": {
            "systole:onehot": l_systole,
            "diastole:onehot": l_diastole,
        }
    } 
Author: 317070 | Project: kaggle-heart | Lines: 52 | Source: j0_mxnet1.py

Note: The lasagne.layers.BatchNormLayer examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Consult the corresponding project's License before distributing or using the code, and do not reproduce this article without permission.