

Python nn.BatchNorm Method Code Examples

This article collects typical usage examples of the Python method mxnet.gluon.nn.BatchNorm. If you are wondering what nn.BatchNorm does, how to call it, or what real uses of it look like, the curated code examples below should help. You can also explore further usage examples for the enclosing module, mxnet.gluon.nn.


The sections below present 15 code examples of the nn.BatchNorm method, sorted by popularity by default.
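
As a quick orientation before the examples: nn.BatchNorm is typically placed right after a convolution layer, and with Gluon's deferred initialization its in_channels is inferred from the first forward pass. The following minimal sketch illustrates that workflow; the layer sizes and input shape are illustrative choices, not taken from any example below.

import mxnet as mx
from mxnet.gluon import nn

# Minimal sketch (illustrative sizes): BatchNorm placed after Conv2D, with
# in_channels inferred from the first forward pass (deferred initialization).
net = nn.HybridSequential()
with net.name_scope():
    net.add(nn.Conv2D(16, kernel_size=3, padding=1),
            nn.BatchNorm(),            # in_channels inferred as 16
            nn.Activation('relu'))
net.initialize()
out = net(mx.nd.ones((1, 3, 8, 8)))    # first call triggers shape inference
print(net[1].gamma.shape)              # (16,)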

Example 1: test_fill_shape_load

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def test_fill_shape_load():
    ctx = mx.context.current_context()
    net1 = nn.HybridSequential()
    with net1.name_scope():
        net1.add(nn.Conv2D(64, kernel_size=2, padding=1),
                 nn.BatchNorm(),
                 nn.Dense(10))
    net1.hybridize()
    net1.initialize(ctx=ctx)
    net1(mx.nd.ones((2,3,5,7), ctx))
    net1.save_parameters('net_fill.params')

    net2 = nn.HybridSequential()
    with net2.name_scope():
        net2.add(nn.Conv2D(64, kernel_size=2, padding=1),
                 nn.BatchNorm(),
                 nn.Dense(10))
    net2.hybridize()
    net2.initialize()
    net2.load_parameters('net_fill.params', ctx)
    assert net2[0].weight.shape[1] == 3, net2[0].weight.shape[1]
    assert net2[1].gamma.shape[0] == 64, net2[1].gamma.shape[0]
    assert net2[2].weight.shape[1] == 3072, net2[2].weight.shape[1] 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 25, Source file: test_gluon.py

Example 2: test_slice_batchnorm

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def test_slice_batchnorm():
    class Net(gluon.HybridBlock):
        def __init__(self, slice, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(128, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.slice = slice

        def hybrid_forward(self, F, x):
            x_in = self.conv0(x)
            x_slice = x_in.slice(begin=tuple(self.slice[0]),
                                 end=tuple(self.slice[1]))
            out = self.bn0(x_slice)
            return out

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    slice = [[0, 0, 0, 0], [4, 32, 32, 32]]
    net = Net(slice)
    check_layer_forward_withinput(net, x) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 22, Source file: test_gluon.py

Example 3: test_slice_batchnorm_slice_batchnorm

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def test_slice_batchnorm_slice_batchnorm():
    class Net(gluon.HybridBlock):
        def __init__(self, slice, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(128, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.bn1 = nn.BatchNorm()
                self.slice = slice

        def hybrid_forward(self, F, x):
            x_in = self.conv0(x)
            x_slice = x_in.slice(begin=tuple(self.slice[0][0]), end=tuple(self.slice[0][1]))
            y = self.bn0(x_slice)
            y_slice = y.slice(begin=tuple(self.slice[1][0]), end=tuple(self.slice[1][1]))
            out = self.bn1(y_slice)
            return out

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    slice = [[[0, 0, 0, 0], [4, 32, 32, 32]], [[0, 0, 0, 0], [2, 64, 16, 16]]]
    net = Net(slice)
    check_layer_forward_withinput(net, x) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 24, Source file: test_gluon.py

Example 4: test_reshape_batchnorm_reshape_batchnorm

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def test_reshape_batchnorm_reshape_batchnorm():
    class Net(gluon.HybridBlock):
        def __init__(self, shape, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(128, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.bn1 = nn.BatchNorm()
                self.reshape = shape

        def hybrid_forward(self, F, x):
            x_in = self.conv0(x)
            x_reshape = x_in.reshape(self.reshape[0])
            y = self.bn0(x_reshape)
            y_reshape = y.reshape(self.reshape[1])
            out = self.bn1(y_reshape)
            return out

    x = mx.nd.random.uniform(shape=(4, 32, 64, 64))
    shape = [(4, 64, 64, -1), (4, 128, -1, 32)]
    net = Net(shape)
    check_layer_forward_withinput(net, x) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 24, Source file: test_gluon.py

Example 5: test_slice_batchnorm_reshape_batchnorm

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def test_slice_batchnorm_reshape_batchnorm():
    class Net(gluon.HybridBlock):
        def __init__(self, shape, slice, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(128, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.bn1 = nn.BatchNorm()
                self.reshape = shape
                self.slice = slice

        def hybrid_forward(self, F, x):
            x_in = self.conv0(x)
            x_slice = x_in.slice(begin=tuple(self.slice[0]), end=tuple(self.slice[1]))
            y = self.bn0(x_slice)
            y_reshape = y.reshape(self.reshape)
            out = self.bn1(y_reshape)
            return out

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    slice = [[0, 0, 0, 0], [4, 32, 32, 32]]
    shape = (1, 128, 64, -1)
    net = Net(shape, slice)
    check_layer_forward_withinput(net, x) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 26, Source file: test_gluon.py

Example 6: test_reshape_batchnorm_slice_batchnorm

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def test_reshape_batchnorm_slice_batchnorm():
    class Net(gluon.HybridBlock):
        def __init__(self, shape, slice, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(128, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.bn1 = nn.BatchNorm()
                self.reshape = shape
                self.slice = slice

        def hybrid_forward(self, F, x):
            x_in = self.conv0(x)
            x_reshape = x_in.reshape(self.reshape)
            y = self.bn0(x_reshape)
            y_slice = y.slice(begin=tuple(self.slice[0]), end=tuple(self.slice[1]))
            out = self.bn1(y_slice)
            return out

    x = mx.nd.random.uniform(shape=(4, 32, 64, 64))
    slice = [[0, 0, 0, 0], [2, 64, 32, 32]]
    shape = (4, 64, 64, -1)
    net = Net(shape, slice)
    check_layer_forward_withinput(net, x) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 26, Source file: test_gluon.py

Example 7: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def __init__(self):
        super(CellStem0, self).__init__()
        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(42, 1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(42, 42, 5, 2, 2)
        self.comb_iter_0_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

        self.comb_iter_1_left = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_1_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_2_right = BranchSeparablesStem(96, 42, 5, 2, 2, bias=False)

        self.comb_iter_3_right = nn.AvgPool2D(pool_size=3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(42, 42, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2D(pool_size=3, strides=2, padding=1) 
Developer ID: deepinsight, Project: insightocr, Lines of code: 22, Source file: fnasnet.py

Example 8: _make_dense_layer

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def _make_dense_layer(growth_rate, bn_size, dropout):
    new_features = nn.HybridSequential(prefix='')
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
    if dropout:
        new_features.add(nn.Dropout(dropout))

    out = gluon.contrib.nn.HybridConcurrent(axis=1, prefix='')
    out.add(gluon.contrib.nn.Identity())
    out.add(new_features)

    return out 
Developer ID: deepinsight, Project: insightface, Lines of code: 20, Source file: fdensenet.py
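
The HybridConcurrent/Identity pair in Example 8 implements DenseNet's skip concatenation: the block's input is concatenated with its newly computed features along the channel axis. A minimal standalone check of that behavior (the 1x1 convolution here is an illustrative stand-in for new_features):

import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn

concat = gluon.contrib.nn.HybridConcurrent(axis=1, prefix='')
concat.add(gluon.contrib.nn.Identity())   # passes the input through unchanged
concat.add(nn.Conv2D(8, kernel_size=1))   # illustrative stand-in for new_features
concat.initialize()
x = mx.nd.ones((1, 4, 5, 5))
print(concat(x).shape)                    # (1, 12, 5, 5): 4 input + 8 new channels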

Example 9: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def __init__(self, num_init_features, growth_rate, block_config,
                 bn_size=4, dropout=0, classes=1000, **kwargs):

        super(DenseNet, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(nn.Conv2D(num_init_features, kernel_size=3,
                                        strides=1, padding=1, use_bias=False))
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
            # Add dense blocks
            num_features = num_init_features
            for i, num_layers in enumerate(block_config):
                self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i+1))
                num_features = num_features + num_layers * growth_rate
                if i != len(block_config) - 1:
                    self.features.add(_make_transition(num_features // 2))
                    num_features = num_features // 2
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            #self.features.add(nn.AvgPool2D(pool_size=7))
            #self.features.add(nn.Flatten())

            #self.output = nn.Dense(classes) 
Developer ID: deepinsight, Project: insightface, Lines of code: 27, Source file: fdensenet.py

Example 10: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def __init__(self, depth, ctx, pretrained=True, num_classes=0):
        super(ResNet, self).__init__()
        self.pretrained = pretrained

        with self.name_scope():
            network = ResNet.__factory[depth](pretrained=pretrained, ctx=ctx).features[0:-1]
            network[-1][0].body[0]._kwargs['stride'] = (1, 1)
            network[-1][0].downsample[0]._kwargs['stride'] = (1, 1)
            self.base = nn.HybridSequential()
            for n in network:
                self.base.add(n)

            self.avgpool = nn.GlobalAvgPool2D()
            self.flatten = nn.Flatten()
            self.bn = nn.BatchNorm(center=False, scale=True)
            self.bn.initialize(init=init.Zero(), ctx=ctx)

            self.classifier = nn.Dense(num_classes, use_bias=False)
            self.classifier.initialize(init=init.Normal(0.001), ctx=ctx) 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 21, Source file: resnet.py

Example 11: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def __init__(self):
        super(SRGenerator, self).__init__()
        self.conv1 = nn.Conv2D(64, kernel_size=3, strides=1,padding=1,activation='relu')
        self.res_block = nn.HybridSequential()
        with self.name_scope():
            for i in range(16):
                self.res_block.add(
                    ResnetBlock()
                )

            self.res_block.add(
                nn.Conv2D(64, kernel_size=3, strides=1,padding=1,use_bias=False),
                nn.BatchNorm()
            )
        self.subpix_block1 = SubpixelBlock()
        self.subpix_block2 = SubpixelBlock()
        self.conv4 = nn.Conv2D(3,kernel_size=1,strides=1,activation='tanh') 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 19, Source file: train_srgan.py

Example 12: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def __init__(self, channels, size1=14, scale=(1, 2, 1),
                 norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(AttentionModule_stage3, self).__init__(**kwargs)
        p, t, r = scale
        with self.name_scope():
            self.first_residual_blocks = nn.HybridSequential()
            _add_block(self.first_residual_blocks, ResidualBlock, p, channels,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)

            self.trunk_branches = nn.HybridSequential()
            _add_block(self.trunk_branches, ResidualBlock, t, channels,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)

            self.mpool1 = nn.MaxPool2D(pool_size=3, strides=2, padding=1)

            self.softmax1_blocks = nn.HybridSequential()
            _add_block(self.softmax1_blocks, ResidualBlock, 2 * r, channels,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)

            self.interpolation1 = UpsamplingBilinear2d(size=size1)

            self.softmax2_blocks = nn.HybridSequential()
            _add_sigmoid_layer(self.softmax2_blocks, channels, norm_layer, norm_kwargs)

            self.last_blocks = ResidualBlock(channels) 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 27, Source file: residual_attentionnet.py
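
Example 12 follows the common GluonCV convention of parameterizing the normalization layer: callers pass a class via norm_layer plus its keyword arguments via norm_kwargs, and the block instantiates it. A minimal sketch of that convention follows (make_conv_bn is a hypothetical helper for illustration, not part of residual_attentionnet.py):

from mxnet.gluon import nn

# Illustrative sketch of the norm_layer/norm_kwargs convention: the caller
# chooses the normalization class, and the block instantiates it.
def make_conv_bn(channels, norm_layer=nn.BatchNorm, norm_kwargs=None):
    block = nn.HybridSequential()
    block.add(nn.Conv2D(channels, kernel_size=3, padding=1, use_bias=False))
    block.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
    return block

# This lets callers swap in another variant without touching the block, e.g.
# make_conv_bn(64, norm_layer=mx.gluon.contrib.nn.SyncBatchNorm,
#              norm_kwargs={'num_devices': 2})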

Example 13: _make_features

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def _make_features(self, layers, filters, batch_norm):
        featurizer = mx.gluon.nn.HybridSequential(prefix='')
        for i, num in enumerate(layers):
            for _ in range(num):
                featurizer.add(Conv2D(filters[i], kernel_size=3, padding=1,
                                      weight_initializer=Xavier(rnd_type='gaussian',
                                                                factor_type='out',
                                                                magnitude=2),
                                      bias_initializer='zeros'))
                if batch_norm:
                    featurizer.add(BatchNorm())
                featurizer.add(Activation('relu'))
            featurizer.add(MaxPool2D(strides=2))
        return featurizer 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 16, Source file: vgg.py

Example 14: test_batchnorm

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def test_batchnorm():
    layer = nn.BatchNorm(in_channels=10)
    check_layer_forward(layer, (2, 10, 10, 10)) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 5, Source file: test_gluon.py

Example 15: test_fill_shape_deferred

# Required import: from mxnet.gluon import nn [as alias]
# Or alternatively: from mxnet.gluon.nn import BatchNorm [as alias]
def test_fill_shape_deferred():
    net = nn.HybridSequential()
    with net.name_scope():
        net.add(nn.Conv2D(64, kernel_size=2, padding=1),
                nn.BatchNorm(),
                nn.Dense(10))
    net.hybridize()
    net.initialize()
    net(mx.nd.ones((2,3,5,7)))
    assert net[0].weight.shape[1] == 3, net[0].weight.shape[1]
    assert net[1].gamma.shape[0] == 64, net[1].gamma.shape[0]
    assert net[2].weight.shape[1] == 3072, net[2].weight.shape[1] 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 14, Source file: test_gluon.py


Note: the mxnet.gluon.nn.BatchNorm method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers, and copyright in the source code remains with the original authors; for distribution and use, please refer to the corresponding project's License. Do not repost without permission.