

Python nn.AvgPool2D Method Code Examples

This article collects typical usage examples of the mxnet.gluon.nn.AvgPool2D method in Python. If you are wondering how nn.AvgPool2D is used in practice, or what it is typically used for, the curated code examples below may help. You can also explore further usage examples from its containing module, mxnet.gluon.nn.


The following presents 15 code examples of the nn.AvgPool2D method, sorted by popularity by default.
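Before the project excerpts, here is a minimal, self-contained sketch of the layer itself (not taken from any project below; the input shape (1, 64, 56, 56) is chosen purely for illustration). It shows how a 3x3 average pooling with stride 2 and padding 1 halves the spatial dimensions, and how GlobalAvgPool2D collapses them to 1x1:

import mxnet as mx
from mxnet.gluon import nn

x = mx.nd.random.uniform(shape=(1, 64, 56, 56))        # NCHW input, illustrative shape

pool = nn.AvgPool2D(pool_size=3, strides=2, padding=1)  # parameter-free, no initialize() needed
print(pool(x).shape)    # (1, 64, 28, 28) -- H and W are halved

gpool = nn.GlobalAvgPool2D()
print(gpool(x).shape)   # (1, 64, 1, 1) -- each channel reduced to a single average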

Example 1: __init__

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def __init__(self):
        super(CellStem0, self).__init__()
        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(42, 1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(42, 42, 5, 2, 2)
        self.comb_iter_0_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

        self.comb_iter_1_left = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_1_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_2_right = BranchSeparablesStem(96, 42, 5, 2, 2, bias=False)

        self.comb_iter_3_right = nn.AvgPool2D(pool_size=3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(42, 42, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2D(pool_size=3, strides=2, padding=1) 
Developer ID: deepinsight, Project: insightocr, Lines of code: 22, Source file: fnasnet.py

Example 2: __init__

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def __init__(self, num_init_features, growth_rate, block_config,
                 bn_size=4, dropout=0, classes=1000, **kwargs):

        super(DenseNet, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(nn.Conv2D(num_init_features, kernel_size=3,
                                        strides=1, padding=1, use_bias=False))
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
            # Add dense blocks
            num_features = num_init_features
            for i, num_layers in enumerate(block_config):
                self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i+1))
                num_features = num_features + num_layers * growth_rate
                if i != len(block_config) - 1:
                    self.features.add(_make_transition(num_features // 2))
                    num_features = num_features // 2
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            #self.features.add(nn.AvgPool2D(pool_size=7))
            #self.features.add(nn.Flatten())

            #self.output = nn.Dense(classes) 
Developer ID: deepinsight, Project: insightface, Lines of code: 27, Source file: fdensenet.py

Example 3: _make_branch

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def _make_branch(use_pool, norm_layer, norm_kwargs, *conv_settings):
    out = nn.HybridSequential(prefix='')
    if use_pool == 'avg':
        out.add(nn.AvgPool2D(pool_size=3, strides=1, padding=1))
    elif use_pool == 'max':
        out.add(nn.MaxPool2D(pool_size=3, strides=1, padding=1))
    setting_names = ['in_channels', 'channels', 'kernel_size', 'strides', 'padding']
    for setting in conv_settings:
        kwargs = {}
        for i, value in enumerate(setting):
            if value is not None:
                if setting_names[i] == 'in_channels':
                    in_channels = value
                elif setting_names[i] == 'channels':
                    channels = value
                else:
                    kwargs[setting_names[i]] = value
        out.add(_make_basic_conv(in_channels, channels, norm_layer, norm_kwargs, **kwargs))
    return out 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 21, Source file: googlenet.py

Example 4: __init__

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def __init__(self,
                 in_channels,
                 classes,
                 **kwargs):
        super(MSDClassifier, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                strides=2))
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                strides=2))
            self.features.add(nn.AvgPool2D(
                pool_size=2,
                strides=2))

            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels)) 
Developer ID: osmr, Project: imgclsmob, Lines of code: 26, Source file: msdnet.py

Example 5: test_reshape_pooling2d

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def test_reshape_pooling2d():
    max_pooling = nn.MaxPool2D(strides=(2, 3), padding=(1, 1))
    avg_pooling = nn.AvgPool2D(strides=(2, 2), padding=(1, 1))
    global_maxpooling = nn.GlobalMaxPool2D()
    global_avgpooling = nn.GlobalAvgPool2D()
    pooling_layers = [max_pooling, avg_pooling, global_maxpooling, global_avgpooling]
    class Net(gluon.HybridBlock):
        def __init__(self,
                     shape,
                     pooling_layer,
                     **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.pool0 = pooling_layer

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape(self.reshape)
            out = self.pool0(x_reshape)
            return out

    x = mx.nd.random.uniform(shape=(4, 32, 32, 32))
    shape = (4, 64, 64, -1)
    for i in range(len(pooling_layers)):
        net = Net(shape, pooling_layers[i])
        check_layer_forward_withinput(net, x) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 28, Source file: test_gluon.py

Example 6: test_slice_pooling2d

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def test_slice_pooling2d():
    max_pooling = nn.MaxPool2D(strides=(2, 3), padding=(1, 1))
    avg_pooling = nn.AvgPool2D(strides=(2, 2), padding=(1, 1))
    global_maxpooling = nn.GlobalMaxPool2D()
    global_avgpooling = nn.GlobalAvgPool2D()
    pooling_layers = [max_pooling, avg_pooling, global_maxpooling, global_avgpooling]
    class Net(gluon.HybridBlock):
        def __init__(self,
                     slice,
                     pooling_layer,
                     **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.slice = slice
                self.pool0 = pooling_layer

        def hybrid_forward(self, F, x):
            x_slice = x.slice(begin=self.slice[0], end=self.slice[1])
            out = self.pool0(x_slice)
            return out

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    slice = [(0, 0, 0, 0), (4, 16, 32, 64)]
    for i in range(len(pooling_layers)):
        net = Net(slice, pooling_layers[i])
        check_layer_forward_withinput(net, x) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 28, Source file: test_gluon.py

Example 7: test_reshape_pooling2d_reshape_pooling2d

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def test_reshape_pooling2d_reshape_pooling2d():
    max_pooling = nn.MaxPool2D(strides=(2, 2), padding=(1, 1))
    avg_pooling = nn.AvgPool2D(strides=(2, 2), padding=(1, 1))
    global_maxpooling = nn.GlobalMaxPool2D()
    global_avgpooling = nn.GlobalAvgPool2D()
    pooling_layers = [max_pooling, avg_pooling, global_maxpooling, global_avgpooling]
    class Net(gluon.HybridBlock):
        def __init__(self,
                     shape,
                     pooling_layer1,
                     pooling_layer2,
                     **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.pool0 = pooling_layer1
                self.pool1 = pooling_layer2

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape(self.reshape[0])
            y = self.pool0(x_reshape)
            y_reshape = y.reshape(self.reshape[1])
            out = self.pool1(y_reshape)
            return out

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    shape = [(128, 256, 64, -1), (128, 256, 11, -1)]
    for i in range(len(pooling_layers)):
        for j in range(len(pooling_layers)):
            if isinstance(pooling_layers[i], (nn.GlobalMaxPool2D, nn.GlobalAvgPool2D)):
                shape[1] = (256, 128, 1, 1)
            net = Net(shape, pooling_layers[i], pooling_layers[j])
            check_layer_forward_withinput(net, x) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 35, Source file: test_gluon.py

Example 8: test_slice_pooling2d_slice_pooling2d

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def test_slice_pooling2d_slice_pooling2d():
    max_pooling = nn.MaxPool2D(strides=(2, 3), padding=(1, 1))
    avg_pooling = nn.AvgPool2D(strides=(2, 2), padding=(1, 1))
    global_maxpooling = nn.GlobalMaxPool2D()
    global_avgpooling = nn.GlobalAvgPool2D()
    pooling_layers = [max_pooling, avg_pooling, global_maxpooling, global_avgpooling]
    class Net(gluon.HybridBlock):
        def __init__(self,
                     slice,
                     pooling_layer1,
                     pooling_layer2,
                     **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.slice = slice
                self.pool0 = pooling_layer1
                self.pool1 = pooling_layer2

        def hybrid_forward(self, F, x):
            x_slice = x.slice(begin=self.slice[0][0], end=self.slice[0][1])
            y = self.pool0(x_slice)
            y_slice = y.slice(begin=self.slice[1][0], end=self.slice[1][1])
            out = self.pool1(y_slice)
            return out

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    slice = [[(8, 0, 100, 50), (16, -1, -1, -1)], [(0, 64, 0, 50), (2, -1, -1, -1)]]
    for i in range(len(pooling_layers)):
        for j in range(len(pooling_layers)):
            if isinstance(pooling_layers[i], (nn.GlobalMaxPool2D, nn.GlobalAvgPool2D)):
                slice[1] = [(0, 64, 0, 0), (2, -1, 1, 1)]
            net = Net(slice, pooling_layers[i], pooling_layers[j])
            check_layer_forward_withinput(net, x) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 35, Source file: test_gluon.py

Example 9: test_reshape_pooling2d_slice_pooling2d

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def test_reshape_pooling2d_slice_pooling2d():
    max_pooling = nn.MaxPool2D(strides=(2, 3), padding=(1, 1))
    avg_pooling = nn.AvgPool2D(strides=(2, 2), padding=(1, 1))
    global_maxpooling = nn.GlobalMaxPool2D()
    global_avgpooling = nn.GlobalAvgPool2D()
    pooling_layers = [max_pooling, avg_pooling, global_maxpooling, global_avgpooling]
    class Net(gluon.HybridBlock):
        def __init__(self,
                     shape,
                     slice,
                     pooling_layer1,
                     pooling_layer2,
                     **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.slice = slice
                self.pool0 = pooling_layer1
                self.pool1 = pooling_layer2

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape(self.reshape)
            y = self.pool0(x_reshape)
            y_slice = y.slice(begin=self.slice[0], end=self.slice[1])
            out = self.pool1(y_slice)
            return out

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    shape = (0, 512, 64, -1)
    slice = [(8, 256, 10, 20), (-1, -1, -1, 70)]
    for i in range(len(pooling_layers)):
        for j in range(len(pooling_layers)):
            if isinstance(pooling_layers[i], (nn.GlobalMaxPool2D, nn.GlobalAvgPool2D)):
                slice = [(8, 256, 0, 0), (-1, -1, 1, 1)]
            net = Net(shape, slice, pooling_layers[i], pooling_layers[j])
            check_layer_forward_withinput(net, x) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 38, Source file: test_gluon.py

Example 10: _make_transition

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def _make_transition(num_output_features):
    out = nn.HybridSequential(prefix='')
    out.add(nn.BatchNorm())
    #out.add(nn.Activation('relu'))
    out.add(Act())
    out.add(nn.Conv2D(num_output_features, kernel_size=1, use_bias=False))
    out.add(nn.AvgPool2D(pool_size=2, strides=2))
    return out

# Net 
Developer ID: deepinsight, Project: insightface, Lines of code: 12, Source file: fdensenet.py

Example 11: __init__

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def __init__(self, scale, m, classes=1000, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(ResidualAttentionModel, self).__init__(**kwargs)
        assert len(scale) == 3 and len(m) == 3
        m1, m2, m3 = m
        with self.name_scope():
            self.conv1 = nn.HybridSequential()
            with self.conv1.name_scope():
                self.conv1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, use_bias=False))
                self.conv1.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
                self.conv1.add(nn.Activation('relu'))
            self.mpool1 = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
            self.residual_block1 = ResidualBlock(256, in_channels=64)
            self.attention_module1 = nn.HybridSequential()
            _add_block(self.attention_module1, AttentionModule_stage1, m1, 256, scale=scale,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            self.residual_block2 = ResidualBlock(512, in_channels=256, stride=2)
            self.attention_module2 = nn.HybridSequential()
            _add_block(self.attention_module2, AttentionModule_stage2, m2, 512, scale=scale,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            self.residual_block3 = ResidualBlock(1024, in_channels=512, stride=2)
            self.attention_module3 = nn.HybridSequential()
            _add_block(self.attention_module3, AttentionModule_stage3, m3, 1024, scale=scale,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            self.residual_block4 = ResidualBlock(2048, in_channels=1024, stride=2)
            self.residual_block5 = ResidualBlock(2048)
            self.residual_block6 = ResidualBlock(2048)
            self.mpool2 = nn.HybridSequential()
            with self.mpool2.name_scope():
                self.mpool2.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
                self.mpool2.add(nn.Activation('relu'))
                self.mpool2.add(nn.AvgPool2D(pool_size=7, strides=1))
            self.fc = nn.Conv2D(classes, kernel_size=1) 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 34, Source file: residual_attentionnet.py

Example 12: __init__

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def __init__(self, version, classes=1000, **kwargs):
        super(SqueezeNet, self).__init__(**kwargs)
        assert version in ['1.0', '1.1'], ("Unsupported SqueezeNet version {version}:"
                                           "1.0 or 1.1 expected".format(version=version))
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            if version == '1.0':
                self.features.add(nn.Conv2D(96, kernel_size=7, strides=2))
                self.features.add(nn.Activation('relu'))
                self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
                self.features.add(_make_fire(16, 64, 64))
                self.features.add(_make_fire(16, 64, 64))
                self.features.add(_make_fire(32, 128, 128))
                self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
                self.features.add(_make_fire(32, 128, 128))
                self.features.add(_make_fire(48, 192, 192))
                self.features.add(_make_fire(48, 192, 192))
                self.features.add(_make_fire(64, 256, 256))
                self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
                self.features.add(_make_fire(64, 256, 256))
            else:
                self.features.add(nn.Conv2D(64, kernel_size=3, strides=2))
                self.features.add(nn.Activation('relu'))
                self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
                self.features.add(_make_fire(16, 64, 64))
                self.features.add(_make_fire(16, 64, 64))
                self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
                self.features.add(_make_fire(32, 128, 128))
                self.features.add(_make_fire(32, 128, 128))
                self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
                self.features.add(_make_fire(48, 192, 192))
                self.features.add(_make_fire(48, 192, 192))
                self.features.add(_make_fire(64, 256, 256))
                self.features.add(_make_fire(64, 256, 256))
            self.features.add(nn.Dropout(0.5))

            self.output = nn.HybridSequential(prefix='')
            self.output.add(nn.Conv2D(classes, kernel_size=1))
            self.output.add(nn.Activation('relu'))
            self.output.add(nn.AvgPool2D(13))
            self.output.add(nn.Flatten()) 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 43, Source file: squeezenet.py
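The nn.AvgPool2D(13) in the output head above effectively acts as a global pool: for a 224x224 input, the feature map reaching the SqueezeNet head is 13x13 with 512 channels. Below is a minimal shape check of just the head, under that assumption and with the default 1000 classes (illustrative values, not taken from the project):

import mxnet as mx
from mxnet.gluon import nn

head = nn.HybridSequential()
head.add(nn.Conv2D(1000, kernel_size=1))   # class scores per spatial location
head.add(nn.Activation('relu'))
head.add(nn.AvgPool2D(13))                 # 13x13 window matches the assumed final feature map size
head.add(nn.Flatten())
head.initialize()

x = mx.nd.random.uniform(shape=(1, 512, 13, 13))   # assumed final SqueezeNet feature map
print(head(x).shape)    # (1, 1000)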

Example 13: __init__

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def __init__(self, channels, cardinality, bottleneck_width, stride,
                 downsample=False, downsample_kernel_size=3, avg_down=False,
                 norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(SEBlock, self).__init__(**kwargs)
        D = int(math.floor(channels * (bottleneck_width / 64)))
        group_width = cardinality * D

        self.body = nn.HybridSequential(prefix='')
        self.body.add(nn.Conv2D(group_width // 2, kernel_size=1, use_bias=False))
        self.body.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        self.body.add(nn.Activation('relu'))
        self.body.add(nn.Conv2D(group_width, kernel_size=3, strides=stride, padding=1,
                                groups=cardinality, use_bias=False))
        self.body.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        self.body.add(nn.Activation('relu'))
        self.body.add(nn.Conv2D(channels * 4, kernel_size=1, use_bias=False))
        self.body.add(norm_layer(gamma_initializer='zeros',
                                 **({} if norm_kwargs is None else norm_kwargs)))

        self.se = nn.HybridSequential(prefix='')
        self.se.add(nn.Conv2D(channels // 4, kernel_size=1, padding=0))
        self.se.add(nn.Activation('relu'))
        self.se.add(nn.Conv2D(channels * 4, kernel_size=1, padding=0))
        self.se.add(nn.Activation('sigmoid'))

        if downsample:
            self.downsample = nn.HybridSequential(prefix='')
            if avg_down:
                self.downsample.add(nn.AvgPool2D(pool_size=stride, strides=stride,
                                                 ceil_mode=True, count_include_pad=False))
                self.downsample.add(nn.Conv2D(channels=channels * 4, kernel_size=1,
                                              strides=1, use_bias=False))
            else:
                downsample_padding = 1 if downsample_kernel_size == 3 else 0
                self.downsample.add(nn.Conv2D(channels * 4, kernel_size=downsample_kernel_size,
                                              strides=stride,
                                              padding=downsample_padding, use_bias=False))
            self.downsample.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        else:
            self.downsample = None 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 42, Source file: senet.py
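In the avg_down branch above, downsampling is done by a stride-sized average pool followed by a non-strided 1x1 convolution rather than a strided 1x1 convolution, so no activations are simply skipped; this is sometimes referred to as the ResNet-D downsampling trick. A minimal sketch of that branch with stride=2 and illustrative channel counts (256 in, 512 out; not values from the project):

import mxnet as mx
from mxnet.gluon import nn

downsample = nn.HybridSequential()
downsample.add(nn.AvgPool2D(pool_size=2, strides=2, ceil_mode=True, count_include_pad=False))
downsample.add(nn.Conv2D(channels=512, kernel_size=1, strides=1, use_bias=False))
downsample.add(nn.BatchNorm())
downsample.initialize()

x = mx.nd.random.uniform(shape=(1, 256, 28, 28))
print(downsample(x).shape)   # (1, 512, 14, 14) -- spatial size halved by the average pool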

Example 14: _make_transition

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def _make_transition(num_output_features, norm_layer, norm_kwargs):
    out = nn.HybridSequential(prefix='')
    out.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
    out.add(nn.Activation('relu'))
    out.add(nn.Conv2D(num_output_features, kernel_size=1, use_bias=False))
    out.add(nn.AvgPool2D(pool_size=2, strides=2))
    return out

# Net 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 11, Source file: densenet.py
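As a quick usage note for the transition block above (a hedged sketch: it assumes norm_layer=nn.BatchNorm, norm_kwargs=None, and an illustrative input of shape (1, 256, 28, 28)): in DenseNet the caller passes num_features // 2, so the 1x1 convolution halves the channel count while the 2x2 average pooling halves the spatial size.

import mxnet as mx
from mxnet.gluon import nn

block = _make_transition(128, nn.BatchNorm, None)   # helper defined in the excerpt above
block.initialize()

x = mx.nd.random.uniform(shape=(1, 256, 28, 28))
print(block(x).shape)   # (1, 128, 14, 14)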

Example 15: __init__

# Required module import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import AvgPool2D [as alias]
def __init__(self, num_init_features, growth_rate, block_config,
                 bn_size=4, dropout=0, classes=1000,
                 norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(DenseNet, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(nn.Conv2D(num_init_features, kernel_size=7,
                                        strides=2, padding=3, use_bias=False))
            self.features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
            # Add dense blocks
            num_features = num_init_features
            for i, num_layers in enumerate(block_config):
                self.features.add(_make_dense_block(
                    num_layers, bn_size, growth_rate, dropout, i+1, norm_layer, norm_kwargs))
                num_features = num_features + num_layers * growth_rate
                if i != len(block_config) - 1:
                    self.features.add(_make_transition(num_features // 2, norm_layer, norm_kwargs))
                    num_features = num_features // 2
            self.features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.AvgPool2D(pool_size=7))
            self.features.add(nn.Flatten())

            self.output = nn.Dense(classes) 
Developer ID: dmlc, Project: gluon-cv, Lines of code: 28, Source file: densenet.py


Note: The mxnet.gluon.nn.AvgPool2D method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and the source code copyright remains with the original authors. Please consult each project's license before distributing or reusing the code; do not reproduce without permission.