This article collects typical usage examples of the Python method mxnet.gluon.nn.Flatten. If you are unsure what nn.Flatten does, how to call it, or what idiomatic usage looks like, the curated examples below should help. You can also explore the other members of the mxnet.gluon.nn module.
The following presents 15 code examples of nn.Flatten, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
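Before the examples, a minimal, self-contained sketch of the layer's behavior (assuming the MXNet 1.x NDArray API): nn.Flatten keeps the leading batch axis and collapses all remaining axes into one.

import mxnet as mx
from mxnet.gluon import nn

# Minimal demonstration: Flatten keeps axis 0 and collapses the rest.
# It has no parameters, so no initialize() call is needed.
flatten = nn.Flatten()
x = mx.nd.zeros((2, 3, 4, 5))    # (batch, channels, height, width)
print(flatten(x).shape)          # (2, 60)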
Example 1: __init__
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def __init__(self, num_init_features, growth_rate, block_config,
             bn_size=4, dropout=0, classes=1000, **kwargs):
    super(DenseNet, self).__init__(**kwargs)
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        self.features.add(nn.Conv2D(num_init_features, kernel_size=3,
                                    strides=1, padding=1, use_bias=False))
        self.features.add(nn.BatchNorm())
        self.features.add(nn.Activation('relu'))
        self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
        # Add dense blocks
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i + 1))
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                self.features.add(_make_transition(num_features // 2))
                num_features = num_features // 2
        self.features.add(nn.BatchNorm())
        self.features.add(nn.Activation('relu'))
        # self.features.add(nn.AvgPool2D(pool_size=7))
        # self.features.add(nn.Flatten())
        # self.output = nn.Dense(classes)
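The pooling/flatten/classifier head is commented out above, so this DenseNet variant ends with an unflattened feature map, presumably for use as a backbone. As a hedged sketch only (the 1000-class figure and the 1024x7x7 feature shape are assumptions, not from the source), re-attaching that head would look like:

import mxnet as mx
from mxnet.gluon import nn

# Sketch of the commented-out head; `classes` and the input shape
# are assumed for illustration.
classes = 1000
head = nn.HybridSequential(prefix='')
head.add(nn.AvgPool2D(pool_size=7))   # (N, C, 7, 7) -> (N, C, 1, 1)
head.add(nn.Flatten())                # -> (N, C)
head.add(nn.Dense(classes))           # -> (N, classes)
head.initialize()
print(head(mx.nd.zeros((2, 1024, 7, 7))).shape)  # (2, 1000)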
Example 2: __init__
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def __init__(self, depth, ctx, pretrained=True, num_classes=0):
    super(ResNet, self).__init__()
    self.pretrained = pretrained
    with self.name_scope():
        network = ResNet.__factory[depth](pretrained=pretrained, ctx=ctx).features[0:-1]
        network[-1][0].body[0]._kwargs['stride'] = (1, 1)
        network[-1][0].downsample[0]._kwargs['stride'] = (1, 1)
        self.base = nn.HybridSequential()
        for n in network:
            self.base.add(n)
        self.avgpool = nn.GlobalAvgPool2D()
        self.flatten = nn.Flatten()
        self.bn = nn.BatchNorm(center=False, scale=True)
        self.bn.initialize(init=init.Zero(), ctx=ctx)
        self.classifier = nn.Dense(num_classes, use_bias=False)
        self.classifier.initialize(init=init.Normal(0.001), ctx=ctx)
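The class's hybrid_forward is not part of this snippet; in the usual person re-identification pattern, the members above chain as base → GlobalAvgPool2D → Flatten → BatchNorm → Dense. A hedged, standalone sketch of that head wiring (the backbone output shape and the 751-identity figure are assumptions):

import mxnet as mx
from mxnet.gluon import nn

# Hedged sketch of the head implied by the members above; the shape
# (4, 2048, 16, 8) and 751 identities are assumed, not from the source.
avgpool, flatten = nn.GlobalAvgPool2D(), nn.Flatten()
bn = nn.BatchNorm(center=False, scale=True)
classifier = nn.Dense(751, use_bias=False)
bn.initialize()
classifier.initialize()
feat = mx.nd.random.uniform(shape=(4, 2048, 16, 8))
embedding = bn(flatten(avgpool(feat)))     # (4, 2048) embedding vector
print(classifier(embedding).shape)         # (4, 751)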
Example 3: __init__
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def __init__(self, multiplier=1.0, classes=1000,
             norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    super(MobileNet, self).__init__(**kwargs)
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        with self.features.name_scope():
            _add_conv(self.features, channels=int(32 * multiplier), kernel=3, pad=1, stride=2,
                      norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            dw_channels = [int(x * multiplier) for x in
                           [32, 64] + [128] * 2 + [256] * 2 + [512] * 6 + [1024]]
            channels = [int(x * multiplier) for x in
                        [64] + [128] * 2 + [256] * 2 + [512] * 6 + [1024] * 2]
            strides = [1, 2] * 3 + [1] * 5 + [2, 1]
            for dwc, c, s in zip(dw_channels, channels, strides):
                _add_conv_dw(self.features, dw_channels=dwc, channels=c, stride=s,
                             norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            self.features.add(nn.GlobalAvgPool2D())
            self.features.add(nn.Flatten())
        self.output = nn.Dense(classes)
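A quick hedged check of the configuration bookkeeping above: the three lists zipped together must line up, giving 13 depthwise-separable stages:

# Hedged check: the three lists above must have equal length (13 stages).
multiplier = 1.0
dw_channels = [int(x * multiplier) for x in
               [32, 64] + [128] * 2 + [256] * 2 + [512] * 6 + [1024]]
channels = [int(x * multiplier) for x in
            [64] + [128] * 2 + [256] * 2 + [512] * 6 + [1024] * 2]
strides = [1, 2] * 3 + [1] * 5 + [2, 1]
assert len(dw_channels) == len(channels) == len(strides) == 13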
Example 4: __init__
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def __init__(self, block, layers, channels, classes=10,
             norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    super(CIFARResNetV2, self).__init__(**kwargs)
    assert len(layers) == len(channels) - 1
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        self.features.add(norm_layer(scale=False, center=False,
                                     **({} if norm_kwargs is None else norm_kwargs)))
        self.features.add(nn.Conv2D(channels[0], 3, 1, 1, use_bias=False))
        in_channels = channels[0]
        for i, num_layer in enumerate(layers):
            stride = 1 if i == 0 else 2
            self.features.add(self._make_layer(block, num_layer, channels[i + 1],
                                               stride, i + 1, in_channels=in_channels,
                                               norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            in_channels = channels[i + 1]
        self.features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        self.features.add(nn.Activation('relu'))
        self.features.add(nn.GlobalAvgPool2D())
        self.features.add(nn.Flatten())
        self.output = nn.Dense(classes, in_units=in_channels)
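For concreteness, a hedged example of arguments satisfying the assertion above (resnet20_v2-style CIFAR values; assumed, not taken from this snippet):

# Hedged example arguments: three stages over a 16-channel stem.
layers = [3, 3, 3]            # residual blocks per stage
channels = [16, 16, 32, 64]   # stem + per-stage output channels
assert len(layers) == len(channels) - 1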
Example 5: __init__
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def __init__(self, block, layers, channels, drop_rate, classes=10,
             norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    super(CIFARWideResNet, self).__init__(**kwargs)
    assert len(layers) == len(channels) - 1
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        self.features.add(norm_layer(scale=False, center=False,
                                     **({} if norm_kwargs is None else norm_kwargs)))
        self.features.add(nn.Conv2D(channels[0], 3, 1, 1, use_bias=False))
        self.features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        in_channels = channels[0]
        for i, num_layer in enumerate(layers):
            stride = 1 if i == 0 else 2
            self.features.add(self._make_layer(block, num_layer, channels[i + 1], drop_rate,
                                               stride, i + 1, in_channels=in_channels,
                                               norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            in_channels = channels[i + 1]
        self.features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        self.features.add(nn.Activation('relu'))
        self.features.add(nn.GlobalAvgPool2D())
        self.features.add(nn.Flatten())
        self.output = nn.Dense(classes)
Example 6: __init__
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def __init__(self, classes=1000, **kwargs):
    super(AlexNet, self).__init__(**kwargs)
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        with self.features.name_scope():
            self.features.add(nn.Conv2D(64, kernel_size=11, strides=4,
                                        padding=2, activation='relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(nn.Conv2D(192, kernel_size=5, padding=2,
                                        activation='relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(nn.Conv2D(384, kernel_size=3, padding=1,
                                        activation='relu'))
            self.features.add(nn.Conv2D(256, kernel_size=3, padding=1,
                                        activation='relu'))
            self.features.add(nn.Conv2D(256, kernel_size=3, padding=1,
                                        activation='relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(nn.Flatten())
            self.features.add(nn.Dense(4096, activation='relu'))
            self.features.add(nn.Dropout(0.5))
            self.features.add(nn.Dense(4096, activation='relu'))
            self.features.add(nn.Dropout(0.5))
        self.output = nn.Dense(classes)
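With the canonical 224x224 input, the pooled feature map reaching nn.Flatten is 256x6x6, so the first Dense layer sees 9216 features. A hedged, standalone shape check of just the convolutional stack above:

import mxnet as mx
from mxnet.gluon import nn

# Hedged shape check: rebuild only the conv/pool stack above and confirm
# that Flatten produces 256*6*6 = 9216 features for a 224x224 input.
stack = nn.HybridSequential()
stack.add(nn.Conv2D(64, kernel_size=11, strides=4, padding=2, activation='relu'),
          nn.MaxPool2D(pool_size=3, strides=2),
          nn.Conv2D(192, kernel_size=5, padding=2, activation='relu'),
          nn.MaxPool2D(pool_size=3, strides=2),
          nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),
          nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'),
          nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'),
          nn.MaxPool2D(pool_size=3, strides=2),
          nn.Flatten())
stack.initialize()
print(stack(mx.nd.zeros((1, 3, 224, 224))).shape)  # (1, 9216)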
Example 7: __init__
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def __init__(self,
             in_channels,
             classes,
             **kwargs):
    super(MSDClassifier, self).__init__(**kwargs)
    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            strides=2))
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            strides=2))
        self.features.add(nn.AvgPool2D(
            pool_size=2,
            strides=2))
        self.output = nn.HybridSequential(prefix="")
        self.output.add(nn.Flatten())
        self.output.add(nn.Dense(
            units=classes,
            in_units=in_channels))
Example 8: __init__
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def __init__(self,
             channels,
             bn_use_global_stats,
             reduction_ratio=16,
             num_layers=1,
             **kwargs):
    super(ChannelGate, self).__init__(**kwargs)
    mid_channels = channels // reduction_ratio
    with self.name_scope():
        self.pool = nn.GlobalAvgPool2D()
        self.flatten = nn.Flatten()
        self.init_fc = DenseBlock(
            in_channels=channels,
            out_channels=mid_channels,
            bn_use_global_stats=bn_use_global_stats)
        self.main_fcs = nn.HybridSequential(prefix="")
        for i in range(num_layers - 1):
            self.main_fcs.add(DenseBlock(
                in_channels=mid_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats))
        self.final_fc = nn.Dense(
            units=channels,
            in_units=mid_channels)
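The gate's hybrid_forward is not shown; in the usual squeeze-and-excitation pattern it runs pool → flatten → FC stack → reshape → broadcast multiply with the input. A hedged sketch of that pattern, with plain Dense layers standing in for the DenseBlock helper (which is not defined in this snippet) and the gating nonlinearity assumed:

import mxnet as mx
from mxnet.gluon import nn

# Hedged SE-style gate sketch; Dense stands in for DenseBlock, and the
# sigmoid gate is an assumption about the unshown forward pass.
channels, reduction_ratio = 64, 16
pool, flatten = nn.GlobalAvgPool2D(), nn.Flatten()
init_fc = nn.Dense(channels // reduction_ratio, activation='relu')
final_fc = nn.Dense(channels)
init_fc.initialize()
final_fc.initialize()
x = mx.nd.random.uniform(shape=(2, channels, 8, 8))
w = final_fc(init_fc(flatten(pool(x))))   # (2, 64) channel weights
w = w.sigmoid().reshape((0, -1, 1, 1))    # -> (2, 64, 1, 1)
print(mx.nd.broadcast_mul(x, w).shape)    # (2, 64, 8, 8)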
Example 9: test_flatten
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def test_flatten():
    flatten = nn.Flatten()
    x = mx.nd.zeros((3, 4, 5, 6))
    assert flatten(x).shape == (3, 4 * 5 * 6)
    x = mx.nd.zeros((3, 6))
    assert flatten(x).shape == (3, 6)
    x = mx.nd.zeros((3,))
    assert flatten(x).shape == (3, 1)
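A hedged companion check: because nn.Flatten is a parameter-free HybridBlock, the same shape behavior should hold after hybridization, when the call is routed through the symbolic flatten operator.

import mxnet as mx
from mxnet.gluon import nn

# Flatten carries no parameters, so it can be hybridized and called directly.
flatten = nn.Flatten()
flatten.hybridize()
assert flatten(mx.nd.zeros((3, 4, 5, 6))).shape == (3, 120)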
Example 10: test_hybrid_stale_cache
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def test_hybrid_stale_cache():
    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                  bias_initializer='ones', flatten=False))
    net.hybridize()
    net.initialize()
    net(mx.nd.ones((2, 3, 5)))

    net.add(mx.gluon.nn.Flatten())
    assert net(mx.nd.ones((2, 3, 5))).shape == (2, 30)

    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.fc1 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                    bias_initializer='ones', flatten=False)
        net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                    bias_initializer='ones', flatten=False)
    net.hybridize()
    net.initialize()
    net(mx.nd.ones((2, 3, 5)))

    net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                bias_initializer='ones', flatten=True)
    net.initialize()
    assert net(mx.nd.ones((2, 3, 5))).shape == (2, 10)
Example 11: __init__
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def __init__(self, num_classes=1000, width_mult=1.0, **kwargs):
    super(MobilenetV2, self).__init__(**kwargs)
    self.w = width_mult
    self.cn = [int(x * self.w) for x in [32, 16, 24, 32, 64, 96, 160, 320]]

    def InvertedResidualSequence(t, cn_id, n, s):
        seq = nn.HybridSequential()
        seq.add(InvertedResidual(t, self.cn[cn_id - 1], self.cn[cn_id], s, same_shape=False))
        for _ in range(n - 1):
            seq.add(InvertedResidual(t, self.cn[cn_id - 1], self.cn[cn_id], 1))
        return seq

    self.b0 = ConvBlock(self.cn[0], 3, 1)
    self.b1 = InvertedResidualSequence(1, 1, 1, 1)
    self.b2 = InvertedResidualSequence(6, 2, 2, 2)
    self.b3 = InvertedResidualSequence(6, 3, 3, 2)
    self.b4 = InvertedResidualSequence(6, 4, 4, 1)
    self.b5 = InvertedResidualSequence(6, 5, 3, 2)
    self.b6 = InvertedResidualSequence(6, 6, 3, 2)
    self.b7 = InvertedResidualSequence(6, 7, 1, 1)

    self.last_channels = int(1280 * self.w) if self.w > 1.0 else 1280
    with self.name_scope():
        self.features = nn.HybridSequential()
        with self.features.name_scope():
            self.features.add(self.b0, self.b1, self.b2, self.b3,
                              self.b4, self.b5, self.b6, self.b7)
            self.features.add(Conv1x1(self.last_channels))
            # self.features.add(nn.GlobalAvgPool2D())
            # self.features.add(nn.Flatten())
        # self.output = nn.Dense(num_classes)
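A hedged note on the last_channels rule above: the final 1x1 convolution is widened when width_mult exceeds 1.0 but is never narrowed below 1280, matching the MobileNetV2 convention.

# Hedged check of the width-multiplier rule for the final 1x1 conv.
for w in (0.5, 1.0, 1.4):
    last_channels = int(1280 * w) if w > 1.0 else 1280
    print(w, last_channels)   # 0.5 -> 1280, 1.0 -> 1280, 1.4 -> 1792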
Example 12: __init__
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def __init__(self, m=1.0, **kwargs):
    super(MNasNet, self).__init__(**kwargs)
    self.first_oup = int(32 * m)
    self.second_oup = int(16 * m)
    # self.second_oup = int(32 * m)
    self.interverted_residual_setting = [
        # t, c, n, s, k, prefix
        [3, int(24 * m), 3, 2, 3, "stage2_"],    # -> 56x56
        [3, int(40 * m), 3, 2, 5, "stage3_"],    # -> 28x28
        [6, int(80 * m), 3, 2, 5, "stage4_1_"],  # -> 14x14
        [6, int(96 * m), 2, 1, 3, "stage4_2_"],  # -> 14x14
        [6, int(192 * m), 4, 2, 5, "stage5_1_"], # -> 7x7
        [6, int(320 * m), 1, 1, 3, "stage5_2_"], # -> 7x7
    ]
    self.last_channels = int(1024 * m)
    with self.name_scope():
        self.features = nn.HybridSequential()
        self.features.add(ConvBlock(self.first_oup, 3, 1, prefix="stage1_conv0_"))
        self.features.add(SepCONV(self.first_oup, self.second_oup, 3, prefix="stage1_sepconv0_"))
        inp = self.second_oup
        for i, (t, c, n, s, k, prefix) in enumerate(self.interverted_residual_setting):
            oup = c
            self.features.add(ExpandedConvSequence(t, k, inp, oup, n, s, prefix=prefix))
            inp = oup
        self.features.add(Conv1x1(self.last_channels, prefix="stage5_3_"))
        # self.features.add(nn.GlobalAvgPool2D())
        # self.features.add(nn.Flatten())
        # self.output = nn.Dense(num_classes)
Example 13: __init__
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def __init__(self):
    super(SRDiscriminator, self).__init__()
    self.model = nn.HybridSequential()
    self.res_block = nn.HybridSequential()
    df_dim = 64
    with self.name_scope():
        self.model.add(
            nn.Conv2D(df_dim, 4, 2, 1),
            nn.LeakyReLU(0.2)
        )
        for i in [2, 4, 8, 16, 32]:
            self.model.add(ConvBlock(df_dim * i))
        self.model.add(ConvBlock(df_dim * 16, 1, 1, padding=0))
        self.model.add(
            nn.Conv2D(df_dim * 8, 1, 1, use_bias=False),
            nn.BatchNorm()
        )
        self.res_block.add(
            ConvBlock(df_dim * 2, 1, 1),
            ConvBlock(df_dim * 2, 3, 1),
            nn.Conv2D(df_dim * 8, 3, 1, use_bias=False),
            nn.BatchNorm()
        )
        self.lrelu = nn.LeakyReLU(0.2)
        self.flatten = nn.Flatten()
        self.dense = nn.Dense(1)
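The forward pass is not included in this snippet; a typical SRGAN-style discriminator tail combines the trunk output and the residual branch with an add, then applies LeakyReLU, Flatten, and the one-unit Dense. A hedged sketch of that tail only (the wiring and all shapes are assumptions):

import mxnet as mx
from mxnet.gluon import nn

# Hedged sketch of a discriminator tail using the members above:
# residual add -> LeakyReLU -> Flatten -> Dense(1). Shapes are assumed.
lrelu, flatten, dense = nn.LeakyReLU(0.2), nn.Flatten(), nn.Dense(1)
dense.initialize()
h = mx.nd.random.uniform(shape=(2, 512, 4, 4))    # trunk output (assumed)
res = mx.nd.random.uniform(shape=(2, 512, 4, 4))  # res_block output (assumed)
logit = dense(flatten(lrelu(h + res)))
print(logit.shape)                                # (2, 1)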
Example 14: __init__
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def __init__(self, version, classes=1000, **kwargs):
    super(SqueezeNet, self).__init__(**kwargs)
    assert version in ['1.0', '1.1'], ("Unsupported SqueezeNet version {version}: "
                                       "1.0 or 1.1 expected".format(version=version))
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        if version == '1.0':
            self.features.add(nn.Conv2D(96, kernel_size=7, strides=2))
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(16, 64, 64))
            self.features.add(_make_fire(16, 64, 64))
            self.features.add(_make_fire(32, 128, 128))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(32, 128, 128))
            self.features.add(_make_fire(48, 192, 192))
            self.features.add(_make_fire(48, 192, 192))
            self.features.add(_make_fire(64, 256, 256))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(64, 256, 256))
        else:
            self.features.add(nn.Conv2D(64, kernel_size=3, strides=2))
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(16, 64, 64))
            self.features.add(_make_fire(16, 64, 64))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(32, 128, 128))
            self.features.add(_make_fire(32, 128, 128))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, ceil_mode=True))
            self.features.add(_make_fire(48, 192, 192))
            self.features.add(_make_fire(48, 192, 192))
            self.features.add(_make_fire(64, 256, 256))
            self.features.add(_make_fire(64, 256, 256))
        self.features.add(nn.Dropout(0.5))

        self.output = nn.HybridSequential(prefix='')
        self.output.add(nn.Conv2D(classes, kernel_size=1))
        self.output.add(nn.Activation('relu'))
        self.output.add(nn.AvgPool2D(13))
        self.output.add(nn.Flatten())
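Note that SqueezeNet's head replaces the usual Flatten + Dense classifier with a 1x1 convolution over `classes` channels followed by 13x13 average pooling; Flatten then merely squeezes the trailing 1x1 spatial axes. A hedged shape check of that head in isolation (the 512x13x13 input is assumed):

import mxnet as mx
from mxnet.gluon import nn

# Hedged check of the convolutional head above: AvgPool2D(13) pools a
# 13x13 map to 1x1, so Flatten yields (N, classes) directly.
classes = 1000
head = nn.HybridSequential(prefix='')
head.add(nn.Conv2D(classes, kernel_size=1),
         nn.Activation('relu'),
         nn.AvgPool2D(13),
         nn.Flatten())
head.initialize()
print(head(mx.nd.zeros((2, 512, 13, 13))).shape)  # (2, 1000)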
Example 15: __init__
# Required import: from mxnet.gluon import nn [as alias]
# or: from mxnet.gluon.nn import Flatten [as alias]
def __init__(self, block, layers, channels, classes=1000, thumbnail=False,
             last_gamma=False, use_se=False, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
    super(ResNetV2, self).__init__(**kwargs)
    assert len(layers) == len(channels) - 1
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        self.features.add(norm_layer(scale=False, center=False,
                                     **({} if norm_kwargs is None else norm_kwargs)))
        if thumbnail:
            self.features.add(_conv3x3(channels[0], 1, 0))
        else:
            self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False))
            self.features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(3, 2, 1))

        in_channels = channels[0]
        for i, num_layer in enumerate(layers):
            stride = 1 if i == 0 else 2
            self.features.add(self._make_layer(block, num_layer, channels[i + 1],
                                               stride, i + 1, in_channels=in_channels,
                                               last_gamma=last_gamma, use_se=use_se,
                                               norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            in_channels = channels[i + 1]

        self.features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
        self.features.add(nn.Activation('relu'))
        self.features.add(nn.GlobalAvgPool2D())
        self.features.add(nn.Flatten())
        self.output = nn.Dense(classes, in_units=in_channels)
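As with Example 4, a hedged example of arguments that satisfy this constructor's assertion (resnet18_v2-style values; assumed rather than taken from this snippet):

# Hedged example arguments: four stages over a 64-channel stem.
layers = [2, 2, 2, 2]
channels = [64, 64, 128, 256, 512]
assert len(layers) == len(channels) - 1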