

Python nn.HybridSequential Method Code Examples

This article collects and summarizes typical usage examples of the mxnet.gluon.nn.HybridSequential method in Python. If you are asking how nn.HybridSequential is used in practice, or looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples from its containing module, mxnet.gluon.nn.


The sections below present 15 code examples of the nn.HybridSequential method, sorted by popularity by default.
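
Before the examples, here is a minimal sketch of the typical nn.HybridSequential workflow (an illustrative snippet written for this summary, not taken from any of the projects below; the layer sizes and dummy input shape are arbitrary assumptions): build the container, add child blocks, initialize the parameters, optionally call hybridize() to cache a symbolic graph, then run a forward pass.

import mxnet as mx
from mxnet.gluon import nn

net = nn.HybridSequential()
with net.name_scope():
    net.add(nn.Dense(128, activation='relu'))   # hidden layer
    net.add(nn.Dense(10))                       # output layer
net.initialize()                 # allocate and initialize parameters
net.hybridize()                  # optional: compile to a cached symbolic graph
out = net(mx.nd.zeros((4, 20)))  # forward pass on a dummy batch of shape (4, 20)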

Example 1: test_lambda

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def test_lambda():
    net1 = mx.gluon.nn.HybridSequential()
    net1.add(nn.Activation('tanh'),
             nn.LeakyReLU(0.1))

    net2 = mx.gluon.nn.HybridSequential()
    op3 = lambda F, x, *args: F.LeakyReLU(x, *args, slope=0.1)
    net2.add(nn.HybridLambda('tanh'),
             nn.HybridLambda(op3))

    op4 = lambda x: mx.nd.LeakyReLU(x, slope=0.1)
    net3 = mx.gluon.nn.Sequential()
    net3.add(nn.Lambda('tanh'),
             nn.Lambda(op4))

    input_data = mx.nd.random.uniform(shape=(2, 3, 5, 7))
    out1, out2, out3 = net1(input_data), net2(input_data), net3(input_data)
    assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-3, atol=1e-3)
    assert_almost_equal(out1.asnumpy(), out3.asnumpy(), rtol=1e-3, atol=1e-3) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 21, Source: test_gluon.py

Example 2: test_inline

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def test_inline():
    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(mx.gluon.nn.Dense(10))
        net.add(mx.gluon.nn.Dense(10))
        net.add(mx.gluon.nn.Dense(10))

    net.initialize()
    net.hybridize(inline_limit=3)
    with mx.autograd.record():
        y = net(mx.nd.zeros((1,10)))

    len_1 = len(json.loads(mx.autograd.get_symbol(y).tojson())['nodes'])
    y.backward()

    net.hybridize(inline_limit=0)
    with mx.autograd.record():
        y = net(mx.nd.zeros((1,10)))

    len_2 = len(json.loads(mx.autograd.get_symbol(y).tojson())['nodes'])
    y.backward()

    assert len_1 == len_2 + 2 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 25, Source: test_gluon.py

Example 3: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def __init__(self):
        super(CellStem0, self).__init__()
        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(42, 1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(42, 42, 5, 2, 2)
        self.comb_iter_0_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

        self.comb_iter_1_left = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_1_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_2_right = BranchSeparablesStem(96, 42, 5, 2, 2, bias=False)

        self.comb_iter_3_right = nn.AvgPool2D(pool_size=3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(42, 42, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2D(pool_size=3, strides=2, padding=1) 
Developer: deepinsight, Project: insightocr, Lines: 22, Source: fnasnet.py

Example 4: _make_dense_layer

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def _make_dense_layer(growth_rate, bn_size, dropout):
    new_features = nn.HybridSequential(prefix='')
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
    if dropout:
        new_features.add(nn.Dropout(dropout))

    out = gluon.contrib.nn.HybridConcurrent(axis=1, prefix='')
    out.add(gluon.contrib.nn.Identity())
    out.add(new_features)

    return out 
Developer: deepinsight, Project: insightface, Lines: 20, Source: fdensenet.py

Example 5: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def __init__(self, num_init_features, growth_rate, block_config,
                 bn_size=4, dropout=0, classes=1000, **kwargs):

        super(DenseNet, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(nn.Conv2D(num_init_features, kernel_size=3,
                                        strides=1, padding=1, use_bias=False))
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
            # Add dense blocks
            num_features = num_init_features
            for i, num_layers in enumerate(block_config):
                self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i+1))
                num_features = num_features + num_layers * growth_rate
                if i != len(block_config) - 1:
                    self.features.add(_make_transition(num_features // 2))
                    num_features = num_features // 2
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            #self.features.add(nn.AvgPool2D(pool_size=7))
            #self.features.add(nn.Flatten())

            #self.output = nn.Dense(classes) 
Developer: deepinsight, Project: insightface, Lines: 27, Source: fdensenet.py
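
As a hedged follow-up to Example 5, the constructor above could be instantiated roughly as follows (this assumes the helpers _make_dense_block and _make_transition from fdensenet.py are available; the block configuration and input shape are illustrative assumptions, not values taken from the project):

import mxnet as mx

# Hypothetical instantiation based only on the signature shown above
net = DenseNet(num_init_features=64, growth_rate=32, block_config=[6, 12, 24, 16])
net.initialize()
net.hybridize()
feat = net.features(mx.nd.zeros((1, 3, 112, 112)))  # run the feature extractor on a dummy image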

Example 6: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(NormalCell, self).__init__()
        self.conv_prev_1x1 = nn.HybridSequential()
        self.conv_prev_1x1.add(nn.Activation(activation='relu'))
        self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
        self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)

        self.comb_iter_1_left = BranchSeparables(out_channels_left, out_channels_left, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_3_left = nn.AvgPool2D(3, strides=1, padding=1)
        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False) 
Developer: deepinsight, Project: insightface, Lines: 26, Source: fnasnet.py

Example 7: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def __init__(self, prefix, entry_block3_stride, use_global_stats, norm_layer):
        super(EntryFlow, self).__init__(prefix)

        with self.name_scope():
            self.conv1 = nn.HybridSequential(prefix='conv1_')
            with self.conv1.name_scope():
                self.conv1.add(nn.Conv2D(32, kernel_size=3, strides=2, padding=1, use_bias=False, prefix='1_'))
                self.conv1.add(norm_layer(in_channels=32, use_global_stats=use_global_stats, prefix='1_BN_'))
                self.conv1.add(nn.Activation("relu"))
            self.conv2 = nn.HybridSequential(prefix='conv1_')
            with self.conv2.name_scope():
                self.conv2.add(nn.Conv2D(64, kernel_size=3, padding=1, use_bias=False, prefix='2_'))
                self.conv2.add(norm_layer(in_channels=64, use_global_stats=use_global_stats, prefix='2_BN_'))
                self.conv2.add(nn.Activation("relu"))

            self.conv3 = XceptionBlock(filters_list=[128, 128, 128], kernel_size=3, strides=2,
                                       use_global_stats=use_global_stats, norm_layer=norm_layer,
                                       dilation=1, depth_activation=False, in_filters=64, prefix='block1_')
            self.conv4 = XceptionBlock(filters_list=[256, 256, 256], kernel_size=3, strides=2, return_skip=True,
                                       use_global_stats=use_global_stats, norm_layer=norm_layer,
                                       dilation=1, depth_activation=False, in_filters=128, prefix='block2_')
            self.conv5 = XceptionBlock(filters_list=[728, 728, 728], kernel_size=3, strides=entry_block3_stride,
                                       use_shortcut_conv=True, dilation=1, depth_activation=False, in_filters=256,
                                       norm_layer=norm_layer, use_global_stats=use_global_stats, prefix='block3_') 
Developer: tonysy, Project: Deep-Feature-Flow-Segmentation, Lines: 26, Source: aspp_temp.py

Example 8: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def __init__(self, depth, ctx, pretrained=True, num_classes=0):
        super(ResNet, self).__init__()
        self.pretrained = pretrained

        with self.name_scope():
            network = ResNet.__factory[depth](pretrained=pretrained, ctx=ctx).features[0:-1]
            network[-1][0].body[0]._kwargs['stride'] = (1, 1)
            network[-1][0].downsample[0]._kwargs['stride'] = (1, 1)
            self.base = nn.HybridSequential()
            for n in network:
                self.base.add(n)

            self.avgpool = nn.GlobalAvgPool2D()
            self.flatten = nn.Flatten()
            self.bn = nn.BatchNorm(center=False, scale=True)
            self.bn.initialize(init=init.Zero(), ctx=ctx)

            self.classifier = nn.Dense(num_classes, use_bias=False)
            self.classifier.initialize(init=init.Normal(0.001), ctx=ctx) 
Developer: dmlc, Project: gluon-cv, Lines: 21, Source: resnet.py

Example 9: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def __init__(self):
        super(SRGenerator, self).__init__()
        self.conv1 = nn.Conv2D(64, kernel_size=3, strides=1,padding=1,activation='relu')
        self.res_block = nn.HybridSequential()
        with self.name_scope():
            for i in range(16):
                self.res_block.add(
                    ResnetBlock()
                )

            self.res_block.add(
                nn.Conv2D(64, kernel_size=3, strides=1,padding=1,use_bias=False),
                nn.BatchNorm()
            )
        self.subpix_block1 = SubpixelBlock()
        self.subpix_block2 = SubpixelBlock()
        self.conv4 = nn.Conv2D(3,kernel_size=1,strides=1,activation='tanh') 
Developer: dmlc, Project: gluon-cv, Lines: 19, Source: train_srgan.py

Example 10: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def __init__(self, channels, size1=14, scale=(1, 2, 1),
                 norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(AttentionModule_stage3, self).__init__(**kwargs)
        p, t, r = scale
        with self.name_scope():
            self.first_residual_blocks = nn.HybridSequential()
            _add_block(self.first_residual_blocks, ResidualBlock, p, channels,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)

            self.trunk_branches = nn.HybridSequential()
            _add_block(self.trunk_branches, ResidualBlock, t, channels,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)

            self.mpool1 = nn.MaxPool2D(pool_size=3, strides=2, padding=1)

            self.softmax1_blocks = nn.HybridSequential()
            _add_block(self.softmax1_blocks, ResidualBlock, 2 * r, channels,
                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)

            self.interpolation1 = UpsamplingBilinear2d(size=size1)

            self.softmax2_blocks = nn.HybridSequential()
            _add_sigmoid_layer(self.softmax2_blocks, channels, norm_layer, norm_kwargs)

            self.last_blocks = ResidualBlock(channels) 
Developer: dmlc, Project: gluon-cv, Lines: 27, Source: residual_attentionnet.py

Example 11: _make_level

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def _make_level(self, block, inplanes, planes, blocks, norm_layer, norm_kwargs, stride=1):
        downsample = None
        if stride != 1 or inplanes != planes:
            downsample = nn.HybridSequential()
            downsample.add(*[
                nn.MaxPool2D(stride, strides=stride),
                nn.Conv2D(channels=planes, in_channels=inplanes,
                          kernel_size=1, strides=1, use_bias=False),
                norm_layer(in_channels=planes, **norm_kwargs)])

        layers = []
        layers.append(block(inplanes, planes, stride,
                            norm_layer=norm_layer, norm_kwargs=norm_kwargs, downsample=downsample))
        for _ in range(1, blocks):
            layers.append(block(inplanes, planes, norm_layer=norm_layer, norm_kwargs=norm_kwargs))

        curr_level = nn.HybridSequential()
        curr_level.add(*layers)
        return curr_level 
Developer: dmlc, Project: gluon-cv, Lines: 21, Source: dla.py

Example 12: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def __init__(self, dw_channels, out_channels, stride=1,
                 norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
        super(_DSConv, self).__init__()
        with self.name_scope():
            self.conv = nn.HybridSequential()
            self.conv.add(nn.Conv2D(in_channels=dw_channels, channels=dw_channels,
                                    kernel_size=3, strides=stride,
                                    padding=1, groups=dw_channels, use_bias=False))
            self.conv.add(norm_layer(in_channels=dw_channels,
                                     **({} if norm_kwargs is None else norm_kwargs)))
            self.conv.add(nn.Activation('relu'))
            self.conv.add(nn.Conv2D(in_channels=dw_channels, channels=out_channels,
                                    kernel_size=1, use_bias=False))
            self.conv.add(norm_layer(in_channels=out_channels,
                                     **({} if norm_kwargs is None else norm_kwargs)))
            self.conv.add(nn.Activation('relu')) 
Developer: dmlc, Project: gluon-cv, Lines: 18, Source: fastscnn.py

Example 13: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def __init__(self, startp, channels, scales, in_channels=None,
                 use_dcnv2=False, norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
        super(DLAUp, self).__init__(**kwargs)
        self.startp = startp
        if in_channels is None:
            in_channels = channels
        self.channels = channels
        channels = list(channels)
        scales = np.array(scales, dtype=int)
        with self.name_scope():
            self.idas = nn.HybridSequential('ida')
            for i in range(len(channels) - 1):
                j = -i - 2
                self.idas.add(IDAUp(channels[j], in_channels[j:],
                                    scales[j:] // scales[j], use_dcnv2=use_dcnv2,
                                    norm_layer=norm_layer, norm_kwargs=norm_kwargs))
                scales[j + 1:] = scales[j]
                in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]] 
Developer: dmlc, Project: gluon-cv, Lines: 20, Source: deconv_dla.py

Example 14: test_collect_paramters

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def test_collect_paramters():
    net = nn.HybridSequential(prefix="test_")
    with net.name_scope():
        net.add(nn.Conv2D(10, 3))
        net.add(nn.Dense(10, activation='relu'))
    assert set(net.collect_params().keys()) == \
        set(['test_conv0_weight', 'test_conv0_bias','test_dense0_weight','test_dense0_bias'])
    assert set(net.collect_params('.*weight').keys()) == \
        set(['test_conv0_weight', 'test_dense0_weight'])
    assert set(net.collect_params('test_conv0_bias|test_dense0_bias').keys()) == \
        set(['test_conv0_bias', 'test_dense0_bias']) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 13, Source: test_gluon.py

Example 15: test_sequential

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import HybridSequential [as alias]
def test_sequential():
    check_sequential(gluon.nn.Sequential())
    check_sequential(gluon.nn.HybridSequential()) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 5, Source: test_gluon.py


Note: The mxnet.gluon.nn.HybridSequential examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code; do not reproduce without permission.