

Python nn.Sequential Method Code Examples

This article collects typical usage examples of the Python method mxnet.gluon.nn.Sequential. If you are wondering what nn.Sequential does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from mxnet.gluon.nn, the module in which this method is defined.


The following presents 15 code examples of the nn.Sequential method, ordered by popularity.
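Before the project code, here is a minimal, self-contained sketch of the typical nn.Sequential workflow: stack layers with add(), initialize parameters, and run a forward pass. It is an illustrative example assuming the MXNet 1.x Gluon API; it is not taken from any of the projects cited below, and the layer sizes and input shape are arbitrary.

import mxnet as mx
from mxnet.gluon import nn

# Stack layers in the order they should be applied.
net = nn.Sequential()
with net.name_scope():
    net.add(nn.Dense(64, activation='relu'))
    net.add(nn.Dropout(0.5))
    net.add(nn.Dense(10))

# Parameter shapes are inferred lazily on the first forward pass.
net.initialize(mx.init.Xavier())
out = net(mx.nd.random.uniform(shape=(8, 32)))  # batch of 8 samples, 32 features each
print(out.shape)  # (8, 10)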

Example 1: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=InstanceNorm):
        super(Bottleneck, self).__init__()
        self.expansion = 4
        self.downsample = downsample
        if self.downsample is not None:
            self.residual_layer = nn.Conv2D(in_channels=inplanes, 
                                            channels=planes * self.expansion,
                                            kernel_size=1, strides=(stride, stride))
        self.conv_block = nn.Sequential()
        with self.conv_block.name_scope():
            self.conv_block.add(norm_layer(in_channels=inplanes))
            self.conv_block.add(nn.Activation('relu'))
            self.conv_block.add(nn.Conv2D(in_channels=inplanes, channels=planes, 
                                 kernel_size=1))
            self.conv_block.add(norm_layer(in_channels=planes))
            self.conv_block.add(nn.Activation('relu'))
            self.conv_block.add(ConvLayer(planes, planes, kernel_size=3, 
                stride=stride))
            self.conv_block.add(norm_layer(in_channels=planes))
            self.conv_block.add(nn.Activation('relu'))
            self.conv_block.add(nn.Conv2D(in_channels=planes, 
                                 channels=planes * self.expansion, 
                                 kernel_size=1)) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 25, Source: net.py

Example 2: test_basic

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def test_basic():
    model = nn.Sequential()
    model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
    model.add(nn.Dropout(0.5))
    model.add(nn.Dense(64, activation='tanh', in_units=256),
              nn.Dense(32, in_units=64))
    model.add(nn.Activation('relu'))

    # symbol
    x = mx.sym.var('data')
    y = model(x)
    assert len(y.list_arguments()) == 7

    # ndarray
    model.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
    x = model(mx.nd.zeros((32, 2, 10)))
    assert x.shape == (32, 32)
    x.wait_to_read()

    model.collect_params().setattr('grad_req', 'null')
    assert list(model.collect_params().values())[0]._grad is None
    model.collect_params().setattr('grad_req', 'write')
    assert list(model.collect_params().values())[0]._grad is not None 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 25, Source: test_gluon.py

Example 3: test_lambda

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def test_lambda():
    net1 = mx.gluon.nn.HybridSequential()
    net1.add(nn.Activation('tanh'),
             nn.LeakyReLU(0.1))

    net2 = mx.gluon.nn.HybridSequential()
    op3 = lambda F, x, *args: F.LeakyReLU(x, *args, slope=0.1)
    net2.add(nn.HybridLambda('tanh'),
             nn.HybridLambda(op3))

    op4 = lambda x: mx.nd.LeakyReLU(x, slope=0.1)
    net3 = mx.gluon.nn.Sequential()
    net3.add(nn.Lambda('tanh'),
             nn.Lambda(op4))

    input_data = mx.nd.random.uniform(shape=(2, 3, 5, 7))
    out1, out2, out3 = net1(input_data), net2(input_data), net3(input_data)
    assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-3, atol=1e-3)
    assert_almost_equal(out1.asnumpy(), out3.asnumpy(), rtol=1e-3, atol=1e-3) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 21, Source: test_gluon.py

Example 4: test_summary

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def test_summary():
    net = gluon.model_zoo.vision.resnet50_v1()
    net.initialize()
    net.summary(mx.nd.ones((32, 3, 224, 224)))

    net2 = nn.Sequential()
    with net2.name_scope():
        net2.add(nn.Embedding(40, 30))
        net2.add(gluon.rnn.LSTM(30))
        net2.add(nn.Dense(40, flatten=False, params=net2[0].params))
    net2.initialize()
    net2.summary(mx.nd.ones((80, 32)))

    net3 = gluon.rnn.LSTM(30)
    net3.initialize()
    begin_state = net3.begin_state(32)
    net3.summary(mx.nd.ones((80, 32, 5)), begin_state)

    net.hybridize()
    assert_raises(AssertionError, net.summary, mx.nd.ones((32, 3, 224, 224))) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 22, Source: test_gluon.py

Example 5: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def __init__(self,
                 g,
                 in_feats,
                 n_hidden,
                 n_classes,
                 n_layers,
                 activation,
                 dropout,
                 aggregator_type):
        super(GraphSAGE, self).__init__()
        self.g = g

        with self.name_scope():
            self.layers = nn.Sequential()
            # input layer
            self.layers.add(SAGEConv(in_feats, n_hidden, aggregator_type, feat_drop=dropout, activation=activation))
            # hidden layers
            for i in range(n_layers - 1):
                self.layers.add(SAGEConv(n_hidden, n_hidden, aggregator_type, feat_drop=dropout, activation=activation))
            # output layer
            self.layers.add(SAGEConv(n_hidden, n_classes, aggregator_type, feat_drop=dropout, activation=None)) # activation None 
Developer: dmlc, Project: dgl, Lines: 23, Source: main.py

Example 6: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def __init__(self,
                 in_feats,
                 out_feats,
                 n_steps,
                 n_etypes,
                 bias=True):
        super(GatedGraphConv, self).__init__()
        self._in_feats = in_feats
        self._out_feats = out_feats
        self._n_steps = n_steps
        self._n_etypes = n_etypes
        if not bias:
            raise KeyError('MXNet does not support disabling bias in GRUCell.')
        with self.name_scope():
            self.linears = nn.Sequential()
            for _ in range(n_etypes):
                self.linears.add(
                    nn.Dense(out_feats,
                             weight_initializer=mx.init.Xavier(),
                             in_units=out_feats)
                )
            self.gru = gluon.rnn.GRUCell(out_feats, input_size=out_feats) 
Developer: dmlc, Project: dgl, Lines: 24, Source: gatedgraphconv.py

Example 7: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def __init__(self,
                 in_feats,
                 out_feats,
                 k,
                 bias=True):
        super(DenseChebConv, self).__init__()
        self._in_feats = in_feats
        self._out_feats = out_feats
        self._k = k
        with self.name_scope():
            self.fc = nn.Sequential()
            for _ in range(k):
                self.fc.add(
                    nn.Dense(out_feats, in_units=in_feats, use_bias=False,
                             weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)))
                )
            if bias:
                self.bias = self.params.get('bias', shape=(out_feats,),
                                            init=mx.init.Zero())
            else:
                self.bias = None 
Developer: dmlc, Project: dgl, Lines: 23, Source: densechebconv.py

Example 8: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def __init__(self,
                 in_feats,
                 out_feats,
                 k,
                 bias=True):
        super(ChebConv, self).__init__()
        self._in_feats = in_feats
        self._out_feats = out_feats
        self._k = k
        with self.name_scope():
            self.fc = nn.Sequential()
            for _ in range(k):
                self.fc.add(
                    nn.Dense(out_feats, use_bias=False,
                             weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)),
                             in_units=in_feats)
                )
            if bias:
                self.bias = self.params.get('bias', shape=(out_feats,),
                                            init=mx.init.Zero())
            else:
                self.bias = None 
Developer: dmlc, Project: dgl, Lines: 24, Source: chebconv.py

Example 9: get_netD

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def get_netD():
    # build the discriminator
    netD = nn.Sequential()
    with netD.name_scope():
        # input is (nc) x 64 x 64
        netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
        netD.add(nn.LeakyReLU(0.2))
        # state size. (ndf) x 32 x 32
        netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        # state size. (ndf*2) x 16 x 16
        netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        # state size. (ndf*4) x 8 x 8
        netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        # state size. (ndf*8) x 4 x 4
        netD.add(nn.Conv2D(2, 4, 1, 0, use_bias=False))
        # state size. 2 x 1 x 1

    return netD 
Developer: mlperf, Project: training_results_v0.6, Lines: 26, Source: dcgan.py

Example 10: _build_custom_neural_network

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def _build_custom_neural_network(num_inputs, num_labels):
        from mxnet.gluon import nn

        net = nn.Sequential(prefix='custom_')
        with net.name_scope():
            net.add(nn.Dense(512, in_units=num_inputs, activation='relu', prefix='dense0_'))
            net.add(nn.BatchNorm())
            net.add(nn.Dropout(0.5))
            net.add(nn.Dense(512, activation='relu', prefix='dense1_'))
            net.add(nn.BatchNorm())
            net.add(nn.Dropout(0.5))
            net.add(nn.Dense(256, activation='relu', prefix='dense2_'))
            net.add(nn.BatchNorm())
            net.add(nn.Dropout(0.5))
            net.add(nn.Dense(128, activation='relu', prefix='dense3_'))
            net.add(nn.BatchNorm())
            net.add(nn.Dropout(0.5))
            net.add(nn.Dense(64, activation='relu', prefix='dense4_'))
            net.add(nn.BatchNorm())
            net.add(nn.Dropout(0.5))
            net.add(nn.Dense(num_labels, prefix='dense5_'))
        return net 
Developer: yulingtianxia, Project: AudioEmotion, Lines: 24, Source: sound_classifier.py

Example 11: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def __init__(self, kernel_size, channels_out, channels_in, stride=1, with_bn=True, **kwargs):
        #super(residual, self).__init__(**kwargs)
        super(residual, self).__init__()
        with self.name_scope():
            self.conv1 = nn.Conv2D(channels_out, kernel_size=(3,3), strides=(stride, stride), padding=(1,1), in_channels=channels_in, use_bias=False)
            self.bn1   = nn.BatchNorm(in_channels= channels_out)

            self.conv2 = nn.Conv2D(channels_out, kernel_size=(3,3), strides=(1, 1), padding=(1,1), in_channels = channels_out,use_bias=False)
            self.bn2   = nn.BatchNorm(in_channels= channels_out)

            #self.skip = nn.HybridSequential()
            self.skip = nn.Sequential()
            if stride != 1 or channels_in != channels_out:
                self.skip.add( nn.Conv2D(channels_out, kernel_size=(1,1), strides=(stride, stride), in_channels= channels_in, use_bias=False),
                               nn.BatchNorm(in_channels= channels_out)
                ) 
Developer: Guanghan, Project: mxnet-centernet, Lines: 18, Source: hourglass.py

Example 12: net_define

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def net_define():
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.Embedding(config.MAX_WORDS, config.EMBEDDING_DIM))
        net.add(rnn.GRU(128,layout='NTC',bidirectional=True, num_layers=2, dropout=0.2))
        net.add(transpose(axes=(0,2,1)))
        # net.add(nn.MaxPool2D(pool_size=(config.MAX_LENGTH,1)))
        # net.add(nn.Conv2D(128, kernel_size=(101,1), padding=(50,0), groups=128,activation='relu'))
        net.add(PrimeConvCap(8,32, kernel_size=(1,1), padding=(0,0)))
        # net.add(AdvConvCap(8,32,8,32, kernel_size=(1,1), padding=(0,0)))
        net.add(CapFullyBlock(8*(config.MAX_LENGTH)/2, num_cap=12, input_units=32, units=16, route_num=5))
        # net.add(CapFullyBlock(8*(config.MAX_LENGTH-8), num_cap=12, input_units=32, units=16, route_num=5))
        # net.add(CapFullyBlock(8, num_cap=12, input_units=32, units=16, route_num=5))
        net.add(nn.Dropout(0.2))
        # net.add(LengthBlock())
        net.add(nn.Dense(6, activation='sigmoid'))
    net.initialize(init=init.Xavier())
    return net 
Developer: Godricly, Project: comment_toxic_CapsuleNet, Lines: 20, Source: net.py

Example 13: net_define_eu

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def net_define_eu():
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.Embedding(config.MAX_WORDS, config.EMBEDDING_DIM))
        net.add(rnn.GRU(128,layout='NTC',bidirectional=True, num_layers=1, dropout=0.2))
        net.add(transpose(axes=(0,2,1)))
        net.add(nn.GlobalMaxPool1D())
        '''
        net.add(FeatureBlock1())
        '''
        net.add(extendDim(axes=3))
        net.add(PrimeConvCap(16, 32, kernel_size=(1,1), padding=(0,0),strides=(1,1)))
        net.add(CapFullyNGBlock(16, num_cap=12, input_units=32, units=16, route_num=3))
        net.add(nn.Dropout(0.2))
        net.add(nn.Dense(6, activation='sigmoid'))
    net.initialize(init=init.Xavier())
    return net 
Developer: Godricly, Project: comment_toxic_CapsuleNet, Lines: 19, Source: net.py

Example 14: resnet18

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def resnet18(num_classes):
    """The ResNet-18 model."""
    net = nn.Sequential()
    net.add(nn.Conv2D(64, kernel_size=3, strides=1, padding=1),
            nn.BatchNorm(), nn.Activation('relu'))

    def resnet_block(num_channels, num_residuals, first_block=False):
        blk = nn.Sequential()
        for i in range(num_residuals):
            if i == 0 and not first_block:
                blk.add(Residual(num_channels, use_1x1conv=True, strides=2))
            else:
                blk.add(Residual(num_channels))
        return blk

    net.add(resnet_block(64, 2, first_block=True),
            resnet_block(128, 2),
            resnet_block(256, 2),
            resnet_block(512, 2))
    net.add(nn.GlobalAvgPool2D(), nn.Dense(num_classes))
    return net 
Developer: d2l-ai, Project: d2l-zh, Lines: 23, Source: utils.py

Example 15: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Sequential [as alias]
def __init__(self, kwspaces, softmax_temperature=1.0, ctx=mx.cpu(), **kwargs):
        super().__init__(**kwargs)
        self.softmax_temperature = softmax_temperature
        self.spaces = list(kwspaces.items())
        self.context = ctx

        # only support Categorical space for now
        self.num_tokens = []
        for _, space in self.spaces:
            assert isinstance(space, Categorical)
            self.num_tokens.append(len(space))

        # controller lstm
        self.decoders = nn.Sequential()
        for idx, size in enumerate(self.num_tokens):
            self.decoders.add(Alpha((size,))) 
Developer: awslabs, Project: autogluon, Lines: 18, Source: rl_controller.py


Note: The mxnet.gluon.nn.Sequential method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and redistribution or use should follow the license of the corresponding project. Do not reproduce this article without permission.