

Python nn.Dense Method Code Examples

This article compiles typical usage examples of the Python method mxnet.gluon.nn.Dense. If you have been wondering how exactly nn.Dense is used in Python, what it does, or what real-world calls to it look like, the curated code examples below may help. You can also explore further usage examples from its parent module, mxnet.gluon.nn.


The following presents 15 code examples of the nn.Dense method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples. A minimal usage sketch is shown first, followed by the collected examples.
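The sketch below is not taken from any of the projects listed here; it is a minimal, assumed setup that shows the basic nn.Dense pattern in MXNet Gluon: construct the layer, initialize its parameters, and call it on an NDArray. The layer sizes and the input shape are arbitrary illustrative values.

import mxnet as mx
from mxnet.gluon import nn

# A fully connected layer with 2 output units. in_units is optional and can
# be inferred on the first forward call; it is given explicitly here.
layer = nn.Dense(2, activation='relu', in_units=10)
layer.initialize()                       # allocate and initialize weight and bias

x = mx.nd.random.uniform(shape=(4, 10))  # a batch of 4 vectors of length 10
y = layer(x)                             # forward pass
print(y.shape)                           # (4, 2)

With flatten=True (the default), any input with more than two dimensions is flattened to (batch_size, -1) before the projection, as Example 5 (test_dense) below demonstrates.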

Example 1: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def __init__(self, layers, filters, classes=1000, batch_norm=False, **kwargs):
        super(VGG, self).__init__(**kwargs)
        assert len(layers) == len(filters)
        with self.name_scope():
            self.features = self._make_features(layers, filters, batch_norm)
            self.features.add(Dense(4096, activation='relu',
                                       weight_initializer='normal',
                                       bias_initializer='zeros'))
            self.features.add(Dropout(rate=0.5))
            self.features.add(Dense(4096, activation='relu',
                                       weight_initializer='normal',
                                       bias_initializer='zeros'))
            self.features.add(Dropout(rate=0.5))
            self.output = Dense(classes,
                                   weight_initializer='normal',
                                   bias_initializer='zeros') 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 18, Source: vgg.py

Example 2: test_concurrent

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def test_concurrent():
    model = HybridConcurrent(axis=1)
    model.add(nn.Dense(128, activation='tanh', in_units=10))
    model.add(nn.Dense(64, activation='tanh', in_units=10))
    model.add(nn.Dense(32, in_units=10))
    model2 = Concurrent(axis=1)
    model2.add(nn.Dense(128, activation='tanh', in_units=10))
    model2.add(nn.Dense(64, activation='tanh', in_units=10))
    model2.add(nn.Dense(32, in_units=10))

    # symbol
    x = mx.sym.var('data')
    y = model(x)
    assert len(y.list_arguments()) == 7

    # ndarray
    model.initialize(mx.init.Xavier(magnitude=2.24))
    model2.initialize(mx.init.Xavier(magnitude=2.24))
    x = model(mx.nd.zeros((32, 10)))
    x2 = model2(mx.nd.zeros((32, 10)))
    assert x.shape == (32, 224)
    assert x2.shape == (32, 224)
    x.wait_to_read()
    x2.wait_to_read() 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 26, Source: test_gluon_contrib.py

Example 3: test_exc_gluon

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def test_exc_gluon():
    def gluon(exec_wait=True):
        model = nn.Sequential()
        model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
        model.add(nn.Dropout(1))
        model.add(nn.Dense(64, activation='tanh', in_units=256),
                  nn.Dense(32, in_units=64))
        x = mx.sym.var('data')
        y = model(x)
        model.collect_params().initialize(ctx=[default_context()])
        z = model(mx.nd.random.normal(10, -10, (32, 2, 10), ctx=default_context()))
        if exec_wait:
            z.wait_to_read()

    gluon(exec_wait=False)
    assert_raises(MXNetError, gluon, True) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 18, Source: test_exc_handling.py

Example 4: test_basic

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def test_basic():
    model = nn.Sequential()
    model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
    model.add(nn.Dropout(0.5))
    model.add(nn.Dense(64, activation='tanh', in_units=256),
              nn.Dense(32, in_units=64))
    model.add(nn.Activation('relu'))

    # symbol
    x = mx.sym.var('data')
    y = model(x)
    assert len(y.list_arguments()) == 7

    # ndarray
    model.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
    x = model(mx.nd.zeros((32, 2, 10)))
    assert x.shape == (32, 32)
    x.wait_to_read()

    model.collect_params().setattr('grad_req', 'null')
    assert list(model.collect_params().values())[0]._grad is None
    model.collect_params().setattr('grad_req', 'write')
    assert list(model.collect_params().values())[0]._grad is not None 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 25, Source: test_gluon.py

Example 5: test_dense

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def test_dense():
    model = nn.Dense(128, activation='tanh', in_units=10, flatten=False, prefix='test_')
    inputs = mx.sym.Variable('data')
    outputs = model(inputs)
    assert set(model.collect_params().keys()) == set(['test_weight', 'test_bias'])
    assert outputs.list_outputs() == ['test_tanh_fwd_output']
    args, outs, auxs = outputs.infer_shape(data=(2, 3, 10))
    assert outs == [(2, 3, 128)]

    model = nn.Dense(128, activation='relu', in_units=30, flatten=True, prefix='test2_')
    inputs = mx.sym.Variable('data')
    outputs = model(inputs)
    assert set(model.collect_params().keys()) == set(['test2_weight', 'test2_bias'])
    assert outputs.list_outputs() == ['test2_relu_fwd_output']
    args, outs, auxs = outputs.infer_shape(data=(17, 2, 5, 3))
    assert outs == [(17, 128)] 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 18, Source: test_gluon.py

Example 6: test_inline

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def test_inline():
    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(mx.gluon.nn.Dense(10))
        net.add(mx.gluon.nn.Dense(10))
        net.add(mx.gluon.nn.Dense(10))

    net.initialize()
    net.hybridize(inline_limit=3)
    with mx.autograd.record():
        y = net(mx.nd.zeros((1,10)))

    len_1 = len(json.loads(mx.autograd.get_symbol(y).tojson())['nodes'])
    y.backward()

    net.hybridize(inline_limit=0)
    with mx.autograd.record():
        y = net(mx.nd.zeros((1,10)))

    len_2 = len(json.loads(mx.autograd.get_symbol(y).tojson())['nodes'])
    y.backward()

    assert len_1 == len_2 + 2 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 25, Source: test_gluon.py

Example 7: test_reshape_dense

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def test_reshape_dense():
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                channel0 = np.random.randint(1, 17)
                self.dense0 = nn.Dense(channel0)

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape((8, 64, 128, -1))
            out = self.dense0(x_reshape)
            return out

    x = mx.nd.random.uniform(shape=(4, 32, 64, 64))
    net = Net()
    check_layer_forward_withinput(net, x) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 18, Source: test_gluon.py

Example 8: test_slice_dense

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def test_slice_dense():
    class Net(gluon.HybridBlock):
        def __init__(self, slice, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                channel0 = np.random.randint(1, 17)
                self.dense0 = nn.Dense(channel0)
                self.slice = slice

        def hybrid_forward(self, F, x):
            x_slice = x.slice(begin=tuple(self.slice[0]),
                              end=tuple(self.slice[1]))
            out = self.dense0(x_slice)
            return out

    x = mx.nd.random.uniform(shape=(16, 32, 64, 64))
    slice = [[0, 16, 0, 0], [4, 32, 32, 32]]
    net = Net(slice)
    check_layer_forward_withinput(net, x) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 21, Source: test_gluon.py

Example 9: test_slice_dense_slice_dense

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def test_slice_dense_slice_dense():
    class Net(gluon.HybridBlock):
        def __init__(self, slice, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                channel0 = 32
                channel1 = np.random.randint(1, 17)
                self.dense0 = nn.Dense(channel0)
                self.dense1 = nn.Dense(channel1)
                self.slice = slice

        def hybrid_forward(self, F, x):
            x_slice = x.slice(begin=tuple(self.slice[0]), end=tuple(self.slice[1]))
            y = self.dense0(x_slice)
            y_slice = y.slice(begin=(1, 0), end=(3, 10))
            out = self.dense1(y_slice)
            return out

    x = mx.nd.random.uniform(shape=(16, 32, 64, 64))
    slice = [[0, 16, 0, 0], [4, 32, 32, 32]]
    net = Net(slice)
    check_layer_forward_withinput(net, x) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 24, Source: test_gluon.py

Example 10: test_reshape_dense_reshape_dense

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def test_reshape_dense_reshape_dense():
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                channel0 = np.random.randint(1, 17)
                channel1 = np.random.randint(1, 33)
                self.dense0 = nn.Dense(channel0)
                self.dense1 = nn.Dense(channel1)

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape((4, 16, 128, 32))
            y = self.dense0(x_reshape)
            y_reshape = y.reshape((1, -1))
            out = self.dense1(y_reshape)
            return out

    x = mx.nd.random.uniform(shape=(4, 16, 64, 64))
    net = Net()
    check_layer_forward_withinput(net, x) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 22, Source: test_gluon.py

Example 11: test_reshape_dense_slice_dense

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def test_reshape_dense_slice_dense():
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                channel0 = 64
                channel1 = np.random.randint(1, 17)
                self.dense0 = nn.Dense(channel0)
                self.dense1 = nn.Dense(channel1)

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape((4, 16, 128, 32))
            y = self.dense0(x_reshape)
            y_slice = y.slice(begin=(1, 32), end=(3, 64))
            out = self.dense1(y_slice)
            return out

    x = mx.nd.random.uniform(shape=(4, 16, 64, 64))
    net = Net()
    check_layer_forward_withinput(net, x) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 22, Source: test_gluon.py

Example 12: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def __init__(self, num_init_features, growth_rate, block_config,
                 bn_size=4, dropout=0, classes=1000, **kwargs):

        super(DenseNet, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(nn.Conv2D(num_init_features, kernel_size=3,
                                        strides=1, padding=1, use_bias=False))
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
            # Add dense blocks
            num_features = num_init_features
            for i, num_layers in enumerate(block_config):
                self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i+1))
                num_features = num_features + num_layers * growth_rate
                if i != len(block_config) - 1:
                    self.features.add(_make_transition(num_features // 2))
                    num_features = num_features // 2
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            #self.features.add(nn.AvgPool2D(pool_size=7))
            #self.features.add(nn.Flatten())

            #self.output = nn.Dense(classes) 
Developer: deepinsight, Project: insightface, Lines of code: 27, Source: fdensenet.py

Example 13: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def __init__(self,
                 rating_vals,
                 in_units,
                 num_basis_functions=2,
                 dropout_rate=0.0):
        super(BiDecoder, self).__init__()
        self.rating_vals = rating_vals
        self._num_basis_functions = num_basis_functions
        self.dropout = nn.Dropout(dropout_rate)
        self.Ps = []
        with self.name_scope():
            for i in range(num_basis_functions):
                self.Ps.append(self.params.get(
                    'Ps_%d' % i, shape=(in_units, in_units),
                    #init=mx.initializer.Orthogonal(scale=1.1, rand_type='normal'),
                    init=mx.initializer.Xavier(magnitude=math.sqrt(2.0)),
                    allow_deferred_init=True))
            self.rate_out = nn.Dense(units=len(rating_vals), flatten=False, use_bias=False) 
Developer: dmlc, Project: dgl, Lines of code: 20, Source: model.py

Example 14: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def __init__(self,
                 in_feats,
                 out_feats,
                 feat_drop=0.,
                 bias=True,
                 norm=None,
                 activation=None):
        super(DenseSAGEConv, self).__init__()
        self._in_feats = in_feats
        self._out_feats = out_feats
        self._norm = norm
        with self.name_scope():
            self.feat_drop = nn.Dropout(feat_drop)
            self.activation = activation
            self.fc = nn.Dense(out_feats, in_units=in_feats, use_bias=bias,
                               weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0))) 
Developer: dmlc, Project: dgl, Lines of code: 18, Source: densesageconv.py

Example 15: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dense [as alias]
def __init__(self,
                 in_feats,
                 out_feats,
                 n_steps,
                 n_etypes,
                 bias=True):
        super(GatedGraphConv, self).__init__()
        self._in_feats = in_feats
        self._out_feats = out_feats
        self._n_steps = n_steps
        self._n_etypes = n_etypes
        if not bias:
            raise KeyError('MXNet does not support disabling bias in GRUCell.')
        with self.name_scope():
            self.linears = nn.Sequential()
            for _ in range(n_etypes):
                self.linears.add(
                    nn.Dense(out_feats,
                             weight_initializer=mx.init.Xavier(),
                             in_units=out_feats)
                )
            self.gru = gluon.rnn.GRUCell(out_feats, input_size=out_feats) 
Developer: dmlc, Project: dgl, Lines of code: 24, Source: gatedgraphconv.py


Note: The mxnet.gluon.nn.Dense method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using it, and do not reproduce this content without permission.