本文整理汇总了Python中mxnet.gluon.nn.Dense方法的典型用法代码示例。如果您正苦于以下问题:Python nn.Dense方法的具体用法?Python nn.Dense怎么用?Python nn.Dense使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mxnet.gluon.nn
的用法示例。
在下文中一共展示了nn.Dense方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def __init__(self, layers, filters, classes=1000, batch_norm=False, **kwargs):
    """VGG network constructor.

    Parameters
    ----------
    layers : list of int
        Number of convolution layers per feature stage.
    filters : list of int
        Number of filters per stage; must have the same length as `layers`.
    classes : int, default 1000
        Number of output classes of the final classifier layer.
    batch_norm : bool, default False
        Whether `_make_features` inserts batch normalization layers.
    """
    super(VGG, self).__init__(**kwargs)
    # Each stage needs a matching filter count.
    assert len(layers) == len(filters)
    with self.name_scope():
        # Convolutional feature extractor followed by the classic VGG head:
        # two 4096-unit ReLU FC layers, each with 50% dropout.
        self.features = self._make_features(layers, filters, batch_norm)
        self.features.add(Dense(4096, activation='relu',
                                weight_initializer='normal',
                                bias_initializer='zeros'))
        self.features.add(Dropout(rate=0.5))
        self.features.add(Dense(4096, activation='relu',
                                weight_initializer='normal',
                                bias_initializer='zeros'))
        self.features.add(Dropout(rate=0.5))
        # Final projection to class logits.
        self.output = Dense(classes,
                            weight_initializer='normal',
                            bias_initializer='zeros')
示例2: test_concurrent
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def test_concurrent():
    """HybridConcurrent/Concurrent concatenate branch outputs along axis 1."""
    branch_specs = [(128, 'tanh'), (64, 'tanh'), (32, None)]

    model = HybridConcurrent(axis=1)
    for units, act in branch_specs:
        model.add(nn.Dense(units, activation=act, in_units=10))

    model2 = Concurrent(axis=1)
    for units, act in branch_specs:
        model2.add(nn.Dense(units, activation=act, in_units=10))

    # symbol graph: 3 branches x (weight, bias) + the data variable = 7
    data_sym = mx.sym.var('data')
    out_sym = model(data_sym)
    assert len(out_sym.list_arguments()) == 7

    # ndarray forward: concatenated width is 128 + 64 + 32 = 224
    model.initialize(mx.init.Xavier(magnitude=2.24))
    model2.initialize(mx.init.Xavier(magnitude=2.24))
    out = model(mx.nd.zeros((32, 10)))
    out2 = model2(mx.nd.zeros((32, 10)))
    assert out.shape == (32, 224)
    assert out2.shape == (32, 224)
    out.wait_to_read()
    out2.wait_to_read()
示例3: test_exc_gluon
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def test_exc_gluon():
    """Deferred-execution errors surface only when a result is awaited.

    The inner network is intentionally mis-specified (the second Dense
    declares in_units=256, which does not match what the first layer
    produces from a (32, 2, 10) input), so execution must fail — but only
    lazily, when `wait_to_read` forces the async engine to run.
    """
    def gluon(exec_wait=True):
        model = nn.Sequential()
        model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
        model.add(nn.Dropout(1))
        # add() accepts several blocks in one call.
        model.add(nn.Dense(64, activation='tanh', in_units=256),
                  nn.Dense(32, in_units=64))
        x = mx.sym.var('data')
        y = model(x)
        model.collect_params().initialize(ctx=[default_context()])
        z = model(mx.nd.random.normal(10, -10, (32, 2, 10), ctx=default_context()))
        if exec_wait:
            z.wait_to_read()
    # Without waiting, the error stays pending inside the engine.
    gluon(exec_wait=False)
    # Waiting forces execution and must raise MXNetError.
    assert_raises(MXNetError, gluon, True)
示例4: test_basic
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def test_basic():
    """Smoke-test a small Sequential: symbol args, forward shape, grad_req toggling."""
    model = nn.Sequential()
    model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
    model.add(nn.Dropout(0.5))
    # add() accepts several blocks in one call.
    model.add(nn.Dense(64, activation='tanh', in_units=256),
              nn.Dense(32, in_units=64))
    model.add(nn.Activation('relu'))
    # symbol: 3 Dense layers x (weight, bias) + the data variable = 7 args
    x = mx.sym.var('data')
    y = model(x)
    assert len(y.list_arguments()) == 7
    # ndarray: (32, 2, 10) --flatten=False--> (32, 2, 128), then the next
    # Dense flattens to (32, 256) -> (32, 64) -> (32, 32)
    model.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
    x = model(mx.nd.zeros((32, 2, 10)))
    assert x.shape == (32, 32)
    x.wait_to_read()
    # grad_req='null' drops gradient buffers; 'write' re-allocates them.
    model.collect_params().setattr('grad_req', 'null')
    assert list(model.collect_params().values())[0]._grad is None
    model.collect_params().setattr('grad_req', 'write')
    assert list(model.collect_params().values())[0]._grad is not None
示例5: test_dense
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def test_dense():
    """Dense with flatten=False keeps leading axes; flatten=True collapses them."""
    # flatten=False: only the trailing axis is projected (10 -> 128).
    layer = nn.Dense(128, activation='tanh', in_units=10, flatten=False, prefix='test_')
    data = mx.sym.Variable('data')
    out_sym = layer(data)
    assert set(layer.collect_params().keys()) == set(['test_weight', 'test_bias'])
    assert out_sym.list_outputs() == ['test_tanh_fwd_output']
    _, out_shapes, _ = out_sym.infer_shape(data=(2, 3, 10))
    assert out_shapes == [(2, 3, 128)]

    # flatten=True: trailing axes collapse (2*5*3 = 30) before projection.
    layer = nn.Dense(128, activation='relu', in_units=30, flatten=True, prefix='test2_')
    data = mx.sym.Variable('data')
    out_sym = layer(data)
    assert set(layer.collect_params().keys()) == set(['test2_weight', 'test2_bias'])
    assert out_sym.list_outputs() == ['test2_relu_fwd_output']
    _, out_shapes, _ = out_sym.infer_shape(data=(17, 2, 5, 3))
    assert out_shapes == [(17, 128)]
示例6: test_inline
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def test_inline():
    """hybridize(inline_limit=...) controls subgraph inlining in the cached graph.

    The same network is hybridized twice — once with inline_limit=3 and once
    with inline_limit=0 — and the recorded autograd graphs are compared by
    node count. The two graphs must differ by exactly two nodes.
    """
    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(mx.gluon.nn.Dense(10))
        net.add(mx.gluon.nn.Dense(10))
        net.add(mx.gluon.nn.Dense(10))
    net.initialize()
    net.hybridize(inline_limit=3)
    with mx.autograd.record():
        y = net(mx.nd.zeros((1,10)))
    len_1 = len(json.loads(mx.autograd.get_symbol(y).tojson())['nodes'])
    y.backward()
    net.hybridize(inline_limit=0)
    with mx.autograd.record():
        y = net(mx.nd.zeros((1,10)))
    len_2 = len(json.loads(mx.autograd.get_symbol(y).tojson())['nodes'])
    y.backward()
    # NOTE(review): the fixed offset of 2 presumably corresponds to the
    # subgraph boundary nodes — confirm against the CachedOp implementation.
    assert len_1 == len_2 + 2
示例7: test_reshape_dense
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def test_reshape_dense():
    """A Dense layer must handle input produced by NDArray.reshape."""
    class ReshapeDenseNet(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(ReshapeDenseNet, self).__init__(**kwargs)
            with self.name_scope():
                # random output width in [1, 16]
                self.dense0 = nn.Dense(np.random.randint(1, 17))

        def hybrid_forward(self, F, x):
            return self.dense0(x.reshape((8, 64, 128, -1)))

    data = mx.nd.random.uniform(shape=(4, 32, 64, 64))
    check_layer_forward_withinput(ReshapeDenseNet(), data)
示例8: test_slice_dense
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def test_slice_dense():
    """A Dense layer must handle input produced by NDArray.slice.

    Fix: the constructor parameter and local variable were both named
    ``slice``, shadowing the ``slice`` builtin; renamed to ``slice_spec``.
    The test function's own interface is unchanged.
    """
    class Net(gluon.HybridBlock):
        def __init__(self, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                # random output width in [1, 16]
                channel0 = np.random.randint(1, 17)
                self.dense0 = nn.Dense(channel0)
                # [begin_coords, end_coords] for NDArray.slice
                self.slice_spec = slice_spec

        def hybrid_forward(self, F, x):
            x_slice = x.slice(begin=tuple(self.slice_spec[0]),
                              end=tuple(self.slice_spec[1]))
            return self.dense0(x_slice)

    x = mx.nd.random.uniform(shape=(16, 32, 64, 64))
    slice_spec = [[0, 16, 0, 0], [4, 32, 32, 32]]
    net = Net(slice_spec)
    check_layer_forward_withinput(net, x)
示例9: test_slice_dense_slice_dense
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def test_slice_dense_slice_dense():
    """Two Dense layers chained with a slice before each must forward correctly.

    Fix: the constructor parameter and local variable were both named
    ``slice``, shadowing the ``slice`` builtin; renamed to ``slice_spec``.
    The test function's own interface is unchanged.
    """
    class Net(gluon.HybridBlock):
        def __init__(self, slice_spec, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                channel0 = 32
                # random output width in [1, 16]
                channel1 = np.random.randint(1, 17)
                self.dense0 = nn.Dense(channel0)
                self.dense1 = nn.Dense(channel1)
                # [begin_coords, end_coords] for the first NDArray.slice
                self.slice_spec = slice_spec

        def hybrid_forward(self, F, x):
            x_slice = x.slice(begin=tuple(self.slice_spec[0]), end=tuple(self.slice_spec[1]))
            y = self.dense0(x_slice)
            # second slice: rows 1:3, columns 0:10 of the (batch, 32) output
            y_slice = y.slice(begin=(1, 0), end=(3, 10))
            return self.dense1(y_slice)

    x = mx.nd.random.uniform(shape=(16, 32, 64, 64))
    slice_spec = [[0, 16, 0, 0], [4, 32, 32, 32]]
    net = Net(slice_spec)
    check_layer_forward_withinput(net, x)
示例10: test_reshape_dense_reshape_dense
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def test_reshape_dense_reshape_dense():
    """Dense layers must accept inputs reshaped before each of two stages."""
    class TwoStageReshapeNet(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(TwoStageReshapeNet, self).__init__(**kwargs)
            with self.name_scope():
                # random widths: first in [1, 16], second in [1, 32]
                self.dense0 = nn.Dense(np.random.randint(1, 17))
                self.dense1 = nn.Dense(np.random.randint(1, 33))

        def hybrid_forward(self, F, x):
            hidden = self.dense0(x.reshape((4, 16, 128, 32)))
            return self.dense1(hidden.reshape((1, -1)))

    data = mx.nd.random.uniform(shape=(4, 16, 64, 64))
    check_layer_forward_withinput(TwoStageReshapeNet(), data)
示例11: test_reshape_dense_slice_dense
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def test_reshape_dense_slice_dense():
    """Dense after a reshape, then Dense after slicing the intermediate output."""
    class ReshapeThenSliceNet(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(ReshapeThenSliceNet, self).__init__(**kwargs)
            with self.name_scope():
                # fixed first width; random second width in [1, 16]
                self.dense0 = nn.Dense(64)
                self.dense1 = nn.Dense(np.random.randint(1, 17))

        def hybrid_forward(self, F, x):
            hidden = self.dense0(x.reshape((4, 16, 128, 32)))
            # rows 1:3, columns 32:64 of the (batch, 64) output
            return self.dense1(hidden.slice(begin=(1, 32), end=(3, 64)))

    data = mx.nd.random.uniform(shape=(4, 16, 64, 64))
    check_layer_forward_withinput(ReshapeThenSliceNet(), data)
示例12: __init__
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def __init__(self, num_init_features, growth_rate, block_config,
             bn_size=4, dropout=0, classes=1000, **kwargs):
    """DenseNet backbone (3x3 stride-1 stem, so suited to small images).

    Parameters
    ----------
    num_init_features : int
        Channels produced by the initial convolution.
    growth_rate : int
        Channels added by each layer inside a dense block.
    block_config : list of int
        Number of layers in each dense block.
    bn_size : int, default 4
        Bottleneck multiplier passed to `_make_dense_block`.
    dropout : float, default 0
        Dropout rate passed to `_make_dense_block` (0 disables it).
    classes : int, default 1000
        Kept for interface compatibility; the classifier head below is
        commented out, so this value is currently unused.
    """
    super(DenseNet, self).__init__(**kwargs)
    with self.name_scope():
        self.features = nn.HybridSequential(prefix='')
        self.features.add(nn.Conv2D(num_init_features, kernel_size=3,
                                    strides=1, padding=1, use_bias=False))
        self.features.add(nn.BatchNorm())
        self.features.add(nn.Activation('relu'))
        self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
        # Add dense blocks
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i+1))
            # Each block grows the channel count by num_layers * growth_rate.
            num_features = num_features + num_layers * growth_rate
            # A channel-halving transition follows every block but the last.
            if i != len(block_config) - 1:
                self.features.add(_make_transition(num_features // 2))
                num_features = num_features // 2
        self.features.add(nn.BatchNorm())
        self.features.add(nn.Activation('relu'))
        # NOTE(review): pooling/classifier head deliberately disabled —
        # this block outputs feature maps only; confirm with callers.
        #self.features.add(nn.AvgPool2D(pool_size=7))
        #self.features.add(nn.Flatten())
        #self.output = nn.Dense(classes)
示例13: __init__
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def __init__(self,
             rating_vals,
             in_units,
             num_basis_functions=2,
             dropout_rate=0.0):
    """Bilinear decoder with shared basis weight matrices.

    Parameters
    ----------
    rating_vals : sequence
        Distinct rating values; the output layer emits one logit per value.
    in_units : int
        Dimensionality of the input embeddings; each basis parameter is an
        (in_units, in_units) matrix.
    num_basis_functions : int, default 2
        Number of basis weight matrices to learn.
    dropout_rate : float, default 0.0
        Rate for the Dropout block created here.
    """
    super(BiDecoder, self).__init__()
    self.rating_vals = rating_vals
    self._num_basis_functions = num_basis_functions
    self.dropout = nn.Dropout(dropout_rate)
    # One (in_units, in_units) parameter per basis function; deferred init
    # lets the shape be finalized when first used.
    self.Ps = []
    with self.name_scope():
        for i in range(num_basis_functions):
            self.Ps.append(self.params.get(
                'Ps_%d' % i, shape=(in_units, in_units),
                #init=mx.initializer.Orthogonal(scale=1.1, rand_type='normal'),
                init=mx.initializer.Xavier(magnitude=math.sqrt(2.0)),
                allow_deferred_init=True))
        # Bias-free projection from per-basis scores to per-rating logits.
        self.rate_out = nn.Dense(units=len(rating_vals), flatten=False, use_bias=False)
示例14: __init__
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def __init__(self,
             in_feats,
             out_feats,
             feat_drop=0.,
             bias=True,
             norm=None,
             activation=None):
    """GraphSAGE convolution layer constructor.

    Parameters
    ----------
    in_feats : int
        Input feature size (in_units of the linear projection).
    out_feats : int
        Output feature size.
    feat_drop : float, default 0.
        Rate for the feature Dropout block created here.
    bias : bool, default True
        Whether the linear projection learns an additive bias.
    norm : callable or None, default None
        Stored as ``self._norm``; presumably applied in the forward pass —
        not visible in this block.
    activation : callable or None, default None
        Stored as ``self.activation``; presumably applied in the forward
        pass — not visible in this block.
    """
    super(DenseSAGEConv, self).__init__()
    self._in_feats = in_feats
    self._out_feats = out_feats
    self._norm = norm
    with self.name_scope():
        self.feat_drop = nn.Dropout(feat_drop)
        self.activation = activation
        # Linear projection to out_feats, Xavier-initialized.
        self.fc = nn.Dense(out_feats, in_units=in_feats, use_bias=bias,
                           weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)))
示例15: __init__
# 需要导入模块: from mxnet.gluon import nn [as 别名]
# 或者: from mxnet.gluon.nn import Dense [as 别名]
def __init__(self,
             in_feats,
             out_feats,
             n_steps,
             n_etypes,
             bias=True):
    """Gated graph convolution layer constructor.

    Parameters
    ----------
    in_feats : int
        Input feature size.
    out_feats : int
        Output feature size (also the GRU hidden/input size).
    n_steps : int
        Number of propagation steps (stored; used by the forward pass).
    n_etypes : int
        Number of edge types; one Dense transform is created per type.
    bias : bool, default True
        Must be True — MXNet's GRUCell cannot disable its bias.

    Raises
    ------
    KeyError
        If ``bias`` is False. (NOTE(review): ValueError would be more
        conventional, but the raised type is caller-visible, so it is kept.)
    """
    super(GatedGraphConv, self).__init__()
    self._in_feats = in_feats
    self._out_feats = out_feats
    self._n_steps = n_steps
    self._n_etypes = n_etypes
    if not bias:
        raise KeyError('MXNet do not support disabling bias in GRUCell.')
    with self.name_scope():
        # One out_feats -> out_feats linear transform per edge type.
        self.linears = nn.Sequential()
        for _ in range(n_etypes):
            self.linears.add(
                nn.Dense(out_feats,
                         weight_initializer=mx.init.Xavier(),
                         in_units=out_feats)
            )
        # GRU cell used to update node states across propagation steps.
        self.gru = gluon.rnn.GRUCell(out_feats, input_size=out_feats)