This article collects typical usage examples of mxnet.gluon.nn.Dropout in Python. If you have been wondering what exactly nn.Dropout does, how it is used, or where to find concrete examples, the curated code samples below should help. You can also explore further usage examples of its parent module, mxnet.gluon.nn.
The following 15 code examples of nn.Dropout are ordered by popularity.
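Before the examples, a minimal sketch (assuming MXNet 1.x Gluon) of the behavior the layer implements: during training, nn.Dropout zeroes each activation with probability rate and rescales the survivors by 1 / (1 - rate); at inference it is the identity.

import mxnet as mx
from mxnet.gluon import nn

drop = nn.Dropout(rate=0.5)   # drop each activation with probability 0.5
x = mx.nd.ones((2, 4))
print(drop(x))                # inference mode: identity, x is returned unchanged
with mx.autograd.record():    # training mode
    print(drop(x))            # roughly half the entries zeroed, survivors
                              # scaled by 1 / (1 - rate) = 2.0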
Example 1: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def __init__(self, layers, filters, classes=1000, batch_norm=False, **kwargs):
    super(VGG, self).__init__(**kwargs)
    assert len(layers) == len(filters)
    with self.name_scope():
        self.features = self._make_features(layers, filters, batch_norm)
        self.features.add(Dense(4096, activation='relu',
                                weight_initializer='normal',
                                bias_initializer='zeros'))
        self.features.add(Dropout(rate=0.5))
        self.features.add(Dense(4096, activation='relu',
                                weight_initializer='normal',
                                bias_initializer='zeros'))
        self.features.add(Dropout(rate=0.5))
        self.output = Dense(classes,
                            weight_initializer='normal',
                            bias_initializer='zeros')
Example 2: test_exc_gluon
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def test_exc_gluon():
    def gluon(exec_wait=True):
        model = nn.Sequential()
        model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
        model.add(nn.Dropout(1))
        # The 128-unit output deliberately mismatches in_units=256 below,
        # so the forward pass is invalid.
        model.add(nn.Dense(64, activation='tanh', in_units=256),
                  nn.Dense(32, in_units=64))
        x = mx.sym.var('data')
        y = model(x)
        model.collect_params().initialize(ctx=[default_context()])
        z = model(mx.nd.random.normal(10, -10, (32, 2, 10), ctx=default_context()))
        # Execution is asynchronous: the MXNetError only surfaces once
        # wait_to_read() forces the computation.
        if exec_wait:
            z.wait_to_read()
    gluon(exec_wait=False)
    assert_raises(MXNetError, gluon, True)
Example 3: test_basic
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def test_basic():
    model = nn.Sequential()
    model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
    model.add(nn.Dropout(0.5))
    model.add(nn.Dense(64, activation='tanh', in_units=256),
              nn.Dense(32, in_units=64))
    model.add(nn.Activation('relu'))
    # symbol
    x = mx.sym.var('data')
    y = model(x)
    assert len(y.list_arguments()) == 7
    # ndarray
    model.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
    x = model(mx.nd.zeros((32, 2, 10)))
    assert x.shape == (32, 32)
    x.wait_to_read()
    model.collect_params().setattr('grad_req', 'null')
    assert list(model.collect_params().values())[0]._grad is None
    model.collect_params().setattr('grad_req', 'write')
    assert list(model.collect_params().values())[0]._grad is not None
Example 4: _make_dense_layer
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def _make_dense_layer(growth_rate, bn_size, dropout):
    new_features = nn.HybridSequential(prefix='')
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
    if dropout:
        new_features.add(nn.Dropout(dropout))

    out = gluon.contrib.nn.HybridConcurrent(axis=1, prefix='')
    out.add(gluon.contrib.nn.Identity())
    out.add(new_features)
    return out
Example 5: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def __init__(self,
             rating_vals,
             in_units,
             num_basis_functions=2,
             dropout_rate=0.0):
    super(BiDecoder, self).__init__()
    self.rating_vals = rating_vals
    self._num_basis_functions = num_basis_functions
    self.dropout = nn.Dropout(dropout_rate)
    self.Ps = []
    with self.name_scope():
        for i in range(num_basis_functions):
            self.Ps.append(self.params.get(
                'Ps_%d' % i, shape=(in_units, in_units),
                #init=mx.initializer.Orthogonal(scale=1.1, rand_type='normal'),
                init=mx.initializer.Xavier(magnitude=math.sqrt(2.0)),
                allow_deferred_init=True))
        self.rate_out = nn.Dense(units=len(rating_vals), flatten=False, use_bias=False)
Example 6: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def __init__(self,
             in_feats,
             out_feats,
             feat_drop=0.,
             bias=True,
             norm=None,
             activation=None):
    super(DenseSAGEConv, self).__init__()
    self._in_feats = in_feats
    self._out_feats = out_feats
    self._norm = norm
    with self.name_scope():
        self.feat_drop = nn.Dropout(feat_drop)
        self.activation = activation
        self.fc = nn.Dense(out_feats, in_units=in_feats, use_bias=bias,
                           weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)))
Example 7: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(_FCNHead, self).__init__()
    with self.name_scope():
        self.block = nn.HybridSequential()
        inter_channels = in_channels // 4
        with self.block.name_scope():
            self.block.add(nn.Conv2D(in_channels=in_channels, channels=inter_channels,
                                     kernel_size=3, padding=1, use_bias=False))
            self.block.add(norm_layer(in_channels=inter_channels,
                                      **({} if norm_kwargs is None else norm_kwargs)))
            self.block.add(nn.Activation('relu'))
            self.block.add(nn.Dropout(0.1))
            self.block.add(nn.Conv2D(in_channels=inter_channels, channels=channels,
                                     kernel_size=1))
Example 8: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def __init__(self, nclass, c1_channels=128, norm_layer=nn.BatchNorm, norm_kwargs=None,
             height=128, width=128, **kwargs):
    super(_DeepLabHead, self).__init__()
    self._up_kwargs = {'height': height, 'width': width}
    with self.name_scope():
        self.aspp = _ASPP(2048, [12, 24, 36], norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                          height=height//2, width=width//2, **kwargs)
        self.c1_block = nn.HybridSequential()
        self.c1_block.add(nn.Conv2D(in_channels=c1_channels, channels=48,
                                    kernel_size=3, padding=1, use_bias=False))
        self.c1_block.add(norm_layer(in_channels=48, **({} if norm_kwargs is None else norm_kwargs)))
        self.c1_block.add(nn.Activation('relu'))
        self.block = nn.HybridSequential()
        self.block.add(nn.Conv2D(in_channels=304, channels=256,
                                 kernel_size=3, padding=1, use_bias=False))
        self.block.add(norm_layer(in_channels=256, **({} if norm_kwargs is None else norm_kwargs)))
        self.block.add(nn.Activation('relu'))
        self.block.add(nn.Dropout(0.5))
        self.block.add(nn.Conv2D(in_channels=256, channels=256,
                                 kernel_size=3, padding=1, use_bias=False))
        self.block.add(norm_layer(in_channels=256, **({} if norm_kwargs is None else norm_kwargs)))
        self.block.add(nn.Activation('relu'))
        self.block.add(nn.Dropout(0.1))
        self.block.add(nn.Conv2D(in_channels=256, channels=nclass, kernel_size=1))
Example 9: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def __init__(self, layers, filters, classes=1000, batch_norm=False, **kwargs):
    super(VGG, self).__init__(**kwargs)
    assert len(layers) == len(filters)
    with self.name_scope():
        self.features = self._make_features(layers, filters, batch_norm)
        self.features.add(nn.Dense(4096, activation='relu',
                                   weight_initializer='normal',
                                   bias_initializer='zeros'))
        self.features.add(nn.Dropout(rate=0.5))
        self.features.add(nn.Dense(4096, activation='relu',
                                   weight_initializer='normal',
                                   bias_initializer='zeros'))
        self.features.add(nn.Dropout(rate=0.5))
        self.output = nn.Dense(classes,
                               weight_initializer='normal',
                               bias_initializer='zeros')
Example 10: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def __init__(self, channels, kernel_size, strides=(1, 1), padding=(0, 0),
             dilation=(1, 1), groups=1, radix=2, in_channels=None, r=2,
             norm_layer=BatchNorm, norm_kwargs=None, drop_ratio=0,
             *args, **kwargs):
    super(SplitAttentionConv, self).__init__()
    norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
    inter_channels = max(in_channels*radix//2//r, 32)
    self.radix = radix
    self.cardinality = groups
    self.conv = Conv2D(channels*radix, kernel_size, strides, padding, dilation,
                       groups=groups*radix, *args, in_channels=in_channels, **kwargs)
    self.use_bn = norm_layer is not None
    if self.use_bn:
        self.bn = norm_layer(in_channels=channels*radix, **norm_kwargs)
    self.relu = Activation('relu')
    self.fc1 = Conv2D(inter_channels, 1, in_channels=channels, groups=self.cardinality)
    if self.use_bn:
        self.bn1 = norm_layer(in_channels=inter_channels, **norm_kwargs)
    self.relu1 = Activation('relu')
    if drop_ratio > 0:
        self.drop = nn.Dropout(drop_ratio)
    else:
        self.drop = None
    self.fc2 = Conv2D(channels*radix, 1, in_channels=inter_channels, groups=self.cardinality)
    self.channels = channels
Example 11: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def __init__(self, order_of_cheb, Kt, channels, num_of_vertices, keep_prob,
             T, cheb_polys, activation='GLU', **kwargs):
    super(St_conv_block, self).__init__(**kwargs)
    c_si, c_t, c_oo = channels
    self.order_of_cheb = order_of_cheb
    self.Kt = Kt
    self.keep_prob = keep_prob
    self.seq = nn.HybridSequential()
    self.seq.add(
        Temporal_conv_layer(Kt, c_si, c_t, activation),
        Spatio_conv_layer(order_of_cheb, c_t, c_t,
                          num_of_vertices, T - (Kt - 1), cheb_polys),
        Temporal_conv_layer(Kt, c_t, c_oo),
        nn.LayerNorm(axis=1),
        nn.Dropout(1 - keep_prob)
    )
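Note that this block is parameterized by a keep probability, while nn.Dropout expects the probability of dropping a unit, hence the 1 - keep_prob conversion above. A one-line illustration:

keep_prob = 0.9                          # probability of keeping an activation
drop = nn.Dropout(rate=1 - keep_prob)    # nn.Dropout takes the drop probability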
Example 12: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def __init__(self,
             in_channels,
             out_channels,
             bottleneck_factor=4,
             **kwargs):
    super(DeepLabv3FinalBlock, self).__init__(**kwargs)
    assert (in_channels % bottleneck_factor == 0)
    mid_channels = in_channels // bottleneck_factor

    with self.name_scope():
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels)
        self.dropout = nn.Dropout(rate=0.1)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=True)
Example 13: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def __init__(self,
             in_channels,
             out_channels,
             bn_use_global_stats,
             dropout_rate,
             **kwargs):
    super(RoRBlock, self).__init__(**kwargs)
    self.use_dropout = (dropout_rate != 0.0)

    with self.name_scope():
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bn_use_global_stats=bn_use_global_stats)
        self.conv2 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            bn_use_global_stats=bn_use_global_stats,
            activation=None)
        if self.use_dropout:
            self.dropout = nn.Dropout(rate=dropout_rate)
Example 14: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def __init__(self,
             in_channels,
             out_channels,
             bn_use_global_stats,
             dropout_rate,
             **kwargs):
    super(DenseUnit, self).__init__(**kwargs)
    self.use_dropout = (dropout_rate != 0.0)
    bn_size = 4
    inc_channels = out_channels - in_channels
    mid_channels = inc_channels * bn_size

    with self.name_scope():
        self.conv1 = pre_conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            bn_use_global_stats=bn_use_global_stats)
        self.conv2 = pre_conv3x3_block(
            in_channels=mid_channels,
            out_channels=inc_channels,
            bn_use_global_stats=bn_use_global_stats)
        if self.use_dropout:
            self.dropout = nn.Dropout(rate=dropout_rate)
Example 15: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Dropout [as alias]
def __init__(self,
             groups,
             dropout_rate,
             **kwargs):
    super(ChannelwiseConv2d, self).__init__(**kwargs)
    self.use_dropout = (dropout_rate > 0.0)

    with self.name_scope():
        self.conv = nn.Conv3D(
            channels=groups,
            kernel_size=(4 * groups, 1, 1),
            strides=(groups, 1, 1),
            padding=(2 * groups - 1, 0, 0),
            use_bias=False,
            in_channels=1)
        if self.use_dropout:
            self.dropout = nn.Dropout(rate=dropout_rate)
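Finally, since most of the blocks above are HybridBlocks, here is a short sketch (again assuming MXNet 1.x) showing that nn.Dropout composes with hybridization transparently; compiling to a symbolic graph does not change its train/inference switching:

import mxnet as mx
from mxnet.gluon import nn

net = nn.HybridSequential()
net.add(nn.Dense(16, activation='relu'),
        nn.Dropout(rate=0.2),
        nn.Dense(2))
net.initialize()
net.hybridize()   # compile to a symbolic graph; Dropout still acts as the
                  # identity at inference and drops units under autograd.record()
out = net(mx.nd.random.uniform(shape=(8, 32)))
print(out.shape)  # (8, 2)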