This article collects typical usage examples of the Python method mxnet.gluon.nn.Conv1D. If you have been wondering what nn.Conv1D does, how to call it, or what working code looks like, the curated snippets below should help. You can also explore the containing module, mxnet.gluon.nn, for further detail.
Six code examples of nn.Conv1D are shown below, ordered by popularity by default.
Example 1: __init__
# Required import: from mxnet.gluon import nn
# or: from mxnet.gluon.nn import Conv1D; this example also needs: from typing import Optional
def __init__(
self,
channels: int,
kernel_size: int,
dilation: int = 1,
activation: Optional[str] = None,
**kwargs,
):
    super(CausalConv1D, self).__init__(**kwargs)
    self.dilation = dilation
    self.kernel_size = kernel_size
    # nn.Conv1D pads both ends of the sequence, so the output gains
    # `padding` extra time steps; the surplus on the right is trimmed in
    # the forward pass to keep the convolution causal.
    self.padding = dilation * (kernel_size - 1)
    self.conv1d = nn.Conv1D(
        channels=channels,
        kernel_size=kernel_size,
        dilation=dilation,
        padding=self.padding,
        activation=activation,
    )
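The excerpt ends before the class's forward pass. A minimal sketch of the usual causal-convolution trick (assumed, not part of the quoted code): slice the surplus padded steps off the right so that output t depends only on inputs at positions <= t.

def hybrid_forward(self, F, x):
    out = self.conv1d(x)  # NCW layout: (N, C, T + self.padding)
    if self.padding > 0:
        out = F.slice_axis(out, axis=-1, begin=0, end=-self.padding)
    return out  # (N, C, T)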
Example 2: conv1d
# Required import: from mxnet.gluon import nn
# or: from mxnet.gluon.nn import Conv1D; this example also needs: import math and import mxnet as mx
def conv1d(channels, kernel_size, in_channels, use_bias=True, **kwargs):
"""
Conv1D with better default initialization.
"""
n = in_channels
kernel_size = (
kernel_size if isinstance(kernel_size, list) else [kernel_size]
)
for k in kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
winit = mx.initializer.Uniform(stdv)
if use_bias:
binit = mx.initializer.Uniform(stdv)
else:
binit = "zeros"
return nn.Conv1D(
channels=channels,
kernel_size=kernel_size,
in_channels=in_channels,
use_bias=use_bias,
weight_initializer=winit,
bias_initializer=binit,
**kwargs,
)
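Hypothetical usage (the names and shapes below are illustrative, not from the example); the custom initializers take effect once the block's parameters are initialized:

layer = conv1d(channels=32, kernel_size=3, in_channels=8)
layer.initialize()
y = layer(mx.nd.zeros((1, 8, 100)))  # NCW input -> output shape (1, 32, 98)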
Example 3: test_conv
# Required import: from mxnet.gluon import nn
# or: from mxnet.gluon.nn import Conv1D
def test_conv():
    # 1D convolutions; input layout NCW = (batch, channels, width)
    layers1d = [
nn.Conv1D(16, 3, in_channels=4),
nn.Conv1D(16, 3, groups=2, in_channels=4),
nn.Conv1D(16, 3, strides=3, groups=2, in_channels=4),
]
for layer in layers1d:
check_layer_forward(layer, (1, 4, 10))
    # 2D convolutions; input layout NCHW = (batch, channels, height, width)
    layers2d = [
nn.Conv2D(16, (3, 4), in_channels=4),
nn.Conv2D(16, (5, 4), in_channels=4),
nn.Conv2D(16, (3, 4), groups=2, in_channels=4),
nn.Conv2D(16, (3, 4), strides=4, in_channels=4),
nn.Conv2D(16, (3, 4), dilation=4, in_channels=4),
nn.Conv2D(16, (3, 4), padding=4, in_channels=4),
]
for layer in layers2d:
check_layer_forward(layer, (1, 4, 20, 20))
    # 3D convolutions; input layout NCDHW
    layers3d = [
nn.Conv3D(16, (1, 8, 4), in_channels=4, activation='relu'),
nn.Conv3D(16, (5, 4, 3), in_channels=4),
nn.Conv3D(16, (3, 3, 3), groups=2, in_channels=4),
nn.Conv3D(16, 4, strides=4, in_channels=4),
nn.Conv3D(16, (3, 3, 3), padding=4, in_channels=4),
]
for layer in layers3d:
check_layer_forward(layer, (1, 4, 10, 10, 10))
    # Layout variants: the forward checks are commented out because NHWC and
    # NDHWC convolutions require a GPU build with cuDNN.
    layer = nn.Conv2D(16, (3, 3), layout='NHWC', in_channels=4)
    # check_layer_forward(layer, (1, 10, 10, 4))
    layer = nn.Conv3D(16, (3, 3, 3), layout='NDHWC', in_channels=4)
    # check_layer_forward(layer, (1, 10, 10, 10, 4))
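check_layer_forward is a helper from MXNet's test suite and is not shown in the excerpt. A minimal sketch of what such a helper does, assuming it verifies that imperative and hybridized execution agree:

import mxnet as mx
import numpy as np

def check_layer_forward(layer, dshape):
    # initialize and run one forward/backward pass imperatively...
    layer.initialize()
    x = mx.nd.ones(shape=dshape)
    x.attach_grad()
    with mx.autograd.record():
        out = layer(x)
    out.backward()
    npout = out.asnumpy()
    # ...then hybridize and confirm the compiled graph gives the same output
    layer.hybridize()
    assert np.allclose(npout, layer(x).asnumpy(), rtol=1e-5, atol=1e-6)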
Example 4: __init__
# Required import: from mxnet.gluon import nn
# or: from mxnet.gluon.nn import Conv1D
def __init__(self, n_class, **kwargs):
    super().__init__(**kwargs)
    # kernel_size=1 makes this a per-time-step linear classifier: the same
    # dense map from channels to n_class scores is applied at every position.
    self.fc = nn.Conv1D(channels=n_class, kernel_size=1)
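Illustrative only (shapes assumed, with import mxnet as mx in scope):

head = nn.Conv1D(channels=10, kernel_size=1)
head.initialize()
scores = head(mx.nd.zeros((2, 64, 50)))  # (N, C, T) = (2, 64, 50) -> (2, 10, 50)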
Example 5: __init__
# Required imports: from mxnet.gluon import nn, rnn
# or: from mxnet.gluon.nn import Conv1D
def __init__(self, num_series, conv_hid, gru_hid, skip_gru_hid, skip, ar_window):
    super(LSTNet, self).__init__()
    kernel_size = 6
    dropout_rate = 0.2
    self.skip = skip            # period (in time steps) fed to the skip-GRU
    self.ar_window = ar_window  # lookback of the autoregressive head
    with self.name_scope():
        # convolution along the time axis extracts short-term local patterns
        self.conv = nn.Conv1D(conv_hid, kernel_size=kernel_size, layout='NCW', activation='relu')
        self.dropout = nn.Dropout(dropout_rate)
        self.gru = rnn.GRU(gru_hid, layout='TNC')            # long-term dependencies
        self.skip_gru = rnn.GRU(skip_gru_hid, layout='TNC')  # recurring periodic patterns
        self.fc = nn.Dense(num_series)
        self.ar_fc = nn.Dense(1)  # linear autoregressive component
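A hypothetical shape check for the convolutional front end (all sizes assumed, with import mxnet as mx in scope); with kernel_size=6 and no padding, the time axis shrinks by 5:

net = LSTNet(num_series=8, conv_hid=32, gru_hid=64, skip_gru_hid=16, skip=24, ar_window=7)
net.initialize()
feat = net.conv(mx.nd.zeros((4, 8, 168)))  # NCW: (4, 8, 168) -> (4, 32, 163)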
Example 6: __init__
# Required imports: from mxnet.gluon import nn, rnn
# or: from mxnet.gluon.nn import Conv1D
def __init__(self, **kwargs):
    super(FeatureBlock, self).__init__(**kwargs)
    # note: with num_layers=1 the between-layer dropout argument has no effect
    self.gru = rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=1, dropout=0.2)
    # three parallel convolutions with 'same' padding (padding = (kernel_size - 1) // 2),
    # so every branch preserves the sequence length and the outputs can be combined
    self.conv3 = nn.Conv1D(channels=128, kernel_size=5, padding=2, strides=1, activation='relu')
    self.conv5 = nn.Conv1D(channels=128, kernel_size=9, padding=4, strides=1, activation='relu')
    self.conv7 = nn.Conv1D(channels=128, kernel_size=13, padding=6, strides=1, activation='relu')
    self.conv_drop = nn.Dropout(0.2)
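An illustrative shape check (sizes assumed, with import mxnet as mx in scope). The convolutions take NCW input while the GRU is configured for NTC, so the omitted forward pass presumably transposes between the two layouts:

block = FeatureBlock()
block.initialize()
x = mx.nd.zeros((2, 64, 100))                     # NCW: (batch, channels, time)
print(block.conv3(x).shape)                       # (2, 128, 100): 'same' padding keeps T
h = block.gru(mx.nd.transpose(x, axes=(0, 2, 1))) # NTC: (2, 100, 64) -> (2, 100, 256), bidirectional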