This page collects typical usage examples of Python's mxnet.gluon.rnn.GRU. If you have been wondering what exactly rnn.GRU does, how to use it, or want to see it in real code, the curated examples below may help. You can also read further about the module it belongs to, mxnet.gluon.rnn.
Nine code examples of rnn.GRU are shown below, sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
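Before the examples, here is a minimal, self-contained use of rnn.GRU on its own (shapes follow the layer's default 'TNC' layout):

import mxnet as mx
from mxnet.gluon import rnn

layer = rnn.GRU(100, num_layers=3)               # 100 hidden units, 3 stacked layers
layer.initialize()
inputs = mx.nd.random.uniform(shape=(5, 3, 10))  # default layout 'TNC': T=5, N=3, C=10
outputs = layer(inputs)                          # (5, 3, 100)
h0 = layer.begin_state(batch_size=3)             # optional explicit initial state
outputs, hn = layer(inputs, h0)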
Example 1: net_define
# Module needed: from mxnet.gluon import rnn
# Or: from mxnet.gluon.rnn import GRU
def net_define():
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.Embedding(config.MAX_WORDS, config.EMBEDDING_DIM))
        net.add(rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=2, dropout=0.2))
        net.add(transpose(axes=(0, 2, 1)))
        # net.add(nn.MaxPool2D(pool_size=(config.MAX_LENGTH, 1)))
        # net.add(nn.Conv2D(128, kernel_size=(101, 1), padding=(50, 0), groups=128, activation='relu'))
        net.add(PrimeConvCap(8, 32, kernel_size=(1, 1), padding=(0, 0)))
        # net.add(AdvConvCap(8, 32, 8, 32, kernel_size=(1, 1), padding=(0, 0)))
        # Integer division: the original `/` would pass a float under Python 3
        net.add(CapFullyBlock(8 * config.MAX_LENGTH // 2, num_cap=12, input_units=32, units=16, route_num=5))
        # net.add(CapFullyBlock(8 * (config.MAX_LENGTH - 8), num_cap=12, input_units=32, units=16, route_num=5))
        # net.add(CapFullyBlock(8, num_cap=12, input_units=32, units=16, route_num=5))
        net.add(nn.Dropout(0.2))
        # net.add(LengthBlock())
        net.add(nn.Dense(6, activation='sigmoid'))
    net.initialize(init=init.Xavier())
    return net
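For orientation, a sketch of what the embedding → GRU → transpose front end above produces, with made-up sizes standing in for the config values:

from mxnet import nd
from mxnet.gluon import nn, rnn

emb = nn.Embedding(20000, 300)   # stand-ins for config.MAX_WORDS, config.EMBEDDING_DIM
gru = rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=2)
emb.initialize()
gru.initialize()
x = nd.ones((4, 100))            # N=4 sequences of T=100 token ids
h = gru(emb(x))                  # (4, 100, 256): forward and backward outputs concatenated
c = h.transpose((0, 2, 1))       # (4, 256, 100), i.e. NCT for the capsule layers that follow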
Example 2: net_define_eu
# Module needed: from mxnet.gluon import rnn
# Or: from mxnet.gluon.rnn import GRU
def net_define_eu():
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.Embedding(config.MAX_WORDS, config.EMBEDDING_DIM))
        net.add(rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=1, dropout=0.2))
        net.add(transpose(axes=(0, 2, 1)))
        net.add(nn.GlobalMaxPool1D())
        # net.add(FeatureBlock1())
        net.add(extendDim(axes=3))
        net.add(PrimeConvCap(16, 32, kernel_size=(1, 1), padding=(0, 0), strides=(1, 1)))
        net.add(CapFullyNGBlock(16, num_cap=12, input_units=32, units=16, route_num=3))
        net.add(nn.Dropout(0.2))
        net.add(nn.Dense(6, activation='sigmoid'))
    net.initialize(init=init.Xavier())
    return net
Example 3: __init__
# Module needed: from mxnet.gluon import rnn
# Or: from mxnet.gluon.rnn import GRU
def __init__(self, mode, vocab_size, num_embed, num_hidden,
             num_layers, dropout=0.5, tie_weights=False, **kwargs):
    super(RNNModel, self).__init__(**kwargs)
    with self.name_scope():
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(vocab_size, num_embed,
                                    weight_initializer=mx.init.Uniform(0.1))
        if mode == 'rnn_relu':
            self.rnn = rnn.RNN(num_hidden, num_layers, dropout=dropout,
                               input_size=num_embed)
        elif mode == 'rnn_tanh':
            self.rnn = rnn.RNN(num_hidden, num_layers, 'tanh', dropout=dropout,
                               input_size=num_embed)
        elif mode == 'lstm':
            self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
                                input_size=num_embed)
        elif mode == 'gru':
            self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
                               input_size=num_embed)
        else:
            raise ValueError("Invalid mode %s. Options are rnn_relu, "
                             "rnn_tanh, lstm, and gru" % mode)
        if tie_weights:
            self.decoder = nn.Dense(vocab_size, in_units=num_hidden,
                                    params=self.encoder.params)
        else:
            self.decoder = nn.Dense(vocab_size, in_units=num_hidden)
        self.num_hidden = num_hidden
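Only __init__ is shown above. In the standard MXNet word-language-model example that this pattern follows, the rest of the class looks roughly like this (a sketch for context, not part of the snippet):

def forward(self, inputs, hidden):
    emb = self.drop(self.encoder(inputs))            # T x N token ids -> embeddings
    output, hidden = self.rnn(emb, hidden)
    output = self.drop(output)
    decoded = self.decoder(output.reshape((-1, self.num_hidden)))
    return decoded, hidden

def begin_state(self, *args, **kwargs):
    return self.rnn.begin_state(*args, **kwargs)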
Example 4: __init__
# Module needed: from mxnet.gluon import rnn
# Or: from mxnet.gluon.rnn import GRU
def __init__(self, **kwargs):
    super(SMN_Last, self).__init__(**kwargs)
    with self.name_scope():
        self.Embed = nn.Embedding(411721, 256)
        # aggregation parameters
        self.gru = rnn.GRU(1024, 2, layout='NTC')
        self.mlp_1 = nn.Dense(units=60, flatten=False, activation='relu')
        self.mlp_2 = nn.Dense(units=1, flatten=False)
        # trainable (1024, 2000) topic-matching matrix registered with the block
        self.topic_embedding = self.params.get('param_test', shape=(1024, 2000))
Example 5: __init__
# Module needed: from mxnet.gluon import rnn
# Or: from mxnet.gluon.rnn import GRU
def __init__(self, mode, vocab_size, num_embed, num_hidden,
             num_layers, dropout=0.5, tie_weights=False, **kwargs):
    super(RNNModel, self).__init__(**kwargs)
    with self.name_scope():
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(vocab_size, num_embed,
                                    weight_initializer=mx.init.Uniform(0.1))
        if mode == 'rnn_relu':
            # activation is the third positional argument of rnn.RNN, after
            # num_layers (the original snippet had the two swapped)
            self.rnn = rnn.RNN(num_hidden, num_layers, 'relu', dropout=dropout,
                               input_size=num_embed)
        elif mode == 'rnn_tanh':
            self.rnn = rnn.RNN(num_hidden, num_layers, 'tanh', dropout=dropout,
                               input_size=num_embed)
        elif mode == 'lstm':
            self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
                                input_size=num_embed)
        elif mode == 'gru':
            self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
                               input_size=num_embed)
        else:
            raise ValueError("Invalid mode %s. Options are rnn_relu, "
                             "rnn_tanh, lstm, and gru" % mode)
        if tie_weights:
            self.decoder = nn.Dense(vocab_size, in_units=num_hidden,
                                    params=self.encoder.params)
        else:
            self.decoder = nn.Dense(vocab_size, in_units=num_hidden)
        self.num_hidden = num_hidden
Example 6: __init__
# Module needed: from mxnet.gluon import rnn
# Or: from mxnet.gluon.rnn import GRU
def __init__(self, num_series, conv_hid, gru_hid, skip_gru_hid, skip, ar_window):
    super(LSTNet, self).__init__()
    kernel_size = 6
    dropout_rate = 0.2
    self.skip = skip
    self.ar_window = ar_window
    with self.name_scope():
        self.conv = nn.Conv1D(conv_hid, kernel_size=kernel_size, layout='NCW', activation='relu')
        self.dropout = nn.Dropout(dropout_rate)
        self.gru = rnn.GRU(gru_hid, layout='TNC')
        self.skip_gru = rnn.GRU(skip_gru_hid, layout='TNC')
        self.fc = nn.Dense(num_series)
        self.ar_fc = nn.Dense(1)
Example 7: forward
# Module needed: from mxnet.gluon import rnn
# Or: from mxnet.gluon.rnn import GRU
def forward(self, x):
    """
    :param nd.NDArray x: input data in NTC layout (N: batch size, T: sequence length, C: channels)
    :return: output of LSTNet in NC layout
    :rtype: nd.NDArray
    """
    # Convolution
    c = self.conv(x.transpose((0, 2, 1)))  # Transpose NTC to NCT (a.k.a. NCW) before convolution
    c = self.dropout(c)
    # GRU
    r = self.gru(c.transpose((2, 0, 1)))  # Transpose NCT to TNC before the GRU
    r = r[-1]  # Keep only the last output
    r = self.dropout(r)  # Now in NC layout
    # Skip GRU
    # Slice off any remainder so the time axis is a multiple of skip
    skip_c = c[:, :, -(c.shape[2] // self.skip) * self.skip:]
    skip_c = skip_c.reshape(c.shape[0], c.shape[1], -1, self.skip)  # Reshape to N x C x T/skip x skip
    skip_c = skip_c.transpose((2, 0, 3, 1))  # Transpose to T/skip x N x skip x C
    skip_c = skip_c.reshape(skip_c.shape[0], -1, skip_c.shape[3])  # Reshape to T/skip x (N*skip) x C
    s = self.skip_gru(skip_c)
    s = s[-1]  # Keep only the last output, now (N*skip) x C
    s = s.reshape(x.shape[0], -1)  # Now in N x (skip*C) layout
    # FC layer
    fc = self.fc(nd.concat(r, s))  # NC layout
    # Autoregressive highway
    ar_x = x[:, -self.ar_window:, :]  # NTC layout
    ar_x = ar_x.transpose((0, 2, 1))  # NCT layout
    ar_x = ar_x.reshape(-1, ar_x.shape[2])  # (N*C) x T layout
    ar = self.ar_fc(ar_x)
    ar = ar.reshape(x.shape[0], -1)  # Back to NC layout
    # Add the autoregressive and fc outputs
    res = fc + ar
    return res
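A quick smoke test of the LSTNet class from Examples 6 and 7 (the hyperparameter values here are made up for illustration):

from mxnet import init, nd

net = LSTNet(num_series=8, conv_hid=32, gru_hid=64, skip_gru_hid=16, skip=4, ar_window=6)
net.initialize(init.Xavier())
x = nd.random.uniform(shape=(2, 48, 8))  # NTC: N=2, T=48 time steps, C=8 series
y = net(x)                               # (2, 8): one prediction per series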
Example 8: __init__
# Module needed: from mxnet.gluon import rnn
# Or: from mxnet.gluon.rnn import GRU
def __init__(self, **kwargs):
    super(FeatureBlock, self).__init__(**kwargs)
    self.gru = rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=1, dropout=0.2)
    self.conv3 = nn.Conv1D(channels=128, kernel_size=5, padding=2, strides=1, activation='relu')
    self.conv5 = nn.Conv1D(channels=128, kernel_size=9, padding=4, strides=1, activation='relu')
    self.conv7 = nn.Conv1D(channels=128, kernel_size=13, padding=6, strides=1, activation='relu')
    self.conv_drop = nn.Dropout(0.2)
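The snippet stops at __init__, so the actual wiring of these blocks is not shown. One plausible composition, purely a sketch: a transpose has to sit between the GRU and the convolutions, because Conv1D expects NCW while the GRU is configured for NTC.

def forward(self, x):              # x: N x T x C
    r = self.gru(x)                # N x T x 256 (bidirectional)
    c = r.transpose((0, 2, 1))     # N x 256 x T, i.e. NCW for Conv1D
    # multi-scale features from three receptive-field sizes, stacked on channels
    c = nd.concat(self.conv3(c), self.conv5(c), self.conv7(c), dim=1)
    return self.conv_drop(c)       # N x 384 x T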
Example 9: __init__
# Module needed: from mxnet.gluon import rnn
# Or: from mxnet.gluon.rnn import GRU
def __init__(
    self,
    mode: str,
    num_hidden: int,
    num_layers: int,
    bidirectional: bool = False,
    **kwargs,
):
    super(RNN, self).__init__(**kwargs)
    with self.name_scope():
        if mode == "rnn_relu":
            self.rnn = rnn.RNN(
                num_hidden,
                num_layers,
                bidirectional=bidirectional,
                activation="relu",
                layout="NTC",
            )
        elif mode == "rnn_tanh":
            self.rnn = rnn.RNN(
                num_hidden,
                num_layers,
                bidirectional=bidirectional,
                activation="tanh",  # without this, rnn.RNN silently defaults to relu
                layout="NTC",
            )
        elif mode == "lstm":
            self.rnn = rnn.LSTM(
                num_hidden,
                num_layers,
                bidirectional=bidirectional,
                layout="NTC",
            )
        elif mode == "gru":
            self.rnn = rnn.GRU(
                num_hidden,
                num_layers,
                bidirectional=bidirectional,
                layout="NTC",
            )
        else:
            raise ValueError(
                "Invalid mode %s. Options are rnn_relu, rnn_tanh, lstm, and gru"
                % mode
            )
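As used here, layout="NTC" keeps every branch batch-major. For reference, a standalone check of what the gru branch produces:

from mxnet import nd
from mxnet.gluon import rnn

layer = rnn.GRU(64, 2, bidirectional=True, layout="NTC")
layer.initialize()
x = nd.random.uniform(shape=(4, 10, 32))  # N=4, T=10, C=32
print(layer(x).shape)                     # (4, 10, 128): hidden size doubles when bidirectional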