

Python rnn.GRU Code Examples

This article collects typical usage examples of rnn.GRU from the Python module mxnet.gluon.rnn. If you are wondering what rnn.GRU is for, how to use it, or what it looks like in real code, the curated examples below should help. You can also explore the other members of mxnet.gluon.rnn for further usage examples.


The following presents 9 code examples of rnn.GRU, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
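
Before diving into the examples, here is a minimal, self-contained sketch of rnn.GRU itself (all sizes are illustrative and not taken from the examples below):

import mxnet as mx
from mxnet import nd
from mxnet.gluon import rnn

# A bidirectional, 2-layer GRU operating on batch-major (NTC) input.
gru = rnn.GRU(64, num_layers=2, layout='NTC', bidirectional=True)
gru.initialize(mx.init.Xavier())

x = nd.random.uniform(shape=(8, 20, 32))  # N=8 sequences, T=20 steps, C=32 features
out = gru(x)
print(out.shape)                          # (8, 20, 128): 64 hidden units x 2 directions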

Example 1: net_define

# Required import: from mxnet.gluon import rnn [as alias]
# Or alternatively: from mxnet.gluon.rnn import GRU [as alias]
def net_define():
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.Embedding(config.MAX_WORDS, config.EMBEDDING_DIM))
        net.add(rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=2, dropout=0.2))
        net.add(transpose(axes=(0, 2, 1)))
        # net.add(nn.MaxPool2D(pool_size=(config.MAX_LENGTH,1)))
        # net.add(nn.Conv2D(128, kernel_size=(101,1), padding=(50,0), groups=128,activation='relu'))
        net.add(PrimeConvCap(8, 32, kernel_size=(1, 1), padding=(0, 0)))
        # net.add(AdvConvCap(8,32,8,32, kernel_size=(1,1), padding=(0,0)))
        net.add(CapFullyBlock(8 * config.MAX_LENGTH // 2, num_cap=12, input_units=32, units=16, route_num=5))  # // keeps the unit count an int under Python 3
        # net.add(CapFullyBlock(8*(config.MAX_LENGTH-8), num_cap=12, input_units=32, units=16, route_num=5))
        # net.add(CapFullyBlock(8, num_cap=12, input_units=32, units=16, route_num=5))
        net.add(nn.Dropout(0.2))
        # net.add(LengthBlock())
        net.add(nn.Dense(6, activation='sigmoid'))
    net.initialize(init=init.Xavier())
    return net 
Author: Godricly, Project: comment_toxic_CapsuleNet, Lines: 20, Source: net.py

Example 2: net_define_eu

# Required import: from mxnet.gluon import rnn [as alias]
# Or alternatively: from mxnet.gluon.rnn import GRU [as alias]
def net_define_eu():
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.Embedding(config.MAX_WORDS, config.EMBEDDING_DIM))
        net.add(rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=1, dropout=0.2))
        net.add(transpose(axes=(0, 2, 1)))
        net.add(nn.GlobalMaxPool1D())
        # net.add(FeatureBlock1())
        net.add(extendDim(axes=3))
        net.add(PrimeConvCap(16, 32, kernel_size=(1, 1), padding=(0, 0), strides=(1, 1)))
        net.add(CapFullyNGBlock(16, num_cap=12, input_units=32, units=16, route_num=3))
        net.add(nn.Dropout(0.2))
        net.add(nn.Dense(6, activation='sigmoid'))
    net.initialize(init=init.Xavier())
    return net 
Author: Godricly, Project: comment_toxic_CapsuleNet, Lines: 19, Source: net.py

Example 3: __init__

# Required import: from mxnet.gluon import rnn [as alias]
# Or alternatively: from mxnet.gluon.rnn import GRU [as alias]
def __init__(self, mode, vocab_size, num_embed, num_hidden,
                 num_layers, dropout=0.5, tie_weights=False, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        with self.name_scope():
            self.drop = nn.Dropout(dropout)
            self.encoder = nn.Embedding(vocab_size, num_embed,
                                        weight_initializer=mx.init.Uniform(0.1))
            if mode == 'rnn_relu':
                self.rnn = rnn.RNN(num_hidden, num_layers, dropout=dropout,
                                   input_size=num_embed)
            elif mode == 'rnn_tanh':
                self.rnn = rnn.RNN(num_hidden, num_layers, 'tanh', dropout=dropout,
                                   input_size=num_embed)
            elif mode == 'lstm':
                self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
                                    input_size=num_embed)
            elif mode == 'gru':
                self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
                                   input_size=num_embed)
            else:
                raise ValueError("Invalid mode %s. Options are rnn_relu, "
                                 "rnn_tanh, lstm, and gru"%mode)

            if tie_weights:
                self.decoder = nn.Dense(vocab_size, in_units=num_hidden,
                                        params=self.encoder.params)
            else:
                self.decoder = nn.Dense(vocab_size, in_units=num_hidden)

            self.num_hidden = num_hidden 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 32, Source: model.py
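
Note that the tie_weights branch shares the embedding matrix with the output projection, which only works when num_embed == num_hidden (the decoder reuses the (vocab_size, num_embed) weight). A hedged construction sketch follows; the excerpt omits the model's forward method, so only instantiation is shown, and all sizes are illustrative:

import mxnet as mx

model = RNNModel(mode='gru', vocab_size=10000, num_embed=200,
                 num_hidden=200, num_layers=2, dropout=0.5, tie_weights=True)
model.initialize(mx.init.Xavier())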

Example 4: __init__

# Required import: from mxnet.gluon import rnn [as alias]
# Or alternatively: from mxnet.gluon.rnn import GRU [as alias]
def __init__(self, **kwargs):
        super(SMN_Last, self).__init__(**kwargs)
        with self.name_scope():

            self.Embed = nn.Embedding(411721, 256)  # vocabulary size, embedding dim
            # aggregation GRU: 2 layers of 1024 units over batch-major (NTC) input
            self.gru = rnn.GRU(1024, 2, layout='NTC')
            self.mlp_1 = nn.Dense(units=60, flatten=False, activation='relu')
            self.mlp_2 = nn.Dense(units=1, flatten=False)
            # learnable topic-embedding matrix, registered as a block parameter
            self.topic_embedding = self.params.get('param_test', shape=(1024, 2000))
Author: NonvolatileMemory, Project: AAAI_2019_EXAM, Lines: 13, Source: TextEXAM_multi-label.py
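
The topic_embedding parameter registered through self.params.get is consumed in the project's forward pass, which this excerpt omits. The sketch below shows the usual gluon pattern of reading such a parameter with .data(); it is a hypothetical illustration, not the project's actual forward implementation:

from mxnet import nd

# Hypothetical forward sketch: read a params.get(...) parameter at call time.
def forward(self, x):
    emb = self.Embed(x)                                           # (N, T, 256)
    agg = self.gru(emb)                                           # (N, T, 1024), NTC layout
    scores = nd.dot(agg[:, -1, :], self.topic_embedding.data())   # (N, 2000)
    return scores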

Example 5: __init__

# Required import: from mxnet.gluon import rnn [as alias]
# Or alternatively: from mxnet.gluon.rnn import GRU [as alias]
def __init__(self, mode, vocab_size, num_embed, num_hidden,
                 num_layers, dropout=0.5, tie_weights=False, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        with self.name_scope():
            self.drop = nn.Dropout(dropout)
            self.encoder = nn.Embedding(vocab_size, num_embed,
                                        weight_initializer=mx.init.Uniform(0.1))
            if mode == 'rnn_relu':
                self.rnn = rnn.RNN(num_hidden, num_layers, activation='relu',
                                   dropout=dropout, input_size=num_embed)
            elif mode == 'rnn_tanh':
                self.rnn = rnn.RNN(num_hidden, num_layers, activation='tanh',
                                   dropout=dropout, input_size=num_embed)
            elif mode == 'lstm':
                self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
                                    input_size=num_embed)
            elif mode == 'gru':
                self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
                                   input_size=num_embed)
            else:
                raise ValueError("Invalid mode %s. Options are rnn_relu, "
                                 "rnn_tanh, lstm, and gru"%mode)

            if tie_weights:
                self.decoder = nn.Dense(vocab_size, in_units=num_hidden,
                                        params=self.encoder.params)
            else:
                self.decoder = nn.Dense(vocab_size, in_units=num_hidden)

            self.num_hidden = num_hidden 
Author: awslabs, Project: deeplearning-benchmark, Lines: 32, Source: model.py

Example 6: __init__

# Required import: from mxnet.gluon import rnn [as alias]
# Or alternatively: from mxnet.gluon.rnn import GRU [as alias]
def __init__(self, num_series, conv_hid, gru_hid, skip_gru_hid, skip, ar_window):
        super(LSTNet, self).__init__()
        kernel_size = 6
        dropout_rate = 0.2
        self.skip = skip
        self.ar_window = ar_window
        with self.name_scope():
            self.conv = nn.Conv1D(conv_hid, kernel_size=kernel_size, layout='NCW', activation='relu')
            self.dropout = nn.Dropout(dropout_rate)
            self.gru = rnn.GRU(gru_hid, layout='TNC')
            self.skip_gru = rnn.GRU(skip_gru_hid, layout='TNC')
            self.fc = nn.Dense(num_series)
            self.ar_fc = nn.Dense(1) 
Author: safrooze, Project: LSTNet-Gluon, Lines: 15, Source: model.py

Example 7: forward

# Required import: from mxnet.gluon import rnn [as alias]
# Or alternatively: from mxnet.gluon.rnn import GRU [as alias]
def forward(self, x):
        """
        :param nd.NDArray x: input data in NTC layout (N: batch-size, T: sequence len, C: channels)
        :return: output of LSTNet in NC layout
        :rtype nd.NDArray
        """
        # Convolution
        c = self.conv(x.transpose((0, 2, 1)))  # Transpose NTC to NCT (a.k.a. NCW) before convolution
        c = self.dropout(c)

        # GRU
        r = self.gru(c.transpose((2, 0, 1)))  # Transpose NCT to TNC before GRU
        r = r[-1]  # Only keep the last output
        r = self.dropout(r)  # Now in NC layout

        # Skip GRU
        # Keep only the trailing (T // skip) * skip steps of the convolution output
        skip_c = c[:, :, -(c.shape[2] // self.skip) * self.skip:]
        skip_c = skip_c.reshape(c.shape[0], c.shape[1], -1, self.skip)  # N x C x (T//skip) x skip
        skip_c = skip_c.transpose((2, 0, 3, 1))  # (T//skip) x N x skip x C
        skip_c = skip_c.reshape(skip_c.shape[0], -1, skip_c.shape[3])  # (T//skip) x (N*skip) x C
        s = self.skip_gru(skip_c)
        s = s[-1]  # Only keep the last output (now in (Nxskip) x C layout)
        s = s.reshape(x.shape[0], -1)  # Now in N x (skipxC) layout

        # FC layer
        fc = self.fc(nd.concat(r, s))  # NC layout

        # Autoregressive highway
        ar_x = x[:, -self.ar_window:, :]  # NTC layout
        ar_x = ar_x.transpose((0, 2, 1))  # NCT layout
        ar_x = ar_x.reshape(-1, ar_x.shape[2])  # (NC) x T layout
        ar = self.ar_fc(ar_x)
        ar = ar.reshape(x.shape[0], -1)  # NC layout

        # Add autoregressive and fc outputs
        res = fc + ar
        return res 
Author: safrooze, Project: LSTNet-Gluon, Lines: 40, Source: model.py
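
Since Examples 6 and 7 together define a complete LSTNet, an end-to-end usage sketch is possible (the hyperparameters below are illustrative, not the paper's settings):

import mxnet as mx
from mxnet import nd

net = LSTNet(num_series=8, conv_hid=16, gru_hid=32, skip_gru_hid=16,
             skip=6, ar_window=24)
net.initialize(mx.init.Xavier())

x = nd.random.uniform(shape=(4, 48, 8))  # NTC: batch=4, T=48 steps, 8 series
y = net(x)                               # conv leaves T' = 48 - 6 + 1 = 43 steps
print(y.shape)                           # (4, 8): one prediction per series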

Example 8: __init__

# Required import: from mxnet.gluon import rnn [as alias]
# Or alternatively: from mxnet.gluon.rnn import GRU [as alias]
def __init__(self, **kwargs):
        super(FeatureBlock, self).__init__(**kwargs)
        self.gru = rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=1, dropout=0.2)
        self.conv3 = nn.Conv1D(channels=128, kernel_size=5, padding=2, strides=1, activation='relu')
        self.conv5 = nn.Conv1D(channels=128, kernel_size=9, padding=4, strides=1, activation='relu')
        self.conv7 = nn.Conv1D(channels=128, kernel_size=13, padding=6, strides=1, activation='relu')
        self.conv_drop = nn.Dropout(0.2) 
Author: Godricly, Project: comment_toxic_CapsuleNet, Lines: 9, Source: net.py

Example 9: __init__

# Required import: from mxnet.gluon import rnn [as alias]
# Or alternatively: from mxnet.gluon.rnn import GRU [as alias]
def __init__(
        self,
        mode: str,
        num_hidden: int,
        num_layers: int,
        bidirectional: bool = False,
        **kwargs,
    ):
        super(RNN, self).__init__(**kwargs)

        with self.name_scope():
            if mode == "rnn_relu":
                self.rnn = rnn.RNN(
                    num_hidden,
                    num_layers,
                    bidirectional=bidirectional,
                    activation="relu",
                    layout="NTC",
                )
            elif mode == "rnn_tanh":
                self.rnn = rnn.RNN(
                    num_hidden,
                    num_layers,
                    bidirectional=bidirectional,
                    layout="NTC",
                )
            elif mode == "lstm":
                self.rnn = rnn.LSTM(
                    num_hidden,
                    num_layers,
                    bidirectional=bidirectional,
                    layout="NTC",
                )
            elif mode == "gru":
                self.rnn = rnn.GRU(
                    num_hidden,
                    num_layers,
                    bidirectional=bidirectional,
                    layout="NTC",
                )
            else:
                raise ValueError(
                    "Invalid mode %s. Options are rnn_relu, rnn_tanh, lstm, and gru"
                    % mode
                )
Author: awslabs, Project: gluon-ts, Lines: 47, Source: rnn.py
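
As with Example 3, this excerpt defines only the constructor. A hedged instantiation sketch (sizes illustrative; the inner layer can be exercised directly, since gluon recurrent layers infer their input size on the first call):

import mxnet as mx
from mxnet import nd

net = RNN(mode="gru", num_hidden=40, num_layers=2, bidirectional=True)
net.initialize(mx.init.Xavier())

out = net.rnn(nd.ones((4, 12, 8)))  # NTC input
print(out.shape)                    # (4, 12, 80): 40 units x 2 directions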


Note: The mxnet.gluon.rnn.GRU examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to each project's License; do not repost without permission.