

Python layers.CuDNNLSTM Method Code Examples

This article collects typical usage examples of the keras.layers.CuDNNLSTM method in Python. If you are wondering what layers.CuDNNLSTM does, how to use it, or are looking for worked examples, the curated code samples below may help. You can also explore further usage examples from the containing module, keras.layers.


The sections below present 8 code examples of the layers.CuDNNLSTM method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
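Before the examples themselves, a minimal sketch (not taken from any of the projects below; shapes and unit counts are illustrative assumptions) shows the basic pattern. CuDNNLSTM is a cuDNN-backed, GPU-only counterpart of LSTM with the same input/output contract, but it accepts no dropout, recurrent_dropout, or custom activation arguments:

from keras.layers import Input, CuDNNLSTM, Dense
from keras.models import Model

# Illustrative shapes: 100 timesteps of 32 features, 10 output classes
inp = Input(shape=(100, 32))
h = CuDNNLSTM(64)(inp)            # runs only on a CUDA GPU with cuDNN available
out = Dense(10, activation='softmax')(h)
model = Model(inputs=inp, outputs=out)
model.compile(optimizer='adam', loss='categorical_crossentropy')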

Example 1: create

# Required imports: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]
def create(inputtokens, vocabsize, units=16, dropout=0, embedding=32):
    input_ = Input(shape=(inputtokens,), dtype='int32')

    # Embedding layer
    net = Embedding(input_dim=vocabsize, output_dim=embedding, input_length=inputtokens)(input_)
    net = Dropout(dropout)(net)

    # Bidirectional LSTM layer
    net = BatchNormalization()(net)
    net = Bidirectional(CuDNNLSTM(units))(net)
    net = Dropout(dropout)(net)

    # Output layer
    net = Dense(vocabsize, activation='softmax')(net)
    model = Model(inputs=input_, outputs=net)

    # Make data-parallel
    ngpus = len(get_available_gpus())
    if ngpus > 1:
        model = make_parallel(model, ngpus)

    return model
Author: albarji, Project: neurowriter, Lines: 25, Source: models.py
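A hedged usage sketch for this example: the argument values below are illustrative, and get_available_gpus / make_parallel are helper functions from the neurowriter project, not Keras APIs.

# Illustrative call (assumed values): windows of 128 tokens over a 5000-symbol vocabulary
model = create(inputtokens=128, vocabsize=5000, units=32, dropout=0.2, embedding=64)
model.compile(optimizer='adam', loss='categorical_crossentropy')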

Example 2: nnet

# Required imports: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]
def nnet(inputs, keep_prob, num_classes):
    """
    Deep network model for single-lead signals.
    :param inputs: keras tensor, the single-lead signal after slicing and stacking.
    :param keep_prob: float, dropout rate for random segment masking.
    :param num_classes: int, number of target classes.
    :return: keras tensors, the class probabilities and the features extracted
             automatically before the fully connected layer.
    """
    branches = []
    for i in range(int(inputs.shape[-1])):
        # Slice out branch i and run it through the shared CNN backbone
        ld = Lambda(Net.__slice, output_shape=(int(inputs.shape[1]), 1), arguments={'index': i})(inputs)
        ld = Reshape((int(inputs.shape[1]), 1))(ld)
        bch = Net.__backbone(ld)
        branches.append(bch)
    features = Concatenate(axis=1)(branches)
    features = Dropout(keep_prob, [1, int(inputs.shape[-1]), 1])(features)
    features = Bidirectional(CuDNNLSTM(1, return_sequences=True), merge_mode='concat')(features)
    features = Flatten()(features)
    net = Dense(units=num_classes, activation='softmax')(features)
    return net, features
Author: Aiwiscal, Project: CPSC_Scheme, Lines: 22, Source: CPSC_model.py
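A hedged usage sketch: the input shape is an assumption (12-lead ECG windows), and Net.__slice / Net.__backbone are private helpers of the project's Net class, so nnet is assumed to be exposed through that class.

# Illustrative shapes (assumed): 2400 samples per lead, 12 leads, 9 target classes
inputs = Input(shape=(2400, 12))
probs, feats = Net.nnet(inputs, keep_prob=0.5, num_classes=9)
model = Model(inputs=inputs, outputs=probs)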

Example 3: lstm

# Required imports: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]
def lstm(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    # inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size))
    # x = Embedding(max_features, embed_size, weights=[embedding_matrix],
    #               trainable=False)(inp)
    x = LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
             recurrent_dropout=recurrent_dropout_rate)(input_layer)
    # x = CuDNNLSTM(recurrent_units, return_sequences=True)(x)
    x = Dropout(dropout_rate)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    # x_c = AttentionWeightedAverage()(x)
    # x_a = MaxPooling1D(pool_size=2)(x)
    # x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b])
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model


# bidirectional LSTM 
Author: kermitt2, Project: delft, Lines: 29, Source: models.py
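The commented-out CuDNNLSTM line above marks the intended GPU swap. CuDNNLSTM accepts neither dropout nor recurrent_dropout, so the swap needs a small rearrangement; a sketch of the GPU variant of that recurrent block (an assumption, not taken from delft):

# GPU variant (sketch): regularization moves to an explicit Dropout layer,
# since CuDNNLSTM has no dropout/recurrent_dropout keyword arguments
x = CuDNNLSTM(recurrent_units, return_sequences=True)(input_layer)
x = Dropout(dropout_rate)(x)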

Example 4: build_and_compile_model

# Required imports: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]
def build_and_compile_model():
    seq_input = Input(shape=(lookback_window, 1), name='seq_input', batch_shape=(1, lookback_window, 1))
    x = CuDNNLSTM(LSTMunits, kernel_initializer='glorot_uniform', recurrent_initializer='glorot_uniform', return_sequences=True)(seq_input)
    x = CuDNNLSTM(LSTMunits, kernel_initializer='glorot_uniform', recurrent_initializer='glorot_uniform', return_sequences=False)(x)
    output_1 = Dense(1, activation='linear', name='output_1')(x)

    weathernet = Model(inputs=seq_input, outputs=output_1)
    weathernet.compile(optimizer=keras.optimizers.Adam(lr=1e-3), loss='mse')
    weathernet.summary()
    return weathernet

# Load existing model
Author: produvia, Project: ai-platform, Lines: 14, Source: main.py
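lookback_window and LSTMunits are module-level globals in the source project; the values below are assumptions for illustration. Because batch_shape is fixed to (1, lookback_window, 1), prediction must also be fed one sequence at a time:

import numpy as np

lookback_window = 30   # assumed value; defined elsewhere in the project
LSTMunits = 64         # assumed value; defined elsewhere in the project

weathernet = build_and_compile_model()
x = np.random.rand(1, lookback_window, 1)   # a single sequence, matching batch_shape
y_pred = weathernet.predict(x)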

Example 5: build_and_compile_model

# Required imports: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]
def build_and_compile_model():
    seq_input = Input(shape=(lookback_window, 1), name='seq_input', batch_shape=(1, lookback_window, 1))
    x = CuDNNLSTM(LSTMunits, kernel_initializer='glorot_uniform', recurrent_initializer='glorot_uniform', return_sequences=True)(seq_input)
    x = CuDNNLSTM(LSTMunits, kernel_initializer='glorot_uniform', recurrent_initializer='glorot_uniform', return_sequences=False)(x)
    output_1 = Dense(1, activation='linear', name='output_1')(x)

    weathernet = Model(inputs=seq_input, outputs=output_1)
    weathernet.compile(optimizer=keras.optimizers.Adam(lr=1e-3), loss='mse')
    weathernet.summary()
    return weathernet

# Predict 
Author: produvia, Project: ai-platform, Lines: 14, Source: train_weathernet.py

Example 6: create_model

# Required imports: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    # x = Reshape((self.len_max, self.embed_size, 1))(embedding)

    # Select the recurrent cell class; falls back to GRU
    if self.rnn_type == "LSTM":
        layer_cell = LSTM
    elif self.rnn_type == "GRU":
        layer_cell = GRU
    elif self.rnn_type == "CuDNNLSTM":
        layer_cell = CuDNNLSTM
    elif self.rnn_type == "CuDNNGRU":
        layer_cell = CuDNNGRU
    else:
        layer_cell = GRU

    # Stacked Bi-LSTM layers
    for nrl in range(self.num_rnn_layers):
        x = Bidirectional(layer_cell(units=self.rnn_units,
                                     return_sequences=True,
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(0.32 * 0.1),
                                     recurrent_regularizer=regularizers.l2(0.32)
                                     ))(x)
        x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # Final softmax classification layer
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)
Author: yongzhuo, Project: Keras-TextClassification, Lines: 37, Source: graph.py
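Two notes on this example. First, the cuDNN cells accept no activation argument, so selecting CuDNNLSTM or CuDNNGRU here would require dropping activation='relu' from the cell constructor. Second, the elif chain can be condensed into a dictionary lookup with the same GRU fallback; a sketch:

# Equivalent cell selection via a dict lookup; GRU remains the fallback
RNN_CELLS = {"LSTM": LSTM, "GRU": GRU, "CuDNNLSTM": CuDNNLSTM, "CuDNNGRU": CuDNNGRU}
layer_cell = RNN_CELLS.get(self.rnn_type, GRU)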

Example 7: create_model

# Required imports: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    embedding_output_spatial = SpatialDropout1D(self.dropout_spatial)(x)

    # Select the recurrent cell class; falls back to GRU
    if self.rnn_type == "LSTM":
        layer_cell = LSTM
    elif self.rnn_type == "GRU":
        layer_cell = GRU
    elif self.rnn_type == "CuDNNLSTM":
        layer_cell = CuDNNLSTM
    elif self.rnn_type == "CuDNNGRU":
        layer_cell = CuDNNGRU
    else:
        layer_cell = GRU

    # CNN branch: parallel 1D convolutions with different kernel sizes
    convs = []
    for kernel_size in self.filters:
        conv = Conv1D(self.filters_num,
                      kernel_size=kernel_size,
                      strides=1,
                      padding='SAME',
                      kernel_regularizer=regularizers.l2(self.l2),
                      bias_regularizer=regularizers.l2(self.l2),
                      )(embedding_output_spatial)
        convs.append(conv)
    x = Concatenate(axis=1)(convs)

    # Bi-LSTM (the original paper uses LSTM)
    x = Bidirectional(layer_cell(units=self.rnn_units,
                                 return_sequences=True,
                                 activation='relu',
                                 kernel_regularizer=regularizers.l2(self.l2),
                                 recurrent_regularizer=regularizers.l2(self.l2)
                                 ))(x)
    x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # Final softmax classification layer
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)
Author: yongzhuo, Project: Keras-TextClassification, Lines: 48, Source: graph.py

Example 8: create_model

# Required imports: from keras import layers [as alias]
# Or: from keras.layers import CuDNNLSTM [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network, a bit like an RCNN.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    x = Activation('tanh')(x)

    # entire embedding channels are dropped out instead of the
    # normal Keras embedding dropout, which drops all channels for entire words
    # many of the datasets contain so few words that losing one or more words can alter the emotions completely
    x = SpatialDropout1D(self.dropout_spatial)(x)

    # Select the recurrent cell class; falls back to GRU
    if self.rnn_type == "LSTM":
        layer_cell = LSTM
    elif self.rnn_type == "GRU":
        layer_cell = GRU
    elif self.rnn_type == "CuDNNLSTM":
        layer_cell = CuDNNLSTM
    elif self.rnn_type == "CuDNNGRU":
        layer_cell = CuDNNGRU
    else:
        layer_cell = GRU

    # skip-connection from embedding to output eases gradient-flow and allows access to lower-level features
    # ordering of the way the merge is done is important for consistency with the pretrained model
    lstm_0_output = Bidirectional(layer_cell(units=self.rnn_units,
                                             return_sequences=True,
                                             activation='relu',
                                             kernel_regularizer=regularizers.l2(self.l2),
                                             recurrent_regularizer=regularizers.l2(self.l2)
                                             ), name="bi_lstm_0")(x)
    lstm_1_output = Bidirectional(layer_cell(units=self.rnn_units,
                                             return_sequences=True,
                                             activation='relu',
                                             kernel_regularizer=regularizers.l2(self.l2),
                                             recurrent_regularizer=regularizers.l2(self.l2)
                                             ), name="bi_lstm_1")(lstm_0_output)
    x = concatenate([lstm_1_output, lstm_0_output, x])

    # if return_attention is True in AttentionWeightedAverage, an additional tensor
    # representing the weight at each timestep is returned
    weights = None
    x = AttentionWeightedAverage(name='attlayer', return_attention=self.return_attention)(x)
    if self.return_attention:
        x, weights = x

    x = Dropout(self.dropout)(x)
    # x = Flatten()(x)
    # Final softmax classification layer
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)
Author: yongzhuo, Project: Keras-TextClassification, Lines: 59, Source: graph.py
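AttentionWeightedAverage is a custom attention-pooling layer from the project (DeepMoji-style), not a Keras built-in. When return_attention is enabled it yields the pooled vector together with per-timestep weights, which could be exposed as a second model output; a hedged sketch reusing the names from the method above:

# Hypothetical: expose the attention weights as an extra output for inspection
self.model = Model(self.word_embedding.input, [dense_layer, weights])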


Note: the keras.layers.CuDNNLSTM examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; for distribution and use, please consult each project's license. Do not repost without permission.