

Python layers.SpatialDropout1D Method Code Examples

This article compiles typical usage examples of the Python method keras.layers.SpatialDropout1D. If you are wondering how exactly to use layers.SpatialDropout1D, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from its containing module, keras.layers.


The sections below present 15 code examples of the layers.SpatialDropout1D method, sorted by popularity by default.
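Before the collected examples, here is a minimal, self-contained sketch of the pattern most of them share: SpatialDropout1D placed directly after an Embedding layer, so that entire embedding channels are dropped rather than individual elements. The vocabulary size, sequence length, and layer dimensions are arbitrary placeholder values, not taken from any of the cited projects.

# Minimal usage sketch (placeholder dimensions, not from the examples below)
from keras.layers import Input, Embedding, SpatialDropout1D, GlobalMaxPooling1D, Dense
from keras.models import Model

inputs = Input(shape=(100,))                              # sequence of 100 token ids
x = Embedding(input_dim=10000, output_dim=128)(inputs)    # output shape: (batch, 100, 128)
x = SpatialDropout1D(rate=0.2)(x)                         # drops entire 1D feature maps (channels)
x = GlobalMaxPooling1D()(x)
outputs = Dense(1, activation='sigmoid')(x)

model = Model(inputs, outputs)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

Unlike plain Dropout, which zeroes individual activations, SpatialDropout1D zeroes whole channels along the feature axis, which tends to work better when adjacent timesteps are strongly correlated, as they are with word embeddings.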

Example 1: CapsuleNet

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def CapsuleNet(n_capsule = 10, n_routings = 5, capsule_dim = 16,
     n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(170,))
    x = Embedding(21099, 300,  trainable=True)(inputs)        
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name = 'concatenate')(x)
    x = Dropout(dropout_rate)(x)
#     fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model 
Developer: WeavingWong, Project: DigiX_HuaWei_Population_Age_Attribution_Predict, Lines of code: 24, Source file: models.py

Example 2: CapsuleNet_v2

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def CapsuleNet_v2(n_capsule = 10, n_routings = 5, capsule_dim = 16,
     n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(200,))
    x = Embedding(20000, 300,  trainable=True)(inputs)        
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name = 'concatenate')(x)
    x = Dropout(dropout_rate)(x)
#     fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model 
Developer: WeavingWong, Project: DigiX_HuaWei_Population_Age_Attribution_Predict, Lines of code: 24, Source file: models.py

Example 3: create_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
        """
            構建神經網絡
        :param hyper_parameters:json,  hyper parameters of network
        :return: tensor, moedl
        """
        super().create_model(hyper_parameters)
        x = self.word_embedding.output
        x = SpatialDropout1D(self.dropout_spatial)(x)
        x = AttentionSelf(self.word_embedding.embed_size)(x)
        x = GlobalMaxPooling1D()(x)
        x = Dropout(self.dropout)(x)
        # x = Flatten()(x)
        # finally, the softmax classifier
        dense_layer = Dense(self.label, activation=self.activate_classify)(x)
        output = [dense_layer]
        self.model = Model(self.word_embedding.input, output)
        self.model.summary(120) 
Developer: yongzhuo, Project: Keras-TextClassification, Lines of code: 20, Source file: graph.py

Example 4: build_model_text_cnn

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def build_model_text_cnn(self):
        #########    text-cnn    #########
        # bert embedding
        bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
        # text cnn
        bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
        concat_out = []
        for index, filter_size in enumerate(self.filters):
            x = Conv1D(name='TextCNN_Conv1D_{}'.format(index), filters=int(self.embedding_dim/2), kernel_size=self.filters[index], padding='valid', kernel_initializer='normal', activation='relu')(bert_output_emmbed)
            x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
            concat_out.append(x)
        x = Concatenate(axis=1)(concat_out)
        x = Dropout(self.keep_prob)(x)

        # finally, the softmax classifier
        dense_layer = Dense(self.label, activation=self.activation)(x)
        output_layers = [dense_layer]
        self.model = Model(bert_inputs, output_layers) 
Developer: yongzhuo, Project: nlp_xiaojiang, Lines of code: 20, Source file: keras_bert_classify_text_cnn.py

Example 5: keras_dropout

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def keras_dropout(layer, rate):
    """
    Keras dropout layer.
    """

    from keras import layers

    input_dim = len(layer.input.shape)
    if input_dim == 2:
        return layers.SpatialDropout1D(rate)
    elif input_dim == 3:
        return layers.SpatialDropout2D(rate)
    elif input_dim == 4:
        return layers.SpatialDropout3D(rate)
    else:
        return layers.Dropout(rate) 
Developer: microsoft, Project: nni, Lines of code: 18, Source file: layers.py

Example 6: Token_Embedding

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def Token_Embedding(x, input_dim, output_dim, embed_weights=None,
                    mask_zero=False, input_length=None, dropout_rate=0,
                    embed_l2=1E-6, name='', time_distributed=False, **kwargs):
    """
    Basic token embedding layer, also included some dropout layer.
    """
    embed_reg = L1L2(l2=embed_l2) if embed_l2 != 0 else None
    embed_layer = Embedding(input_dim=input_dim,
                            output_dim=output_dim,
                            weights=embed_weights,
                            mask_zero=mask_zero,
                            input_length=input_length,
                            embeddings_regularizer=embed_reg,
                            name=name)
    if time_distributed:
        embed = TimeDistributed(embed_layer)(x)
    else:
        embed = embed_layer(x)
    # entire embedding channels are dropped out instead of the
    # normal Keras embedding dropout, which drops all channels for entire words
    # many of the datasets contain so few words that losing one or more words can alter the emotions completely
    if dropout_rate != 0:
        embed = SpatialDropout1D(dropout_rate)(embed)
    return embed 
Developer: stevewyl, Project: nlp_toolkit, Lines of code: 26, Source file: embedding.py

Example 7: dummy_1_build_fn

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def dummy_1_build_fn(input_shape=(1,)):
    model = Sequential(
        [
            Embedding(input_dim=9999, output_dim=200, input_length=100, trainable=True),
            SpatialDropout1D(rate=0.5),
            Flatten(),
            Dense(100, activation="relu"),
            Dense(1, activation="sigmoid"),
        ]
    )
    model.compile(
        optimizer=RMSprop(lr=0.02, decay=0.001),
        loss=mean_absolute_error,
        metrics=["mean_absolute_error"],
    )
    return model 
Developer: HunterMcGushion, Project: hyperparameter_hunter, Lines of code: 18, Source file: test_keras_helper.py

Example 8: create_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
        """
            構建神經網絡
        :param hyper_parameters:json,  hyper parameters of network
        :return: tensor, moedl
        """
        super().create_model(hyper_parameters)
        embedding_output = self.word_embedding.output
        x = Lambda(lambda x : x[:, 0:1, :])(embedding_output)  # take the CLS token
        # # text cnn
        # bert_output_emmbed = SpatialDropout1D(rate=self.dropout)(embedding_output)
        # concat_out = []
        # for index, filter_size in enumerate(self.filters):
        #     x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
        #                filters= self.filters_num, # int(K.int_shape(embedding_output)[-1]/self.len_max),
        #                strides=1,
        #                kernel_size=self.filters[index],
        #                padding='valid',
        #                kernel_initializer='normal',
        #                activation='relu')(bert_output_emmbed)
        #     x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
        #     concat_out.append(x)
        # x = Concatenate(axis=1)(concat_out)
        # x = Dropout(self.dropout)(x)
        x = Flatten()(x)
        # finally, the softmax classifier
        dense_layer = Dense(self.label, activation=self.activate_classify)(x)
        output_layers = [dense_layer]
        self.model = Model(self.word_embedding.input, output_layers)
        self.model.summary(120) 
Developer: yongzhuo, Project: Keras-TextClassification, Lines of code: 32, Source file: graph.py

Example 9: word_level

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def word_level(self):
        x_input_word = Input(shape=(self.len_max, self.embed_size))
        # x = SpatialDropout1D(self.dropout_spatial)(x_input_word)
        x = Bidirectional(GRU(units=self.rnn_units,
                              return_sequences=True,
                              activation='relu',
                              kernel_regularizer=regularizers.l2(self.l2),
                              recurrent_regularizer=regularizers.l2(self.l2)))(x_input_word)
        out_sent = AttentionSelf(self.rnn_units*2)(x)
        model = Model(x_input_word, out_sent)
        return model 
Developer: yongzhuo, Project: Keras-TextClassification, Lines of code: 13, Source file: graph.py

Example 10: sentence_level

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def sentence_level(self):
        x_input_sen = Input(shape=(self.len_max, self.rnn_units*2))
        # x = SpatialDropout1D(self.dropout_spatial)(x_input_sen)
        output_doc = Bidirectional(GRU(units=self.rnn_units*2,
                              return_sequences=True,
                              activation='relu',
                              kernel_regularizer=regularizers.l2(self.l2),
                              recurrent_regularizer=regularizers.l2(self.l2)))(x_input_sen)
        output_doc_att = AttentionSelf(self.word_embedding.embed_size)(output_doc)
        model = Model(x_input_sen, output_doc_att)
        return model 
Developer: yongzhuo, Project: Keras-TextClassification, Lines of code: 13, Source file: graph.py

Example 11: create_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
        """
            構建神經網絡
        :param hyper_parameters:json,  hyper parameters of network
        :return: tensor, moedl
        """
        super().create_model(hyper_parameters)
        embedding_output = self.word_embedding.output
        embedding_output_spatial = SpatialDropout1D(self.dropout_spatial)(embedding_output)

        # first, the region embedding layer
        conv_1 = Conv1D(self.filters[0][0],
                        kernel_size=1,
                        strides=1,
                        padding='SAME',
                        kernel_regularizer=l2(self.l2),
                        bias_regularizer=l2(self.l2),
                        activation=self.activation_conv,
                        )(embedding_output_spatial)
        block = ReLU()(conv_1)

        for filters_block in self.filters:
            for j in range(filters_block[1]-1):
                # conv + short-cut
                block_mid = self.convolutional_block(block, units=filters_block[0])
                block = shortcut_conv(block, block_mid, shortcut=True)
            # here: conv + max-pooling
            block_mid = self.convolutional_block(block, units=filters_block[0])
            block = shortcut_pool(block, block_mid, filters=filters_block[0], pool_type=self.pool_type, shortcut=True)

        block = k_max_pooling(top_k=self.top_k)(block)
        block = Flatten()(block)
        block = Dropout(self.dropout)(block)
        # fully connected layers
        # block_fully = Dense(2048, activation='tanh')(block)
        # output = Dense(2048, activation='tanh')(block_fully)
        output = Dense(self.label, activation=self.activate_classify)(block)
        self.model = Model(inputs=self.word_embedding.input, outputs=output)
        self.model.summary(120) 
Developer: yongzhuo, Project: Keras-TextClassification, Lines of code: 41, Source file: graph.py

Example 12: create_model_gru

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model_gru(self, hyper_parameters):
        """
            構建神經網絡, bi-gru + capsule
        :param hyper_parameters:json,  hyper parameters of network
        :return: tensor, moedl
        """
        super().create_model(hyper_parameters)
        embedding = self.word_embedding.output
        embed_layer = SpatialDropout1D(self.dropout)(embedding)
        x_bi = Bidirectional(GRU(self.filters_num,
                              activation='relu',
                              dropout=self.dropout,
                              recurrent_dropout=self.dropout,
                              return_sequences=True))(embed_layer)
        # a single capsule layer
        capsule = Capsule_bojone(num_capsule=self.num_capsule,
                              dim_capsule=self.dim_capsule,
                              routings=self.routings,
                              kernel_size=(3, 1),
                              share_weights=True)(x_bi)

        # # multi-layer pooling
        # conv_pools = []
        # for filter in self.filters:
        #     capsule = Capsule_bojone(num_capsule=self.num_capsule,
        #                              dim_capsule=self.dim_capsule,
        #                              routings=self.routings,
        #                              kernel_size=(filter, 1),
        #                              share_weights=True)(x_bi)
        #     conv_pools.append(capsule)
        # capsule = Concatenate(axis=-1)(conv_pools)

        capsule = Flatten()(capsule)
        capsule = Dropout(self.dropout)(capsule)
        output = Dense(self.label, activation=self.activate_classify)(capsule)
        self.model = Model(inputs=self.word_embedding.input, outputs=output)
        self.model.summary(120) 
Developer: yongzhuo, Project: Keras-TextClassification, Lines of code: 39, Source file: graph.py

Example 13: create_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
        """
            構建神經網絡
        :param hyper_parameters:json,  hyper parameters of network
        :return: tensor, moedl
        """
        super().create_model(hyper_parameters)
        embedding_output = self.word_embedding.output
        # x = embedding_output
        x = Lambda(lambda x : x[:, -2:-1, :])(embedding_output)  # take the CLS token
        # # text cnn
        # bert_output_emmbed = SpatialDropout1D(rate=self.dropout)(embedding_output)
        # concat_out = []
        # for index, filter_size in enumerate(self.filters):
        #     x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
        #                filters= self.filters_num, # int(K.int_shape(embedding_output)[-1]/self.len_max),
        #                strides=1,
        #                kernel_size=self.filters[index],
        #                padding='valid',
        #                kernel_initializer='normal',
        #                activation='relu')(bert_output_emmbed)
        #     x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
        #     concat_out.append(x)
        # x = Concatenate(axis=1)(concat_out)
        # x = Dropout(self.dropout)(x)
        x = Flatten()(x)
        # finally, the softmax classifier
        dense_layer = Dense(self.label, activation=self.activate_classify)(x)
        output_layers = [dense_layer]
        self.model = Model(self.word_embedding.input, output_layers)
        self.model.summary(120) 
Developer: yongzhuo, Project: Keras-TextClassification, Lines of code: 33, Source file: graph.py

Example 14: create_model

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
        """
            構建神經網絡
        :param hyper_parameters:json,  hyper parameters of network
        :return: tensor, moedl
        """
        super().create_model(hyper_parameters)
        embedding_output = self.word_embedding.output
        x = Lambda(lambda x : x[:, 0:1, :])(embedding_output)  # take the CLS token
        # # text cnn
        # bert_output_emmbed = SpatialDropout1D(rate=self.dropout)(embedding_output)
        # concat_out = []
        # for index, filter_size in enumerate(self.filters):
        #     x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
        #                filters= self.filters_num, # int(K.int_shape(embedding_output)[-1]/self.len_max),
        #                strides=1,
        #                kernel_size=self.filters[index],
        #                padding='valid',
        #                kernel_initializer='normal',
        #                activation='relu')(bert_output_emmbed)
        #     x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
        #     concat_out.append(x)
        # x = Concatenate(axis=1)(concat_out)
        # x = Dropout(self.dropout)(x)
        x = Flatten()(x)
        # finally, the softmax classifier
        dense_layer = Dense(self.label, activation=self.activate_classify)(x)
        output_layers = [dense_layer]
        self.model = Model(self.word_embedding.input, output_layers)
        self.model.summary(132) 
Developer: yongzhuo, Project: Keras-TextClassification, Lines of code: 32, Source file: graph.py

Example 15: test_dropout

# Required module import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def test_dropout():
    layer_test(layers.Dropout,
               kwargs={'rate': 0.5},
               input_shape=(3, 2))

    layer_test(layers.Dropout,
               kwargs={'rate': 0.5, 'noise_shape': [3, 1]},
               input_shape=(3, 2))

    layer_test(layers.Dropout,
               kwargs={'rate': 0.5, 'noise_shape': [None, 1]},
               input_shape=(3, 2))

    layer_test(layers.SpatialDropout1D,
               kwargs={'rate': 0.5},
               input_shape=(2, 3, 4))

    for data_format in ['channels_last', 'channels_first']:
        for shape in [(4, 5), (4, 5, 6)]:
            if data_format == 'channels_last':
                input_shape = (2,) + shape + (3,)
            else:
                input_shape = (2, 3) + shape
            layer_test(layers.SpatialDropout2D if len(shape) == 2 else layers.SpatialDropout3D,
                       kwargs={'rate': 0.5,
                               'data_format': data_format},
                       input_shape=input_shape)

            # Test invalid use cases
            with pytest.raises(ValueError):
                layer_test(layers.SpatialDropout2D if len(shape) == 2 else layers.SpatialDropout3D,
                           kwargs={'rate': 0.5,
                                   'data_format': 'channels_middle'},
                           input_shape=input_shape) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 36, Source file: core_test.py


Note: The keras.layers.SpatialDropout1D examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.