

Python layers.MaxPooling1D Method Code Examples

This article collects typical usage examples of the keras.layers.MaxPooling1D method in Python. If you are wondering what layers.MaxPooling1D does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from keras.layers, the module this method belongs to.


The following presents 15 code examples of the layers.MaxPooling1D method, sorted by popularity by default.
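
Before the project examples, here is a minimal standalone sketch (illustrative, not taken from any of the projects below) of what MaxPooling1D itself does: it slides a window of pool_size timesteps over the input and keeps the per-channel maximum, so a (batch, steps, features) tensor comes out with roughly steps / pool_size timesteps.

# Minimal standalone sketch of MaxPooling1D (illustrative, not from the projects below).
import numpy as np
from keras.models import Sequential
from keras.layers import MaxPooling1D

model = Sequential()
model.add(MaxPooling1D(pool_size=2, input_shape=(8, 4)))  # 8 timesteps, 4 channels

x = np.random.rand(1, 8, 4).astype('float32')
print(model.predict(x).shape)  # (1, 4, 4): the time axis is halved, channels unchanged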

Example 1: create_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def create_model(time_window_size, metric):
    model = Sequential()

    model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu',
                     input_shape=(time_window_size, 1)))
    model.add(MaxPooling1D(pool_size=4))

    model.add(LSTM(64))

    model.add(Dense(units=time_window_size, activation='linear'))

    model.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
    # model.compile(optimizer="sgd", loss="mse", metrics=[metric])

    print(model.summary())
    return model
Developer: chen0040, Project: keras-anomaly-detection, Lines: 20, Source: recurrent.py
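
A hedged usage sketch for the function above; the window size and metric below are illustrative, not taken from the keras-anomaly-detection project:

# Hypothetical call to create_model (arguments are illustrative).
model = create_model(time_window_size=64, metric='mae')
# The network maps a window of 64 scalar readings, shaped (batch, 64, 1),
# to a reconstruction of the same window, shaped (batch, 64).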

Example 2: downsampling

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def downsampling(inputs, pool_type='max'):
    """
        In addition, downsampling with stride 2 essentially doubles the effective coverage 
        (i.e., coverage in the original document) of the convolution kernel; 
        therefore, after going through downsampling L times, 
        associations among words within a distance in the order of 2L can be represented. 
        Thus, deep pyramid CNN is computationally efficient for representing long-range associations 
        and so more global information. 
        Reference: https://github.com/zonetrooper32/VDCNN/blob/keras_version/vdcnn.py
    :param inputs: tensor,
    :param pool_type: str, select 'max', 'k-max' or 'conv'
    :return: tensor,
    """
    if pool_type == 'max':
        output = MaxPooling1D(pool_size=3, strides=2, padding='SAME')(inputs)
    elif pool_type == 'k-max':
        output = k_max_pooling(top_k=int(K.int_shape(inputs)[1]/2))(inputs)
    elif pool_type == 'conv':
        # Conv1D requires `filters`; keep the input channel count so only the time axis shrinks.
        output = Conv1D(filters=K.int_shape(inputs)[-1], kernel_size=3, strides=2, padding='SAME')(inputs)
    else:
        output = MaxPooling1D(pool_size=3, strides=2, padding='SAME')(inputs)
    return output 
Developer: yongzhuo, Project: Keras-TextClassification, Lines: 24, Source: graph.py
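
A hedged usage sketch; the input shape is illustrative, and k_max_pooling is a custom layer defined elsewhere in the Keras-TextClassification project, so only the 'max' branch is exercised here:

# Hypothetical usage of downsampling (shapes are illustrative).
from keras.layers import Input
inp = Input(shape=(100, 64))               # 100 timesteps, 64 channels
out = downsampling(inp, pool_type='max')   # pool_size=3, strides=2, 'SAME' padding
# The time axis is roughly halved: out has shape (None, 50, 64).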

Example 3: bidLstm_simple

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def bidLstm_simple(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Bidirectional(LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=dropout_rate))(input_layer)
    x = Dropout(dropout_rate)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a,x_b])
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', 
        optimizer='adam', 
        metrics=['accuracy'])
    return model


# bidirectional LSTM with attention layer 
Developer: kermitt2, Project: delft, Lines: 27, Source: models.py

Example 4: cnn

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def cnn(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Dropout(dropout_rate)(input_layer) 
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = GRU(recurrent_units)(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()  
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model 
Developer: kermitt2, Project: delft, Lines: 21, Source: models.py

Example 5: cnn2_best

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def cnn2_best(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Dropout(dropout_rate)(input_layer) 
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = GRU(recurrent_units, return_sequences=False, dropout=dropout_rate,
                           recurrent_dropout=dropout_rate)(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()  
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model 
Developer: kermitt2, Project: delft, Lines: 22, Source: models.py

Example 6: cnn2

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def cnn2(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Dropout(dropout_rate)(input_layer) 
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    #x = MaxPooling1D(pool_size=2)(x)
    x = GRU(recurrent_units, return_sequences=False, dropout=dropout_rate,
                           recurrent_dropout=dropout_rate)(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()  
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model 
Developer: kermitt2, Project: delft, Lines: 22, Source: models.py

Example 7: conv

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def conv(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    filter_kernels = [7, 7, 5, 5, 3, 3]
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    conv = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[0], padding='valid', activation='relu')(input_layer)
    conv = MaxPooling1D(pool_size=3)(conv)
    conv1 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[1], padding='valid', activation='relu')(conv)
    conv1 = MaxPooling1D(pool_size=3)(conv1)
    conv2 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[2], padding='valid', activation='relu')(conv1)
    conv3 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[3], padding='valid', activation='relu')(conv2)
    conv4 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[4], padding='valid', activation='relu')(conv3)
    conv5 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[5], padding='valid', activation='relu')(conv4)
    conv5 = MaxPooling1D(pool_size=3)(conv5)
    conv5 = Flatten()(conv5)
    z = Dropout(0.5)(Dense(dense_size, activation='relu')(conv5))
    #x = GlobalMaxPool1D()(x)
    x = Dense(nb_classes, activation="sigmoid")(z)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()  
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model


# LSTM + conv 
Developer: kermitt2, Project: delft, Lines: 27, Source: models.py

Example 8: byte_block

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def byte_block(in_layer, nb_filter=(64, 100), filter_length=(3, 3), subsample=(2, 1), pool_length=(2, 2)):
    block = in_layer
    for i in range(len(nb_filter)):
        block = Conv1D(filters=nb_filter[i],
                       kernel_size=filter_length[i],
                       padding='valid',
                       activation='tanh',
                       strides=subsample[i])(block)
        # block = BatchNormalization()(block)
        # block = Dropout(0.1)(block)
        if pool_length[i]:
            block = MaxPooling1D(pool_size=pool_length[i])(block)

    # block = Lambda(max_1d, output_shape=(nb_filter[-1],))(block)
    block = GlobalMaxPool1D()(block)
    block = Dense(128, activation='relu')(block)
    return block 
Developer: echowei, Project: DeepTraffic, Lines: 19, Source: iscx2012_cnn_rnn_5class.py
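
A hedged usage sketch for byte_block; the input length, channel count, and class count are illustrative, not taken from the DeepTraffic project:

# Hypothetical usage of byte_block (shapes and class count are illustrative).
from keras.layers import Input, Dense
from keras.models import Model

inp = Input(shape=(1000, 1))    # e.g. a sequence of 1000 byte values, one channel
features = byte_block(inp)      # two conv/pool stages, then a 128-unit dense feature vector
out = Dense(5, activation='softmax')(features)
model = Model(inputs=inp, outputs=out)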

Example 9: __call__

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def __call__(self, inputs):
    x = inputs[0]

    kernel_regularizer = kr.L1L2(self.l1_decay, self.l2_decay)
    x = kl.Conv1D(128, 11,
                  kernel_initializer=self.init,
                  kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.MaxPooling1D(4)(x)

    x = kl.Flatten()(x)

    kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
    x = kl.Dense(self.nb_hidden,
                 kernel_initializer=self.init,
                 kernel_regularizer=kernel_regularizer)(x)
    x = kl.Activation('relu')(x)
    x = kl.Dropout(self.dropout)(x)

    return self._build(inputs, x)
Developer: cangermueller, Project: deepcpg, Lines: 22, Source: dna.py

Example 10: build_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def build_model(vocab_size, embedding_dim, sequence_length, embedding_matrix):

    sequence_input = Input(shape=(sequence_length,), dtype='int32')
    embedding_layer = Embedding(input_dim=vocab_size,
                                output_dim=embedding_dim,
                                weights=[embedding_matrix],
                                input_length=sequence_length,
                                trainable=False,
                                name="embedding")(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedding_layer)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(20, activation='softmax')(x)

    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model 
Developer: PacktPublishing, Project: Deep-Learning-Quick-Reference, Lines: 25, Source: newsgroup_classifier_pretrained_word_embeddings.py

Example 11: build_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def build_model(vocab_size, embedding_dim, sequence_length):
    sequence_input = Input(shape=(sequence_length,), dtype='int32')
    embedding_layer = Embedding(input_dim=vocab_size,
                                output_dim=embedding_dim,
                                input_length=sequence_length,
                                name="embedding")(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedding_layer)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(20, activation='softmax')(x)

    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model 
Developer: PacktPublishing, Project: Deep-Learning-Quick-Reference, Lines: 22, Source: newsgroup_classifier_word_embeddings.py

Example 12: wdcnn

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def wdcnn(filters, kernerl_size, strides, conv_padding, pool_padding,  pool_size, BatchNormal):
    """wdcnn層神經元

    :param filters: 卷積核的數目,整數
    :param kernerl_size: 卷積核的尺寸,整數
    :param strides: 步長,整數
    :param conv_padding: 'same','valid'
    :param pool_padding: 'same','valid'
    :param pool_size: 池化層核尺寸,整數
    :param BatchNormal: 是否Batchnormal,布爾值
    :return: model
    """
    model.add(Conv1D(filters=filters, kernel_size=kernerl_size, strides=strides,
                     padding=conv_padding, kernel_regularizer=l2(1e-4)))
    if BatchNormal:
        model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=pool_size, padding=pool_padding))
    return model

# instantiate the Sequential model
Developer: AaronCosmos, Project: wdcnn_bearning_fault_diagnosis, Lines: 23, Source: main.py
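
Note that wdcnn adds layers to a module-level Sequential model that the original script creates beforehand (the trailing "instantiate the Sequential model" comment refers to it). A hedged usage sketch with illustrative parameter values:

# Hypothetical usage of wdcnn; `model` must exist at module level before the call,
# and all parameter values below are illustrative.
from keras.models import Sequential
model = Sequential()
model = wdcnn(filters=16, kernerl_size=64, strides=16, conv_padding='same',
              pool_padding='valid', pool_size=2, BatchNormal=True)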

Example 13: __init__

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def __init__(self):
        from keras.preprocessing import sequence
        from keras.models import load_model
        from keras.models import Sequential
        from keras.layers import Dense, Dropout, Activation, Lambda, Input, merge, Flatten
        from keras.layers import Embedding
        from keras.layers import Convolution1D, MaxPooling1D
        from keras import backend as K
        from keras.models import Model
        from keras.regularizers import l2
        global sequence, load_model, Sequential, Dense, Dropout, Activation, Lambda, Input, merge, Flatten
        global Embedding, Convolution1D, MaxPooling1D, K, Model, l2
        self.svm_clf = MiniClassifier(os.path.join(robotreviewer.DATA_ROOT, 'rct/rct_svm_weights.npz'))
        cnn_weight_files = glob.glob(os.path.join(robotreviewer.DATA_ROOT, 'rct/*.h5'))
        self.cnn_clfs = [load_model(cnn_weight_file) for cnn_weight_file in cnn_weight_files]
        self.svm_vectorizer = HashingVectorizer(binary=False, ngram_range=(1, 1), stop_words='english')
        self.cnn_vectorizer = KerasVectorizer(vocab_map_file=os.path.join(robotreviewer.DATA_ROOT, 'rct/cnn_vocab_map.pck'), stop_words='english')
        with open(os.path.join(robotreviewer.DATA_ROOT, 'rct/rct_model_calibration.json'), 'r') as f:
            self.constants = json.load(f)

        self.calibration_lr = {}
        with open(os.path.join(robotreviewer.DATA_ROOT, 'rct/svm_cnn_ptyp_calibration.pck'), 'rb') as f:
            self.calibration_lr['svm_cnn_ptyp'] = pickle.load(f)

        with open(os.path.join(robotreviewer.DATA_ROOT, 'rct/svm_cnn_calibration.pck'), 'rb') as f:
            self.calibration_lr['svm_cnn'] = pickle.load(f) 
Developer: ijmarshall, Project: robotreviewer, Lines: 29, Source: rct_robot.py

Example 14: cnn_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def cnn_model(max_len=400,
              vocabulary_size=20000,
              embedding_dim=128,
              hidden_dim=128,
              num_filters=512,
              filter_sizes="3,4,5",
              num_classes=4,
              dropout=0.5):
    print("Creating text CNN Model...")
    # a tensor
    inputs = Input(shape=(max_len,), dtype='int32')
    # emb
    embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim,
                          input_length=max_len, name="embedding")(inputs)
    # convolution block
    if "," in filter_sizes:
        filter_sizes = filter_sizes.split(",")
    else:
        filter_sizes = [3, 4, 5]
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=int(sz),
                             strides=1,
                             padding='valid',
                             activation='relu')(embedding)
        conv = MaxPooling1D()(conv)
        conv = Flatten()(conv)
        conv_blocks.append(conv)
    conv_concate = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    dropout_layer = Dropout(dropout)(conv_concate)
    output = Dense(hidden_dim, activation='relu')(dropout_layer)
    output = Dense(num_classes, activation='softmax')(output)
    # model
    model = Model(inputs=inputs, outputs=output)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    return model 
Developer: shibing624, Project: text-classifier, Lines: 40, Source: deep_model.py

Example 15: conv1d

# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling1D [as alias]
def conv1d(max_len, embed_size):
    '''
    CNN without Batch Normalisation.
    :param max_len: maximum number of sentences, default=200
    :param embed_size: ELMo embeddings dimension, default=1024
    :return: CNN without BN model
    '''
    filter_sizes = [2, 3, 4, 5, 6]
    num_filters = 128
    drop = 0.5
    inputs = Input(shape=(max_len,embed_size), dtype='float32')

    conv_0 = Conv1D(num_filters, kernel_size=(filter_sizes[0]))(inputs)
    act_0 = Activation('relu')(conv_0)
    conv_1 = Conv1D(num_filters, kernel_size=(filter_sizes[1]))(inputs)
    act_1 = Activation('relu')(conv_1)
    conv_2 = Conv1D(num_filters, kernel_size=(filter_sizes[2]))(inputs)
    act_2 = Activation('relu')(conv_2)
    conv_3 = Conv1D(num_filters, kernel_size=(filter_sizes[3]))(inputs)
    act_3 = Activation('relu')(conv_3)
    conv_4 = Conv1D(num_filters, kernel_size=(filter_sizes[4]))(inputs)
    act_4 = Activation('relu')(conv_4)

    maxpool_0 = MaxPooling1D(pool_size=(max_len - filter_sizes[0]))(act_0)
    maxpool_1 = MaxPooling1D(pool_size=(max_len - filter_sizes[1]))(act_1)
    maxpool_2 = MaxPooling1D(pool_size=(max_len - filter_sizes[2]))(act_2)
    maxpool_3 = MaxPooling1D(pool_size=(max_len - filter_sizes[3]))(act_3)
    maxpool_4 = MaxPooling1D(pool_size=(max_len - filter_sizes[4]))(act_4)

    concatenated_tensor = Concatenate()([maxpool_0, maxpool_1, maxpool_2, maxpool_3, maxpool_4])
    flatten = Flatten()(concatenated_tensor)
    dropout = Dropout(drop)(flatten)
    output = Dense(units=1, activation='sigmoid')(dropout)

    model = Model(inputs=inputs, outputs=output)
    #model = multi_gpu_model(model, gpus=gpus)
    model.summary()
    model.compile(loss='binary_crossentropy', metrics=['acc'], optimizer='adam')
    return model 
Developer: GateNLP, Project: semeval2019-hyperpartisan-bertha-von-suttner, Lines: 41, Source: CNN_elmo.py
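
A hedged usage sketch for conv1d, using the defaults mentioned in its docstring:

# Hypothetical usage of conv1d (the shapes follow the docstring defaults).
model = conv1d(max_len=200, embed_size=1024)
# The model expects pre-computed ELMo embeddings of shape (batch, 200, 1024)
# and outputs one sigmoid probability per document.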


Note: the keras.layers.MaxPooling1D examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; for distribution and use, please refer to the corresponding project's License. Do not reproduce without permission.