本文整理汇总了Python中keras.layers.MaxPooling1D方法的典型用法代码示例。如果您正苦于以下问题:Python layers.MaxPooling1D方法的具体用法?Python layers.MaxPooling1D怎么用?Python layers.MaxPooling1D使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.layers
的用法示例。
在下文中一共展示了layers.MaxPooling1D方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create_model
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def create_model(time_window_size, metric):
    """Build and compile a Conv1D + LSTM sequence model.

    The network maps a univariate window of length ``time_window_size``
    (shape ``(time_window_size, 1)``) through a 1-D convolution, max pooling
    and an LSTM, then regresses back to ``time_window_size`` linear outputs.

    :param time_window_size: int, length of the input window (also the
        number of output units).
    :param metric: Keras metric (name or callable) tracked during training.
    :return: compiled ``Sequential`` model.
    """
    model = Sequential()
    model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu',
                     input_shape=(time_window_size, 1)))
    model.add(MaxPooling1D(pool_size=4))
    model.add(LSTM(64))
    model.add(Dense(units=time_window_size, activation='linear'))
    # MSE loss: the model is trained as a regressor over the window.
    # (Removed two commented-out duplicate compile() variants.)
    model.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
    print(model.summary())
    return model
示例2: downsampling
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def downsampling(inputs, pool_type='max'):
    """Halve the temporal length of ``inputs`` (stride-2 downsampling).

    In addition, downsampling with stride 2 essentially doubles the effective
    coverage (i.e., coverage in the original document) of the convolution
    kernel; therefore, after going through downsampling L times, associations
    among words within a distance in the order of 2L can be represented.
    Thus, deep pyramid CNN is computationally efficient for representing
    long-range associations and so more global information.

    Reference: https://github.com/zonetrooper32/VDCNN/blob/keras_version/vdcnn.py

    :param inputs: tensor, shape (batch, steps, channels)
    :param pool_type: str, select 'max', 'k-max' or 'conv'; any other value
        falls back to 'max'.
    :return: tensor with roughly half as many time steps
    """
    if pool_type == 'k-max':
        output = k_max_pooling(top_k=int(K.int_shape(inputs)[1] / 2))(inputs)
    elif pool_type == 'conv':
        # BUG FIX: the original called Conv1D without the mandatory `filters`
        # argument, which raises a TypeError. Keep the channel count of the
        # input so the branch is shape-compatible with the pooling branches.
        output = Conv1D(filters=K.int_shape(inputs)[-1], kernel_size=3,
                        strides=2, padding='SAME')(inputs)
    else:
        # 'max' and any unrecognised pool_type: strided max pooling.
        output = MaxPooling1D(pool_size=3, strides=2, padding='SAME')(inputs)
    return output
示例3: bidLstm_simple
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def bidLstm_simple(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    """Bidirectional LSTM classifier over pre-embedded sequences.

    The recurrent outputs are summarised by concatenating global max pooling
    and global average pooling, then passed through a dense + sigmoid head
    for multi-label prediction.
    """
    input_layer = Input(shape=(maxlen, embed_size), )
    seq = Bidirectional(LSTM(recurrent_units, return_sequences=True,
                             dropout=dropout_rate,
                             recurrent_dropout=dropout_rate))(input_layer)
    seq = Dropout(dropout_rate)(seq)
    # Two complementary global summaries of the sequence.
    pooled_max = GlobalMaxPool1D()(seq)
    pooled_avg = GlobalAveragePooling1D()(seq)
    merged = concatenate([pooled_max, pooled_avg])
    hidden = Dense(dense_size, activation="relu")(merged)
    hidden = Dropout(dropout_rate)(hidden)
    predictions = Dense(nb_classes, activation="sigmoid")(hidden)
    model = Model(inputs=input_layer, outputs=predictions)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
# bidirectional LSTM with attention layer
示例4: cnn
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def cnn(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    """Three conv+pool stages followed by a GRU and a sigmoid dense head."""
    input_layer = Input(shape=(maxlen, embed_size), )
    features = Dropout(dropout_rate)(input_layer)
    # Three identical convolution / halving-pool stages.
    for _ in range(3):
        features = Conv1D(filters=recurrent_units, kernel_size=2,
                          padding='same', activation='relu')(features)
        features = MaxPooling1D(pool_size=2)(features)
    features = GRU(recurrent_units)(features)
    features = Dropout(dropout_rate)(features)
    features = Dense(dense_size, activation="relu")(features)
    predictions = Dense(nb_classes, activation="sigmoid")(features)
    model = Model(inputs=input_layer, outputs=predictions)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
示例5: cnn2_best
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def cnn2_best(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    """Three stacked Conv1D layers (no pooling) feeding a GRU classifier head."""
    input_layer = Input(shape=(maxlen, embed_size), )
    features = Dropout(dropout_rate)(input_layer)
    # Three identical convolutions; sequence length is preserved ('same').
    for _ in range(3):
        features = Conv1D(filters=recurrent_units, kernel_size=2,
                          padding='same', activation='relu')(features)
    features = GRU(recurrent_units, return_sequences=False,
                   dropout=dropout_rate,
                   recurrent_dropout=dropout_rate)(features)
    features = Dense(dense_size, activation="relu")(features)
    predictions = Dense(nb_classes, activation="sigmoid")(features)
    model = Model(inputs=input_layer, outputs=predictions)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
示例6: cnn2
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def cnn2(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    """Conv1D x3 + GRU classifier.

    NOTE(review): this is byte-for-byte identical in behaviour to
    ``cnn2_best`` above — consider consolidating.
    """
    input_layer = Input(shape=(maxlen, embed_size), )
    hidden = Dropout(dropout_rate)(input_layer)
    hidden = Conv1D(filters=recurrent_units, kernel_size=2,
                    padding='same', activation='relu')(hidden)
    hidden = Conv1D(filters=recurrent_units, kernel_size=2,
                    padding='same', activation='relu')(hidden)
    hidden = Conv1D(filters=recurrent_units, kernel_size=2,
                    padding='same', activation='relu')(hidden)
    hidden = GRU(recurrent_units, return_sequences=False,
                 dropout=dropout_rate,
                 recurrent_dropout=dropout_rate)(hidden)
    hidden = Dense(dense_size, activation="relu")(hidden)
    output = Dense(nb_classes, activation="sigmoid")(hidden)
    model = Model(inputs=input_layer, outputs=output)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
示例7: conv
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def conv(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    """Character-CNN style stack: six Conv1D layers with three max-pool stages.

    NOTE: migrated from the removed Keras 1 argument names
    (``nb_filter``/``filter_length``/``border_mode``/``pool_length``) to the
    Keras 2 API (``filters``/``kernel_size``/``padding``/``pool_size``) used
    by every other model in this file; layer topology is unchanged.
    """
    filter_kernels = [7, 7, 5, 5, 3, 3]
    input_layer = Input(shape=(maxlen, embed_size), )
    x = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[0],
               padding='valid', activation='relu')(input_layer)
    x = MaxPooling1D(pool_size=3)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[1],
               padding='valid', activation='relu')(x)
    x = MaxPooling1D(pool_size=3)(x)
    # Four more convolutions without intermediate pooling.
    for kernel in filter_kernels[2:]:
        x = Conv1D(filters=recurrent_units, kernel_size=kernel,
                   padding='valid', activation='relu')(x)
    x = MaxPooling1D(pool_size=3)(x)
    x = Flatten()(x)
    z = Dropout(0.5)(Dense(dense_size, activation='relu')(x))
    output = Dense(nb_classes, activation="sigmoid")(z)
    model = Model(inputs=input_layer, outputs=output)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# LSTM + conv
示例8: byte_block
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def byte_block(in_layer, nb_filter=(64, 100), filter_length=(3, 3), subsample=(2, 1), pool_length=(2, 2)):
    """Stack of strided tanh convolutions, each optionally max-pooled,
    reduced by global max pooling into a 128-d relu feature vector.

    The i-th stage uses nb_filter[i] filters of width filter_length[i] with
    stride subsample[i]; a pool of size pool_length[i] is applied only when
    that entry is truthy.
    """
    x = in_layer
    for filters, kernel, stride, pool in zip(nb_filter, filter_length,
                                             subsample, pool_length):
        x = Conv1D(filters=filters,
                   kernel_size=kernel,
                   padding='valid',
                   activation='tanh',
                   strides=stride)(x)
        if pool:
            x = MaxPooling1D(pool_size=pool)(x)
    x = GlobalMaxPool1D()(x)
    x = Dense(128, activation='relu')(x)
    return x
示例9: __call__
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def __call__(self, inputs):
    """Apply a Conv1D(128, 11) -> relu -> MaxPooling1D(4) -> Flatten ->
    Dense(nb_hidden) -> relu -> Dropout head to inputs[0] and finish the
    model via self._build."""
    hidden = kl.Conv1D(128, 11,
                       kernel_initializer=self.init,
                       kernel_regularizer=kr.L1L2(self.l1_decay, self.l2_decay))(inputs[0])
    hidden = kl.Activation('relu')(hidden)
    hidden = kl.MaxPooling1D(4)(hidden)
    hidden = kl.Flatten()(hidden)
    hidden = kl.Dense(self.nb_hidden,
                      kernel_initializer=self.init,
                      kernel_regularizer=kr.L1L2(l1=self.l1_decay, l2=self.l2_decay))(hidden)
    hidden = kl.Activation('relu')(hidden)
    hidden = kl.Dropout(self.dropout)(hidden)
    return self._build(inputs, hidden)
示例10: build_model
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def build_model(vocab_size, embedding_dim, sequence_length, embedding_matrix):
    """Text CNN over frozen pretrained embeddings, 20-way softmax output.

    ``embedding_matrix`` supplies the (vocab_size, embedding_dim) weights;
    the embedding layer is not trainable.
    """
    sequence_input = Input(shape=(sequence_length,), dtype='int32')
    embedded = Embedding(input_dim=vocab_size,
                         output_dim=embedding_dim,
                         weights=[embedding_matrix],
                         input_length=sequence_length,
                         trainable=False,
                         name="embedding")(sequence_input)
    net = Conv1D(128, 5, activation='relu')(embedded)
    net = MaxPooling1D(5)(net)
    net = Conv1D(128, 5, activation='relu')(net)
    net = MaxPooling1D(5)(net)
    net = Conv1D(128, 5, activation='relu')(net)
    net = GlobalMaxPooling1D()(net)
    net = Dense(128, activation='relu')(net)
    preds = Dense(20, activation='softmax')(net)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
示例11: build_model
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def build_model(vocab_size, embedding_dim, sequence_length):
    """Text CNN with an embedding layer learned from scratch, 20-way softmax."""
    token_ids = Input(shape=(sequence_length,), dtype='int32')
    features = Embedding(input_dim=vocab_size,
                         output_dim=embedding_dim,
                         input_length=sequence_length,
                         name="embedding")(token_ids)
    # Two conv+pool stages, then a final conv summarised by global max pool.
    for _ in range(2):
        features = Conv1D(128, 5, activation='relu')(features)
        features = MaxPooling1D(5)(features)
    features = Conv1D(128, 5, activation='relu')(features)
    features = GlobalMaxPooling1D()(features)
    features = Dense(128, activation='relu')(features)
    preds = Dense(20, activation='softmax')(features)
    model = Model(token_ids, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
示例12: wdcnn
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def wdcnn(filters, kernerl_size, strides, conv_padding, pool_padding, pool_size, BatchNormal):
    """Append one WDCNN conv block (Conv1D [+ BatchNorm] + relu + max pool).

    NOTE(review): this function mutates a module-level global ``model``
    (a Sequential instantiated elsewhere in the original file) rather than
    receiving it as a parameter — it will raise NameError if called before
    that global exists.

    :param filters: number of convolution filters, int
    :param kernerl_size: convolution kernel size, int (sic: original spelling)
    :param strides: stride, int
    :param conv_padding: 'same' or 'valid'
    :param pool_padding: 'same' or 'valid'
    :param pool_size: pooling window size, int
    :param BatchNormal: whether to insert BatchNormalization, bool
    :return: the (global) model with the block appended
    """
    model.add(Conv1D(filters=filters, kernel_size=kernerl_size, strides=strides,
                     padding=conv_padding, kernel_regularizer=l2(1e-4)))
    if BatchNormal:
        model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=pool_size, padding=pool_padding))
    return model
# 实例化序贯模型
示例13: __init__
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def __init__(self):
    """Load the RCT classifier ensemble: an SVM, a set of CNN models
    (Keras .h5 weight files), their vectorizers, and calibration data.

    Keras is imported lazily here (and re-exported via ``global``) so the
    heavyweight dependency is only loaded when this class is instantiated.
    """
    from keras.preprocessing import sequence
    from keras.models import load_model
    from keras.models import Sequential
    from keras.preprocessing import sequence
    from keras.layers import Dense, Dropout, Activation, Lambda, Input, merge, Flatten
    from keras.layers import Embedding
    from keras.layers import Convolution1D, MaxPooling1D
    from keras import backend as K
    from keras.models import Model
    from keras.regularizers import l2
    # Re-export the lazy imports at module scope for use by other methods.
    global sequence, load_model, Sequential, Dense, Dropout, Activation, Lambda, Input, merge, Flatten
    global Embedding, Convolution1D, MaxPooling1D, K, Model, l2
    # Linear SVM over hashed n-grams.
    self.svm_clf = MiniClassifier(os.path.join(robotreviewer.DATA_ROOT, 'rct/rct_svm_weights.npz'))
    # One CNN classifier per saved weight file (ensemble).
    cnn_weight_files = glob.glob(os.path.join(robotreviewer.DATA_ROOT, 'rct/*.h5'))
    self.cnn_clfs = [load_model(cnn_weight_file) for cnn_weight_file in cnn_weight_files]
    self.svm_vectorizer = HashingVectorizer(binary=False, ngram_range=(1, 1), stop_words='english')
    self.cnn_vectorizer = KerasVectorizer(vocab_map_file=os.path.join(robotreviewer.DATA_ROOT, 'rct/cnn_vocab_map.pck'), stop_words='english')
    # Calibration constants and logistic-regression calibrators for the
    # combined svm+cnn (and +ptyp) scores.
    with open(os.path.join(robotreviewer.DATA_ROOT, 'rct/rct_model_calibration.json'), 'r') as f:
        self.constants = json.load(f)
    self.calibration_lr = {}
    with open(os.path.join(robotreviewer.DATA_ROOT, 'rct/svm_cnn_ptyp_calibration.pck'), 'rb') as f:
        self.calibration_lr['svm_cnn_ptyp'] = pickle.load(f)
    with open(os.path.join(robotreviewer.DATA_ROOT, 'rct/svm_cnn_calibration.pck'), 'rb') as f:
        self.calibration_lr['svm_cnn'] = pickle.load(f)
示例14: cnn_model
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def cnn_model(max_len=400,
              vocabulary_size=20000,
              embedding_dim=128,
              hidden_dim=128,
              num_filters=512,
              filter_sizes="3,4,5",
              num_classses=4,
              dropout=0.5):
    """Kim-style text CNN: parallel conv branches of several kernel sizes,
    each max-pooled and flattened, concatenated into a softmax classifier.

    ``filter_sizes`` is a comma-separated string of kernel sizes; a string
    without a comma falls back to the default [3, 4, 5].
    """
    print("Creating text CNN Model...")
    inputs = Input(shape=(max_len,), dtype='int32')
    embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim,
                          input_length=max_len, name="embedding")(inputs)
    sizes = filter_sizes.split(",") if "," in filter_sizes else [3, 4, 5]
    # One convolution branch per kernel size.
    branches = []
    for size in sizes:
        branch = Convolution1D(filters=num_filters,
                               kernel_size=int(size),
                               strides=1,
                               padding='valid',
                               activation='relu')(embedding)
        branch = MaxPooling1D()(branch)
        branches.append(Flatten()(branch))
    merged = Concatenate()(branches) if len(branches) > 1 else branches[0]
    head = Dropout(dropout)(merged)
    head = Dense(hidden_dim, activation='relu')(head)
    output = Dense(num_classses, activation='softmax')(head)
    model = Model(inputs=inputs, outputs=output)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    return model
示例15: conv1d
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import MaxPooling1D [as 别名]
def conv1d(max_len, embed_size):
    '''
    CNN without Batch Normalisation.

    Five parallel Conv1D branches (kernel sizes 2..6) over the input, each
    relu-activated and max-pooled down to a single step, concatenated and
    fed to a sigmoid unit for binary classification. The five copy-pasted
    branches of the original are folded into one loop; topology unchanged.

    :param max_len: maximum sentence numbers, default=200
    :param embed_size: ELMo embeddings dimension, default=1024
    :return: CNN without BN model
    '''
    filter_sizes = [2, 3, 4, 5, 6]
    num_filters = 128
    drop = 0.5
    inputs = Input(shape=(max_len, embed_size), dtype='float32')
    pooled = []
    for size in filter_sizes:
        conv = Conv1D(num_filters, kernel_size=size)(inputs)
        act = Activation('relu')(conv)
        # pool_size = max_len - size reduces the branch to one time step
        # ('valid' conv output length is max_len - size + 1).
        pooled.append(MaxPooling1D(pool_size=max_len - size)(act))
    concatenated_tensor = Concatenate()(pooled)
    flatten = Flatten()(concatenated_tensor)
    dropout = Dropout(drop)(flatten)
    output = Dense(units=1, activation='sigmoid')(dropout)
    model = Model(inputs=inputs, outputs=output)
    model.summary()
    model.compile(loss='binary_crossentropy', metrics=['acc'], optimizer='adam')
    return model