This article collects typical usage examples of the Python method keras.layers.GlobalMaxPooling1D. If you are unsure what layers.GlobalMaxPooling1D does, how to call it, or what working code looks like, the curated examples below should help. You can also explore the containing module, keras.layers, for further usage examples.
Twelve code examples of layers.GlobalMaxPooling1D are shown below, ordered by popularity.
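As a quick primer before the examples: GlobalMaxPooling1D takes the maximum over the temporal (steps) dimension, reducing a (batch, steps, features) tensor to (batch, features). A minimal self-contained sketch (the 10-step, 4-feature shape is chosen purely for illustration):

import numpy as np
from keras.layers import Input, GlobalMaxPooling1D
from keras.models import Model

inp = Input(shape=(10, 4))         # (batch, steps=10, features=4)
out = GlobalMaxPooling1D()(inp)    # max over the 10 steps -> (batch, 4)
model = Model(inp, out)

x = np.random.rand(2, 10, 4).astype('float32')
assert model.predict(x).shape == (2, 4)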
Example 1: VariousConv1D

# Required module: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling1D [as alias]
def VariousConv1D(x, filter_sizes, num_filters, name_prefix=''):
    '''
    Layer wrapper function for Conv1D layers with various filter sizes
    # Arguments:
        x: tensor, shape = (B, T, E)
        filter_sizes: list of int, filter size of each Conv1D
        num_filters: list of int, number of filters of each Conv1D
        name_prefix: str, layer name prefix
    # Returns:
        out: tensor, shape = (B, sum(num_filters))
    '''
    conv_outputs = []
    for filter_size, n_filter in zip(filter_sizes, num_filters):
        conv_name = '{}VariousConv1D/Conv1D/filter_size_{}'.format(name_prefix, filter_size)
        pooling_name = '{}VariousConv1D/MaxPooling/filter_size_{}'.format(name_prefix, filter_size)
        conv_out = Conv1D(n_filter, filter_size, name=conv_name)(x)  # (B, time_steps, n_filter)
        conv_out = GlobalMaxPooling1D(name=pooling_name)(conv_out)  # (B, n_filter)
        conv_outputs.append(conv_out)
    concatenate_name = '{}VariousConv1D/Concatenate'.format(name_prefix)
    out = Concatenate(name=concatenate_name)(conv_outputs)
    return out
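A minimal sketch of wiring this wrapper into a model; the (100, 128) input shape and the filter settings below are placeholders chosen for illustration:

from keras.layers import Input, Conv1D, GlobalMaxPooling1D, Concatenate
from keras.models import Model

inp = Input(shape=(100, 128))                      # (B, T=100, E=128)
feat = VariousConv1D(inp, filter_sizes=[3, 4, 5],  # three parallel Conv1D branches
                     num_filters=[64, 64, 64])     # concatenated output: (B, 192)
model = Model(inp, feat)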
Example 2: create_model

# Required module: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling1D [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    x = SpatialDropout1D(self.dropout_spatial)(x)
    x = AttentionSelf(self.word_embedding.embed_size)(x)
    x = GlobalMaxPooling1D()(x)
    x = Dropout(self.dropout)(x)
    # x = Flatten()(x)
    # finally, the softmax classifier
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)
Example 3: build_model_text_cnn

# Required module: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling1D [as alias]
def build_model_text_cnn(self):
    ######### text-cnn #########
    # bert embedding
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
    # text cnn
    bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
    concat_out = []
    for index, filter_size in enumerate(self.filters):
        x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
                   filters=int(self.embedding_dim / 2),
                   kernel_size=filter_size,
                   padding='valid',
                   kernel_initializer='normal',
                   activation='relu')(bert_output_emmbed)
        x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
        concat_out.append(x)
    x = Concatenate(axis=1)(concat_out)
    x = Dropout(self.keep_prob)(x)
    # finally, the softmax classifier
    dense_layer = Dense(self.label, activation=self.activation)(x)
    output_layers = [dense_layer]
    self.model = Model(bert_inputs, output_layers)
Example 4: build_cnn

# Required module: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling1D [as alias]
def build_cnn(input_shape, output_dim, nb_filter):
    clf = Sequential()
    # Keras 1.x-style arguments (nb_filter, filter_length, border_mode, subsample_length)
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode="valid",
                          activation="relu",
                          subsample_length=1,
                          input_shape=input_shape))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf
# just one filter
Example 5: build_cnn_char

# Required module: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling1D [as alias]
def build_cnn_char(input_dim, output_dim, nb_filter):
    clf = Sequential()
    clf.add(Embedding(input_dim,
                      32,                   # character embedding size
                      input_length=maxlen,  # `maxlen` is a module-level constant in the source
                      dropout=0.2))
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=3,
                          border_mode="valid",
                          activation="relu",
                          subsample_length=1))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf
# just one filter
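Examples 4 and 5 use the legacy Keras 1 argument names. On Keras 2 the equivalent convolution layer would be spelled roughly as follows (a sketch, not from the original repos; nb_filter is a placeholder standing in for the function argument above):

from keras.layers import Conv1D

nb_filter = 64  # placeholder value for illustration
# Keras 2 spelling of the Convolution1D call used above
conv = Conv1D(filters=nb_filter, kernel_size=3,
              padding='valid', activation='relu', strides=1)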
Example 6: ConvolutionLayer

# Required module: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling1D [as alias]
def ConvolutionLayer(input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20,
                     word_trainable=False, vocab_sz=None, embedding_matrix=None,
                     word_embedding_dim=100, hidden_dim=20, act='relu', init='ones'):
    x = Input(shape=(input_shape,), name='input')
    z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,), name="embedding",
                  weights=[embedding_matrix], trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation=act,
                             strides=1,
                             kernel_initializer=init)(z)
        conv = GlobalMaxPooling1D()(conv)
        conv_blocks.append(conv)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y, name='classifier')
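A minimal sketch of calling this builder; the vocabulary size, sequence length, and random embedding matrix below are placeholders (a real embedding_matrix would hold pretrained vectors):

import numpy as np

vocab_sz, seq_len, emb_dim = 5000, 60, 100
emb = np.random.rand(vocab_sz, emb_dim).astype('float32')
clf = ConvolutionLayer(input_shape=seq_len, n_classes=3,
                       vocab_sz=vocab_sz, embedding_matrix=emb,
                       word_embedding_dim=emb_dim)
clf.compile(optimizer='adam', loss='categorical_crossentropy')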
Example 7: get_umtmum_embedding

# Required module: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling1D [as alias]
def get_umtmum_embedding(umtmum_input, path_num, timestamps, length, user_latent, item_latent,
                         path_attention_layer_1, path_attention_layer_2):
    # one shared Conv1D applied to every path of the metapath input
    conv_umtmum = Conv1D(filters=128,
                         kernel_size=4,
                         activation='relu',
                         kernel_regularizer=l2(0.0),
                         kernel_initializer='glorot_uniform',
                         padding='valid',
                         strides=1,
                         name='umtmum_conv')
    # slice out path 0 of shape (timestamps, length), convolve, and max-pool to (B, 128)
    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': 0})(umtmum_input)
    output = conv_umtmum(path_input)
    output = GlobalMaxPooling1D()(output)
    output = Dropout(0.5)(output)
    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': i})(umtmum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umtmum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])
    # stack the per-path vectors and max-pool across paths -> (B, 128)
    output = Reshape((path_num, 128))(output)
    #output = path_attention(user_latent, item_latent, output, 128, 64, path_attention_layer_1, path_attention_layer_2, 'umtmum')
    output = GlobalMaxPooling1D()(output)
    return output
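Examples 7 through 10 follow the same pattern and rely on a module-level slice function from the source project (it shadows the Python builtin), passed to Lambda. It is not shown on this page, but judging from the call signature it extracts one path from the 4-D input, roughly like this hypothetical reconstruction:

# Hypothetical reconstruction of the `slice` helper assumed above:
# given x of shape (batch, path_num, timestamps, length), pick path `index`.
def slice(x, index):
    return x[:, index, :, :]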
Example 8: get_umtm_embedding

# Required module: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling1D [as alias]
def get_umtm_embedding(umtm_input, path_num, timestamps, length, user_latent, item_latent,
                       path_attention_layer_1, path_attention_layer_2):
    conv_umtm = Conv1D(filters=128,
                       kernel_size=4,
                       activation='relu',
                       kernel_regularizer=l2(0.0),
                       kernel_initializer='glorot_uniform',
                       padding='valid',
                       strides=1,
                       name='umtm_conv')
    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': 0})(umtm_input)
    output = GlobalMaxPooling1D()(conv_umtm(path_input))
    output = Dropout(0.5)(output)
    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': i})(umtm_input)
        tmp_output = GlobalMaxPooling1D()(conv_umtm(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])
    output = Reshape((path_num, 128))(output)
    #output = path_attention(user_latent, item_latent, output, 128, 64, path_attention_layer_1, path_attention_layer_2, 'umtm')
    output = GlobalMaxPooling1D()(output)
    return output
Example 9: get_umum_embedding

# Required module: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling1D [as alias]
def get_umum_embedding(umum_input, path_num, timestamps, length, user_latent, item_latent,
                       path_attention_layer_1, path_attention_layer_2):
    conv_umum = Conv1D(filters=128,
                       kernel_size=4,
                       activation='relu',
                       kernel_regularizer=l2(0.0),
                       kernel_initializer='glorot_uniform',
                       padding='valid',
                       strides=1,
                       name='umum_conv')
    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': 0})(umum_input)
    output = GlobalMaxPooling1D()(conv_umum(path_input))
    output = Dropout(0.5)(output)
    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': i})(umum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])
    output = Reshape((path_num, 128))(output)
    #output = path_attention(user_latent, item_latent, output, 128, 64, path_attention_layer_1, path_attention_layer_2, 'umum')
    output = GlobalMaxPooling1D()(output)
    return output
Example 10: get_uuum_embedding

# Required module: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling1D [as alias]
def get_uuum_embedding(umum_input, path_num, timestamps, length, user_latent, item_latent,
                       path_attention_layer_1, path_attention_layer_2):
    conv_umum = Conv1D(filters=128,
                       kernel_size=4,
                       activation='relu',
                       kernel_regularizer=l2(0.0),
                       kernel_initializer='glorot_uniform',
                       padding='valid',
                       strides=1,
                       name='uuum_conv')
    path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': 0})(umum_input)
    output = GlobalMaxPooling1D()(conv_umum(path_input))
    output = Dropout(0.5)(output)
    for i in range(1, path_num):
        path_input = Lambda(slice, output_shape=(timestamps, length), arguments={'index': i})(umum_input)
        tmp_output = GlobalMaxPooling1D()(conv_umum(path_input))
        tmp_output = Dropout(0.5)(tmp_output)
        output = concatenate([output, tmp_output])
    output = Reshape((path_num, 128))(output)
    #output = path_attention(user_latent, item_latent, output, 128, 64, path_attention_layer_1, path_attention_layer_2, 'uuum')
    output = GlobalMaxPooling1D()(output)
    return output
Example 11: ConvolutionLayer

# Required module: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling1D [as alias]
def ConvolutionLayer(x, input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20,
                     word_trainable=False, vocab_sz=None, embedding_matrix=None,
                     word_embedding_dim=100, hidden_dim=100, act='relu', init='ones'):
    if embedding_matrix is not None:
        z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,),
                      weights=[embedding_matrix], trainable=word_trainable)(x)
    else:
        z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,), trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation=act,
                             strides=1,
                             kernel_initializer=init)(z)
        conv = GlobalMaxPooling1D()(conv)
        conv_blocks.append(conv)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y)
Example 12: build_model

# Required module: from keras import layers [as alias]
# Or: from keras.layers import GlobalMaxPooling1D [as alias]
def build_model(vocab_size, embedding_dim, sequence_length, embedding_matrix):
    sequence_input = Input(shape=(sequence_length,), dtype='int32')
    embedding_layer = Embedding(input_dim=vocab_size,
                                output_dim=embedding_dim,
                                weights=[embedding_matrix],
                                input_length=sequence_length,
                                trainable=False,
                                name="embedding")(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedding_layer)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(20, activation='softmax')(x)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model

Author: PacktPublishing, Project: Deep-Learning-Quick-Reference, Lines of code: 25, Source file: newsgroup_classifier_pretrained_word_embeddings.py
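A minimal sketch of invoking this builder; the sizes and the zero matrix below are placeholders (the original loads pretrained word vectors), and the 1000-step sequence length keeps all three strided pooling stages valid:

import numpy as np

vocab_size, embedding_dim, sequence_length = 20000, 100, 1000
embedding_matrix = np.zeros((vocab_size, embedding_dim), dtype='float32')
model = build_model(vocab_size, embedding_dim, sequence_length, embedding_matrix)
model.summary()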