This article collects typical usage examples of the Python method keras.layers.GlobalAveragePooling1D. If you are unsure what layers.GlobalAveragePooling1D does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore the other members of the keras.layers module.
The sections below present 15 code examples of layers.GlobalAveragePooling1D, sorted by popularity by default.
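Before the examples, a quick orientation: GlobalAveragePooling1D averages a 3D tensor of shape (batch, steps, features) over the steps axis and returns (batch, features). A minimal standalone sketch (not taken from the examples below; shapes are illustrative):

import numpy as np
from keras.layers import Input, GlobalAveragePooling1D
from keras.models import Model

# Toy model that only applies global average pooling over the time axis.
inp = Input(shape=(10, 4))            # 10 timesteps, 4 features
out = GlobalAveragePooling1D()(inp)   # -> (batch, 4)
model = Model(inp, out)

x = np.random.rand(2, 10, 4).astype("float32")
print(model.predict(x).shape)         # (2, 4)
# Equivalent to a plain mean over axis 1:
assert np.allclose(model.predict(x), x.mean(axis=1), atol=1e-5)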
Example 1: bidLstm_simple
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def bidLstm_simple(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Bidirectional(LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=dropout_rate))(input_layer)
    x = Dropout(dropout_rate)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b])
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model

# bidirectional LSTM with attention layer
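Examples 1, 7, 8, 9, 10 and 11 below share this same signature, so one usage sketch covers the whole family. The argument values here are illustrative assumptions, not values from the source:

# Hypothetical call; every value below is made up for illustration.
model = bidLstm_simple(maxlen=200, embed_size=300, recurrent_units=64,
                       dropout_rate=0.3, recurrent_dropout_rate=0.3,
                       dense_size=32, nb_classes=6)
# Expects pre-computed embeddings of shape (batch, 200, 300) and
# multi-label targets of shape (batch, 6), e.g.:
# model.fit(X_train, y_train, batch_size=32, epochs=3)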
Example 2: fasttext_model
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def fasttext_model(max_len=300,
                   vocabulary_size=20000,
                   embedding_dim=128,
                   num_classes=4):
    model = Sequential()
    # Embedding layer: maps vocabulary indices into embedding_dim dimensions
    model.add(Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, input_length=max_len))
    # Average-pool the embeddings over the whole sequence
    model.add(GlobalAveragePooling1D())
    # Multi-class output over num_classes
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    return model
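A hedged end-to-end sketch of training this fastText-style model on dummy data; the imports and data are assumptions for illustration, not part of the source:

import numpy as np
from keras.utils import to_categorical

model = fasttext_model(max_len=50, vocabulary_size=1000, embedding_dim=32, num_classes=4)
# Dummy integer-encoded sentences and one-hot labels.
X = np.random.randint(0, 1000, size=(64, 50))
y = to_categorical(np.random.randint(0, 4, size=64), num_classes=4)
model.fit(X, y, batch_size=16, epochs=1)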
Example 3: __init__
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def __init__(self, nb_classes, nb_tokens, maxlen,
             nb_head=8, head_size=16, nb_transformer=2,
             embedding_dim=256, embeddings=None, embed_l2=1E-6,
             pos_embed=False, final_dropout_rate=0.15,
             embed_dropout_rate=0.15):
    self.nb_classes = nb_classes
    self.nb_tokens = nb_tokens
    self.maxlen = maxlen
    self.nb_head = nb_head
    self.head_size = head_size
    self.embedding_dim = embedding_dim
    self.nb_transformer = nb_transformer
    if embeddings is not None:
        self.token_embeddings = [embeddings]
    else:
        self.token_embeddings = None
    self.pos_embed = pos_embed
    self.final_dropout_rate = final_dropout_rate
    self.embed_dropout_rate = embed_dropout_rate
    self.pos_embed_layer = Position_Embedding(name='position_embedding')
    self.transformers = [Self_Attention(
        nb_head, head_size, name='self_attention_%d' % i) for i in range(nb_transformer)]
    self.pool = GlobalAveragePooling1D()
    self.invalid_params = {'pos_embed_layer', 'transformers', 'pool'}
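Example 3 shows only the constructor. The layers it stores (pos_embed_layer, transformers, pool) suggest a build step roughly like the sketch below. This is speculative: it assumes Position_Embedding and Self_Attention are sequence-to-sequence layers mapping (batch, steps, dim) to the same shape, and it is not code from the source project:

# Speculative reconstruction of a build() method; not from the source.
def build(self):
    inp = Input(shape=(self.maxlen,))
    x = Embedding(self.nb_tokens, self.embedding_dim,
                  weights=self.token_embeddings)(inp)
    if self.pos_embed:
        x = self.pos_embed_layer(x)   # add positional information
    for attn in self.transformers:
        x = attn(x)                   # stacked self-attention blocks
    x = self.pool(x)                  # GlobalAveragePooling1D over the steps axis
    if self.final_dropout_rate:
        x = Dropout(self.final_dropout_rate)(x)
    out = Dense(self.nb_classes, activation='softmax')(x)
    return Model(inp, out)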
Example 4: cnn_spatial_multi
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def cnn_spatial_multi(self):
    # spatial stream (frozen)
    cnn_spatial = self.cnn_spatial()
    if self.saved_spatial_weights is None:
        print("[ERROR] No saved_spatial_weights weights file!")
    else:
        cnn_spatial.load_weights(self.saved_spatial_weights)
    for layer in cnn_spatial.layers:
        layer.trainable = False
    # build inputs and output
    model = Sequential()
    model.add(TimeDistributed(cnn_spatial, input_shape=self.input_shape_spatial_multi))
    model.add(GlobalAveragePooling1D())
    return model

# CNN model for the temporal stream with multiple inputs
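In Examples 4 and 5, TimeDistributed applies the frozen per-frame CNN to every frame of a clip, yielding a (batch, frames, features) tensor, and GlobalAveragePooling1D then averages the per-frame feature vectors into a single clip-level descriptor. A minimal shape sketch under assumed dimensions (the stand-in frame model below is hypothetical):

from keras.models import Sequential
from keras.layers import Dense, TimeDistributed, GlobalAveragePooling1D

# Any model mapping one frame to a feature vector works as a stand-in.
frame_model = Sequential([Dense(128, input_shape=(2048,))])
clip_model = Sequential([
    TimeDistributed(frame_model, input_shape=(16, 2048)),  # 16 frames per clip
    GlobalAveragePooling1D(),                              # -> (batch, 128)
])
clip_model.summary()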
Example 5: cnn_temporal_multi
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def cnn_temporal_multi(self):
    # temporal stream (frozen)
    cnn_temporal = self.cnn_temporal()
    if self.saved_temporal_weights is None:
        print("[ERROR] No saved_temporal_weights weights file!")
    else:
        cnn_temporal.load_weights(self.saved_temporal_weights)
    for layer in cnn_temporal.layers:
        layer.trainable = False
    # build inputs and output
    model = Sequential()
    model.add(TimeDistributed(cnn_temporal, input_shape=self.input_shape_temporal_multi))
    model.add(GlobalAveragePooling1D())
    return model

# CNN model for the spatial stream
Example 6: create_model
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def create_model(self, hyper_parameters):
    """
    Build the network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding = self.word_embedding.output

    def win_mean(x):
        # Sliding-window mean over the time axis, followed by a max over
        # all window positions (hierarchical pooling).
        res_list = []
        for i in range(self.len_max - self.n_win + 1):
            x_mean = tf.reduce_mean(x[:, i:i + self.n_win, :], axis=1)
            x_mean_dims = tf.expand_dims(x_mean, axis=-1)
            res_list.append(x_mean_dims)
        res_list = tf.concat(res_list, axis=-1)
        gg = tf.reduce_max(res_list, axis=-1)
        return gg

    if self.encode_type == "HIERARCHICAL":
        x = Lambda(win_mean, output_shape=(self.embed_size,))(embedding)
    elif self.encode_type == "MAX":
        x = GlobalMaxPooling1D()(embedding)
    elif self.encode_type == "AVG":
        x = GlobalAveragePooling1D()(embedding)
    elif self.encode_type == "CONCAT":
        x_max = GlobalMaxPooling1D()(embedding)
        x_avg = GlobalAveragePooling1D()(embedding)
        x = Concatenate()([x_max, x_avg])
    else:
        raise RuntimeError("encode_type must be 'MAX', 'AVG', 'CONCAT', 'HIERARCHICAL'")
    output = Dense(self.label, activation=self.activate_classify)(x)
    self.model = Model(inputs=self.word_embedding.input, outputs=output)
    self.model.summary(132)
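The HIERARCHICAL branch is the only non-standard pooling here: it takes the mean inside every length-n_win sliding window, then an element-wise max across window positions. A small numeric sketch of the same computation (standalone TensorFlow 2 in eager mode; len_max=4 and n_win=2 are assumed values):

import tensorflow as tf

x = tf.reshape(tf.range(8, dtype=tf.float32), (1, 4, 2))   # (batch=1, steps=4, feat=2)
len_max, n_win = 4, 2
windows = [tf.reduce_mean(x[:, i:i + n_win, :], axis=1)
           for i in range(len_max - n_win + 1)]             # 3 window means
out = tf.reduce_max(tf.stack(windows, axis=-1), axis=-1)    # max over window positions
print(out.numpy())  # [[5. 6.]] -- the last window wins since values increase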
Example 7: lstm
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def lstm(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix],
    #              trainable=False)(inp)
    x = LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
             recurrent_dropout=dropout_rate)(input_layer)
    #x = CuDNNLSTM(recurrent_units, return_sequences=True)(x)
    x = Dropout(dropout_rate)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b])
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model

# bidirectional LSTM
Example 8: cnn3
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def cnn3(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
            recurrent_dropout=dropout_rate)(input_layer)
    #x = Dropout(dropout_rate)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b])
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Example 9: gru
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def gru(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #input_layer = Input(shape=(maxlen,))
    input_layer = Input(shape=(maxlen, embed_size), )
    #embedding_layer = Embedding(max_features, embed_size,
    #                            weights=[embedding_matrix], trainable=False)(input_layer)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                          recurrent_dropout=recurrent_dropout_rate))(input_layer)
    x = Dropout(dropout_rate)(x)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                          recurrent_dropout=recurrent_dropout_rate))(x)
    #x = AttentionWeightedAverage(maxlen)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b], axis=1)
    #x = Dense(dense_size, activation="relu")(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    output_layer = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(clipvalue=1, clipnorm=1),
                  #optimizer='adam',
                  metrics=['accuracy'])
    return model
Example 10: gru_simple
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def gru_simple(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #input_layer = Input(shape=(maxlen,))
    input_layer = Input(shape=(maxlen, embed_size), )
    #embedding_layer = Embedding(max_features, embed_size,
    #                            weights=[embedding_matrix], trainable=False)(input_layer)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                          recurrent_dropout=dropout_rate))(input_layer)
    #x = AttentionWeightedAverage(maxlen)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b], axis=1)
    #x = Dense(dense_size, activation="relu")(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    output_layer = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(clipvalue=1, clipnorm=1),
                  #optimizer='adam',
                  metrics=['accuracy'])
    return model

# bid GRU + bid LSTM
Example 11: mix1
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def mix1(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #input_layer = Input(shape=(maxlen,))
    input_layer = Input(shape=(maxlen, embed_size), )
    #embedding_layer = Embedding(max_features, embed_size,
    #                            weights=[embedding_matrix], trainable=False)(input_layer)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                          recurrent_dropout=recurrent_dropout_rate))(input_layer)
    x = Dropout(dropout_rate)(x)
    x = Bidirectional(LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=recurrent_dropout_rate))(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    x = concatenate([x_a, x_b])
    x = Dense(dense_size, activation="relu")(x)
    output_layer = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(clipvalue=1, clipnorm=1),
                  #optimizer='adam',
                  metrics=['accuracy'])
    return model

# DPCNN
Example 12: build_model_avt_cnn
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def build_model_avt_cnn(self):
    ######### text-cnn #########
    # BERT embedding
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
    # text CNN
    bert_output_embed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
    concat_x = []
    concat_y = []
    concat_z = []
    for index, filter_size in enumerate(self.filters):
        conv = Conv1D(name='TextCNN_Conv1D_{}'.format(index), filters=int(self.embedding_dim / 2),
                      kernel_size=self.filters[index], padding='valid',
                      kernel_initializer='normal', activation='relu')(bert_output_embed)
        x = GlobalMaxPooling1D(name='TextCNN_MaxPooling1D_{}'.format(index))(conv)
        y = GlobalAveragePooling1D(name='TextCNN_AveragePooling1D_{}'.format(index))(conv)
        z = AttentionWeightedAverage(name='TextCNN_Attention_{}'.format(index))(conv)
        concat_x.append(x)
        concat_y.append(y)
        concat_z.append(z)
    merge_x = Concatenate(axis=1)(concat_x)
    merge_y = Concatenate(axis=1)(concat_y)
    merge_z = Concatenate(axis=1)(concat_z)
    merge_xyz = Concatenate(axis=1)([merge_x, merge_y, merge_z])
    x = Dropout(self.keep_prob)(merge_xyz)
    # final softmax classifier
    dense_layer = Dense(self.label, activation=self.activation)(x)
    output_layers = [dense_layer]
    self.model = Model(bert_inputs, output_layers)
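Example 12 pairs three pooled views of each convolution: max, average, and an attention-weighted average. A reduced sketch of the max/average part with plain Keras layers (the custom AttentionWeightedAverage layer is project-specific and omitted; all shapes and filter sizes below are assumptions):

from keras.layers import Input, Conv1D, GlobalMaxPooling1D, GlobalAveragePooling1D, Concatenate
from keras.models import Model

inp = Input(shape=(128, 768))                      # e.g. a BERT-sized sequence output
pooled = []
for k in (3, 4, 5):                                # illustrative filter sizes
    conv = Conv1D(64, kernel_size=k, activation='relu')(inp)
    pooled.append(GlobalMaxPooling1D()(conv))      # (batch, 64)
    pooled.append(GlobalAveragePooling1D()(conv))  # (batch, 64)
merged = Concatenate(axis=1)(pooled)               # (batch, 6 * 64)
model = Model(inp, merged)
model.summary()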
Example 13: Archi_3CONV64C_1FC256_GAP_f3fd
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def Archi_3CONV64C_1FC256_GAP_f3fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_conv = 640  #-- will be doubled
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = X_input
    for add in range(nb_conv):
        X = conv_bn_relu_drop(X, nbunits=nbunits_conv, kernel_size=3,
                              kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- GAP + 1 FC layer
    X = GlobalAveragePooling1D()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate),
                            dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV64C_1FC256_GAP_f3fd')

#-----------------------------------------------------------------------
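Examples 13-15 differ only in kernel size and convolution width, so one usage sketch covers all three. conv_bn_relu_drop, fc_bn_relu_drop and softmax are helper functions from the source project and must be importable; the dummy data shapes below are assumptions:

import numpy as np

# Hypothetical time-series batch: 100 samples, 24 timesteps, 10 channels.
X_train = np.random.rand(100, 24, 10).astype("float32")
y_train = np.eye(5)[np.random.randint(0, 5, size=100)]    # one-hot, 5 classes

model = Archi_3CONV64C_1FC256_GAP_f3fd(X_train, nbclasses=5)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit(X_train, y_train, batch_size=16, epochs=2)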
Example 14: Archi_3CONV64C_1FC256_GAP_f5fd
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def Archi_3CONV64C_1FC256_GAP_f5fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_conv = 512  #-- will be doubled
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = X_input
    for add in range(nb_conv):
        X = conv_bn_relu_drop(X, nbunits=nbunits_conv, kernel_size=5,
                              kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- GAP + 1 FC layer
    X = GlobalAveragePooling1D()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate),
                            dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV64C_1FC256_GAP_f5fd')

#-----------------------------------------------------------------------
Example 15: Archi_3CONV64C_1FC256_GAP_f9fd
# Required import: from keras import layers
# Or: from keras.layers import GlobalAveragePooling1D
def Archi_3CONV64C_1FC256_GAP_f9fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_conv = 384  #-- will be doubled
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = X_input
    for add in range(nb_conv):
        X = conv_bn_relu_drop(X, nbunits=nbunits_conv, kernel_size=9,
                              kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- GAP + 1 FC layer
    X = GlobalAveragePooling1D()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate),
                            dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV64C_1FC256_GAP_f9fd')

#-----------------------------------------------------------------------