This article collects typical usage examples of the Python method keras.layers.AveragePooling1D. If you have been wondering what layers.AveragePooling1D does, how to call it, or where to find working code, the curated examples below should help. You can also explore further usage examples of the containing module, keras.layers.
The following presents 15 code examples of layers.AveragePooling1D, sorted by popularity by default.
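Before the examples, a minimal sketch of the layer itself may be useful: AveragePooling1D averages a sliding window along the timesteps axis. The shapes and pool size below are illustrative assumptions, not taken from any of the examples.

from keras.layers import Input, AveragePooling1D
from keras.models import Model

inp = Input(shape=(100, 8))               # (batch, steps=100, features=8)
out = AveragePooling1D(pool_size=2)(inp)  # halves the steps axis -> (batch, 50, 8)
Model(inputs=inp, outputs=out).summary()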
Example 1: bidLstm_simple
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
from keras.models import Model
from keras.layers import (Input, Dense, Dropout, Bidirectional, LSTM,
                          GlobalMaxPool1D, GlobalAveragePooling1D, concatenate)

def bidLstm_simple(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    # note: recurrent_dropout_rate is accepted but unused; dropout_rate is reused below
    x = Bidirectional(LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=dropout_rate))(input_layer)
    x = Dropout(dropout_rate)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b])
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
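As a usage sketch, a hypothetical call follows; every argument value here is an assumption chosen for illustration, not a setting from the original repository.

# Illustrative values only; a multi-label head with 6 sigmoid outputs.
model = bidLstm_simple(maxlen=300, embed_size=300, recurrent_units=64,
                       dropout_rate=0.3, recurrent_dropout_rate=0.3,
                       dense_size=32, nb_classes=6)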
Example 2: get_contextual_spatial_gated_input
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
# Requires from keras.layers: Dense, Reshape, AveragePooling1D.
# ReshapeBatchAdhoc and DotMergeAdhoc are custom layers defined elsewhere in
# the source repository; _keras_shape is an internal attribute of older Keras.
def get_contextual_spatial_gated_input(X, conf_dict):
    # X: input to be gated, (None, steps, x_dim)
    # return X' = X * sigmoid(Dense(Average(f(X)))), f is a non-linear function.
    assert len(X._keras_shape) == 3, [X._keras_shape]
    seq_len, x_dim = X._keras_shape[1], X._keras_shape[2]
    gating_hidden_dim = conf_dict['gating_hidden_dim']
    gating_hidden_actv = conf_dict['gating_hidden_actv']
    Xp = ReshapeBatchAdhoc()(X)
    Xp = Dense(gating_hidden_dim, activation=gating_hidden_actv)(Xp)
    #Xp = Lambda(lambda x: x * 0)(Xp)
    Xp = ReshapeBatchAdhoc(mid_dim=seq_len)(Xp)
    Xp = AveragePooling1D(seq_len)(Xp)  # (None, 1, x_dim)
    Xp = Reshape((Xp._keras_shape[-1], ))(Xp)
    Xp = Dense(x_dim, activation='sigmoid')(Xp)
    Xp = Reshape((1, x_dim))(Xp)
    X = DotMergeAdhoc()([X, Xp])
    return X
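The gating formula in the comment above can be traced in plain NumPy. This is a toy re-derivation under assumed shapes and random weights, not the repository's implementation:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

steps, x_dim, hidden = 5, 4, 8
X = np.random.randn(steps, x_dim)      # one sample, (steps, x_dim)
W_f = np.random.randn(x_dim, hidden)   # f: non-linear per-step projection
W_g = np.random.randn(hidden, x_dim)   # dense layer producing the gate

h = np.tanh(X @ W_f)                   # f(X), (steps, hidden)
avg = h.mean(axis=0)                   # the AveragePooling1D step, (hidden,)
gate = sigmoid(avg @ W_g)              # one gate value per feature, (x_dim,)
X_gated = X * gate                     # broadcast over all timesteps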
Example 3: lstm
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def lstm(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix],
    #              trainable=False)(inp)
    x = LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
             recurrent_dropout=dropout_rate)(input_layer)
    #x = CuDNNLSTM(recurrent_units, return_sequences=True)(x)
    x = Dropout(dropout_rate)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b])
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example 4: cnn3
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def cnn3(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
            recurrent_dropout=dropout_rate)(input_layer)
    #x = Dropout(dropout_rate)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b])
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
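Because each MaxPooling1D(pool_size=2) halves the timestep axis, maxlen shrinks by roughly a factor of eight before the global pooling layers. A hypothetical call to trace this (all argument values are assumptions):

model = cnn3(maxlen=400, embed_size=300, recurrent_units=64, dropout_rate=0.3,
             recurrent_dropout_rate=0.3, dense_size=32, nb_classes=6)
# The summary printed by cnn3 shows the steps axis going 400 -> 200 -> 100 -> 50
# before GlobalMaxPool1D / GlobalAveragePooling1D collapse it entirely.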
Example 5: gru
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
# Also requires: from keras.optimizers import RMSprop
def gru(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #input_layer = Input(shape=(maxlen,))
    input_layer = Input(shape=(maxlen, embed_size), )
    #embedding_layer = Embedding(max_features, embed_size,
    #                            weights=[embedding_matrix], trainable=False)(input_layer)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                          recurrent_dropout=recurrent_dropout_rate))(input_layer)
    x = Dropout(dropout_rate)(x)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                          recurrent_dropout=recurrent_dropout_rate))(x)
    #x = AttentionWeightedAverage(maxlen)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b], axis=1)
    #x = Dense(dense_size, activation="relu")(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    output_layer = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(clipvalue=1, clipnorm=1),
                  #optimizer='adam',
                  metrics=['accuracy'])
    return model
Example 6: gru_best
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def gru_best(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #input_layer = Input(shape=(maxlen,))
    input_layer = Input(shape=(maxlen, embed_size), )
    #embedding_layer = Embedding(max_features, embed_size,
    #                            weights=[embedding_matrix], trainable=False)(input_layer)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                          recurrent_dropout=dropout_rate))(input_layer)
    x = Dropout(dropout_rate)(x)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                          recurrent_dropout=dropout_rate))(x)
    #x = AttentionWeightedAverage(maxlen)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a, x_b], axis=1)
    #x = Dense(dense_size, activation="relu")(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    output_layer = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  #optimizer=RMSprop(clipvalue=1, clipnorm=1),
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example 7: Archi_3CONV2AP_1FC256_f33_17_9fd
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
# conv_bn_relu, fc_bn_relu_drop and softmax are helper functions defined
# elsewhere in the source repository; l2 comes from keras.regularizers.
def Archi_3CONV2AP_1FC256_f33_17_9fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=128, kernel_size=33, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=192, kernel_size=17, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=256, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    #-- Flatten + 1 FC layers
    X = Flatten()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_f33_17_9fd')
#-----------------------------------------------------------------------
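All of the Archi_* examples lean on three repository-local helpers that are not shown on this page. As a rough, assumed stand-in so the examples can be traced end to end (the real definitions in the source repository may differ):

# Guessed, minimal stand-ins; not the repository's exact code.
from keras.layers import Conv1D, Dense, Dropout, BatchNormalization, Activation

def conv_bn_relu(X, nbunits, kernel_size, **kwargs):
    X = Conv1D(nbunits, kernel_size, **kwargs)(X)
    X = BatchNormalization()(X)
    return Activation('relu')(X)

def fc_bn_relu_drop(X, nbunits, kernel_regularizer=None, dropout_rate=0.5):
    X = Dense(nbunits, kernel_regularizer=kernel_regularizer)(X)
    X = BatchNormalization()(X)
    X = Activation('relu')(X)
    return Dropout(dropout_rate)(X)

def softmax(X, nbclasses, kernel_regularizer=None):
    return Dense(nbclasses, activation='softmax',
                 kernel_regularizer=kernel_regularizer)(X)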
Example 8: Archi_3CONV2AP_1FC256_f17_9_5fd
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_f17_9_5fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=128, kernel_size=17, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=256, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=384, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    #-- Flatten + 1 FC layers
    X = Flatten()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_f17_9_5fd')
#-----------------------------------------------------------------------
Example 9: Archi_3CONV2AP_1FC256_f9_5_3fd
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_f9_5_3fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_conv = 128  #-- will be doubled
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=nbunits_conv, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=nbunits_conv*2**1, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=nbunits_conv*2**2, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    #-- Flatten + 1 FC layers
    X = Flatten()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_f9_5_3fd')
#-----------------------------------------------------------------------
Example 10: Archi_3CONV2AP_1FC256_f5_3_1fd
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_f5_3_1fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_conv = 128  #-- will be doubled
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=nbunits_conv, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=nbunits_conv*2**1, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=nbunits_conv*2**1, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
    #~ X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    #-- Flatten + 1 FC layers
    X = Flatten()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_f5_3_1fd')
#-----------------------------------------------------------------------
Example 11: Archi_3CONV2AP_1FC256_f3_1_1fd
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_f3_1_1fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_conv = 128  #-- will be doubled
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=nbunits_conv, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=nbunits_conv, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=nbunits_conv, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
    X = Dropout(dropout_rate)(X)
    #-- Flatten + 1 FC layers
    X = Flatten()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_f3_1_1fd')
#-----------------------------------------------------------------------
Example 12: Archi_3CONV2AP_1FC256_GAP_f17_9_5fd
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_GAP_f17_9_5fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=256, kernel_size=17, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=512, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=512, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    #-- Global average pooling + 1 FC layers
    X = GlobalAveragePooling1D()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_GAP_f17_9_5fd')
#-----------------------------------------------------------------------
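The GAP variants (Examples 12-14) swap Flatten for GlobalAveragePooling1D, keeping one averaged value per channel instead of every timestep. A quick shape contrast, with illustrative sizes:

from keras.layers import Input, Flatten, GlobalAveragePooling1D
from keras.models import Model
import numpy as np

inp = Input(shape=(50, 512))
flat = Model(inputs=inp, outputs=Flatten()(inp))
gap = Model(inputs=inp, outputs=GlobalAveragePooling1D()(inp))
x = np.zeros((1, 50, 512), dtype='float32')
print(flat.predict(x).shape)  # (1, 25600): every timestep kept
print(gap.predict(x).shape)   # (1, 512): time axis averaged away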
Example 13: Archi_3CONV2AP_1FC256_GAP_f9_5_3fd
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_GAP_f9_5_3fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=512, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=512, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=512, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    #-- Global average pooling + 1 FC layers
    X = GlobalAveragePooling1D()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_GAP_f9_5_3fd')
#-----------------------------------------------------------------------
Example 14: Archi_3CONV2AP_1FC256_GAP_f5_3_1fd
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
def Archi_3CONV2AP_1FC256_GAP_f5_3_1fd(X, nbclasses):
    #-- get the input sizes
    m, L, depth = X.shape
    input_shape = (L, depth)
    #-- parameters of the architecture
    l2_rate = 1.e-6
    dropout_rate = 0.5
    nb_conv = 3
    nb_fc = 1
    nbunits_fc = 256  #-- will be doubled
    # Define the input placeholder.
    X_input = Input(input_shape)
    #-- nb_conv CONV layers
    X = conv_bn_relu(X_input, nbunits=512, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=768, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
    X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
    X = Dropout(dropout_rate)(X)
    X = conv_bn_relu(X, nbunits=1024, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
    X = Dropout(dropout_rate)(X)
    #-- Global average pooling + 1 FC layers
    X = GlobalAveragePooling1D()(X)
    for add in range(nb_fc):
        X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
    #-- SOFTMAX layer
    out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
    # Create model.
    return Model(inputs=X_input, outputs=out, name='Archi_3CONV2AP_1FC256_GAP_f5_3_1fd')
#-----------------------------------------------------------------------
Example 15: get_contextual_temporal_gated_input
# Required import: from keras import layers [as alias]
# Or: from keras.layers import AveragePooling1D [as alias]
# ReshapeBatchAdhoc, DotSumMergeAdhoc and DotMergeAdhoc are custom layers
# defined elsewhere in the source repository.
def get_contextual_temporal_gated_input(X, conf_dict):
    # X: input to be gated, (None, steps, x_dim)
    # return X' = X * c * softmax(X.Average(f(X))), f is a non-linear function.
    assert len(X._keras_shape) == 3, [X._keras_shape]
    seq_len, x_dim = X._keras_shape[1], X._keras_shape[2]
    gating_hidden_dim = conf_dict['gating_hidden_dim']
    gating_hidden_actv = conf_dict['gating_hidden_actv']
    scale = conf_dict['scale']
    nl_choice = conf_dict['nl_choice']
    Xp = ReshapeBatchAdhoc()(X)
    Xp = Dense(gating_hidden_dim, activation=gating_hidden_actv)(Xp)
    Xp = ReshapeBatchAdhoc(mid_dim=seq_len)(Xp)
    Xp = AveragePooling1D(seq_len)(Xp)  # (None, 1, x_dim)
    Xp = Reshape((Xp._keras_shape[-1], ))(Xp)
    if nl_choice == 'nl':
        Xp = Dense(x_dim, activation='relu', use_bias=True)(Xp)  # `bias=True` in the original Keras 1 code
    elif nl_choice == 'bn+nl':
        Xp = BatchNormalization()(Xp)
        Xp = Dense(x_dim, activation='relu', use_bias=True)(Xp)
    elif nl_choice == 'bn+l':
        Xp = BatchNormalization()(Xp)
        Xp = Dense(x_dim, activation='linear', use_bias=True)(Xp)
    else:
        assert False, 'unknown nl_choice: %s' % nl_choice
    Xp = Reshape((1, x_dim))(Xp)  # (None, 1, x_dim)
    Xp = DotSumMergeAdhoc()([X, Xp])  # (None, steps, 1)
    if True:  # debug: sigmoid gate
        Xp = Activation('sigmoid')(Xp)  # (None, steps, 1)
    else:
        # the branch below replaces the sigmoid with a softmax over timesteps
        Xp = Reshape((Xp._keras_shape[1], ))(Xp)  # (None, steps)
        Xp = Activation('softmax')(Xp)  # (None, steps)
        Xp = Reshape((Xp._keras_shape[-1], 1))(Xp)  # (None, steps, 1)
    X = DotMergeAdhoc(scale=scale)([X, Xp])  # (None, steps, x_dim)
    return X
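For contrast with Example 2's per-feature gate, the function above produces one scalar gate per timestep. A toy NumPy sketch under assumed shapes and random weights, ignoring the scale factor and the repository's custom layers:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

steps, x_dim, hidden = 5, 4, 8
X = np.random.randn(steps, x_dim)
W_f = np.random.randn(x_dim, hidden)   # f: per-step projection
W_c = np.random.randn(hidden, x_dim)   # dense layer producing the context

ctx = np.maximum(0.0, np.tanh(X @ W_f).mean(axis=0) @ W_c)  # 'nl' branch: relu context, (x_dim,)
gate = sigmoid(X @ ctx)                # dot with each timestep -> (steps,)
X_gated = X * gate[:, None]            # one scalar gate per timestep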