This article collects typical usage examples of the Python method keras.layers.SpatialDropout1D. If you are wondering how layers.SpatialDropout1D is used in practice, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of the module it belongs to, keras.layers.
The following shows 15 code examples of the layers.SpatialDropout1D method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
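Before the examples, here is a minimal, self-contained sketch of what SpatialDropout1D does (the toy vocabulary size, sequence length, and embedding width below are illustrative only): unlike plain Dropout, which masks individual activations, SpatialDropout1D zeroes out entire feature channels across all timesteps of a 3D (batch, timesteps, channels) tensor, which is why it is typically placed right after an Embedding layer.

from keras import layers, models

# Toy model: embed integer token ids, then drop whole embedding channels.
inputs = layers.Input(shape=(10,))                          # 10 token ids per sample
x = layers.Embedding(input_dim=50, output_dim=8)(inputs)    # -> (batch, 10, 8)
x = layers.SpatialDropout1D(rate=0.5)(x)                    # masks whole channels, not single cells
model = models.Model(inputs, x)

# The output shape is unchanged; only values are masked during training.
print(model.output_shape)   # (None, 10, 8)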
Example 1: CapsuleNet
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def CapsuleNet(n_capsule=10, n_routings=5, capsule_dim=16,
               n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()
    inputs = Input(shape=(170,))
    x = Embedding(21099, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model
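A hedged usage sketch for the builder above, assuming the custom Capsule layer and the Keras imports it relies on are available in the surrounding project; the toy data is illustrative only, and CuDNNGRU requires a GPU:

import numpy as np

# Hypothetical toy batch: 32 padded token-id sequences of length 170 and
# one-hot labels over the 6 output classes used by CapsuleNet above.
X = np.random.randint(0, 21099, size=(32, 170))
y = np.eye(6)[np.random.randint(0, 6, size=32)]

model = CapsuleNet(n_capsule=10, n_routings=5, capsule_dim=16)
model.fit(X, y, batch_size=8, epochs=1)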
Example 2: CapsuleNet_v2
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def CapsuleNet_v2(n_capsule=10, n_routings=5, capsule_dim=16,
                  n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()
    inputs = Input(shape=(200,))
    x = Embedding(20000, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model
Example 3: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    x = self.word_embedding.output
    x = SpatialDropout1D(self.dropout_spatial)(x)
    x = AttentionSelf(self.word_embedding.embed_size)(x)
    x = GlobalMaxPooling1D()(x)
    x = Dropout(self.dropout)(x)
    # x = Flatten()(x)
    # final softmax classifier
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output = [dense_layer]
    self.model = Model(self.word_embedding.input, output)
    self.model.summary(120)
Example 4: build_model_text_cnn
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def build_model_text_cnn(self):
    ######### text-cnn #########
    # bert embedding
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
    # text cnn
    bert_output_emmbed = SpatialDropout1D(rate=self.keep_prob)(bert_output)
    concat_out = []
    for index, filter_size in enumerate(self.filters):
        x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
                   filters=int(self.embedding_dim / 2),
                   kernel_size=self.filters[index],
                   padding='valid',
                   kernel_initializer='normal',
                   activation='relu')(bert_output_emmbed)
        x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
        concat_out.append(x)
    x = Concatenate(axis=1)(concat_out)
    x = Dropout(self.keep_prob)(x)
    # final softmax classifier
    dense_layer = Dense(self.label, activation=self.activation)(x)
    output_layers = [dense_layer]
    self.model = Model(bert_inputs, output_layers)
Example 5: keras_dropout
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def keras_dropout(layer, rate):
    """
    Keras dropout layer.
    """
    from keras import layers

    input_dim = len(layer.input.shape)
    if input_dim == 2:
        return layers.SpatialDropout1D(rate)
    elif input_dim == 3:
        return layers.SpatialDropout2D(rate)
    elif input_dim == 4:
        return layers.SpatialDropout3D(rate)
    else:
        return layers.Dropout(rate)
Example 6: Token_Embedding
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def Token_Embedding(x, input_dim, output_dim, embed_weights=None,
                    mask_zero=False, input_length=None, dropout_rate=0,
                    embed_l2=1E-6, name='', time_distributed=False, **kwargs):
    """
    Basic token embedding layer, optionally followed by a dropout layer.
    """
    embed_reg = L1L2(l2=embed_l2) if embed_l2 != 0 else None
    embed_layer = Embedding(input_dim=input_dim,
                            output_dim=output_dim,
                            weights=embed_weights,
                            mask_zero=mask_zero,
                            input_length=input_length,
                            embeddings_regularizer=embed_reg,
                            name=name)
    if time_distributed:
        embed = TimeDistributed(embed_layer)(x)
    else:
        embed = embed_layer(x)
    # Entire embedding channels are dropped out instead of the normal Keras
    # embedding dropout, which drops all channels for entire words. Many of
    # the datasets contain so few words that losing one or more words can
    # alter the emotions completely.
    if dropout_rate != 0:
        embed = SpatialDropout1D(dropout_rate)(embed)
    return embed
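A hedged usage sketch for the helper above, assuming Token_Embedding and the Keras layers it uses (Embedding, L1L2, TimeDistributed, SpatialDropout1D) are importable from the surrounding module; the vocabulary size, embedding width, and sequence length are illustrative placeholders, not values from the original project:

from keras.layers import Input
from keras.models import Model

VOCAB_SIZE, EMBED_DIM, SEQ_LEN = 5000, 128, 40   # hypothetical dimensions

tokens = Input(shape=(SEQ_LEN,), dtype='int32')
embedded = Token_Embedding(tokens,
                           input_dim=VOCAB_SIZE,
                           output_dim=EMBED_DIM,
                           input_length=SEQ_LEN,
                           dropout_rate=0.3,     # spatial dropout over embedding channels
                           name='token_embedding')
model = Model(tokens, embedded)
print(model.output_shape)   # (None, 40, 128)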
Example 7: dummy_1_build_fn
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def dummy_1_build_fn(input_shape=(1,)):
    model = Sequential(
        [
            Embedding(input_dim=9999, output_dim=200, input_length=100, trainable=True),
            SpatialDropout1D(rate=0.5),
            Flatten(),
            Dense(100, activation="relu"),
            Dense(1, activation="sigmoid"),
        ]
    )
    model.compile(
        optimizer=RMSprop(lr=0.02, decay=0.001),
        loss=mean_absolute_error,
        metrics=["mean_absolute_error"],
    )
    return model
Example 8: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding_output = self.word_embedding.output
    x = Lambda(lambda x: x[:, 0:1, :])(embedding_output)  # take the CLS vector
    # # text cnn
    # bert_output_emmbed = SpatialDropout1D(rate=self.dropout)(embedding_output)
    # concat_out = []
    # for index, filter_size in enumerate(self.filters):
    #     x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
    #                filters=self.filters_num,  # int(K.int_shape(embedding_output)[-1]/self.len_max),
    #                strides=1,
    #                kernel_size=self.filters[index],
    #                padding='valid',
    #                kernel_initializer='normal',
    #                activation='relu')(bert_output_emmbed)
    #     x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
    #     concat_out.append(x)
    # x = Concatenate(axis=1)(concat_out)
    # x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # final softmax classifier
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output_layers = [dense_layer]
    self.model = Model(self.word_embedding.input, output_layers)
    self.model.summary(120)
Example 9: word_level
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def word_level(self):
    x_input_word = Input(shape=(self.len_max, self.embed_size))
    # x = SpatialDropout1D(self.dropout_spatial)(x_input_word)
    x = Bidirectional(GRU(units=self.rnn_units,
                          return_sequences=True,
                          activation='relu',
                          kernel_regularizer=regularizers.l2(self.l2),
                          recurrent_regularizer=regularizers.l2(self.l2)))(x_input_word)
    out_sent = AttentionSelf(self.rnn_units * 2)(x)
    model = Model(x_input_word, out_sent)
    return model
Example 10: sentence_level
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def sentence_level(self):
    x_input_sen = Input(shape=(self.len_max, self.rnn_units * 2))
    # x = SpatialDropout1D(self.dropout_spatial)(x_input_sen)
    output_doc = Bidirectional(GRU(units=self.rnn_units * 2,
                                   return_sequences=True,
                                   activation='relu',
                                   kernel_regularizer=regularizers.l2(self.l2),
                                   recurrent_regularizer=regularizers.l2(self.l2)))(x_input_sen)
    output_doc_att = AttentionSelf(self.word_embedding.embed_size)(output_doc)
    model = Model(x_input_sen, output_doc_att)
    return model
Example 11: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding_output = self.word_embedding.output
    embedding_output_spatial = SpatialDropout1D(self.dropout_spatial)(embedding_output)
    # region embedding layer first
    conv_1 = Conv1D(self.filters[0][0],
                    kernel_size=1,
                    strides=1,
                    padding='SAME',
                    kernel_regularizer=l2(self.l2),
                    bias_regularizer=l2(self.l2),
                    activation=self.activation_conv,
                    )(embedding_output_spatial)
    block = ReLU()(conv_1)
    for filters_block in self.filters:
        for j in range(filters_block[1] - 1):
            # conv + short-cut
            block_mid = self.convolutional_block(block, units=filters_block[0])
            block = shortcut_conv(block, block_mid, shortcut=True)
        # conv + max-pooling
        block_mid = self.convolutional_block(block, units=filters_block[0])
        block = shortcut_pool(block, block_mid, filters=filters_block[0], pool_type=self.pool_type, shortcut=True)
    block = k_max_pooling(top_k=self.top_k)(block)
    block = Flatten()(block)
    block = Dropout(self.dropout)(block)
    # fully connected layers
    # block_fully = Dense(2048, activation='tanh')(block)
    # output = Dense(2048, activation='tanh')(block_fully)
    output = Dense(self.label, activation=self.activate_classify)(block)
    self.model = Model(inputs=self.word_embedding.input, outputs=output)
    self.model.summary(120)
Example 12: create_model_gru
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model_gru(self, hyper_parameters):
    """
    Build the neural network: bi-GRU + capsule.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding = self.word_embedding.output
    embed_layer = SpatialDropout1D(self.dropout)(embedding)
    x_bi = Bidirectional(GRU(self.filters_num,
                             activation='relu',
                             dropout=self.dropout,
                             recurrent_dropout=self.dropout,
                             return_sequences=True))(embed_layer)
    # single capsule layer
    capsule = Capsule_bojone(num_capsule=self.num_capsule,
                             dim_capsule=self.dim_capsule,
                             routings=self.routings,
                             kernel_size=(3, 1),
                             share_weights=True)(x_bi)
    # # multi-kernel pooling variant
    # conv_pools = []
    # for filter in self.filters:
    #     capsule = Capsule_bojone(num_capsule=self.num_capsule,
    #                              dim_capsule=self.dim_capsule,
    #                              routings=self.routings,
    #                              kernel_size=(filter, 1),
    #                              share_weights=True)(x_bi)
    #     conv_pools.append(capsule)
    # capsule = Concatenate(axis=-1)(conv_pools)
    capsule = Flatten()(capsule)
    capsule = Dropout(self.dropout)(capsule)
    output = Dense(self.label, activation=self.activate_classify)(capsule)
    self.model = Model(inputs=self.word_embedding.input, outputs=output)
    self.model.summary(120)
Example 13: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding_output = self.word_embedding.output
    # x = embedding_output
    x = Lambda(lambda x: x[:, -2:-1, :])(embedding_output)  # take the CLS vector
    # # text cnn
    # bert_output_emmbed = SpatialDropout1D(rate=self.dropout)(embedding_output)
    # concat_out = []
    # for index, filter_size in enumerate(self.filters):
    #     x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
    #                filters=self.filters_num,  # int(K.int_shape(embedding_output)[-1]/self.len_max),
    #                strides=1,
    #                kernel_size=self.filters[index],
    #                padding='valid',
    #                kernel_initializer='normal',
    #                activation='relu')(bert_output_emmbed)
    #     x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
    #     concat_out.append(x)
    # x = Concatenate(axis=1)(concat_out)
    # x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # final softmax classifier
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output_layers = [dense_layer]
    self.model = Model(self.word_embedding.input, output_layers)
    self.model.summary(120)
Example 14: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def create_model(self, hyper_parameters):
    """
    Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding_output = self.word_embedding.output
    x = Lambda(lambda x: x[:, 0:1, :])(embedding_output)  # take the CLS vector
    # # text cnn
    # bert_output_emmbed = SpatialDropout1D(rate=self.dropout)(embedding_output)
    # concat_out = []
    # for index, filter_size in enumerate(self.filters):
    #     x = Conv1D(name='TextCNN_Conv1D_{}'.format(index),
    #                filters=self.filters_num,  # int(K.int_shape(embedding_output)[-1]/self.len_max),
    #                strides=1,
    #                kernel_size=self.filters[index],
    #                padding='valid',
    #                kernel_initializer='normal',
    #                activation='relu')(bert_output_emmbed)
    #     x = GlobalMaxPooling1D(name='TextCNN_MaxPool1D_{}'.format(index))(x)
    #     concat_out.append(x)
    # x = Concatenate(axis=1)(concat_out)
    # x = Dropout(self.dropout)(x)
    x = Flatten()(x)
    # final softmax classifier
    dense_layer = Dense(self.label, activation=self.activate_classify)(x)
    output_layers = [dense_layer]
    self.model = Model(self.word_embedding.input, output_layers)
    self.model.summary(132)
Example 15: test_dropout
# Required import: from keras import layers [as alias]
# Or: from keras.layers import SpatialDropout1D [as alias]
def test_dropout():
    layer_test(layers.Dropout,
               kwargs={'rate': 0.5},
               input_shape=(3, 2))
    layer_test(layers.Dropout,
               kwargs={'rate': 0.5, 'noise_shape': [3, 1]},
               input_shape=(3, 2))
    layer_test(layers.Dropout,
               kwargs={'rate': 0.5, 'noise_shape': [None, 1]},
               input_shape=(3, 2))
    layer_test(layers.SpatialDropout1D,
               kwargs={'rate': 0.5},
               input_shape=(2, 3, 4))
    for data_format in ['channels_last', 'channels_first']:
        for shape in [(4, 5), (4, 5, 6)]:
            if data_format == 'channels_last':
                input_shape = (2,) + shape + (3,)
            else:
                input_shape = (2, 3) + shape
            layer_test(layers.SpatialDropout2D if len(shape) == 2 else layers.SpatialDropout3D,
                       kwargs={'rate': 0.5,
                               'data_format': data_format},
                       input_shape=input_shape)
            # Test invalid use cases
            with pytest.raises(ValueError):
                layer_test(layers.SpatialDropout2D if len(shape) == 2 else layers.SpatialDropout3D,
                           kwargs={'rate': 0.5,
                                   'data_format': 'channels_middle'},
                           input_shape=input_shape)