This page collects typical usage examples of the Python method keras.layers.Bidirectional. If you are wondering what layers.Bidirectional does, how to use it, or want to see it in real code, the curated examples below may help. You can also browse the containing module, keras.layers,
for further usage examples.
Below are 15 code examples of layers.Bidirectional, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
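Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of what Bidirectional does: it wraps a recurrent layer, runs it over the sequence both forwards and backwards, and merges the two passes; with the default merge_mode='concat' the feature dimension doubles.

from keras.models import Sequential
from keras.layers import Bidirectional, LSTM

model = Sequential()
# 10 timesteps with 8 features each; 'concat' is the default merge mode
model.add(Bidirectional(LSTM(32, return_sequences=True),
                        merge_mode='concat', input_shape=(10, 8)))
model.summary()  # output shape (None, 10, 64): forward and backward outputs concatenated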
Example 1: __build_model
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def __build_model(self):
    model = Sequential()
    embedding_layer = Embedding(input_dim=len(self.vocab) + 1,
                                output_dim=self.embedding_dim,
                                weights=[self.embedding_mat],
                                trainable=False)
    model.add(embedding_layer)
    bilstm_layer = Bidirectional(LSTM(units=256, return_sequences=True))
    model.add(bilstm_layer)
    model.add(TimeDistributed(Dense(256, activation="relu")))
    crf_layer = CRF(units=len(self.tags), sparse_target=True)
    model.add(crf_layer)
    model.compile(optimizer="adam", loss=crf_loss, metrics=[crf_viterbi_accuracy])
    model.summary()
    return model
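Note that CRF, crf_loss, and crf_viterbi_accuracy are not part of core Keras; in this example they presumably come from the keras-contrib package. A sketch of the imports this snippet assumes:

from keras.models import Sequential
from keras.layers import Embedding, LSTM, Bidirectional, TimeDistributed, Dense
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy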
Example 2: create_model
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def create_model():
    inputs = Input(shape=(length,), dtype='int32', name='inputs')
    embedding_1 = Embedding(len(vocab), EMBED_DIM, input_length=length, mask_zero=True)(inputs)
    bilstm = Bidirectional(LSTM(EMBED_DIM // 2, return_sequences=True))(embedding_1)
    bilstm_dropout = Dropout(DROPOUT_RATE)(bilstm)
    embedding_2 = Embedding(len(vocab), EMBED_DIM, input_length=length)(inputs)
    con = Conv1D(filters=FILTERS, kernel_size=2 * HALF_WIN_SIZE + 1, padding='same')(embedding_2)
    con_d = Dropout(DROPOUT_RATE)(con)
    dense_con = TimeDistributed(Dense(DENSE_DIM))(con_d)
    rnn_cnn = concatenate([bilstm_dropout, dense_con], axis=2)
    dense = TimeDistributed(Dense(len(chunk_tags)))(rnn_cnn)
    crf = CRF(len(chunk_tags), sparse_target=True)
    crf_output = crf(dense)
    model = Model(inputs=[inputs], outputs=[crf_output])  # Keras 2 uses inputs/outputs, not input/output
    model.compile(loss=crf.loss_function, optimizer=Adam(), metrics=[crf.accuracy])
    return model
Example 3: get_audio_model
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def get_audio_model(self):
    # Modality-specific hyperparameters
    self.epochs = 100
    self.batch_size = 50
    # Modality-specific parameters
    self.embedding_dim = self.train_x.shape[2]
    print("Creating Model...")
    inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
    masked = Masking(mask_value=0)(inputs)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4))(masked)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4), name="utter")(lstm)
    output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)
    model = Model(inputs, output)
    return model
Example 4: get_bimodal_model
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def get_bimodal_model(self):
    # Modality-specific hyperparameters
    self.epochs = 100
    self.batch_size = 10
    # Modality-specific parameters
    self.embedding_dim = self.train_x.shape[2]
    print("Creating Model...")
    inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
    masked = Masking(mask_value=0)(inputs)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4), name="utter")(masked)
    output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)
    model = Model(inputs, output)
    return model
Example 5: CapsuleNet
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def CapsuleNet(n_capsule=10, n_routings=5, capsule_dim=16,
               n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()
    inputs = Input(shape=(170,))
    x = Embedding(21099, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model
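Two caveats about this example: Capsule is a custom layer defined in the example's own codebase, not in core Keras, and CuDNNGRU is the cuDNN-backed GRU that only builds on a CUDA-capable GPU. On CPU, a plain GRU with the same number of units is the usual substitute, at some speed cost.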
Example 6: CapsuleNet_v2
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def CapsuleNet_v2(n_capsule=10, n_routings=5, capsule_dim=16,
                  n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()
    inputs = Input(shape=(200,))
    x = Embedding(20000, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model
Example 7: create_model
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def create_model(maxlen, chars, word_size, infer=False):
    """
    :param infer: if True, skip compiling (inference only)
    :param maxlen: padded sequence length
    :param chars: character vocabulary
    :param word_size: embedding dimension
    :return: the (optionally compiled) model
    """
    sequence = Input(shape=(maxlen,), dtype='int32')
    embedded = Embedding(len(chars) + 1, word_size, input_length=maxlen, mask_zero=True)(sequence)
    blstm = Bidirectional(LSTM(64, return_sequences=True), merge_mode='sum')(embedded)
    output = TimeDistributed(Dense(5, activation='softmax'))(blstm)
    model = Model(inputs=sequence, outputs=output)  # Keras 2 uses inputs/outputs, not input/output
    if not infer:
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
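A hypothetical call, with placeholder values not taken from the source. Note that merge_mode='sum' adds the forward and backward outputs element-wise instead of concatenating them, so the per-timestep feature size stays at 64:

chars = list("abcdefghij")  # placeholder character vocabulary
model = create_model(maxlen=100, chars=chars, word_size=128)
model.summary()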
Example 8: create_lstm
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def create_lstm(hidden_units=[50], dropout=0.05, bidirectional=True):
    model = Sequential()
    if bidirectional:
        for i, unit in enumerate(hidden_units):
            if i == 0:
                # the first layer needs an input shape; None allows variable-length sequences
                model.add(Bidirectional(LSTM(unit, dropout=dropout, return_sequences=True),
                                        input_shape=(None, config.N_MELS)))
            else:
                model.add(Bidirectional(LSTM(unit, dropout=dropout, return_sequences=True)))
    else:
        for i, unit in enumerate(hidden_units):
            if i == 0:
                # input_shape must be passed to the layer itself, not to model.add()
                model.add(LSTM(unit, dropout=dropout, return_sequences=True,
                               input_shape=(None, config.N_MELS)))
            else:
                model.add(LSTM(unit, dropout=dropout, return_sequences=True))
    model.add(TimeDistributed(Dense(config.CLASSES, activation='sigmoid')))
    return model
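A hypothetical call that stacks two bidirectional layers; it only runs in the source's own context, since config.N_MELS and config.CLASSES come from the example's config module:

model = create_lstm(hidden_units=[64, 64], dropout=0.1, bidirectional=True)
model.compile(optimizer='adam', loss='binary_crossentropy')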
Example 9: __build_model
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def __build_model(self, emb_matrix=None):
    word_input = Input(shape=(None,), dtype='int32', name="word_input")
    word_emb = Embedding(self.vocab_size + 1, self.embed_dim,
                         weights=[emb_matrix] if emb_matrix is not None else None,
                         trainable=emb_matrix is None,  # freeze only when pretrained weights are given
                         name='word_emb')(word_input)
    bilstm_output = Bidirectional(LSTM(self.bi_lstm_units // 2,
                                       return_sequences=True))(word_emb)
    bilstm_output = Dropout(self.dropout_rate)(bilstm_output)
    output = Dense(self.chunk_size + 1, kernel_initializer="he_normal")(bilstm_output)
    output = CRF(self.chunk_size + 1, sparse_target=self.sparse_target)(output)
    model = Model([word_input], [output])
    parallel_model = model
    if self.num_gpu > 1:
        parallel_model = multi_gpu_model(model, gpus=self.num_gpu)
    parallel_model.compile(optimizer=self.optimizer, loss=crf_loss, metrics=[crf_accuracy])
    return model, parallel_model
Example 10: bidLstm
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def bidLstm(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    # inp = Input(shape=(maxlen,))
    input_layer = Input(shape=(maxlen, embed_size))
    # x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Bidirectional(LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=dropout_rate))(input_layer)  # note: recurrent_dropout_rate is accepted but unused
    # x = Dropout(dropout_rate)(x)
    x = Attention(maxlen)(x)
    # x = AttentionWeightedAverage(maxlen)(x)
    # print('len(x):', len(x))
    # x = AttentionWeightedAverage(maxlen)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
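Attention(maxlen) here is a custom layer from the example's own repository (as is the commented-out AttentionWeightedAverage); neither ships with core Keras, so this snippet will not run without that code.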
# conv+GRU with embeddings
Example 11: build_model_bilstm_single
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def build_model_bilstm_single(self):
    if args.use_lstm:
        if args.use_cudnn_cell:
            layer_cell = CuDNNLSTM
        else:
            layer_cell = LSTM
    else:
        if args.use_cudnn_cell:
            layer_cell = CuDNNGRU
        else:
            layer_cell = GRU
    # bert embedding
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
    # Bi-LSTM
    x = Bidirectional(layer_cell(units=args.units, return_sequences=args.return_sequences,
                                 kernel_regularizer=regularizers.l2(args.l2 * 0.1),
                                 recurrent_regularizer=regularizers.l2(args.l2)
                                 ))(bert_output)
    x = Dropout(args.keep_prob)(x)  # note: Dropout expects a drop fraction, despite the keep_prob name
    x = Flatten()(x)
    # finally, a softmax over the labels
    dense_layer = Dense(args.label, activation=args.activation)(x)
    output_layers = [dense_layer]
    self.model = Model(bert_inputs, output_layers)
Example 12: build_model_bilstm_layers
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def build_model_bilstm_layers(self):
    if args.use_lstm:
        if args.use_cudnn_cell:
            layer_cell = CuDNNLSTM
        else:
            layer_cell = LSTM
    else:
        if args.use_cudnn_cell:
            layer_cell = CuDNNGRU
        else:
            layer_cell = GRU
    # bert embedding
    bert_inputs, bert_output = KerasBertEmbedding().bert_encode()
    # Bi-LSTM
    x = Bidirectional(layer_cell(units=args.units,
                                 return_sequences=args.return_sequences,
                                 ))(bert_output)
    # finally, the CRF decoding head
    x = TimeDistributed(Dropout(self.keep_prob))(x)
    dense_layer = Dense(args.max_seq_len, activation=args.activation)(x)
    crf = CRF(args.label, sparse_target=False, learn_mode="join", test_mode='viterbi')
    output_layers = crf(dense_layer)
    self.model = Model(bert_inputs, output_layers)
    self.model.summary(132)
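Examples 11 and 12 share the same cell-selection logic; they differ in the head. Example 11 flattens the bidirectional output into a single softmax for whole-sequence classification, whereas Example 12 keeps the timestep dimension (TimeDistributed dropout, per-step Dense) and decodes with a CRF for sequence labeling.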
Example 13: create_BiLSTM
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def create_BiLSTM(wordvecs, lstm_dim=300, output_dim=2, dropout=.5,
                  weights=None, train=True):
    model = Sequential()
    if weights is not None:  # 'is not None' avoids elementwise comparison on weight arrays
        model.add(Embedding(len(wordvecs) + 1,
                            len(wordvecs['the']),
                            weights=[weights],
                            trainable=train))
    else:
        model.add(Embedding(len(wordvecs) + 1,
                            len(wordvecs['the']),
                            trainable=train))
    model.add(Dropout(dropout))
    model.add(Bidirectional(LSTM(lstm_dim)))
    model.add(Dropout(dropout))
    model.add(Dense(output_dim, activation='softmax'))
    if output_dim == 2:
        model.compile('adam', 'binary_crossentropy',
                      metrics=['accuracy'])
    else:
        model.compile('adam', 'categorical_crossentropy',
                      metrics=['accuracy'])
    return model
Author: Artaches | Project: SSAN-self-attention-sentiment-analysis-classification | Lines: 25 | Source file: lstm_bilstm.py
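A hypothetical call with a toy word-vector dict (placeholder values; note the function reads len(wordvecs['the']) to infer the embedding dimension, so the vocabulary must contain 'the'):

import numpy as np

wordvecs = {'the': np.zeros(50), 'cat': np.ones(50)}  # placeholder 50-dim vectors
model = create_BiLSTM(wordvecs, lstm_dim=100, output_dim=2)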
Example 14: print_results
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def print_results(bi, file, out_file, file_type):
    names, results, std_devs, dim = test_embeddings(bi, file, file_type)
    rr = [[u'{0:.3f} \u00B1{1:.3f}'.format(r, s) for r, s in zip(result, std_dev)]
          for result, std_dev in zip(results, std_devs)]
    table_data = [[name] + result for name, result in zip(names, rr)]
    table = tabulate.tabulate(table_data, headers=['dataset', 'acc', 'prec', 'rec', 'f1'],
                              tablefmt='simple', floatfmt='.3f')
    if out_file:
        with open(out_file, 'a') as f:
            f.write('\n')
            if bi:
                f.write('+++Bidirectional LSTM+++\n')
            else:
                f.write('+++LSTM+++\n')
            f.write(table)
            f.write('\n')
    else:
        print()
        if bi:
            print('Bidirectional LSTM')
        else:
            print('LSTM')
        print(table)
Author: Artaches | Project: SSAN-self-attention-sentiment-analysis-classification | Lines: 26 | Source file: lstm_bilstm.py
Example 15: forward
# Required module import: from keras import layers [as alias]
# Or: from keras.layers import Bidirectional [as alias]
def forward(self):
    model_input = Input(shape=(self.maxlen,), dtype='int32', name='token')
    x = Token_Embedding(model_input, self.nb_tokens, self.embedding_dim,
                        self.token_embeddings, True, self.maxlen,
                        self.embed_dropout_rate, name='token_embeddings')
    x = Activation('tanh')(x)
    # A skip connection from the embedding to the output eases gradient flow and gives
    # later layers access to lower-level features; the merge order matters for
    # consistency with the pretrained model.
    lstm_0_output = Bidirectional(
        LSTM(self.rnn_size, return_sequences=True), name="bi_lstm_0")(x)
    lstm_1_output = Bidirectional(
        LSTM(self.rnn_size, return_sequences=True), name="bi_lstm_1")(lstm_0_output)
    x = concatenate([lstm_1_output, lstm_0_output, x], name='concatenate')
    x = self.attention_layer(x)
    if self.return_attention:
        x, weights = x
    outputs = tc_output_logits(x, self.nb_classes, self.final_dropout_rate)
    if self.return_attention:
        outputs.append(weights)
        outputs = concatenate(outputs, axis=-1, name='outputs')
    self.model = Model(inputs=model_input,
                       outputs=outputs, name="Bi_LSTM_Attention")