This article collects typical usage examples of the Python attribute keras.layers.LSTM. If you have been wondering what keras.layers.LSTM does, how to use it, or where to find examples of it, the curated examples below may help. You can also read more about its containing module, keras.layers.
The following presents 15 code examples of layers.LSTM, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def create_model(time_window_size, metric):
    model = Sequential()
    model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu',
                     input_shape=(time_window_size, 1)))
    model.add(MaxPooling1D(pool_size=4))
    model.add(LSTM(64))
    model.add(Dense(units=time_window_size, activation='linear'))
    model.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
    # Alternative: model.compile(optimizer="sgd", loss="mse", metrics=[metric])
    print(model.summary())
    return model
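A minimal sketch of how this factory might be invoked; the window size, metric, and dummy data below are hypothetical, and the imports are the ones the snippet appears to assume:
import numpy as np
from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D, LSTM, Dense

model = create_model(time_window_size=64, metric='mae')  # hypothetical values
x = np.random.rand(8, 64, 1)   # 8 windows, 64 timesteps, 1 feature each
y = np.random.rand(8, 64)      # one reconstruction target per timestep
model.fit(x, y, epochs=1, batch_size=4)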
Example 2: create_network
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def create_network(network_input, n_vocab):
    """Create the structure of the neural network."""
    model = Sequential()
    model.add(LSTM(
        512,
        input_shape=(network_input.shape[1], network_input.shape[2]),
        recurrent_dropout=0.3,
        return_sequences=True
    ))
    model.add(LSTM(512, return_sequences=True, recurrent_dropout=0.3))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    # Load the previously trained weights into the model
    model.load_weights('weights.hdf5')
    return model
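For context, a hedged sketch of the input this function expects: a 3-D array of shape (num_sequences, sequence_length, 1), as is typical for this kind of note-prediction network. The sizes and normalization below are illustrative assumptions, not from the original source:
import numpy as np

n_vocab = 358                                   # assumed vocabulary size
network_input = np.random.randint(0, n_vocab, size=(100, 50, 1)) / float(n_vocab)
model = create_network(network_input, n_vocab)  # note: requires weights.hdf5 on disk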
Example 3: RNNModel
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def RNNModel(vocab_size, max_len, rnnConfig, model_type):
    embedding_size = rnnConfig['embedding_size']
    if model_type == 'inceptionv3':
        # InceptionV3 outputs a 2048-dimensional vector for each image, which we feed to the RNN model
        image_input = Input(shape=(2048,))
    elif model_type == 'vgg16':
        # VGG16 outputs a 4096-dimensional vector for each image, which we feed to the RNN model
        image_input = Input(shape=(4096,))
    image_model_1 = Dropout(rnnConfig['dropout'])(image_input)
    image_model = Dense(embedding_size, activation='relu')(image_model_1)
    caption_input = Input(shape=(max_len,))
    # mask_zero: inputs are zero-padded to the same length; the zero mask tells downstream
    # layers to ignore those padded steps, i.e. it is an efficiency optimization.
    caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
    caption_model_2 = Dropout(rnnConfig['dropout'])(caption_model_1)
    caption_model = LSTM(rnnConfig['LSTM_units'])(caption_model_2)
    # Merge the two branches and add a softmax classifier
    final_model_1 = concatenate([image_model, caption_model])
    final_model_2 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_1)
    final_model = Dense(vocab_size, activation='softmax')(final_model_2)
    model = Model(inputs=[image_input, caption_input], outputs=final_model)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
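A hedged sketch of calling this constructor; the configuration values are illustrative guesses, and the imports are those the snippet appears to assume:
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Embedding, LSTM, concatenate

# Hypothetical configuration -- the real values live in the project's config.
rnnConfig = {
    'embedding_size': 256,
    'dropout': 0.5,
    'LSTM_units': 256,
    'dense_units': 256,
}
model = RNNModel(vocab_size=5000, max_len=34, rnnConfig=rnnConfig, model_type='vgg16')
model.summary()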
Example 4: get_model_41
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def get_model_41(params):
    embedding_weights = pickle.load(open("../data/datasets/train_data/embedding_weights_w2v-google_MSD-AG.pk", "rb"))
    # main sequential model
    model = Sequential()
    model.add(Embedding(len(embedding_weights[0]), params['embedding_dim'],
                        input_length=params['sequence_length'], weights=embedding_weights))
    # model.add(Dropout(params['dropout_prob'][0], input_shape=(params['sequence_length'], params['embedding_dim'])))
    model.add(LSTM(2048))
    # model.add(Dropout(params['dropout_prob'][1]))
    model.add(Dense(units=params["n_out"], kernel_initializer="uniform"))  # Keras 2 spelling of output_dim/init
    model.add(Activation(params['final_activation']))
    logging.debug("Output shape: %s" % str(model.output_shape))
    if params['final_activation'] == 'linear':
        model.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))
    return model
Example 5: train_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def train_model():
    if cxl_model:
        embedding_matrix = load_embedding()
    else:
        embedding_matrix = {}
    train, label = vocab_train_label(train_path, vocab=vocab, tags=tag, max_chunk_length=length)
    n = np.array(label, dtype=np.float)
    labels = n.reshape((n.shape[0], n.shape[1], 1))
    model = Sequential([
        Embedding(input_dim=len(vocab), output_dim=300, mask_zero=True, input_length=length,
                  weights=[embedding_matrix], trainable=False),
        SpatialDropout1D(0.2),
        Bidirectional(layer=LSTM(units=150, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)),
        TimeDistributed(Dense(len(tag), activation=relu)),
    ])
    crf_ = CRF(units=len(tag), sparse_target=True)
    model.add(crf_)
    model.compile(optimizer=Adam(), loss=crf_.loss_function, metrics=[crf_.accuracy])
    model.fit(x=np.array(train), y=labels, batch_size=16, epochs=4, callbacks=[RemoteMonitor()])
    model.save(model_path)
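The CRF layer used here (and in the next example) is presumably the one from the keras-contrib add-on package rather than core Keras; a sketch of the imports these snippets appear to assume:
# Assumed imports for the CRF-based examples; keras_contrib is the community
# add-on package (pip install git+https://www.github.com/keras-team/keras-contrib.git).
import numpy as np
from keras.models import Sequential
from keras.layers import Embedding, SpatialDropout1D, Bidirectional, LSTM, TimeDistributed, Dense
from keras.activations import relu
from keras.optimizers import Adam
from keras.callbacks import RemoteMonitor
from keras_contrib.layers import CRF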
Example 6: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def create_model():
    inputs = Input(shape=(length,), dtype='int32', name='inputs')
    embedding_1 = Embedding(len(vocab), EMBED_DIM, input_length=length, mask_zero=True)(inputs)
    bilstm = Bidirectional(LSTM(EMBED_DIM // 2, return_sequences=True))(embedding_1)
    bilstm_dropout = Dropout(DROPOUT_RATE)(bilstm)
    embedding_2 = Embedding(len(vocab), EMBED_DIM, input_length=length)(inputs)
    con = Conv1D(filters=FILTERS, kernel_size=2 * HALF_WIN_SIZE + 1, padding='same')(embedding_2)
    con_d = Dropout(DROPOUT_RATE)(con)
    dense_con = TimeDistributed(Dense(DENSE_DIM))(con_d)
    rnn_cnn = concatenate([bilstm_dropout, dense_con], axis=2)
    dense = TimeDistributed(Dense(len(chunk_tags)))(rnn_cnn)
    crf = CRF(len(chunk_tags), sparse_target=True)
    crf_output = crf(dense)
    model = Model(inputs=[inputs], outputs=[crf_output])  # Keras 2 keyword spelling of input/output
    model.compile(loss=crf.loss_function, optimizer=Adam(), metrics=[crf.accuracy])
    return model
Example 7: __init__
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def __init__(self, use_gpu: bool = False):
    import tensorflow as tf
    from keras.models import Sequential
    from keras.layers import Dense, Embedding
    from keras.layers import LSTM
    from keras.backend import set_session
    latent_dim = StructureModel.SEQUENCE_LENGTH * 8
    model = Sequential()
    model.add(
        Embedding(StructureFeatureAnalyzer.NUM_FEATURES, StructureFeatureAnalyzer.NUM_FEATURES,
                  input_length=StructureModel.SEQUENCE_LENGTH))
    model.add(LSTM(latent_dim, dropout=0.2, return_sequences=False))
    model.add(Dense(StructureFeatureAnalyzer.NUM_FEATURES, activation='softmax'))
    model.summary()
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
    self.model = model
    if use_gpu:
        # tf.ConfigProto / tf.Session are TensorFlow 1.x APIs
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))
Example 8: get_audio_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def get_audio_model(self):
    # Modality-specific hyperparameters
    self.epochs = 100
    self.batch_size = 50
    # Modality-specific parameters
    self.embedding_dim = self.train_x.shape[2]
    print("Creating Model...")
    inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
    masked = Masking(mask_value=0)(inputs)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4))(masked)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4), name="utter")(lstm)
    output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)
    model = Model(inputs, output)
    return model
Example 9: get_bimodal_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def get_bimodal_model(self):
    # Modality-specific hyperparameters
    self.epochs = 100
    self.batch_size = 10
    # Modality-specific parameters
    self.embedding_dim = self.train_x.shape[2]
    print("Creating Model...")
    inputs = Input(shape=(self.sequence_length, self.embedding_dim), dtype='float32')
    masked = Masking(mask_value=0)(inputs)
    lstm = Bidirectional(LSTM(300, activation='tanh', return_sequences=True, dropout=0.4), name="utter")(masked)
    output = TimeDistributed(Dense(self.classes, activation='softmax'))(lstm)
    model = Model(inputs, output)
    return model
Example 10: _build
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def _build(self):
    # The model that will be trained
    rnn_x = Input(shape=(None, Z_DIM + ACTION_DIM))
    lstm = LSTM(HIDDEN_UNITS, return_sequences=True, return_state=True)
    lstm_output, _, _ = lstm(rnn_x)
    mdn = Dense(Z_DIM)(lstm_output)
    rnn = Model(rnn_x, mdn)
    # The model used during prediction: same LSTM weights, but with explicit state inputs/outputs
    state_input_h = Input(shape=(HIDDEN_UNITS,))
    state_input_c = Input(shape=(HIDDEN_UNITS,))
    state_inputs = [state_input_h, state_input_c]
    _, state_h, state_c = lstm(rnn_x, initial_state=state_inputs)
    forward = Model([rnn_x] + state_inputs, [state_h, state_c])
    optimizer = Adam(lr=0.0001)
    # optimizer = SGD(lr=0.0001, decay=1e-4, momentum=0.9, nesterov=True)
    rnn.compile(loss='mean_squared_error', optimizer=optimizer)
    return [rnn, forward]
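A hedged sketch of how the two returned models might be used together: rnn for teacher-forced training over whole sequences, forward for stepping the hidden state one observation at a time. The constants and the instance name world_model are illustrative assumptions:
import numpy as np

Z_DIM, ACTION_DIM, HIDDEN_UNITS = 32, 3, 256   # hypothetical module-level constants

rnn, forward = world_model._build()            # world_model: an instance of the owning class

# Training: predict the next latent vector at every timestep of a sequence.
seq = np.random.rand(1, 100, Z_DIM + ACTION_DIM)
target = np.random.rand(1, 100, Z_DIM)
rnn.fit(seq, target, epochs=1)

# Inference: carry the LSTM state forward one observation at a time.
h = np.zeros((1, HIDDEN_UNITS))
c = np.zeros((1, HIDDEN_UNITS))
step = np.random.rand(1, 1, Z_DIM + ACTION_DIM)
h, c = forward.predict([step, h, c])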
Example 11: _build_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def _build_model(self, num_features, num_actions, max_history_len):
    """Build a keras model and return a compiled model.

    :param max_history_len: The maximum number of historical turns used to
                            decide on the next action"""
    from keras.layers import LSTM, Activation, Masking, Dense
    from keras.models import Sequential
    n_hidden = 32  # size of hidden layer in LSTM
    # Build Model
    batch_shape = (None, max_history_len, num_features)
    model = Sequential()
    model.add(Masking(-1, batch_input_shape=batch_shape))
    model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
    model.add(Dense(input_dim=n_hidden, units=num_actions))  # Keras 2 spelling of output_dim
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    logger.debug(model.summary())
    return model
Example 12: _build_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def _build_model(self, num_features, num_actions, max_history_len):
    """Build a keras model and return a compiled model.

    :param max_history_len: The maximum number of historical
                            turns used to decide on the next action
    """
    from keras.layers import LSTM, Activation, Masking, Dense
    from keras.models import Sequential
    n_hidden = 32  # neural net and training params
    batch_shape = (None, max_history_len, num_features)
    # Build Model
    model = Sequential()
    model.add(Masking(-1, batch_input_shape=batch_shape))
    model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
    model.add(Dense(input_dim=n_hidden, units=num_actions))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    logger.debug(model.summary())
    return model
Example 13: GeneratorPretraining
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def GeneratorPretraining(V, E, H):
    '''
    Model for Generator pretraining. This model's weights should be shared with
    the Generator.
    # Arguments:
        V: int, Vocabulary size
        E: int, Embedding size
        H: int, LSTM hidden size
    # Returns:
        generator_pretraining: keras Model
            input: word ids, shape = (B, T)
            output: word probability, shape = (B, T, V)
    '''
    # In the comments below, B is the batch size and T is the number of time steps.
    input = Input(shape=(None,), dtype='int32', name='Input')       # (B, T)
    out = Embedding(V, E, mask_zero=True, name='Embedding')(input)  # (B, T, E)
    out = LSTM(H, return_sequences=True, name='LSTM')(out)          # (B, T, H)
    out = TimeDistributed(
        Dense(V, activation='softmax', name='DenseSoftmax'),
        name='TimeDenseSoftmax')(out)                               # (B, T, V)
    generator_pretraining = Model(input, out)
    return generator_pretraining
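A hedged sketch of instantiating the pretraining model, assuming the keras imports used in the snippet above; the sizes are illustrative:
import numpy as np

V, E, H = 6000, 64, 64        # hypothetical vocabulary/embedding/hidden sizes
model = GeneratorPretraining(V, E, H)
model.compile(optimizer='adam', loss='categorical_crossentropy')

ids = np.random.randint(1, V, size=(2, 10))  # (B, T) word ids; 0 is reserved for padding
probs = model.predict(ids)                   # (B, T, V) per-step word probabilities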
Example 14: __init__
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def __init__(self, sess, B, V, E, H, lr=1e-3):
    '''
    # Arguments:
        B: int, Batch size
        V: int, Vocabulary size
        E: int, Embedding size
        H: int, LSTM hidden size
    # Optional Arguments:
        lr: float, learning rate, default is 0.001
    '''
    self.sess = sess
    self.B = B
    self.V = V
    self.E = E
    self.H = H
    self.lr = lr
    self._build_gragh()  # (sic) -- method name as defined in the source project
    self.reset_rnn_state()
Example 15: Discriminator
# Required import: from keras import layers [as alias]
# Or: from keras.layers import LSTM [as alias]
def Discriminator(V, E, H=64, dropout=0.1):
    '''
    Discriminator model.
    # Arguments:
        V: int, Vocabulary size
        E: int, Embedding size
        H: int, LSTM hidden size
        dropout: float
    # Returns:
        discriminator: keras model
            input: word ids, shape = (B, T)
            output: probability that the input is real data, shape = (B, 1)
    '''
    input = Input(shape=(None,), dtype='int32', name='Input')       # (B, T)
    out = Embedding(V, E, mask_zero=True, name='Embedding')(input)  # (B, T, E)
    out = LSTM(H)(out)
    out = Highway(out, num_layers=1)  # Highway is a helper defined elsewhere in the project
    out = Dropout(dropout, name='Dropout')(out)
    out = Dense(1, activation='sigmoid', name='FC')(out)
    discriminator = Model(input, out)
    return discriminator
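Note that core Keras 2 has no built-in Highway layer, so the Highway call above presumably refers to a project-local helper. A hedged usage sketch with illustrative sizes:
import numpy as np

V = 6000                                     # hypothetical vocabulary size
disc = Discriminator(V, E=64, H=64, dropout=0.1)
disc.compile(optimizer='adam', loss='binary_crossentropy')

ids = np.random.randint(1, V, size=(4, 20))  # (B, T) word ids
p_real = disc.predict(ids)                   # (B, 1) probability of being real data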