This article collects typical usage examples of the Python attribute keras.layers.recurrent.LSTM. If you have been wondering what recurrent.LSTM does, how to use it, or simply want concrete examples, the curated code samples below may help. You can also explore the enclosing module, keras.layers.recurrent, for further usage examples.
The sections below present 15 code examples of recurrent.LSTM, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
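Before the examples, here is a minimal sketch of the two import styles the snippets below rely on; the unit count and input shape are placeholders, not values from any of the source projects.
# Minimal sketch; the 32 units and (10, 8) input shape are illustrative.
from keras.layers.recurrent import LSTM  # direct import
# from keras.layers import recurrent     # module import, then recurrent.LSTM
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(LSTM(32, input_shape=(10, 8)))  # 10 timesteps, 8 features per step
model.add(Dense(1))
model.compile(loss="mse", optimizer="rmsprop")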
Example 1: build_model
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def build_model():
    """
    Define the model.
    """
    model = Sequential()
    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
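A possible way to exercise build_model on random data; the Conf.LAYERS values below are an assumption for illustration, not taken from the source project.
import numpy as np

# Hypothetical config: Conf.LAYERS = [features, timesteps, lstm2_units, outputs]
# e.g. Conf.LAYERS = [1, 50, 100, 1]
model = build_model()
X = np.random.random((16, Conf.LAYERS[1], Conf.LAYERS[0]))  # (batch, timesteps, features)
y = np.random.random((16, Conf.LAYERS[3]))
model.fit(X, y, epochs=1, batch_size=4)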
Example 2: __init__
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def __init__(self, embed_size, hidden_size, vocab_size, dropin, optimiser,
             l2reg, hsn_size=512, weights=None, gru=False,
             clipnorm=-1, batch_size=None, t=None, lr=0.001):
    self.max_t = t  # Expected timesteps. Needed to build the Theano graph
    # Model hyperparameters
    self.vocab_size = vocab_size  # size of word vocabulary
    self.embed_size = embed_size  # number of units in a word embedding
    self.hsn_size = hsn_size  # size of the source hidden vector
    self.hidden_size = hidden_size  # number of units in first LSTM
    self.gru = gru  # gru recurrent layer? (false = lstm)
    self.dropin = dropin  # prob. of dropping input units
    self.l2reg = l2reg  # weight regularisation penalty
    # Optimiser hyperparameters
    self.optimiser = optimiser  # optimisation method
    self.lr = lr
    self.beta1 = 0.9
    self.beta2 = 0.999
    self.epsilon = 1e-8
    self.clipnorm = clipnorm
    self.weights = weights  # initialise with checkpointed weights?
Example 3: create_lstm_model
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def create_lstm_model(self,
                      name='convlstm_encdec',
                      r_state=True,
                      r_sequence=False):
    return LSTM(units=self._num_hidden_units,
                dropout=self._lstm_dropout,
                recurrent_dropout=self._lstm_recurrent_dropout,
                return_state=r_state,
                return_sequences=r_sequence,
                stateful=False,
                bias_initializer='zeros',
                kernel_regularizer=self._kernel_regularizer,
                recurrent_regularizer=self._recurrent_regularizer,
                bias_regularizer=self._bias_regularizer,
                activation=self._activation,
                name=name)
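The return_state=True default here enables the encoder-decoder wiring the name 'convlstm_encdec' suggests. A standalone sketch of that pattern (not the author's class; sizes are illustrative):
from keras.layers import Input, Dense
from keras.layers.recurrent import LSTM
from keras.models import Model

units, feat = 64, 8  # illustrative sizes
enc_in = Input(shape=(None, feat))
_, h, c = LSTM(units, return_state=True)(enc_in)  # final hidden/cell states
dec_in = Input(shape=(None, feat))
dec_seq = LSTM(units, return_sequences=True)(dec_in, initial_state=[h, c])
out = Dense(feat)(dec_seq)
model = Model([enc_in, dec_in], out)
model.compile(loss='mse', optimizer='adam')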
Example 4: create_lstm_model
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def create_lstm_model(self, name='lstm', r_state=True, r_sequence=True):
    """
    A helper function that generates an instance of LSTM.
    :param name: Name of the layer
    :param r_state: Whether to return states
    :param r_sequence: Whether to return sequences
    :return: An LSTM instance
    """
    return LSTM(units=self._num_hidden_units,
                return_state=r_state,
                return_sequences=r_sequence,
                stateful=False,
                kernel_regularizer=self._regularizer,
                recurrent_regularizer=self._regularizer,
                bias_regularizer=self._regularizer,
                activity_regularizer=None,
                activation=self._activation,
                name=name)
# Custom layers
Example 5: LSTM
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def LSTM(self, argsDict):
    # Hyperparameter-search objective: unpack the sampled hyperparameters.
    self.paras.batch_size = argsDict["batch_size"]
    self.paras.model['dropout'] = argsDict['dropout']
    self.paras.model['activation'] = argsDict["activation"]
    self.paras.model['optimizer'] = argsDict["optimizer"]
    self.paras.model['learning_rate'] = argsDict["learning_rate"]
    print(self.paras.batch_size, self.paras.model['dropout'],
          self.paras.model['activation'], self.paras.model['optimizer'],
          self.paras.model['learning_rate'])
    model = self.lstm_model()
    model.fit(self.train_x, self.train_y,
              batch_size=self.paras.batch_size,
              epochs=self.paras.epoch,
              verbose=0,
              callbacks=[EarlyStopping(monitor='loss', patience=5)])
    score, mse = model.evaluate(self.test_x, self.test_y, verbose=0)
    y_pred = model.predict(self.test_x)
    reca = Recall_s(self.test_y, y_pred)  # Recall_s: project-specific recall metric
    return -reca  # negated so that the optimiser minimises
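Returning a negated recall makes this method usable as a hyperopt objective to minimise. A hedged sketch of how it might be driven; the search space and the `stock_model` instance are assumptions, not the author's code:
from hyperopt import fmin, tpe, hp, Trials

space = {
    'batch_size': hp.choice('batch_size', [32, 64, 128]),
    'dropout': hp.uniform('dropout', 0.1, 0.5),
    'activation': hp.choice('activation', ['tanh', 'relu']),
    'optimizer': hp.choice('optimizer', ['adam', 'rmsprop']),
    'learning_rate': hp.loguniform('learning_rate', -8, -3),
}
trials = Trials()
best = fmin(fn=stock_model.LSTM,  # stock_model: hypothetical instance of the class above
            space=space, algo=tpe.suggest, max_evals=50, trials=trials)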
Example 6: plot_training_curve
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def plot_training_curve(self, history):
    # %matplotlib inline
    # %pylab inline
    # pylab.rcParams['figure.figsize'] = (15, 9)  # Change the size of plots
    # LSTM training
    f, ax = plt.subplots()
    ax.plot(history.history['loss'])
    # ax.plot(history.history['val_loss'])
    ax.set_title('loss function')
    ax.set_ylabel('mse')
    ax.set_xlabel('epoch')
    # ax.legend(['loss', 'val_loss'], loc='upper right')
    ax.legend(['loss'], loc='upper right')
    plt.show()
    if self.paras.save:
        w = csv.writer(open(self.paras.save_folder + 'training_curve_model.txt', 'w'))
        for key, val in history.history.items():
            w.writerow([key, val])
        for key, val in history.params.items():
            w.writerow([key, val])
# Classification
Example 7: build_model
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def build_model(layers):
    """
    Model definition.
    """
    model = Sequential()
    model.add(LSTM(units=layers[1], input_shape=(layers[1], layers[0]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(layers[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=layers[3]))
    model.add(Activation("tanh"))
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Example 8: build_model
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def build_model(layers):
    # Note: input_dim/output_dim are Keras 1.x-style arguments
    # (replaced by input_shape/units in Keras 2).
    model = Sequential()
    model.add(LSTM(
        input_dim=layers[0],
        output_dim=layers[1],
        return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(
        layers[2],
        return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(
        output_dim=layers[2]))
    model.add(Activation("linear"))
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop", metrics=['accuracy'])
    print("Compilation Time : ", time.time() - start)
    return model
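For reference, a sketch of the same architecture written with Keras 2-style arguments; the timesteps parameter is an assumption (Keras 1 inferred it from the data, so None keeps it variable):
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM

def build_model_keras2(layers, timesteps=None):
    # Same stack as above, with Keras 2 argument names.
    model = Sequential()
    model.add(LSTM(layers[1], input_shape=(timesteps, layers[0]),
                   return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(layers[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(layers[2]))
    model.add(Activation("linear"))
    model.compile(loss="mse", optimizer="rmsprop")
    return model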
Example 9: build_model2
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def build_model2(layers):
    d = 0.2
    model = Sequential()
    model.add(LSTM(128, input_shape=(layers[1], layers[0]), return_sequences=True))
    model.add(Dropout(d))
    model.add(LSTM(64, input_shape=(layers[1], layers[0]), return_sequences=False))
    model.add(Dropout(d))
    model.add(Dense(16, init='uniform', activation='relu'))
    model.add(Dense(1, init='uniform', activation='relu'))
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    return model
Example 10: fit_model_new
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def fit_model_new(train_X, train_Y, window_size=1):
    model2 = Sequential()
    model2.add(LSTM(input_shape=(window_size, 1),
                    units=window_size,
                    return_sequences=True))
    model2.add(Dropout(0.5))
    model2.add(LSTM(256))
    model2.add(Dropout(0.5))
    model2.add(Dense(1))
    model2.add(Activation("linear"))
    model2.compile(loss="mse", optimizer="adam")
    model2.summary()
    # Fit the first model.
    model2.fit(train_X, train_Y, epochs=80, batch_size=1, verbose=2)
    return model2
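A hedged sketch of feeding fit_model_new with sliding-window data; the toy sine series is an illustration only (note that batch_size=1 over 80 epochs trains slowly):
import numpy as np

series = np.sin(np.linspace(0, 20, 200))      # toy signal
window_size = 1
X = series[:-1].reshape(-1, window_size, 1)   # (samples, window_size, 1)
Y = series[1:].reshape(-1, 1)                 # next-step targets
model = fit_model_new(X, Y, window_size=window_size)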
Example 11: drqn
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def drqn(input_shape, action_size, learning_rate):
    # Note: Convolution2D(filters, rows, cols, subsample=...) and output_dim
    # are Keras 1.x-style arguments.
    model = Sequential()
    model.add(TimeDistributed(Convolution2D(32, 8, 8, subsample=(4, 4), activation='relu'),
                              input_shape=input_shape))
    model.add(TimeDistributed(Convolution2D(64, 4, 4, subsample=(2, 2), activation='relu')))
    model.add(TimeDistributed(Convolution2D(64, 3, 3, activation='relu')))
    model.add(TimeDistributed(Flatten()))
    # Use all traces for training
    # model.add(LSTM(512, return_sequences=True, activation='tanh'))
    # model.add(TimeDistributed(Dense(output_dim=action_size, activation='linear')))
    # Use last trace for training
    model.add(LSTM(512, activation='tanh'))
    model.add(Dense(output_dim=action_size, activation='linear'))
    adam = Adam(lr=learning_rate)
    model.compile(loss='mse', optimizer=adam)
    return model
Example 12: a2c_lstm
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def a2c_lstm(input_shape, action_size, value_size, learning_rate):
    """Actor and critic networks share convolution layers with LSTM."""
    state_input = Input(shape=input_shape)  # e.g. 4x64x64x3
    x = TimeDistributed(Convolution2D(32, 8, 8, subsample=(4, 4), activation='relu'))(state_input)
    x = TimeDistributed(Convolution2D(64, 4, 4, subsample=(2, 2), activation='relu'))(x)
    x = TimeDistributed(Convolution2D(64, 3, 3, activation='relu'))(x)
    x = TimeDistributed(Flatten())(x)
    x = LSTM(512, activation='tanh')(x)
    # Actor stream
    actor = Dense(action_size, activation='softmax')(x)
    # Critic stream
    critic = Dense(value_size, activation='linear')(x)
    model = Model(input=state_input, output=[actor, critic])  # Keras 1.x-style Model arguments
    adam = Adam(lr=learning_rate, clipnorm=1.0)
    model.compile(loss=['categorical_crossentropy', 'mse'], optimizer=adam, loss_weights=[1., 1.])
    return model
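A possible call, following the 4x64x64x3 trace shape noted in the code; action_size and value_size are assumptions for illustration:
import numpy as np

model = a2c_lstm(input_shape=(4, 64, 64, 3), action_size=3,
                 value_size=1, learning_rate=1e-4)
state = np.random.random((1, 4, 64, 64, 3))
policy, value = model.predict(state)  # policy sums to 1 across actions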
Example 13: gen_model
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def gen_model(vocab_size=100, embedding_size=128, maxlen=100, output_size=6,
              hidden_layer_size=100, num_hidden_layers=1, RNN_LAYER_TYPE="LSTM"):
    RNN_CLASS = LSTM
    if RNN_LAYER_TYPE == "GRU":
        RNN_CLASS = GRU
    logger.info("Parameters: vocab_size = %s, embedding_size = %s, maxlen = %s, output_size = %s, hidden_layer_size = %s, " %
                (vocab_size, embedding_size, maxlen, output_size, hidden_layer_size))
    logger.info("Building Model")
    model = Sequential()
    logger.info("Init Model with vocab_size = %s, embedding_size = %s, maxlen = %s" % (vocab_size, embedding_size, maxlen))
    model.add(Embedding(vocab_size, embedding_size, input_length=maxlen))
    logger.info("Added Embedding Layer")
    model.add(Dropout(0.5))
    logger.info("Added Dropout Layer")
    for i in xrange(num_hidden_layers):  # xrange: this code targets Python 2
        model.add(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid',
                            inner_activation='hard_sigmoid', return_sequences=True))
        logger.info("Added %s Layer" % RNN_LAYER_TYPE)
        model.add(Dropout(0.5))
        logger.info("Added Dropout Layer")
    model.add(RNN_CLASS(output_dim=output_size, activation='sigmoid',
                        inner_activation='hard_sigmoid', return_sequences=True))
    logger.info("Added %s Layer" % RNN_LAYER_TYPE)
    model.add(Dropout(0.5))
    logger.info("Added Dropout Layer")
    model.add(TimeDistributedDense(output_size, activation="softmax"))
    logger.info("Added TimeDistributedDense Layer")
    logger.info("Created model with following config:\n%s" % json.dumps(model.get_config(), indent=4))
    logger.info("Compiling model with optimizer %s" % optimizer)  # `optimizer` comes from the enclosing module
    start_time = time.time()
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f seconds." % total_time)
    return model
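gen_model reads a module-level `optimizer` (and `logger`) from its enclosing scope; a hedged call sketch with assumed values for both:
import logging

logger = logging.getLogger(__name__)   # assumed module-level logger
optimizer = 'rmsprop'                  # assumed module-level optimizer
model = gen_model(vocab_size=5000, embedding_size=128, maxlen=100,
                  output_size=6, hidden_layer_size=100,
                  num_hidden_layers=1, RNN_LAYER_TYPE='LSTM')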
Example 14: test_specify_initial_state_keras_tensor
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def test_specify_initial_state_keras_tensor(layer_class):
    num_states = 2 if layer_class is recurrent.LSTM else 1
    # Test with Keras tensor
    inputs = Input((timesteps, embedding_dim))
    initial_state = [Input((units,)) for _ in range(num_states)]
    layer = layer_class(units)
    if len(initial_state) == 1:
        output = layer(inputs, initial_state=initial_state[0])
    else:
        output = layer(inputs, initial_state=initial_state)
    assert initial_state[0] in layer._inbound_nodes[0].input_tensors
    model = Model([inputs] + initial_state, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [np.random.random((num_samples, units))
                     for _ in range(num_states)]
    targets = np.random.random((num_samples, units))
    model.fit([inputs] + initial_state, targets)
Example 15: test_specify_initial_state_non_keras_tensor
# Required module: from keras.layers import recurrent [as alias]
# Or: from keras.layers.recurrent import LSTM [as alias]
def test_specify_initial_state_non_keras_tensor(layer_class):
    num_states = 2 if layer_class is recurrent.LSTM else 1
    # Test with non-Keras tensor
    inputs = Input((timesteps, embedding_dim))
    initial_state = [K.random_normal_variable((num_samples, units), 0, 1)
                     for _ in range(num_states)]
    layer = layer_class(units)
    output = layer(inputs, initial_state=initial_state)
    model = Model(inputs, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    targets = np.random.random((num_samples, units))
    model.fit(inputs, targets)
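Both tests rely on module-level constants and are typically parametrised over the RNN classes; an illustrative setup (the values are placeholders, not those of the Keras test suite):
import pytest
from keras.layers import recurrent

num_samples, timesteps, embedding_dim, units = 2, 5, 4, 3  # illustrative sizes

# Applies layer_class to every test in the module.
pytestmark = pytest.mark.parametrize(
    'layer_class', [recurrent.SimpleRNN, recurrent.GRU, recurrent.LSTM])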