本文整理匯總了Python中keras.layers.wrappers.TimeDistributed方法的典型用法代碼示例。如果您正苦於以下問題:Python wrappers.TimeDistributed方法的具體用法?Python wrappers.TimeDistributed怎麽用?Python wrappers.TimeDistributed使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類 keras.layers.wrappers 的用法示例。
在下文中一共展示了wrappers.TimeDistributed方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: GeneratorPretraining
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def GeneratorPretraining(V, E, H):
    '''
    Model for Generator pretraining. This model's weights should be shared with
    Generator.
    # Arguments:
        V: int, Vocabulary size
        E: int, Embedding size
        H: int, LSTM hidden size
    # Returns:
        generator_pretraining: keras Model
            input: word ids, shape = (B, T)
            output: word probability, shape = (B, T, V)
    '''
    # In comments, B means batch size, T means length of time steps.
    # Named `ids` rather than `input` to avoid shadowing the builtin.
    ids = Input(shape=(None,), dtype='int32', name='Input')        # (B, T)
    out = Embedding(V, E, mask_zero=True, name='Embedding')(ids)   # (B, T, E)
    out = LSTM(H, return_sequences=True, name='LSTM')(out)         # (B, T, H)
    out = TimeDistributed(
        Dense(V, activation='softmax', name='DenseSoftmax'),
        name='TimeDenseSoftmax')(out)                              # (B, T, V)
    generator_pretraining = Model(ids, out)
    return generator_pretraining
示例2: model_masking
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def model_masking(discrete_time, init_alpha, max_beta):
    """Build the masked TimeDistributed WTTE model.

    Relies on module-level `mask_value`, `n_timesteps`, `n_features`, `lr`
    and the `wtte` helper module for the output lambda and loss.
    """
    net = Sequential()
    net.add(Masking(mask_value=mask_value,
                    input_shape=(n_timesteps, n_features)))
    net.add(TimeDistributed(Dense(2)))
    net.add(Lambda(wtte.output_lambda,
                   arguments={"init_alpha": init_alpha,
                              "max_beta_value": max_beta}))
    # Loss kind depends only on whether time is discrete or continuous.
    kind = 'discrete' if discrete_time else 'continuous'
    loss_fun = wtte.loss(kind=kind, reduce_loss=False).loss_function
    net.compile(loss=loss_fun,
                optimizer=RMSprop(lr=lr),
                sample_weight_mode='temporal')
    return net
示例3: drqn
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def drqn(input_shape, action_size, learning_rate):
    """DRQN: TimeDistributed conv stack feeding an LSTM, Q-values out.

    # Arguments:
        input_shape: tuple, (traces, height, width, channels).
        action_size: int, number of discrete actions (output units).
        learning_rate: float, Adam learning rate.
    # Returns:
        Compiled Sequential model (MSE loss).
    """
    net = Sequential()
    # Keras-1 style Convolution2D(filters, rows, cols, subsample=strides).
    net.add(TimeDistributed(
        Convolution2D(32, 8, 8, subsample=(4, 4), activation='relu'),
        input_shape=input_shape))
    net.add(TimeDistributed(
        Convolution2D(64, 4, 4, subsample=(2, 2), activation='relu')))
    net.add(TimeDistributed(Convolution2D(64, 3, 3, activation='relu')))
    net.add(TimeDistributed(Flatten()))
    # Only the last trace is used for training (no return_sequences);
    # the all-traces variant would use return_sequences=True plus a
    # TimeDistributed Dense head.
    net.add(LSTM(512, activation='tanh'))
    net.add(Dense(output_dim=action_size, activation='linear'))
    net.compile(loss='mse', optimizer=Adam(lr=learning_rate))
    return net
示例4: a2c_lstm
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def a2c_lstm(input_shape, action_size, value_size, learning_rate):
    """Actor and Critic Network share convolution layers with LSTM.

    # Arguments:
        input_shape: tuple, e.g. (4, 64, 64, 3) -- traces x H x W x C.
        action_size: int, actor (policy) output units.
        value_size: int, critic (value) output units.
        learning_rate: float, Adam learning rate (grad norm clipped at 1.0).
    # Returns:
        Compiled two-output Model: [policy, value].
    """
    frames = Input(shape=input_shape)  # e.g. 4x64x64x3
    # Shared TimeDistributed conv trunk.
    shared = TimeDistributed(
        Convolution2D(32, 8, 8, subsample=(4, 4), activation='relu'))(frames)
    shared = TimeDistributed(
        Convolution2D(64, 4, 4, subsample=(2, 2), activation='relu'))(shared)
    shared = TimeDistributed(
        Convolution2D(64, 3, 3, activation='relu'))(shared)
    shared = TimeDistributed(Flatten())(shared)
    shared = LSTM(512, activation='tanh')(shared)
    # Two heads on the shared trunk.
    actor_head = Dense(action_size, activation='softmax')(shared)
    critic_head = Dense(value_size, activation='linear')(shared)
    net = Model(input=frames, output=[actor_head, critic_head])
    net.compile(loss=['categorical_crossentropy', 'mse'],
                optimizer=Adam(lr=learning_rate, clipnorm=1.0),
                loss_weights=[1., 1.])
    return net
示例5: change_trainable
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def change_trainable(layer, trainable, verbose=False):
    """ Helper method that fixes some of Keras' issues with wrappers and
    trainability. Freezes or unfreezes a given layer.

    # Arguments:
        layer: Layer to be modified.
        trainable: Whether the layer should be frozen or unfrozen.
        verbose: Verbosity flag.
    """
    layer.trainable = trainable

    if type(layer) == Bidirectional:
        # The wrapped forward/backward copies do not pick up the flag
        # automatically; set them explicitly.
        layer.backward_layer.trainable = trainable
        layer.forward_layer.trainable = trainable

    if type(layer) == TimeDistributed:
        # BUG FIX: TimeDistributed wraps a single layer exposed as
        # `.layer`; it has no `backward_layer` attribute, so the original
        # `layer.backward_layer.trainable` raised AttributeError.
        layer.layer.trainable = trainable

    if verbose:
        action = 'Unfroze' if trainable else 'Froze'
        print("{} {}".format(action, layer.name))
示例6: build_cnn_to_lstm_model
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def build_cnn_to_lstm_model(self, input_shape, optimizer=None):
    """Build a TimeDistributed CNN -> LSTM binary classifier on self.model.

    # Arguments:
        input_shape: tuple, (timesteps, height, width, channels) for the
            TimeDistributed Conv2D stack.
        optimizer: keras optimizer; defaults to a fresh
            Adam(lr=1e-6, decay=1e-5) per call. (The original signature
            used `optimizer=Adam(...)` as a default argument, which is
            evaluated once at definition time -- every call then shared
            one stateful optimizer instance.)
    """
    if optimizer is None:
        optimizer = Adam(lr=1e-6, decay=1e-5)
    model = Sequential()
    model.add(TimeDistributed(Convolution2D(16, 3, 3), input_shape=input_shape))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(Convolution2D(16, 3, 3)))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Dropout(0.2)))
    model.add(TimeDistributed(Flatten()))
    model.add(TimeDistributed(Dense(200)))
    model.add(TimeDistributed(Dense(50, name="first_dense")))
    # Collapse the time dimension; only the final LSTM state is used.
    model.add(LSTM(20, return_sequences=False, name="lstm_layer"))
    model.add(Dense(2, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)
    self.model = model
示例7: test_large_batch_gpu
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def test_large_batch_gpu(self):
    """TimeDistributed Dense over an unusually large leading dimension."""
    seq_len = 2049
    out_dim = 4
    in_dim = 3
    model = Sequential()
    model.add(TimeDistributed(Dense(out_dim), input_shape=(seq_len, in_dim)))
    # Randomize weights into a small range around zero.
    randomized = [(np.random.rand(*w.shape) - 0.5) / 5.0
                  for w in model.get_weights()]
    model.set_weights(randomized)
    self._test_keras_model(model, input_blob="data", output_blob="output",
                           delta=1e-2)
示例8: test_tiny_image_captioning
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def test_tiny_image_captioning(self):
    """Tiny captioning net: conv image branch merged with word embeddings."""
    # Image branch: a small conv net flattened to a feature vector.
    branch_in = Input(shape=(16, 16, 3))
    feat = Convolution2D(2, 3, 3)(branch_in)
    feat = Flatten()(feat)
    img_model = Model([branch_in], [feat])

    img_input = Input(shape=(16, 16, 3))
    img_feat = img_model(img_input)
    img_feat = Dense(8, name="cap_dense")(img_feat)
    img_feat = Reshape((1, 8), name="cap_reshape")(img_feat)

    # Caption branch: embed a length-5 word-id sequence.
    sentence_input = Input(shape=(5,))  # max_length = 5
    words = Embedding(8, 8, name="cap_embedding")(sentence_input)

    # Prepend the image feature as an extra timestep, then decode.
    seq = merge([img_feat, words], mode="concat", concat_axis=1,
                name="cap_merge")
    seq = LSTM(4, return_sequences=True, name="cap_lstm")(seq)
    seq = TimeDistributed(Dense(8), name="cap_timedistributed")(seq)

    combined_model = Model([img_input, sentence_input], [seq])
    self._test_keras_model(combined_model, one_dim_seq_flags=[False, True])
示例9: test_large_batch_gpu
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def test_large_batch_gpu(self):
    """TimeDistributed Dense over a very large leading (time) dimension."""
    seq_len, out_dim, in_dim = 2049, 4, 3
    model = Sequential()
    model.add(TimeDistributed(Dense(out_dim), input_shape=(seq_len, in_dim)))
    # Small random weights in roughly [-0.1, 0.1].
    new_weights = [(np.random.rand(*w.shape) - 0.5) * 0.2
                   for w in model.get_weights()]
    model.set_weights(new_weights)
    self._test_model(model, delta=1e-2)
示例10: test_time_distributed_conv
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def test_time_distributed_conv(self):
    """Stacked TimeDistributed Conv2D/MaxPooling2D blocks feeding an LSTM."""
    model = Sequential()
    model.add(TimeDistributed(Conv2D(64, (3, 3), activation="relu"),
                              input_shape=(1, 30, 30, 3)))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(1, 1))))
    # Two identical conv + pool blocks.
    for _ in range(2):
        model.add(TimeDistributed(Conv2D(32, (4, 4), activation="relu")))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
    model.add(TimeDistributed(Flatten()))
    model.add(Dropout(0.5))
    model.add(LSTM(32, return_sequences=False, dropout=0.5))
    model.add(Dense(10, activation="sigmoid"))
    self._test_model(model)
示例11: test_tiny_image_captioning
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def test_tiny_image_captioning(self):
    """Tiny captioning net (Keras-2 API): conv branch concatenated with embeddings."""
    # Image branch: a small conv net flattened to a feature vector.
    branch_in = Input(shape=(16, 16, 3))
    feat = Conv2D(2, (3, 3))(branch_in)
    feat = Flatten()(feat)
    img_model = Model(inputs=[branch_in], outputs=[feat])

    img_input = Input(shape=(16, 16, 3))
    img_feat = img_model(img_input)
    img_feat = Dense(8, name="cap_dense")(img_feat)
    img_feat = Reshape((1, 8), name="cap_reshape")(img_feat)

    # Caption branch: embed a length-5 word-id sequence.
    sentence_input = Input(shape=(5,))  # max_length = 5
    words = Embedding(8, 8, name="cap_embedding")(sentence_input)

    # Prepend the image feature as an extra timestep, then decode.
    seq = concatenate([img_feat, words], axis=1, name="cap_merge")
    seq = LSTM(4, return_sequences=True, name="cap_lstm")(seq)
    seq = TimeDistributed(Dense(8), name="cap_timedistributed")(seq)

    combined_model = Model(inputs=[img_input, sentence_input], outputs=[seq])
    self._test_model(combined_model, one_dim_seq_flags=[False, True])
示例12: test_regularizers
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def test_regularizers():
    """TimeDistributed must surface the wrapped layer's regularization losses."""
    # Kernel regularizer: loss visible on wrapped layer, wrapper and model.
    net = Sequential()
    net.add(wrappers.TimeDistributed(
        layers.Dense(2, kernel_regularizer='l1'), input_shape=(3, 4)))
    net.add(layers.Activation('relu'))
    net.compile(optimizer='rmsprop', loss='mse')
    td = net.layers[0]
    assert len(td.layer.losses) == 1
    assert len(td.losses) == 1
    assert len(td.get_losses_for(None)) == 1
    assert len(net.losses) == 1

    # Activity regularizer: a single loss visible at the model level.
    net = Sequential()
    net.add(wrappers.TimeDistributed(
        layers.Dense(2, activity_regularizer='l1'), input_shape=(3, 4)))
    net.add(layers.Activation('relu'))
    net.compile(optimizer='rmsprop', loss='mse')
    assert len(net.losses) == 1
示例13: GetLSTMEncoder
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def GetLSTMEncoder(xin, uin, dense_size, lstm_size, dense_layers=1,
        lstm_layers=1):
    '''
    Get LSTM encoder.

    # Arguments:
        xin: 3D input tensor (batch, time, features).
        uin: optional auxiliary tensor concatenated to x before each dense
            block; pass None to disable.
        dense_size: int, width of each TimeDistributed Dense layer.
        lstm_size: int, units per LSTM layer.
        dense_layers: int, number of Dense+ReLU blocks.
        lstm_layers: int, number of stacked LSTMs; only the last one
            collapses the time dimension (return_sequences=False).

    # Returns:
        Output tensor of the final LSTM followed by ReLU.
    '''
    x = xin
    # `range` instead of the Python-2-only `xrange` so this also runs
    # under Python 3 (behavior is identical here).
    for _ in range(dense_layers):
        if uin is not None:
            x = Concatenate(axis=-1)([x, uin])
        x = TimeDistributed(Dense(dense_size))(x)
        x = TimeDistributed(Activation('relu'))(x)
    for i in range(lstm_layers):
        # Every LSTM except the last keeps the full sequence so the next
        # LSTM still receives 3D input.
        sequence_out = i != lstm_layers - 1
        x = LSTM(lstm_size, return_sequences=sequence_out)(x)
        x = Activation('relu')(x)
    return x
示例14: _buildDecoder
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def _buildDecoder(self, z, latent_rep_size, max_length, charset_length):
    """Decode a latent vector into per-timestep character probabilities.

    # Arguments:
        z: latent tensor of width latent_rep_size.
        latent_rep_size: int, width of the latent input projection.
        max_length: int, number of decoded timesteps.
        charset_length: int, per-step softmax vocabulary size.
    # Returns:
        Tensor of shape (batch, max_length, charset_length).
    """
    hidden = Dense(latent_rep_size, name='latent_input', activation='relu')(z)
    # Tile the latent code across every output timestep.
    hidden = RepeatVector(max_length, name='repeat_vector')(hidden)
    # Three stacked GRUs, each returning the full sequence.
    for gru_name in ('gru_1', 'gru_2', 'gru_3'):
        hidden = GRU(501, return_sequences=True, name=gru_name)(hidden)
    softmax_head = Dense(charset_length, activation='softmax')
    return TimeDistributed(softmax_head, name='decoded_mean')(hidden)
示例15: model_no_masking
# 需要導入模塊: from keras.layers import wrappers [as 別名]
# 或者: from keras.layers.wrappers import TimeDistributed [as 別名]
def model_no_masking(discrete_time, init_alpha, max_beta):
    """Build the unmasked TimeDistributed WTTE model.

    Relies on module-level `n_timesteps`, `n_features` and `lr`, and on
    the `wtte` helper module for the output lambda and loss.
    """
    net = Sequential()
    net.add(TimeDistributed(Dense(2),
                            input_shape=(n_timesteps, n_features)))
    net.add(Lambda(wtte.output_lambda,
                   arguments={"init_alpha": init_alpha,
                              "max_beta_value": max_beta}))
    # Loss kind depends only on whether time is discrete or continuous.
    kind = 'discrete' if discrete_time else 'continuous'
    net.compile(loss=wtte.loss(kind=kind).loss_function,
                optimizer=RMSprop(lr=lr))
    return net