This article collects typical usage examples of the Python method keras.layers.RepeatVector. If you are wondering what layers.RepeatVector does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also browse further usage examples from its containing module, keras.layers.
The following presents 15 code examples of layers.RepeatVector, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
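Before the examples, a minimal demonstration of what RepeatVector itself does may be useful: it takes a 2D tensor of shape (batch, features) and tiles it n times into a 3D tensor of shape (batch, n, features). This sketch uses only the standard Keras API:

import numpy as np
from keras.layers import RepeatVector
from keras.models import Sequential

model = Sequential()
model.add(RepeatVector(3, input_shape=(5,)))    # (batch, 5) -> (batch, 3, 5)

x = np.arange(5, dtype='float32').reshape(1, 5)
print(model.predict(x).shape)                   # (1, 3, 5): the same row repeated 3 times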
Example 1: test_repeat_vector
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def test_repeat_vector(self):
    from keras.layers import RepeatVector

    model = Sequential()
    model.add(RepeatVector(3, input_shape=(5,)))

    input_names = ["input"]
    output_names = ["output"]
    spec = keras.convert(model, input_names, output_names).get_spec()
    self.assertIsNotNone(spec)

    # Test the model class
    self.assertIsNotNone(spec.description)
    self.assertTrue(spec.HasField("neuralNetwork"))

    # Test the inputs and outputs
    self.assertEqual(len(spec.description.input), len(input_names))
    six.assertCountEqual(
        self, input_names, [x.name for x in spec.description.input]
    )
    self.assertEqual(len(spec.description.output), len(output_names))
    six.assertCountEqual(
        self, output_names, [x.name for x in spec.description.output]
    )

    layers = spec.neuralNetwork.layers
    self.assertIsNotNone(layers[0].sequenceRepeat)
Example 2: test_tiny_babi_rnn
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def test_tiny_babi_rnn(self):
    vocab_size = 10
    embed_hidden_size = 8
    story_maxlen = 5
    query_maxlen = 5

    input_tensor_1 = Input(shape=(story_maxlen,))
    x1 = Embedding(vocab_size, embed_hidden_size)(input_tensor_1)
    x1 = Dropout(0.3)(x1)

    input_tensor_2 = Input(shape=(query_maxlen,))
    x2 = Embedding(vocab_size, embed_hidden_size)(input_tensor_2)
    x2 = Dropout(0.3)(x2)
    x2 = LSTM(embed_hidden_size, return_sequences=False)(x2)
    # Repeat the query encoding so it can be added to the story sequence
    x2 = RepeatVector(story_maxlen)(x2)

    x3 = add([x1, x2])
    x3 = LSTM(embed_hidden_size, return_sequences=False)(x3)
    x3 = Dropout(0.3)(x3)
    x3 = Dense(vocab_size, activation="softmax")(x3)

    model = Model(inputs=[input_tensor_1, input_tensor_2], outputs=[x3])
    self._test_model(model, one_dim_seq_flags=[True, True])
Example 3: test_repeat_vector
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def test_repeat_vector(self):
    from keras.layers import RepeatVector

    model = Sequential()
    model.add(RepeatVector(3, input_shape=(5,)))

    input_names = ["input"]
    output_names = ["output"]
    spec = keras.convert(model, input_names, output_names).get_spec()
    self.assertIsNotNone(spec)

    # Test the model class
    self.assertIsNotNone(spec.description)
    self.assertTrue(spec.HasField("neuralNetwork"))

    # Test the inputs and outputs
    self.assertEqual(len(spec.description.input), len(input_names))
    self.assertEqual(
        sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))
    )
    self.assertEqual(len(spec.description.output), len(output_names))
    self.assertEqual(
        sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))
    )

    layers = spec.neuralNetwork.layers
    self.assertIsNotNone(layers[0].sequenceRepeat)
Example 4: fit_dep
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def fit_dep(self, x, y=None):
    timesteps = x.shape[1]
    input_dim = x.shape[2]

    # Encoder: compress the whole sequence into one latent vector
    inputs = Input(shape=(timesteps, input_dim))
    encoded = LSTM(self.latent_dim)(inputs)

    # Decoder: repeat the latent vector and unroll it back into a sequence
    decoded = RepeatVector(timesteps)(encoded)
    decoded = LSTM(input_dim, return_sequences=True)(decoded)

    encoded_input = Input(shape=(self.latent_dim,))  # (unused in this method)

    self.sequence_autoencoder = Model(inputs, decoded)
    self.encoder = Model(inputs, encoded)
    self.sequence_autoencoder.compile(
        # loss='binary_crossentropy',
        loss='categorical_crossentropy',
        optimizer='RMSprop',
        metrics=['binary_accuracy']
    )
    self.sequence_autoencoder.fit(x, x)
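A minimal, self-contained sketch of the same encode-repeat-decode pattern may make the data flow clearer; the layer sizes, the mse loss, and the random data here are illustrative assumptions, not taken from the original:

import numpy as np
from keras.layers import Input, LSTM, RepeatVector
from keras.models import Model

timesteps, input_dim, latent_dim = 10, 8, 16
inputs = Input(shape=(timesteps, input_dim))
encoded = LSTM(latent_dim)(inputs)              # (batch, latent_dim)
decoded = RepeatVector(timesteps)(encoded)      # (batch, timesteps, latent_dim)
decoded = LSTM(input_dim, return_sequences=True)(decoded)

autoencoder = Model(inputs, decoded)
encoder = Model(inputs, encoded)
autoencoder.compile(loss='mse', optimizer='rmsprop')

x = np.random.random((32, timesteps, input_dim))
autoencoder.fit(x, x, epochs=1, verbose=0)
codes = encoder.predict(x)                      # one latent vector per sequence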
Example 5: repeat_vector
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def repeat_vector(inputs):
    """
    Temporary solution:
    Use this function within a Lambda layer to get a repeated tensor with a
    variable first dimension (seq_len). May be useful for feeding the result
    into a Concatenate layer.

    inputs == (layer_for_repeat, layer_for_getting_rep_num):
        layer_for_repeat: shape == (batch_size, vector_dim)
        layer_for_getting_rep_num: shape == (batch_size, seq_len, ...)
    :return:
        repeated layer_for_repeat, shape == (batch_size, seq_len, vector_dim)
    """
    layer_for_repeat, layer_for_getting_rep_num = inputs
    # n is a symbolic tensor here; this relies on the backend's repeat op
    # accepting a dynamic repetition count
    repeated_vector = RepeatVector(
        n=K.shape(layer_for_getting_rep_num)[1],
        name='custom_repeat_vector')(layer_for_repeat)
    # shape == (batch_size, seq_len, vector_dim)
    return repeated_vector
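A sketch of how this helper might be wired into a model; the placeholder Inputs, their dimensions (64 and 128), and the final Concatenate are illustrative assumptions:

from keras.layers import Concatenate, Input, Lambda

thought_vector = Input(shape=(64,))           # (batch, vector_dim)
decoder_outputs = Input(shape=(None, 128))    # (batch, seq_len, hidden), variable seq_len

# Lambda receives the two tensors as a list, matching repeat_vector's signature
repeated = Lambda(repeat_vector,
                  output_shape=(None, 64))([thought_vector, decoder_outputs])
merged = Concatenate(axis=-1)([decoder_outputs, repeated])   # (batch, seq_len, 192)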
Example 6: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def create_model(self, ret_model=False):
    # Image branch: project CNN features and repeat them across the caption length
    image_model = Sequential()
    image_model.add(Dense(EMBEDDING_DIM, input_dim=4096, activation='relu'))
    image_model.add(RepeatVector(self.max_length))

    # Language branch: embed and encode the partial caption
    lang_model = Sequential()
    lang_model.add(Embedding(self.vocab_size, 256, input_length=self.max_length))
    lang_model.add(LSTM(256, return_sequences=True))
    lang_model.add(TimeDistributed(Dense(EMBEDDING_DIM)))

    # Note: Merge is a Keras 1.x layer; in Keras 2 use the functional
    # Concatenate layer instead.
    model = Sequential()
    model.add(Merge([image_model, lang_model], mode='concat'))
    model.add(LSTM(1000, return_sequences=False))
    model.add(Dense(self.vocab_size))
    model.add(Activation('softmax'))

    print("Model created!")

    if ret_model:
        return model

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
Example 7: AlternativeRNNModel
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def AlternativeRNNModel(vocab_size, max_len, rnnConfig, model_type):
    embedding_size = rnnConfig['embedding_size']
    if model_type == 'inceptionv3':
        # InceptionV3 outputs a 2048-dimensional vector per image, which we feed to the RNN
        image_input = Input(shape=(2048,))
    elif model_type == 'vgg16':
        # VGG16 outputs a 4096-dimensional vector per image, which we feed to the RNN
        image_input = Input(shape=(4096,))
    image_model_1 = Dense(embedding_size, activation='relu')(image_input)
    image_model = RepeatVector(max_len)(image_model_1)

    caption_input = Input(shape=(max_len,))
    # mask_zero: captions are zero-padded to the same length; the mask tells
    # downstream layers to skip the padding, which is an efficiency gain.
    caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
    # Since the next word is predicted from all previous words (whose number grows
    # with each iteration over the caption), return_sequences must be True.
    caption_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=True)(caption_model_1)
    # caption_model = TimeDistributed(Dense(embedding_size, activation='relu'))(caption_model_2)
    caption_model = TimeDistributed(Dense(embedding_size))(caption_model_2)

    # Merge the two branches and add a softmax classifier
    final_model_1 = concatenate([image_model, caption_model])
    # final_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=False)(final_model_1)
    final_model_2 = Bidirectional(LSTM(rnnConfig['LSTM_units'], return_sequences=False))(final_model_1)
    # final_model_3 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_2)
    # final_model = Dense(vocab_size, activation='softmax')(final_model_3)
    final_model = Dense(vocab_size, activation='softmax')(final_model_2)

    model = Model(inputs=[image_input, caption_input], outputs=final_model)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    return model
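A hypothetical way to instantiate this model; the config values, vocabulary size, and caption length below are assumptions for illustration:

rnn_config = {'embedding_size': 256, 'LSTM_units': 256}
model = AlternativeRNNModel(vocab_size=5000, max_len=34,
                            rnnConfig=rnn_config, model_type='inceptionv3')
model.summary()   # expects [image features (2048,), caption (34,)] as inputs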
Example 8: create_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def create_model(self, ret_model=False):
    # base_model = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # base_model.trainable = False

    image_model = Sequential()
    # image_model.add(base_model)
    # image_model.add(Flatten())
    image_model.add(Dense(EMBEDDING_DIM, input_dim=4096, activation='relu'))
    image_model.add(RepeatVector(self.max_cap_len))

    lang_model = Sequential()
    lang_model.add(Embedding(self.vocab_size, 256, input_length=self.max_cap_len))
    lang_model.add(LSTM(256, return_sequences=True))
    lang_model.add(TimeDistributed(Dense(EMBEDDING_DIM)))

    # Note: Merge is a Keras 1.x layer; in Keras 2 use Concatenate instead.
    model = Sequential()
    model.add(Merge([image_model, lang_model], mode='concat'))
    model.add(LSTM(1000, return_sequences=False))
    model.add(Dense(self.vocab_size))
    model.add(Activation('softmax'))

    print("Model created!")

    if ret_model:
        return model

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
Example 9: _test_one_to_many
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def _test_one_to_many(self, keras_major_version):
    # params is a one-element tuple so that params[0] holds the config dict
    params = (
        dict(
            input_dims=[1, 10],
            activation="tanh",
            return_sequences=False,
            output_dim=3,
        ),
    )
    number_of_times = 4
    model = Sequential()
    model.add(RepeatVector(number_of_times, input_shape=(10,)))

    if keras_major_version == 2:
        model.add(
            LSTM(
                params[0]["output_dim"],
                input_shape=params[0]["input_dims"],
                activation=params[0]["activation"],
                recurrent_activation="sigmoid",
                return_sequences=True,
            )
        )
    else:
        # Keras 1.x spelling of the same layer arguments
        model.add(
            LSTM(
                output_dim=params[0]["output_dim"],
                activation=params[0]["activation"],
                inner_activation="sigmoid",
                return_sequences=True,
            )
        )

    relative_error, keras_preds, coreml_preds = simple_model_eval(params, model)
    # print(relative_error, '\n', keras_preds, '\n', coreml_preds)
    for i in range(len(relative_error)):
        self.assertLessEqual(relative_error[i], 0.01)
Example 10: base_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def base_model(feature_len=1, after_day=1, input_shape=(20, 1)):
    model = Sequential()

    # Encoder: summarize the input window into a single vector
    model.add(LSTM(units=100, return_sequences=False, input_shape=input_shape))
    # model.add(LSTM(units=100, return_sequences=False, input_shape=input_shape))

    # One to many: repeat the summary once per forecast step
    model.add(RepeatVector(after_day))
    model.add(LSTM(200, return_sequences=True))
    # model.add(LSTM(50, return_sequences=True))
    model.add(TimeDistributed(Dense(units=feature_len, activation='linear')))

    return model
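A hypothetical usage of base_model for multi-step forecasting; the random data, epoch count, and compile settings are illustrative assumptions:

import numpy as np

model = base_model(feature_len=1, after_day=5, input_shape=(20, 1))
model.compile(loss='mse', optimizer='adam')

x = np.random.random((64, 20, 1))   # 64 windows of 20 timesteps, 1 feature
y = np.random.random((64, 5, 1))    # 5 future steps per window
model.fit(x, y, epochs=1, verbose=0)
preds = model.predict(x)            # shape: (64, 5, 1)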
Example 11: seq2seq_attention
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def seq2seq_attention(feature_len=1, after_day=1, input_shape=(20, 1), time_step=20):
    # Define the model input with shape (Tx, feature)
    X = Input(shape=input_shape)

    # Collect one decoder output per forecast step
    all_outputs = []

    # Encoder: pre-attention LSTM
    encoder = LSTM(units=100, return_state=True, return_sequences=True, name='encoder')
    # Decoder: post-attention LSTM
    decoder = LSTM(units=100, return_state=True, name='decoder')
    # Output
    decoder_output = Dense(units=feature_len, activation='linear', name='output')
    model_output = Reshape((1, feature_len))

    # Attention components, shared across decoding steps
    repeator = RepeatVector(time_step)
    concatenator = Concatenate(axis=-1)
    densor = Dense(1, activation='relu')
    activator = Activation(softmax, name='attention_weights')
    dotor = Dot(axes=1)

    encoder_outputs, s, c = encoder(X)

    for t in range(after_day):
        context = one_step_attention(encoder_outputs, s, repeator, concatenator,
                                     densor, activator, dotor)
        a, s, c = decoder(context, initial_state=[s, c])
        outputs = decoder_output(a)
        outputs = model_output(outputs)
        all_outputs.append(outputs)

    all_outputs = Lambda(lambda x: K.concatenate(x, axis=1))(all_outputs)
    model = Model(inputs=X, outputs=all_outputs)
    return model
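Examples 11 and 12 both call a one_step_attention helper that is not shown on this page. The sketch below follows the standard additive-attention recipe that the shared layers above suggest; it is an assumption, not the original implementation:

def one_step_attention(encoder_outputs, s_prev, repeator, concatenator,
                       densor, activator, dotor):
    # (batch, hidden) -> (batch, time_step, hidden): one copy per encoder step
    s_prev = repeator(s_prev)
    # Score each encoder step against the previous decoder state
    concat = concatenator([encoder_outputs, s_prev])
    energies = densor(concat)        # (batch, time_step, 1)
    alphas = activator(energies)     # attention weights over the time axis
    # Weighted sum of encoder outputs: (batch, 1, hidden)
    context = dotor([alphas, encoder_outputs])
    return context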
Example 12: seq2seq_attention
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def seq2seq_attention(feature_len=1, after_day=1, input_shape=(20, 1), time_step=20):
    # Define the model inputs: the sequence plus the initial decoder states
    X = Input(shape=input_shape)
    s0 = Input(shape=(100,), name='s0')
    c0 = Input(shape=(100,), name='c0')
    s = s0
    c = c0

    # Collect one decoder output per forecast step
    all_outputs = []

    # Encoder: pre-attention LSTM (states are not returned in this variant)
    encoder = LSTM(units=100, return_state=False, return_sequences=True, name='encoder')
    # Decoder: post-attention LSTM
    decoder = LSTM(units=100, return_state=True, name='decoder')
    # Output
    decoder_output = Dense(units=feature_len, activation='linear', name='output')
    model_output = Reshape((1, feature_len))

    # Attention components, shared across decoding steps
    repeator = RepeatVector(time_step)
    concatenator = Concatenate(axis=-1)
    densor = Dense(1, activation='relu')
    activator = Activation(softmax, name='attention_weights')
    dotor = Dot(axes=1)

    encoder_outputs = encoder(X)

    for t in range(after_day):
        context = one_step_attention(encoder_outputs, s, repeator, concatenator,
                                     densor, activator, dotor)
        a, s, c = decoder(context, initial_state=[s, c])
        outputs = decoder_output(a)
        outputs = model_output(outputs)
        all_outputs.append(outputs)

    all_outputs = Lambda(lambda x: K.concatenate(x, axis=1))(all_outputs)
    model = Model(inputs=[X, s0, c0], outputs=all_outputs)
    return model
Example 13: call
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def call(self, inputs, mask=None):
    # Import (symbolic) dimensions
    max_atoms = K.shape(inputs)[1]

    # Build a (batch, features) tensor of ones, then apply dropout once per sample.
    # By [farizrahman4u](https://github.com/fchollet/keras/issues/3995)
    ones = layers.Lambda(lambda x: (x * 0 + 1)[:, 0, :],
                         output_shape=lambda s: (s[0], s[2]))(inputs)
    dropped = self.dropout_layer(ones)
    # Repeat the mask across the atom axis so every timestep is masked identically
    dropped = layers.RepeatVector(max_atoms)(dropped)
    return layers.Lambda(lambda x: x[0] * x[1],
                         output_shape=lambda s: s[0])([inputs, dropped])
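For reference, the same shared-mask trick can be written with a fixed sequence length, which makes the shapes easier to trace; the sizes and the 0.5 dropout rate below are illustrative assumptions, not from the original layer:

from keras import layers
from keras.layers import Input
from keras.models import Model

max_atoms, num_features = 4, 3
inputs = Input(shape=(max_atoms, num_features))
# One mask per sample: ones of shape (batch, num_features), then Dropout
ones = layers.Lambda(lambda x: (x * 0 + 1)[:, 0, :])(inputs)
dropped = layers.Dropout(0.5)(ones)
# The identical mask is applied at every timestep
dropped = layers.RepeatVector(max_atoms)(dropped)
outputs = layers.Multiply()([inputs, dropped])
model = Model(inputs, outputs)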
Example 14: test_repeat_vector
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def test_repeat_vector():
    layer_test(layers.RepeatVector,
               kwargs={'n': 3},
               input_shape=(3, 2))
Example 15: test_sequential_model_saving
# Required import: from keras import layers [as alias]
# Or: from keras.layers import RepeatVector [as alias]
def test_sequential_model_saving():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    model.add(RepeatVector(3))
    model.add(TimeDistributed(Dense(3)))
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy],
                  sample_weight_mode='temporal')

    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same for both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)

    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)