This article collects typical usage examples of the Python keras.layers.Merge layer. If you have been wondering what layers.Merge does and how to use it, the curated examples below may help; you can also explore the containing module, keras.layers, for related layers. Note that Merge is a legacy layer from Keras 1.x for combining the outputs of several models or branches (by concatenation, element-wise sum, product, and so on); it was deprecated and later removed in Keras 2, where the Concatenate/Add/Multiply layers and the concatenate/add/multiply helpers replace it.
A total of 15 code examples of layers.Merge are shown below, ordered by popularity by default.
Example 1: create_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def create_model(layer_sizes1, layer_sizes2, input_size1, input_size2,
                 learning_rate, reg_par, outdim_size, use_all_singular_values):
    """
    Builds the whole model.
    The structure of each sub-network is defined in build_mlp_net;
    it can easily be substituted with a more powerful network such as a CNN.
    """
    view1_model = build_mlp_net(layer_sizes1, input_size1, reg_par)
    view2_model = build_mlp_net(layer_sizes2, input_size2, reg_par)

    model = Sequential()
    model.add(Merge([view1_model, view2_model], mode='concat'))

    model_optimizer = RMSprop(lr=learning_rate)
    model.compile(loss=cca_loss(outdim_size, use_all_singular_values), optimizer=model_optimizer)
    return model
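The Merge layer is no longer available in current Keras 2 releases. As a minimal sketch of the same two-view concatenation under Keras 2 (assuming build_mlp_net returns a Keras Model and cca_loss is defined as in the example; create_model_v2 is a hypothetical name, not part of the original code):

# Hypothetical Keras 2 rewrite of Example 1.
from keras.layers import Concatenate
from keras.models import Model
from keras.optimizers import RMSprop

def create_model_v2(layer_sizes1, layer_sizes2, input_size1, input_size2,
                    learning_rate, reg_par, outdim_size, use_all_singular_values):
    view1_model = build_mlp_net(layer_sizes1, input_size1, reg_par)
    view2_model = build_mlp_net(layer_sizes2, input_size2, reg_par)
    # Concatenate the two branch outputs along the last (feature) axis.
    merged = Concatenate()([view1_model.output, view2_model.output])
    model = Model(inputs=[view1_model.input, view2_model.input], outputs=merged)
    model.compile(loss=cca_loss(outdim_size, use_all_singular_values),
                  optimizer=RMSprop(lr=learning_rate))
    return model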
Example 2: emoji2vec_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def emoji2vec_model(embedding_matrix, emoji_vocab_size, word_vocab_size):
    # Emoji branch: a trainable embedding looked up for a single emoji id.
    emoji_model = Sequential()
    emoji_model.add(Embedding(emoji_vocab_size + 1, embedding_dim, input_length=1, trainable=True))
    emoji_model.add(Reshape((embedding_dim,)))

    # Word branch: frozen pretrained embeddings followed by a bidirectional LSTM.
    word_model = Sequential()
    word_model.add(Embedding(word_vocab_size + 1, embedding_dim, weights=[embedding_matrix],
                             input_length=maximum_length, trainable=False))
    word_model.add(Bidirectional(LSTM(embedding_dim, dropout=0.5), merge_mode='sum'))

    # Concatenate the two branches and classify.
    model = Sequential()
    model.add(Merge([emoji_model, word_model], mode='concat'))
    model.add(Dense(embedding_dim * 2, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    return emoji_model, word_model, model

# Solely based on emoji descriptions, obtain the emoji2vec representations for all possible emojis
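Because the combined model starts with Merge([emoji_model, word_model]), it expects a list of two inputs, one per branch. A short illustrative usage sketch (the array names, shapes, and training settings below are made up for illustration; embedding_dim, maximum_length, and the vocabulary sizes are assumed to be in scope as in the example):

# Illustrative training call for the merged model returned above (Keras 1.x API).
import numpy as np

emoji_model, word_model, model = emoji2vec_model(embedding_matrix, emoji_vocab_size, word_vocab_size)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

emoji_ids = np.random.randint(0, emoji_vocab_size + 1, size=(1000, 1))             # one emoji id per sample
word_ids = np.random.randint(0, word_vocab_size + 1, size=(1000, maximum_length))  # padded word ids
labels = np.eye(2)[np.random.randint(0, 2, size=1000)]                             # one-hot binary labels

# One input array per branch, in the same order as the Merge list.
model.fit([emoji_ids, word_ids], labels, batch_size=32, nb_epoch=5)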
Example 3: sequential_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def sequential_model(input_dim_x1=1596, input_dim_x2=10, weights_path=None):
    left_branch = Sequential()
    left_branch.add(Dense(4096, activation='relu', init=my_init, input_dim=input_dim_x1))
    left_branch.add(Dropout(0.5))
    left_branch.add(Dense(2048, activation='relu', init=my_init))
    left_branch.add(Dropout(0.5))
    left_branch.add(Dense(512, activation='relu', init=my_init))

    right_branch = Sequential()
    right_branch.add(Dense(512, activation='relu', init=my_init, input_dim=input_dim_x2))

    merged = Merge([left_branch, right_branch], mode='concat')

    final_model = Sequential()
    final_model.add(merged)
    final_model.add(Dense(12))
    final_model.add(Activation('softmax'))
    if weights_path:
        final_model.load_weights(weights_path)
    return final_model
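In Keras 2 the same two-branch MLP is usually written with the functional API, and the Keras 1 argument init= becomes kernel_initializer=. A minimal sketch, assuming my_init is compatible with Keras 2 initializers (functional_model is a hypothetical name):

# Hypothetical Keras 2 version of sequential_model.
from keras.layers import Input, Dense, Dropout, Concatenate
from keras.models import Model

def functional_model(input_dim_x1=1596, input_dim_x2=10, weights_path=None):
    inp1 = Input(shape=(input_dim_x1,))
    x = Dense(4096, activation='relu', kernel_initializer=my_init)(inp1)
    x = Dropout(0.5)(x)
    x = Dense(2048, activation='relu', kernel_initializer=my_init)(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu', kernel_initializer=my_init)(x)

    inp2 = Input(shape=(input_dim_x2,))
    y = Dense(512, activation='relu', kernel_initializer=my_init)(inp2)

    merged = Concatenate()([x, y])
    out = Dense(12, activation='softmax')(merged)
    model = Model([inp1, inp2], out)
    if weights_path:
        model.load_weights(weights_path)
    return model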
Example 4: create_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def create_model(self, ret_model=False):
    # Image branch: project the 4096-d image feature and repeat it for every timestep.
    image_model = Sequential()
    image_model.add(Dense(EMBEDDING_DIM, input_dim=4096, activation='relu'))
    image_model.add(RepeatVector(self.max_length))

    # Language branch: embed the partial caption and run it through an LSTM.
    lang_model = Sequential()
    lang_model.add(Embedding(self.vocab_size, 256, input_length=self.max_length))
    lang_model.add(LSTM(256, return_sequences=True))
    lang_model.add(TimeDistributed(Dense(EMBEDDING_DIM)))

    # Concatenate both branches and decode the next word.
    model = Sequential()
    model.add(Merge([image_model, lang_model], mode='concat'))
    model.add(LSTM(1000, return_sequences=False))
    model.add(Dense(self.vocab_size))
    model.add(Activation('softmax'))
    print("Model created!")

    if ret_model:
        return model
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
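A hedged Keras 2 sketch of the same image-captioning fusion: the repeated image embedding and the per-timestep caption embedding are concatenated along the feature axis before the decoder LSTM (a hypothetical rewrite reusing EMBEDDING_DIM, self.max_length, and self.vocab_size from the example):

# Hypothetical Keras 2 rewrite of the captioning model above.
from keras.layers import Input, Dense, RepeatVector, Embedding, LSTM, TimeDistributed, Concatenate
from keras.models import Model

def create_model_functional(self):
    img_input = Input(shape=(4096,))
    img_emb = RepeatVector(self.max_length)(Dense(EMBEDDING_DIM, activation='relu')(img_input))

    cap_input = Input(shape=(self.max_length,))
    cap_emb = Embedding(self.vocab_size, 256, input_length=self.max_length)(cap_input)
    cap_emb = TimeDistributed(Dense(EMBEDDING_DIM))(LSTM(256, return_sequences=True)(cap_emb))

    # (max_length, EMBEDDING_DIM) + (max_length, EMBEDDING_DIM) -> (max_length, 2*EMBEDDING_DIM)
    merged = Concatenate()([img_emb, cap_emb])
    out = Dense(self.vocab_size, activation='softmax')(LSTM(1000)(merged))

    model = Model([img_input, cap_input], out)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model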
Example 5: call

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def call(self, x, mask=None):
    layer_output = self.layer.call(x, mask)
    # Lazily turn a string mode (e.g. 'sum') into a Merge instance on first use.
    if isinstance(self.merge_mode, str):
        self.merge_mode = Merge(mode=self.merge_mode)
    output = self.merge_mode([x, layer_output])
    return output
Example 6: get_config

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def get_config(self):
    config = {'merge_mode': {'class_name': 'Merge',
                             'config': self.merge_mode.get_config()}}
    base_config = super(Residual, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
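Examples 5 and 6 come from a Residual wrapper layer that stores its merge mode as a Merge instance. In Keras 2 the same residual combination is usually expressed with the add helper instead of a Merge object; a minimal sketch of the forward pass only, assuming merge_mode='sum', matching shapes, and that the wrapped layer's call still accepts a mask argument:

# Hypothetical Keras 2 version of the residual combination in call().
from keras.layers import add

def call(self, x, mask=None):
    layer_output = self.layer.call(x, mask)
    # Element-wise sum of the input and the wrapped layer's output (the 'sum' merge mode).
    return add([x, layer_output])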
Example 7: prepare_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def prepare_model(ninputs=9600, n_feats=45, nclass=4, n_tfidf=10001):
    inp1 = Input(shape=(ninputs,))
    inp2 = Input(shape=(n_feats,))
    inp3 = Input(shape=(n_tfidf,))
    reg = 0.00005
    out_neurons1 = 500
    #out_neurons2 = 20
    #out_neurons2 = 10

    # Branch 1: sentence-embedding features.
    m1 = Dense(input_dim=ninputs, output_dim=out_neurons1, activation='sigmoid',
               kernel_regularizer=regularizers.l2(0.00000001))(inp1)
    m1 = Dropout(0.2)(m1)
    m1 = Dense(100, activation='sigmoid')(m1)
    #m1 = Dropout(0.2)(m1)
    #m1 = Dense(4, activation='sigmoid')(m1)

    # Branch 2: hand-crafted features.
    #m2 = Dense(input_dim=n_feats, output_dim=n_feats, activation='relu')(inp2)
    m2 = Dense(50, activation='relu')(inp2)
    #m2 = Dense(4, activation='relu')(m2)

    # Branch 3: TF-IDF features.
    m3 = Dense(500, input_dim=n_tfidf, activation='relu',
               kernel_regularizer=regularizers.l2(reg))(inp3)
    m3 = Dropout(0.4)(m3)
    m3 = Dense(50, activation='relu')(m3)
    #m3 = Dropout(0.4)(m3)
    #m3 = Dense(4, activation='softmax')(m3)
    #m1 = Dense(input_dim=ninputs, output_dim=out_neurons2, activation='sigmoid')(m1)
    #m2 = Dense(input_dim=ninputs, output_dim=out_neurons2, activation='softmax')(m2)

    # Concatenate the three branches and classify.
    m = Merge(mode='concat')([m1, m2, m3])
    #mul = Multiply()([m1, m2])
    #add = Abs()([m1, m2])
    #m = Merge(mode='concat')([mul, add])
    score = Dense(output_dim=nclass, activation='softmax')(m)
    model = Model([inp1, inp2, inp3], score)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
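Here Merge(mode='concat') is applied directly to tensors rather than to Sequential models. In Keras 2 the corresponding helper is concatenate, and output_dim becomes units; a minimal sketch of just the fusion and output head, reusing m1, m2, m3, and the inputs defined in prepare_model:

# Hypothetical Keras 2 replacement for the last lines of prepare_model.
from keras.layers import concatenate, Dense
from keras.models import Model

m = concatenate([m1, m2, m3])                   # replaces Merge(mode='concat')([m1, m2, m3])
score = Dense(nclass, activation='softmax')(m)  # units instead of output_dim
model = Model([inp1, inp2, inp3], score)
model.compile(loss='categorical_crossentropy', optimizer='adam')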
Example 8: create_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def create_model(self, ret_model=False):
    #base_model = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    #base_model.trainable = False

    # Image branch: precomputed 4096-d features, repeated for every caption timestep.
    image_model = Sequential()
    #image_model.add(base_model)
    #image_model.add(Flatten())
    image_model.add(Dense(EMBEDDING_DIM, input_dim=4096, activation='relu'))
    image_model.add(RepeatVector(self.max_cap_len))

    # Language branch.
    lang_model = Sequential()
    lang_model.add(Embedding(self.vocab_size, 256, input_length=self.max_cap_len))
    lang_model.add(LSTM(256, return_sequences=True))
    lang_model.add(TimeDistributed(Dense(EMBEDDING_DIM)))

    # Concatenate both branches and decode the next word.
    model = Sequential()
    model.add(Merge([image_model, lang_model], mode='concat'))
    model.add(LSTM(1000, return_sequences=False))
    model.add(Dense(self.vocab_size))
    model.add(Activation('softmax'))
    print("Model created!")

    if ret_model:
        return model
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
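The commented-out lines hint at computing the image features with a pretrained CNN instead of feeding precomputed 4096-d vectors. A hedged sketch of such a feature-extractor branch built on a frozen VGG16 backbone; it is not part of the original code, and EMBEDDING_DIM and max_cap_len are taken from the example above:

# Illustrative image branch with a frozen VGG16 backbone (sketch only).
from keras.applications import VGG16
from keras.layers import Flatten, Dense, RepeatVector
from keras.models import Sequential

base_model = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
base_model.trainable = False  # keep the pretrained convolutional weights fixed

image_model = Sequential()
image_model.add(base_model)
image_model.add(Flatten())
image_model.add(Dense(EMBEDDING_DIM, activation='relu'))  # EMBEDDING_DIM as in the example
image_model.add(RepeatVector(max_cap_len))                # max_cap_len as in the example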
Example 9: vqa_model

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def vqa_model(embedding_matrix, num_words, embedding_dim, seq_length, dropout_rate, num_classes):
    vgg_model = img_model(dropout_rate)
    lstm_model = Word2VecModel(embedding_matrix, num_words, embedding_dim, seq_length, dropout_rate)
    print("Merging final model...")

    # Fuse the image and question representations with an element-wise product.
    fc_model = Sequential()
    fc_model.add(Merge([vgg_model, lstm_model], mode='mul'))
    fc_model.add(Dropout(dropout_rate))
    fc_model.add(Dense(1000, activation='tanh'))
    fc_model.add(Dropout(dropout_rate))
    fc_model.add(Dense(num_classes, activation='softmax'))
    fc_model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
                     metrics=['accuracy'])
    return fc_model
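mode='mul' performs an element-wise product of the image and question representations. In Keras 2 this maps to the Multiply layer (or the multiply helper); a minimal sketch of the fusion head, assuming vgg_model and lstm_model are built models with matching output shapes:

# Hypothetical Keras 2 fusion for vqa_model.
from keras.layers import Multiply, Dropout, Dense
from keras.models import Model

merged = Multiply()([vgg_model.output, lstm_model.output])
x = Dropout(dropout_rate)(merged)
x = Dense(1000, activation='tanh')(x)
x = Dropout(dropout_rate)(x)
out = Dense(num_classes, activation='softmax')(x)

fc_model = Model([vgg_model.input, lstm_model.input], out)
fc_model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])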
Example 10: basic_mlp

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def basic_mlp(img_vec_dim, vocabulary_size, word_emb_dim,
              max_ques_length, num_hidden_units_lstm,
              num_hidden_layers_mlp, num_hidden_units_mlp,
              dropout, nb_classes, class_activation):
    # Image model
    model_image = Sequential()
    model_image.add(Reshape((img_vec_dim,), input_shape=(img_vec_dim,)))

    # Language model
    model_language = Sequential()
    model_language.add(Embedding(vocabulary_size, word_emb_dim, input_length=max_ques_length))
    model_language.add(LSTM(num_hidden_units_lstm, return_sequences=True, input_shape=(max_ques_length, word_emb_dim)))
    model_language.add(LSTM(num_hidden_units_lstm, return_sequences=True))
    model_language.add(LSTM(num_hidden_units_lstm, return_sequences=False))

    # Combined model
    model = Sequential()
    model.add(Merge([model_language, model_image], mode='concat', concat_axis=1))
    for i in range(num_hidden_layers_mlp):
        model.add(Dense(num_hidden_units_mlp))
        model.add(Dropout(dropout))
    model.add(Dense(nb_classes))
    model.add(Activation(class_activation))
    return model
Example 11: deeper_lstm

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def deeper_lstm(img_vec_dim, activation_1, activation_2, dropout, vocabulary_size,
                num_hidden_units_lstm, max_ques_length,
                word_emb_dim, num_hidden_layers_mlp,
                num_hidden_units_mlp, nb_classes, class_activation, embedding_matrix):
    # Image model
    inpx1 = Input(shape=(img_vec_dim,))
    x1 = Dense(1024, activation=activation_1)(inpx1)
    x1 = Dropout(dropout)(x1)
    image_model = Model([inpx1], x1)
    image_model.summary()

    # Language model
    inpx0 = Input(shape=(max_ques_length,))
    x0 = Embedding(vocabulary_size, word_emb_dim, weights=[embedding_matrix], trainable=False)(inpx0)
    x1 = LSTM(num_hidden_units_lstm, return_sequences=True)(x0)
    x1 = LSTM(num_hidden_units_lstm, return_sequences=True)(x1)
    x2 = LSTM(num_hidden_units_lstm, return_sequences=False)(x1)
    x2 = Dense(1024, activation=activation_2)(x2)
    x2 = Dropout(dropout)(x2)
    embedding_model = Model([inpx0], x2)
    embedding_model.summary()

    # Combined model: element-wise product of the two branches, then an MLP classifier.
    model = Sequential()
    model.add(Merge([image_model, embedding_model], mode='mul'))
    for i in range(num_hidden_layers_mlp):
        model.add(Dense(num_hidden_units_mlp))
        model.add(Activation(activation_1))
        model.add(Dropout(dropout))
    model.summary()
    model.add(Dense(nb_classes))
    model.add(Activation(class_activation))
    return model
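The frozen, pretrained embedding written above as Embedding(..., weights=[embedding_matrix], trainable=False) can also be spelled with a constant initializer in Keras 2. A small sketch, assuming embedding_matrix has shape (vocabulary_size, word_emb_dim) and inpx0 is the question input as in the example:

# Alternative Keras 2 spelling of the frozen pretrained embedding layer (sketch).
from keras.initializers import Constant
from keras.layers import Embedding

x0 = Embedding(vocabulary_size, word_emb_dim,
               embeddings_initializer=Constant(embedding_matrix),
               trainable=False)(inpx0)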
Example 12: visual_lstm

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def visual_lstm(img_vec_dim, activation_1, activation_2, dropout, vocabulary_size,
                num_hidden_units_lstm, max_ques_length,
                word_emb_dim, num_hidden_layers_mlp,
                num_hidden_units_mlp, nb_classes, class_activation, embedding_matrix):
    # Image model: project the image vector into the word-embedding space as a single "token".
    inpx1 = Input(shape=(img_vec_dim,))
    x1 = Dense(embedding_matrix.shape[1], activation='tanh')(inpx1)
    x1 = Reshape((1, embedding_matrix.shape[1]))(x1)
    image_model = Model([inpx1], x1)
    image_model.summary()

    # Language model: frozen pretrained word embeddings, projected to the same dimension.
    inpx0 = Input(shape=(max_ques_length,))
    x0 = Embedding(vocabulary_size, word_emb_dim, weights=[embedding_matrix], trainable=False)(inpx0)
    x2 = Dense(embedding_matrix.shape[1], activation='tanh')(x0)
    x2 = Dropout(dropout)(x2)
    embedding_model = Model([inpx0], x2)
    embedding_model.summary()

    # Combined model: concatenate along the time axis and read the sequence with an LSTM.
    model = Sequential()
    model.add(Merge([image_model, embedding_model], mode='concat', concat_axis=1))
    model.add(LSTM(num_hidden_units_lstm, return_sequences=False, go_backwards=True))
    model.add(Dense(num_hidden_units_mlp))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.summary()
    model.add(Dense(nb_classes))
    model.add(Activation(class_activation))
    return model
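Here concat_axis=1 stacks the image "token" in front of the question tokens along the time axis. In Keras 2 the corresponding argument is axis on Concatenate; a minimal sketch of the fusion and classifier, reusing the two functional models defined above:

# Hypothetical Keras 2 fusion along the time axis (axis=1 replaces concat_axis=1).
from keras.layers import Concatenate, LSTM, Dense, Activation, Dropout
from keras.models import Model

merged_seq = Concatenate(axis=1)([image_model.output, embedding_model.output])
h = LSTM(num_hidden_units_lstm, return_sequences=False, go_backwards=True)(merged_seq)
h = Dropout(dropout)(Activation('relu')(Dense(num_hidden_units_mlp)(h)))
out = Activation(class_activation)(Dense(nb_classes)(h))
model = Model([image_model.input, embedding_model.input], out)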
Example 13: visual_lstm2

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def visual_lstm2(img_vec_dim, activation_1, activation_2, dropout, vocabulary_size,
                 num_hidden_units_lstm, max_ques_length,
                 word_emb_dim, num_hidden_layers_mlp,
                 num_hidden_units_mlp, nb_classes, class_activation, embedding_matrix):
    # First image model
    inpx1 = Input(shape=(img_vec_dim,))
    x1 = Dense(embedding_matrix.shape[1], activation=activation_1)(inpx1)
    x1 = Reshape((1, embedding_matrix.shape[1]))(x1)
    image_model = Model([inpx1], x1)
    image_model.summary()

    # Language model
    inpx0 = Input(shape=(max_ques_length,))
    x0 = Embedding(vocabulary_size, word_emb_dim, weights=[embedding_matrix], trainable=False)(inpx0)
    x2 = Dense(embedding_matrix.shape[1], activation=activation_2)(x0)
    x2 = Dropout(dropout)(x2)
    embedding_model = Model([inpx0], x2)
    embedding_model.summary()

    # Second image model, built on its own input (inpx2)
    inpx2 = Input(shape=(img_vec_dim,))
    x3 = Dense(embedding_matrix.shape[1], activation=activation_1)(inpx2)
    x3 = Reshape((1, embedding_matrix.shape[1]))(x3)
    image_model2 = Model([inpx2], x3)
    image_model2.summary()

    # Combined model: image / question / image along the time axis, read by a bidirectional LSTM.
    model = Sequential()
    model.add(Merge([image_model, embedding_model, image_model2], mode='concat', concat_axis=1))
    model.add(Bidirectional(LSTM(num_hidden_units_lstm, return_sequences=False)))
    model.add(Dense(num_hidden_units_mlp))
    model.add(Activation(activation_1))
    model.add(Dropout(dropout))
    model.summary()
    model.add(Dense(nb_classes))
    model.add(Activation(class_activation))
    return model
Example 14: main

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def main():
    df_train = load('adult.data')
    df_test = load('adult.test')
    df = pd.concat([df_train, df_test])
    train_len = len(df_train)
    X, y = preprocess(df)
    X_train = X[:train_len]
    y_train = y[:train_len]
    X_test = X[train_len:]
    y_test = y[train_len:]

    # Wide part: a single linear layer over the raw features.
    wide = Sequential()
    wide.add(Dense(1, input_dim=X_train.shape[1]))

    # Deep part: a small MLP over the same features.
    deep = Sequential()
    # TODO: add embedding
    deep.add(Dense(input_dim=X_train.shape[1], output_dim=100, activation='relu'))
    deep.add(Dense(100, activation='relu'))
    deep.add(Dense(50, activation='relu'))
    deep.add(Dense(1, activation='sigmoid'))

    # Wide & deep: concatenate both parts and add the final sigmoid output.
    model = Sequential()
    model.add(Merge([wide, deep], mode='concat', concat_axis=1))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(
        optimizer='rmsprop',
        loss='binary_crossentropy',
        metrics=['accuracy']
    )

    # The merged model expects one input per branch, so X_train is passed twice.
    model.fit([X_train, X_train], y_train, nb_epoch=10, batch_size=32)
    loss, accuracy = model.evaluate([X_test, X_test], y_test)
    print('\n', 'test accuracy:', accuracy)
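The wide & deep model above feeds the same X_train to both branches. A hedged Keras 2 functional sketch of the same structure with a single shared Input, so fit takes one array instead of [X_train, X_train]; note that nb_epoch also becomes epochs in Keras 2 (this rewrite is not part of the original code):

# Hypothetical Keras 2 wide & deep with a shared input.
from keras.layers import Input, Dense, concatenate
from keras.models import Model

inp = Input(shape=(X_train.shape[1],))
wide = Dense(1)(inp)
deep = Dense(100, activation='relu')(inp)
deep = Dense(100, activation='relu')(deep)
deep = Dense(50, activation='relu')(deep)
deep = Dense(1, activation='sigmoid')(deep)

out = Dense(1, activation='sigmoid')(concatenate([wide, deep]))
model = Model(inp, out)
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=10, batch_size=32)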
Example 15: train

# Required import: from keras import layers [as alias]
# Or: from keras.layers import Merge [as alias]
def train(x_1_train, x_2_train, y_train, x_1_test, x_2_test, y_test, model_path):
    batch_size = 16
    model_name = 'layer_3_with_dropout_0.5_dropoutWU_0.4_maxlen_200_epoch_300_feature_completion.h5'

    # Left branch: a dense layer over the static feature vector.
    left_branch = Sequential()
    left_branch.add(Dense(32, input_dim=x_1_train.shape[1]))
    # left_branch.add(Flatten())

    # Right branch: stacked LSTMs over the sequence input.
    right_branch = Sequential()
    right_branch.add(LSTM(128, return_sequences=True, dropout_U=0.4, dropout_W=0.4, input_shape=x_2_train.shape[1:3]))
    right_branch.add(Dropout(0.5))
    right_branch.add(LSTM(64, dropout_U=0.4, dropout_W=0.4))
    right_branch.add(Dropout(0.5))
    right_branch.add(Dense(32))

    # Concatenate both branches and classify into 12 classes.
    merged = Merge([left_branch, right_branch], mode='concat')
    final_model = Sequential()
    final_model.add(merged)
    final_model.add(Dense(12))
    final_model.add(Activation('softmax'))
    final_model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    plot(final_model, to_file=model_path + model_name[:-3] + '.png', show_shapes=True)
    plot(right_branch, to_file=model_path + model_name[:-3] + '_right_branch.png', show_shapes=True)
    plot(left_branch, to_file=model_path + model_name[:-3] + '_left_branch.png', show_shapes=True)

    final_model.fit([x_1_train, x_2_train], y_train, batch_size=batch_size, nb_epoch=300,
                    validation_data=([x_1_test, x_2_test], y_test))
    final_model.save(model_path + model_name)

    score, acc = final_model.evaluate([x_1_test, x_2_test], y_test, batch_size=batch_size)
    print('Test Score', score)
    print('Test Accuracy', acc)
    return model_path + model_name
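dropout_W and dropout_U are Keras 1 LSTM arguments; in Keras 2 they become dropout and recurrent_dropout (and nb_epoch becomes epochs). A minimal sketch of just the recurrent branch under the Keras 2 API, with the hyperparameters copied from the example above:

# Keras 2 spelling of the right (sequence) branch (sketch only).
from keras.layers import LSTM, Dropout, Dense
from keras.models import Sequential

right_branch = Sequential()
right_branch.add(LSTM(128, return_sequences=True, dropout=0.4, recurrent_dropout=0.4,
                      input_shape=x_2_train.shape[1:3]))
right_branch.add(Dropout(0.5))
right_branch.add(LSTM(64, dropout=0.4, recurrent_dropout=0.4))
right_branch.add(Dropout(0.5))
right_branch.add(Dense(32))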