This article collects typical usage examples of the Python method keras.layers.merge.Concatenate. If you have been wondering what merge.Concatenate does, how to call it, or where to find concrete examples, the curated code samples below may help. You can also browse further usage examples for keras.layers.merge, the module in which this layer lives.
Below are 15 code examples of merge.Concatenate, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
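Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; names and shapes are purely illustrative) of the usual import and call pattern for Concatenate in the Keras 2 functional API:

import numpy as np
from keras.layers import Input, Dense
from keras.layers.merge import Concatenate
from keras.models import Model

# Two inputs of different widths; Concatenate joins them along the last axis by default.
a = Input(shape=(16,))
b = Input(shape=(8,))
merged = Concatenate(axis=-1)([a, b])          # shape: (batch, 24)
out = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=[a, b], outputs=out)
model.summary()

All inputs to Concatenate must have the same shape except along the concatenation axis, which defaults to -1 (the feature/channel axis for channels_last data).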
Example 1: build_pyramid_pooling_module
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def build_pyramid_pooling_module(res, input_shape):
    """Build the Pyramid Pooling Module."""
    # ---PSPNet concat layers with Interpolation
    feature_map_size = tuple(int(ceil(input_dim / 8.0))
                             for input_dim in input_shape)
    print("PSP module will interpolate to a final feature map size of %s" %
          (feature_map_size, ))

    interp_block1 = interp_block(res, 1, feature_map_size, input_shape)
    interp_block2 = interp_block(res, 2, feature_map_size, input_shape)
    interp_block3 = interp_block(res, 3, feature_map_size, input_shape)
    interp_block6 = interp_block(res, 6, feature_map_size, input_shape)

    # concat all these layers. resulted
    # shape=(1, feature_map_size_x, feature_map_size_y, 4096)
    res = Concatenate()([res,
                         interp_block6,
                         interp_block3,
                         interp_block2,
                         interp_block1])
    return res
Example 2: build_pyramid_pooling_module
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def build_pyramid_pooling_module(res, input_shape):
    """Build the Pyramid Pooling Module."""
    # ---PSPNet concat layers with Interpolation
    feature_map_size = tuple(int(ceil(input_dim / 8.0))
                             for input_dim in input_shape)

    interp_block1 = interp_block(res, 1, feature_map_size, input_shape)
    interp_block2 = interp_block(res, 2, feature_map_size, input_shape)
    interp_block3 = interp_block(res, 3, feature_map_size, input_shape)
    interp_block6 = interp_block(res, 6, feature_map_size, input_shape)

    # concat all these layers. resulted
    # shape=(1, feature_map_size_x, feature_map_size_y, 4096)
    res = Concatenate()([res,
                         interp_block6,
                         interp_block3,
                         interp_block2,
                         interp_block1])
    return res
Example 3: build
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def build(self, vocabs=None):
    if self._keras_model:
        return
    if vocabs is None and self._word_vector_init is not None:
        raise ValueError('If word_vector_init is not None, build method '
                         'must be called with vocabs that are not None!')

    image_input, image_embedding = self._build_image_embedding()
    sentence_input, word_embedding = self._build_word_embedding(vocabs)
    sequence_input = Concatenate(axis=1)([image_embedding, word_embedding])
    sequence_output = self._build_sequence_model(sequence_input)

    model = Model(inputs=[image_input, sentence_input],
                  outputs=sequence_output)
    model.compile(optimizer=Adam(lr=self._learning_rate, clipnorm=5.0),
                  loss=categorical_crossentropy_from_logits,
                  metrics=[categorical_accuracy_with_variable_timestep])

    self._keras_model = model
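The axis=1 concatenation here joins the image embedding and the word embeddings along the time dimension, so the image acts as the first "word" of the sequence. A standalone sketch of that pattern (shapes chosen purely for illustration, not from the project above):

from keras.layers import Input
from keras.layers.merge import Concatenate

image_embedding = Input(shape=(1, 256))    # one image step, 256-dimensional
word_embedding = Input(shape=(20, 256))    # a 20-step caption, 256-dimensional
sequence_input = Concatenate(axis=1)([image_embedding, word_embedding])  # (batch, 21, 256)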
Example 4: dense_block
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1E-4):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones

    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns: keras tensor with nb_layers of conv_block appended
    '''
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    feature_list = [x]

    for i in range(nb_layers):
        x = conv_block(x, growth_rate, dropout_rate, weight_decay)
        feature_list.append(x)
        x = Concatenate(axis=concat_axis)(feature_list)
        nb_filter += growth_rate

    return x, nb_filter
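dense_block relies on a conv_block helper that is not shown in this snippet. The sketch below illustrates what such a block commonly looks like in DenseNet-style Keras code (BatchNormalization, ReLU, 3x3 convolution, optional dropout); treat it as an assumption, not the source project's actual implementation:

from keras import backend as K
from keras.layers import Activation, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2

def conv_block(x, nb_filter, dropout_rate=None, weight_decay=1E-4):
    # Hypothetical DenseNet-style composite layer: BN -> ReLU -> 3x3 conv (-> dropout).
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1
    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filter, (3, 3), padding='same', use_bias=False,
                      kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x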
Example 5: GetLSTMEncoder
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def GetLSTMEncoder(xin, uin, dense_size, lstm_size, dense_layers=1,
                   lstm_layers=1):
    '''
    Get LSTM encoder.
    '''
    x = xin
    for _ in xrange(dense_layers):
        if uin is not None:
            x = Concatenate(axis=-1)([x, uin])
        x = TimeDistributed(Dense(dense_size))(x)
        x = TimeDistributed(Activation('relu'))(x)
    for i in xrange(lstm_layers):
        if i == lstm_layers - 1:
            sequence_out = False
        else:
            sequence_out = True
        # sequence_out = True
        x = LSTM(lstm_size, return_sequences=sequence_out)(x)
        x = Activation('relu')(x)
    return x
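Note that this helper is Python 2 code (it uses xrange). A hypothetical call, assuming time-distributed state and action inputs whose shapes are chosen only for illustration:

from keras.layers import Input

xin = Input(shape=(20, 10))   # 20 timesteps of a 10-dimensional state
uin = Input(shape=(20, 3))    # 20 timesteps of a 3-dimensional action
h = GetLSTMEncoder(xin, uin, dense_size=64, lstm_size=128)  # final hidden vector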
Example 6: MakeJigsawsMultiDecoder
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def MakeJigsawsMultiDecoder(model, decoder, num_images=4, h_dim=(12, 16)):
    '''
    Make multiple images
    '''
    h = Input((h_dim[0], h_dim[1], 64), name="h_in")

    xs = []
    for i in range(num_images):
        xi = h
        xi = AddConv2D(xi, 64, [5, 5], stride=1,
                       dropout_rate=0.)
        xi = AddConv2D(xi, model.encoder_channels, [5, 5], stride=1,
                       dropout_rate=0.)
        xi = decoder(xi)
        img_x = Lambda(
            lambda y: K.expand_dims(y, 1),
            name="img_hypothesis_%d" % i)(xi)
        xs.append(img_x)
    img_out = Concatenate(axis=1)(xs)

    mm = Model(h, img_out, name="multi")
    mm.compile(loss="mae", optimizer=model.getOptimizer())

    return mm
Example 7: ConvolutionLayer
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def ConvolutionLayer(input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20,
                     word_trainable=False, vocab_sz=None, embedding_matrix=None,
                     word_embedding_dim=100, hidden_dim=20, act='relu', init='ones'):
    x = Input(shape=(input_shape,), name='input')
    z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,), name="embedding",
                  weights=[embedding_matrix], trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation=act,
                             strides=1,
                             kernel_initializer=init)(z)
        conv = GlobalMaxPooling1D()(conv)
        conv_blocks.append(conv)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y, name='classifier')
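A hypothetical instantiation of this classifier, with a random matrix standing in for pretrained word vectors (vocabulary size, embedding dimension, and sequence length are illustrative assumptions):

import numpy as np

vocab_sz, emb_dim, seq_len = 5000, 100, 60
embedding_matrix = np.random.uniform(-0.1, 0.1, size=(vocab_sz, emb_dim))
clf = ConvolutionLayer(seq_len, n_classes=3, vocab_sz=vocab_sz,
                       embedding_matrix=embedding_matrix,
                       word_embedding_dim=emb_dim)
clf.compile(optimizer='adam', loss='categorical_crossentropy')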
Example 8: ConvolutionLayer
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def ConvolutionLayer(x, input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20,
                     word_trainable=False, vocab_sz=None, embedding_matrix=None,
                     word_embedding_dim=100, hidden_dim=100, act='relu', init='ones'):
    if embedding_matrix is not None:
        z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,),
                      weights=[embedding_matrix], trainable=word_trainable)(x)
    else:
        z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,), trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation=act,
                             strides=1,
                             kernel_initializer=init)(z)
        conv = GlobalMaxPooling1D()(conv)
        conv_blocks.append(conv)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y)
Example 9: to_multi_gpu
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def to_multi_gpu(model, n_gpus=2):
    """
    Given a keras [model], return an equivalent model which parallelizes
    the computation over [n_gpus] GPUs.

    Each GPU gets a slice of the input batch, applies the model on that slice,
    and later the outputs of the models are concatenated to a single tensor,
    hence the user sees a model that behaves the same as the original.
    """
    with tf.device('/cpu:0'):
        x = Input(model.input_shape[1:], name="input1")

    towers = []
    for g in range(n_gpus):
        with tf.device('/gpu:' + str(g)):
            slice_g = Lambda(slice_batch,
                             lambda shape: shape,
                             arguments={'n_gpus': n_gpus, 'part': g})(x)
            towers.append(model(slice_g))

    with tf.device('/cpu:0'):
        merged = Concatenate(axis=0)(towers)

    return Model(inputs=[x], outputs=[merged])
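to_multi_gpu relies on a slice_batch helper that is not shown in this snippet. The sketch below shows what such a helper typically does (split the batch dimension into n_gpus parts and return part number part); the source project's exact implementation may differ:

from keras import backend as K

def slice_batch(x, n_gpus, part):
    # Return the slice of the batch that belongs to GPU `part`;
    # the last GPU takes any remainder.
    sh = K.shape(x)
    L = sh[0] // n_gpus
    if part == n_gpus - 1:
        return x[part * L:]
    return x[part * L:(part + 1) * L]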
Example 10: conv_embedding
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def conv_embedding(images, output, other_features=[], dropout_rate=0.1,
                   embedding_dropout=0.1, embedding_l2=0.05, constrain_norm=True):
    print("Building conv net")
    x_embedding = architectures.convnet(images, Dense(64, activation='linear'),
                                        dropout_rate=embedding_dropout,
                                        activations='relu',
                                        l2_rate=embedding_l2, constrain_norm=constrain_norm)

    if len(other_features) > 0:
        embedd = Concatenate(axis=1)([x_embedding] + other_features)
    else:
        embedd = x_embedding

    out = architectures.feed_forward_net(embedd, output,
                                         hidden_layers=[32],
                                         dropout_rate=dropout_rate,
                                         activations='relu', constrain_norm=constrain_norm)
    return out
Example 11: build_pyramid_pooling_module
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def build_pyramid_pooling_module(res, input_shape, nb_classes, sigmoid=False, output_size=None):
    """Build the Pyramid Pooling Module."""
    # ---PSPNet concat layers with Interpolation
    feature_map_size = tuple(int(ceil(input_dim / 8.0)) for input_dim in input_shape)
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    print("PSP module will interpolate to a final feature map size of %s" %
          (feature_map_size, ))

    interp_block1 = psp_block(res, 1, feature_map_size, input_shape)
    interp_block2 = psp_block(res, 2, feature_map_size, input_shape)
    interp_block3 = psp_block(res, 3, feature_map_size, input_shape)
    interp_block6 = psp_block(res, 6, feature_map_size, input_shape)

    # concat all these layers. resulted
    res = Concatenate()([interp_block1,
                         interp_block2,
                         interp_block3,
                         interp_block6,
                         res])

    x = Conv2D(512, (1, 1), strides=(1, 1), padding="same", name="class_psp_reduce_conv", use_bias=False)(res)
    x = resnet.BN(bn_axis, name="class_psp_reduce_bn")(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_classes, (1, 1), strides=(1, 1), name="class_psp_final_conv")(x)

    if output_size:
        x = Upsampling(output_size)(x)

    if sigmoid:
        x = Activation('sigmoid')(x)
    return x
Example 12: output_of_lambda2
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def output_of_lambda2(input_shape):
    return (input_shape[0], audio_dim)

# ################################################################################
# # Level 2
# # concatenate level 1 output to be sent to hfusion
# fused_tensor = Concatenate(axis=2)([context_1_2, context_1_3, context_2_3])
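The commented-out line above fuses three context tensors along axis=2, the feature axis of a (batch, timesteps, features) tensor. A standalone sketch of that pattern with purely illustrative shapes (the tensor names echo the fragment above but are not taken from the source project):

from keras.layers import Input
from keras.layers.merge import Concatenate

context_1_2 = Input(shape=(30, 100))
context_1_3 = Input(shape=(30, 100))
context_2_3 = Input(shape=(30, 100))
# (batch, 30, 300): the three pairwise contexts placed side by side per timestep.
fused_tensor = Concatenate(axis=2)([context_1_2, context_1_3, context_2_3])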
Example 13: GAN
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def GAN(g, d, img_size, n_filters_g, n_filters_d, alpha_recip, init_lr, name='gan'):
    """
    GAN (that binds generator and discriminator)
    """
    img_h, img_w = img_size[0], img_size[1]

    img_ch = 3
    seg_ch = 1

    fundus = Input((img_h, img_w, img_ch))
    vessel = Input((img_h, img_w, seg_ch))

    fake_vessel = g(fundus)
    fake_pair = Concatenate(axis=3)([fundus, fake_vessel])

    gan = Model([fundus, vessel], d(fake_pair), name=name)

    def gan_loss(y_true, y_pred):
        y_true_flat = K.batch_flatten(y_true)
        y_pred_flat = K.batch_flatten(y_pred)
        L_adv = objectives.binary_crossentropy(y_true_flat, y_pred_flat)
        # L_adv = objectives.mean_squared_error(y_true_flat, y_pred_flat)

        vessel_flat = K.batch_flatten(vessel)
        fake_vessel_flat = K.batch_flatten(fake_vessel)
        L_seg = objectives.binary_crossentropy(vessel_flat, fake_vessel_flat)
        # L_seg = objectives.mean_absolute_error(vessel_flat, fake_vessel_flat)

        return alpha_recip * L_adv + L_seg

    gan.compile(optimizer=Adam(lr=init_lr, beta_1=0.5), loss=gan_loss, metrics=['accuracy'])

    return gan
Example 14: cnn_model
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def cnn_model(max_len=400,
              vocabulary_size=20000,
              embedding_dim=128,
              hidden_dim=128,
              num_filters=512,
              filter_sizes="3,4,5",
              num_classses=4,
              dropout=0.5):
    print("Creating text CNN Model...")
    # input tensor
    inputs = Input(shape=(max_len,), dtype='int32')
    # embedding
    embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim,
                          input_length=max_len, name="embedding")(inputs)
    # convolution block
    if "," in filter_sizes:
        filter_sizes = filter_sizes.split(",")
    else:
        filter_sizes = [3, 4, 5]
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=int(sz),
                             strides=1,
                             padding='valid',
                             activation='relu')(embedding)
        conv = MaxPooling1D()(conv)
        conv = Flatten()(conv)
        conv_blocks.append(conv)
    conv_concate = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    dropout_layer = Dropout(dropout)(conv_concate)
    output = Dense(hidden_dim, activation='relu')(dropout_layer)
    output = Dense(num_classses, activation='softmax')(output)
    # model
    model = Model(inputs=inputs, outputs=output)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    return model
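A hypothetical training call for this model, with randomly generated data used only to show the expected input and label shapes (batch size and epoch count are arbitrary):

import numpy as np
from keras.utils import to_categorical

model = cnn_model(max_len=400, vocabulary_size=20000, num_classses=4)
x_train = np.random.randint(0, 20000, size=(64, 400))              # 64 integer-encoded texts
y_train = to_categorical(np.random.randint(0, 4, size=(64,)), num_classes=4)
model.fit(x_train, y_train, batch_size=16, epochs=1)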
Example 15: concatenate_layers
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import Concatenate [as alias]
def concatenate_layers(inputs, concat_axis, mode='concat'):
    if KERAS_2:
        assert mode == 'concat', "Only concatenation is supported in this wrapper"
        return Concatenate(axis=concat_axis)(inputs)
    else:
        return merge(inputs=inputs, concat_axis=concat_axis, mode=mode)
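A small usage sketch of this compatibility wrapper, assuming KERAS_2 is a module-level flag set elsewhere by a Keras version check (the shapes are illustrative):

from keras.layers import Input

a = Input(shape=(32, 32, 3))
b = Input(shape=(32, 32, 3))
merged = concatenate_layers([a, b], concat_axis=-1)   # (batch, 32, 32, 6) under Keras 2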