This article collects typical usage examples of the Python method keras.layers.merge.concatenate. If you have been wondering what merge.concatenate does, how to call it, or simply want to see it used in real code, the hand-picked examples below should help. You can also explore further usage examples of the module the method lives in, keras.layers.merge.
The following shows 15 code examples of merge.concatenate, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
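Before the longer examples, here is a minimal, self-contained sketch (not taken from any of the examples below) showing what merge.concatenate does: it joins a list of tensors along a given axis inside the Keras functional API.

from keras.layers import Input, Dense
from keras.layers.merge import concatenate
from keras.models import Model

# Two inputs with different feature sizes, joined along the last axis.
a = Input(shape=(16,))
b = Input(shape=(8,))
merged = concatenate([a, b], axis=-1)         # result shape: (None, 24)
out = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=[a, b], outputs=out)
model.summary()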
Example 1: yolo_body
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(1024, (3, 3)))(darknet.output)
    conv13 = darknet.layers[43].output
    conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv21_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv21)
    x = concatenate([conv21_reshaped, conv20])
    x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)
    return Model(inputs, x)
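The example above assumes two helpers, space_to_depth_x2 and space_to_depth_x2_output_shape, which are defined elsewhere in the repository; they rearrange 2x2 spatial blocks into channels so the shallower feature map can be concatenated with conv20. A minimal sketch of what they typically look like with a TensorFlow 1.x backend (the bodies below are assumptions, not the original code):

import tensorflow as tf

def space_to_depth_x2(x):
    """Thin wrapper so a Keras Lambda layer can apply space-to-depth with block_size=2."""
    return tf.space_to_depth(x, block_size=2)

def space_to_depth_x2_output_shape(input_shape):
    """Static output shape of the block_size=2 op (channels_last, known spatial dims assumed)."""
    batch, height, width, channels = input_shape
    return (batch, height // 2, width // 2, 4 * channels)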
Example 2: loss_net
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def loss_net(x_in, trux_x_in, width, height, style_image_path, content_weight, style_weight):
    # Append the initial input to the FastNet input to the VGG inputs
    x = concatenate([x_in, trux_x_in], axis=0)
    # Normalize the inputs via custom VGG Normalization layer
    x = VGGNormalize(name="vgg_normalize")(x)
    vgg = VGG16(include_top=False, input_tensor=x)
    vgg_output_dict = dict([(layer.name, layer.output) for layer in vgg.layers[-18:]])
    vgg_layers = dict([(layer.name, layer) for layer in vgg.layers[-18:]])
    if style_weight > 0:
        add_style_loss(vgg, style_image_path, vgg_layers, vgg_output_dict, width, height, style_weight)
    if content_weight > 0:
        add_content_loss(vgg_layers, vgg_output_dict, content_weight)
    # Freeze all VGG layers
    for layer in vgg.layers[-19:]:
        layer.trainable = False
    return vgg
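Two pieces here are custom to the repository: VGGNormalize, a layer that converts images into the format the pretrained VGG16 expects, and the add_style_loss / add_content_loss helpers that attach the perceptual losses to the VGG layers. As an illustration only, a plausible sketch of the VGGNormalize layer, assuming channels_last images with values in [0, 255] (this is not the original implementation):

from keras import backend as K
from keras.layers import Layer

class VGGNormalize(Layer):
    """Convert RGB images in [0, 255] to the BGR, mean-centred format of pretrained VGG16."""

    def call(self, x, mask=None):
        # Reverse the channel order (RGB -> BGR), then subtract the ImageNet channel means.
        x = x[:, :, :, ::-1]
        return x - K.constant([103.939, 116.779, 123.68])

    def compute_output_shape(self, input_shape):
        return input_shape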
Example 3: block_inception_a
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def block_inception_a(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    branch_0 = conv2d_bn(input, 96, 1, 1)
    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)
    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)
    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
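Examples 3 to 6 (and Examples 10 to 14 below) come from Inception-v4 style model definitions and rely on a conv2d_bn helper that is not shown on this page. A minimal sketch of what such a helper usually looks like, matching the (input, filters, rows, cols, ...) call signature used above (the exact details are an assumption, not the original source):

from keras import backend as K
from keras.layers import Conv2D, BatchNormalization, Activation

def conv2d_bn(x, nb_filter, num_row, num_col,
              padding='same', strides=(1, 1), use_bias=False):
    """Conv2D -> BatchNormalization -> ReLU, matching the call signature used above."""
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv2D(nb_filter, (num_row, num_col),
               strides=strides, padding=padding, use_bias=use_bias)(x)
    x = BatchNormalization(axis=channel_axis, scale=False)(x)
    x = Activation('relu')(x)
    return x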
Example 4: block_reduction_a
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def block_reduction_a(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    branch_0 = conv2d_bn(input, 384, 3, 3, strides=(2, 2), padding='valid')
    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 3, 3)
    branch_1 = conv2d_bn(branch_1, 256, 3, 3, strides=(2, 2), padding='valid')
    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)
    x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
    return x
Example 5: block_inception_b
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def block_inception_b(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    branch_0 = conv2d_bn(input, 384, 1, 1)
    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 1, 7)
    branch_1 = conv2d_bn(branch_1, 256, 7, 1)
    branch_2 = conv2d_bn(input, 192, 1, 1)
    branch_2 = conv2d_bn(branch_2, 192, 7, 1)
    branch_2 = conv2d_bn(branch_2, 224, 1, 7)
    branch_2 = conv2d_bn(branch_2, 224, 7, 1)
    branch_2 = conv2d_bn(branch_2, 256, 1, 7)
    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 128, 1, 1)
    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
Example 6: block_reduction_b
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def block_reduction_b(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    branch_0 = conv2d_bn(input, 192, 1, 1)
    branch_0 = conv2d_bn(branch_0, 192, 3, 3, strides=(2, 2), padding='valid')
    branch_1 = conv2d_bn(input, 256, 1, 1)
    branch_1 = conv2d_bn(branch_1, 256, 1, 7)
    branch_1 = conv2d_bn(branch_1, 320, 7, 1)
    branch_1 = conv2d_bn(branch_1, 320, 3, 3, strides=(2, 2), padding='valid')
    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)
    x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
    return x
Example 7: concat_images_with_tiled_vector
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def concat_images_with_tiled_vector(images, vector):
    """Combine a set of images with a vector, tiling the vector at each pixel
    in the images and concatenating on the channel axis.

    # Params
        images: list of images with the same dimensions
        vector: vector to tile on each image. If you have
            more than one vector, simply concatenate them
            all before calling this function.

    # Returns
        The combined tensor with the tiled vector appended to each
        image along the channel axis.
    """
    with K.name_scope('concat_images_with_tiled_vector'):
        if not isinstance(images, list):
            images = [images]
        image_shape = K.int_shape(images[0])
        tiled_vector = tile_vector_as_image_channels(vector, image_shape)
        images.append(tiled_vector)
        combined = K.concatenate(images)
        return combined
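The actual tiling is delegated to tile_vector_as_image_channels, which is not shown here. A plausible sketch of such a helper using Keras backend ops, assuming channels_last images and a (batch, n) vector (the body is an assumption based on the function name and its usage above):

from keras import backend as K

def tile_vector_as_image_channels(vector, image_shape):
    """Tile a (batch, n) vector to (batch, height, width, n) so it can be
    concatenated with an image on the channel axis."""
    # image_shape is the static image shape, e.g. (batch, height, width, channels)
    dyn_shape = K.shape(vector)
    vector = K.reshape(vector, [dyn_shape[0], 1, 1, dyn_shape[1]])
    return K.tile(vector, [1, image_shape[1], image_shape[2], 1])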
Example 8: forward
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def forward(self):
    model_input = Input(shape=(self.maxlen,), dtype='int32', name='token')
    x = Token_Embedding(model_input, self.nb_tokens, self.embedding_dim,
                        self.token_embeddings, True, self.maxlen,
                        self.embed_dropout_rate, name='token_embeddings')
    x = Activation('tanh')(x)
    # skip-connection from embedding to output eases gradient-flow and allows access to lower-level features
    # ordering of the way the merge is done is important for consistency with the pretrained model
    lstm_0_output = Bidirectional(
        LSTM(self.rnn_size, return_sequences=True), name="bi_lstm_0")(x)
    lstm_1_output = Bidirectional(
        LSTM(self.rnn_size, return_sequences=True), name="bi_lstm_1")(lstm_0_output)
    x = concatenate([lstm_1_output, lstm_0_output, x], name='concatenate')
    x = self.attention_layer(x)
    if self.return_attention:
        x, weights = x
    outputs = tc_output_logits(x, self.nb_classes, self.final_dropout_rate)
    if self.return_attention:
        outputs.append(weights)
        outputs = concatenate(outputs, axis=-1, name='outputs')
    self.model = Model(inputs=model_input,
                       outputs=outputs, name="Bi_LSTM_Attention")
Example 9: forward
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def forward(self):
    model_input = Input(shape=(self.maxlen,), dtype='int32', name='token')
    x = Token_Embedding(model_input, self.nb_tokens, self.embedding_dim,
                        self.token_embeddings, False, self.maxlen,
                        self.embed_dropout_rate, name='token_embeddings')
    cnn_combine = []
    for i in range(len(self.conv_kernel_size)):
        cnn = self.cnn_list[i](x)
        pool = self.pool_list[i](cnn)
        cnn_combine.append(pool)
    x = concatenate(cnn_combine, axis=-1)
    x = Flatten()(x)
    x = Dropout(self.final_dropout_rate)(x)
    x = self.fc(x)
    outputs = tc_output_logits(x, self.nb_classes, self.final_dropout_rate)
    self.model = Model(inputs=model_input,
                       outputs=outputs, name="TextCNN")
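The parallel convolution and pooling towers referenced above as self.cnn_list and self.pool_list are created elsewhere in the class. A plausible sketch of how they could be built so that the later concatenate along the last axis (and the Flatten that follows) works; the kernel sizes and filter count below are assumed hyperparameters, not values from the source:

from keras.layers import Conv1D, MaxPooling1D

conv_kernel_size = [2, 3, 4]   # assumed kernel widths
nb_filters = 128               # assumed number of filters per tower

# 'same' padding keeps every tower's sequence length equal, so the pooled
# outputs can be concatenated on the last (channel) axis.
cnn_list = [Conv1D(nb_filters, k, padding='same', activation='relu', name='conv1d_%d' % k)
            for k in conv_kernel_size]
pool_list = [MaxPooling1D(pool_size=2, name='maxpool_%d' % k)
             for k in conv_kernel_size]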
Example 10: block_inception_a
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def block_inception_a(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    branch_0 = conv2d_bn(input, 96, 1, 1)
    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)
    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)
    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
Example 11: block_reduction_a
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def block_reduction_a(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    branch_0 = conv2d_bn(input, 384, 3, 3, strides=(2, 2), padding='valid')
    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 3, 3)
    branch_1 = conv2d_bn(branch_1, 256, 3, 3, strides=(2, 2), padding='valid')
    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)
    x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
    return x
Example 12: block_inception_b
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def block_inception_b(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    branch_0 = conv2d_bn(input, 384, 1, 1)
    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 1, 7)
    branch_1 = conv2d_bn(branch_1, 256, 7, 1)
    branch_2 = conv2d_bn(input, 192, 1, 1)
    branch_2 = conv2d_bn(branch_2, 192, 7, 1)
    branch_2 = conv2d_bn(branch_2, 224, 1, 7)
    branch_2 = conv2d_bn(branch_2, 224, 7, 1)
    branch_2 = conv2d_bn(branch_2, 256, 1, 7)
    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 128, 1, 1)
    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
Example 13: block_reduction_b
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def block_reduction_b(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    branch_0 = conv2d_bn(input, 192, 1, 1)
    branch_0 = conv2d_bn(branch_0, 192, 3, 3, strides=(2, 2), padding='valid')
    branch_1 = conv2d_bn(input, 256, 1, 1)
    branch_1 = conv2d_bn(branch_1, 256, 1, 7)
    branch_1 = conv2d_bn(branch_1, 320, 7, 1)
    branch_1 = conv2d_bn(branch_1, 320, 3, 3, strides=(2, 2), padding='valid')
    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)
    x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
    return x
Example 14: block_inception_c
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def block_inception_c(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    branch_0 = conv2d_bn(input, 256, 1, 1)
    branch_1 = conv2d_bn(input, 384, 1, 1)
    branch_10 = conv2d_bn(branch_1, 256, 1, 3)
    branch_11 = conv2d_bn(branch_1, 256, 3, 1)
    branch_1 = concatenate([branch_10, branch_11], axis=channel_axis)
    branch_2 = conv2d_bn(input, 384, 1, 1)
    branch_2 = conv2d_bn(branch_2, 448, 3, 1)
    branch_2 = conv2d_bn(branch_2, 512, 1, 3)
    branch_20 = conv2d_bn(branch_2, 256, 1, 3)
    branch_21 = conv2d_bn(branch_2, 256, 3, 1)
    branch_2 = concatenate([branch_20, branch_21], axis=channel_axis)
    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 256, 1, 1)
    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
Example 15: get_unet_resnet
# Required import: from keras.layers import merge [as alias]
# Or: from keras.layers.merge import concatenate [as alias]
def get_unet_resnet(input_shape):
    resnet_base = ResNet50(input_shape=input_shape, include_top=False)
    if args.show_summary:
        resnet_base.summary()
    for l in resnet_base.layers:
        l.trainable = True
    conv1 = resnet_base.get_layer("activation_1").output
    conv2 = resnet_base.get_layer("activation_10").output
    conv3 = resnet_base.get_layer("activation_22").output
    conv4 = resnet_base.get_layer("activation_40").output
    conv5 = resnet_base.get_layer("activation_49").output
    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")
    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 192, "conv7_1")
    conv7 = conv_block_simple(conv7, 192, "conv7_2")
    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 128, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")
    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 64, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")
    vgg = VGG16(input_shape=input_shape, input_tensor=resnet_base.input, include_top=False)
    for l in vgg.layers:
        l.trainable = False
    vgg_first_conv = vgg.get_layer("block1_conv2").output
    up10 = concatenate([UpSampling2D()(conv9), resnet_base.input, vgg_first_conv], axis=-1)
    conv10 = conv_block_simple(up10, 32, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.2)(conv10)
    x = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    model = Model(resnet_base.input, x)
    return model
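The decoder above leans on a conv_block_simple helper defined elsewhere in the repository. A minimal sketch that matches the (input, filters, name_prefix) calls used above, assuming the usual convolution + batch normalization + ReLU pattern:

from keras.layers import Conv2D, BatchNormalization, Activation

def conv_block_simple(prevlayer, filters, prefix, strides=(1, 1)):
    """3x3 Conv2D -> BatchNormalization -> ReLU, with layers named after the prefix."""
    conv = Conv2D(filters, (3, 3), padding="same", kernel_initializer="he_normal",
                  strides=strides, name=prefix + "_conv")(prevlayer)
    conv = BatchNormalization(name=prefix + "_bn")(conv)
    conv = Activation('relu', name=prefix + "_activation")(conv)
    return conv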