本文整理汇总了Python中keras.layers.merge方法的典型用法代码示例。如果您正苦于以下问题:Python layers.merge方法的具体用法?Python layers.merge怎么用?Python layers.merge使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块keras.layers的用法示例。
在下文中一共展示了layers.merge方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: build_encoder
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def build_encoder(self):
    """Build the VAE-style encoder: image -> (mu, log_var) -> sampled latent code.

    Returns a Model mapping an image of shape ``self.img_shape`` to a
    reparameterized latent vector of length ``self.latent_dim``.
    """
    img = Input(shape=self.img_shape)

    # Two fully-connected layers, each followed by a leaky ReLU.
    features = Flatten()(img)
    for _ in range(2):
        features = Dense(512)(features)
        features = LeakyReLU(alpha=0.2)(features)

    # Gaussian parameters of the approximate posterior.
    mu = Dense(self.latent_dim)(features)
    log_var = Dense(self.latent_dim)(features)

    # Reparameterization trick (z = mu + eps * sigma) via the legacy merge API.
    latent_repr = merge(
        [mu, log_var],
        mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
        output_shape=lambda p: p[0])
    return Model(img, latent_repr)
示例2: yolo_body
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    conv13 = darknet.get_layer('batchnormalization_13').output

    # Two extra 3x3 conv stages on top of the darknet trunk.
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, 3, 3),
        DarknetConv2D_BN_Leaky(1024, 3, 3))(darknet.output)

    # Reshape the earlier feature map so it is spatially compatible with conv20.
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    passthrough = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv13)

    # Concat conv13 with conv20, then predict per-anchor box parameters.
    merged = merge([passthrough, conv20], mode='concat')
    merged = DarknetConv2D_BN_Leaky(1024, 3, 3)(merged)
    outputs = DarknetConv2D(num_anchors * (num_classes + 5), 1, 1)(merged)
    return Model(inputs, outputs)
示例3: get_model
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def get_model(num_users, num_items, latent_dim, regs=(0, 0)):
    """Build a generalized matrix-factorization (GMF) recommender model.

    Args:
        num_users: number of distinct users (rows of the user embedding).
        num_items: number of distinct items (rows of the item embedding).
        latent_dim: size of the user/item latent factor vectors.
        regs: pair of L2 regularization strengths for the (user, item)
            embeddings.  Changed from a mutable list default ``[0, 0]`` to a
            tuple — same values, avoids the shared-mutable-default pitfall.

    Returns:
        A Keras Model mapping (user_id, item_id) inputs to a sigmoid score.
    """
    # Input variables: one integer id per example.
    user_input = Input(shape=(1,), dtype='int32', name = 'user_input')
    item_input = Input(shape=(1,), dtype='int32', name = 'item_input')

    MF_Embedding_User = Embedding(input_dim = num_users, output_dim = latent_dim, name = 'user_embedding',
                                  init = init_normal, W_regularizer = l2(regs[0]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim = num_items, output_dim = latent_dim, name = 'item_embedding',
                                  init = init_normal, W_regularizer = l2(regs[1]), input_length=1)

    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))

    # Element-wise product of user and item embeddings
    predict_vector = merge([user_latent, item_latent], mode = 'mul')

    # Final prediction layer
    prediction = Dense(1, activation='sigmoid', init='lecun_uniform', name = 'prediction')(predict_vector)

    model = Model(input=[user_input, item_input],
                  output=prediction)
    return model
示例4: block_inception_a
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def block_inception_a(input):
    """Inception-v4 block A: four parallel branches concatenated on channels."""
    # Channel axis depends on the backend image layout (theano vs tensorflow).
    channel_axis = 1 if K.image_dim_ordering() == "th" else -1

    branch_0 = conv2d_bn(input, 96, 1, 1)

    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)

    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)

    return merge([branch_0, branch_1, branch_2, branch_3],
                 mode='concat', concat_axis=channel_axis)
示例5: block_reduction_a
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def block_reduction_a(input):
    """Inception-v4 reduction A: stride-2 branches that halve the spatial size."""
    # Channel axis depends on the backend image layout (theano vs tensorflow).
    channel_axis = 1 if K.image_dim_ordering() == "th" else -1

    branch_0 = conv2d_bn(input, 384, 3, 3, subsample=(2, 2), border_mode='valid')

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 3, 3)
    branch_1 = conv2d_bn(branch_1, 256, 3, 3, subsample=(2, 2), border_mode='valid')

    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), border_mode='valid')(input)

    return merge([branch_0, branch_1, branch_2],
                 mode='concat', concat_axis=channel_axis)
示例6: block_inception_b
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def block_inception_b(input):
    """Inception-v4 block B: factorized 1x7/7x1 branches concatenated on channels."""
    # Channel axis depends on the backend image layout (theano vs tensorflow).
    channel_axis = 1 if K.image_dim_ordering() == "th" else -1

    branch_0 = conv2d_bn(input, 384, 1, 1)

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 1, 7)
    branch_1 = conv2d_bn(branch_1, 256, 7, 1)

    branch_2 = conv2d_bn(input, 192, 1, 1)
    branch_2 = conv2d_bn(branch_2, 192, 7, 1)
    branch_2 = conv2d_bn(branch_2, 224, 1, 7)
    branch_2 = conv2d_bn(branch_2, 224, 7, 1)
    branch_2 = conv2d_bn(branch_2, 256, 1, 7)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 128, 1, 1)

    return merge([branch_0, branch_1, branch_2, branch_3],
                 mode='concat', concat_axis=channel_axis)
示例7: block_reduction_b
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def block_reduction_b(input):
    """Inception-v4 reduction B: stride-2 branches that halve the spatial size."""
    # Channel axis depends on the backend image layout (theano vs tensorflow).
    channel_axis = 1 if K.image_dim_ordering() == "th" else -1

    branch_0 = conv2d_bn(input, 192, 1, 1)
    branch_0 = conv2d_bn(branch_0, 192, 3, 3, subsample=(2, 2), border_mode='valid')

    branch_1 = conv2d_bn(input, 256, 1, 1)
    branch_1 = conv2d_bn(branch_1, 256, 1, 7)
    branch_1 = conv2d_bn(branch_1, 320, 7, 1)
    branch_1 = conv2d_bn(branch_1, 320, 3, 3, subsample=(2, 2), border_mode='valid')

    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), border_mode='valid')(input)

    return merge([branch_0, branch_1, branch_2],
                 mode='concat', concat_axis=channel_axis)
示例8: _build
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def _build(self):
    """Assemble and compile the bidirectional-LSTM agent-act classifier.

    Builds two LSTM passes (forward and backward) over the same input
    window, concatenates their final states, and predicts a multi-label
    sigmoid distribution over the agent-act vocabulary.
    """
    print('Building Graph ...')
    inputs = Input(shape=(self.window_size, self.userTagIntent_vocab_size),
                   name='tagIntent_input')

    # Forward and backward passes each emit only their final state
    # (return_sequences=False), followed by dropout.
    forward = LSTM(output_dim=self.hidden_size,
                   return_sequences=False,
                   name='LSTM_forward')(inputs)
    forward = Dropout(self.dropout)(forward)

    backward = LSTM(output_dim=self.hidden_size,
                    return_sequences=False,
                    go_backwards=True,
                    name='LSTM_backward')(inputs)
    backward = Dropout(self.dropout)(backward)

    # Join both directions into one feature vector.
    bidirectional = merge([forward, backward],
                          mode='concat', concat_axis=-1,
                          name='merge_bidirections')

    # Sigmoid (not softmax): multi-label output, paired with
    # binary_crossentropy below.
    predictions = Dense(output_dim=self.agentAct_vocab_size,
                        activation='sigmoid')(bidirectional)

    self.model = Model(input=inputs, output=predictions)
    self.model.compile(optimizer=self.optimizer,
                       loss='binary_crossentropy')
示例9: identity_block
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """ResNet identity block (channels-first): no projection on the skip path."""
    nb_filter1, nb_filter2, nb_filter3 = filters
    bn_axis = 1  # channels-first batch-norm axis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Bottleneck: 1x1 reduce -> kxk conv -> 1x1 expand, each with BN.
    out = Convolution2D(nb_filter1, 1, 1, name=conv_name_base + '2a')(input_tensor)
    out = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(out)
    out = Activation('relu')(out)

    out = Convolution2D(nb_filter2, kernel_size, kernel_size,
                        border_mode='same', name=conv_name_base + '2b')(out)
    out = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(out)
    out = Activation('relu')(out)

    out = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c')(out)
    out = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(out)

    # Residual connection: add the unmodified input back in.
    out = merge([out, input_tensor], mode='sum')
    return Activation('relu')(out)
示例10: conv_block_atrous
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def conv_block_atrous(input_tensor, kernel_size, filters, stage, block, atrous_rate=(2, 2)):
    """ResNet bottleneck block with an atrous middle conv and a projected shortcut."""
    nb_filter1, nb_filter2, nb_filter3 = filters
    bn_axis = 1  # channels-first batch-norm axis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Main path: 1x1 reduce -> dilated kxk conv -> 1x1 expand, each with BN.
    out = Convolution2D(nb_filter1, 1, 1, name=conv_name_base + '2a')(input_tensor)
    out = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(out)
    out = Activation('relu')(out)

    out = AtrousConvolution2D(nb_filter2, kernel_size, kernel_size, border_mode='same',
                              atrous_rate=atrous_rate, name=conv_name_base + '2b')(out)
    out = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(out)
    out = Activation('relu')(out)

    out = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c')(out)
    out = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(out)

    # Shortcut path: 1x1 projection so channels match the main path.
    shortcut = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '1')(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)

    out = merge([out, shortcut], mode='sum')
    return Activation('relu')(out)
示例11: downsample_block
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def downsample_block(x, nb_channels, kernel_size=3, bottleneck=True,
                     l2_reg=1e-4):
    """Residual downsampling block: stride-2 main path plus a projected shortcut."""
    if bottleneck:
        main = bottleneck_layer(x, nb_channels, kernel_size=kernel_size,
                                stride=2, l2_reg=l2_reg)
        # The output channels is 4x bigger on this case
        nb_channels = nb_channels * 4
    else:
        main = two_conv_layer(x, nb_channels, kernel_size=kernel_size,
                              stride=2, l2_reg=l2_reg)

    # Projection on the shortcut: 1x1 stride-2 conv matches shape and channels.
    proj = Convolution2D(nb_channels, 1, 1, subsample=(2, 2),
                         border_mode='valid', init='he_normal',
                         W_regularizer=l2(l2_reg), bias=False)(x)
    return merge([proj, main], mode='sum')
示例12: identity_block
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def identity_block(x, nb_filter, kernel_size=3):
    """ResNet identity block (channels-last): bottleneck convs plus skip connection.

    Args:
        x: input 4-D tensor (batch, height, width, channels).
        nb_filter: triple (k1, k2, k3) of filter counts for the three convs.
        kernel_size: side of the middle convolution's square kernel.

    Returns:
        Output tensor after the residual addition and final ReLU.
    """
    k1, k2, k3 = nb_filter
    shortcut = x

    out = Conv2D(k1, kernel_size=(1,1), strides=(1,1),padding="valid",activation="relu")(x)
    out = BatchNormalization(axis=3)(out)

    # Fixed: the middle conv previously hard-coded (3, 3) and silently
    # ignored the kernel_size parameter (the sibling conv_block honors it).
    # Default kernel_size=3 is unchanged, so existing callers are unaffected.
    out = Conv2D(k2, kernel_size=(kernel_size, kernel_size), strides=(1,1), padding='same',activation="relu")(out)
    out = BatchNormalization(axis=3)(out)

    out = Conv2D(k3, kernel_size=(1,1), strides=(1,1),padding="valid")(out)
    out = BatchNormalization(axis=3)(out)

    # Residual addition followed by the final non-linearity.
    out = layers.add([out, shortcut])
    out = Activation('relu')(out)
    return out
示例13: conv_block
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def conv_block(x, nb_filter, kernel_size=3):
    """ResNet projection block (channels-last): stride-2 convs with projected shortcut.

    Args:
        x: input 4-D tensor (batch, height, width, channels).
        nb_filter: triple (k1, k2, k3) of filter counts for the three convs.
        kernel_size: side of the middle convolution's square kernel.

    Returns:
        Output tensor after the residual addition and final ReLU.
    """
    k1, k2, k3 = nb_filter
    shortcut = x

    out = Conv2D(k1, kernel_size=(1,1), strides=(2,2), padding="valid",activation="relu")(x)
    out = BatchNormalization(axis=3)(out)

    # Fixed: removed the redundant double assignment ("out = out = ...").
    out = Conv2D(k2, kernel_size=(kernel_size,kernel_size), strides=(1,1), padding="same",activation="relu")(out)
    # axis=3 made explicit for consistency with the other BN calls; identical
    # to the default axis=-1 on a 4-D channels-last tensor.
    out = BatchNormalization(axis=3)(out)

    out = Conv2D(k3, kernel_size=(1,1), strides=(1,1), padding="valid")(out)
    out = BatchNormalization(axis=3)(out)

    # Project the shortcut so its spatial size and channel count match.
    shortcut = Conv2D(k3, kernel_size=(1,1), strides=(2,2), padding="valid")(shortcut)
    shortcut = BatchNormalization(axis=3)(shortcut)

    out = layers.add([out, shortcut])
    out = Activation('relu')(out)
    return out
示例14: _shortcut
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def _shortcut(input, residual):
    """Merge a residual branch with its input, projecting the input if needed.

    Expand channels of shortcut to match residual, and stride appropriately
    to match the residual's (width, height).

    Fixed: use floor division (//) so the strides are ints. Under Python 3,
    "/" is true division and returns floats, which break Convolution2D's
    subsample argument; the original comment itself noted the values
    "should be int if network architecture is correctly configured".
    """
    stride_width = input._keras_shape[2] // residual._keras_shape[2]
    stride_height = input._keras_shape[3] // residual._keras_shape[3]
    equal_channels = residual._keras_shape[1] == input._keras_shape[1]

    shortcut = input
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Convolution2D(nb_filter=residual._keras_shape[1], nb_row=1, nb_col=1,
                                 subsample=(stride_width, stride_height),
                                 init="he_normal", border_mode="valid")(input)
    return merge([shortcut, residual], mode="sum")
# Builds a residual block with repeating bottleneck blocks.
示例15: _up_block
# 需要导入模块: from keras import layers [as 别名]
# 或者: from keras.layers import merge [as 别名]
def _up_block(block, mrge, nb_filters):
    """U-Net style up-sampling step: upsample, merge with the skip tensor, refine.

    Upsamples `block` 2x, projects it with a 2x2 conv, concatenates the result
    with the skip connection `mrge` along the channel axis (axis 1), then
    applies two 3x3 ReLU convolutions.
    """
    upsampled = Convolution2D(2 * nb_filters, 2, 2, border_mode='same')(
        UpSampling2D(size=(2, 2))(block))
    joined = merge([upsampled, mrge], mode='concat', concat_axis=1)

    conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(joined)
    conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(conv)
    return conv
# http://arxiv.org/pdf/1512.03385v1.pdf
# 50 Layer resnet