本文整理汇总了Python中keras.layers.merge.add方法的典型用法代码示例。如果您正苦于以下问题:Python merge.add方法的具体用法?Python merge.add怎么用?Python merge.add使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.layers.merge
的用法示例。
在下文中一共展示了merge.add方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _conv_block
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def _conv_block(inp, convs, skip=True):
    """Stack Darknet-style convolution layers, optionally with a residual add.

    :param inp: input tensor.
    :param convs: list of per-layer dicts with keys 'filter', 'kernel',
        'stride', 'bnorm', 'leaky' and 'layer_idx'.
    :param skip: if True, add the activation seen just before the last two
        convolutions to the block output (residual connection).
    :return: output tensor.
    """
    x = inp
    for count, conv in enumerate(convs):
        # Remember the activation two layers before the end for the residual add.
        if count == len(convs) - 2 and skip:
            skip_connection = x
        # Darknet pads only left and top for strided convs, unlike 'same' padding.
        if conv['stride'] > 1:
            x = ZeroPadding2D(((1, 0), (1, 0)))(x)
        x = Conv2D(conv['filter'],
                   conv['kernel'],
                   strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same',
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=not conv['bnorm'])(x)  # BN supplies the bias term
        if conv['bnorm']:
            x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']:
            x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
    return add([skip_connection, x]) if skip else x
#SPP block uses three pooling layers of sizes [5, 9, 13] with strides one and all outputs together with the input are concatenated to be fed
#to the FC block
示例2: conv_block
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def conv_block(input_tensor, filters, strides, d_rates):
    """Dilated ResNet bottleneck block with a strided 1x1 projection shortcut.

    :param input_tensor: input tensor.
    :param filters: 3-sequence of filter counts for the 1x1 / 3x3 / 1x1 convs.
    :param strides: strides applied by the 3x3 conv and the shortcut conv.
    :param d_rates: 3-sequence of dilation rates, one per conv.
    :return: activated sum of the main path and the projected shortcut.
    """
    def conv_bn(tensor, n_filters, d_rate, k_size=1, stride=1, pad='valid'):
        # Convolution immediately followed by batch normalisation.
        out = Conv2D(n_filters, kernel_size=k_size, strides=stride,
                     padding=pad, dilation_rate=d_rate)(tensor)
        return BatchNormalization()(out)

    y = conv_bn(input_tensor, filters[0], d_rates[0])
    y = Activation('relu')(y)
    y = conv_bn(y, filters[1], d_rates[1], k_size=3, stride=strides, pad='same')
    y = Activation('relu')(y)
    y = conv_bn(y, filters[2], d_rates[2])

    # Shortcut is projected to matching shape/stride before the merge.
    shortcut = Conv2D(filters[2], kernel_size=1, strides=strides)(input_tensor)
    shortcut = BatchNormalization()(shortcut)

    y = add([y, shortcut])
    return Activation('relu')(y)
示例3: identity_block
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def identity_block(input_tensor, filters, d_rates):
    """Dilated ResNet identity block: three conv-BN stages summed with the input.

    :param input_tensor: input tensor; must already match the output channels.
    :param filters: 3-sequence of filter counts for the 1x1 / 3x3 / 1x1 convs.
    :param d_rates: 3-sequence of dilation rates, one per conv.
    :return: activated sum of the main path and the untouched input.
    """
    def conv_bn(tensor, n_filters, d_rate, k_size=1, pad='valid'):
        # Convolution immediately followed by batch normalisation.
        out = Conv2D(n_filters, kernel_size=k_size, padding=pad,
                     dilation_rate=d_rate)(tensor)
        return BatchNormalization()(out)

    y = conv_bn(input_tensor, filters[0], d_rates[0])
    y = Activation('relu')(y)
    y = conv_bn(y, filters[1], d_rates[1], k_size=3, pad='same')
    y = Activation('relu')(y)
    y = conv_bn(y, filters[2], d_rates[2])

    # No projection needed: identity skip straight from the input.
    y = add([y, input_tensor])
    return Activation('relu')(y)
示例4: _conv_block
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def _conv_block(inp, convs, do_skip=True):
    """Stack Darknet-style convolution layers, optionally with a residual add.

    :param inp: input tensor.
    :param convs: list of per-layer dicts with keys 'filter', 'kernel',
        'stride', 'bnorm', 'leaky' and 'layer_idx'.
    :param do_skip: if True, add the activation seen just before the last two
        convolutions to the block output (residual connection).
    :return: output tensor.
    """
    x = inp
    for count, conv in enumerate(convs):
        # Remember the activation two layers before the end for the residual add.
        if count == len(convs) - 2 and do_skip:
            skip_connection = x
        # Unlike TensorFlow 'same' padding, darknet pads only left and top.
        if conv['stride'] > 1:
            x = ZeroPadding2D(((1, 0), (1, 0)))(x)
        x = Conv2D(conv['filter'],
                   conv['kernel'],
                   strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same',
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=not conv['bnorm'])(x)  # BN supplies the bias term
        if conv['bnorm']:
            x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']:
            x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
    return add([skip_connection, x]) if do_skip else x
示例5: base_ab_seq_model
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def base_ab_seq_model(max_cdr_len):
    """Build the base antibody CDR sequence model graph.

    :param max_cdr_len: maximum CDR sequence length (time dimension).
    :return: tuple (input_ab, label_mask, residual features, per-residue
        sigmoid probabilities).
    """
    input_ab = Input(shape=(max_cdr_len, NUM_FEATURES))
    label_mask = Input(shape=(max_cdr_len,))

    # Mask padded positions according to the label mask input.
    masked_seq = MaskingByLambda(mask_by_input(label_mask))(input_ab)

    # Local features from a masked convolution, merged residually with the input.
    local_fts = MaskedConvolution1D(28, 3, padding='same', activation='elu',
                                    kernel_regularizer=l2(0.01))(masked_seq)
    residual_fts = add([masked_seq, local_fts])

    # Global context via a bidirectional LSTM over the residual features.
    global_fts = Bidirectional(LSTM(256, dropout=0.15, recurrent_dropout=0.2,
                                    return_sequences=True),
                               merge_mode='concat')(residual_fts)
    dropped = Dropout(0.3)(global_fts)

    probs = TimeDistributed(Dense(1, activation='sigmoid',
                                  kernel_regularizer=l2(0.01)))(dropped)
    return input_ab, label_mask, residual_fts, probs
示例6: _conv_block
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def _conv_block(inp, convs, skip=True):
    """Stack Darknet-style convolution layers, optionally with a residual add.

    :param inp: input tensor.
    :param convs: list of per-layer dicts with keys 'filter', 'kernel',
        'stride', 'bnorm', 'leaky' and 'layer_idx'.
    :param skip: if True, add the activation seen just before the last two
        convolutions to the block output (residual connection).
    :return: output tensor.
    """
    x = inp
    for count, conv in enumerate(convs):
        # Remember the activation two layers before the end for the residual add.
        if count == len(convs) - 2 and skip:
            skip_connection = x
        # Darknet pads only left and top for strided convs, unlike 'same' padding.
        if conv['stride'] > 1:
            x = ZeroPadding2D(((1, 0), (1, 0)))(x)
        x = Conv2D(conv['filter'],
                   conv['kernel'],
                   strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same',
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=not conv['bnorm'])(x)  # BN supplies the bias term
        if conv['bnorm']:
            x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']:
            x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
    return add([skip_connection, x]) if skip else x
示例7: _shortcut3d
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def _shortcut3d(input, residual):
    """3D shortcut to match input and residual and merges them with "sum"."""
    in_shape = input._keras_shape
    res_shape = residual._keras_shape

    # Per-dimension stride needed for the shortcut to reach the residual's size.
    strides = tuple(
        ceil(in_shape[axis] / res_shape[axis])
        for axis in (DIM1_AXIS, DIM2_AXIS, DIM3_AXIS)
    )
    same_channels = in_shape[CHANNEL_AXIS] == res_shape[CHANNEL_AXIS]

    shortcut = input
    # Identity unless a strided and/or channel-matching 1x1x1 projection is needed.
    if any(s > 1 for s in strides) or not same_channels:
        shortcut = Conv3D(
            filters=res_shape[CHANNEL_AXIS],
            kernel_size=(1, 1, 1),
            strides=strides,
            kernel_initializer="he_normal", padding="valid",
            kernel_regularizer=l2(1e-4)
        )(input)
    return add([shortcut, residual])
示例8: _shortcut
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def _shortcut(input, residual):
    """Adds a shortcut between input and residual block and merges them with "sum".

    The shortcut is the identity when shapes already match; otherwise a
    strided 1x1 convolution projects the input to the residual's shape.
    Strides should come out as integers if the architecture is configured
    correctly.
    """
    in_shape = K.int_shape(input)
    res_shape = K.int_shape(residual)
    stride_w = int(round(in_shape[ROW_AXIS] / res_shape[ROW_AXIS]))
    stride_h = int(round(in_shape[COL_AXIS] / res_shape[COL_AXIS]))

    needs_projection = (
        stride_w > 1
        or stride_h > 1
        or in_shape[CHANNEL_AXIS] != res_shape[CHANNEL_AXIS]
    )
    if needs_projection:
        # 1x1 conv to match spatial size and channel count.
        shortcut = Conv2D(filters=res_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_w, stride_h),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001))(input)
    else:
        shortcut = input
    return add([shortcut, residual])
示例9: dpcnn_block
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def dpcnn_block(filter_nr, kernel_size, use_batch_norm, use_prelu, dropout, dropout_mode,
                kernel_reg_l2, bias_reg_l2, batch_norm_first):
    """DPCNN downsampling block: stride-2 max-pooling followed by a two-conv
    residual branch that is added back onto the pooled input.

    Returns a closure that applies the block to a tensor.
    """
    def block(x):
        pooled = MaxPooling1D(pool_size=3, strides=2)(x)
        # Two stacked convolutional blocks form the residual branch.
        branch = pooled
        for _ in range(2):
            branch = convolutional_block(filter_nr, kernel_size, use_batch_norm,
                                         use_prelu, dropout, dropout_mode,
                                         kernel_reg_l2, bias_reg_l2,
                                         batch_norm_first)(branch)
        return add([branch, pooled])
    return block
示例10: vdcnn_block
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def vdcnn_block(filter_nr, kernel_size, use_batch_norm, use_prelu, dropout, dropout_mode,
                kernel_reg_l2, bias_reg_l2, batch_norm_first, last_block):
    """VDCNN block: two residual convolutional stages, followed by stride-2
    max-pooling on every block except the last.

    Returns a closure that applies the block to a tensor.
    """
    def block(x):
        # Each stage adds its conv branch back onto its own input.
        for _ in range(2):
            branch = convolutional_block(filter_nr, kernel_size, use_batch_norm,
                                         use_prelu, dropout, dropout_mode,
                                         kernel_reg_l2, bias_reg_l2,
                                         batch_norm_first)(x)
            x = add([branch, x])
        if not last_block:
            x = MaxPooling1D(pool_size=3, strides=2)(x)
        return x
    return block
示例11: _AddShortCut
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def _AddShortCut(self, x):
    """Element-wise sum the tensors in `x`, then apply batch-norm + ReLU."""
    # `x` is a list of tensors; `add` merges them into one.
    summed = add(x)
    return self._BNRelu(summed)
示例12: vgg_upsampling
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def vgg_upsampling(classes, target_shape=None, scale=1, weight_decay=0., block_name='featx'):
    """A VGG convolutional block with bilinear upsampling for decoding.

    :param classes: Integer, number of classes
    :param scale: Float, scale factor applied to this block's score map
        before fusing with the previous decoder output, varying from 0 to 1
    :param target_shape: 4D tuple with target_height, target_width as the
        2nd, 3rd elements if `channels_last` or as the 3rd, 4th elements if
        `channels_first`.

    >>> from keras_fcn.blocks import vgg_upsampling
    >>> feat1, feat2, feat3 = feat_pyramid[:3]
    >>> y = vgg_upsampling(classes=21, target_shape=(None, 14, 14, None),
    >>>                    scale=1, block_name='feat1')(feat1, None)
    >>> y = vgg_upsampling(classes=21, target_shape=(None, 28, 28, None),
    >>>                    scale=1e-2, block_name='feat2')(feat2, y)
    >>> y = vgg_upsampling(classes=21, target_shape=(None, 224, 224, None),
    >>>                    scale=1e-4, block_name='feat3')(feat3, y)
    """
    def f(x, y):
        # 1x1 linear conv turns the feature map into per-class scores.
        score = Conv2D(filters=classes, kernel_size=(1, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='he_normal',
                       kernel_regularizer=l2(weight_decay),
                       name='score_{}'.format(block_name))(x)
        if y is not None:
            # Down-weight this score map, then fuse the previous decoder output.
            def scaling(xx, ss=1):
                return xx * ss

            scaled = Lambda(scaling, arguments={'ss': scale},
                            name='scale_{}'.format(block_name))(score)
            score = add([y, scaled])
        return BilinearUpSampling2D(
            target_shape=target_shape,
            name='upscore_{}'.format(block_name))(score)
    return f
示例13: _shortcut
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def _shortcut(input_feature, residual, conv_name_base=None, bn_name_base=None):
    """Adds a shortcut between input and residual block and merges them with "sum".

    The shortcut is the identity when shapes already match; otherwise the
    input is projected with a strided 1x1 convolution (optionally named via
    `conv_name_base`/`bn_name_base`) followed by batch normalisation.
    Strides should come out as integers if the architecture is configured
    correctly.
    """
    in_shape = K.int_shape(input_feature)
    res_shape = K.int_shape(residual)
    stride_w = int(round(in_shape[ROW_AXIS] / res_shape[ROW_AXIS]))
    stride_h = int(round(in_shape[COL_AXIS] / res_shape[COL_AXIS]))
    same_channels = in_shape[CHANNEL_AXIS] == res_shape[CHANNEL_AXIS]

    shortcut = input_feature
    # 1 X 1 conv if shape is different. Else identity.
    if stride_w > 1 or stride_h > 1 or not same_channels:
        print('reshaping via a convolution...')
        if conv_name_base is not None:
            conv_name_base = conv_name_base + '1'
        shortcut = Conv2D(filters=res_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_w, stride_h),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001),
                          name=conv_name_base)(input_feature)
        if bn_name_base is not None:
            bn_name_base = bn_name_base + '1'
        shortcut = BatchNormalization(axis=CHANNEL_AXIS,
                                      name=bn_name_base)(shortcut)
    return add([shortcut, residual])
示例14: __conv2_block
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def __conv2_block(input, k=1, dropout=0.0):
    """Wide-ResNet conv2 residual block with width multiplier `k`.

    :param input: input tensor.
    :param k: Integer, width multiplier; block width is 16 * k filters.
    :param dropout: Float, dropout rate between the two 3x3 convs (0 disables).
    :return: sum of the (possibly projected) input and the conv branch.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    width = 16 * k

    # Project the shortcut with a linear 1x1 conv when its channel count
    # differs from 16 * k; channel_axis already encodes the data format.
    init = input
    if init._keras_shape[channel_axis] != width:
        init = Conv2D(width, (1, 1), activation='linear', padding='same')(init)

    x = Conv2D(width, (3, 3), padding='same')(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = Conv2D(width, (3, 3), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    return add([init, x])
示例15: __conv3_block
# 需要导入模块: from keras.layers import merge [as 别名]
# 或者: from keras.layers.merge import add [as 别名]
def __conv3_block(input, k=1, dropout=0.0):
    """Wide-ResNet conv3 residual block with width multiplier `k`.

    :param input: input tensor.
    :param k: Integer, width multiplier; block width is 32 * k filters.
    :param dropout: Float, dropout rate between the two 3x3 convs (0 disables).
    :return: sum of the (possibly projected) input and the conv branch.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    width = 32 * k

    # Project the shortcut with a linear 1x1 conv when its channel count
    # differs from 32 * k; channel_axis already encodes the data format.
    init = input
    if init._keras_shape[channel_axis] != width:
        init = Conv2D(width, (1, 1), activation='linear', padding='same')(init)

    x = Conv2D(width, (3, 3), padding='same')(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = Conv2D(width, (3, 3), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    return add([init, x])