This article collects typical usage examples of the Python method keras.layers.pooling.AveragePooling2D. If you are wondering what pooling.AveragePooling2D does exactly, or how to use it, the curated code examples below may help. You can also explore the module the method lives in, keras.layers.pooling, for related usage examples.
The following presents 15 code examples of pooling.AveragePooling2D, ordered by popularity by default.
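Before the repository examples, here is a minimal self-contained sketch of the layer on its own (the input shape and pooling parameters are illustrative, not taken from any example below):

import numpy as np
from keras.layers import Input
from keras.layers.pooling import AveragePooling2D
from keras.models import Model

# A 2x2 average pool with stride 2 halves each spatial dimension.
ip = Input(shape=(8, 8, 3))                        # (height, width, channels)
x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(ip)
model = Model(ip, x)

print(model.output_shape)                          # (None, 4, 4, 3)
print(model.predict(np.ones((1, 8, 8, 3))).shape)  # (1, 4, 4, 3)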
Example 1: __transition_block
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D (with optional compression) and AveragePooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                     in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu, 1x1 conv and average pooling
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    # global context block (helper defined elsewhere in the source repository)
    x = global_context_block(x)
    return x
Example 2: __transition_block
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D (with optional compression) and AveragePooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                     in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu, 1x1 conv and average pooling
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x
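A quick way to see what compression does is to wrap the block from Example 2 in a model and inspect the output shape. A minimal sketch, assuming the function above and its imports (K, BatchNormalization, Activation, Conv2D, l2) are in scope:

from keras.layers import Input
from keras.models import Model

ip = Input(shape=(32, 32, 256))
# compression=0.5 keeps half the feature maps; the 2x2 pooling halves the spatial dims
x = __transition_block(ip, nb_filter=256, compression=0.5)
print(Model(ip, x).output_shape)  # (None, 16, 16, 128)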
Example 3: transition_block
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def transition_block(ip, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply 1x1 Conv2D, optional dropout, AveragePooling2D and BatchNorm
    Args:
        ip: keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor, after applying conv, dropout, average pooling and batch_norm
    '''
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    # Legacy Keras 1 API: init/border_mode/bias/W_regularizer correspond to
    # kernel_initializer/padding/use_bias/kernel_regularizer in Keras 2
    # (compare Example 4 for the Keras 2 form of the same block).
    x = Convolution2D(nb_filter, 1, 1, init="he_uniform", border_mode="same", bias=False,
                      W_regularizer=l2(weight_decay))(ip)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    return x
Example 4: transition_block
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def transition_block(input, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply 1x1 Conv2D, optional dropout, AveragePooling2D and BatchNorm
    Args:
        input: keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor, after applying conv, dropout, average pooling and batch_norm
    '''
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    x = Convolution2D(nb_filter, (1, 1), kernel_initializer="he_uniform", padding="same", use_bias=False,
                      kernel_regularizer=l2(weight_decay))(input)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    x = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    return x
Example 5: __transition_block
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4, attention_module=None):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D (with optional compression) and AveragePooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                     in the transition block.
        weight_decay: weight decay factor
        attention_module: name of the attention module to attach, or None
    Returns: keras tensor, after applying batch_norm, relu, 1x1 conv and average pooling
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    # optional attention module (attach_attention_module is defined elsewhere in the repository)
    if attention_module is not None:
        x = attach_attention_module(x, attention_module)
    return x
Example 6: inception_block_3a
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def inception_block_3a(X):
    # 3x3 branch: 1x1 bottleneck then 3x3 conv (conv2d_bn comes from fr_utils)
    X_3x3 = fr_utils.conv2d_bn(X,
                               layer='inception_5a_3x3',
                               cv1_out=96,
                               cv1_filter=(1, 1),
                               cv2_out=384,
                               cv2_filter=(3, 3),
                               cv2_strides=(1, 1),
                               padding=(1, 1))
    # pooling branch: 3x3 average pool followed by a 1x1 conv
    X_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), data_format='channels_first')(X)
    X_pool = fr_utils.conv2d_bn(X_pool,
                                layer='inception_5a_pool',
                                cv1_out=96,
                                cv1_filter=(1, 1),
                                padding=(1, 1))
    # 1x1 branch
    X_1x1 = fr_utils.conv2d_bn(X,
                               layer='inception_5a_1x1',
                               cv1_out=256,
                               cv1_filter=(1, 1))

    # concatenate the branches along the channel axis (channels_first)
    inception = concatenate([X_3x3, X_pool, X_1x1], axis=1)
    return inception
Example 7: _transition_block
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def _transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D (with optional compression) and AveragePooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                     in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu, 1x1 conv and average pooling
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    with K.name_scope('transition_block'):
        x = BatchNormalization(axis=concat_axis, epsilon=1e-5, momentum=0.1)(ip)
        x = Activation('relu')(x)
        x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
                   kernel_regularizer=l2(weight_decay))(x)
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x
Example 8: downsample_block
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def downsample_block(x, nb_channels, kernel_size=3, bottleneck=True,
                     l2_reg=1e-4):
    if bottleneck:
        out = bottleneck_layer(x, nb_channels, kernel_size=kernel_size,
                               stride=2, l2_reg=l2_reg)
        # The number of output channels is 4x larger in this case
        nb_channels = nb_channels * 4
    else:
        out = two_conv_layer(x, nb_channels, kernel_size=kernel_size,
                             stride=2, l2_reg=l2_reg)

    # 1x1 strided projection on the shortcut to match shapes (legacy Keras 1 API)
    proj = Convolution2D(nb_channels, 1, 1, subsample=(2, 2),
                         border_mode='valid', init='he_normal',
                         W_regularizer=l2(l2_reg), bias=False)(x)
    # proj = AveragePooling2D((1, 1), (2, 2))(x)

    out = merge([proj, out], mode='sum')
    return out
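This example uses the legacy Keras 1 API; under Keras 2 the shortcut projection and sum map roughly to the following sketch (a fragment that mirrors the variable names of this example, not a drop-in function):

from keras.layers import Conv2D, add
from keras.regularizers import l2

# Keras 2 equivalents of the legacy Convolution2D/merge calls in Example 8:
proj = Conv2D(nb_channels, (1, 1), strides=(2, 2), padding='valid',
              kernel_initializer='he_normal',
              kernel_regularizer=l2(l2_reg), use_bias=False)(x)
out = add([proj, out])  # replaces merge([proj, out], mode='sum')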
Example 9: transition
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def transition(x, nb_filter, dropout_rate=None, weight_decay=1E-4):
    """Apply ReLU, 1x1 Conv2D, optional dropout and AveragePooling2D
    :param x: keras tensor
    :param nb_filter: int -- number of filters
    :param dropout_rate: float -- dropout rate
    :param weight_decay: float -- weight decay factor
    :returns: keras tensor, after applying relu, conv, dropout and average pooling
    """
    x = Activation('relu')(x)
    # Legacy Keras 1 Convolution2D signature
    x = Convolution2D(nb_filter, 1, 1,
                      init="he_uniform",
                      border_mode="same",
                      bias=False,
                      W_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x
Example 10: transition_block
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def transition_block(input, nb_filter, dropout_rate=None, pooltype=1, weight_decay=1e-4):
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    if pooltype == 2:
        # stride (2, 2): halve both height and width
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    elif pooltype == 1:
        # pad width, then stride (2, 1): halve height, roughly preserve width
        x = ZeroPadding2D(padding=(0, 1))(x)
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    elif pooltype == 3:
        # stride (2, 1): halve height, width shrinks by 1
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    return x, nb_filter
Example 11: transition_block
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def transition_block(input, nb_filter, dropout_rate=None, pooltype=1, weight_decay=1e-4):
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    if pooltype == 2:
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    elif pooltype == 1:
        x = ZeroPadding2D(padding=(0, 1))(x)
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    elif pooltype == 3:
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    return x, nb_filter
Example 12: transition_block
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def transition_block(input, nb_filter, dropout_rate=None, pooltype=1, weight_decay=1e-4):
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    if pooltype == 2:
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    elif pooltype == 1:
        x = ZeroPadding2D(padding=(0, 1))(x)
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    elif pooltype == 3:
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    return x, nb_filter
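The three pooltype branches in Examples 10 through 12 differ only in how they treat the width axis, which matters for OCR-style networks that need to keep horizontal resolution. A small sketch to check the resulting shapes (the input size is illustrative):

from keras.layers import Input, ZeroPadding2D
from keras.layers.pooling import AveragePooling2D
from keras.models import Model

ip = Input(shape=(32, 280, 64))  # e.g. a text-line feature map (h, w, c)

for pooltype, name in [(2, 'stride (2,2)'), (1, 'pad + stride (2,1)'), (3, 'stride (2,1)')]:
    x = ip
    if pooltype == 2:
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    elif pooltype == 1:
        x = ZeroPadding2D(padding=(0, 1))(x)
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    elif pooltype == 3:
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    print(name, Model(ip, x).output_shape)
# stride (2,2)        (None, 16, 140, 64)  -- height and width halved
# pad + stride (2,1)  (None, 16, 281, 64)  -- height halved, width kept (+1)
# stride (2,1)        (None, 16, 279, 64)  -- height halved, width -1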
Example 13: pyramid_pooling_block
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def pyramid_pooling_block(input_tensor, bin_sizes):
    concat_list = [input_tensor]
    # static spatial dimensions (TF1-style .value access)
    h = input_tensor.shape[1].value
    w = input_tensor.shape[2].value

    for bin_size in bin_sizes:
        # pool the feature map down to a bin_size x bin_size grid
        x = AveragePooling2D(pool_size=(h // bin_size, w // bin_size),
                             strides=(h // bin_size, w // bin_size))(input_tensor)
        x = Conv2D(512, kernel_size=1)(x)
        # upsample back to the input resolution (tf.image.resize_images is the TF1 API)
        x = Lambda(lambda x: tf.image.resize_images(x, (h, w)))(x)
        concat_list.append(x)

    return concatenate(concat_list)
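PSPNet-style pyramid pooling is typically called with bin sizes 1, 2, 3 and 6. A hedged usage sketch (it assumes the TF1-era imports this example relies on, i.e. tf, Lambda, Conv2D and concatenate, plus the function above; the input size is illustrative):

from keras.layers import Input

ip = Input(shape=(60, 60, 2048))        # backbone feature map (h, w, c)
out = pyramid_pooling_block(ip, [1, 2, 3, 6])
# each of the four branches adds 512 channels on top of the input's 2048,
# so the concatenated output has shape (None, 60, 60, 2048 + 4 * 512)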
Example 14: get_features
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def get_features(image, model):
    '''
    get the feature map of every activation layer for a given
    image and a given model
    :param image: preprocessed input image array
    :param model: given model
    :return: features of all activation layers, pooled to a 10x10 grid
    '''
    # image = load_image(image_src)
    feature_maps = np.zeros((10, 10, 15104))
    activation_layers = ['activation_' + str(i) for i in range(4, 50, 3)]
    start_index = 0
    for layer_name in activation_layers:
        layer = model.get_layer(layer_name)
        nchannel = layer.output_shape[-1]
        conv_output = layer.output
        # Adjusting the pooling size to the layer's spatial size,
        # so every layer ends up as a 10x10 map
        if layer.output_shape[-2] == 74:
            conv_output = AveragePooling2D(pool_size=(7, 7))(conv_output)
        if layer.output_shape[-2] == 37:
            conv_output = AveragePooling2D(pool_size=(4, 4), border_mode='same')(conv_output)
        if layer.output_shape[-2] == 19:
            conv_output = AveragePooling2D(pool_size=(2, 2), border_mode='same')(conv_output)
        featuremap_function = K.function([model.input, K.learning_phase()], [conv_output])
        output = featuremap_function([image, 0])
        feature_maps[:, :, start_index:start_index + nchannel] = output[0][0, :, :, :]
        start_index = start_index + nchannel
    return feature_maps
Example 15: __transition_block
# Required import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import AveragePooling2D [as alias]
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4, attention_module=None):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D (with optional compression) and AveragePooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                     in the transition block.
        weight_decay: weight decay factor
        attention_module: 'se_block' or 'cbam_block' to append an attention module
    Returns: keras tensor, after applying batch_norm, relu, 1x1 conv and average pooling
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    # optional attention module (se_block / cbam_block defined elsewhere in the repository)
    if attention_module == 'se_block':
        x = se_block(x)
    elif attention_module == 'cbam_block':
        x = cbam_block(x)
    return x