This page collects typical usage examples of the Python method keras.layers.pooling.MaxPooling2D. If you have been wondering how pooling.MaxPooling2D is used in Python, or you are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples from the module this method belongs to, keras.layers.pooling.
The following presents 15 code examples of the pooling.MaxPooling2D method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
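Before the project snippets, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the basic MaxPooling2D call and the resulting output shape; the 224×224×3 input size is only an illustrative assumption.
from keras.layers import Input
from keras.layers.pooling import MaxPooling2D
from keras.models import Model

# Hypothetical 224x224 RGB input, chosen only for illustration.
inp = Input(shape=(224, 224, 3))
# 2x2 max pooling with stride 2 halves the spatial dimensions.
out = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')(inp)
model = Model(inp, out)
print(model.output_shape)  # (None, 112, 112, 3)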
Example 1: __initial_conv_block_inception
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def __initial_conv_block_inception(input, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and relu for the inception resnext
    Args:
        input: input tensor
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv2D(64, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    return x
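A possible way to wire this block into a model, as a sketch only: it assumes the imports used above (Conv2D, BatchNormalization, LeakyReLU, MaxPooling2D, l2 and the backend K) are in scope, and the 224×224×3 input size is illustrative.
from keras.layers import Input
from keras.models import Model

img = Input(shape=(224, 224, 3))          # illustrative input size
x = __initial_conv_block_inception(img)   # strided conv then pool: 224 -> 112 -> 56
model = Model(img, x)
print(model.output_shape)                 # (None, 56, 56, 64) with channels_last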
Example 2: inception_block_1c
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def inception_block_1c(X):
    X_3x3 = fr_utils.conv2d_bn(X,
                               layer='inception_3c_3x3',
                               cv1_out=128,
                               cv1_filter=(1, 1),
                               cv2_out=256,
                               cv2_filter=(3, 3),
                               cv2_strides=(2, 2),
                               padding=(1, 1))
    X_5x5 = fr_utils.conv2d_bn(X,
                               layer='inception_3c_5x5',
                               cv1_out=32,
                               cv1_filter=(1, 1),
                               cv2_out=64,
                               cv2_filter=(5, 5),
                               cv2_strides=(2, 2),
                               padding=(2, 2))
    X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)
    X_pool = ZeroPadding2D(padding=((0, 1), (0, 1)), data_format='channels_first')(X_pool)
    inception = concatenate([X_3x3, X_5x5, X_pool], axis=1)
    return inception
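The MaxPooling2D/ZeroPadding2D pair in the pooling branch exists so that its spatial size lines up with the strided 3×3 and 5×5 branches before the channel-axis concatenation. Below is a standalone sketch of that arithmetic, independent of fr_utils; the 192×12×12 channels_first input size is an illustrative assumption, not taken from the project above.
from keras.layers import Input, Conv2D, ZeroPadding2D
from keras.layers.pooling import MaxPooling2D
from keras.models import Model

inp = Input(shape=(192, 12, 12))  # assumed channels_first input

# Strided conv branch: pad by 1, then 3x3 conv with stride 2 -> 6x6.
conv = ZeroPadding2D((1, 1), data_format='channels_first')(inp)
conv = Conv2D(256, (3, 3), strides=(2, 2), data_format='channels_first')(conv)

# Pool branch: 3x3/2 max pool gives 5x5; asymmetric (0, 1) padding restores 6x6.
pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(inp)
pool = ZeroPadding2D(padding=((0, 1), (0, 1)), data_format='channels_first')(pool)

m = Model(inp, [conv, pool])
print(m.output_shape)  # [(None, 256, 6, 6), (None, 192, 6, 6)] -> safe to concatenate on axis=1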
Example 3: inception_block_2b
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def inception_block_2b(X):
    # inception4e
    X_3x3 = fr_utils.conv2d_bn(X,
                               layer='inception_4e_3x3',
                               cv1_out=160,
                               cv1_filter=(1, 1),
                               cv2_out=256,
                               cv2_filter=(3, 3),
                               cv2_strides=(2, 2),
                               padding=(1, 1))
    X_5x5 = fr_utils.conv2d_bn(X,
                               layer='inception_4e_5x5',
                               cv1_out=64,
                               cv1_filter=(1, 1),
                               cv2_out=128,
                               cv2_filter=(5, 5),
                               cv2_strides=(2, 2),
                               padding=(2, 2))
    X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)
    X_pool = ZeroPadding2D(padding=((0, 1), (0, 1)), data_format='channels_first')(X_pool)
    inception = concatenate([X_3x3, X_5x5, X_pool], axis=1)
    return inception
Example 4: inception_block_3b
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def inception_block_3b(X):
    X_3x3 = fr_utils.conv2d_bn(X,
                               layer='inception_5b_3x3',
                               cv1_out=96,
                               cv1_filter=(1, 1),
                               cv2_out=384,
                               cv2_filter=(3, 3),
                               cv2_strides=(1, 1),
                               padding=(1, 1))
    X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)
    X_pool = fr_utils.conv2d_bn(X_pool,
                                layer='inception_5b_pool',
                                cv1_out=96,
                                cv1_filter=(1, 1))
    X_pool = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_pool)
    X_1x1 = fr_utils.conv2d_bn(X,
                               layer='inception_5b_1x1',
                               cv1_out=256,
                               cv1_filter=(1, 1))
    inception = concatenate([X_3x3, X_pool, X_1x1], axis=1)
    return inception
Example 5: get_simple_unet
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def get_simple_unet(input_shape):
    img_input = Input(input_shape)
    conv1 = conv_block_simple(img_input, 32, "conv1_1")
    conv1 = conv_block_simple(conv1, 32, "conv1_2")
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), padding="same", name="pool1")(conv1)
    conv2 = conv_block_simple(pool1, 64, "conv2_1")
    conv2 = conv_block_simple(conv2, 64, "conv2_2")
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), padding="same", name="pool2")(conv2)
    conv3 = conv_block_simple(pool2, 128, "conv3_1")
    conv3 = conv_block_simple(conv3, 128, "conv3_2")
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), padding="same", name="pool3")(conv3)
    conv4 = conv_block_simple(pool3, 256, "conv4_1")
    conv4 = conv_block_simple(conv4, 256, "conv4_2")
    conv4 = conv_block_simple(conv4, 256, "conv4_3")
    up5 = concatenate([UpSampling2D()(conv4), conv3], axis=-1)
    conv5 = conv_block_simple(up5, 128, "conv5_1")
    conv5 = conv_block_simple(conv5, 128, "conv5_2")
    up6 = concatenate([UpSampling2D()(conv5), conv2], axis=-1)
    conv6 = conv_block_simple(up6, 64, "conv6_1")
    conv6 = conv_block_simple(conv6, 64, "conv6_2")
    up7 = concatenate([UpSampling2D()(conv6), conv1], axis=-1)
    conv7 = conv_block_simple(up7, 32, "conv7_1")
    conv7 = conv_block_simple(conv7, 32, "conv7_2")
    conv7 = SpatialDropout2D(0.2)(conv7)
    prediction = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv7)
    model = Model(img_input, prediction)
    return model
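A sketch of how this U-Net might be built and compiled, assuming conv_block_simple and the layer imports it relies on are defined elsewhere in the project; the 256×256×3 input size and the optimizer/loss choices are illustrative assumptions.
model = get_simple_unet((256, 256, 3))   # assumed input size; spatial dims should be divisible by 8 so the skip connections line up
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()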
Example 6: get_maxpool
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def get_maxpool(params):
    return MaxPooling2D(
        strides=params.get('stride', 1),
        pool_size=params.get('size', 1),
        padding="same")
Example 7: inception_block_1a
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def inception_block_1a(X):
    """
    Implementation of an inception block
    """
    X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3a_3x3_conv1')(X)
    X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn1')(X_3x3)
    X_3x3 = Activation('relu')(X_3x3)
    X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)
    X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)
    X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)
    X_3x3 = Activation('relu')(X_3x3)
    X_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)
    X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)
    X_5x5 = Activation('relu')(X_5x5)
    X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)
    X_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)
    X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)
    X_5x5 = Activation('relu')(X_5x5)
    X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)
    X_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)
    X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)
    X_pool = Activation('relu')(X_pool)
    X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)
    X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)
    X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)
    X_1x1 = Activation('relu')(X_1x1)
    # CONCAT
    inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)
    return inception
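A sketch of calling this block. The imports it uses (Conv2D, BatchNormalization, Activation, ZeroPadding2D, MaxPooling2D, concatenate) are assumed to be in scope, and the 192×12×12 channels_first input size is an assumption chosen so the asymmetrically padded pooling branch ends up the same spatial size as the other branches.
from keras.layers import Input
from keras.models import Model

X = Input(shape=(192, 12, 12))      # channels_first: (channels, rows, cols), assumed size
out = inception_block_1a(X)
print(Model(X, out).output_shape)   # (None, 256, 12, 12): 128 + 32 + 32 + 64 channels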
Example 8: create_model
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def create_model(input_shape, config, is_training=True):
    weight_decay = 0.001
    model = Sequential()
    model.add(Convolution2D(32, 7, 7, W_regularizer=l2(weight_decay), activation="relu", input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(64, 5, 5, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(128, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(256, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(512, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(1024, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(Dense(config["num_classes"], activation="softmax"))
    return model
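Note that this snippet is written against the older Keras 1 layer signature (Convolution2D(filters, rows, cols) with W_regularizer), so it assumes a Keras 1.x installation or an early 2.x release with legacy-argument support. A usage sketch with an assumed config dict and input size:
config = {"num_classes": 10}                  # hypothetical config
model = create_model((128, 128, 3), config)   # illustrative input size
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()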
Example 9: create_model
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def create_model(input_shape, config, is_training=True):
    weight_decay = 0.001
    model = Sequential()
    model.add(Convolution2D(16, 7, 7, W_regularizer=l2(weight_decay), activation="relu", input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(32, 5, 5, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(64, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(128, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(256, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    # Restore layer weights
    model.load_weights("logs/2016-12-16-15-58-20/weights.131.model", by_name=True)
    # Retrain last two layers from scratch
    model.add(Dense(1024, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(Dense(config["num_classes"], activation="softmax"))
    return model
Example 10: create_model
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def create_model(input_shape, config, is_training=True):
    weight_decay = 0.001
    model = Sequential()
    model.add(Convolution2D(16, 7, 7, W_regularizer=l2(weight_decay), activation="relu", input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(32, 5, 5, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(64, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(128, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(256, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(1024, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(Dense(config["num_classes"], activation="softmax"))
    return model
Example 11: create_model
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def create_model(input_shape, config, is_training=True):
    weight_decay = 0.001
    model = Sequential()
    model.add(Convolution2D(64, 3, 3, W_regularizer=l2(weight_decay), activation="relu", input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(128, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(256, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    # model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(256, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(512, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    # model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(512, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(512, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(Dense(config["num_classes"], activation="softmax"))
    return model
Example 12: create_model
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def create_model(input_shape, config, is_training=True):
    weight_decay = 0.001
    model = Sequential()
    model.add(Convolution2D(16, 7, 7, W_regularizer=l2(weight_decay), activation="relu", input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(32, 5, 5, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(64, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(128, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Convolution2D(256, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # (bs, y, x, c) --> (bs, x, y, c)
    model.add(Permute((2, 1, 3)))
    # (bs, x, y, c) --> (bs, x, y * c)
    bs, x, y, c = model.layers[-1].output_shape
    model.add(Reshape((x, y*c)))
    model.add(Bidirectional(LSTM(512, return_sequences=False), merge_mode="concat"))
    model.add(Dense(config["num_classes"], activation="softmax"))
    return model
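The Permute/Reshape pair at the end turns the pooled feature maps into a sequence along the x (width) axis so the bidirectional LSTM can read them as time steps. A standalone sketch of just that reshaping step, with an assumed feature-map shape:
from keras.models import Sequential
from keras.layers import Permute, Reshape

m = Sequential()
# Assume the conv/pool stack left feature maps of shape (y=4, x=25, c=256).
m.add(Permute((2, 1, 3), input_shape=(4, 25, 256)))   # -> (x, y, c)
_, x, y, c = m.layers[-1].output_shape
m.add(Reshape((x, y * c)))                            # -> 25 time steps of 4 * 256 = 1024 features
print(m.output_shape)                                 # (None, 25, 1024)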
Example 13: conv2d_bn
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def conv2d_bn(input_layer, index_layer,
              filters=16,
              kernel_size=(3, 3),
              strides=(1, 1)):
    """Utility function to apply conv + BN.
    # Arguments
        x: input tensor.
        filters: filters in `Conv2D`.
        num_row: height of the convolution kernel.
        num_col: width of the convolution kernel.
        padding: padding mode in `Conv2D`.
        strides: strides in `Conv2D`.
        name: name of the ops; will become `name + '_conv'`
            for the convolution and `name + '_bn'` for the
            batch norm layer.
    # Returns
        Output tensor after applying `Conv2D` and `BatchNormalization`.
    """
    if K.image_data_format() == 'channels_first':
        bn_axis = 1
    else:
        bn_axis = 3
    conv = Conv2D(filters=filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same',
                  use_bias=False,
                  name="conv{0}".format(index_layer))(input_layer)
    bn = BatchNormalization(axis=bn_axis, scale=False, name="bn{0}".format(index_layer))(conv)
    act = Activation('relu', name="act{0}".format(index_layer))(bn)
    pooling = MaxPooling2D(pool_size=2)(act)
    x = Dropout(0.3)(pooling)
    return x
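A sketch of stacking this conv + BN + pooling + dropout unit; the imports it relies on (Conv2D, BatchNormalization, Activation, MaxPooling2D, Dropout and the backend K) are assumed to be in scope, and the 64×64×3 input is illustrative.
from keras.layers import Input
from keras.models import Model

inp = Input(shape=(64, 64, 3))
x = conv2d_bn(inp, 1, filters=16)    # 64 -> 32 after the 2x2 max pool
x = conv2d_bn(x, 2, filters=32)      # 32 -> 16
model = Model(inp, x)
print(model.output_shape)            # (None, 16, 16, 32) with channels_last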
Example 14: build_discriminator
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def build_discriminator():
    dis_model = Sequential()
    dis_model.add(
        Conv2D(128, (5, 5),
               padding='same',
               input_shape=(64, 64, 3))
    )
    dis_model.add(LeakyReLU(alpha=0.2))
    dis_model.add(MaxPooling2D(pool_size=(2, 2)))
    dis_model.add(Conv2D(256, (3, 3)))
    dis_model.add(LeakyReLU(alpha=0.2))
    dis_model.add(MaxPooling2D(pool_size=(2, 2)))
    dis_model.add(Conv2D(512, (3, 3)))
    dis_model.add(LeakyReLU(alpha=0.2))
    dis_model.add(MaxPooling2D(pool_size=(2, 2)))
    dis_model.add(Flatten())
    dis_model.add(Dense(1024))
    dis_model.add(LeakyReLU(alpha=0.2))
    dis_model.add(Dense(1))
    dis_model.add(Activation('sigmoid'))
    return dis_model
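A usage sketch for the discriminator; the optimizer and loss are illustrative assumptions, not necessarily what the original GAN training script used.
from keras.optimizers import Adam

dis = build_discriminator()
dis.compile(optimizer=Adam(lr=0.0002, beta_1=0.5), loss='binary_crossentropy')
dis.summary()   # expects (64, 64, 3) inputs, single sigmoid real/fake output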
Example 15: resnet_graph
# Required module import: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import MaxPooling2D [as alias]
def resnet_graph(input_image, architecture, stage5=False):
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = ZeroPadding2D((3, 3))(input_image)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(axis=3, name='bn_conv1')(x)
    x = Activation('relu')(x)
    C1 = x = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i))
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]
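This backbone appears to come from a Mask R-CNN style codebase; conv_block, identity_block and BatchNorm are helpers defined elsewhere in that project, so the sketch below only illustrates the intended call pattern under that assumption, and the 1024×1024 image size is illustrative.
from keras.layers import Input
from keras.models import Model

input_image = Input(shape=(1024, 1024, 3))                  # assumed image size
C1, C2, C3, C4, C5 = resnet_graph(input_image, "resnet101", stage5=True)
# C1..C5 are feature maps at strides 4, 4, 8, 16 and 32, typically fed into an FPN.
backbone = Model(input_image, [C2, C3, C4, C5])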
############################################################
# Proposal Layer
############################################################