本文整理汇总了Python中tensorflow.keras.layers.MaxPooling2D方法的典型用法代码示例。如果您正苦于以下问题:Python layers.MaxPooling2D方法的具体用法?Python layers.MaxPooling2D怎么用?Python layers.MaxPooling2D使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow.keras.layers
的用法示例。
在下文中一共展示了layers.MaxPooling2D方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: conv_layer
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def conv_layer(inputs,
               filters=32,
               kernel_size=3,
               strides=1,
               use_maxpool=True,
               postfix=None,
               activation=None):
    """Build a Conv2D-BN-ELU block with optional max pooling.

    Arguments:
        inputs (tensor): Input tensor from the previous layer.
        filters (int): Number of convolution filters.
        kernel_size (int): Size of the (square) convolution kernel.
        strides (int): Convolution stride.
        use_maxpool (bool): Append a MaxPooling2D layer when True.
        postfix (str): Suffix appended to each layer name; must not be
            None since it is concatenated into the names below.
        activation: Unused — the block always applies ELU.
            NOTE(review): kept only for interface compatibility.

    Returns:
        Output tensor of the block.
    """
    # conv2d is a project-level helper defined elsewhere in this module.
    x = conv2d(inputs,
               filters=filters,
               kernel_size=kernel_size,
               strides=strides,
               name='conv'+postfix)
    x = BatchNormalization(name="bn"+postfix)(x)
    x = ELU(name='elu'+postfix)(x)
    if use_maxpool:
        # MaxPooling2D defaults to pool_size=(2, 2).
        x = MaxPooling2D(name='pool'+postfix)(x)
    return x
示例2: conv_layer
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def conv_layer(inputs,
               filters=32,
               kernel_size=3,
               strides=1,
               use_maxpool=True,
               postfix=None,
               activation=None):
    """Helper function to build Conv2D-BN-ReLU layer
    with optional MaxPooling2D.

    Note: `activation` is accepted for interface compatibility but the
    block always applies ReLU.
    """
    conv = Conv2D(filters=filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  kernel_initializer='he_normal',
                  name="conv_" + postfix,
                  padding='same')
    out = conv(inputs)
    out = BatchNormalization(name="bn_" + postfix)(out)
    out = Activation('relu', name='relu_' + postfix)(out)
    # Optional 2x2 downsampling stage.
    return MaxPooling2D(name='pool' + postfix)(out) if use_maxpool else out
示例3: get_model
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def get_model(args):
    """Assemble and compile a small CNN classifier for 28x28x1 inputs
    (e.g. MNIST/Fashion-MNIST) with 10 output classes.

    Layer sizes, activations, dropout, optimizer, learning rate and loss
    all come from the parsed command-line `args`.
    """
    layer_stack = [
        layers.Conv2D(args.conv1_size, (3, 3),
                      activation=args.conv_activation,
                      input_shape=(28, 28, 1)),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(args.conv2_size, (3, 3), activation=args.conv_activation),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(64, (3, 3), activation=args.conv_activation),
        layers.Dropout(args.dropout),
        layers.Flatten(),
        layers.Dense(args.hidden1_size, activation=args.dense_activation),
        layers.Dense(10, activation='softmax'),
    ]
    model = models.Sequential()
    for layer in layer_stack:
        model.add(layer)
    model.summary()
    # OPTIMIZERS maps the optimizer name to its tf.keras class.
    model.compile(optimizer=OPTIMIZERS[args.optimizer](learning_rate=args.learning_rate),
                  loss=args.loss,
                  metrics=['accuracy'])
    return model
示例4: build_pnet
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def build_pnet(self, input_shape=None):
    """Construct the MTCNN P-Net: a fully-convolutional proposal network
    producing a 2-class face softmax map and a 4-value box regression map.

    Args:
        input_shape: Optional (H, W, C) tuple; defaults to variable-size
            RGB inputs (None, None, 3).

    Returns:
        A Keras Model mapping the input to [box_regression, face_softmax].
    """
    shape = (None, None, 3) if input_shape is None else input_shape
    inp = Input(shape)

    net = Conv2D(10, kernel_size=(3, 3), strides=(1, 1), padding="valid")(inp)
    net = PReLU(shared_axes=[1, 2])(net)
    net = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(net)
    net = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding="valid")(net)
    net = PReLU(shared_axes=[1, 2])(net)
    net = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(net)
    net = PReLU(shared_axes=[1, 2])(net)

    # Two 1x1-conv heads share the trunk features.
    face_prob = Conv2D(2, kernel_size=(1, 1), strides=(1, 1))(net)
    face_prob = Softmax(axis=3)(face_prob)
    box_regr = Conv2D(4, kernel_size=(1, 1), strides=(1, 1))(net)

    return Model(inp, [box_regr, face_prob])
示例5: residual
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def residual(x, num_filters,
             kernel_size=(3, 3),
             activation='relu',
             pool_strides=(2, 2),
             max_pooling=True):
    "Residual block."
    # Shortcut branch: project with a 1x1 conv whenever the spatial size
    # (max_pooling) or the channel count (num_filters) would otherwise
    # mismatch the main branch; pass through unchanged otherwise.
    if max_pooling:
        shortcut = layers.Conv2D(num_filters,
                                 kernel_size=(1, 1),
                                 strides=pool_strides,
                                 padding='same')(x)
    elif num_filters != keras.backend.int_shape(x)[-1]:
        shortcut = layers.Conv2D(num_filters,
                                 kernel_size=(1, 1),
                                 padding='same')(x)
    else:
        shortcut = x

    # Main branch: two separable convolutions (sep_conv is a sibling helper).
    out = sep_conv(x, num_filters, kernel_size, activation)
    out = sep_conv(out, num_filters, kernel_size, activation)
    if max_pooling:
        out = layers.MaxPooling2D(kernel_size,
                                  strides=pool_strides,
                                  padding='same')(out)
    return layers.add([out, shortcut])
示例6: _initial_conv_block_inception
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def _initial_conv_block_inception(input, initial_conv_filters, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and relu for the DPN
    Args:
        input: input tensor
        initial_conv_filters: number of filters for initial conv block
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    # BatchNorm axis depends on the backend image layout.
    bn_axis = 1 if K.image_data_format() == 'channels_first' else -1

    stem = Conv2D(initial_conv_filters, (7, 7),
                  strides=(2, 2),
                  padding='same',
                  use_bias=False,
                  kernel_initializer='he_normal',
                  kernel_regularizer=l2(weight_decay))(input)
    stem = BatchNormalization(axis=bn_axis)(stem)
    stem = Activation('relu')(stem)
    # Overlapping 3x3/stride-2 pooling completes the 4x stem downsampling.
    return MaxPooling2D((3, 3), strides=(2, 2), padding='same')(stem)
示例7: down_stage
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def down_stage(inputs, filters, kernel_size=3,
               activation="relu", padding="SAME"):
    """One U-Net encoder stage: two Conv2D + GroupNormalization steps
    followed by 2x2 max pooling.

    Returns:
        (features, pooled): the pre-pool features for the skip connection
        and the pooled output for the next, coarser stage.
    """
    features = inputs
    for _ in range(2):
        features = Conv2D(filters, kernel_size,
                          activation=activation, padding=padding)(features)
        features = GroupNormalization()(features)
    pooled = MaxPooling2D()(features)
    return features, pooled
示例8: make_model
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def make_model(**kwargs) -> tf.keras.Model:
    """Build and compile a traffic-sign classifier (48x48 RGB in, 43 classes).

    Model is based on MicronNet: https://arxiv.org/abs/1804.00497v3
    Trained with SGD + Nesterov momentum on sparse categorical cross-entropy.
    """
    img_size = 48
    NUM_CLASSES = 43
    eps = 1e-6  # epsilon shared by every BatchNormalization layer

    inputs = Input(shape=(img_size, img_size, 3))
    # 1-filter 1x1 conv acts as a learned color transform of the input.
    x = Conv2D(1, (1, 1), padding="same")(inputs)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = Conv2D(29, (5, 5), padding="same")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Conv2D(59, (3, 3), padding="same")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Conv2D(74, (3, 3), padding="same")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Flatten()(x)
    x = Dense(300)(x)
    x = Activation("relu")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Dense(300, activation="relu")(x)
    predictions = Dense(NUM_CLASSES, activation="softmax")(x)

    model = Model(inputs=inputs, outputs=predictions)
    model.compile(
        # `lr=` is a deprecated alias in tf.keras; use the canonical
        # `learning_rate=` keyword (consistent with the other compile
        # calls in this file).
        optimizer=tf.keras.optimizers.SGD(
            learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True
        ),
        loss=tf.keras.losses.sparse_categorical_crossentropy,
        metrics=["accuracy"],
    )
    return model
示例9: make_cifar_model
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def make_cifar_model(**kwargs) -> tf.keras.Model:
    """Build and compile a small CNN for CIFAR-10 (32x32x3 in, 10 classes)."""
    model = Sequential()
    model.add(
        Conv2D(
            filters=4,
            kernel_size=(5, 5),
            strides=1,
            activation="relu",
            input_shape=(32, 32, 3),
        )
    )
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # No input_shape here: in a Sequential model only the first layer's
    # input_shape is honored. The original passed a misleading
    # input_shape=(23, 23, 4) — the tensor at this point is actually
    # (14, 14, 4) — so the spurious argument is dropped.
    model.add(
        Conv2D(
            filters=10,
            kernel_size=(5, 5),
            strides=1,
            activation="relu",
        )
    )
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation="relu"))
    model.add(Dense(10, activation="softmax"))
    model.compile(
        loss=tf.keras.losses.sparse_categorical_crossentropy,
        # `lr=` is a deprecated alias; `learning_rate=` is canonical.
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.003),
        metrics=["accuracy"],
    )
    return model
示例10: make_mnist_model
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def make_mnist_model(**kwargs) -> tf.keras.Model:
    """Build and compile a small CNN for MNIST (28x28x1 in, 10 classes)."""
    model = Sequential()
    model.add(
        Conv2D(
            filters=4,
            kernel_size=(5, 5),
            strides=1,
            activation="relu",
            input_shape=(28, 28, 1),
        )
    )
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # No input_shape here: in a Sequential model only the first layer's
    # input_shape is honored. The original passed a misleading
    # input_shape=(23, 23, 4) — the tensor at this point is actually
    # (12, 12, 4) — so the spurious argument is dropped.
    model.add(
        Conv2D(
            filters=10,
            kernel_size=(5, 5),
            strides=1,
            activation="relu",
        )
    )
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation="relu"))
    model.add(Dense(10, activation="softmax"))
    model.compile(
        loss=tf.keras.losses.sparse_categorical_crossentropy,
        # `lr=` is a deprecated alias; `learning_rate=` is canonical.
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.003),
        metrics=["accuracy"],
    )
    return model
示例11: __init__
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def __init__(self,
             pool_size,
             strides,
             padding=0,
             ceil_mode=False,
             data_format="channels_last",
             **kwargs):
    """PyTorch-style MaxPool2d adapter built on tf.keras MaxPooling2D.

    Normalizes int arguments to (h, w) pairs and precomputes how explicit
    symmetric padding is applied before the underlying "valid" pooling op.

    Args:
        pool_size: int or (h, w) pooling window size.
        strides: int or (h, w) stride.
        padding: int or (h, w) symmetric pixel padding (PyTorch style),
            not a Keras "same"/"valid" string.
        ceil_mode: Emulate PyTorch's ceil_mode; only takes effect when
            the stride actually downsamples (stride > 1).
        data_format: "channels_last" or "channels_first".
        **kwargs: Forwarded to the base layer constructor.
    """
    super(MaxPool2d, self).__init__(**kwargs)
    # Normalize scalar arguments to (height, width) pairs.
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)
    self.use_stride = (strides[0] > 1) or (strides[1] > 1)
    # ceil_mode is irrelevant without downsampling, so it is masked out.
    self.ceil_mode = ceil_mode and self.use_stride
    self.use_pad = (padding[0] > 0) or (padding[1] > 0)
    if self.ceil_mode:
        # Defer padding computation: store the raw parameters so padding
        # can be derived from the runtime input size.
        # NOTE(review): presumably consumed in call() — confirm there.
        self.padding = padding
        self.pool_size = pool_size
        self.strides = strides
        self.data_format = data_format
    elif self.use_pad:
        # Precompute a tf.pad-style spec: [[before, after], ...] per axis,
        # ordered according to the data layout (is_channels_first is a
        # project helper).
        if is_channels_first(data_format):
            self.paddings_tf = [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2]
        else:
            self.paddings_tf = [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]]
    # Underlying pooling op; padding is always "valid" because any explicit
    # padding is handled separately via the spec above.
    self.pool = nn.MaxPooling2D(
        pool_size=pool_size,
        strides=strides,
        padding="valid",
        data_format=data_format)
示例12: make_layers
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def make_layers(cfg,
                inputs,
                batch_norm=True,
                in_channels=1):
    """Helper function to ease the creation of VGG
    network model
    Arguments:
        cfg (dict): Summarizes the network layer
            configuration
        inputs (tensor): Input from previous layer
        batch_norm (Bool): Whether to use batch norm
            between Conv2D and ReLU
        in_channel (int): Number of input channels
    """
    x = inputs
    for spec in cfg:
        # 'M' -> max pooling, 'A' -> 3x3 average pooling,
        # anything else is a filter count for a 3x3 conv.
        if spec == 'M':
            x = MaxPooling2D()(x)
            continue
        if spec == 'A':
            x = AveragePooling2D(pool_size=3)(x)
            continue
        x = Conv2D(spec,
                   kernel_size=3,
                   padding='same',
                   kernel_initializer='he_normal')(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation('relu')(x)
    return x
示例13: get_model_meta
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def get_model_meta(filename):
    """Load model metadata from *filename*, trying standalone keras first
    and falling back to tf.keras; on success, import the matching layer
    classes into module globals so later code uses whichever backend
    actually loaded the model.
    """
    print("Loading model " + filename)
    global use_tf_keras
    global Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
    try:
        from keras.models import load_model as load_model_keras
        ret = get_model_meta_real(filename, load_model_keras)
        # model is successfully loaded. Import layers from keras
        from keras.models import Sequential
        from keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
        from keras.layers import Conv2D, MaxPooling2D
        from keras.layers import LeakyReLU
        from keras import regularizers
        from keras import backend as K
        print("Model imported using keras")
    except (KeyboardInterrupt, SystemExit, SyntaxError, NameError, IndentationError):
        # Programmer/user aborts and coding errors must not be masked
        # by the fallback path.
        raise
    except:
        # Deliberately broad: any keras load failure (package missing,
        # incompatible model file, ...) triggers the tf.keras fallback.
        print("Failed to load model with keras. Trying tf.keras...")
        use_tf_keras = True
        from tensorflow.keras.models import load_model as load_model_tf
        ret = get_model_meta_real(filename, load_model_tf)
        # model is successfully loaded. Import layers from tensorflow.keras
        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
        from tensorflow.keras.layers import Conv2D, MaxPooling2D
        from tensorflow.keras.layers import LeakyReLU
        from tensorflow.keras import regularizers
        from tensorflow.keras import backend as K
        print("Model imported using tensorflow.keras")
    # put imported functions in global
    # (Because of the `global` statement above, the imports already bound
    # the module-level names; this self-assignment just makes it explicit.)
    Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K = \
        Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
    return ret
示例14: create_model
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def create_model(config):
    """Build and compile a CIFAR-style two-block CNN trained with RMSprop.

    Args:
        config: Unused here; kept for the caller's interface.

    NOTE(review): relies on names defined elsewhere in this file —
    module-level `input_shape` and `num_classes`, plus the keras layer
    classes imported at the top of the file.
    """
    import tensorflow as tf
    model = Sequential()
    # Block 1: two 3x3 convs (32 filters), pool, dropout.
    model.add(Conv2D(32, (3, 3), padding="same", input_shape=input_shape))
    model.add(Activation("relu"))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # Block 2: two 3x3 convs (64 filters), pool, dropout.
    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # Classifier head.
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation("softmax"))
    # initiate RMSprop optimizer; `lr=` is a deprecated alias, so use the
    # canonical `learning_rate=` keyword instead.
    opt = tf.keras.optimizers.RMSprop(learning_rate=0.001, decay=1e-6)
    # Let"s train the model using RMSprop
    model.compile(
        loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
    return model
示例15: contracting_layer_2D
# 需要导入模块: from tensorflow.keras import layers [as 别名]
# 或者: from tensorflow.keras.layers import MaxPooling2D [as 别名]
def contracting_layer_2D(input, neurons, ba_norm, ba_norm_momentum):
    """Densely-connected contracting (encoder) block for a 2D U-Net variant.

    Each conv output is concatenated with the block input before the next
    step; the final concatenation is returned as the skip connection
    alongside the pooled output.
    """
    conv_a = Conv2D(neurons, (3,3), activation='relu', padding='same')(input)
    if ba_norm:
        conv_a = BatchNormalization(momentum=ba_norm_momentum)(conv_a)
    merged_a = concatenate([input, conv_a], axis=-1)

    conv_b = Conv2D(neurons, (3,3), activation='relu', padding='same')(merged_a)
    if ba_norm:
        conv_b = BatchNormalization(momentum=ba_norm_momentum)(conv_b)
    merged_b = concatenate([input, conv_b], axis=-1)

    # Returns (pooled output for the next stage, skip-connection features).
    return MaxPooling2D(pool_size=(2, 2))(merged_b), merged_b
# Create the middle layer between the contracting and expanding layers