This article collects typical usage examples of the keras.layers.MaxPooling2D method in Python. If you are wondering how layers.MaxPooling2D is used, what it is for, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples from its parent module, keras.layers.
The 15 code examples of layers.MaxPooling2D shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
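Before the collected examples, here is a minimal, self-contained sketch (not taken from any example below) of what MaxPooling2D does on its own; the 32x32x3 input shape is only an illustrative assumption:

from keras.layers import Input, MaxPooling2D
from keras.models import Model

inp = Input(shape=(32, 32, 3))                                    # channels_last image tensor
out = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')(inp)
model = Model(inp, out)
print(model.output_shape)                                         # (None, 16, 16, 3): spatial dims halved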
Example 1: build_cae_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def build_cae_model(height=32, width=32, channel=3):
    """
    build convolutional autoencoder model
    """
    input_img = Input(shape=(height, width, channel))

    # encoder
    net = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
    net = MaxPooling2D((2, 2), padding='same')(net)
    net = Conv2D(8, (3, 3), activation='relu', padding='same')(net)
    net = MaxPooling2D((2, 2), padding='same')(net)
    net = Conv2D(4, (3, 3), activation='relu', padding='same')(net)
    encoded = MaxPooling2D((2, 2), padding='same', name='enc')(net)

    # decoder
    net = Conv2D(4, (3, 3), activation='relu', padding='same')(encoded)
    net = UpSampling2D((2, 2))(net)
    net = Conv2D(8, (3, 3), activation='relu', padding='same')(net)
    net = UpSampling2D((2, 2))(net)
    net = Conv2D(16, (3, 3), activation='relu', padding='same')(net)
    net = UpSampling2D((2, 2))(net)
    decoded = Conv2D(channel, (3, 3), activation='sigmoid', padding='same')(net)

    return Model(input_img, decoded)
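A hedged usage sketch for build_cae_model: an autoencoder is trained to reconstruct its own input, so the images serve as both inputs and targets. The optimizer, loss, and the x_train/x_test arrays (pixel values assumed scaled to [0, 1]) are assumptions, not part of the original example:

autoencoder = build_cae_model(height=32, width=32, channel=3)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.fit(x_train, x_train,                 # input == target for an autoencoder
                epochs=10, batch_size=128,
                validation_data=(x_test, x_test))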
Example 2: build_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def build_model(x_train, num_classes):
    # Reset default graph. Keras leaves old ops in the graph,
    # which are ignored for execution but clutter graph
    # visualization in TensorBoard.
    tf.reset_default_graph()

    inputs = KL.Input(shape=x_train.shape[1:], name="input_image")
    x = KL.Conv2D(32, (3, 3), activation='relu', padding="same",
                  name="conv1")(inputs)
    x = KL.Conv2D(64, (3, 3), activation='relu', padding="same",
                  name="conv2")(x)
    x = KL.MaxPooling2D(pool_size=(2, 2), name="pool1")(x)
    x = KL.Flatten(name="flat1")(x)
    x = KL.Dense(128, activation='relu', name="dense1")(x)
    x = KL.Dense(num_classes, activation='softmax', name="dense2")(x)
    return KM.Model(inputs, x, name="digit_classifier_model")
# Load MNIST Data
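The original snippet stops at the comment above; a plausible continuation (an assumption, not the author's code) that loads and normalizes MNIST for this model would look like:

from keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0   # add channel dim, scale to [0, 1]
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0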
Example 3: max_pool2d
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def max_pool2d(h_kernel_size, h_stride):
    def compile_fn(di, dh):
        layer = layers.MaxPooling2D(pool_size=dh['kernel_size'],
                                    strides=(dh['stride'], dh['stride']),
                                    padding='same')

        def fn(di):
            return {'out': layer(di['in'])}

        return fn

    return siso_keras_module('MaxPool2D', compile_fn, {
        'kernel_size': h_kernel_size,
        'stride': h_stride,
    })
Example 4: modelF
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def modelF():
    model = Sequential()

    model.add(Convolution2D(32, 3, 3,
                            border_mode='valid',
                            input_shape=(FLAGS.IMAGE_ROWS,
                                         FLAGS.IMAGE_COLS,
                                         FLAGS.NUM_CHANNELS)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('relu'))

    model.add(Dense(FLAGS.NUM_CLASSES))
    return model
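Hedged usage note for modelF: the network ends with a plain Dense layer and no softmax, which suggests the softmax is applied elsewhere (for example inside the loss). One way to train it directly, assuming one-hot labels and appending the softmax explicitly (both assumptions), is:

model = modelF()
model.add(Activation('softmax'))                  # logits -> probabilities
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])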
Example 5: _initial_conv_block_inception
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def _initial_conv_block_inception(input, initial_conv_filters, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and relu for the DPN
    Args:
        input: input tensor
        initial_conv_filters: number of filters for initial conv block
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(initial_conv_filters, (7, 7), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay),
               strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x
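A hedged shape check for _initial_conv_block_inception: the stride-2 convolution and the stride-2 pooling each halve the spatial dimensions, so the block downsamples by 4x overall. The 224x224x3 input below is an assumption, and the usual keras imports (Input, backend as K) are taken as given:

inp = Input(shape=(224, 224, 3))
x = _initial_conv_block_inception(inp, initial_conv_filters=64)
print(K.int_shape(x))            # expected (None, 56, 56, 64) with channels_last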
Example 6: cnn_2D
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def cnn_2D(self, input_shape, modual=''):
    # Build the CNN sub-model (functional API)
    model_in = Input(input_shape)
    model = Conv2D(
        filters=6,
        kernel_size=(3, 3),
        input_shape=input_shape,
        activation='relu',
        kernel_initializer='he_normal',
        name=modual + 'conv1'
    )(model_in)  # now 30x30x6
    model = MaxPooling2D(pool_size=(2, 2))(model)  # now 15x15x6
    model = Conv2D(
        filters=8,
        kernel_size=(4, 4),
        activation='relu',
        kernel_initializer='he_normal',
        name=modual + 'conv2'
    )(model)  # now 12x12x8
    model = MaxPooling2D(pool_size=(2, 2))(model)  # now 6x6x8
    model = Flatten()(model)
    model = Dropout(0.5)(model)
    model_out = Dense(100, activation='relu', name=modual + 'fc1')(model)
    return model_in, model_out
Example 7: get_Shared_Model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def get_Shared_Model(input_dim):
    sharedNet = Sequential()
    sharedNet.add(Dense(128, input_shape=(input_dim,), activation='relu'))
    sharedNet.add(Dropout(0.1))
    sharedNet.add(Dense(128, activation='relu'))
    sharedNet.add(Dropout(0.1))
    sharedNet.add(Dense(128, activation='relu'))
    # sharedNet.add(Dropout(0.1))
    # sharedNet.add(Dense(3, activation='relu'))

    # sharedNet = Sequential()
    # sharedNet.add(Dense(4096, activation="tanh", kernel_regularizer=l2(2e-3)))
    # sharedNet.add(Reshape(target_shape=(64, 64, 1)))
    # sharedNet.add(Conv2D(filters=64, kernel_size=3, strides=(2, 2), padding="same", activation="relu", kernel_regularizer=l2(1e-3)))
    # sharedNet.add(MaxPooling2D())
    # sharedNet.add(Conv2D(filters=128, kernel_size=3, strides=(2, 2), padding="same", activation="relu", kernel_regularizer=l2(1e-3)))
    # sharedNet.add(MaxPooling2D())
    # sharedNet.add(Conv2D(filters=64, kernel_size=3, strides=(1, 1), padding="same", activation="relu", kernel_regularizer=l2(1e-3)))
    # sharedNet.add(Flatten())
    # sharedNet.add(Dense(1024, activation="sigmoid", kernel_regularizer=l2(1e-3)))
    return sharedNet
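A hedged sketch of how a shared base like get_Shared_Model is typically wired into a siamese model, with weight sharing coming from reusing the same Sequential instance on both inputs; the 256-dim input, the L1 distance layer, and the final sigmoid are assumptions, not part of the original example:

from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import backend as K

base = get_Shared_Model(input_dim=256)
in_a = Input(shape=(256,))
in_b = Input(shape=(256,))
feat_a = base(in_a)                               # same weights...
feat_b = base(in_b)                               # ...applied to both branches
distance = Lambda(lambda t: K.abs(t[0] - t[1]))([feat_a, feat_b])
out = Dense(1, activation='sigmoid')(distance)
siamese = Model([in_a, in_b], out)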
Example 8: build_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def build_model(self):
    self.model = Sequential()
    self.model.add(Conv2D(32, kernel_size=(3, 3),
                          activation='relu', input_shape=(28, 28, 1)))
    self.model.add(Conv2D(64, (3, 3), activation='relu'))
    self.model.add(MaxPooling2D(pool_size=(2, 2)))
    self.model.add(Dropout(0.25))
    self.model.add(Flatten())
    self.model.add(Dense(128, activation='relu'))
    self.model.add(Dropout(0.5))
    self.model.add(Dense(10, activation='softmax'))

    self.model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=self.config.model.optimizer,
        metrics=['accuracy'])
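A hedged note on the loss chosen above: sparse_categorical_crossentropy expects integer class labels, so the training targets stay as plain digits rather than one-hot vectors. The tiny array below is only an illustration:

import numpy as np

y_train = np.array([5, 0, 4, 1])          # integer labels, shape (N,)
# with loss='categorical_crossentropy' the same labels would first need
# keras.utils.to_categorical(y_train, num_classes=10)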
Example 9: VGG_16
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def VGG_16():
    '''Model definition'''
    model = Sequential()
    model.add(Conv2D(64, (11, 11), padding='valid', strides=(4, 4),
                     input_shape=(img_height, img_width, num_channels), name='conv1'))
    model.add(Activation('relu', name='relu1'))
    model.add(LocalResponseNormalization(name='norm1'))
    model.add(MaxPooling2D((2, 2), padding='same', name='pool1'))

    model.add(Conv2D(256, (5, 5), padding='same', name='conv2'))
    model.add(Activation('relu', name='relu2'))
    model.add(LocalResponseNormalization(name='norm2'))
    model.add(MaxPooling2D((2, 2), padding='same', name='pool2'))

    model.add(Conv2D(256, (3, 3), padding='same', name='conv3'))
    model.add(Activation('relu', name='relu3'))
    model.add(Conv2D(256, (3, 3), padding='same', name='conv4'))
    model.add(Activation('relu', name='relu4'))
    model.add(Conv2D(256, (3, 3), padding='same', name='conv5'))
    model.add(Activation('relu', name='relu5'))
    model.add(MaxPooling2D((2, 2), padding='same', name='pool5'))

    return model
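Hedged usage sketch for VGG_16: the definition stops at pool5 with no classifier head, so a head would typically be appended before compiling. The layer sizes, the 1000-way output, and the extra keras.layers imports (Flatten, Dense, Dropout) are assumptions:

model = VGG_16()
model.add(Flatten(name='flatten'))
model.add(Dense(4096, activation='relu', name='fc6'))
model.add(Dropout(0.5))
model.add(Dense(1000, activation='softmax', name='fc8'))
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])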
Example 10: get_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def get_model():
    model = models.Sequential()
    model.add(layers.Conv2D(16, (3, 3), activation='relu', input_shape=(135, 240, 3), padding='same'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(27, activation='softmax'))
    return model


# model.summary()
# plot_model(model, to_file='model.png')
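Hedged usage for get_model, mirroring the commented-out summary/plot calls above; the optimizer and loss are assumptions chosen to match the 27-way softmax output:

model = get_model()
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',    # matches the 27-class softmax
              metrics=['accuracy'])
model.summary()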
Example 11: create_vgglike_network
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def create_vgglike_network(input_shape, weights):
    input = Input(shape=input_shape)

    # input: 192x256 images with 3 channels -> (192, 256, 3) tensors.
    # this applies 32 convolution filters of size 3x3 each.
    x = Conv2D(32, (3, 3), activation='relu')(input)
    x = Conv2D(32, (3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.25)(x)

    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.25)(x)

    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.5)(x)
    # x = Dense(2, activation='softmax')(x)
    x = Dense(128, activation='relu')(x)

    return Model(input, x)
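Hedged usage for create_vgglike_network: the model returns a 128-dimensional feature vector rather than class probabilities, so it reads like a shared embedding branch. Note that the weights argument is accepted but not used in the body as shown. The input size below comes from the comment inside the function:

embed = create_vgglike_network(input_shape=(192, 256, 3), weights=None)
print(embed.output_shape)        # (None, 128)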
Example 12: build
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def build(width, height, depth, total_classes, Saved_Weights_Path=None):
    # Initialize the Model
    model = Sequential()

    # First CONV => RELU => POOL Layer
    model.add(Conv2D(20, 5, 5, border_mode="same", input_shape=(depth, height, width)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering="th"))

    # Second CONV => RELU => POOL Layer
    model.add(Conv2D(50, 5, 5, border_mode="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering="th"))

    # Third CONV => RELU => POOL Layer
    # Convolution -> ReLU Activation Function -> Pooling Layer
    model.add(Conv2D(100, 5, 5, border_mode="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering="th"))

    # FC => RELU layers
    # Fully Connected Layer -> ReLU Activation Function
    model.add(Flatten())
    model.add(Dense(500))
    model.add(Activation("relu"))

    # Softmax classifier over the output classes
    model.add(Dense(total_classes))
    model.add(Activation("softmax"))

    # If a saved-weights file is provided (i.e. the model is pre-trained), load those weights
    if Saved_Weights_Path is not None:
        model.load_weights(Saved_Weights_Path)

    return model
# --------------------------------- EOC ------------------------------------
Author: anujdutt9 | Project: Handwritten-Digit-Recognition-using-Deep-Learning | Lines: 37 | Source: neural_network.py
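A hedged usage sketch for build: input_shape=(depth, height, width) together with dim_ordering="th" indicates a channels-first layout, so data must be arranged as (N, channels, height, width). The 28x28 grayscale digits and the optimizer below are assumptions:

model = build(width=28, height=28, depth=1, total_classes=10)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# training data would be shaped (N, 1, 28, 28) to match the channels-first input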
Example 13: load_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def load_model():
    from keras.models import Model
    from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D

    tensor_in = Input((60, 200, 3))
    out = tensor_in
    out = Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(out)
    out = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(out)
    out = MaxPooling2D(pool_size=(2, 2))(out)
    out = Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(out)
    out = Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(out)
    out = MaxPooling2D(pool_size=(2, 2))(out)
    out = Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu')(out)
    out = Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(out)
    out = MaxPooling2D(pool_size=(2, 2))(out)
    out = Conv2D(filters=256, kernel_size=(3, 3), activation='relu')(out)
    out = MaxPooling2D(pool_size=(2, 2))(out)
    out = Flatten()(out)
    out = Dropout(0.5)(out)
    out = [Dense(37, name='digit1', activation='softmax')(out),
           Dense(37, name='digit2', activation='softmax')(out),
           Dense(37, name='digit3', activation='softmax')(out),
           Dense(37, name='digit4', activation='softmax')(out),
           Dense(37, name='digit5', activation='softmax')(out),
           Dense(37, name='digit6', activation='softmax')(out)]
    model = Model(inputs=tensor_in, outputs=out)

    # Define the optimizer
    model.compile(loss='categorical_crossentropy', optimizer='Adamax', metrics=['accuracy'])

    if 'Windows' in platform.platform():
        model.load_weights('{}\\cnn_weight\\verificatioin_code.h5'.format(PATH))
    else:
        model.load_weights('{}/cnn_weight/verificatioin_code.h5'.format(PATH))
    return model
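A hedged decoding sketch for load_model: the model emits six independent 37-way softmax outputs (one per CAPTCHA character), so a prediction is decoded by taking the argmax of each head. The img_batch array of shape (N, 60, 200, 3) and the mapping of indices to a 37-symbol charset are assumptions:

import numpy as np

model = load_model()
preds = model.predict(img_batch)                        # list of six arrays, each (N, 37)
code_indices = [int(np.argmax(p[0])) for p in preds]    # per-character class index for the first image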
Example 14: build_model
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def build_model(n_classes):
    if K.image_dim_ordering() == 'th':
        input_shape = (1, N_MEL_BANDS, SEGMENT_DUR)
        channel_axis = 1
    else:
        input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1)
        channel_axis = 3
    melgram_input = Input(shape=input_shape)

    m_sizes = [50, 70]
    n_sizes = [1, 3, 5]
    n_filters = [128, 64, 32]
    maxpool_const = 4

    layers = list()
    for m_i in m_sizes:
        for i, n_i in enumerate(n_sizes):
            x = Convolution2D(n_filters[i], m_i, n_i,
                              border_mode='same',
                              init='he_normal',
                              W_regularizer=l2(1e-5),
                              name=str(n_i) + '_' + str(m_i) + '_' + 'conv')(melgram_input)
            x = BatchNormalization(axis=channel_axis, mode=0, name=str(n_i) + '_' + str(m_i) + '_' + 'bn')(x)
            x = ELU()(x)
            x = MaxPooling2D(pool_size=(N_MEL_BANDS, SEGMENT_DUR / maxpool_const),
                             name=str(n_i) + '_' + str(m_i) + '_' + 'pool')(x)
            x = Flatten(name=str(n_i) + '_' + str(m_i) + '_' + 'flatten')(x)
            layers.append(x)

    x = merge(layers, mode='concat', concat_axis=channel_axis)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, init='he_normal', W_regularizer=l2(1e-5), activation='softmax', name='prediction')(x)
    model = Model(melgram_input, x)

    return model
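Hedged usage for this multi-branch mel-spectrogram model (written against the older Keras 1 API): N_MEL_BANDS and SEGMENT_DUR are module-level constants assumed to be defined elsewhere, and the optimizer and loss below are assumptions consistent with the softmax output:

model = build_model(n_classes=10)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])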
Example 15: resnet_graph
# Required import: from keras import layers [as alias]
# Or: from keras.layers import MaxPooling2D [as alias]
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.
        architecture: Can be resnet50 or resnet101
        stage5: Boolean. If False, stage5 of the network is not created
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(name='bn_conv1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]
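Hedged usage for resnet_graph, which follows the Mask R-CNN convention of returning the stage outputs C1..C5 for later use by a feature pyramid; the 1024x1024 input size below is an assumption:

input_image = KL.Input(shape=(1024, 1024, 3))
C1, C2, C3, C4, C5 = resnet_graph(input_image, "resnet101", stage5=True)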
############################################################
# Proposal Layer
############################################################