This article collects typical usage examples of the Python method keras.layers.pooling.GlobalAveragePooling2D. If you have been wondering what pooling.GlobalAveragePooling2D is good for, or how to use it, the curated example code below may help. You can also explore the enclosing module, keras.layers.pooling, for further usage.
Thirteen code examples of pooling.GlobalAveragePooling2D are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
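Before the examples, a quick orientation: GlobalAveragePooling2D averages each feature map over its spatial dimensions, collapsing a 4D tensor of shape (batch, height, width, channels) to (batch, channels). A minimal sketch using the Keras 2 functional API (note that the examples below mix Keras 1 and Keras 2 styles):

from keras.layers import Input, Conv2D, Dense
from keras.layers.pooling import GlobalAveragePooling2D
from keras.models import Model

inputs = Input(shape=(32, 32, 3))                  # channels-last image input
x = Conv2D(64, (3, 3), padding='same', activation='relu')(inputs)
x = GlobalAveragePooling2D()(x)                    # (batch, 32, 32, 64) -> (batch, 64)
outputs = Dense(10, activation='softmax')(x)       # classifier head on pooled features
model = Model(inputs, outputs)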
Example 1: resnet_model
# Required module: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import GlobalAveragePooling2D [as alias]
def resnet_model(nb_blocks, bottleneck=True, l2_reg=1e-4):
    nb_channels = [16, 32, 64]
    inputs = Input((32, 32, 3))
    x = Convolution2D(16, 3, 3, border_mode='same', init='he_normal',
                      W_regularizer=l2(l2_reg), bias=False)(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    for n, f in zip(nb_channels, [True, False, False]):
        x = block_stack(x, n, nb_blocks, bottleneck=bottleneck, l2_reg=l2_reg,
                        first=f)
    # Last BN-ReLU
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(10)(x)
    x = Activation('softmax')(x)
    model = Model(input=inputs, output=x)
    return model
Example 2: densenet_model
# Required module: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import GlobalAveragePooling2D [as alias]
def densenet_model(nb_blocks, nb_layers, growth_rate, dropout=0., l2_reg=1e-4,
                   init_channels=16):
    n_channels = init_channels
    inputs = Input(shape=(32, 32, 3))
    x = Convolution2D(init_channels, 3, 3, border_mode='same',
                      init='he_normal', W_regularizer=l2(l2_reg),
                      bias=False)(inputs)
    for i in range(nb_blocks - 1):
        # Create a dense block
        x = dense_block(x, nb_layers, growth_rate,
                        dropout=dropout, l2_reg=l2_reg)
        # Update the number of channels
        n_channels += nb_layers * growth_rate
        # Transition layer
        x = transition_block(x, n_channels, dropout=dropout, l2_reg=l2_reg)
    # Add the last dense block
    x = dense_block(x, nb_layers, growth_rate, dropout=dropout, l2_reg=l2_reg)
    # Final BN-ReLU
    x = BatchNormalization(gamma_regularizer=l2(l2_reg),
                           beta_regularizer=l2(l2_reg))(x)
    x = Activation('relu')(x)
    # Global average pooling
    x = GlobalAveragePooling2D()(x)
    x = Dense(10, W_regularizer=l2(l2_reg))(x)
    x = Activation('softmax')(x)
    model = Model(input=inputs, output=x)
    return model

# Apply preprocessing as described in the paper: normalize each channel
# individually. We use the values from fb.resnet.torch, but computing them
# directly gives a very close answer.
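The trailing comment above refers to per-channel standardization of CIFAR-10 images. A minimal sketch of that preprocessing step; the mean/std values are the ones commonly quoted from fb.resnet.torch and should be treated as approximate:

import numpy as np

# Per-channel CIFAR-10 statistics (approximate, commonly attributed to
# fb.resnet.torch; recomputing them on the training set gives nearly
# identical numbers)
CIFAR10_MEAN = np.array([125.3, 123.0, 113.9], dtype=np.float32)
CIFAR10_STD = np.array([63.0, 62.1, 66.7], dtype=np.float32)

def preprocess(images):
    """Standardize each channel of a (N, 32, 32, 3) uint8 image batch."""
    return (images.astype(np.float32) - CIFAR10_MEAN) / CIFAR10_STD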
Example 3: test_globalpooling_2d
# Required module: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import GlobalAveragePooling2D [as alias]
def test_globalpooling_2d():
    layer_test(pooling.GlobalMaxPooling2D,
               kwargs={'data_format': 'channels_first'},
               input_shape=(3, 4, 5, 6))
    layer_test(pooling.GlobalMaxPooling2D,
               kwargs={'data_format': 'channels_last'},
               input_shape=(3, 5, 6, 4))
    layer_test(pooling.GlobalAveragePooling2D,
               kwargs={'data_format': 'channels_first'},
               input_shape=(3, 4, 5, 6))
    layer_test(pooling.GlobalAveragePooling2D,
               kwargs={'data_format': 'channels_last'},
               input_shape=(3, 5, 6, 4))
Example 4: create_model
# Required module: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import GlobalAveragePooling2D [as alias]
def create_model(input_shape, config):
    input_tensor = Input(shape=input_shape)  # assumes K.image_dim_ordering() == 'tf'
    resnet_model = ResNet50(include_top=False, weights=None, input_tensor=input_tensor)
    print(resnet_model.summary())
    x = resnet_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(config["num_classes"], activation='softmax')(x)
    return Model(input=resnet_model.input, output=predictions)
Example 5: create_model
# Required module: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import GlobalAveragePooling2D [as alias]
def create_model(input_shape, config):
    input_tensor = Input(shape=input_shape)  # assumes K.image_dim_ordering() == 'tf'
    xception_model = Xception(include_top=False, weights=None, input_tensor=input_tensor)
    print(xception_model.summary())
    x = xception_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(config["num_classes"], activation='softmax')(x)
    return Model(input=xception_model.input, output=predictions)
Example 6: create_model
# Required module: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import GlobalAveragePooling2D [as alias]
def create_model(input_shape, config):
    input_tensor = Input(shape=input_shape)  # assumes K.image_dim_ordering() == 'tf'
    inception_model = InceptionV3(include_top=False, weights=None, input_tensor=input_tensor)
    print(inception_model.summary())
    x = inception_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(config["num_classes"], activation='softmax')(x)
    return Model(input=inception_model.input, output=predictions)
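Examples 4 through 6 differ only in their backbone. When the backbone is created with pretrained weights rather than weights=None, a common follow-up (not shown in the originals) is to freeze it and train only the new head; a minimal sketch under that assumption:

from keras.applications.inception_v3 import InceptionV3
from keras.layers import Input, Dense
from keras.layers.pooling import GlobalAveragePooling2D
from keras.models import Model

base = InceptionV3(include_top=False, weights='imagenet',
                   input_tensor=Input(shape=(299, 299, 3)))
for layer in base.layers:
    layer.trainable = False  # keep pretrained features fixed
x = GlobalAveragePooling2D()(base.output)
predictions = Dense(10, activation='softmax')(x)  # 10 classes, for illustration
model = Model(inputs=base.input, outputs=predictions)
model.compile(optimizer='adam', loss='categorical_crossentropy')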
Example 7: create_dense_net
# Required module: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import GlobalAveragePooling2D [as alias]
def create_dense_net(nb_classes, img_dim, depth=40, nb_dense_block=3, growth_rate=12,
                     nb_filter=16, dropout_rate=None, weight_decay=1E-4, verbose=True):
    '''Build the DenseNet model (Keras 1 API).
    Args:
        nb_classes: number of classes
        img_dim: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        depth: total number of layers
        nb_dense_block: number of dense blocks
        growth_rate: number of filters added per layer within a dense block
        nb_filter: initial number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay
    Returns: a Keras Model
    '''
    model_input = Input(shape=img_dim)
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1
    assert (depth - 4) % 3 == 0, "Depth must be 3*N + 4"
    # layers in each dense block
    nb_layers = int((depth - 4) / 3)
    # Initial convolution
    x = Convolution2D(nb_filter, 3, 3, init="he_uniform", border_mode="same",
                      name="initial_conv2D", bias=False,
                      W_regularizer=l2(weight_decay))(model_input)
    x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    # Add dense blocks, each followed by a transition block
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate,
                                   dropout_rate=dropout_rate, weight_decay=weight_decay)
        x = transition_block(x, nb_filter, dropout_rate=dropout_rate,
                             weight_decay=weight_decay)
    # The last dense_block does not have a transition_block
    x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate,
                               dropout_rate=dropout_rate, weight_decay=weight_decay)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(nb_classes, activation='softmax', W_regularizer=l2(weight_decay),
              b_regularizer=l2(weight_decay))(x)
    densenet = Model(input=model_input, output=x, name="create_dense_net")
    if verbose:
        print("DenseNet-%d-%d created." % (depth, growth_rate))
    return densenet
Example 8: createDenseNet
# Required module: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import GlobalAveragePooling2D [as alias]
def createDenseNet(nb_classes, img_dim, depth=40, nb_dense_block=3, growth_rate=12,
                   nb_filter=16, dropout_rate=None, weight_decay=1E-4, verbose=True):
    '''Build the DenseNet model (Keras 2 API equivalent of the previous example).
    Args:
        nb_classes: number of classes
        img_dim: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        depth: total number of layers
        nb_dense_block: number of dense blocks
        growth_rate: number of filters added per layer within a dense block
        nb_filter: initial number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay
    Returns: a Keras Model
    '''
    model_input = Input(shape=img_dim)
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1
    assert (depth - 4) % 3 == 0, "Depth must be 3*N + 4"
    # layers in each dense block
    nb_layers = int((depth - 4) / 3)
    # Initial convolution
    x = Convolution2D(nb_filter, (3, 3), kernel_initializer="he_uniform", padding="same",
                      name="initial_conv2D", use_bias=False,
                      kernel_regularizer=l2(weight_decay))(model_input)
    x = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    # Add dense blocks, each followed by a transition block
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate,
                                   dropout_rate=dropout_rate, weight_decay=weight_decay)
        x = transition_block(x, nb_filter, dropout_rate=dropout_rate,
                             weight_decay=weight_decay)
    # The last dense_block does not have a transition_block
    x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate,
                               dropout_rate=dropout_rate, weight_decay=weight_decay)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(nb_classes, activation='softmax', kernel_regularizer=l2(weight_decay),
              bias_regularizer=l2(weight_decay))(x)
    densenet = Model(inputs=model_input, outputs=x)
    if verbose:
        print("DenseNet-%d-%d created." % (depth, growth_rate))
    return densenet
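A hedged usage sketch for the Keras 2 variant above (it assumes the dense_block and transition_block helpers it references are defined alongside it):

# Hypothetical usage: DenseNet-40 with growth rate 12 on CIFAR-10-shaped input
model = createDenseNet(nb_classes=10, img_dim=(32, 32, 3), depth=40,
                       nb_dense_block=3, growth_rate=12)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])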
Example 9: __create_wide_residual_network
# Required module: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import GlobalAveragePooling2D [as alias]
def __create_wide_residual_network(nb_classes, img_input, include_top, depth=28,
                                   width=8, dropout=0.0, activation='softmax'):
    '''Creates a Wide Residual Network with the specified parameters.
    Args:
        nb_classes: number of output classes
        img_input: input tensor or layer
        include_top: flag to include the final dense layer
        depth: depth of the network; each group has N = (depth - 4) / 6 blocks.
            For a depth of 16, N = (16 - 4) / 6 = 2
            For a depth of 28, N = (28 - 4) / 6 = 4
            For a depth of 40, N = (40 - 4) / 6 = 6
        width: width (channel multiplier) of the network
        dropout: adds dropout if the value is greater than 0.0
    Returns: the output tensor of the network (see the usage sketch below)
    '''
    N = (depth - 4) // 6
    x = __conv1_block(img_input)
    nb_conv = 4
    for i in range(N):
        x = __conv2_block(x, width, dropout)
        nb_conv += 2
    x = MaxPooling2D((2, 2))(x)
    for i in range(N):
        x = __conv3_block(x, width, dropout)
        nb_conv += 2
    x = MaxPooling2D((2, 2))(x)
    for i in range(N):
        x = ___conv4_block(x, width, dropout)
        nb_conv += 2
    if include_top:
        x = GlobalAveragePooling2D()(x)
        x = Dense(nb_classes, activation=activation)(x)
    return x
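Since this helper returns an output tensor rather than a Model, a caller would typically wrap it as in the following hypothetical sketch (assuming it runs in the same module where the private __conv*_block helpers are defined):

from keras.layers import Input
from keras.models import Model

# Hypothetical wrapper: WRN-28-8 for a 10-class, 32x32-input problem
img_input = Input(shape=(32, 32, 3))
output = __create_wide_residual_network(10, img_input, include_top=True,
                                        depth=28, width=8)
model = Model(img_input, output)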
Example 10: raw_vgg
# Required module: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import GlobalAveragePooling2D [as alias]
def raw_vgg(args, input_length=12000 * 29, tf='melgram', normalize=None,
            decibel=False, last_layer=True, sr=None):
    '''With input_length = 12000*29 and a 512/256 DFT/hop size,
    the melgram size is (n_mels, 1360).
    '''
    assert tf in ('stft', 'melgram')
    assert normalize in (None, False, 'no', 0, 0.0, 'batch', 'data_sample',
                         'time', 'freq', 'channel')
    assert isinstance(decibel, bool)
    if sr is None:
        sr = SR  # assumes 12000
    conv_until = args.conv_until
    trainable_kernel = args.trainable_kernel
    model = Sequential()
    # decode args
    fmin = args.fmin
    fmax = args.fmax
    if fmax == 0.0:
        fmax = sr / 2
    n_mels = args.n_mels
    trainable_fb = args.trainable_fb
    model.add(Melspectrogram(n_dft=512, n_hop=256, power_melgram=2.0,
                             input_shape=(1, input_length),
                             trainable_kernel=trainable_kernel,
                             trainable_fb=trainable_fb,
                             return_decibel_melgram=decibel,
                             sr=sr, n_mels=n_mels,
                             fmin=fmin, fmax=fmax,
                             name='melgram'))
    poolings = [(2, 4), (3, 4), (2, 5), (2, 4), (4, 4)]
    if normalize in ('batch', 'data_sample', 'time', 'freq', 'channel'):
        model.add(Normalization2D(normalize))
    model.add(get_convBNeluMPdrop(5, [32, 32, 32, 32, 32],
                                  [(3, 3), (3, 3), (3, 3), (3, 3), (3, 3)],
                                  poolings, model.output_shape[1:],
                                  conv_until=conv_until))
    if conv_until != 4:
        model.add(GlobalAveragePooling2D())
    else:
        model.add(Flatten())
    if last_layer:
        model.add(Dense(50, activation='sigmoid'))
    return model
Example 11: DenseNet
# Required module: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import GlobalAveragePooling2D [as alias]
def DenseNet(nb_classes, img_dim, depth, nb_dense_block, growth_rate,
             nb_filter, dropout_rate=None, weight_decay=1E-4):
    """Build the DenseNet model (Keras 1, Theano dim ordering).
    :param nb_classes: int -- number of classes
    :param img_dim: tuple -- (channels, rows, columns)
    :param depth: int -- total number of layers
    :param nb_dense_block: int -- number of dense blocks
    :param growth_rate: int -- number of filters added per layer
    :param nb_filter: int -- initial number of filters
    :param dropout_rate: float -- dropout rate
    :param weight_decay: float -- weight decay
    :returns: a Keras model
    :rtype: keras model
    """
    model_input = Input(shape=img_dim)
    assert (depth - 4) % 3 == 0, "Depth must be 3*N + 4"
    # layers in each dense block
    nb_layers = int((depth - 4) / 3)
    # Initial convolution
    x = Convolution2D(nb_filter, 3, 3,
                      init="he_uniform",
                      border_mode="same",
                      name="initial_conv2D",
                      bias=False,
                      W_regularizer=l2(weight_decay))(model_input)
    # Add dense blocks, each followed by a transition
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = denseblock(x, nb_layers, nb_filter, growth_rate,
                                  dropout_rate=dropout_rate,
                                  weight_decay=weight_decay)
        x = transition(x, nb_filter, dropout_rate=dropout_rate,
                       weight_decay=weight_decay)
    # The last denseblock does not have a transition
    x, nb_filter = denseblock(x, nb_layers, nb_filter, growth_rate,
                              dropout_rate=dropout_rate,
                              weight_decay=weight_decay)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D(dim_ordering="th")(x)
    x = Dense(nb_classes,
              activation='softmax',
              W_regularizer=l2(weight_decay),
              b_regularizer=l2(weight_decay))(x)
    densenet = Model(input=[model_input], output=[x], name="DenseNet")
    return densenet
Example 12: _makeImageDiscriminator
# Required module: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import GlobalAveragePooling2D [as alias]
def _makeImageDiscriminator(self, img_shape):
    '''
    Create an image-only discriminator over (current image, initial image) pairs.
    Params:
    -------
    img_shape: shape of the images to discriminate
    '''
    img = Input(img_shape, name="img_encoder_in")
    img0 = Input(img_shape, name="img0_encoder_in")
    ins = [img, img0]
    dr = self.dropout_rate
    if self.use_wasserstein:
        loss = wasserstein_loss
        activation = "linear"
    else:
        loss = "binary_crossentropy"
        activation = "sigmoid"
    # common arguments
    kwargs = {"dropout_rate": dr,
              "padding": "same",
              "lrelu": True,
              "bn": False,
              "perm_drop": True,
              }
    x = AddConv2D(img, 64, [4, 4], 1, **kwargs)
    x0 = AddConv2D(img0, 64, [4, 4], 1, **kwargs)
    x = Add()([x, x0])
    x = AddConv2D(x, 64, [4, 4], 2, **kwargs)
    x = AddConv2D(x, 128, [4, 4], 2, **kwargs)
    x = AddConv2D(x, 256, [4, 4], 2, **kwargs)
    if self.use_wasserstein:
        x = Flatten()(x)
        x = AddDense(x, 1, "linear", 0., output=True, bn=False, perm_drop=True)
    else:
        x = AddConv2D(x, 1, [1, 1], 1, 0., "same", activation="sigmoid",
                      bn=False, perm_drop=True)
        x = GlobalAveragePooling2D()(x)
    discrim = Model(ins, x, name="image_discriminator")
    self.lr *= 2.
    discrim.compile(loss=loss, loss_weights=[1.],
                    optimizer=self.getOptimizer())
    self.lr *= 0.5
    self.image_discriminator = discrim
    return discrim
Example 13: discriminator
# Required module: from keras.layers import pooling [as alias]
# Or: from keras.layers.pooling import GlobalAveragePooling2D [as alias]
def discriminator(img_dim, bn_mode, model_name="discriminator"):
    """DCGAN discriminator
    Args:
        img_dim: dimensions of the input image
        bn_mode: keras batchnorm mode
        model_name: model name (default: "discriminator")
    Returns:
        keras model
    """
    if K.image_dim_ordering() == "th":
        bn_axis = 1
        min_s = min(img_dim[1:])
    else:
        bn_axis = -1
        min_s = min(img_dim[:-1])
    disc_input = Input(shape=img_dim, name="discriminator_input")
    # Get the list of conv filter counts
    # (the first layer starts with 64; filters are subsequently doubled)
    nb_conv = int(np.floor(np.log(min_s // 4) / np.log(2)))
    list_f = [64 * min(8, (2 ** i)) for i in range(nb_conv)]
    # First conv with 2x2 strides
    x = Conv2D(list_f[0], (3, 3), strides=(2, 2), name="disc_conv2d_1",
               padding="same", use_bias=False,
               kernel_initializer=RandomNormal(stddev=0.02))(disc_input)
    x = BatchNormalization(axis=bn_axis)(x)
    x = LeakyReLU(0.2)(x)
    # Conv blocks: Conv2D(2x2 strides) -> BN -> LeakyReLU
    for i, f in enumerate(list_f[1:]):
        name = "disc_conv2d_%s" % (i + 2)
        x = Conv2D(f, (3, 3), strides=(2, 2), name=name, padding="same", use_bias=False,
                   kernel_initializer=RandomNormal(stddev=0.02))(x)
        x = BatchNormalization(axis=bn_axis)(x)
        x = LeakyReLU(0.2)(x)
    # Last convolution
    x = Conv2D(1, (3, 3), name="last_conv", padding="same", use_bias=False,
               kernel_initializer=RandomNormal(stddev=0.02))(x)
    # Average pooling
    x = GlobalAveragePooling2D()(x)
    discriminator_model = Model(inputs=[disc_input], outputs=[x], name=model_name)
    visualize_model(discriminator_model)
    return discriminator_model
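For concreteness: with 64x64 inputs, min_s = 64, so nb_conv = floor(log2(64 // 4)) = 4 and list_f = [64, 128, 256, 512]; the feature map is halved four times before the final one-channel convolution, and GlobalAveragePooling2D then reduces it to a single score per image.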