This page collects typical usage examples of the Python method tensorflow.keras.layers.AveragePooling2D. If you have been wondering what layers.AveragePooling2D does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore other members of its containing module, tensorflow.keras.layers.
The following shows 15 code examples of layers.AveragePooling2D, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system surface better Python code examples.
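Before the examples, here is a minimal self-contained sketch of what the layer does to an input's shape. It uses only standard TensorFlow 2.x Keras; the tensor values and shapes are illustrative.

import tensorflow as tf
from tensorflow.keras import layers

# A batch of four 32x32 RGB images (random values, purely illustrative).
images = tf.random.uniform((4, 32, 32, 3))

# 2x2 average pooling with the default stride (equal to pool_size)
# halves the spatial dimensions and leaves the channel count untouched.
pool = layers.AveragePooling2D(pool_size=(2, 2), padding="valid")
print(pool(images).shape)  # (4, 16, 16, 3)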
Example 1: __init__
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def __init__(self,
             in_channels,
             out_channels,
             upscale_out_size,
             data_format="channels_last",
             **kwargs):
    super(ASPPAvgBranch, self).__init__(**kwargs)
    self.upscale_out_size = upscale_out_size
    self.data_format = data_format
    # Average pooling followed by a 1x1 convolution block.
    self.pool = nn.AveragePooling2D(
        pool_size=1,
        data_format=data_format,
        name="pool")
    self.conv = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        data_format=data_format,
        name="conv")
Example 2: __init__
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def __init__(self,
             in_channels,
             out_channels,
             pool_out_size,
             upscale_out_size,
             data_format="channels_last",
             **kwargs):
    super(PyramidPoolingBranch, self).__init__(**kwargs)
    self.upscale_out_size = upscale_out_size
    self.data_format = data_format
    # Average-pool with a window of pool_out_size, then project with a 1x1 conv.
    self.pool = nn.AveragePooling2D(
        pool_size=pool_out_size,
        data_format=data_format,
        name="pool")
    self.conv = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        data_format=data_format,
        name="conv")
Example 3: create_and_append_layer
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def create_and_append_layer(self, layer, list_to_append_layer_to, activation=None, output_layer=False):
    """Creates a layer from its specification and appends it to the provided list."""
    layer_name = layer[0].lower()
    assert layer_name in self.valid_cnn_hidden_layer_types, "Layer name {} not valid, use one of {}".format(
        layer_name, self.valid_cnn_hidden_layer_types)
    if layer_name == "conv":
        list_to_append_layer_to.extend([Conv2D(filters=layer[1], kernel_size=layer[2],
                                               strides=layer[3], padding=layer[4], activation=activation,
                                               kernel_initializer=self.initialiser_function)])
    elif layer_name == "maxpool":
        list_to_append_layer_to.extend([MaxPool2D(pool_size=(layer[1], layer[1]),
                                                  strides=(layer[2], layer[2]), padding=layer[3])])
    elif layer_name == "avgpool":
        list_to_append_layer_to.extend([AveragePooling2D(pool_size=(layer[1], layer[1]),
                                                         strides=(layer[2], layer[2]), padding=layer[3])])
    elif layer_name == "linear":
        list_to_append_layer_to.extend([Dense(layer[1], activation=activation,
                                              kernel_initializer=self.initialiser_function)])
    else:
        raise ValueError("Wrong layer name")
Example 4: QAveragePooling2D
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def QAveragePooling2D(  # pylint: disable=invalid-name
        pool_size=(2, 2), strides=None, padding="valid", quantizer=None, **kwargs):
    """Computes the quantized version of AveragePooling2D."""
    # This is just a convenience layer, nothing fancy. It simply reminds us
    # that average pooling needs to be quantized before the next layer.
    def _call(x):
        """Performs an inline call to AveragePooling2D followed by QActivation."""
        x = AveragePooling2D(pool_size, strides, padding, **kwargs)(x)
        if kwargs.get("name", None):
            name = kwargs["name"] + "_act"
        else:
            name = None
        if quantizer:
            x = QActivation(quantizer, name=name)(x)
        return x
    return _call
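A hedged usage sketch for the factory above: with quantizer=None the returned closure reduces to a plain AveragePooling2D, so only standard Keras is exercised; it assumes QAveragePooling2D and the imports of its original module (AveragePooling2D, QActivation) are in scope.

from tensorflow.keras import layers, Model

inputs = layers.Input((32, 32, 16))
# QAveragePooling2D returns a callable that applies AveragePooling2D and,
# only when a quantizer is given, a QActivation right after it.
x = QAveragePooling2D(pool_size=(2, 2), quantizer=None, name="qpool")(inputs)
model = Model(inputs, x)
model.summary()  # qpool output shape: (None, 16, 16, 16)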
Example 5: create_models
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def create_models(sigma, m):
    input = layers.Input((32, 32, 3))
    x = input
    for i in range(3):
        x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 128)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 256)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.BatchNormalization()(x)
    x = ClusteringAffinity(10, m, sigma)(x)
    return Model(input, x)
Example 6: create_models
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def create_models():
    input = layers.Input((32, 32, 3))
    x = input
    for i in range(3):
        x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 128)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 256)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.BatchNormalization()(x)
    x = ClusteringAffinity(10, 1, 90.0)(x)
    # The output has one extra dimension used to compute the regularization term;
    # ignore it at prediction time.
    return Model(input, x)
Example 7: transition_layer
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def transition_layer(x, nb_channels, dropout_rate=None, compression=1.0, weight_decay=1e-4):
    """
    Creates a transition layer between dense blocks, which applies a 1x1
    convolution followed by pooling and therefore works as downsampling.
    """
    x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_channels * compression), (1, 1), padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    # Optional dropout
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x
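A minimal sketch of wiring the transition layer between dense blocks, assuming transition_layer above and the Keras imports it relies on (BatchNormalization, Activation, Conv2D, Dropout, AveragePooling2D, l2) are in scope; the channel counts are illustrative.

from tensorflow.keras import Input, Model

inputs = Input((32, 32, 256))   # output of a hypothetical dense block
x = transition_layer(inputs, nb_channels=256, compression=0.5)
model = Model(inputs, x)
print(model.output_shape)       # (None, 16, 16, 128): spatial size halved, channels compressed by 0.5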
Example 8: __init__
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def __init__(self, nb_filters=64):
    super(CNN, self).__init__()
    img_size = 32
    log_resolution = int(round(math.log(img_size) / math.log(2)))
    conv_args = dict(
        activation=tf.nn.leaky_relu,
        kernel_size=3,
        padding='same')
    self.layers_obj = []
    for scale in range(log_resolution - 2):
        conv1 = Conv2D(nb_filters << scale, **conv_args)
        conv2 = Conv2D(nb_filters << (scale + 1), **conv_args)
        pool = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))
        self.layers_obj.append(conv1)
        self.layers_obj.append(conv2)
        self.layers_obj.append(pool)
    conv = Conv2D(10, **conv_args)
    self.layers_obj.append(conv)
Example 9: __init__
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def __init__(self, layers_info, output_activation=None, hidden_activations="relu", dropout=0.0, initialiser="default",
             batch_norm=False, y_range=(), random_seed=0, input_dim=None):
    Model.__init__(self)
    self.valid_cnn_hidden_layer_types = {'conv', 'maxpool', 'avgpool', 'linear'}
    self.valid_layer_types_with_no_parameters = (MaxPool2D, AveragePooling2D)
    Base_Network.__init__(self, layers_info, output_activation, hidden_activations, dropout, initialiser,
                          batch_norm, y_range, random_seed, input_dim)
Example 10: features_pyramid
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def features_pyramid(x, n_layers):
    """Generate a features pyramid from the output of the last layer
    of a backbone network (e.g. ResNet v1 or v2).
    Arguments:
        x (tensor): Output feature maps of a backbone network
        n_layers (int): Number of additional pyramid layers
    Return:
        outputs (list): Features pyramid
    """
    outputs = [x]
    conv = AveragePooling2D(pool_size=2, name='pool1')(x)
    outputs.append(conv)
    prev_conv = conv
    n_filters = 512
    # Additional feature map layers
    for i in range(n_layers - 1):
        postfix = "_layer" + str(i + 2)
        conv = conv_layer(prev_conv,
                          n_filters,
                          kernel_size=3,
                          strides=2,
                          use_maxpool=False,
                          postfix=postfix)
        outputs.append(conv)
        prev_conv = conv
    return outputs
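A quick sanity-check sketch, assuming features_pyramid above is in scope; n_layers=1 is chosen deliberately so the conv_layer helper from the original repository (not shown here) is never invoked.

from tensorflow.keras.layers import Input

x = Input((16, 16, 512))   # last backbone feature map
outputs = features_pyramid(x, n_layers=1)
for f in outputs:
    print(f.shape)         # (None, 16, 16, 512), then (None, 8, 8, 512) after pool1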
Example 11: make_layers
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def make_layers(cfg,
                inputs,
                batch_norm=True,
                in_channels=1):
    """Helper function to ease the creation of a VGG network model.
    Arguments:
        cfg (list): Summarizes the network layer configuration ('M' adds
            max pooling, 'A' adds average pooling, an int adds a Conv2D
            with that many filters)
        inputs (tensor): Input from previous layer
        batch_norm (bool): Whether to use batch norm between Conv2D and ReLU
        in_channels (int): Number of input channels
    """
    x = inputs
    for layer in cfg:
        if layer == 'M':
            x = MaxPooling2D()(x)
        elif layer == 'A':
            x = AveragePooling2D(pool_size=3)(x)
        else:
            x = Conv2D(layer,
                       kernel_size=3,
                       padding='same',
                       kernel_initializer='he_normal')(x)
            if batch_norm:
                x = BatchNormalization()(x)
            x = Activation('relu')(x)
    return x
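A hedged sketch of driving make_layers with a tiny made-up cfg list, assuming the helper above and its Keras imports (Conv2D, MaxPooling2D, AveragePooling2D, BatchNormalization, Activation) are in scope; the cfg values are illustrative, not a real VGG configuration.

from tensorflow.keras import Input, Model

cfg = [64, 'M', 128, 'A']   # conv-64, max pool, conv-128, 3x3 average pool
inputs = Input((32, 32, 1))
outputs = make_layers(cfg, inputs)
model = Model(inputs, outputs)
model.summary()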
Example 12: __call__
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def __call__(self):
    logging.debug("Creating model...")
    assert (self._depth - 4) % 6 == 0
    n = (self._depth - 4) // 6
    inputs = Input(shape=self._input_shape)
    n_stages = [16, 16 * self._k, 32 * self._k, 64 * self._k]
    conv1 = Convolution2D(filters=n_stages[0], kernel_size=(3, 3),
                          strides=(1, 1),
                          padding="same",
                          kernel_initializer=self._weight_init,
                          kernel_regularizer=l2(self._weight_decay),
                          use_bias=self._use_bias)(inputs)  # "One conv at the beginning (spatial size: 32x32)"
    # Add wide residual blocks
    block_fn = self._wide_basic
    conv2 = self._layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n, stride=(1, 1))(conv1)
    conv3 = self._layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n, stride=(2, 2))(conv2)
    conv4 = self._layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n, stride=(2, 2))(conv3)
    batch_norm = BatchNormalization(axis=self._channel_axis)(conv4)
    relu = Activation("relu")(batch_norm)
    # Classifier block
    pool = AveragePooling2D(pool_size=(8, 8), strides=(1, 1), padding="same")(relu)
    flatten = Flatten()(pool)
    predictions_g = Dense(units=2, kernel_initializer=self._weight_init, use_bias=self._use_bias,
                          kernel_regularizer=l2(self._weight_decay), activation="softmax",
                          name="pred_gender")(flatten)
    predictions_a = Dense(units=101, kernel_initializer=self._weight_init, use_bias=self._use_bias,
                          kernel_regularizer=l2(self._weight_decay), activation="softmax",
                          name="pred_age")(flatten)
    model = Model(inputs=inputs, outputs=[predictions_g, predictions_a])
    return model
Example 13: inception
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def inception(x, filters):
    """Utility function to implement the inception module.
    # Arguments
        x: input tensor.
        filters: a list of filter sizes.
    # Returns
        Output tensor after applying the inception module.
    """
    if len(filters) != 4:
        raise ValueError('filters should have 4 components')
    if len(filters[1]) != 2 or len(filters[2]) != 2:
        raise ValueError('incorrect spec of filters')
    branch1x1 = conv2d_bn(x, filters[0], (1, 1))
    branch3x3 = conv2d_bn(x, filters[1][0], (1, 1))
    branch3x3 = conv2d_bn(branch3x3, filters[1][1], (3, 3))
    # branch5x5 is implemented with two 3x3 conv2d's
    branch5x5 = conv2d_bn(x, filters[2][0], (1, 1))
    branch5x5 = conv2d_bn(branch5x5, filters[2][1], (3, 3))
    branch5x5 = conv2d_bn(branch5x5, filters[2][1], (3, 3))
    # Use AveragePooling2D for the pooling branch
    branchpool = layers.AveragePooling2D(
        pool_size=(3, 3), strides=(1, 1), padding='same')(x)
    branchpool = conv2d_bn(branchpool, filters[3], (1, 1))
    concat_axis = 1 if backend.image_data_format() == 'channels_first' else 3
    x = layers.concatenate(
        [branch1x1, branch3x3, branch5x5, branchpool], axis=concat_axis)
    return x
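The pooling branch is the part that uses AveragePooling2D. The standalone sketch below (standard Keras only; the 1x1 Conv2D is a plain stand-in for the conv2d_bn helper, which is not shown on this page) illustrates why pool_size=3 with strides=1 and 'same' padding preserves the spatial size, so the branch can be concatenated with the other branches.

import tensorflow as tf
from tensorflow.keras import layers

x = tf.random.uniform((1, 28, 28, 192))
branchpool = layers.AveragePooling2D(pool_size=(3, 3), strides=(1, 1), padding='same')(x)
branchpool = layers.Conv2D(32, (1, 1), padding='same')(branchpool)  # stand-in for conv2d_bn
print(branchpool.shape)  # (1, 28, 28, 32): same height/width as the input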
Example 14: inception
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def inception(x, filters):
    """Utility function to implement the inception module.
    # Arguments
        x: input tensor.
        filters: a list of filter sizes.
    # Returns
        Output tensor after applying the inception module.
    """
    if len(filters) != 4:
        raise ValueError('filters should have 4 components')
    if len(filters[1]) != 2 or len(filters[2]) != 2:
        raise ValueError('incorrect spec of filters')
    branch1x1 = conv2d_bn(x, filters[0], (1, 1))
    branch3x3 = conv2d_bn(x, filters[1][0], (1, 1))
    branch3x3 = conv2d_bn(branch3x3, filters[1][1], (3, 3))
    branch5x5 = conv2d_bn(x, filters[2][0], (1, 1))
    branch5x5 = conv2d_bn(branch5x5, filters[2][1], (5, 5))
    branchpool = layers.AveragePooling2D(
        pool_size=(3, 3), strides=(1, 1), padding='same')(x)
    branchpool = conv2d_bn(branchpool, filters[3], (1, 1))
    if backend.image_data_format() == 'channels_first':
        concat_axis = 1
    else:
        concat_axis = 3
    x = layers.concatenate(
        [branch1x1, branch3x3, branch5x5, branchpool], axis=concat_axis)
    return x
Example 15: create_models
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import AveragePooling2D [as alias]
def create_models():
    input = layers.Input((32, 32, 3))
    x = input
    for i in range(3):
        x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 128)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 256)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(10, activation="softmax")(x)
    return Model(input, x)
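conv_bn_relu is a project-specific helper that is not shown on this page. A plausible stand-in (hypothetical, not the original implementation) makes the snippet above runnable, assuming it is placed in the same script as create_models and that layers and Model are imported as in the example.

def conv_bn_relu(x, ch):
    # Hypothetical stand-in: 3x3 convolution -> batch norm -> ReLU.
    x = layers.Conv2D(ch, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    return layers.Activation("relu")(x)

model = create_models()
model.summary()  # ends with GlobalAveragePooling2D and a 10-way softmax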