

Python layers.GlobalAveragePooling2D Method Code Examples

This article collects typical usage examples of the Python method tensorflow.keras.layers.GlobalAveragePooling2D. If you are wondering how layers.GlobalAveragePooling2D is used in practice, or are looking for concrete examples of it, the curated code samples below should help. You can also explore further usage examples from the containing module, tensorflow.keras.layers.


The following presents 15 code examples of the layers.GlobalAveragePooling2D method, sorted by popularity by default.
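
Before the examples, here is a minimal, self-contained sketch of what the layer actually computes (the shapes below are chosen purely for illustration): GlobalAveragePooling2D averages each feature map over its spatial dimensions, collapsing a (batch, height, width, channels) tensor into a (batch, channels) vector per sample.

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import GlobalAveragePooling2D

# A (batch, height, width, channels) feature map; channels_last is the default data format.
features = tf.random.normal((2, 7, 7, 512))

pooled = GlobalAveragePooling2D()(features)  # average over the 7x7 spatial grid
print(pooled.shape)                          # (2, 512): one value per channel

# The same reduction written with a plain TensorFlow op:
manual = tf.reduce_mean(features, axis=[1, 2])
print(np.allclose(pooled.numpy(), manual.numpy()))  # True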

Example 1: make_densenet121_resisc_model

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def make_densenet121_resisc_model(**model_kwargs) -> tf.keras.Model:
    # Build the DenseNet-121 backbone without its classification head (weights=None: no pre-trained weights are loaded here)
    model_notop = DenseNet121(
        include_top=False, weights=None, input_shape=(224, 224, 3)
    )

    # Add new layers
    x = GlobalAveragePooling2D()(model_notop.output)
    predictions = Dense(num_classes, activation="softmax")(x)

    # Create graph of new model and freeze pre-trained layers
    new_model = Model(inputs=model_notop.input, outputs=predictions)

    for layer in new_model.layers[:-1]:
        layer.trainable = False
        if "bn" == layer.name[-2:]:  # allow batchnorm layers to be trainable
            layer.trainable = True

    # compile the model
    new_model.compile(
        optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
    )

    return new_model 
Developer: twosixlabs, Project: armory, Lines: 26, Source: densenet121_resisc45.py

Example 2: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 data_format="channels_last",
                 **kwargs):
        super(PyramidPoolingZeroBranch, self).__init__(**kwargs)
        self.in_size = in_size
        self.data_format = data_format

        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv")
        self.up = InterpolationBlock(
            scale_factor=None,
            interpolation="bilinear",
            data_format=data_format,
            name="up") 
Developer: osmr, Project: imgclsmob, Lines: 25, Source: bisenet.py

Example 3: create_model

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def create_model(trainable=False):
    model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA)

    # freeze (or unfreeze) the backbone layers according to `trainable`
    for layer in model.layers:
        layer.trainable = trainable

    out = model.layers[-1].output

    x = Conv2D(4, kernel_size=3)(out)
    x = Reshape((4,), name="coords")(x)

    y = GlobalAveragePooling2D()(out)
    y = Dense(CLASSES, name="classes", activation="softmax")(y)

    return Model(inputs=model.input, outputs=[x, y]) 
Developer: lars76, Project: object-localization, Lines: 18, Source: train.py

Example 4: KOrderModel

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def KOrderModel(extractor_name,
                embedding_sizes,
                high_order_dims,
                ho_trainable=False,
                end_layer=None):
    model = get_extractor(extractor_name, end_layer=end_layer)
    inputs = model.input
    x = model.output

    max_order = len(high_order_dims)
    output_list = [x]

    # Add all high-order approximation layers:
    for k, order_dim in enumerate(high_order_dims, start=2):
        x_ho = CKOP(output_dim=order_dim, name='CKOP_' + str(k), ho_trainable=ho_trainable)([x] * k)
        output_list.append(x_ho)

    # Add pooling and embedding layers:
    for k in range(len(output_list)):
        output_list[k] = GlobalAveragePooling2D(name='GAP_' + extractor_name + '_O' + str(k + 1))(output_list[k])
        if embedding_sizes[k] > 0:
            output_list[k] = Dense(embedding_sizes[k], use_bias=False)(output_list[k])
        output_list[k] = L2Normalisation(name='L2_' + extractor_name + '_O' + str(k + 1))(output_list[k])

    return Model(inputs=inputs, outputs=output_list, name=extractor_name + '_O' + str(max_order)), get_preprocess_method(extractor_name) 
Developer: pierre-jacob, Project: ICCV2019-Horde, Lines: 27, Source: horde_models.py

Example 5: get_aggregation_gate

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def get_aggregation_gate(in_filters, reduction=16):
    """Get the "aggregation gate (AG)" op.

    # Arguments
        reduction: channel reduction for the hidden layer.

    # Returns
        The AG op (a models.Sequential module).
    """
    gate = models.Sequential()
    gate.add(layers.GlobalAveragePooling2D())
    gate.add(layers.Dense(in_filters // reduction, use_bias=False))
    gate.add(layers.BatchNormalization())
    gate.add(layers.Activation('relu'))
    gate.add(layers.Dense(in_filters))
    gate.add(layers.Activation('sigmoid'))
    gate.add(layers.Reshape((1, 1, -1)))  # reshape to (1, 1, C) so the gate broadcasts over H and W
    return gate 
Developer: jkjung-avt, Project: keras_imagenet, Lines: 20, Source: osnet.py
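
A hedged usage sketch for the gate above, assuming get_aggregation_gate from Example 5 is already defined in scope (it only needs layers and models from tensorflow.keras); the batch size, spatial size, and channel count are illustrative:

import tensorflow as tf
from tensorflow.keras import layers, models

# Assumes get_aggregation_gate from Example 5 is defined in this module.
gate = get_aggregation_gate(in_filters=256, reduction=16)

features = tf.random.normal((8, 14, 14, 256))   # (batch, H, W, C) feature map
attention = gate(features)                      # -> (8, 1, 1, 256), sigmoid-activated channel weights
reweighted = features * attention               # broadcast over H and W: channel-wise re-weighting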

Example 6: create_models

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def create_models(sigma, m):
    input = layers.Input((32,32,3))
    x = input
    for i in range(3):
        x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 128)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 256)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.BatchNormalization()(x)
    x = ClusteringAffinity(10, m, sigma)(x)

    return Model(input, x) 
Developer: koshian2, Project: affinity-loss, Lines: 18, Source: cnn_cifar_optuna_affinity.py

Example 7: create_models

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def create_models():
    input = layers.Input((32,32,3))
    x = input
    for i in range(3):
        x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 128)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 256)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.BatchNormalization()(x)
    x = ClusteringAffinity(10, 1, 90.0)(x)

    # The output has one extra dimension used to compute the regularization term;
    # ignore it at prediction time.

    return Model(input, x) 
Developer: koshian2, Project: affinity-loss, Lines: 21, Source: cnn_cifar_affinity.py

Example 8: _se_block

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def _se_block(inputs, filters, se_ratio, prefix):
    x = GlobalAveragePooling2D(name=prefix + 'squeeze_excite/AvgPool')(inputs)
    if K.image_data_format() == 'channels_first':
        x = Reshape((filters, 1, 1))(x)
    else:
        x = Reshape((1, 1, filters))(x)
    x = Conv2D(_depth(filters * se_ratio),
                      kernel_size=1,
                      padding='same',
                      name=prefix + 'squeeze_excite/Conv')(x)
    x = ReLU(name=prefix + 'squeeze_excite/Relu')(x)
    x = Conv2D(filters,
                      kernel_size=1,
                      padding='same',
                      name=prefix + 'squeeze_excite/Conv_1')(x)
    x = Activation(hard_sigmoid)(x)
    #if K.backend() == 'theano':
        ## For the Theano backend, we have to explicitly make
        ## the excitation weights broadcastable.
        #x = Lambda(
            #lambda br: K.pattern_broadcast(br, [True, True, True, False]),
            #output_shape=lambda input_shape: input_shape,
            #name=prefix + 'squeeze_excite/broadcast')(x)
    x = Multiply(name=prefix + 'squeeze_excite/Mul')([inputs, x])
    return x 
Developer: david8862, Project: keras-YOLOv3-model-set, Lines: 27, Source: mobilenet_v3.py

Example 9: squeeze_excite_block

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def squeeze_excite_block(input, ratio=16):
    ''' Create a channel-wise squeeze-excite block
    Args:
        input: input tensor
        ratio: channel reduction ratio for the squeeze bottleneck
    Returns: a keras tensor
    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    '''
    init = input
    filters = init.shape[-1]  # channel count (assumes channels_last)
    se_shape = (1, 1, filters)
    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)
    x = multiply([init, se])
    return x 
Developer: 1044197988, Project: TF.Keras-Commonly-used-models, Lines: 20, Source: SE.py
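
For reference, here is a minimal self-contained sketch of the same squeeze-and-excite pattern built only from stock tf.keras layers; the layer sizes and reduction ratio are illustrative and not taken from the project above:

import tensorflow as tf
from tensorflow.keras import layers, Model

def se_block(x, ratio=16):
    """Squeeze-and-excite: global average pool, bottleneck MLP, channel-wise rescale."""
    channels = x.shape[-1]                                 # assumes channels_last
    se = layers.GlobalAveragePooling2D()(x)                # "squeeze": (batch, C)
    se = layers.Dense(channels // ratio, activation='relu', use_bias=False)(se)
    se = layers.Dense(channels, activation='sigmoid', use_bias=False)(se)
    se = layers.Reshape((1, 1, channels))(se)              # broadcastable over H and W
    return layers.Multiply()([x, se])                      # "excite": rescale each channel

inputs = layers.Input((32, 32, 3))
x = layers.Conv2D(64, 3, padding='same', activation='relu')(inputs)
x = se_block(x, ratio=8)
model = Model(inputs, x)
model.summary()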

Example 10: _build_graph

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def _build_graph(self):
    smile_images = Input(shape=self.input_shape)
    stem = chemnet_layers.Stem(self.base_filters)(smile_images)

    inceptionA_out = self.build_inception_module(inputs=stem, type="A")
    reductionA_out = chemnet_layers.ReductionA(
        self.base_filters)(inceptionA_out)

    inceptionB_out = self.build_inception_module(
        inputs=reductionA_out, type="B")
    reductionB_out = chemnet_layers.ReductionB(
        self.base_filters)(inceptionB_out)

    inceptionC_out = self.build_inception_module(
        inputs=reductionB_out, type="C")
    avg_pooling_out = GlobalAveragePooling2D()(inceptionC_out)

    if self.mode == "classification":
      logits = Dense(self.n_tasks * self.n_classes)(avg_pooling_out)
      logits = Reshape((self.n_tasks, self.n_classes))(logits)
      if self.n_classes == 2:
        output = Activation(activation='sigmoid')(logits)
        loss = SigmoidCrossEntropy()
      else:
        output = Softmax()(logits)
        loss = SoftmaxCrossEntropy()
      outputs = [output, logits]
      output_types = ['prediction', 'loss']

    else:
      output = Dense(self.n_tasks * 1)(avg_pooling_out)
      output = Reshape((self.n_tasks, 1))(output)
      outputs = [output]
      output_types = ['prediction']
      loss = L2Loss()

    model = tf.keras.Model(inputs=[smile_images], outputs=outputs)
    return model, loss, output_types 
Developer: deepchem, Project: deepchem, Lines: 40, Source: chemnet_models.py

Example 11: squeeze_excite_block

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def squeeze_excite_block(input_tensor, ratio=16):
    """ Create a channel-wise squeeze-excite block

    Args:
        input_tensor: input Keras tensor
        ratio: channel reduction ratio for the squeeze bottleneck

    Returns: a Keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    """
    init = input_tensor
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = _tensor_shape(init)[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x 
Developer: titu1994, Project: keras-squeeze-excite-network, Lines: 29, Source: se.py

Example 12: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def __init__(self,
                 channels,
                 reduction=16,
                 round_mid=False,
                 mid_activation="relu",
                 out_activation="sigmoid",
                 data_format="channels_last",
                 **kwargs):
        super(SEBlock, self).__init__(**kwargs)
        self.data_format = data_format
        self.use_conv2 = (reduction > 1)
        mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)

        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        self.fc1 = nn.Dense(
            units=mid_channels,
            input_dim=channels,
            name="fc1")
        if self.use_conv2:
            self.activ = get_activation_layer(mid_activation, name="activ")
            self.fc2 = nn.Dense(
                units=channels,
                input_dim=mid_channels,
                name="fc2")
        self.sigmoid = get_activation_layer(out_activation, name="sigmoid") 
Developer: osmr, Project: imgclsmob, Lines: 29, Source: sinet.py

Example 13: _keras_global_avgpool_core

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def _keras_global_avgpool_core(shape=None, data=None):
    assert shape is None or data is None
    if shape is None:
        shape = data.shape

    model = Sequential()
    layer = GlobalAveragePooling2D(input_shape=shape[1:], data_format="channels_last")
    model.add(layer)

    if data is None:
        data = np.random.uniform(size=shape)
    out = model.predict(data)
    return model, out 
Developer: tf-encrypted, Project: tf-encrypted, Lines: 15, Source: convert_test.py

Example 14: Baseline

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def Baseline(extractor_name,
             embedding_size,
             end_layer=None):
    model = get_extractor(extractor_name, end_layer=end_layer)
    inputs = model.input
    x = model.output

    x = Conv2D(embedding_size, (1, 1), use_bias=False, name='Embedding')(x)
    x = GlobalAveragePooling2D(name='GAP')(x)
    x = L2Normalisation(name='L2')(x)

    return Model(inputs=inputs, outputs=x, name=extractor_name), get_preprocess_method(extractor_name) 
Developer: pierre-jacob, Project: ICCV2019-Horde, Lines: 14, Source: dml_models.py

Example 15: CascadedKOrder

# Required import: from tensorflow.keras import layers [as alias]
# Or alternatively: from tensorflow.keras.layers import GlobalAveragePooling2D [as alias]
def CascadedKOrder(extractor_name,
                   embedding_sizes,
                   high_order_dims,
                   ho_trainable=True,
                   end_layer=None):
    model = get_extractor(extractor_name, end_layer=end_layer)
    inputs = model.input
    x = model.output

    max_order = len(high_order_dims)
    output_list = [x]

    # Add all high-order approximation layers:
    for k, order_dim in enumerate(high_order_dims, start=2):
        only_project_second = False if k == 2 else True
        x_ho = PKOB(order_dim,
                    only_project_second=only_project_second,
                    ho_trainable=ho_trainable)([output_list[-1], x])
        output_list.append(x_ho)

    # Add pooling and embedding layers:
    for k in range(len(output_list)):
        output_list[k] = GlobalAveragePooling2D(name='GAP_' + extractor_name + '_O' + str(k + 1))(output_list[k])

        if ho_trainable:
            output_list[k] = Dense(embedding_sizes[k],
                                   use_bias=False,
                                   name='Proj_' + extractor_name + '_O' + str(k + 1))(output_list[k])
        elif k == 0:
            output_list[k] = Dense(embedding_sizes[k],
                                   use_bias=False,
                                   name='Proj_' + extractor_name + '_O' + str(k + 1))(output_list[k])

        output_list[k] = L2Normalisation(name='L2_' + extractor_name + '_O' + str(k + 1))(output_list[k])

    return Model(inputs=inputs, outputs=output_list, name=extractor_name + '_O' + str(max_order)), get_preprocess_method(extractor_name) 
Developer: pierre-jacob, Project: ICCV2019-Horde, Lines: 38, Source: horde_models.py


Note: the tensorflow.keras.layers.GlobalAveragePooling2D examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not republish without permission.