

Python layers.BatchNormalization Method Code Examples

This article collects typical usage examples of the Python method tensorflow.keras.layers.BatchNormalization. If you are wondering what layers.BatchNormalization does, how to call it, or want to see it used in practice, the curated code examples below may help. You can also explore further usage examples from tensorflow.keras.layers, the module this method belongs to.


The following 15 code examples of layers.BatchNormalization are shown below, sorted by popularity by default.
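
As a quick orientation, here is a minimal, self-contained sketch of the Conv-BN-ReLU pattern that most of the snippets below follow; the layer sizes and input shape are illustrative assumptions, not taken from any of the projects:

import tensorflow as tf
from tensorflow.keras import layers

# Minimal Conv2D -> BatchNormalization -> ReLU stack: BatchNormalization
# normalizes activations over the batch, which typically stabilizes
# training and allows higher learning rates.
model = tf.keras.Sequential([
    layers.Conv2D(16, (3, 3), padding='same', use_bias=False,
                  input_shape=(32, 32, 3)),
    layers.BatchNormalization(),   # normalize before the activation
    layers.ReLU(),
    layers.GlobalAveragePooling2D(),
    layers.Dense(10, activation='softmax'),
])
model.summary()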

Example 1: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def __init__(self, out_features, **kwargs):
        super(_DenseLayer, self).__init__(**kwargs)
        # w_decay and weight_init are module-level settings in the source project
        k_reg = None if w_decay is None else l2(w_decay)
        self.layers = []
        self.layers.append(tf.keras.Sequential(
            [
                layers.ReLU(),
                layers.Conv2D(
                    filters=out_features, kernel_size=(3,3), strides=(1,1), padding='same',
                    use_bias=True, kernel_initializer=weight_init,
                    kernel_regularizer=k_reg),
                layers.BatchNormalization(),
                layers.ReLU(),
                layers.Conv2D(
                    filters=out_features, kernel_size=(3,3), strides=(1,1), padding='same',
                    use_bias=True, kernel_initializer=weight_init,
                    kernel_regularizer=k_reg),
                layers.BatchNormalization(),
            ]))  # the first ReLU may be unnecessary
Author: xavysp, Project: DexiNed, Lines: 21, Source: model.py

Example 2: _create_encoder

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def _create_encoder(self, n_layers, dropout):
    """Create the encoder as a tf.keras.Model."""
    input = self._create_features()
    gather_indices = Input(shape=(2,), dtype=tf.int32)
    prev_layer = input
    for i in range(len(self._filter_sizes)):
      filter_size = self._filter_sizes[i]
      kernel_size = self._kernel_sizes[i]
      if dropout > 0.0:
        prev_layer = Dropout(rate=dropout)(prev_layer)
      prev_layer = Conv1D(
          filters=filter_size, kernel_size=kernel_size,
          activation=tf.nn.relu)(prev_layer)
    prev_layer = Flatten()(prev_layer)
    prev_layer = Dense(
        self._decoder_dimension, activation=tf.nn.relu)(prev_layer)
    prev_layer = BatchNormalization()(prev_layer)
    return tf.keras.Model(inputs=[input, gather_indices], outputs=prev_layer) 
Author: deepchem, Project: deepchem, Lines: 20, Source: seqtoseq.py

Example 3: conv2d_bn

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def conv2d_bn(x,
              filters,
              kernel_size,
              strides=1,
              padding='same',
              activation='relu',
              use_bias=False,
              name=None):
    x = Conv2D(filters,
               kernel_size,
               strides=strides,
               padding=padding,
               use_bias=use_bias,
               name=name)(x)
    if not use_bias:
        bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
        bn_name = _generate_layer_name('BatchNorm', prefix=name)
        x = BatchNormalization(axis=bn_axis, momentum=0.995, epsilon=0.001,
                               scale=False, name=bn_name)(x)
    if activation is not None:
        ac_name = _generate_layer_name('Activation', prefix=name)
        x = Activation(activation, name=ac_name)(x)
    return x 
Author: aangfanboy, Project: TripletLossFace, Lines: 25, Source: inception_resnet_v1.py
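
For reference, a call to conv2d_bn might look like the following sketch; the input shape and layer name are assumptions for illustration, not taken from the project:

from tensorflow.keras import Input

# Hypothetical usage: a strided 3x3 conv followed by BN and ReLU.
inputs = Input(shape=(160, 160, 3))   # illustrative input shape
x = conv2d_bn(inputs, filters=32, kernel_size=3,
              strides=2, padding='valid', name='Conv2d_1a_3x3')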

Example 4: create_model

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def create_model(trainable=False):
    model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA, weights="imagenet")

    for layer in model.layers:
        layer.trainable = trainable

    block = model.get_layer("block_16_project_BN").output

    x = Conv2D(112, padding="same", kernel_size=3, strides=1, activation="relu")(block)
    x = Conv2D(112, padding="same", kernel_size=3, strides=1, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(5, padding="same", kernel_size=1, activation="sigmoid")(x)

    model = Model(inputs=model.input, outputs=x)

    # divide by 2 since d/dw (learning_rate * w^2) = 2 * learning_rate * w
    # see https://arxiv.org/pdf/1711.05101.pdf
    regularizer = l2(WEIGHT_DECAY / 2)
    for weight in model.trainable_weights:
        with tf.keras.backend.name_scope("weight_regularizer"):
            model.add_loss(regularizer(weight)) # in tf2.0: lambda: regularizer(weight)

    return model 
Author: lars76, Project: object-localization, Lines: 27, Source: train.py
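
The division by 2 compensates for the factor of 2 that appears when the squared penalty is differentiated: for a penalty term weight_decay * w^2, the gradient is 2 * weight_decay * w, so using weight_decay / 2 yields an effective decay of exactly weight_decay per step. An equivalent per-layer formulation is sketched below; the WEIGHT_DECAY value is illustrative, not the project's:

from tensorflow.keras.layers import Conv2D
from tensorflow.keras.regularizers import l2

WEIGHT_DECAY = 5e-4   # illustrative value; the project defines its own
# Attach the (halved) L2 penalty directly when the layer is constructed;
# Keras adds the resulting term to the model loss automatically.
conv = Conv2D(112, kernel_size=3, strides=1, padding="same",
              use_bias=False, kernel_regularizer=l2(WEIGHT_DECAY / 2))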

Example 5: conv_layer

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def conv_layer(inputs,
               filters=32,
               kernel_size=3,
               strides=1,
               use_maxpool=True,
               postfix=None,
               activation=None):

    x = conv2d(inputs,
               filters=filters,
               kernel_size=kernel_size,
               strides=strides,
               name='conv'+postfix)
    x = BatchNormalization(name="bn"+postfix)(x)
    x = ELU(name='elu'+postfix)(x)
    if use_maxpool:
        x = MaxPooling2D(name='pool'+postfix)(x)
    return x 
Author: PacktPublishing, Project: Advanced-Deep-Learning-with-Keras, Lines: 20, Source: model.py

Example 6: conv_layer

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def conv_layer(inputs,
               filters=32,
               kernel_size=3,
               strides=1,
               use_maxpool=True,
               postfix=None,
               activation=None):
    """Helper function to build Conv2D-BN-ReLU layer
        with optional MaxPooling2D.
    """

    x = Conv2D(filters=filters,
               kernel_size=kernel_size,
               strides=strides,
               kernel_initializer='he_normal',
               name="conv_"+postfix,
               padding='same')(inputs)
    x = BatchNormalization(name="bn_"+postfix)(x)
    x = Activation('relu', name='relu_'+postfix)(x)
    if use_maxpool:
        x = MaxPooling2D(name='pool'+postfix)(x)
    return x 
Author: PacktPublishing, Project: Advanced-Deep-Learning-with-Keras, Lines: 24, Source: model.py

Example 7: tconv_layer

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def tconv_layer(inputs,
                filters=32,
                kernel_size=3,
                strides=2,
                postfix=None):
    """Helper function to build Conv2DTranspose-BN-ReLU 
        layer
    """
    x = Conv2DTranspose(filters=filters,
                        kernel_size=kernel_size,
                        strides=strides,
                        padding='same',
                        kernel_initializer='he_normal',
                        name='tconv_'+postfix)(inputs)
    x = BatchNormalization(name="bn_"+postfix)(x)
    x = Activation('relu', name='relu_'+postfix)(x)
    return x 
Author: PacktPublishing, Project: Advanced-Deep-Learning-with-Keras, Lines: 19, Source: model.py

Example 8: buildModel

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def buildModel(patchShape, numClasses):
    input = Input(shape=patchShape)
    n_base_filter = 32
    _handle_data_format()
    conv = Conv3D(filters=n_base_filter, kernel_size=(7, 7, 7),
                  strides=(2, 2, 2), kernel_initializer="he_normal",
                  )(input)
    norm = BatchNormalization(axis=CHANNEL_AXIS)(conv)
    conv1 = Activation("relu")(norm)
    pool1 = MaxPooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2),
                         padding="same")(conv1)
    flatten1 = Flatten()(pool1)
    dense = Dense(units=numClasses,
                  kernel_initializer="he_normal",
                  activation="softmax",
                  kernel_regularizer=l2(1e-4))(flatten1)
    model = Model(inputs=input, outputs=dense)
    return model 
Author: thomaskuestner, Project: CNNArt, Lines: 20, Source: multiclass_3D_CNN.py

Example 9: expanding_layer_2D

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def expanding_layer_2D(input, neurons, concatenate_link, ba_norm,
                       ba_norm_momentum):
    up = concatenate([Conv2DTranspose(neurons, (2, 2), strides=(2, 2),
                     padding='same')(input), concatenate_link], axis=-1)
    conv1 = Conv2D(neurons, (3, 3), activation='relu', padding='same')(up)
    if ba_norm: conv1 = BatchNormalization(momentum=ba_norm_momentum)(conv1)
    conc1 = concatenate([up, conv1], axis=-1)
    conv2 = Conv2D(neurons, (3, 3), activation='relu', padding='same')(conc1)
    if ba_norm: conv2 = BatchNormalization(momentum=ba_norm_momentum)(conv2)
    conc2 = concatenate([up, conv2], axis=-1)
    return conc2

Author: frankkramer-lab, Project: MIScnn, Lines: 18, Source: dense.py

Example 10: expanding_layer_2D

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def expanding_layer_2D(input, neurons, concatenate_link, ba_norm,
                       ba_norm_momentum):
    up = concatenate([Conv2DTranspose(neurons, (2, 2), strides=(2, 2),
                     padding='same')(input), concatenate_link], axis=-1)
    conv1 = Conv2D(neurons, (3, 3), activation='relu', padding='same')(up)
    if ba_norm: conv1 = BatchNormalization(momentum=ba_norm_momentum)(conv1)
    conv2 = Conv2D(neurons, (3, 3), activation='relu', padding='same')(conv1)
    if ba_norm: conv2 = BatchNormalization(momentum=ba_norm_momentum)(conv2)
    shortcut = Conv2D(neurons, (1, 1), activation='relu', padding="same")(up)
    add_layer = add([shortcut, conv2])
    return add_layer

Author: frankkramer-lab, Project: MIScnn, Lines: 18, Source: residual.py

Example 11: trans_conv3d_bn

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def trans_conv3d_bn(x, filters, num_row, num_col, num_z, padding='same', strides=(2, 2, 2), name=None):
    '''
    3D Transposed Convolutional layer

    Arguments:
        x {keras layer} -- input layer
        filters {int} -- number of filters
        num_row {int} -- number of rows in filters
        num_col {int} -- number of columns in filters
        num_z {int} -- length along z axis in filters

    Keyword Arguments:
        padding {str} -- mode of padding (default: {'same'})
        strides {tuple} -- stride of convolution operation (default: {(2, 2, 2)})
        name {str} -- name of the layer (default: {None})

    Returns:
        [keras layer] -- [output layer]
    '''

    x = Conv3DTranspose(filters, (num_row, num_col, num_z), strides=strides, padding=padding)(x)
    x = BatchNormalization(axis=4, scale=False)(x)

    return x 
Author: frankkramer-lab, Project: MIScnn, Lines: 27, Source: multiRes.py

Example 12: augmented_conv2d

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def augmented_conv2d(ip, filters, kernel_size=(3, 3), strides=(1, 1),
                     depth_k=0.2, depth_v=0.2, num_heads=8, relative_encodings=True):
    """
    Builds an Attention Augmented Convolution block.

    Args:
        ip: keras tensor.
        filters: number of output filters.
        kernel_size: convolution kernel size.
        strides: strides of the convolution.
        depth_k: float or int. Number of filters for `k`.
            If passed as float, computed as `filters * depth_k`.
        depth_v: float or int. Number of filters for `v`.
            If passed as float, computed as `filters * depth_v`.
        num_heads: int. Number of attention heads.
            Must be set such that `depth_k // num_heads` is > 0.
        relative_encodings: bool. Whether to use relative
            encodings or not.

    Returns:
        a keras tensor.
    """
    # input_shape = K.int_shape(ip)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    depth_k, depth_v = _normalize_depth_vars(depth_k, depth_v, filters)

    conv_out = _conv_layer(filters - depth_v, kernel_size, strides)(ip)

    # Augmented Attention Block
    qkv_conv = _conv_layer(2 * depth_k + depth_v, (1, 1), strides)(ip)
    attn_out = AttentionAugmentation2D(depth_k, depth_v, num_heads, relative_encodings)(qkv_conv)
    attn_out = _conv_layer(depth_v, kernel_size=(1, 1))(attn_out)

    output = concatenate([conv_out, attn_out], axis=channel_axis)
    output = BatchNormalization()(output)
    return output 
Author: titu1994, Project: keras-attention-augmented-convs, Lines: 41, Source: attn_augconv.py
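
The helper _normalize_depth_vars is not shown in the snippet. Based on the docstring, a plausible sketch (an assumption, not the project's code) is:

def _normalize_depth_vars(depth_k, depth_v, filters):
    # Floats are interpreted as fractions of `filters`; ints pass through.
    if isinstance(depth_k, float):
        depth_k = int(filters * depth_k)
    if isinstance(depth_v, float):
        depth_v = int(filters * depth_v)
    return depth_k, depth_v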

Example 13: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def __init__(self, filters, size, padding='SAME', apply_batchnorm=True):
        super(Downsample, self).__init__()
        self.apply_batchnorm = apply_batchnorm
        initializer = tf.random_normal_initializer(0., 0.02)
        filters = int(filters)
        self.conv1 = layers.Conv2D(filters,
                                   (size, size),
                                   strides=2,
                                   padding=padding,
                                   kernel_initializer=initializer,
                                   use_bias=False)
        if self.apply_batchnorm:
            self.batchnorm = tf.keras.layers.BatchNormalization() 
Author: yaojieliu, Project: CVPR2019-DeepTreeLearningForZeroShotFaceAntispoofing, Lines: 15, Source: utils.py
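
The snippet shows only the constructor. A forward pass in the usual downsampling pattern might look like the sketch below; the LeakyReLU activation is an assumption, not taken from the project:

def call(self, x, training=True):
        # Assumed forward pass: strided conv, optional batch norm, activation.
        x = self.conv1(x)
        if self.apply_batchnorm:
            x = self.batchnorm(x, training=training)
        return tf.nn.leaky_relu(x)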

Example 14: make_model

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def make_model(**kwargs) -> tf.keras.Model:
    # Model is based on MicronNet: https://arxiv.org/abs/1804.00497v3

    img_size = 48
    NUM_CLASSES = 43
    eps = 1e-6

    inputs = Input(shape=(img_size, img_size, 3))
    x = Conv2D(1, (1, 1), padding="same")(inputs)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = Conv2D(29, (5, 5), padding="same")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Conv2D(59, (3, 3), padding="same")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Conv2D(74, (3, 3), padding="same")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Flatten()(x)
    x = Dense(300)(x)
    x = Activation("relu")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Dense(300, activation="relu")(x)
    predictions = Dense(NUM_CLASSES, activation="softmax")(x)

    model = Model(inputs=inputs, outputs=predictions)
    model.compile(
        optimizer=tf.keras.optimizers.SGD(
            lr=0.01, decay=1e-6, momentum=0.9, nesterov=True
        ),
        loss=tf.keras.losses.sparse_categorical_crossentropy,
        metrics=["accuracy"],
    )

    return model 
Author: twosixlabs, Project: armory, Lines: 42, Source: micronnet_gtsrb.py

Example 15: DenseLayerNet

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import BatchNormalization [as alias]
def DenseLayerNet(inshape, layer_sizes, nb_labels=2, activation='relu', final_activation='softmax', dropout=None, batch_norm=None):
    """
    A densenet that connects a set of dense layers to  a classification
    output. 
    if nb_labels is 0 assume it is a regression net and use linear activation
    (if None specified)
    """
    inputs = KL.Input(shape=inshape, name='input')
    prev_layer = KL.Flatten(name='flat_inputs')(inputs)
    # to prevent overfitting include some kernel and bias regularization
    kreg = keras.regularizers.l1_l2(l1=1e-5, l2=1e-4)
    breg = keras.regularizers.l2(1e-4)

    # connect the list of dense layers to each other
    for lno, layer_size in enumerate(layer_sizes):
        prev_layer = KL.Dense(layer_size, name='dense%d' % lno, activation=activation,
                              kernel_regularizer=kreg, bias_regularizer=breg)(prev_layer)
        if dropout is not None:
            prev_layer = KL.Dropout(dropout, name='dropout%d'%lno)(prev_layer)
        if batch_norm is not None:
            prev_layer = KL.BatchNormalization(name='BatchNorm%d'%lno)(prev_layer)
            
    # tie the previous dense layer to a onehot encoded output layer
    last_layer = KL.Dense(nb_labels, name='last_dense', activation=final_activation)(prev_layer)

    model = keras.models.Model(inputs=inputs, outputs=last_layer)
    return model


Author: adalca, Project: neuron, Lines: 33, Source: models.py
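
A hypothetical call, with shapes and layer sizes chosen purely for illustration:

# Build a small 10-class classifier on flattened 28x28 inputs,
# with dropout and batch normalization enabled.
model = DenseLayerNet(inshape=(28, 28), layer_sizes=[128, 64],
                      nb_labels=10, dropout=0.5, batch_norm=True)
model.summary()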


Note: The tensorflow.keras.layers.BatchNormalization examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Consult each project's license before distributing or using the code; do not reproduce without permission.