

Python layers.BatchNormalization Method Code Examples

This article collects typical usage examples of the tensorflow.python.keras.layers.BatchNormalization method in Python. If you are wondering what layers.BatchNormalization does, how to call it, or where to find concrete usage examples, the curated code samples below should help. You can also explore further usage examples from the module it belongs to, tensorflow.python.keras.layers.


The following shows 15 code examples of layers.BatchNormalization, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
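
Before diving into the examples, here is a minimal usage sketch (written for this article, not taken from any of the projects below), assuming the tensorflow.python.keras import path used throughout this page:

from tensorflow.python.keras.layers import Input, Conv2D, BatchNormalization, Activation
from tensorflow.python.keras.models import Model

inputs = Input(shape=(32, 32, 3))
x = Conv2D(16, (3, 3), padding='same', use_bias=False)(inputs)  # bias is redundant before BN
x = BatchNormalization()(x)  # normalizes activations over the batch (last axis by default)
x = Activation('relu')(x)
model = Model(inputs, x)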

Example 1: resnet_module

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def resnet_module(input, channel_depth, strided_pool=False):
    residual_input = input
    stride = 1

    if strided_pool:
        stride = 2
        residual_input = Conv2D(channel_depth, kernel_size=1, strides=stride, padding="same")(residual_input)
        residual_input = BatchNormalization()(residual_input)

    input = Conv2D(int(channel_depth/4), kernel_size=1, strides=stride, padding="same")(input)
    input = BatchNormalization()(input)
    input = Activation("relu")(input)

    input = Conv2D(int(channel_depth / 4), kernel_size=3, strides=1, padding="same")(input)
    input = BatchNormalization()(input)
    input = Activation("relu")(input)

    input = Conv2D(channel_depth, kernel_size=1, strides=1, padding="same")(input)
    input = BatchNormalization()(input)

    input = add([input, residual_input])
    input = Activation("relu")(input)

    return input 
Developer ID: OlafenwaMoses, Project: ImageAI, Code lines: 26, Source: resnet50.py

Example 2: resnet_first_block_first_module

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def resnet_first_block_first_module(input, channel_depth):
    residual_input = input
    stride = 1

    residual_input = Conv2D(channel_depth, kernel_size=1, strides=1, padding="same")(residual_input)
    residual_input = BatchNormalization()(residual_input)

    input = Conv2D(int(channel_depth/4), kernel_size=1, strides=stride, padding="same")(input)
    input = BatchNormalization()(input)
    input = Activation("relu")(input)

    input = Conv2D(int(channel_depth / 4), kernel_size=3, strides=stride, padding="same")(input)
    input = BatchNormalization()(input)
    input = Activation("relu")(input)

    input = Conv2D(channel_depth, kernel_size=1, strides=stride, padding="same")(input)
    input = BatchNormalization()(input)

    input = add([input, residual_input])
    input = Activation("relu")(input)

    return input 
Developer ID: OlafenwaMoses, Project: ImageAI, Code lines: 24, Source: resnet50.py
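
Taken together, Examples 1 and 2 suggest how a ResNet50 stage is assembled: the first module projects the shortcut with a 1x1 convolution so its depth matches, and later modules reuse the running tensor directly. A hedged sketch of the chaining (the helper name resnet_first_stage is hypothetical, not from resnet50.py):

def resnet_first_stage(x, channel_depth=256, num_modules=3):
    # Hypothetical helper: the first module projects the shortcut, the rest do not.
    x = resnet_first_block_first_module(x, channel_depth)
    for _ in range(num_modules - 1):
        x = resnet_module(x, channel_depth, strided_pool=False)
    return x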

Example 3: __transition_block

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, Relu, 1x1 Conv2D (with optional compression) and AveragePooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                    in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv and average pooling
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    return x 
Developer ID: OlafenwaMoses, Project: ImageAI, Code lines: 22, Source: densenet.py
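
As a worked example of the compression arithmetic (values illustrative, not from densenet.py): a DenseNet reduction of 0.5 gives compression = 1 - 0.5 = 0.5, so a transition block fed 256 feature maps emits 128, and the 2x2 average pooling then halves the spatial resolution:

nb_filter, reduction = 256, 0.5      # illustrative values
compression = 1.0 - reduction
print(int(nb_filter * compression))  # -> 128 feature maps leave the block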

Example 4: build

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def build(self, input_layer):
        last_layer = input_layer
        input_shape = K.int_shape(input_layer)

        if self.with_embedding:
            if input_shape[-1] != 1:
                raise ValueError("Only one feature (the index) can be used with embeddings, "
                                 "i.e. the input shape should be (num_samples, length, 1). "
                                 "The actual shape was: " + str(input_shape))

            last_layer = Lambda(lambda x: K.squeeze(x, axis=-1),
                                output_shape=K.int_shape(last_layer)[:-1])(last_layer)  # Remove feature dimension.
            last_layer = Embedding(self.embedding_size, self.embedding_dimension,
                                   input_length=input_shape[-2])(last_layer)

        for _ in range(self.num_layers):
            last_layer = Dense(self.num_units, activation=self.activation)(last_layer)
            if self.with_bn:
                last_layer = BatchNormalization()(last_layer)
            if not np.isclose(self.p_dropout, 0):
                last_layer = Dropout(self.p_dropout)(last_layer)
        return last_layer 
Developer ID: d909b, Project: cxplain, Code lines: 24, Source: rnn.py

Example 5: __conv_block

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4):
    ''' Apply BatchNorm, Relu, 3x3 Conv2D, optional bottleneck block and dropout
    Args:
        ip: Input keras tensor
        nb_filter: number of filters
        bottleneck: add bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor with batch_norm, relu and convolution2d added (optional bottleneck)
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)

    if bottleneck:
        inter_channel = nb_filter * 4  # Obtained from https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua

        x = Conv2D(inter_channel, (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
                   kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)

    x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal', padding='same', use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x 
Developer ID: OlafenwaMoses, Project: ImageAI, Code lines: 30, Source: densenet.py

Example 6: __init__

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def __init__(self, fused=None, cross_replica_average_fn=None, **kwargs):
    super(BatchNormalization, self).__init__(**kwargs)
    self.cross_replica_average_fn = cross_replica_average_fn

    if fused and cross_replica_average_fn is not None:
      raise ValueError('fused must be `False` when specifying'
                       ' cross_replica_average_fn') 
Developer ID: Octavian-ai, Project: BigGAN-TPU-TensorFlow, Code lines: 9, Source: tpu_normalization.py

Example 7: _moments

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def _moments(self, inputs, reduction_axes, keep_dims):
    mean, variance = super(BatchNormalization, self)._moments(
        inputs, reduction_axes, keep_dims=keep_dims)
    if self.cross_replica_average_fn:
      mean = self.cross_replica_average_fn(mean)
      variance = self.cross_replica_average_fn(variance)
    return (mean, variance) 
Developer ID: Octavian-ai, Project: BigGAN-TPU-TensorFlow, Code lines: 9, Source: tpu_normalization.py
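
Examples 6 and 7 read as two methods of a single class; a hedged reconstruction of the enclosing definition in tpu_normalization.py (the base class is an assumption inferred from the import comment):

from tensorflow.python.keras import layers

class BatchNormalization(layers.BatchNormalization):
    def __init__(self, fused=None, cross_replica_average_fn=None, **kwargs):
        ...  # body as in Example 6
    def _moments(self, inputs, reduction_axes, keep_dims):
        ...  # body as in Example 7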

Example 8: cross_replica_batch_normalization

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def cross_replica_batch_normalization(inputs,
                                      training=False,
                                      num_distributed_groups=1,
                                      **kwargs):
  """Functional interface for the cross replica batch normalization layer.


  For detailed information of arguments and implementation, refer to:
  https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization

  Arguments:
    inputs: Tensor input.
    training: Either a Python boolean, or a TensorFlow boolean scalar tensor
      (e.g. a placeholder). Whether to return the output in training mode
      (normalized with statistics of the current batch) or in inference mode
      (normalized with moving statistics). **NOTE**: make sure to set this
        parameter correctly, or else your training/inference will not work
        properly.
    num_distributed_groups: Number of groups to normalize in the distributed
      batch normalization. Replicas will evenly split into groups. For example,
      1 for global batch norm and -1 or None for per-replica batch norm.
    **kwargs: For passing through arguments to BatchNormalization.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  layer = BatchNormalization(
      cross_replica_average_fn=functools.partial(
          cross_replica_average, num_groups=num_distributed_groups),
      **kwargs)
  return layer.apply(inputs, training=training) 
Developer ID: Octavian-ai, Project: BigGAN-TPU-TensorFlow, Code lines: 36, Source: tpu_normalization.py
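
A hedged usage sketch for the functional interface above; it presumes a TPU replica context in which the module's cross_replica_average is meaningful, and the shape and axis value are illustrative:

import tensorflow as tf

activations = tf.ones([8, 32, 32, 64])  # toy batch: [batch, h, w, channels]
normalized = cross_replica_batch_normalization(
    activations,
    training=True,              # batch statistics + moving-average updates
    num_distributed_groups=1,   # one group, i.e. global batch norm across replicas
    axis=-1)                    # forwarded to BatchNormalization via **kwargs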

Example 9: build

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def build(self, input_layer):
        last_layer = input_layer
        for _ in range(self.num_layers):
            last_layer = Dense(self.num_units, activation=self.activation)(last_layer)
            if self.with_bn:
                last_layer = BatchNormalization()(last_layer)
            if not np.isclose(self.p_dropout, 0):
                last_layer = Dropout(self.p_dropout)(last_layer)
        return last_layer 
Developer ID: d909b, Project: cxplain, Code lines: 11, Source: mlp.py

Example 10: _bn_relu

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def _bn_relu(input):
    """Helper to build a BN -> relu block
    """
    norm = BatchNormalization(axis=CHANNEL_AXIS)(input)
    return Activation("relu")(norm) 
Developer ID: marco-willi, Project: camera-trap-classifier, Code lines: 7, Source: resnet.py
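
A hedged usage sketch; CHANNEL_AXIS is assumed to be a module-level constant in the project's resnet.py (typically -1, or 3, for channels_last data), and the Keras imports are assumed from the surrounding module:

CHANNEL_AXIS = -1  # assumption for channels_last data
inputs = Input(shape=(64, 64, 3))
x = Conv2D(64, (3, 3), padding='same', use_bias=False)(inputs)
x = _bn_relu(x)    # BN -> relu applied to the conv output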

Example 11: __init__

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def __init__(self, game, encoder):
        """
        NNet model, copied from Othello NNet, with reduced fully connected layers fc1 and fc2 and reduced nnet_args.num_channels
        :param game: game configuration
        :param encoder: Encoder, used to encode game boards
        """
        from rts.src.config_class import CONFIG

        # game params
        self.board_x, self.board_y, num_encoders = game.getBoardSize()
        self.action_size = game.getActionSize()

        """
        num_encoders = CONFIG.nnet_args.encoder.num_encoders
        """
        num_encoders = encoder.num_encoders

        # Neural Net
        self.input_boards = Input(shape=(self.board_x, self.board_y, num_encoders))  # s: batch_size x board_x x board_y x num_encoders

        x_image = Reshape((self.board_x, self.board_y, num_encoders))(self.input_boards)  # batch_size  x board_x x board_y x num_encoders
        h_conv1 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='same', use_bias=False)(x_image)))  # batch_size  x board_x x board_y x num_channels
        h_conv2 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='same', use_bias=False)(h_conv1)))  # batch_size  x board_x x board_y x num_channels
        h_conv3 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='valid', use_bias=False)(h_conv2)))  # batch_size  x (board_x-2) x (board_y-2) x num_channels
        h_conv4 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='valid', use_bias=False)(h_conv3)))  # batch_size  x (board_x-4) x (board_y-4) x num_channels
        h_conv4_flat = Flatten()(h_conv4)
        s_fc1 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(256, use_bias=False)(h_conv4_flat))))  # batch_size x 256
        s_fc2 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(128, use_bias=False)(s_fc1))))  # batch_size x 128
        self.pi = Dense(self.action_size, activation='softmax', name='pi')(s_fc2)  # batch_size x self.action_size
        self.v = Dense(1, activation='tanh', name='v')(s_fc2)  # batch_size x 1

        self.model = Model(inputs=self.input_boards, outputs=[self.pi, self.v])
        self.model.compile(loss=['categorical_crossentropy', 'mean_squared_error'], optimizer=Adam(CONFIG.nnet_args.lr)) 
Developer ID: suragnair, Project: alpha-zero-general, Code lines: 35, Source: RTSNNet.py
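
A hedged training sketch for the two-headed model above; the class name RTSNNet is inferred from the source file name, and the array names and hyperparameters are illustrative (the real training loop lives elsewhere in alpha-zero-general):

# boards: [N, board_x, board_y, num_encoders]
# target_pis: [N, action_size]; target_vs: [N]
nnet = RTSNNet(game, encoder)
nnet.model.fit(x=boards, y=[target_pis, target_vs], batch_size=64, epochs=10)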

Example 12: __init__

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def __init__(self, filters, kernel, strides):
        '''
        Constructs a Separable Convolution - Batch Normalization - Relu block.
        '''
        super(SeperableConvolution, self).__init__()

        self.conv = SeparableConv2D(filters, kernel, strides=strides, padding='same',
                                    kernel_initializer='he_uniform')
        self.bn = BatchNormalization() 
Developer ID: titu1994, Project: progressive-neural-architecture-search, Code lines: 11, Source: ops.py
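
The excerpt shows only __init__; a hedged guess at the call() that completes the "Separable Convolution - Batch Normalization - Relu" block (the ReLU placement is an assumption, and import tensorflow as tf is presumed):

    def call(self, inputs, training=None):
        x = self.conv(inputs)              # depthwise-separable convolution
        x = self.bn(x, training=training)  # BN respects train/inference mode
        return tf.nn.relu(x)               # the "Relu" part of the block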

Example 13: conv2d_bn

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def conv2d_bn(x,
              filters,
              num_row,
              num_col,
              padding='same',
              strides=(1, 1),
              name=None):
    """Utility function to apply conv + BN.

    # Arguments
        x: input tensor.
        filters: filters in `Conv2D`.
        num_row: height of the convolution kernel.
        num_col: width of the convolution kernel.
        padding: padding mode in `Conv2D`.
        strides: strides in `Conv2D`.
        name: name of the ops; will become `name + '_conv'`
            for the convolution and `name + '_bn'` for the
            batch norm layer.

    # Returns
        Output tensor after applying `Conv2D` and `BatchNormalization`.
    """
    if name is not None:
        bn_name = name + '_bn'
        conv_name = name + '_conv'
    else:
        bn_name = None
        conv_name = None
    if K.image_data_format() == 'channels_first':
        bn_axis = 1
    else:
        bn_axis = 3
    x = Conv2D(
        filters, (num_row, num_col),
        strides=strides,
        padding=padding,
        use_bias=False,
        name=conv_name)(x)
    x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
    x = Activation('relu', name=name)(x)
    return x 
Developer ID: OlafenwaMoses, Project: ImageAI, Code lines: 44, Source: inceptionv3.py
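
A hedged usage sketch of the naming convention documented above: the single name argument fans out into '<name>_conv' and '<name>_bn' sublayer names (values illustrative, Input assumed from the module's Keras imports):

img = Input(shape=(299, 299, 3))
x = conv2d_bn(img, 32, 3, 3, strides=(2, 2), name='stem')
# creates layers named 'stem_conv' and 'stem_bn', plus a relu activation named 'stem'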

Example 14: identity_block

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def identity_block(input_tensor, kernel_size, filters, stage, block):
  """The identity block is the block that has no conv layer at shortcut.

  # Arguments
      input_tensor: input tensor
      kernel_size: default 3, the kernel size of
          middle conv layer at main path
      filters: list of integers, the filters of 3 conv layer at main path
      stage: integer, current stage label, used for generating layer names
      block: 'a','b'..., current block label, used for generating layer names

  # Returns
      Output tensor for the block.
  """
  filters1, filters2, filters3 = filters
  if backend.image_data_format() == 'channels_last':
    bn_axis = 3
  else:
    bn_axis = 1
  conv_name_base = 'res' + str(stage) + block + '_branch'
  bn_name_base = 'bn' + str(stage) + block + '_branch'

  x = layers.Conv2D(filters1, (1, 1),
                    kernel_initializer='he_normal',
                    kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                    bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                    name=conv_name_base + '2a')(input_tensor)
  x = layers.BatchNormalization(axis=bn_axis,
                                momentum=BATCH_NORM_DECAY,
                                epsilon=BATCH_NORM_EPSILON,
                                name=bn_name_base + '2a')(x)
  x = layers.Activation('relu')(x)

  x = layers.Conv2D(filters2, kernel_size,
                    padding='same',
                    kernel_initializer='he_normal',
                    kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                    bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                    name=conv_name_base + '2b')(x)
  x = layers.BatchNormalization(axis=bn_axis,
                                momentum=BATCH_NORM_DECAY,
                                epsilon=BATCH_NORM_EPSILON,
                                name=bn_name_base + '2b')(x)
  x = layers.Activation('relu')(x)

  x = layers.Conv2D(filters3, (1, 1),
                    kernel_initializer='he_normal',
                    kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                    bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                    name=conv_name_base + '2c')(x)
  x = layers.BatchNormalization(axis=bn_axis,
                                momentum=BATCH_NORM_DECAY,
                                epsilon=BATCH_NORM_EPSILON,
                                name=bn_name_base + '2c')(x)

  x = layers.add([x, input_tensor])
  x = layers.Activation('relu')(x)
  return x 
Developer ID: mlperf, Project: training_results_v0.5, Code lines: 60, Source: resnet_model.py

Example 15: identity_block

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import BatchNormalization [as alias]
def identity_block(input_tensor, kernel_size, filters, stage, block):
  """The identity block is the block that has no conv layer at shortcut.

  Args:
      input_tensor: input tensor
      kernel_size: default 3, the kernel size of
          middle conv layer at main path
      filters: list of integers, the filters of 3 conv layer at main path
      stage: integer, current stage label, used for generating layer names
      block: 'a','b'..., current block label, used for generating layer names

  Returns:
      Output tensor for the block.
  """
  filters1, filters2, filters3 = filters
  if backend.image_data_format() == 'channels_last':
    bn_axis = 3
  else:
    bn_axis = 1
  conv_name_base = 'res' + str(stage) + block + '_branch'
  bn_name_base = 'bn' + str(stage) + block + '_branch'

  x = layers.Conv2D(filters1, (1, 1), use_bias=False,
                    kernel_initializer='he_normal',
                    kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                    name=conv_name_base + '2a')(input_tensor)
  x = layers.BatchNormalization(axis=bn_axis,
                                momentum=BATCH_NORM_DECAY,
                                epsilon=BATCH_NORM_EPSILON,
                                name=bn_name_base + '2a')(x)
  x = layers.Activation('relu')(x)

  x = layers.Conv2D(filters2, kernel_size, use_bias=False,
                    padding='same',
                    kernel_initializer='he_normal',
                    kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                    name=conv_name_base + '2b')(x)
  x = layers.BatchNormalization(axis=bn_axis,
                                momentum=BATCH_NORM_DECAY,
                                epsilon=BATCH_NORM_EPSILON,
                                name=bn_name_base + '2b')(x)
  x = layers.Activation('relu')(x)

  x = layers.Conv2D(filters3, (1, 1), use_bias=False,
                    kernel_initializer='he_normal',
                    kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                    name=conv_name_base + '2c')(x)
  x = layers.BatchNormalization(axis=bn_axis,
                                momentum=BATCH_NORM_DECAY,
                                epsilon=BATCH_NORM_EPSILON,
                                name=bn_name_base + '2c')(x)

  x = layers.add([x, input_tensor])
  x = layers.Activation('relu')(x)
  return x 
Developer ID: artyompal, Project: tpu_models, Code lines: 57, Source: resnet_model.py
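
Because the shortcut in both identity_block variants is a raw layers.add with no projection, the last entry of filters must equal the input tensor's channel depth. A hedged usage sketch with illustrative values:

input_tensor = layers.Input(shape=(56, 56, 256))
x = identity_block(input_tensor, kernel_size=3,
                   filters=[64, 64, 256],  # filters3 == 256 == input channels
                   stage=2, block='b')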


Note: The tensorflow.python.keras.layers.BatchNormalization examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.