

Python slim.batch_norm Method Code Examples

This article collects typical usage examples of the tensorflow.contrib.slim.batch_norm method in Python. If you are wondering what exactly slim.batch_norm does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples for its containing module, tensorflow.contrib.slim.


The following presents 15 code examples of the slim.batch_norm method, sorted by popularity by default.
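Before working through the examples, here is a minimal sketch (TF 1.x, assuming a 4-D NHWC input) of the two ways slim.batch_norm typically appears below: called directly as a layer, and attached to slim.conv2d as a normalizer_fn.

import tensorflow as tf
import tensorflow.contrib.slim as slim

images = tf.placeholder(tf.float32, [None, 32, 32, 3])

# Direct call: normalizes activations with batch statistics during training.
net = slim.batch_norm(images, is_training=True, decay=0.997, scope='bn')

# Attached to a conv layer -- the pattern most of the examples below use.
net = slim.conv2d(net, 16, [3, 3],
                  normalizer_fn=slim.batch_norm,
                  normalizer_params={'is_training': True},
                  scope='conv1')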

Example 1: delf_attention

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def delf_attention(feature_map, config, is_training, arg_scope=None):
    with tf.variable_scope('attonly/attention/compute'):
        with slim.arg_scope(arg_scope):
            is_training = config['train_attention'] and is_training
            with slim.arg_scope([slim.conv2d, slim.batch_norm],
                                trainable=is_training):
                with slim.arg_scope([slim.batch_norm], is_training=is_training):
                    attention = slim.conv2d(
                            feature_map, 512, config['attention_kernel'], rate=1,
                            activation_fn=tf.nn.relu, scope='conv1')
                    attention = slim.conv2d(
                            attention, 1, config['attention_kernel'], rate=1,
                            activation_fn=None, normalizer_fn=None, scope='conv2')
                    attention = tf.nn.softplus(attention)
    if config['normalize_feature_map']:
        feature_map = tf.nn.l2_normalize(feature_map, -1)
    descriptor = tf.reduce_sum(feature_map*attention, axis=[1, 2])
    if config['normalize_average']:
        descriptor /= tf.reduce_sum(attention, axis=[1, 2])
    return descriptor 
Developer: ethz-asl | Project: hierarchical_loc | Lines: 22 | Source: layers.py
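The final pooling in this example is an attention-weighted sum over spatial locations, optionally turned into a weighted average. A small NumPy sketch of the same reduction, with hypothetical shapes:

import numpy as np

feature_map = np.random.rand(2, 8, 8, 512)  # [batch, H, W, C]
attention = np.random.rand(2, 8, 8, 1)      # softplus output, non-negative

descriptor = (feature_map * attention).sum(axis=(1, 2))  # [batch, C]
# With config['normalize_average'] set, this becomes a weighted average:
descriptor /= attention.sum(axis=(1, 2))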

Example 2: tower

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def tower(image, mode, config):
        image = image_normalization(image)
        if image.shape[-1] == 1:
            image = tf.tile(image, [1, 1, 1, 3])

        with slim.arg_scope(resnet.resnet_arg_scope()):
            is_training = config['train_backbone'] and (mode == Mode.TRAIN)
            with slim.arg_scope([slim.conv2d, slim.batch_norm], trainable=is_training):
                _, encoder = resnet.resnet_v1_50(image,
                                                 is_training=is_training,
                                                 global_pool=False,
                                                 scope='resnet_v1_50')
        feature_map = encoder['resnet_v1_50/block3']

        if config['use_attention']:
            descriptor = delf_attention(feature_map, config, mode == Mode.TRAIN,
                                        resnet.resnet_arg_scope())
        else:
            descriptor = tf.reduce_max(feature_map, [1, 2])

        if config['dimensionality_reduction']:
            descriptor = dimensionality_reduction(descriptor, config)
        return descriptor 
Developer: ethz-asl | Project: hierarchical_loc | Lines: 25 | Source: delf.py
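The tf.tile call at the top of the tower converts a single-channel (grayscale) image into three identical channels so it matches the RGB input ResNet-50 expects; a minimal sketch with an assumed batch shape:

gray = tf.zeros([4, 224, 224, 1])
rgb = tf.tile(gray, [1, 1, 1, 3])  # replicate the channel axis: [4, 224, 224, 3]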

Example 3: tower

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def tower(image, mode, config):
        image = image_normalization(image)
        if image.shape[-1] == 1:
            image = tf.tile(image, [1, 1, 1, 3])

        with slim.arg_scope(resnet.resnet_arg_scope()):
            training = config['train_backbone'] and (mode == Mode.TRAIN)
            with slim.arg_scope([slim.conv2d, slim.batch_norm], trainable=training):
                _, encoder = resnet.resnet_v1_50(image,
                                                 is_training=training,
                                                 global_pool=False,
                                                 scope='resnet_v1_50')
        feature_map = encoder['resnet_v1_50/block3']
        descriptor = vlad(feature_map, config, mode == Mode.TRAIN)
        if config['dimensionality_reduction']:
            descriptor = dimensionality_reduction(descriptor, config)
        return descriptor 
Developer: ethz-asl | Project: hierarchical_loc | Lines: 19 | Source: netvlad_triplets.py

Example 4: mobilenetv2_scope

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def mobilenetv2_scope(is_training=True,
                      trainable=True,
                      weight_decay=0.00004,
                      stddev=0.09,
                      dropout_keep_prob=0.8,
                      bn_decay=0.997):
  """Defines Mobilenet training scope.
  In default. We do not use BN

  ReWrite the scope.
  """
  batch_norm_params = {
      'is_training': False,
      'trainable': False,
      'decay': bn_decay,
  }
  with slim.arg_scope(training_scope(is_training=is_training, weight_decay=weight_decay)):
      with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.separable_conv2d],
                          trainable=trainable):
          with slim.arg_scope([slim.batch_norm], **batch_norm_params) as sc:
              return sc 
Developer: DetectionTeamUCAS | Project: R2CNN_Faster-RCNN_Tensorflow | Lines: 23 | Source: mobilenet_v2.py
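A hypothetical usage sketch of this scope (mobilenet_v2.mobilenet is assumed to be the TF-Slim MobileNet-V2 builder that this file rewrites):

with slim.arg_scope(mobilenetv2_scope(is_training=True, trainable=True)):
    # Convolutions train normally, but batch norm stays frozen as configured above.
    net, end_points = mobilenet_v2.mobilenet(images, num_classes=None)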

Example 5: _build_aux_head

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def _build_aux_head(net, end_points, num_classes, hparams, scope):
  """Auxiliary head used for all models across all datasets."""
  with tf.variable_scope(scope):
    aux_logits = tf.identity(net)
    with tf.variable_scope('aux_logits'):
      aux_logits = slim.avg_pool2d(
          aux_logits, [5, 5], stride=3, padding='VALID')
      aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='proj')
      aux_logits = slim.batch_norm(aux_logits, scope='aux_bn0')
      aux_logits = tf.nn.relu(aux_logits)
      # Shape of feature map before the final layer.
      shape = aux_logits.shape
      if hparams.data_format == 'NHWC':
        shape = shape[1:3]
      else:
        shape = shape[2:4]
      aux_logits = slim.conv2d(aux_logits, 768, shape, padding='VALID')
      aux_logits = slim.batch_norm(aux_logits, scope='aux_bn1')
      aux_logits = tf.nn.relu(aux_logits)
      aux_logits = contrib_layers.flatten(aux_logits)
      aux_logits = slim.fully_connected(aux_logits, num_classes)
      end_points['AuxLogits'] = aux_logits 
Developer: tensorflow | Project: benchmarks | Lines: 24 | Source: nasnet_model.py
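The shape slicing above extracts the spatial extent of the feature map, which sits in different positions depending on the data format; a minimal illustration with hypothetical shapes:

nhwc = tf.zeros([8, 7, 7, 128])   # [N, H, W, C]
nchw = tf.zeros([8, 128, 7, 7])   # [N, C, H, W]
print(nhwc.shape[1:3])  # (7, 7) -- spatial extent for NHWC
print(nchw.shape[2:4])  # (7, 7) -- spatial extent for NCHW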

Example 6: _cell_base

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def _cell_base(self, net, prev_layer):
    """Runs the beginning of the conv cell before the predicted ops are run."""
    num_filters = self._filter_size

    # Check to be sure prev layer stuff is setup correctly
    prev_layer = self._reduce_prev_layer(prev_layer, net)

    net = tf.nn.relu(net)
    net = slim.conv2d(net, num_filters, 1, scope='1x1')
    net = slim.batch_norm(net, scope='beginning_bn')
    split_axis = get_channel_index()
    net = tf.split(axis=split_axis, num_or_size_splits=1, value=net)
    for split in net:
      # Each split should carry the expected number of filters.
      assert int(split.shape[split_axis]) == int(
          self._num_conv_filters * self._filter_scaling)
    net.append(prev_layer)
    return net 
Developer: tensorflow | Project: benchmarks | Lines: 19 | Source: nasnet_utils.py

Example 7: _extra_conv_arg_scope_with_bn

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
                     activation_fn=None,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):

  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': tf.GraphKeys.UPDATE_OPS,
  }

  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc 
Developer: CharlesShang | Project: FastMaskRCNN | Lines: 25 | Source: pyramid_network.py
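Because this scope leaves updates_collections at tf.GraphKeys.UPDATE_OPS, the batch norm moving averages are not updated automatically; a training loop built on it needs the standard TF 1.x pattern below (loss and optimizer are assumed to exist). By contrast, Examples 8-10 set 'updates_collections': None, which forces the updates to run in place inside the layer and makes this extra step unnecessary, at a small cost in speed.

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss)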

Example 8: inference

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def inference(images, keep_probability, phase_train=True, 
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.initializers.xavier_initializer(), 
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v2(images, is_training=phase_train,
              dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse) 
Developer: GaoangW | Project: TNT | Lines: 21 | Source: inception_resnet_v2.py

Example 9: inference

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def inference(images, keep_probability, phase_train=True,  # @UnusedVariable
              bottleneck_layer_size=128, bottleneck_layer_activation=None, weight_decay=0.0, reuse=None):  # @UnusedVariable
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }
    
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        size = np.prod(images.get_shape()[1:].as_list())
        net = slim.fully_connected(tf.reshape(images, (-1,size)), bottleneck_layer_size, activation_fn=None, 
                scope='Bottleneck', reuse=False)
        return net, None 
Developer: GaoangW | Project: TNT | Lines: 24 | Source: dummy.py

Example 10: inference

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def inference(images, keep_probability, phase_train=True, 
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }
    
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.initializers.xavier_initializer(), 
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v1(images, is_training=phase_train,
              dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse) 
Developer: GaoangW | Project: TNT | Lines: 22 | Source: inception_resnet_v1.py

Example 11: encoder

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                    net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2 
Developer: GaoangW | Project: TNT | Lines: 22 | Source: dfc_vae_large.py

Example 12: encoder

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def encoder(self, images, is_training):
        activation_fn = leaky_relu  # tf.nn.relu
        weight_decay = 0.0
        with tf.variable_scope('encoder'):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training):
                with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                    weights_regularizer=slim.l2_regularizer(weight_decay),
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=self.batch_norm_params):
                    net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                    net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                    net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                    net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                    net = slim.flatten(net)
                    fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                    fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
        return fc1, fc2 
Developer: GaoangW | Project: TNT | Lines: 21 | Source: dfc_vae.py

Example 13: forward

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def forward(self):
        pad = [[self.lay.pad, self.lay.pad]] * 2
        temp = tf.pad(self.inp.out, [[0, 0]] + pad + [[0, 0]])
        temp = tf.nn.conv2d(temp, self.lay.w['kernel'], padding = 'VALID', 
            name = self.scope, strides = [1] + [self.lay.stride] * 2 + [1])
        if self.lay.batch_norm: 
            temp = self.batchnorm(self.lay, temp)
        self.out = tf.nn.bias_add(temp, self.lay.w['biases']) 
Developer: AmeyaWagh | Project: Traffic_sign_detection_YOLO | Lines: 10 | Source: convolution.py

Example 14: batchnorm

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def batchnorm(self, layer, inp):
        if not self.var:
            temp = (inp - layer.w['moving_mean'])
            temp /= (np.sqrt(layer.w['moving_variance']) + 1e-5)
            temp *= layer.w['gamma']
            return temp
        else:
            args = dict({
                'center' : False, 'scale' : True,
                'epsilon': 1e-5, 'scope' : self.scope,
                'updates_collections' : None,
                'is_training': layer.h['is_training'],
                'param_initializers': layer.w
                })
            return slim.batch_norm(inp, **args) 
Developer: AmeyaWagh | Project: Traffic_sign_detection_YOLO | Lines: 17 | Source: convolution.py
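When self.var is false, the method normalizes directly with stored statistics instead of calling slim.batch_norm. A NumPy sketch of that frozen path, with hypothetical shapes (note that this code adds epsilon to the standard deviation rather than to the variance):

import numpy as np

inp = np.random.randn(1, 4, 4, 8).astype(np.float32)
moving_mean = np.zeros(8, np.float32)
moving_variance = np.ones(8, np.float32)
gamma = np.ones(8, np.float32)

out = (inp - moving_mean) / (np.sqrt(moving_variance) + 1e-5) * gamma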

Example 15: speak

# Required import: from tensorflow.contrib import slim [as alias]
# Or: from tensorflow.contrib.slim import batch_norm [as alias]
def speak(self):
        l = self.lay
        args = [l.ksize] * 2 + [l.pad] + [l.stride]
        args += [l.batch_norm * '+bnorm']
        args += [l.activation]
        msg = 'conv {}x{}p{}_{}  {}  {}'.format(*args)
        return msg 
Developer: AmeyaWagh | Project: Traffic_sign_detection_YOLO | Lines: 9 | Source: convolution.py
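For illustration, with hypothetical layer attributes (ksize=3, pad=1, stride=1, batch_norm=1, activation='leaky'), the format string above produces:

args = [3, 3] + [1] + [1] + [1 * '+bnorm'] + ['leaky']
print('conv {}x{}p{}_{}  {}  {}'.format(*args))
# -> conv 3x3p1_1  +bnorm  leaky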


Note: The tensorflow.contrib.slim.batch_norm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright remains with the original authors, and distribution and use must follow each project's license. Please do not republish without permission.