当前位置: 首页>>代码示例>>Python>>正文


Python layers.batch_norm方法代码示例

本文整理汇总了Python中tensorflow.contrib.layers.python.layers.layers.batch_norm方法的典型用法代码示例。如果您正苦于以下问题:Python layers.batch_norm方法的具体用法?Python layers.batch_norm怎么用?Python layers.batch_norm使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在tensorflow.contrib.layers.python.layers.layers的用法示例。


在下文中一共展示了layers.batch_norm方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: resnet_arg_scope

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def resnet_arg_scope(is_training=True,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    """Build a slim arg_scope for ResNet feature extraction.

    Batch norm always runs with frozen statistics ('is_training': False)
    and non-trainable parameters; `is_training` only controls whether the
    convolution weights themselves are trainable.
    """
    bn_args = dict(
        is_training=False,   # keep BN in inference mode (frozen statistics)
        decay=batch_norm_decay,
        epsilon=batch_norm_epsilon,
        scale=batch_norm_scale,
        trainable=False,     # BN beta/gamma are never updated
        updates_collections=tf.GraphKeys.UPDATE_OPS,
    )
    conv_args = dict(
        weights_initializer=slim.variance_scaling_initializer(),
        trainable=is_training,
        activation_fn=tf.nn.relu,
        normalizer_fn=slim.batch_norm,
        normalizer_params=bn_args,
    )
    with arg_scope([slim.conv2d], **conv_args):
        with arg_scope([slim.batch_norm], **bn_args) as arg_sc:
            return arg_sc
开发者ID:JudyYe,项目名称:zero-shot-gcn,代码行数:23,代码来源:extract_pool5.py

示例2: inception_arg_scope

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def inception_arg_scope(is_training=True,
                        batch_norm_decay=0.997,
                        batch_norm_epsilon=1e-5,
                        batch_norm_scale=True):
    """Build a slim arg_scope for Inception feature extraction.

    Batch norm always runs with frozen statistics ('is_training': False)
    and non-trainable parameters; `is_training` only controls whether the
    convolution weights themselves are trainable.

    NOTE(review): `batch_norm_scale` is accepted but never used — unlike
    the resnet variant above, no 'scale' key is placed in
    `batch_norm_params`, so slim.batch_norm falls back to its own default
    (presumably scale=False). This may be intentional (pretrained Inception
    checkpoints typically carry no gamma); confirm before "fixing".
    """
    batch_norm_params = {
        'is_training': False,  # keep BN in inference mode (frozen statistics)
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'trainable': False,  # BN parameters are never updated
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }
    with arg_scope(
            [slim.conv2d],
            weights_initializer=slim.variance_scaling_initializer(),
            trainable=is_training,
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc 
开发者ID:JudyYe,项目名称:zero-shot-gcn,代码行数:22,代码来源:extract_pool5.py

示例3: predictron_arg_scope

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def predictron_arg_scope(weight_decay=0.0001,
                         batch_norm_decay=0.997,
                         batch_norm_epsilon=1e-5,
                         batch_norm_scale=True):
  """Default arg_scope for Predictron: L2-regularized conv/FC plus BN convs."""
  bn_params = dict(
      decay=batch_norm_decay,
      epsilon=batch_norm_epsilon,
      scale=batch_norm_scale,
      updates_collections=tf.GraphKeys.UPDATE_OPS,
  )

  # L2 weight decay applies to both conv and fully-connected weights.
  with arg_scope([layers.conv2d, layers_lib.fully_connected],
                 weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    # Convolutions are linear (activation_fn=None) and batch-normalized.
    with arg_scope([layers.conv2d],
                   weights_initializer=initializers.variance_scaling_initializer(),
                   activation_fn=None,
                   normalizer_fn=layers_lib.batch_norm,
                   normalizer_params=bn_params) as sc:
      return sc
开发者ID:zhongwen,项目名称:predictron,代码行数:24,代码来源:util.py

示例4: _fully_connected

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def _fully_connected(input_data, num_output, name, relu=True):
    """Fully-connected layer (optionally ReLU), followed by batch norm."""
    with tf.variable_scope(name) as scope:
        shape = input_data.get_shape()
        if shape.ndims == 5:
            # Flatten a 5-D input down to (batch, features).
            flat_dim = 1
            for d in shape[1:].as_list():
                flat_dim *= d
            flat_input = tf.reshape(input_data, [-1, flat_dim])
        else:
            flat_input, flat_dim = input_data, shape[-1].value
        weights = tf.get_variable(
            name="weights", shape=[flat_dim, num_output],
            regularizer=tf.contrib.layers.l2_regularizer(scale=0.0001),
            initializer=tf.truncated_normal_initializer(stddev=1e-1, dtype=tf.float32))
        biases = tf.get_variable(
            name="biases", shape=[num_output], dtype=tf.float32,
            initializer=tf.constant_initializer(value=0.0))
        # relu_layer fuses xw_plus_b with a ReLU; otherwise stay linear.
        linear = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
        return batch_norm(linear(flat_input, weights, biases, name=scope.name))
开发者ID:PacktPublishing,项目名称:Machine-Learning-with-TensorFlow-1.x,代码行数:21,代码来源:nets.py

示例5: conv2d

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def conv2d(
        inputs, filters, bias=None,
        strides=None, padding='SAME', dilations=None,
        to_batch_norm=False, batch_norm_decay=0.997, is_training=True, activation_fn=None, name=None
):
    """Thin wrapper around tf.nn.conv2d with optional bias, BN and activation.

    Args:
        inputs: input tensor passed to tf.nn.conv2d.
        filters: convolution kernel tensor.
        bias: optional bias vector added after the convolution.
        strides: conv strides; defaults to [1, 1, 1, 1] when None.
        padding: 'SAME' or 'VALID'.
        dilations: conv dilations; defaults to [1, 1, 1, 1] when None.
        to_batch_norm: when True, applies the module-level batch_norm helper.
        batch_norm_decay: decay passed to that helper.
        is_training: training flag passed to that helper.
        activation_fn: optional activation applied last.
        name: optional op name.

    Returns:
        The resulting output tensor.
    """
    # Fix: the original used mutable list defaults (list([1, 1, 1, 1])) —
    # a shared-state hazard. A None sentinel keeps the call sites unchanged.
    if strides is None:
        strides = [1, 1, 1, 1]
    if dilations is None:
        dilations = [1, 1, 1, 1]

    output = tf.nn.conv2d(
        input=inputs,
        filter=filters,
        strides=strides,
        padding=padding,
        dilations=dilations,
        name=name
    )

    if bias is not None:
        output = tf.nn.bias_add(output, bias)
    if to_batch_norm:
        # batch_norm is a module-level helper defined elsewhere in this file.
        output = batch_norm(output, is_training, batch_norm_decay)
    if activation_fn is not None:
        output = activation_fn(output)
    return output
开发者ID:POSTECH-IMLAB,项目名称:LaneSegmentationNetwork,代码行数:23,代码来源:tf_util.py

示例6: multi_conv2d

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def multi_conv2d(inputs, filters: tf.Tensor, bias=None, stride=list([1, 1, 1, 1]),
                 padding='SAME', basis_rate=list([1, 3, 5]), to_batch_norm=False, batch_norm_decay=0.997,
                 is_training=True, activation_fn=None):
    """Average a plain conv with atrous convs at several dilation rates.

    NOTE(review): mutable default arguments (`stride`, `basis_rate`) are a
    Python anti-pattern — harmless only while no caller mutates them.
    """
    _number_of_basis = len(basis_rate)
    if _number_of_basis < 2:
        raise ValueError('Number of basis_rate must be larger or equal than 2')

    # Base (non-dilated) convolution.
    output = conv2d(inputs, filters, bias, stride, padding)
    # One atrous convolution per dilation rate; `idx` is unused.
    for idx, r in enumerate(basis_rate):
        output += atrous_conv2d(inputs, filters, r, bias, padding, stride)
    # NOTE(review): len(basis_rate) + 1 terms are summed but divided by
    # len(basis_rate) — possibly an off-by-one; confirm intent upstream.
    output /= _number_of_basis

    if to_batch_norm:
        # batch_norm is a module-level helper defined elsewhere in this file.
        output = batch_norm(output, is_training, batch_norm_decay)

    if activation_fn is not None:
        output = activation_fn(output)

    return output 
开发者ID:POSTECH-IMLAB,项目名称:LaneSegmentationNetwork,代码行数:21,代码来源:tf_util.py

示例7: scale_conv2d

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def scale_conv2d(inputs, filters: tf.Tensor, bias=None, stride=None, padding='SAME',
                 initial_step=1, number_of_step=5, step_multiplier=1.25,
                 to_batch_norm=False, batch_norm_decay=0.997, is_training=True, activation_fn=None):
    """Average bilinear convolutions over a geometric series of scale steps.

    Runs `bilinear_conv2d` at `number_of_step` scales starting from
    `initial_step` and multiplying by `step_multiplier` each iteration,
    then averages the responses. Optionally applies batch norm and an
    activation, mirroring conv2d/multi_conv2d in this file.

    Args:
        stride: conv strides; defaults to [1, 1, 1, 1] when None.
        (remaining parameters as in conv2d above.)

    Returns:
        The averaged (and optionally normalized/activated) output tensor.
    """
    # Fix: the original used a mutable list default (list([1, 1, 1, 1])) —
    # a shared-state hazard. A None sentinel keeps call sites unchanged.
    if stride is None:
        stride = [1, 1, 1, 1]

    _step = initial_step
    output = bilinear_conv2d(inputs, filters, _step, bias, padding, stride)

    # Accumulate responses at geometrically increasing scale steps.
    for _ in range(1, number_of_step):
        _step *= step_multiplier
        output += bilinear_conv2d(inputs, filters, _step, bias, padding, stride)
    output /= number_of_step

    if to_batch_norm:
        # batch_norm is a module-level helper defined elsewhere in this file.
        output = batch_norm(output, is_training, batch_norm_decay)

    if activation_fn is not None:
        output = activation_fn(output)

    return output
开发者ID:POSTECH-IMLAB,项目名称:LaneSegmentationNetwork,代码行数:20,代码来源:tf_util.py

示例8: inception_v2_arg_scope

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def inception_v2_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars'):
  """Defines the default InceptionV2 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.

  Returns:
    An `arg_scope` to use for the inception v3 model.
  """
  bn_params = {
      'decay': 0.9997,     # moving-average decay
      'epsilon': 0.001,    # guards against zero variance
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
      # Route moving mean/variance into a dedicated collection; beta and
      # gamma stay in the default variable collections.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  weight_reg = regularizers.l2_regularizer(weight_decay)
  # L2 decay on conv and FC weights; convs additionally get BN + ReLU.
  with arg_scope([layers.conv2d, layers_lib.fully_connected],
                 weights_regularizer=weight_reg):
    with arg_scope([layers.conv2d],
                   weights_initializer=initializers.variance_scaling_initializer(),
                   activation_fn=nn_ops.relu,
                   normalizer_fn=layers_lib.batch_norm,
                   normalizer_params=bn_params) as sc:
      return sc
开发者ID:MingtaoGuo,项目名称:Chinese-Character-and-Calligraphic-Image-Processing,代码行数:41,代码来源:inception_v2.py

示例9: BatchNormClassifier

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def BatchNormClassifier(inputs):
  """Batch-normalize inputs, then apply a one-unit sigmoid linear layer."""
  normalized = layers.batch_norm(inputs, decay=0.1)
  return layers.fully_connected(normalized, 1, activation_fn=math_ops.sigmoid)
开发者ID:abhisuri97,项目名称:auto-alt-text-lambda-api,代码行数:5,代码来源:learning_test.py

示例10: inception

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def inception():
    """Build an InceptionV1 graph (inference mode) and return its pool5
    features together with the input placeholder."""
    image = tf.placeholder(tf.float32, [None, 224, 224, 3], 'image')
    with slim.arg_scope(inception_arg_scope(is_training=False)):
        with variable_scope.variable_scope(
                'InceptionV1', 'InceptionV1', [image, 1000], reuse=None) as scope:
            # Force BN and dropout into inference mode for the whole base.
            with arg_scope(
                    [layers_lib.batch_norm, layers_lib.dropout], is_training=False):
                backbone, _ = inception_v1_base(image, scope=scope)
                with variable_scope.variable_scope('Logits'):
                    pooled = layers_lib.avg_pool2d(
                        backbone, [7, 7], stride=1, scope='MaxPool_0a_7x7')
    print(pooled.shape)

    return pooled, image
开发者ID:JudyYe,项目名称:zero-shot-gcn,代码行数:16,代码来源:extract_pool5.py

示例11: forward_train

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def forward_train(self, train_input):
        """Forward pass in training mode: BN updates enabled, weights regularized."""
        bn_config = {'epsilon': 1e-5,
                     'scale': True,
                     'is_training': True,
                     'updates_collections': ops.GraphKeys.UPDATE_OPS}

        with slim.arg_scope([layers.batch_norm], **bn_config):
            with slim.arg_scope(
                    [slim.conv2d],
                    weights_initializer=he_normal_fanout(),
                    weights_regularizer=slim.l2_regularizer(
                        self.cfg['NET']['weight_l2_scale'])):
                return self._forward(train_input)
开发者ID:yuanyuanli85,项目名称:tf-hrnet,代码行数:16,代码来源:model.py

示例12: forward_eval

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def forward_eval(self, eval_input):
        """Forward pass in evaluation mode: BN uses frozen moving statistics."""
        bn_config = {'epsilon': 1e-5,
                     'scale': True,
                     'is_training': False,
                     'updates_collections': ops.GraphKeys.UPDATE_OPS}

        with slim.arg_scope([layers.batch_norm], **bn_config):
            with slim.arg_scope(
                    [slim.conv2d],
                    weights_regularizer=slim.l2_regularizer(
                        self.cfg['NET']['weight_l2_scale'])):
                return self._forward(eval_input)
开发者ID:yuanyuanli85,项目名称:tf-hrnet,代码行数:15,代码来源:model.py

示例13: resnet_arg_scope

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def resnet_arg_scope(is_training=True,
                     weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  """Default arg_scope for a ResNet backbone with frozen BN statistics."""
  # 'is_training' is forced to False for BN because resnet_v1 resets it
  # internally anyway:
  # https://github.com/tensorflow/models/blob/master/slim/nets/resnet_v1.py#L187
  bn_params = {
    'is_training': False,
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'trainable': cfg.RESNET.BN_TRAIN,  # config decides if beta/gamma train
    'updates_collections': ops.GraphKeys.UPDATE_OPS
  }

  with arg_scope([slim.conv2d],
                 weights_regularizer=regularizers.l2_regularizer(weight_decay),
                 weights_initializer=initializers.variance_scaling_initializer(),
                 trainable=is_training,
                 activation_fn=nn_ops.relu,
                 normalizer_fn=layers.batch_norm,
                 normalizer_params=bn_params):
    with arg_scope([layers.batch_norm], **bn_params) as arg_sc:
      return arg_sc
开发者ID:pengzhou1108,项目名称:RGB-N,代码行数:28,代码来源:resnet_fusion_noise.py

示例14: BatchNormClassifier

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def BatchNormClassifier(self, inputs):
        """Batch-normalize inputs, then apply a one-unit sigmoid linear layer."""
        normalized = layers.batch_norm(inputs, decay=0.1, fused=None)
        return layers.fully_connected(normalized, 1, activation_fn=math_ops.sigmoid)
开发者ID:kujason,项目名称:avod,代码行数:5,代码来源:trainer_test.py

示例15: _conv3d

# 需要导入模块: from tensorflow.contrib.layers.python.layers import layers [as 别名]
# 或者: from tensorflow.contrib.layers.python.layers.layers import batch_norm [as 别名]
def _conv3d(input_data, k_d, k_h, k_w, c_o, s_d, s_h, s_w, name, relu=True, padding="SAME"):
    """3-D convolution + bias (+ optional ReLU), followed by batch norm."""
    in_channels = input_data.get_shape()[-1].value
    with tf.variable_scope(name) as scope:
        kernel = tf.get_variable(
            name="weights", shape=[k_d, k_h, k_w, in_channels, c_o],
            regularizer=tf.contrib.layers.l2_regularizer(scale=0.0001),
            initializer=tf.truncated_normal_initializer(stddev=1e-1, dtype=tf.float32))
        conv = tf.nn.conv3d(input_data, kernel, [1, s_d, s_h, s_w, 1], padding=padding)
        biases = tf.get_variable(
            name="biases", shape=[c_o], dtype=tf.float32,
            initializer=tf.constant_initializer(value=0.0))
        output = tf.nn.bias_add(conv, biases)
        if relu:
            output = tf.nn.relu(output, name=scope.name)
        return batch_norm(output)
开发者ID:PacktPublishing,项目名称:Machine-Learning-with-TensorFlow-1.x,代码行数:17,代码来源:nets.py


注:本文中的tensorflow.contrib.layers.python.layers.layers.batch_norm方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。