This article collects typical usage examples of the Python method tensorflow.contrib.layers.python.layers.batch_norm. If you are wondering what layers.batch_norm does, how to call it, or where to find working samples, the curated code examples here may help. You can also explore further usage of the containing module, tensorflow.contrib.layers.python.layers.
The sections below show 15 code examples of layers.batch_norm, sorted by popularity by default.
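Before the examples, a minimal sketch of the canonical import and call. This assumes TensorFlow 1.x with tf.contrib available; the placeholder shapes and names are illustrative, not taken from any example below.

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import batch_norm

x = tf.placeholder(tf.float32, [None, 32, 32, 16])
is_training = tf.placeholder(tf.bool, [])

# is_training switches between batch statistics (training) and the
# moving averages accumulated with `decay` (inference).
y = batch_norm(x, decay=0.9, center=True, scale=True,
               is_training=is_training, updates_collections=None,
               scope='bn_demo')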
Example 1: basic_residual_block
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def basic_residual_block(input, planes, scope, kernel_size=3, stride=1, downsamplefn=None):
    '''
    :param input: input feature map (NHWC)
    :param planes: kernel number, i.e. output channels
    :param scope: variable scope name
    :param kernel_size: convolution kernel size
    :param stride: stride of the first convolution
    :param downsamplefn: optional downsample fn applied to the shortcut
    :return: output feature map after the residual add and ReLU
    '''
    residual = input
    with tf.variable_scope(scope):
        _out = slim.conv2d(input, num_outputs=planes, kernel_size=[kernel_size, kernel_size],
                           stride=stride, activation_fn=tf.nn.relu, normalizer_fn=batch_norm)
        # the second conv keeps resolution and carries no activation,
        # so ReLU fires only after the residual add
        _out = slim.conv2d(_out, num_outputs=planes, kernel_size=[kernel_size, kernel_size],
                           stride=1, activation_fn=None, normalizer_fn=batch_norm)
        if downsamplefn is not None:
            residual = downsamplefn(residual)
        out = _out + residual
        out = tf.nn.relu(out)
        return out
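A hypothetical call of the block above, assuming slim = tf.contrib.slim is in scope (as the example implies) and an NHWC tensor named images; both names and parameter values are illustrative.

# Hypothetical usage; `images`, `planes`, and `scope` values are illustrative.
net = basic_residual_block(images, planes=64, scope='res_block_1')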
Example 2: upsample_block
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def upsample_block(input, ratio, planes, scope):
    '''
    :param input: input feature map (NHWC)
    :param ratio: spatial upsampling ratio
    :param planes: number of output channels
    :param scope: variable scope name
    :return: upsampled feature map
    '''
    with tf.variable_scope(scope):
        _out = slim.conv2d(input, num_outputs=planes, kernel_size=[1, 1], stride=1,
                           activation_fn=None, normalizer_fn=batch_norm, padding='SAME')
        shape = _out.shape
        # scale height (shape[1]) and width (shape[2]) by `ratio` with nearest-neighbor resizing
        _out = tf.image.resize_nearest_neighbor(_out, (shape[1] * ratio, shape[2] * ratio))
        return _out
Example 3: forward
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def forward(self, input):
    with tf.variable_scope(self.scope):
        # conv1 + bn1 + relu1
        _out = slim.conv2d(input, num_outputs=self.num_channels, kernel_size=[3, 3],
                           stride=2, activation_fn=tf.nn.relu, normalizer_fn=batch_norm)
        # conv2 + bn2 + relu2
        _out = slim.conv2d(_out, num_outputs=self.num_channels, kernel_size=[3, 3],
                           stride=2, activation_fn=tf.nn.relu, normalizer_fn=batch_norm)
        # bottleneck blocks
        for i in range(self.num_blocks):
            _out = bottleneck_block(_out, planes=self.bottleneck_channels,
                                    scope='_BN' + str(i), downsamplefn=trans_block if i == 0 else None)
        # one 3x3 keeps the same resolution and one 3x3 goes to 1/2x resolution
        _same_res = slim.conv2d(_out, num_outputs=self.output_channels[0], kernel_size=[3, 3],
                                stride=1, activation_fn=tf.nn.relu, normalizer_fn=batch_norm)
        _half_res = slim.conv2d(_out, num_outputs=self.output_channels[1], kernel_size=[3, 3],
                                stride=2, activation_fn=tf.nn.relu, normalizer_fn=batch_norm)
        return [_same_res, _half_res]
Example 4: normalize
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def normalize(inp, activation, reuse, scope):
    """The function to forward the normalization.
    Args:
        inp: the input feature maps.
        activation: the activation function for this conv layer.
        reuse: whether to reuse the variables for the batch norm.
        scope: the label for this conv layer.
    Return:
        The processed feature maps.
    """
    if FLAGS.norm == 'batch_norm':
        return tf_layers.batch_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    elif FLAGS.norm == 'layer_norm':
        return tf_layers.layer_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    elif FLAGS.norm == 'None':
        if activation is not None:
            return activation(inp)
        return inp
    else:
        raise ValueError('Please set a correct normalization.')
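The helper dispatches on a string flag named norm. A hedged sketch of how such a flag might be declared in TF 1.x; the flag name and accepted values come from the example, while the default shown is an assumption.

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
# assumed default; must be one of 'batch_norm', 'layer_norm', or 'None'
tf.app.flags.DEFINE_string('norm', 'batch_norm',
                           "normalization: 'batch_norm', 'layer_norm', or 'None'")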
Example 5: batch_normalization
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def batch_normalization(input_data, name, activation_method="relu", is_train=True):
    """
    BN layer
    :param input_data: the input data
    :param name: name of the variable scope
    :param activation_method: the method of activation function
    :param is_train: if False, use the moving statistics (inference mode); default is True
    :return:
        output: output after batch normalization
    """
    output = batch_norm(inputs=input_data, decay=0.9, center=True, scale=True, epsilon=1e-5,
                        scope=name, updates_collections=None, reuse=tf.AUTO_REUSE,
                        is_training=is_train, zero_debias_moving_mean=True)
    output = activation_layer(input_data=output,
                              activation_method=activation_method)
    print("name: %s, activation: %s, is_train: %r" % (name, activation_method, is_train))
    return output
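This example passes updates_collections=None, which makes batch_norm update its moving averages in place as part of the forward op. If that argument is left at its default, the update ops land in tf.GraphKeys.UPDATE_OPS and must be run explicitly; a common TF 1.x sketch, where loss and optimizer are assumptions not present in the example:

# Sketch only: `loss` and `optimizer` are assumed to exist elsewhere.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss)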
Example 6: batch_norm_layer
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def batch_norm_layer(x, train_phase, scope_bn):
    outputs = tf.contrib.layers.batch_norm(x, is_training=train_phase, center=False, scale=False,
                                           activation_fn=tf.nn.relu, updates_collections=None,
                                           scope=scope_bn)
    return outputs

# def batch_norm_layer(x, train_phase, scope_bn):
#     bn_train = batch_norm(x, decay=0.999, center=True, scale=True,
#                           updates_collections=None,
#                           is_training=True,
#                           reuse=None,  # is this right?
#                           trainable=True,
#                           scope=scope_bn)
#     bn_inference = batch_norm(x, decay=0.999, center=True, scale=True,
#                               updates_collections=None,
#                               is_training=False,
#                               reuse=True,  # is this right?
#                               trainable=True,
#                               scope=scope_bn)
#     z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
#     return z
Example 7: batch_norm_layer
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def batch_norm_layer(x, train_phase, scope_bn):
    bn_train = batch_norm(x, decay=0.999, center=True, scale=True,
                          updates_collections=None,
                          is_training=True,
                          reuse=None,  # is this right?
                          trainable=True,
                          scope=scope_bn)
    bn_inference = batch_norm(x, decay=0.999, center=True, scale=True,
                              updates_collections=None,
                              is_training=False,
                              reuse=True,  # is this right?
                              trainable=True,
                              scope=scope_bn)
    z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
    return z

# def batch_norm_layer(x, train_phase, scope_bn):
#     outputs = tf.contrib.layers.batch_norm(x, is_training=train_phase, center=False, scale=False,
#                                            activation_fn=tf.nn.relu, updates_collections=None,
#                                            scope=scope_bn)
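The live version above builds two batch_norm ops over the same scope, with reuse=True on the inference branch so both share a single set of variables, then selects between them with tf.cond. A hypothetical way to drive it; the placeholder name and scope are illustrative:

# Hypothetical usage of the tf.cond pattern; `x` is an input tensor.
train_phase = tf.placeholder(tf.bool, name='train_phase')
h = batch_norm_layer(x, train_phase=train_phase, scope_bn='bn1')
# feed {train_phase: True} during training and {train_phase: False} at inference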
Example 8: batch_norm_layer
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def batch_norm_layer(self, x, train_phase, scope_bn):
    bn_train = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
                          is_training=True, reuse=None, trainable=True, scope=scope_bn)
    bn_inference = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
                              is_training=False, reuse=True, trainable=True, scope=scope_bn)
    z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
    return z
Example 9: normalize
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def normalize(inp, activation, reuse, scope):
    if FLAGS.norm == 'batch_norm':
        return tf_layers.batch_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    elif FLAGS.norm == 'layer_norm':
        return tf_layers.layer_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    elif FLAGS.norm == 'None':
        if activation is not None:
            return activation(inp)
        else:
            return inp
Example 10: batch_normal
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def batch_normal(input, scope="scope", reuse=False):
    return batch_norm(input, epsilon=1e-5, decay=0.9, scale=True, scope=scope, reuse=reuse,
                      fused=True, updates_collections=None)
Example 11: normalize
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def normalize(inp, activation, reuse, scope):
    if FLAGS.norm == 'batch_norm':
        return tf_layers.batch_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    elif FLAGS.norm == 'layer_norm':
        return tf_layers.layer_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    elif FLAGS.norm == 'None':
        if activation is not None:
            return activation(inp)
        else:
            return inp
Example 12: bottleneck_block
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def bottleneck_block(input, planes, scope, stride=1, downsamplefn=None):
    '''
    :param input: input feature map (NHWC)
    :param planes: kernel number, i.e. output channels
    :param scope: variable scope name
    :param stride: stride of the middle 3x3 convolution
    :param downsamplefn: optional downsample fn applied to the shortcut
    :return: output feature map after the residual add and ReLU
    '''
    expansion = 4
    residual = input
    with tf.variable_scope(scope):
        # conv1 1x1 + bn1 + relu
        _out = slim.conv2d(input, num_outputs=planes // expansion, kernel_size=[1, 1],
                           stride=1, activation_fn=tf.nn.relu, normalizer_fn=batch_norm)
        # conv2 3x3 + bn2 + relu
        _out = slim.conv2d(_out, num_outputs=planes // expansion, kernel_size=[3, 3],
                           stride=stride, activation_fn=tf.nn.relu, normalizer_fn=batch_norm)
        # conv3 1x1 + bn3 (no activation; ReLU comes after the residual add)
        _out = slim.conv2d(_out, num_outputs=planes, kernel_size=[1, 1],
                           stride=1, activation_fn=None, normalizer_fn=batch_norm)
        if downsamplefn is not None:
            residual = downsamplefn(residual, planes)
        out = _out + residual
        out = tf.nn.relu(out)
        return out
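Example 3 above pairs this block with trans_block (shown in Example 13 below) as the 1x1 shortcut projection on the first block of a stage. A hypothetical standalone call; the tensor name and parameter values are illustrative:

# Hypothetical usage; `feat` is an NHWC tensor whose channel count differs from `planes`.
out = bottleneck_block(feat, planes=256, scope='bottleneck_0', downsamplefn=trans_block)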
Example 13: trans_block
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def trans_block(input, planes):
    # conv 1x1 + bn, projects the shortcut to `planes` channels
    _out = slim.conv2d(input, num_outputs=planes, kernel_size=[1, 1],
                       stride=1, activation_fn=None, normalizer_fn=batch_norm)
    return _out
Example 14: downsample_block
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def downsample_block(input, planes, scope, has_relu):
    '''
    :param input: input feature map (NHWC)
    :param planes: number of output channels
    :param scope: variable scope name
    :param has_relu: whether to apply ReLU after the conv + bn
    :return: feature map downsampled to 1/2 resolution
    '''
    with tf.variable_scope(scope):
        _out = slim.conv2d(input, num_outputs=planes, kernel_size=[3, 3], stride=2,
                           activation_fn=tf.nn.relu if has_relu else None,
                           normalizer_fn=batch_norm, padding='SAME')
        return _out
Example 15: batch_normal
# Module to import: from tensorflow.contrib.layers.python import layers [as alias]
# Or: from tensorflow.contrib.layers.python.layers import batch_norm [as alias]
def batch_normal(input, scope="scope", reuse=False):
    return batch_norm(input, epsilon=1e-5, decay=0.9, scale=True, scope=scope, reuse=reuse,
                      fused=True, updates_collections=None)