This article collects typical usage examples of the Python method tensorpack.models.BatchNorm. If you have been wondering what models.BatchNorm does and how to use it, the hand-picked examples below should help. You can also explore further usage examples from the tensorpack.models module.
Below are 8 code examples of models.BatchNorm, sorted by popularity by default.
Example 1: BNNoReLU
# Required import: from tensorpack import models [as alias]
# Alternatively: from tensorpack.models import BatchNorm [as alias]
def BNNoReLU(x, name=None):
    """
    A shorthand of BatchNormalization.
    """
    if name is None:
        x = BatchNorm('bn', x)
    else:
        x = BatchNorm(name, x)
    return x
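Since BNNoReLU matches the (x, name=None) signature that tensorpack expects of an activation, it can be passed to a layer the same way BNReLU is used in Example 2 below. A minimal sketch (layer name illustrative):

l = Conv2D('conv1', l, 64, 3, activation=BNNoReLU)   # conv + batch norm, no trailing ReLU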
Example 2: resnet
# Required import: from tensorpack import models [as alias]
# Alternatively: from tensorpack.models import BatchNorm [as alias]
def resnet(input_, option):
    mode = option.mode
    DEPTH = option.depth
    bottleneck = {'se': se_resnet_bottleneck}[mode]
    cfg = {
        50: ([3, 4, 6, 3], bottleneck),
    }
    defs, block_func = cfg[DEPTH]
    group_func = resnet_group
    with argscope(Conv2D, use_bias=False,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')), \
            argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm],
                     data_format='channels_first'):
        l = Conv2D('conv0', input_, 64, 7, strides=2, activation=BNReLU)
        if option.gating_position[0]: l = gating_op(l, option)
        l = MaxPooling('pool0', l, 3, strides=2, padding='SAME')
        if option.gating_position[1]: l = gating_op(l, option)
        l = group_func('group0', l, block_func, 64, defs[0], 1, option)
        if option.gating_position[2]: l = gating_op(l, option)
        l = group_func('group1', l, block_func, 128, defs[1], 2, option)
        if option.gating_position[3]: l = gating_op(l, option)
        l = group_func('group2', l, block_func, 256, defs[2], 2, option)
        if option.gating_position[4]: l = gating_op(l, option)
        l = group_func('group3', l, block_func, 512, defs[3], 1, option)
        if option.gating_position[5]: l = gating_op(l, option)
        p_logits = GlobalAvgPooling('gap', l)
        logits = FullyConnected('linearnew', p_logits, option.number_of_class)
    return logits, l
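A hypothetical invocation sketch, assuming se_resnet_bottleneck, resnet_group and gating_op are defined elsewhere in the same project; the option object only needs the fields the function actually reads:

from types import SimpleNamespace

option = SimpleNamespace(mode='se', depth=50, number_of_class=1000,
                         gating_position=[False] * 6)  # disable gating at every position
logits, feature_map = resnet(image_tensor, option)     # image_tensor: NCHW float tensor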
Example 3: get_bn
# Required import: from tensorpack import models [as alias]
# Alternatively: from tensorpack.models import BatchNorm [as alias]
def get_bn(zero_init=False):
    if zero_init:
        return lambda x, name=None: BatchNorm(
            'bn', x, gamma_initializer=tf.zeros_initializer())
    else:
        return lambda x, name=None: BatchNorm('bn', x)
Example 4: get_bn
# Required import: from tensorpack import models [as alias]
# Alternatively: from tensorpack.models import BatchNorm [as alias]
def get_bn(zero_init=False):
    """
    Zero init gamma is good for resnet. See https://arxiv.org/abs/1706.02677.
    """
    if zero_init:
        return lambda x, name=None: BatchNorm('bn', x, gamma_initializer=tf.zeros_initializer())
    else:
        return lambda x, name=None: BatchNorm('bn', x)

# ----------------- pre-activation resnet ----------------------
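A sketch of how a get_bn factory like the ones in Examples 3 and 4 is typically wired into a residual branch: the last BatchNorm gets a zero gamma so the branch initially contributes nothing, per the paper linked in the docstring. Layer names here are illustrative:

def residual_branch(l, ch):
    l = Conv2D('conv1', l, ch, 3, activation=get_bn())                 # regular BN
    l = Conv2D('conv2', l, ch, 3, activation=get_bn(zero_init=True))   # branch starts as identity
    return l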
Example 5: backbone_scope
# Required import: from tensorpack import models [as alias]
# Alternatively: from tensorpack.models import BatchNorm [as alias]
@contextmanager   # needs `from contextlib import contextmanager`; the trailing yield makes this a scope
def backbone_scope(freeze):
    """
    Args:
        freeze (bool): whether to freeze all the variables under the scope
    """
    def nonlin(x):
        x = get_norm()(x)
        return tf.nn.relu(x)

    with argscope([Conv2D, MaxPooling, BatchNorm], data_format='channels_first'), \
            argscope(Conv2D, use_bias=False, activation=nonlin,
                     kernel_initializer=tf.variance_scaling_initializer(
                         scale=2.0, mode='fan_out')), \
            ExitStack() as stack:
        if cfg.BACKBONE.NORM in ['FreezeBN', 'SyncBN']:
            if freeze or cfg.BACKBONE.NORM == 'FreezeBN':
                stack.enter_context(argscope(BatchNorm, training=False))
            else:
                stack.enter_context(argscope(
                    BatchNorm, sync_statistics='nccl' if cfg.TRAINER == 'replicated' else 'horovod'))
        if freeze:
            stack.enter_context(freeze_variables(stop_gradient=False, skip_collection=True))
        else:
            # the layers are not completely frozen, but we may want to freeze only the affine part
            if cfg.BACKBONE.FREEZE_AFFINE:
                stack.enter_context(custom_getter_scope(freeze_affine_getter))
        yield
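A usage sketch: build the frozen stem under one scope, then the trainable rest under another. The builder functions are placeholders, and cfg.BACKBONE.FREEZE_AT is assumed to be the usual tensorpack FasterRCNN config knob:

with backbone_scope(freeze=cfg.BACKBONE.FREEZE_AT > 0):
    l = build_stem(image)        # e.g. conv0 + pool0
with backbone_scope(freeze=False):
    features = build_body(l)     # remaining resnet groups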
Example 6: get_norm
# Required import: from tensorpack import models [as alias]
# Alternatively: from tensorpack.models import BatchNorm [as alias]
def get_norm(zero_init=False):
    if cfg.BACKBONE.NORM == 'None':
        return lambda x: x
    if cfg.BACKBONE.NORM == 'GN':
        Norm = GroupNorm
        layer_name = 'gn'
    else:
        Norm = BatchNorm
        layer_name = 'bn'
    return lambda x: Norm(layer_name, x,
                          gamma_initializer=tf.zeros_initializer() if zero_init else None)
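A short sketch of what the returned callable does: with zero_init=True the normalized output starts at zero, so a residual branch ending in this norm initially acts as an identity mapping.

norm = get_norm(zero_init=True)   # BatchNorm or GroupNorm with gamma initialized to 0
out = norm(conv_output)           # all-zero at initialization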
Example 7: RegionNorm
# Required import: from tensorpack import models [as alias]
# Alternatively: from tensorpack.models import BatchNorm [as alias]
def RegionNorm(x, h_group_num, w_group_num, gamma_initializer=tf.constant_initializer(1.)):
    # 1. resize so that h % h_group_num == 0 and w % w_group_num == 0
    orig_shape = x.get_shape().as_list()
    h, w = orig_shape[1], orig_shape[2]
    new_h = get_pad_num(h, h_group_num)
    new_w = get_pad_num(w, w_group_num)
    x_resized = tf.image.resize_images(x, [new_h, new_w], align_corners=False)
    # 2. split into a grid of sub-regions and stack them along a new axis
    assert new_h % h_group_num == 0
    sub_h = new_h // h_group_num
    assert new_w % w_group_num == 0
    sub_w = new_w // w_group_num
    sub_grids = []
    for i in range(0, new_h, sub_h):
        for j in range(0, new_w, sub_w):
            x_sub_grid = x_resized[:, i:i + sub_h, j:j + sub_w, :, None]
            sub_grids.append(x_sub_grid)
    sub_grids = tf.concat(sub_grids, axis=4)
    sub_grids_shape = sub_grids.get_shape().as_list()
    feed2bn = tf.reshape(sub_grids,
                         [-1, sub_grids_shape[1], sub_grids_shape[2] * sub_grids_shape[3],
                          sub_grids_shape[4]])
    # 3. normalization, with the region index as the "channel" axis
    bn_output = BatchNorm('bn', feed2bn, axis=3, gamma_initializer=gamma_initializer,
                          internal_update=True, sync_statistics='nccl')
    # 4. go back to the original layout
    new_sub_grids = tf.reshape(bn_output,
                               [-1, sub_grids_shape[1], sub_grids_shape[2], sub_grids_shape[3],
                                sub_grids_shape[4]])
    counter = 0
    new_rows = []
    for i in range(0, new_h, sub_h):
        new_row = []
        for j in range(0, new_w, sub_w):
            new_row.append(new_sub_grids[:, :, :, :, counter])
            counter += 1
        new_row = tf.concat(new_row, axis=2)
        new_rows.append(new_row)
    new_x_resized = tf.concat(new_rows, axis=1)
    # 5. resize back to the input spatial size
    new_x = tf.image.resize_images(new_x_resized, [h, w], align_corners=False)
    return new_x
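The helper get_pad_num is not shown on this page; a plausible sketch, assuming it rounds a dimension up to the nearest multiple of the group count (which is what the asserts above require):

import math

def get_pad_num(size, group_num):
    # round `size` up to the nearest multiple of `group_num` (assumed behaviour)
    return int(math.ceil(size / float(group_num))) * group_num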
Example 8: vgg_gap
# Required import: from tensorpack import models [as alias]
# Alternatively: from tensorpack.models import BatchNorm [as alias]
def vgg_gap(image, option):
    with argscope(Conv2D, use_bias=True,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.)), \
            argscope([Conv2D, MaxPooling, BatchNorm, GlobalAvgPooling],
                     data_format='channels_first'):
        l = convnormrelu(image, 'conv1_1', 64)
        if option.gating_position[11]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv1_2', 64)
        if option.gating_position[12]: l = gating_op(l, option)
        l = MaxPooling('pool1', l, 2)
        if option.gating_position[1]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv2_1', 128)
        if option.gating_position[21]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv2_2', 128)
        if option.gating_position[22]: l = gating_op(l, option)
        l = MaxPooling('pool2', l, 2)
        if option.gating_position[2]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv3_1', 256)
        if option.gating_position[31]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv3_2', 256)
        if option.gating_position[32]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv3_3', 256)
        if option.gating_position[33]: l = gating_op(l, option)
        l = MaxPooling('pool3', l, 2)
        if option.gating_position[3]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv4_1', 512)
        if option.gating_position[41]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv4_2', 512)
        if option.gating_position[42]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv4_3', 512)
        if option.gating_position[43]: l = gating_op(l, option)
        l = MaxPooling('pool4', l, 2)
        if option.gating_position[4]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv5_1', 512)
        if option.gating_position[51]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv5_2', 512)
        if option.gating_position[52]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv5_3', 512)
        if option.gating_position[53]: l = gating_op(l, option)
        convmaps = convnormrelu(l, 'new', 1024)
        if option.gating_position[6]: convmaps = gating_op(convmaps, option)  # gate the fresh conv maps
        p_logits = GlobalAvgPooling('gap', convmaps)
        logits = FullyConnected('linear', p_logits, option.number_of_class,
                                kernel_initializer=tf.random_normal_initializer(stddev=0.01))
    return logits, convmaps
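convnormrelu is defined elsewhere in the project; a minimal sketch consistent with its call sites above (a named conv followed by BatchNorm and ReLU; the 3x3 kernel is an assumption):

def convnormrelu(x, name, channels):
    x = Conv2D(name, x, channels, 3)                  # conv layer scoped by `name`
    x = BatchNorm(name + '_bn', x)                    # normalize before the nonlinearity
    return tf.nn.relu(x, name=name + '_relu')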