This page collects typical usage examples of the Python method tensorpack.models.Conv2D. If you have been struggling with questions such as: what exactly does models.Conv2D do? How is it called? What does real code that uses models.Conv2D look like? then the curated examples below may help. You can also explore further usage examples from the containing module, tensorpack.models.
The following shows 15 code examples of models.Conv2D, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
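Before the examples, here is a minimal sketch (not taken from any snippet below) of the calling convention they all rely on: tensorpack registers Conv2D as a named layer, so the first positional argument is a variable-scope name, followed by the input tensor and the usual tf.layers-style keyword arguments. The placeholder shape is an arbitrary assumption.

import tensorflow as tf
from tensorpack.models import Conv2D
from tensorpack.tfutils.tower import TowerContext

x = tf.placeholder(tf.float32, [None, 32, 32, 3], name='input')
with TowerContext('', is_training=True):  # tensorpack graphs are normally built inside a tower
    y = Conv2D('conv0', x, filters=16, kernel_size=3, activation=tf.nn.relu)
print(y)  # -> shape [None, 32, 32, 16]; padding defaults to 'same'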
Example 1: res_blk
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def res_blk(name, l, ch, ksize, count, split=1, strides=1, freeze=False):
    ch_in = l.get_shape().as_list()
    with tf.variable_scope(name):
        for i in range(0, count):
            with tf.variable_scope('block' + str(i)):
                x = l if i == 0 else BNReLU('preact', l)
                x = Conv2D('conv1', x, ch[0], ksize[0], activation=BNReLU)
                x = Conv2D('conv2', x, ch[1], ksize[1], split=split,
                           strides=strides if i == 0 else 1, activation=BNReLU)
                x = Conv2D('conv3', x, ch[2], ksize[2], activation=tf.identity)
                if (strides != 1 or ch_in[1] != ch[2]) and i == 0:
                    l = Conv2D('convshortcut', l, ch[2], 1, strides=strides)
                x = tf.stop_gradient(x) if freeze else x
                l = l + x
        # the end of each group needs an extra activation
        l = BNReLU('bnlast', l)
    return l
####
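Note that the shortcut test above reads ch_in[1], so it assumes channels-first (NCHW) tensors, matching the data_format the surrounding snippets configure. A hypothetical standalone call, mirroring how example 3 below uses it:

# Assumption: `l` is an NCHW feature map; a 4-block group whose 1x1/3x3/1x1
# convs produce 128/128/512 channels and whose first block downsamples by 2.
l = res_blk('group1', l, [128, 128, 512], [1, 3, 1], count=4, strides=2)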
Example 2: dense_blk
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def dense_blk(name, l, ch, ksize, count, split=1, padding='valid'):
    with tf.variable_scope(name):
        for i in range(0, count):
            with tf.variable_scope('blk/' + str(i)):
                x = BNReLU('preact_bna', l)
                x = Conv2D('conv1', x, ch[0], ksize[0], padding=padding, activation=BNReLU)
                x = Conv2D('conv2', x, ch[1], ksize[1], padding=padding, split=split)
                ##
                if padding == 'valid':
                    x_shape = x.get_shape().as_list()
                    l_shape = l.get_shape().as_list()
                    l = crop_op(l, (l_shape[2] - x_shape[2],
                                    l_shape[3] - x_shape[3]))
                l = tf.concat([l, x], axis=1)
        l = BNReLU('blk_bna', l)
    return l
####
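crop_op is not defined on this page; judging by the NCHW indexing above (shape[2] and shape[3] are spatial), it center-crops the skip tensor so it can be concatenated with the 'valid'-padded branch. A plausible sketch, offered as an assumption rather than the repository's exact code:

def crop_op(x, cropping, data_format='channels_first'):
    # Center-crop `x` by `cropping` = (rows to remove, cols to remove);
    # assumes a strictly positive crop on both axes.
    crop_t = cropping[0] // 2
    crop_b = cropping[0] - crop_t
    crop_l = cropping[1] // 2
    crop_r = cropping[1] - crop_l
    if data_format == 'channels_first':
        return x[:, :, crop_t:-crop_b, crop_l:-crop_r]
    return x[:, crop_t:-crop_b, crop_l:-crop_r, :]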
Example 3: encoder
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def encoder(i, freeze):
    """
    Pre-activated ResNet50 Encoder
    """
    d1 = Conv2D('conv0', i, 64, 7, padding='valid', strides=1, activation=BNReLU)
    d1 = res_blk('group0', d1, [ 64,  64,  256], [1, 3, 1], 3, strides=1, freeze=freeze)

    d2 = res_blk('group1', d1, [128, 128,  512], [1, 3, 1], 4, strides=2, freeze=freeze)
    d2 = tf.stop_gradient(d2) if freeze else d2

    d3 = res_blk('group2', d2, [256, 256, 1024], [1, 3, 1], 6, strides=2, freeze=freeze)
    d3 = tf.stop_gradient(d3) if freeze else d3

    d4 = res_blk('group3', d3, [512, 512, 2048], [1, 3, 1], 3, strides=2, freeze=freeze)
    d4 = tf.stop_gradient(d4) if freeze else d4

    d4 = Conv2D('conv_bot', d4, 1024, 1, padding='same')
    return [d1, d2, d3, d4]
####
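A hypothetical wiring of this encoder with the decoder from example 12 below, assuming an NCHW image tensor; the decoder indexes the returned feature list from the back (i[-1] is d4):

feats = encoder(image, freeze=False)  # [d1, d2, d3, d4]
outs = decoder('decoder', feats)      # consumes feats[-1] ... feats[-4]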
Example 4: se_resnet_bottleneck
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def se_resnet_bottleneck(option, l, ch_out, stride, adl_index=None):
    shortcut = l
    l = Conv2D('conv1', l, ch_out, 1, activation=BNReLU)
    l = Conv2D('conv2', l, ch_out, 3, strides=stride, activation=BNReLU)
    l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_bn(zero_init=True))

    squeeze = FullyConnected('fc1', GlobalAvgPooling('gap', l),
                             ch_out // 4, activation=tf.nn.relu)
    squeeze = FullyConnected('fc2', squeeze, ch_out * 4, activation=tf.nn.sigmoid)

    ch_ax = 1 if is_data_format_nchw() else 3
    shape = [-1, 1, 1, 1]
    shape[ch_ax] = ch_out * 4
    l = l * tf.reshape(squeeze, shape)
    out = l + resnet_shortcut(shortcut, ch_out * 4, stride, activation=get_bn())
    out = tf.nn.relu(out)

    if option.gating_position[adl_index]:
        out = gating_op(out, option)
    return out
Example 5: se_bottleneck
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def se_bottleneck(l, ch_out, stride):
    shortcut = l
    l = Conv2D('conv1', l, ch_out, 1, activation=BNReLU)
    l = Conv2D('conv2', l, ch_out, 3, strides=stride, activation=BNReLU)
    l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_bn(zero_init=True))

    squeeze = GlobalAvgPooling('gap', l)
    squeeze = FullyConnected('fc1', squeeze, ch_out // 4, activation=tf.nn.relu)
    squeeze = FullyConnected('fc2', squeeze, ch_out * 4, activation=tf.nn.sigmoid)

    data_format = get_arg_scope()['Conv2D']['data_format']
    ch_ax = 1 if data_format in ['NCHW', 'channels_first'] else 3
    shape = [-1, 1, 1, 1]
    shape[ch_ax] = ch_out * 4
    l = l * tf.reshape(squeeze, shape)
    out = l + resnet_shortcut(shortcut, ch_out * 4, stride, activation=get_bn(zero_init=False))
    return tf.nn.relu(out)
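resnet_shortcut is used by several examples on this page without being shown. In tensorpack's reference ResNet code it is, roughly, a 1x1 strided projection applied only when the channel count changes; reproduced here from memory, so treat it as a sketch:

def resnet_shortcut(l, n_out, stride, activation=tf.identity):
    data_format = get_arg_scope()['Conv2D']['data_format']
    n_in = l.shape[1 if data_format in ['NCHW', 'channels_first'] else 3]
    if n_in != n_out:  # project only when the channel count changes
        return Conv2D('convshortcut', l, n_out, 1, strides=stride, activation=activation)
    return l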
Example 6: fastrcnn_Xconv1fc_head
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def fastrcnn_Xconv1fc_head(feature, num_convs, norm=None):
    """
    Args:
        feature (NCHW): input feature map
        num_convs (int): number of conv layers
        norm (str or None): either None or 'GN'

    Returns:
        2D head feature
    """
    assert norm in [None, 'GN'], norm
    l = feature
    with argscope(Conv2D, data_format='channels_first',
                  kernel_initializer=tf.variance_scaling_initializer(
                      scale=2.0, mode='fan_out',
                      distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):
        for k in range(num_convs):
            l = Conv2D('conv{}'.format(k), l, cfg.FPN.FRCNN_CONV_HEAD_DIM, 3, activation=tf.nn.relu)
            if norm is not None:
                l = GroupNorm('gn{}'.format(k), l)
        l = FullyConnected('fc', l, cfg.FPN.FRCNN_FC_HEAD_DIM,
                           kernel_initializer=tf.variance_scaling_initializer(), activation=tf.nn.relu)
    return l
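argscope, used above and in several later examples, installs default keyword arguments for the named layers inside a with block; explicit arguments at the call site still take precedence. A minimal illustration, assuming some input tensor x:

with argscope(Conv2D, data_format='channels_first', use_bias=False):
    a = Conv2D('c1', x, 64, 3)                 # inherits both defaults
    b = Conv2D('c2', a, 64, 3, use_bias=True)  # overrides one of them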
Example 7: rpn_head
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def rpn_head(featuremap, channel, num_anchors):
    """
    Returns:
        label_logits: fHxfWxNA
        box_logits: fHxfWxNAx4
    """
    with argscope(Conv2D, data_format='channels_first',
                  kernel_initializer=tf.random_normal_initializer(stddev=0.01)):
        hidden = Conv2D('conv0', featuremap, channel, 3, activation=tf.nn.relu)

        label_logits = Conv2D('class', hidden, num_anchors, 1)
        box_logits = Conv2D('box', hidden, 4 * num_anchors, 1)
        # 1, NA(*4), im/16, im/16 (NCHW)

        label_logits = tf.transpose(label_logits, [0, 2, 3, 1])  # 1xfHxfWxNA
        label_logits = tf.squeeze(label_logits, 0)  # fHxfWxNA

        shp = tf.shape(box_logits)  # 1x(NAx4)xfHxfW
        box_logits = tf.transpose(box_logits, [0, 2, 3, 1])  # 1xfHxfWx(NAx4)
        box_logits = tf.reshape(box_logits, tf.stack([shp[2], shp[3], num_anchors, 4]))  # fHxfWxNAx4
        return label_logits, box_logits
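A hypothetical call on the C4 backbone feature from example 9 below; the config names are the ones tensorpack's Faster R-CNN example uses, assumed here rather than shown on this page:

label_logits, box_logits = rpn_head(
    c4, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)  # e.g. 1024 channels, 15 anchors per location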
Example 8: resnet_bottleneck
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def resnet_bottleneck(l, ch_out, stride):
    shortcut = l
    if cfg.BACKBONE.STRIDE_1X1:
        if stride == 2:
            l = l[:, :, :-1, :-1]
        l = Conv2D('conv1', l, ch_out, 1, strides=stride)
        l = Conv2D('conv2', l, ch_out, 3, strides=1)
    else:
        l = Conv2D('conv1', l, ch_out, 1, strides=1)
        if stride == 2:
            l = tf.pad(l, [[0, 0], [0, 0], maybe_reverse_pad(0, 1), maybe_reverse_pad(0, 1)])
            l = Conv2D('conv2', l, ch_out, 3, strides=2, padding='VALID')
        else:
            l = Conv2D('conv2', l, ch_out, 3, strides=stride)
    if cfg.BACKBONE.NORM != 'None':
        l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_norm(zero_init=True))
    else:
        l = Conv2D('conv3', l, ch_out * 4, 1, activation=tf.identity,
                   kernel_initializer=tf.constant_initializer())
    ret = l + resnet_shortcut(shortcut, ch_out * 4, stride, activation=get_norm(zero_init=False))
    return tf.nn.relu(ret, name='output')
Example 9: resnet_c4_backbone
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def resnet_c4_backbone(image, num_blocks):
    assert len(num_blocks) == 3
    freeze_at = cfg.BACKBONE.FREEZE_AT
    with backbone_scope(freeze=freeze_at > 0):
        l = tf.pad(image, [[0, 0], [0, 0], maybe_reverse_pad(2, 3), maybe_reverse_pad(2, 3)])
        l = Conv2D('conv0', l, 64, 7, strides=2, padding='VALID')
        l = tf.pad(l, [[0, 0], [0, 0], maybe_reverse_pad(0, 1), maybe_reverse_pad(0, 1)])
        l = MaxPooling('pool0', l, 3, strides=2, padding='VALID')
    with backbone_scope(freeze=freeze_at > 1):
        c2 = resnet_group('group0', l, resnet_bottleneck, 64, num_blocks[0], 1)
    with backbone_scope(freeze=False):
        c3 = resnet_group('group1', c2, resnet_bottleneck, 128, num_blocks[1], 2)
        c4 = resnet_group('group2', c3, resnet_bottleneck, 256, num_blocks[2], 2)
    # 16x downsampling up to now
    return c4
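maybe_reverse_pad is another helper this page does not define. In tensorpack's Faster R-CNN code it exists to reproduce Caffe2's asymmetric padding: it returns the pair as-is under TensorFlow padding mode and reversed otherwise. A sketch from memory:

def maybe_reverse_pad(topleft, bottomright):
    if cfg.BACKBONE.TF_PAD_MODE:
        return [topleft, bottomright]
    return [bottomright, topleft]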
Example 10: GhostModule
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def GhostModule(name, x, filters, kernel_size, dw_size, ratio, padding='SAME', strides=1,
                data_format='NHWC', use_bias=False, activation=tf.identity):
    with tf.variable_scope(name):
        init_channels = math.ceil(filters / ratio)
        x = Conv2D('conv1', x, init_channels, kernel_size, strides=strides, activation=activation,
                   data_format=data_format, kernel_initializer=kernel_initializer, use_bias=use_bias)
        if ratio == 1:
            return x  # activation(x, name='output')
        dw1 = MyDepthConv('dw1', x, [dw_size, dw_size], channel_mult=ratio - 1, stride=1,
                          data_format=data_format, activation=activation)
        dw1 = dw1[:, :, :, :filters - init_channels] if data_format == 'NHWC' \
            else dw1[:, :filters - init_channels, :, :]
        x = tf.concat([x, dw1], 3 if data_format == 'NHWC' else 1)
        return x
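Note that kernel_initializer and MyDepthConv are free names here, defined elsewhere in the source repository. A hypothetical call, assuming an NHWC input x and a module-level kernel_initializer:

# The primary 1x1 conv produces ceil(32/2) = 16 channels; the 3x3 depthwise
# "ghost" branch supplies the remaining 16 before concatenation.
y = GhostModule('ghost1', x, filters=32, kernel_size=1, dw_size=3, ratio=2)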
Example 11: SELayer
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def SELayer(x, out_dim, ratio):
    squeeze = utils.spatial_mean(x, keep_dims=True, scope='global_pool')
    excitation = Conv2D('fc1', squeeze, int(out_dim / ratio), 1, strides=1,
                        kernel_initializer=kernel_initializer, data_format='NHWC', activation=None)
    excitation = tf.nn.relu(excitation, name='relu')
    excitation = Conv2D('fc2', excitation, out_dim, 1, strides=1,
                        kernel_initializer=kernel_initializer, data_format='NHWC', activation=None)
    excitation = tf.clip_by_value(excitation, 0, 1, name='hsigmoid')
    scale = x * excitation
    return scale
Example 12: decoder
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def decoder(name, i):
    pad = 'valid'  # to prevent boundary artifacts
    with tf.variable_scope(name):
        with tf.variable_scope('u3'):
            u3 = upsample2x('rz', i[-1])
            u3_sum = tf.add_n([u3, i[-2]])

            u3 = Conv2D('conva', u3_sum, 256, 5, strides=1, padding=pad)
            u3 = dense_blk('dense', u3, [128, 32], [1, 5], 8, split=4, padding=pad)
            u3 = Conv2D('convf', u3, 512, 1, strides=1)
        ####
        with tf.variable_scope('u2'):
            u2 = upsample2x('rz', u3)
            u2_sum = tf.add_n([u2, i[-3]])

            u2x = Conv2D('conva', u2_sum, 128, 5, strides=1, padding=pad)
            u2 = dense_blk('dense', u2x, [128, 32], [1, 5], 4, split=4, padding=pad)
            u2 = Conv2D('convf', u2, 256, 1, strides=1)
        ####
        with tf.variable_scope('u1'):
            u1 = upsample2x('rz', u2)
            u1_sum = tf.add_n([u1, i[-4]])

            u1 = Conv2D('conva', u1_sum, 64, 5, strides=1, padding='same')
    return [u3, u2x, u1]
####
Example 13: conv_with_rn
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def conv_with_rn(gradient):
    out = Conv2D('conv', gradient, gradient.get_shape()[3], 1, strides=1, activation=get_rn(),
                 kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0))
    gradient = gradient + out
    return gradient
Example 14: is_data_format_nchw
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def is_data_format_nchw():
    data_format = get_arg_scope()['Conv2D']['data_format']
    return data_format in ['NCHW', 'channels_first']
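This helper reads the Conv2D default installed by an enclosing argscope, so callers such as example 4 can pick the channel axis without threading data_format through every function. A minimal illustration of the pattern:

with argscope(Conv2D, data_format='channels_first'):
    ch_ax = 1 if is_data_format_nchw() else 3  # -> 1 inside this scope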
Example 15: resnet
# Required import: from tensorpack import models [as alias]
# Or: from tensorpack.models import Conv2D [as alias]
def resnet(input_, option):
    mode = option.mode
    DEPTH = option.depth
    bottleneck = {'se': se_resnet_bottleneck}[mode]
    cfg = {
        50: ([3, 4, 6, 3], bottleneck),
    }
    defs, block_func = cfg[DEPTH]
    group_func = resnet_group

    with argscope(Conv2D, use_bias=False,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')), \
            argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm],
                     data_format='channels_first'):
        l = Conv2D('conv0', input_, 64, 7, strides=2, activation=BNReLU)
        if option.gating_position[0]: l = gating_op(l, option)

        l = MaxPooling('pool0', l, 3, strides=2, padding='SAME')
        if option.gating_position[1]: l = gating_op(l, option)

        l = group_func('group0', l, block_func, 64, defs[0], 1, option)
        if option.gating_position[2]: l = gating_op(l, option)

        l = group_func('group1', l, block_func, 128, defs[1], 2, option)
        if option.gating_position[3]: l = gating_op(l, option)

        l = group_func('group2', l, block_func, 256, defs[2], 2, option)
        if option.gating_position[4]: l = gating_op(l, option)

        l = group_func('group3', l, block_func, 512, defs[3], 1, option)
        if option.gating_position[5]: l = gating_op(l, option)

        p_logits = GlobalAvgPooling('gap', l)
        logits = FullyConnected('linearnew', p_logits, option.number_of_class)
    return logits, l