This article collects typical usage examples of the Python method tensorpack.tfutils.argscope.get_arg_scope. If you have been wondering what argscope.get_arg_scope does and how to use it in practice, the curated examples below may help. You can also read further into the containing module, tensorpack.tfutils.argscope.
The following shows 13 code examples of argscope.get_arg_scope, sorted by popularity by default.
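Before the examples, here is a minimal sketch of the pattern they all rely on. The sketch is ours, not taken from any example, and assumes a tensorpack version where both names live in tensorpack.tfutils.argscope: argscope pushes default keyword arguments for the named layer functions, and get_arg_scope() returns the currently active defaults as a dict of dicts keyed by layer name.

from tensorpack.models import Conv2D
from tensorpack.tfutils.argscope import argscope, get_arg_scope

def current_conv_data_format():
    # Look up the data_format an enclosing argscope has declared for Conv2D;
    # if no enclosing argscope mentions Conv2D, this lookup raises KeyError.
    return get_arg_scope()['Conv2D']['data_format']

with argscope(Conv2D, data_format='NCHW'):
    assert current_conv_data_format() == 'NCHW'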
Example 1: alexnet_backbone
# Required import: from tensorpack.tfutils import argscope
# Or: from tensorpack.tfutils.argscope import get_arg_scope
def alexnet_backbone(image, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=tf.random_normal_initializer(stddev=0.01),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv1', 96, 11, stride=4, is_quant=False, padding='VALID')
                  .MaxPooling('pool1', shape=3, stride=2, padding='VALID')
                  .BNReLUQuant('bnquant2')
                  .Conv2DQuant('conv2', 256, 5)
                  .MaxPooling('pool2', shape=3, stride=2, padding='VALID')
                  .BNReLUQuant('bnquant3')
                  .Conv2DQuant('conv3', 384, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv4', 384, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv5', 256, 3)
                  .MaxPooling('pool5', shape=3, stride=2, padding='VALID')
                  .BNReLUQuant('bnquant6')
                  .Conv2DQuant('fc6', 4096, 6, nl=getfcBNReLUQuant, padding='VALID',
                               W_init=tf.random_normal_initializer(stddev=0.005), use_bias=True)
                  .Conv2DQuant('fc7', 4096, 1, nl=getfcBNReLU, padding='VALID',
                               W_init=tf.random_normal_initializer(stddev=0.005), use_bias=True)
                  .FullyConnected('fc8', out_dim=1000, nl=tf.identity,
                                  W_init=tf.random_normal_initializer(stddev=0.01))())
    return logits
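Note that the backbone reads the Conv2D data_format out of the ambient scope, so it must be called inside an argscope that defines it. A hypothetical call site (the NCHW layout and the qw value are our assumptions, not from the source):

from tensorpack.models import Conv2D
from tensorpack.tfutils.argscope import argscope

with argscope(Conv2D, data_format='NCHW'):
    logits = alexnet_backbone(image, qw=4)  # image: an NCHW image tensor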
Example 2: densenet_backbone
# Required import: from tensorpack.tfutils import argscope
# Or: from tensorpack.tfutils.argscope import get_arg_scope
def densenet_backbone(image, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=variance_scaling_initializer(mode='FAN_IN'),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw,
                  is_quant=qw > 0):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv1', 2 * GROWTH_RATE, 7, stride=2, nl=BNReLU, is_quant=False)
                  .MaxPooling('pool1', shape=3, stride=2, padding='SAME')
                  # 56x56 feature map (assuming a 224x224 input)
                  .apply(add_dense_block, 'block0', 6)
                  # 28x28
                  .apply(add_dense_block, 'block1', 12)
                  # 14x14
                  .apply(add_dense_block, 'block2', 24)
                  # 7x7
                  .apply(add_dense_block, 'block3', 16, last=True)
                  .BNReLU('bnrelu_last')
                  .GlobalAvgPooling('gap')
                  .FullyConnected('linear', out_dim=1000, nl=tf.identity,
                                  W_init=variance_scaling_initializer(mode='FAN_IN'))())
    return logits
Example 3: inception_block
# Required import: from tensorpack.tfutils import argscope
# Or: from tensorpack.tfutils.argscope import get_arg_scope
def inception_block(l, name, ch_1x1, ch_3x3, ch_5x5, is_last_block=False, is_last=False):
    data_format = get_arg_scope()['Conv2DQuant']['data_format']
    with tf.variable_scope(name):
        conv1x1 = Conv2DQuant('1x1', l, ch_1x1, 1,
                              nl=getBNReLUQuant if not is_last_block else tf.identity)
        conv3x3_reduce = Conv2DQuant('3x3_reduce', l, ch_3x3, 1, nl=getBNReLUQuant)
        conv3x3 = Conv2DQuant('3x3', conv3x3_reduce, ch_3x3, 3,
                              nl=getBNReLUQuant if not is_last_block else tf.identity)
        conv5x5_reduce = Conv2DQuant('5x5_reduce', l, ch_5x5, 1, nl=getBNReLUQuant)
        conv5x5 = Conv2DQuant('5x5', conv5x5_reduce, ch_5x5, 5,
                              nl=getBNReLUQuant if not is_last_block else tf.identity)
        if is_last_block and not is_last:
            conv1x1 = MaxPooling('pool_1x1', conv1x1, shape=3, stride=2, padding='SAME')
            conv1x1 = BNReLU('conv1x1_bn', conv1x1)
            conv1x1 = QuantizedActiv('conv1x1_quant', conv1x1)
            conv3x3 = MaxPooling('pool_3x3', conv3x3, shape=3, stride=2, padding='SAME')
            conv3x3 = BNReLU('conv3x3_bn', conv3x3)
            conv3x3 = QuantizedActiv('conv3x3_quant', conv3x3)
            conv5x5 = MaxPooling('pool_5x5', conv5x5, shape=3, stride=2, padding='SAME')
            conv5x5 = BNReLU('conv5x5_bn', conv5x5)
            conv5x5 = QuantizedActiv('conv5x5_quant', conv5x5)
        l = tf.concat([conv1x1, conv3x3, conv5x5],
                      1 if data_format == 'NCHW' else 3, name='concat')
        if is_last:
            l = BNReLU('output_bn', l)
    return l
Example 4: se_resnet_bottleneck
# Required import: from tensorpack.tfutils import argscope
# Or: from tensorpack.tfutils.argscope import get_arg_scope
def se_resnet_bottleneck(l, ch_out, stride):
    shortcut = l
    l = Conv2D('conv1', l, ch_out, 1, activation=BNReLU)
    l = Conv2D('conv2', l, ch_out, 3, strides=stride, activation=BNReLU)
    l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_bn(zero_init=True))
    # Squeeze-and-excitation: global pooling followed by a two-layer gate.
    squeeze = GlobalAvgPooling('gap', l)
    squeeze = FullyConnected('fc1', squeeze, ch_out // 4, activation=tf.nn.relu)
    squeeze = FullyConnected('fc2', squeeze, ch_out * 4, activation=tf.nn.sigmoid)
    data_format = get_arg_scope()['Conv2D']['data_format']
    ch_ax = 1 if data_format in ['NCHW', 'channels_first'] else 3
    shape = [-1, 1, 1, 1]
    shape[ch_ax] = ch_out * 4
    l = l * tf.reshape(squeeze, shape)
    out = l + resnet_shortcut(shortcut, ch_out * 4, stride, activation=get_bn(zero_init=False))
    return tf.nn.relu(out)
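The reshape at the end is the standard squeeze-and-excitation broadcast: the per-channel gates produced by fc2 are reshaped to a [-1, 1, 1, 1] tensor with the channel dimension moved to whichever axis data_format dictates, so the elementwise multiply scales every spatial position of each channel by its gate.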
Example 5: se_bottleneck
# Required import: from tensorpack.tfutils import argscope
# Or: from tensorpack.tfutils.argscope import get_arg_scope
def se_bottleneck(l, ch_out, stride):
    shortcut = l
    l = Conv2D('conv1', l, ch_out, 1, activation=BNReLU)
    l = Conv2D('conv2', l, ch_out, 3, strides=stride, activation=BNReLU)
    l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_bn(zero_init=True))
    squeeze = GlobalAvgPooling('gap', l)
    squeeze = FullyConnected('fc1', squeeze, ch_out // 4, activation=tf.nn.relu)
    squeeze = FullyConnected('fc2', squeeze, ch_out * 4, activation=tf.nn.sigmoid)
    data_format = get_arg_scope()['Conv2D']['data_format']
    ch_ax = 1 if data_format in ['NCHW', 'channels_first'] else 3
    shape = [-1, 1, 1, 1]
    shape[ch_ax] = ch_out * 4
    l = l * tf.reshape(squeeze, shape)
    out = l + resnet_shortcut(shortcut, ch_out * 4, stride, activation=get_bn(zero_init=False))
    return tf.nn.relu(out)
Example 6: resnet_shortcut
# Required import: from tensorpack.tfutils import argscope
# Or: from tensorpack.tfutils.argscope import get_arg_scope
def resnet_shortcut(l, n_out, stride, nl=tf.identity):
    data_format = get_arg_scope()['Conv2D']['data_format']
    n_in = l.get_shape().as_list()[1 if data_format == 'NCHW' else 3]
    if n_in != n_out:  # change dimension when the channel count differs
        return Conv2D('convshortcut', l, n_out, 1, stride=stride, nl=nl)
    else:
        return l
Example 7: se_resnet_bottleneck
# Required import: from tensorpack.tfutils import argscope
# Or: from tensorpack.tfutils.argscope import get_arg_scope
def se_resnet_bottleneck(l, ch_out, stride):
    shortcut = l
    l = Conv2D('conv1', l, ch_out, 1, nl=BNReLU)
    l = Conv2D('conv2', l, ch_out, 3, stride=stride, nl=BNReLU)
    l = Conv2D('conv3', l, ch_out * 4, 1, nl=get_bn(zero_init=True))
    squeeze = GlobalAvgPooling('gap', l)
    squeeze = FullyConnected('fc1', squeeze, ch_out // 4, nl=tf.nn.relu)
    squeeze = FullyConnected('fc2', squeeze, ch_out * 4, nl=tf.nn.sigmoid)
    data_format = get_arg_scope()['Conv2D']['data_format']
    ch_ax = 1 if data_format == 'NCHW' else 3
    shape = [-1, 1, 1, 1]
    shape[ch_ax] = ch_out * 4
    l = l * tf.reshape(squeeze, shape)
    return l + resnet_shortcut(shortcut, ch_out * 4, stride, nl=get_bn(zero_init=False))
Example 8: googlenet_backbone
# Required import: from tensorpack.tfutils import argscope
# Or: from tensorpack.tfutils.argscope import get_arg_scope
def googlenet_backbone(image, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=variance_scaling_initializer(mode='FAN_IN'),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw,
                  is_quant=qw > 0):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv1', 64, 7, stride=2, is_quant=False)
                  .MaxPooling('pool1', shape=3, stride=2, padding='SAME')
                  .BNReLUQuant('pool1/out')
                  .Conv2DQuant('conv2/3x3_reduce', 192, 1, nl=getBNReLUQuant)
                  .Conv2DQuant('conv2/3x3', 192, 3)
                  .MaxPooling('pool2', shape=3, stride=2, padding='SAME')
                  .BNReLUQuant('pool2/out')
                  # scope names below keep the source repository's original 'incpetion' spelling
                  .apply(inception_block, 'incpetion_3a', 96, 128, 32)
                  .apply(inception_block, 'incpetion_3b', 192, 192, 96, is_last_block=True)
                  .apply(inception_block, 'incpetion_4a', 256, 208, 48)
                  .apply(inception_block, 'incpetion_4b', 224, 224, 64)
                  .apply(inception_block, 'incpetion_4c', 192, 256, 64)
                  .apply(inception_block, 'incpetion_4d', 176, 288, 64)
                  .apply(inception_block, 'incpetion_4e', 384, 320, 128, is_last_block=True)
                  .apply(inception_block, 'incpetion_5a', 384, 320, 128)
                  .apply(inception_block, 'incpetion_5b', 512, 384, 128, is_last_block=True, is_last=True)
                  .GlobalAvgPooling('pool5')
                  .FullyConnected('linear', out_dim=1000, nl=tf.identity)())
    return logits
Example 9: vgg_backbone
# Required import: from tensorpack.tfutils import argscope
# Or: from tensorpack.tfutils.argscope import get_arg_scope
def vgg_backbone(image, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=variance_scaling_initializer(mode='FAN_IN'),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv1', 96, 7, stride=2, nl=tf.nn.relu, is_quant=False)
                  .MaxPooling('pool1', shape=2, stride=2, padding='VALID')
                  # 56x56 feature map (assuming a 224x224 input)
                  .BNReLUQuant('bnquant2_0')
                  .Conv2DQuant('conv2_1', 256, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv2_2', 256, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv2_3', 256, 3)
                  .MaxPooling('pool2', shape=2, stride=2, padding='VALID')
                  # 28x28
                  .BNReLUQuant('bnquant3_0')
                  .Conv2DQuant('conv3_1', 512, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv3_2', 512, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv3_3', 512, 3)
                  .MaxPooling('pool3', shape=2, stride=2, padding='VALID')
                  # 14x14
                  .BNReLUQuant('bnquant4_0')
                  .Conv2DQuant('conv4_1', 512, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv4_2', 512, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv4_3', 512, 3)
                  .MaxPooling('pool4', shape=2, stride=2, padding='VALID')
                  # 7x7
                  .BNReLUQuant('bnquant5')
                  .Conv2DQuant('fc5', 4096, 7, nl=getfcBNReLUQuant, padding='VALID', use_bias=True)
                  .Conv2DQuant('fc6', 4096, 1, nl=getfcBNReLU, padding='VALID', use_bias=True)
                  .FullyConnected('fc7', out_dim=1000, nl=tf.identity,
                                  W_init=variance_scaling_initializer(mode='FAN_IN'))())
    return logits
Example 10: resnet_backbone
# Required import: from tensorpack.tfutils import argscope
# Or: from tensorpack.tfutils.argscope import get_arg_scope
def resnet_backbone(image, num_blocks, group_func, block_func, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=variance_scaling_initializer(mode='FAN_OUT'),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv0', 64, 7, stride=2, nl=BNReLU, is_quant=False)
                  .MaxPooling('pool0', shape=3, stride=2, padding='SAME')
                  .apply(group_func, 'group0', block_func, 64, num_blocks[0], 1)
                  .apply(group_func, 'group1', block_func, 128, num_blocks[1], 2)
                  .apply(group_func, 'group2', block_func, 256, num_blocks[2], 2)
                  .apply(group_func, 'group3', block_func, 512, num_blocks[3], 2, is_last=True)
                  .GlobalAvgPooling('gap')
                  .FullyConnected('linear', 1000, nl=tf.identity)())
    return logits
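For reference, a hypothetical ResNet-50-style invocation would be resnet_backbone(image, [3, 4, 6, 3], group_func, block_func, qw=4), where group_func and block_func are the group and bottleneck builders defined elsewhere in the source repository; the block counts and qw value here are illustrative assumptions.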
Example 11: is_data_format_nchw
# Required import: from tensorpack.tfutils import argscope
# Or: from tensorpack.tfutils.argscope import get_arg_scope
def is_data_format_nchw():
    data_format = get_arg_scope()['Conv2D']['data_format']
    return data_format in ['NCHW', 'channels_first']
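A hypothetical use of this helper, assuming an enclosing argscope that sets data_format for Conv2D (branch_a and branch_b are placeholder tensors, not from the source):

# Choose the channel axis for concatenation from the ambient data_format.
ch_axis = 1 if is_data_format_nchw() else 3
merged = tf.concat([branch_a, branch_b], axis=ch_axis, name='merge')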
Example 12: resnet_shortcut
# Required import: from tensorpack.tfutils import argscope
# Or: from tensorpack.tfutils.argscope import get_arg_scope
def resnet_shortcut(l, n_out, stride, nl=tf.identity):
    data_format = get_arg_scope()['Conv2D']['data_format']
    n_in = l.get_shape().as_list()[1 if data_format == 'NCHW' else 3]
    if n_in != n_out:  # change dimension when the channel count differs
        if stride == 2:
            # Crop one row and one column before the strided 1x1 conv with
            # VALID padding; note the slice indices assume NCHW layout.
            l = l[:, :, :-1, :-1]
            return Conv2D('convshortcut', l, n_out, 1,
                          stride=stride, padding='VALID', nl=nl)
        else:
            return Conv2D('convshortcut', l, n_out, 1,
                          stride=stride, nl=nl)
    else:
        return l
Example 13: resnet_shortcut
# Required import: from tensorpack.tfutils import argscope
# Or: from tensorpack.tfutils.argscope import get_arg_scope
def resnet_shortcut(l, n_out, stride, activation=tf.identity):
    data_format = get_arg_scope()['Conv2D']['data_format']
    n_in = l.get_shape().as_list()[1 if data_format in ['NCHW', 'channels_first'] else 3]
    if n_in != n_out:  # change dimension when the channel count differs
        return Conv2D('convshortcut', l, n_out, 1, strides=stride, activation=activation)
    else:
        return l
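Note that Examples 6, 7, and 12 use the older tensorpack layer signature (stride=, nl=), while Examples 4, 5, 11, and 13 use the newer Keras-style signature (strides=, activation=, 'channels_first') adopted in later tensorpack releases; which variant applies depends on the tensorpack version the source repository targets.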