This article collects typical usage examples of the Python method tensorflow.contrib.slim.nets.resnet_v1.resnet_v1. If you are unsure what resnet_v1.resnet_v1 does, how to call it, or what it looks like in practice, the curated code examples below should help. You can also explore the containing module, tensorflow.contrib.slim.nets.resnet_v1, for further usage examples.
The following 13 code examples of resnet_v1.resnet_v1 are shown, ordered roughly by popularity.
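Before diving into the examples, here is a minimal, self-contained sketch of the typical calling pattern: build a list of blocks, open a ResNet arg_scope, and call resnet_v1.resnet_v1 on an input tensor. The input shape, block configuration, class count, and scope name below are illustrative assumptions, not values taken from any of the examples:

import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim.nets import resnet_v1

# Illustrative block configuration (assumed); real models typically use the
# predefined resnet_v1_50 / resnet_v1_101 layouts instead.
blocks = [
    resnet_v1.resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
    resnet_v1.resnet_v1_block('block2', base_depth=128, num_units=4, stride=1),
]

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])  # assumed input shape
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
    net, end_points = resnet_v1.resnet_v1(inputs, blocks,
                                          num_classes=10,  # assumed class count
                                          global_pool=True,
                                          scope='resnet_v1_demo')

The examples below follow this same pattern; the detection backbones (Examples 3 and 11) additionally pass global_pool=False and include_root_block=False.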
Example 1: testEndPointsV1
# Required import: from tensorflow.contrib.slim.nets import resnet_v1 [optionally under an alias]
# Or: from tensorflow.contrib.slim.nets.resnet_v1 import resnet_v1 [optionally under an alias]
def testEndPointsV1(self):
    """Test the end points of a tiny v1 bottleneck network."""
    bottleneck = resnet_v1.bottleneck
    blocks = [resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
              resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 1)])]
    inputs = create_test_input(2, 32, 16, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
    expected = [
        'tiny/block1/unit_1/bottleneck_v1/shortcut',
        'tiny/block1/unit_1/bottleneck_v1/conv1',
        'tiny/block1/unit_1/bottleneck_v1/conv2',
        'tiny/block1/unit_1/bottleneck_v1/conv3',
        'tiny/block1/unit_2/bottleneck_v1/conv1',
        'tiny/block1/unit_2/bottleneck_v1/conv2',
        'tiny/block1/unit_2/bottleneck_v1/conv3',
        'tiny/block2/unit_1/bottleneck_v1/shortcut',
        'tiny/block2/unit_1/bottleneck_v1/conv1',
        'tiny/block2/unit_1/bottleneck_v1/conv2',
        'tiny/block2/unit_1/bottleneck_v1/conv3',
        'tiny/block2/unit_2/bottleneck_v1/conv1',
        'tiny/block2/unit_2/bottleneck_v1/conv2',
        'tiny/block2/unit_2/bottleneck_v1/conv3']
    self.assertItemsEqual(expected, end_points)
Example 2: _resnet_small
def _resnet_small(self,
                  inputs,
                  num_classes=None,
                  global_pool=True,
                  output_stride=None,
                  include_root_block=True,
                  reuse=None,
                  scope='resnet_v1_small'):
    """A shallow and thin ResNet v1 for faster tests."""
    bottleneck = resnet_v1.bottleneck
    blocks = [
        resnet_utils.Block(
            'block1', bottleneck, [(4, 1, 1)] * 2 + [(4, 1, 2)]),
        resnet_utils.Block(
            'block2', bottleneck, [(8, 2, 1)] * 2 + [(8, 2, 2)]),
        resnet_utils.Block(
            'block3', bottleneck, [(16, 4, 1)] * 2 + [(16, 4, 2)]),
        resnet_utils.Block(
            'block4', bottleneck, [(32, 8, 1)] * 2)]
    return resnet_v1.resnet_v1(inputs, blocks, num_classes, global_pool,
                               output_stride, include_root_block, reuse, scope)
Example 3: restnet_head
def restnet_head(input, is_training, scope_name):
    block4 = [resnet_v1_block('block4', base_depth=512, num_units=3, stride=1)]

    with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
        C5, _ = resnet_v1.resnet_v1(input,
                                    block4,
                                    global_pool=False,
                                    include_root_block=False,
                                    scope=scope_name)
        # C5 = tf.Print(C5, [tf.shape(C5)], summarize=10, message='C5_shape')
        C5_flatten = tf.reduce_mean(C5, axis=[1, 2], keep_dims=False, name='global_average_pooling')
        # C5_flatten = tf.Print(C5_flatten, [tf.shape(C5_flatten)], summarize=10, message='C5_flatten_shape')

    # Global average pooling of C5 provides the input to the fc layers.
    return C5_flatten
Example 4: GetGamma
def GetGamma(self, short_name):
    tokens = short_name.split('/')
    name = ('resnet_v1/block1/' + tokens[0] + '/bottleneck_v1/' + tokens[1] +
            '/BatchNorm/gamma')
    return self._gammas[name]
Example 5: GetOp
def GetOp(self, short_name):
    if short_name == 'FC':
        return tf.get_default_graph().get_operation_by_name('FC/MatMul')
    tokens = short_name.split('/')
    name = ('resnet_v1/block1/' + tokens[0] + '/bottleneck_v1/' + tokens[1] +
            '/Conv2D')
    return tf.get_default_graph().get_operation_by_name(name)
Example 6: resnet_arg_scope
def resnet_arg_scope(is_training=True,
                     weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    batch_norm_params = {
        # NOTE: 'is_training' here does not take effect, because inside resnet it gets reset:
        # https://github.com/tensorflow/models/blob/master/slim/nets/resnet_v1.py#L187
        'is_training': False,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': cfg.RESNET.BN_TRAIN,
        'updates_collections': ops.GraphKeys.UPDATE_OPS
    }

    with arg_scope(
            [slim.conv2d],
            weights_regularizer=regularizers.l2_regularizer(weight_decay),
            weights_initializer=initializers.variance_scaling_initializer(),
            trainable=is_training,
            activation_fn=nn_ops.relu,
            normalizer_fn=layers.batch_norm,
            normalizer_params=batch_norm_params):
        with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc
Example 7: testAtrousValuesBottleneck
def testAtrousValuesBottleneck(self):
    self._atrousValues(resnet_v1.bottleneck)
Example 8: testSuccessResnetV1
def testSuccessResnetV1(self):
    build_resnet(resnet_v1.resnet_v1_block, resnet_v1.resnet_v1)
    mapper = gamma_mapper.ConvGammaMapperByConnectivity()
    # Here the mapping between convolutions and batch-norms is a simple
    # one-to-one mapping.
    for block in (1, 2):
        self.assertGammaMatchesConv(
            mapper, 'resnet_v1/block%d/unit_1/bottleneck_v1/shortcut' % block)
        for unit in (1, 2):
            for conv in (1, 2, 3):
                self.assertGammaMatchesConv(
                    mapper, 'resnet_v1/block%d/unit_%d/bottleneck_v1/conv%d' %
                    (block, unit, conv))
Example 9: getGamma
def getGamma(self, short_name):
    tokens = short_name.split('/')
    name = ('resnet_v1/block1/' + tokens[0] + '/bottleneck_v1/' + tokens[1] +
            '/BatchNorm/gamma')
    return self._gammas[name]
Example 10: getOp
def getOp(self, short_name):
    if short_name == 'FC':
        return tf.get_default_graph().get_operation_by_name('FC/MatMul')
    tokens = short_name.split('/')
    name = ('resnet_v1/block1/' + tokens[0] + '/bottleneck_v1/' + tokens[1] +
            '/Conv2D')
    return tf.get_default_graph().get_operation_by_name(name)
Example 11: resnet_base
def resnet_base(img_batch, scope_name, is_training=True):
    '''
    This code is derived from light-head rcnn:
    https://github.com/zengarden/light_head_rcnn
    It makes it convenient to freeze blocks, so we adopt this approach.
    '''
    if scope_name == 'resnet_v1_50':
        middle_num_units = 6
    elif scope_name == 'resnet_v1_101':
        middle_num_units = 23
    else:
        raise NotImplementedError('We only support resnet_v1_50 or resnet_v1_101. Check your network name....yjr')

    blocks = [resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
              resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
              # Use stride 1 for the last conv4 layer.
              resnet_v1_block('block3', base_depth=256, num_units=middle_num_units, stride=1)]
    # When using FPN, the stride list is [1, 2, 2].

    with slim.arg_scope(resnet_arg_scope(is_training=False)):
        with tf.variable_scope(scope_name, scope_name):
            # Do the first few layers manually, because 'SAME' padding can behave
            # inconsistently for images of different sizes: sometimes 0, sometimes 1.
            net = resnet_utils.conv2d_same(
                img_batch, 64, 7, stride=2, scope='conv1')
            net = tf.pad(net, [[0, 0], [1, 1], [1, 1], [0, 0]])
            net = slim.max_pool2d(
                net, [3, 3], stride=2, padding='VALID', scope='pool1')

    not_freezed = [False] * cfgs.FIXED_BLOCKS + (4 - cfgs.FIXED_BLOCKS) * [True]
    # cfgs.FIXED_BLOCKS can be 1~3.

    with slim.arg_scope(resnet_arg_scope(is_training=(is_training and not_freezed[0]))):
        C2, _ = resnet_v1.resnet_v1(net,
                                    blocks[0:1],
                                    global_pool=False,
                                    include_root_block=False,
                                    scope=scope_name)
    # C2 = tf.Print(C2, [tf.shape(C2)], summarize=10, message='C2_shape')

    with slim.arg_scope(resnet_arg_scope(is_training=(is_training and not_freezed[1]))):
        C3, _ = resnet_v1.resnet_v1(C2,
                                    blocks[1:2],
                                    global_pool=False,
                                    include_root_block=False,
                                    scope=scope_name)
    # C3 = tf.Print(C3, [tf.shape(C3)], summarize=10, message='C3_shape')

    with slim.arg_scope(resnet_arg_scope(is_training=(is_training and not_freezed[2]))):
        C4, _ = resnet_v1.resnet_v1(C3,
                                    blocks[2:3],
                                    global_pool=False,
                                    include_root_block=False,
                                    scope=scope_name)
    # C4 = tf.Print(C4, [tf.shape(C4)], summarize=10, message='C4_shape')

    return C4
Example 12: testCost
def testCost(self):
    self.BuildGraphWithBatchNorm(resnet_v1.resnet_v1, resnet_v1.resnet_v1_block)
    self.InitGamma()
    res_alive = np.logical_or(
        np.logical_or(
            self.GetGamma('unit_1/shortcut') > self._threshold,
            self.GetGamma('unit_1/conv3') > self._threshold),
        self.GetGamma('unit_2/conv3') > self._threshold)
    self.gamma_flop_reg = flop_regularizer.GammaFlopsRegularizer(
        [self.net.op], self._threshold)

    expected = {}
    expected['unit_1/shortcut'] = (
        self.GetCoeff('unit_1/shortcut') * np.sum(res_alive) * NUM_CHANNELS)
    expected['unit_1/conv1'] = (
        self.GetCoeff('unit_1/conv1') * self.NumAlive('unit_1/conv1') *
        NUM_CHANNELS)
    expected['unit_1/conv2'] = (
        self.GetCoeff('unit_1/conv2') * self.NumAlive('unit_1/conv2') *
        self.NumAlive('unit_1/conv1'))
    expected['unit_1/conv3'] = (
        self.GetCoeff('unit_1/conv3') * np.sum(res_alive) *
        self.NumAlive('unit_1/conv2'))
    expected['unit_2/conv1'] = (
        self.GetCoeff('unit_2/conv1') * self.NumAlive('unit_2/conv1') *
        np.sum(res_alive))
    expected['unit_2/conv2'] = (
        self.GetCoeff('unit_2/conv2') * self.NumAlive('unit_2/conv2') *
        self.NumAlive('unit_2/conv1'))
    expected['unit_2/conv3'] = (
        self.GetCoeff('unit_2/conv3') * np.sum(res_alive) *
        self.NumAlive('unit_2/conv2'))
    expected['FC'] = 2.0 * np.sum(res_alive) * 23.0

    # TODO(e1): Is there a way to use Parametrized Tests to make this more
    # elegant?
    with self.cached_session():
        for short_name in expected:
            cost = self.gamma_flop_reg.get_cost([self.GetOp(short_name)]).eval()
            self.assertEqual(expected[short_name], cost)

        self.assertEqual(
            sum(expected.values()),
            self.gamma_flop_reg.get_cost().eval())
Example 13: testCost
def testCost(self):
    self.buildGraphWithBatchNorm(resnet_v1.resnet_v1, resnet_v1.resnet_v1_block)
    self.initGamma()
    res_alive = np.logical_or(
        np.logical_or(
            self.getGamma('unit_1/shortcut') > self._threshold,
            self.getGamma('unit_1/conv3') > self._threshold),
        self.getGamma('unit_2/conv3') > self._threshold)
    self.gamma_flop_reg = flop_regularizer.GammaFlopsRegularizer(
        [self.net.op], self._threshold)

    expected = {}
    expected['unit_1/shortcut'] = (
        self.getCoeff('unit_1/shortcut') * np.sum(res_alive) * NUM_CHANNELS)
    expected['unit_1/conv1'] = (
        self.getCoeff('unit_1/conv1') * self.numAlive('unit_1/conv1') *
        NUM_CHANNELS)
    expected['unit_1/conv2'] = (
        self.getCoeff('unit_1/conv2') * self.numAlive('unit_1/conv2') *
        self.numAlive('unit_1/conv1'))
    expected['unit_1/conv3'] = (
        self.getCoeff('unit_1/conv3') * np.sum(res_alive) *
        self.numAlive('unit_1/conv2'))
    expected['unit_2/conv1'] = (
        self.getCoeff('unit_2/conv1') * self.numAlive('unit_2/conv1') *
        np.sum(res_alive))
    expected['unit_2/conv2'] = (
        self.getCoeff('unit_2/conv2') * self.numAlive('unit_2/conv2') *
        self.numAlive('unit_2/conv1'))
    expected['unit_2/conv3'] = (
        self.getCoeff('unit_2/conv3') * np.sum(res_alive) *
        self.numAlive('unit_2/conv2'))
    expected['FC'] = 2.0 * np.sum(res_alive) * 23.0

    # TODO: Is there a way to use Parametrized Tests to make this more
    # elegant?
    with self.test_session():
        for short_name in expected:
            cost = self.gamma_flop_reg.get_cost([self.getOp(short_name)]).eval()
            self.assertEqual(expected[short_name], cost)

        self.assertEqual(
            sum(expected.values()),
            self.gamma_flop_reg.get_cost().eval())