This article collects typical usage examples of the Python method object_detection.utils.static_shape.get_depth. If you are wondering how static_shape.get_depth is used in practice, the curated code examples below may help. You can also explore further usage examples from the containing module, object_detection.utils.static_shape.
The following shows 3 code examples of the static_shape.get_depth method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
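For orientation, the behavior the examples below rely on can be summarized as follows: get_depth takes a rank-4 tf.TensorShape (batch, height, width, depth) and returns the last dimension, rejecting shapes of any other rank. Here is a minimal sketch of that contract, inferred from the tests shown below rather than copied from the library source:
import tensorflow as tf

def get_depth_sketch(tensor_shape):
  """Illustrative stand-in for static_shape.get_depth (inferred behavior)."""
  # assert_has_rank raises ValueError when the shape is not rank 4,
  # matching the failure exercised in Example 2 below.
  tensor_shape.assert_has_rank(4)
  return tensor_shape.as_list()[3]

print(get_depth_sketch(tf.TensorShape([32, 299, 384, 3])))  # -> 3, as in Example 1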
Example 1: test_return_correct_depth
# Required import: from object_detection.utils import static_shape [as alias]
# Or: from object_detection.utils.static_shape import get_depth [as alias]
def test_return_correct_depth(self):
  tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
  self.assertEqual(3, static_shape.get_depth(tensor_shape))
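These snippets are methods of a test case, so they take self. To run Example 1 on its own, one could wrap it roughly as follows (the class name StaticShapeTest and the tf.test harness are assumptions for illustration, not part of the listed snippet):
import tensorflow as tf
from object_detection.utils import static_shape

class StaticShapeTest(tf.test.TestCase):

  def test_return_correct_depth(self):
    tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
    self.assertEqual(3, static_shape.get_depth(tensor_shape))

if __name__ == '__main__':
  tf.test.main()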
Example 2: test_die_on_tensor_shape_with_rank_three
# Required import: from object_detection.utils import static_shape [as alias]
# Or: from object_detection.utils.static_shape import get_depth [as alias]
def test_die_on_tensor_shape_with_rank_three(self):
  tensor_shape = tf.TensorShape(dims=[32, 299, 384])
  with self.assertRaises(ValueError):
    static_shape.get_batch_size(tensor_shape)
    static_shape.get_height(tensor_shape)
    static_shape.get_width(tensor_shape)
    static_shape.get_depth(tensor_shape)
Example 3: build
# Required import: from object_detection.utils import static_shape [as alias]
# Or: from object_detection.utils.static_shape import get_depth [as alias]
def build(self, input_shapes):
  """Creates the variables of the layer."""
  if len(input_shapes) != len(self._prediction_heads[BOX_ENCODINGS]):
    raise ValueError('This box predictor was constructed with %d heads,'
                     'but there are %d inputs.' %
                     (len(self._prediction_heads[BOX_ENCODINGS]),
                      len(input_shapes)))
  for stack_index, input_shape in enumerate(input_shapes):
    net = []

    # Add additional conv layers before the class predictor.
    features_depth = static_shape.get_depth(input_shape)
    depth = max(min(features_depth, self._max_depth), self._min_depth)
    tf.logging.info(
        'depth of additional conv before box predictor: {}'.format(depth))

    if depth > 0 and self._num_layers_before_predictor > 0:
      for i in range(self._num_layers_before_predictor):
        net.append(keras.Conv2D(depth, [1, 1],
                                name='SharedConvolutions_%d/Conv2d_%d_1x1_%d'
                                % (stack_index, i, depth),
                                padding='SAME',
                                **self._conv_hyperparams.params()))
        net.append(self._conv_hyperparams.build_batch_norm(
            training=(self._is_training and not self._freeze_batchnorm),
            name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_norm'
            % (stack_index, i, depth)))
        net.append(self._conv_hyperparams.build_activation_layer(
            name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_activation'
            % (stack_index, i, depth),
        ))
    # Until certain bugs are fixed in checkpointable lists,
    # this net must be appended only once it's been filled with layers
    self._shared_nets.append(net)
  self.built = True
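Note how get_depth feeds into the layer configuration in Example 3: the incoming feature depth is clamped to the predictor's [min_depth, max_depth] range, and when both bounds are zero the clamped depth is zero, so the extra 1x1 convolutions are skipped entirely. A small illustration of that clamping expression (the values below are made up for the example):
def clamp_depth(features_depth, min_depth, max_depth):
  # Same expression as in build() above: clamp features_depth to [min_depth, max_depth].
  return max(min(features_depth, max_depth), min_depth)

print(clamp_depth(256, min_depth=16, max_depth=96))  # -> 96 (capped at max_depth)
print(clamp_depth(8, min_depth=16, max_depth=96))    # -> 16 (raised to min_depth)
print(clamp_depth(256, min_depth=0, max_depth=0))    # -> 0, so no extra conv layers are built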