This article collects typical usage examples of the Python method object_detection.utils.shape_utils.static_or_dynamic_map_fn. If you are unsure what shape_utils.static_or_dynamic_map_fn does or how to call it, the examples below should help. You can also explore the containing module, object_detection.utils.shape_utils, for further usage examples.
The following presents 10 code examples of shape_utils.static_or_dynamic_map_fn, sorted by popularity by default.
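Before diving into the examples, here is a minimal sketch of the method's basic behavior (assuming TensorFlow 1.x and the TF Object Detection API are installed; the tensor values are made up for illustration): with fully static input shapes the call is unrolled with tf.unstack/tf.stack, while dynamically shaped inputs fall back to tf.map_fn.

import tensorflow as tf
from object_detection.utils import shape_utils

# Static leading dimension: the call is unrolled, so no `map` op is created.
static_elems = tf.constant([[1., 2.], [3., 4.]])
static_sums = shape_utils.static_or_dynamic_map_fn(tf.reduce_sum, static_elems)

# Dynamic leading dimension: the call falls back to tf.map_fn.
dynamic_elems = tf.placeholder(tf.float32, shape=(None, 2))
dynamic_sums = shape_utils.static_or_dynamic_map_fn(tf.reduce_sum, dynamic_elems)

with tf.Session() as sess:
  print(sess.run(static_sums))                                    # [3. 7.]
  print(sess.run(dynamic_sums,
                 feed_dict={dynamic_elems: [[1, 2], [3, 4]]}))    # [3. 7.]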
Example 1: _tf_example_input_placeholder
# Required module import: from object_detection.utils import shape_utils [as alias]
# Or: from object_detection.utils.shape_utils import static_or_dynamic_map_fn [as alias]
def _tf_example_input_placeholder():
  """Returns input that accepts a batch of strings with tf examples.

  Returns:
    a tuple of input placeholder and the output decoded images.
  """
  batch_tf_example_placeholder = tf.placeholder(
      tf.string, shape=[None], name='tf_example')

  def decode(tf_example_string_tensor):
    # Decode one serialized tf.Example into a tensor dict and pull out the image.
    tensor_dict = tf_example_decoder.TfExampleDecoder().decode(
        tf_example_string_tensor)
    image_tensor = tensor_dict[fields.InputDataFields.image]
    return image_tensor

  return (batch_tf_example_placeholder,
          shape_utils.static_or_dynamic_map_fn(
              decode,
              elems=batch_tf_example_placeholder,
              dtype=tf.uint8,
              parallel_iterations=32,
              back_prop=False))
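A hedged usage sketch for the placeholder above. It assumes the function and its imports (from object_detection.data_decoders import tf_example_decoder, from object_detection.core import standard_fields as fields) are in scope, and that the decoder reads the standard 'image/encoded' / 'image/format' keys; the tiny all-zero PNG is made up purely for illustration.

import numpy as np
import tensorflow as tf

def _make_serialized_example(image_array):
  """Serializes a uint8 HxWx3 array as a tf.train.Example with a PNG payload."""
  with tf.Graph().as_default(), tf.Session() as sess:
    encoded = sess.run(tf.image.encode_png(tf.constant(image_array)))
  features = tf.train.Features(feature={
      'image/encoded': tf.train.Feature(
          bytes_list=tf.train.BytesList(value=[encoded])),
      'image/format': tf.train.Feature(
          bytes_list=tf.train.BytesList(value=[b'png'])),
  })
  return tf.train.Example(features=features).SerializeToString()

placeholder, decoded_images = _tf_example_input_placeholder()
serialized = _make_serialized_example(np.zeros((4, 4, 3), dtype=np.uint8))
with tf.Session() as sess:
  images = sess.run(decoded_images, feed_dict={placeholder: [serialized]})
  print(images.shape)  # expected: (1, 4, 4, 3)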
Example 2: test_with_dynamic_shape
# Required module import: from object_detection.utils import shape_utils [as alias]
# Or: from object_detection.utils.shape_utils import static_or_dynamic_map_fn [as alias]
def test_with_dynamic_shape(self):
  def fn(input_tensor):
    return tf.reduce_sum(input_tensor)
  input_tensor = tf.placeholder(tf.float32, shape=(None, 2))
  map_fn_output = shape_utils.static_or_dynamic_map_fn(fn, input_tensor)

  # With a dynamic leading dimension the call falls back to tf.map_fn,
  # so a `map` op is present in the graph.
  op_names = [op.name for op in tf.get_default_graph().get_operations()]
  self.assertTrue(any(['map' == op_name[:3] for op_name in op_names]))

  with self.test_session() as sess:
    result1 = sess.run(
        map_fn_output, feed_dict={
            input_tensor: [[1, 2], [3, 1], [0, 4]]})
    result2 = sess.run(
        map_fn_output, feed_dict={
            input_tensor: [[-1, 1], [0, 9]]})
    self.assertAllEqual(result1, [3, 4, 4])
    self.assertAllEqual(result2, [0, 9])
Example 3: test_with_multiple_static_shapes
# Required module import: from object_detection.utils import shape_utils [as alias]
# Or: from object_detection.utils.shape_utils import static_or_dynamic_map_fn [as alias]
def test_with_multiple_static_shapes(self):
  def fn(elems):
    input_tensor, scalar_index_tensor = elems
    return tf.reshape(tf.slice(input_tensor, scalar_index_tensor, [1]), [])
  input_tensor = tf.constant([[1, 2, 3], [4, 5, -1], [0, 6, 9]],
                             dtype=tf.float32)
  scalar_index_tensor = tf.constant([[0], [2], [1]], dtype=tf.int32)
  map_fn_output = shape_utils.static_or_dynamic_map_fn(
      fn, [input_tensor, scalar_index_tensor], dtype=tf.float32)

  # Fully static shapes: the call is unrolled, so no `map` op is created.
  op_names = [op.name for op in tf.get_default_graph().get_operations()]
  self.assertTrue(all(['map' != op_name[:3] for op_name in op_names]))

  with self.test_session() as sess:
    result = sess.run(map_fn_output)
    self.assertAllEqual(result, [1, -1, 6])
Example 4: normalized_to_image_coordinates
# Required module import: from object_detection.utils import shape_utils [as alias]
# Or: from object_detection.utils.shape_utils import static_or_dynamic_map_fn [as alias]
def normalized_to_image_coordinates(normalized_boxes, image_shape,
                                    parallel_iterations=32):
  """Converts a batch of boxes from normalized to image coordinates.

  Args:
    normalized_boxes: a float32 tensor of shape [None, num_boxes, 4] in
      normalized coordinates.
    image_shape: a float32 tensor of shape [4] containing the image shape.
    parallel_iterations: parallelism for the map_fn op.

  Returns:
    absolute_boxes: a float32 tensor of shape [None, num_boxes, 4] containing
      the boxes in image coordinates.
  """
  x_scale = tf.cast(image_shape[2], tf.float32)
  y_scale = tf.cast(image_shape[1], tf.float32)
  def _to_absolute_coordinates(normalized_boxes):
    # Scale [y_min, x_min, y_max, x_max] by image height and width.
    y_min, x_min, y_max, x_max = tf.split(
        value=normalized_boxes, num_or_size_splits=4, axis=1)
    y_min = y_scale * y_min
    y_max = y_scale * y_max
    x_min = x_scale * x_min
    x_max = x_scale * x_max
    scaled_boxes = tf.concat([y_min, x_min, y_max, x_max], 1)
    return scaled_boxes

  absolute_boxes = shape_utils.static_or_dynamic_map_fn(
      _to_absolute_coordinates,
      elems=(normalized_boxes),
      dtype=tf.float32,
      parallel_iterations=parallel_iterations,
      back_prop=True)
  return absolute_boxes
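A quick, hypothetical call of the function above (the box and shape values are made up for illustration); since the boxes are statically shaped, the map is unrolled and no tf.map_fn op is created.

import tensorflow as tf

normalized_boxes = tf.constant([[[0.0, 0.0, 0.5, 0.5],
                                 [0.25, 0.25, 1.0, 1.0]]], dtype=tf.float32)
# [batch, height, width, channels] of the (possibly padded) input image.
image_shape = tf.constant([1, 100, 200, 3], dtype=tf.float32)
absolute_boxes = normalized_to_image_coordinates(normalized_boxes, image_shape)

with tf.Session() as sess:
  print(sess.run(absolute_boxes))
  # [[[  0.   0.  50. 100.]
  #   [ 25.  50. 100. 200.]]]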
Example 5: test_with_static_shape
# Required module import: from object_detection.utils import shape_utils [as alias]
# Or: from object_detection.utils.shape_utils import static_or_dynamic_map_fn [as alias]
def test_with_static_shape(self):
  def fn(input_tensor):
    return tf.reduce_sum(input_tensor)
  input_tensor = tf.constant([[1, 2], [3, 1], [0, 4]], dtype=tf.float32)
  map_fn_output = shape_utils.static_or_dynamic_map_fn(fn, input_tensor)

  # Static shape: the call is unrolled, so no `map` op appears in the graph.
  op_names = [op.name for op in tf.get_default_graph().get_operations()]
  self.assertTrue(all(['map' != op_name[:3] for op_name in op_names]))

  with self.test_session() as sess:
    result = sess.run(map_fn_output)
    self.assertAllEqual(result, [3, 4, 4])
Example 6: test_fails_with_nested_input
# Required module import: from object_detection.utils import shape_utils [as alias]
# Or: from object_detection.utils.shape_utils import static_or_dynamic_map_fn [as alias]
def test_fails_with_nested_input(self):
  def fn(input_tensor):
    return input_tensor
  input_tensor1 = tf.constant([1])
  input_tensor2 = tf.constant([2])

  # Nested lists of tensors are not supported as `elems`.
  with self.assertRaisesRegexp(
      ValueError, '`elems` must be a Tensor or list of Tensors.'):
    shape_utils.static_or_dynamic_map_fn(
        fn, [input_tensor1, [input_tensor2]], dtype=tf.float32)
Example 7: preprocess
# Required module import: from object_detection.utils import shape_utils [as alias]
# Or: from object_detection.utils.shape_utils import static_or_dynamic_map_fn [as alias]
def preprocess(self, inputs):
  """Feature-extractor specific preprocessing.

  SSD meta architecture uses a default clip_window of [0, 0, 1, 1] during
  post-processing. On calling `preprocess` method, clip_window gets updated
  based on `true_image_shapes` returned by `image_resizer_fn`.

  Args:
    inputs: a [batch, height_in, width_in, channels] float tensor representing
      a batch of images with values between 0 and 255.0.

  Returns:
    preprocessed_inputs: a [batch, height_out, width_out, channels] float
      tensor representing a batch of images.
    true_image_shapes: int32 tensor of shape [batch, 3] where each row is
      of the form [height, width, channels] indicating the shapes
      of true images in the resized images, as resized images can be padded
      with zeros.

  Raises:
    ValueError: if inputs tensor does not have type tf.float32
  """
  if inputs.dtype is not tf.float32:
    raise ValueError('`preprocess` expects a tf.float32 tensor')
  with tf.name_scope('Preprocessor'):
    # TODO(jonathanhuang): revisit whether to always use batch size as
    # the number of parallel iterations vs allow for dynamic batching.
    outputs = shape_utils.static_or_dynamic_map_fn(
        self._image_resizer_fn,
        elems=inputs,
        dtype=[tf.float32, tf.int32])
    resized_inputs = outputs[0]
    true_image_shapes = outputs[1]
    return (self._feature_extractor.preprocess(resized_inputs),
            true_image_shapes)
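The resizer above returns two tensors per image, which is why `dtype` lists both output types. Here is a standalone sketch of that multi-output pattern using a toy resizer (a hypothetical stand-in for `self._image_resizer_fn`, not the real one):

import numpy as np
import tensorflow as tf
from object_detection.utils import shape_utils

def _toy_resizer(image):
  # Resize to a fixed 300x300 and report the output shape
  # (a real resizer would report the unpadded region).
  resized = tf.image.resize_images(image, [300, 300])
  true_shape = tf.stack([300, 300, tf.shape(image)[2]])
  return [resized, true_shape]

images = tf.placeholder(tf.float32, shape=[None, None, None, 3])
resized, true_shapes = shape_utils.static_or_dynamic_map_fn(
    _toy_resizer, elems=images, dtype=[tf.float32, tf.int32])

with tf.Session() as sess:
  out_images, out_shapes = sess.run(
      [resized, true_shapes],
      feed_dict={images: np.zeros((2, 5, 7, 3), dtype=np.float32)})
  print(out_images.shape)  # (2, 300, 300, 3)
  print(out_shapes)        # [[300 300 3] [300 300 3]]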
Example 8: preprocess
# Required module import: from object_detection.utils import shape_utils [as alias]
# Or: from object_detection.utils.shape_utils import static_or_dynamic_map_fn [as alias]
def preprocess(self, inputs):
  """Feature-extractor specific preprocessing.

  See base class.

  For Faster R-CNN, we perform image resizing in the base class --- each
  class subclassing FasterRCNNMetaArch is responsible for any additional
  preprocessing (e.g., scaling pixel values to be in [-1, 1]).

  Args:
    inputs: a [batch, height_in, width_in, channels] float tensor representing
      a batch of images with values between 0 and 255.0.

  Returns:
    preprocessed_inputs: a [batch, height_out, width_out, channels] float
      tensor representing a batch of images.
    true_image_shapes: int32 tensor of shape [batch, 3] where each row is
      of the form [height, width, channels] indicating the shapes
      of true images in the resized images, as resized images can be padded
      with zeros.

  Raises:
    ValueError: if inputs tensor does not have type tf.float32
  """
  if inputs.dtype is not tf.float32:
    raise ValueError('`preprocess` expects a tf.float32 tensor')
  with tf.name_scope('Preprocessor'):
    outputs = shape_utils.static_or_dynamic_map_fn(
        self._image_resizer_fn,
        elems=inputs,
        dtype=[tf.float32, tf.int32],
        parallel_iterations=self._parallel_iterations)
    resized_inputs = outputs[0]
    true_image_shapes = outputs[1]
    return (self._feature_extractor.preprocess(resized_inputs),
            true_image_shapes)
Example 9: normalized_to_image_coordinates
# Required module import: from object_detection.utils import shape_utils [as alias]
# Or: from object_detection.utils.shape_utils import static_or_dynamic_map_fn [as alias]
def normalized_to_image_coordinates(normalized_boxes, image_shape,
                                    parallel_iterations=32):
  """Converts a batch of boxes from normalized to image coordinates.

  Args:
    normalized_boxes: a float32 tensor of shape [None, num_boxes, 4] in
      normalized coordinates.
    image_shape: a float32 tensor of shape [4] containing the image shape.
    parallel_iterations: parallelism for the map_fn op.

  Returns:
    absolute_boxes: a float32 tensor of shape [None, num_boxes, 4] containing
      the boxes in image coordinates.
  """
  def _to_absolute_coordinates(normalized_boxes):
    # Delegate the per-image conversion to box_list_ops.
    return box_list_ops.to_absolute_coordinates(
        box_list.BoxList(normalized_boxes),
        image_shape[1], image_shape[2], check_range=False).get()

  absolute_boxes = shape_utils.static_or_dynamic_map_fn(
      _to_absolute_coordinates,
      elems=(normalized_boxes),
      dtype=tf.float32,
      parallel_iterations=parallel_iterations,
      back_prop=True)
  return absolute_boxes
Example 10: test_with_multiple_dynamic_shapes
# Required module import: from object_detection.utils import shape_utils [as alias]
# Or: from object_detection.utils.shape_utils import static_or_dynamic_map_fn [as alias]
def test_with_multiple_dynamic_shapes(self):
  def fn(elems):
    input_tensor, scalar_index_tensor = elems
    return tf.reshape(tf.slice(input_tensor, scalar_index_tensor, [1]), [])
  input_tensor = tf.placeholder(tf.float32, shape=(None, 3))
  scalar_index_tensor = tf.placeholder(tf.int32, shape=(None, 1))
  map_fn_output = shape_utils.static_or_dynamic_map_fn(
      fn, [input_tensor, scalar_index_tensor], dtype=tf.float32)

  # Dynamic leading dimensions: the call falls back to tf.map_fn,
  # so a `map` op is present in the graph.
  op_names = [op.name for op in tf.get_default_graph().get_operations()]
  self.assertTrue(any(['map' == op_name[:3] for op_name in op_names]))

  with self.test_session() as sess:
    result1 = sess.run(
        map_fn_output, feed_dict={
            input_tensor: [[1, 2, 3], [4, 5, -1], [0, 6, 9]],
            scalar_index_tensor: [[0], [2], [1]],
        })
    result2 = sess.run(
        map_fn_output, feed_dict={
            input_tensor: [[-1, 1, 0], [3, 9, 30]],
            scalar_index_tensor: [[1], [0]]
        })
    self.assertAllEqual(result1, [1, -1, 6])
    self.assertAllEqual(result2, [1, 3])