This article collects typical usage examples of the Python method object_detection.core.keypoint_ops.clip_to_window. If you are wondering what keypoint_ops.clip_to_window does, how to call it, or where to find working examples, the curated code samples below may help. You can also explore other usages of the containing module, object_detection.core.keypoint_ops.
Three code examples of keypoint_ops.clip_to_window are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
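For orientation before the examples: clip_to_window takes a tensor of keypoints with shape [num_instances, num_keypoints, 2] in (y, x) order plus a window [y_min, x_min, y_max, x_max], and clamps every coordinate into the window. The following minimal sketch reproduces that behavior; it is an illustrative re-implementation for clarity, not the library's actual code.

import tensorflow as tf

def clip_keypoints_sketch(keypoints, window):
  """Clamps (y, x) keypoints into window = [y_min, x_min, y_max, x_max]."""
  y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
  y = tf.maximum(tf.minimum(y, window[2]), window[0])  # clamp y into [y_min, y_max]
  x = tf.maximum(tf.minimum(x, window[3]), window[1])  # clamp x into [x_min, x_max]
  return tf.concat([y, x], axis=2)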
Example 1: test_clip_to_window
# Required import: from object_detection.core import keypoint_ops [as alias]
# Or: from object_detection.core.keypoint_ops import clip_to_window [as alias]
def test_clip_to_window(self):
  keypoints = tf.constant([
      [[0.25, 0.5], [0.75, 0.75]],
      [[0.5, 0.0], [1.0, 1.0]]
  ])
  window = tf.constant([0.25, 0.25, 0.75, 0.75])
  expected_keypoints = tf.constant([
      [[0.25, 0.5], [0.75, 0.75]],
      [[0.5, 0.25], [0.75, 0.75]]
  ])
  output = keypoint_ops.clip_to_window(keypoints, window)
  with self.test_session() as sess:
    output_, expected_keypoints_ = sess.run([output, expected_keypoints])
    self.assertAllClose(output_, expected_keypoints_)
Example 2: test_clip_to_window
# Required import: from object_detection.core import keypoint_ops [as alias]
# Or: from object_detection.core.keypoint_ops import clip_to_window [as alias]
def test_clip_to_window(self):
  def graph_fn():
    keypoints = tf.constant([
        [[0.25, 0.5], [0.75, 0.75]],
        [[0.5, 0.0], [1.0, 1.0]]
    ])
    window = tf.constant([0.25, 0.25, 0.75, 0.75])
    expected_keypoints = tf.constant([
        [[0.25, 0.5], [0.75, 0.75]],
        [[0.5, 0.25], [0.75, 0.75]]
    ])
    output = keypoint_ops.clip_to_window(keypoints, window)
    return output, expected_keypoints
  output, expected_keypoints = self.execute(graph_fn, [])
  self.assertAllClose(output, expected_keypoints)
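Unlike Example 1, which relies on the TF1-style self.test_session(), this version wraps the computation in graph_fn and hands it to self.execute, the helper on the Object Detection API's test_case.TestCase that runs the function and returns NumPy results so the same test works under both graph and eager execution. Outside the test harness, a minimal eager-mode check of the same behavior (a sketch assuming TF 2.x and an installed object_detection package) could look like:

import numpy as np
import tensorflow as tf
from object_detection.core import keypoint_ops

keypoints = tf.constant([[[0.5, 0.0], [1.0, 1.0]]])  # one instance, two keypoints
window = tf.constant([0.25, 0.25, 0.75, 0.75])       # [y_min, x_min, y_max, x_max]
clipped = keypoint_ops.clip_to_window(keypoints, window)
np.testing.assert_allclose(clipped.numpy(), [[[0.5, 0.25], [0.75, 0.75]]])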
Example 3: convert_strided_predictions_to_normalized_boxes
# Required import: from object_detection.core import keypoint_ops [as alias]
# Or: from object_detection.core.keypoint_ops import clip_to_window [as alias]
def convert_strided_predictions_to_normalized_boxes(boxes, stride,
                                                    true_image_shapes):
  """Converts predictions in the output space to normalized boxes.

  Boxes falling outside the valid image boundary are clipped to be on the
  boundary.

  Args:
    boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw
      coordinates of boxes in the model's output space.
    stride: The stride in the output space.
    true_image_shapes: A tensor of shape [batch_size, 3] representing the true
      shape of the input not considering padding.

  Returns:
    boxes: A tensor of shape [batch_size, num_boxes, 4] representing the
      coordinates of the normalized boxes.
  """
  def _normalize_boxlist(args):
    boxes, height, width = args
    boxes = box_list_ops.scale(boxes, stride, stride)
    boxes = box_list_ops.to_normalized_coordinates(boxes, height, width)
    boxes = box_list_ops.clip_to_window(boxes, [0., 0., 1., 1.],
                                        filter_nonoverlapping=False)
    return boxes

  box_lists = [box_list.BoxList(boxes) for boxes in tf.unstack(boxes, axis=0)]
  true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
  true_heights_list = tf.unstack(true_heights, axis=0)
  true_widths_list = tf.unstack(true_widths, axis=0)
  box_lists = list(map(_normalize_boxlist,
                       zip(box_lists, true_heights_list, true_widths_list)))
  boxes = tf.stack([box_list_instance.get() for
                    box_list_instance in box_lists], axis=0)
  return boxes
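As a usage sketch (the values below are illustrative assumptions, not from the original source; box_list and box_list_ops from object_detection.core must also be imported): with an output stride of 4 and a true image size of 512x512, output-space coordinates are multiplied by the stride, divided by the true height and width, and finally clipped to [0, 1].

# Hypothetical inputs: a batch of 1 image with 2 boxes in output-space coordinates.
raw_boxes = tf.constant([[[10., 20., 60., 80.],
                          [0., 0., 130., 130.]]])   # shape [1, 2, 4]
true_image_shapes = tf.constant([[512, 512, 3]])    # true shape before padding
normalized = convert_strided_predictions_to_normalized_boxes(
    raw_boxes, stride=4, true_image_shapes=true_image_shapes)
# Box 1: (10, 20, 60, 80) * 4 / 512 = (0.078, 0.156, 0.469, 0.625)
# Box 2: 130 * 4 = 520 exceeds 512, so those coordinates are clipped to 1.0.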