This article collects typical usage examples of the Python method object_detection.eval_util.get_eval_metric_ops_for_evaluators. If you are wondering what eval_util.get_eval_metric_ops_for_evaluators does and how to call it, the curated code examples below should help. You can also browse the enclosing module object_detection.eval_util for related usage.
The 13 code examples of eval_util.get_eval_metric_ops_for_evaluators shown below are, by default, ordered by popularity.
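Before the examples, here is a minimal sketch (not taken from the article's examples) of how get_eval_metric_ops_for_evaluators is typically wrapped so its result can be used as eval_metric_ops in a tf.estimator model_fn. The helper name build_coco_eval_metric_ops is illustrative, and categories and eval_dict are assumed to be assembled elsewhere (for instance from a label map and via eval_util.result_dict_for_single_example). Newer versions of the API take an eval_pb2.EvalConfig as the first argument, while some of the older examples below pass a plain list of metric names instead.

from object_detection import eval_util
from object_detection.protos import eval_pb2


def build_coco_eval_metric_ops(categories, eval_dict):
  """Returns {metric_name: (value_op, update_op)} for COCO box metrics.

  Args:
    categories: list of dicts, each with 'id' and 'name' keys (typically
      derived from the label map).
    eval_dict: dict of tensors with groundtruth and detection fields, assumed
      to be built elsewhere (e.g. via eval_util.result_dict_for_single_example).
  """
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(['coco_detection_metrics'])
  # Each returned value is a (value_op, update_op) pair, directly usable as
  # eval_metric_ops in a tf.estimator.EstimatorSpec.
  return eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)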
Example 1: test_get_eval_metric_ops_for_coco_detections
# Required import: from object_detection import eval_util [as alias]
# Or: from object_detection.eval_util import get_eval_metric_ops_for_evaluators [as alias]
def test_get_eval_metric_ops_for_coco_detections(self, batch_size=1,
                                                 max_gt_boxes=None,
                                                 scale_to_absolute=False):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(['coco_detection_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op = metric_ops['DetectionBoxes_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    for key, (value_op, _) in metric_ops.iteritems():
      metrics[key] = value_op
    sess.run(update_op)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertNotIn('DetectionMasks_Precision/mAP', metrics)
Example 2: test_get_eval_metric_ops_for_coco_detections_and_masks
# Required import: from object_detection import eval_util [as alias]
# Or: from object_detection.eval_util import get_eval_metric_ops_for_evaluators [as alias]
def test_get_eval_metric_ops_for_coco_detections_and_masks(
    self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    for key, (value_op, _) in metric_ops.iteritems():
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example 3: test_get_eval_metric_ops_for_coco_detections_and_resized_masks
# Required import: from object_detection import eval_util [as alias]
# Or: from object_detection.eval_util import get_eval_metric_ops_for_evaluators [as alias]
def test_get_eval_metric_ops_for_coco_detections_and_resized_masks(
    self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute,
                                         resized_groundtruth_masks=True)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    for key, (value_op, _) in metric_ops.iteritems():
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example 4: test_get_eval_metric_ops_for_coco_detections
# Required import: from object_detection import eval_util [as alias]
# Or: from object_detection.eval_util import get_eval_metric_ops_for_evaluators [as alias]
def test_get_eval_metric_ops_for_coco_detections(self):
  evaluation_metrics = ['coco_detection_metrics']
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict()
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      evaluation_metrics, categories, eval_dict)
  _, update_op = metric_ops['DetectionBoxes_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    for key, (value_op, _) in metric_ops.iteritems():
      metrics[key] = value_op
    sess.run(update_op)
    metrics = sess.run(metrics)
    print(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertNotIn('DetectionMasks_Precision/mAP', metrics)
Example 5: test_get_eval_metric_ops_for_coco_detections_and_masks
# Required import: from object_detection import eval_util [as alias]
# Or: from object_detection.eval_util import get_eval_metric_ops_for_evaluators [as alias]
def test_get_eval_metric_ops_for_coco_detections_and_masks(self):
  evaluation_metrics = ['coco_detection_metrics',
                        'coco_mask_metrics']
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict()
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      evaluation_metrics, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    for key, (value_op, _) in metric_ops.iteritems():
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example 6: test_get_eval_metric_ops_for_coco_detections
# Required import: from object_detection import eval_util [as alias]
# Or: from object_detection.eval_util import get_eval_metric_ops_for_evaluators [as alias]
def test_get_eval_metric_ops_for_coco_detections(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(['coco_detection_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict()
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op = metric_ops['DetectionBoxes_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    for key, (value_op, _) in metric_ops.iteritems():
      metrics[key] = value_op
    sess.run(update_op)
    metrics = sess.run(metrics)
    print(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertNotIn('DetectionMasks_Precision/mAP', metrics)
Example 7: test_get_eval_metric_ops_for_coco_detections_and_masks
# Required import: from object_detection import eval_util [as alias]
# Or: from object_detection.eval_util import get_eval_metric_ops_for_evaluators [as alias]
def test_get_eval_metric_ops_for_coco_detections_and_masks(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict()
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    for key, (value_op, _) in metric_ops.iteritems():
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example 8: test_get_eval_metric_ops_for_coco_detections_and_resized_masks
# Required import: from object_detection import eval_util [as alias]
# Or: from object_detection.eval_util import get_eval_metric_ops_for_evaluators [as alias]
def test_get_eval_metric_ops_for_coco_detections_and_resized_masks(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(resized_groundtruth_masks=True)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    for key, (value_op, _) in metric_ops.iteritems():
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example 9: test_get_eval_metric_ops_for_coco_detections
# Required import: from object_detection import eval_util [as alias]
# Or: from object_detection.eval_util import get_eval_metric_ops_for_evaluators [as alias]
def test_get_eval_metric_ops_for_coco_detections(self, batch_size=1,
                                                 max_gt_boxes=None,
                                                 scale_to_absolute=False):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(['coco_detection_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op = metric_ops['DetectionBoxes_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    for key, (value_op, _) in six.iteritems(metric_ops):
      metrics[key] = value_op
    sess.run(update_op)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertNotIn('DetectionMasks_Precision/mAP', metrics)
Example 10: test_get_eval_metric_ops_for_coco_detections_and_masks
# Required import: from object_detection import eval_util [as alias]
# Or: from object_detection.eval_util import get_eval_metric_ops_for_evaluators [as alias]
def test_get_eval_metric_ops_for_coco_detections_and_masks(
    self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    for key, (value_op, _) in six.iteritems(metric_ops):
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example 11: test_get_eval_metric_ops_for_coco_detections_and_resized_masks
# Required import: from object_detection import eval_util [as alias]
# Or: from object_detection.eval_util import get_eval_metric_ops_for_evaluators [as alias]
def test_get_eval_metric_ops_for_coco_detections_and_resized_masks(
    self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute,
                                         resized_groundtruth_masks=True)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    for key, (value_op, _) in six.iteritems(metric_ops):
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example 12: test_get_eval_metric_ops_raises_error_with_unsupported_metric
# Required import: from object_detection import eval_util [as alias]
# Or: from object_detection.eval_util import get_eval_metric_ops_for_evaluators [as alias]
def test_get_eval_metric_ops_raises_error_with_unsupported_metric(self):
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(['unsupported_metric'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict()
  with self.assertRaises(ValueError):
    eval_util.get_eval_metric_ops_for_evaluators(
        eval_config, categories, eval_dict)
Example 13: test_get_eval_metric_ops_raises_error_with_unsupported_metric
# Required import: from object_detection import eval_util [as alias]
# Or: from object_detection.eval_util import get_eval_metric_ops_for_evaluators [as alias]
def test_get_eval_metric_ops_raises_error_with_unsupported_metric(self):
  evaluation_metrics = ['unsupported_metrics']
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict()
  with self.assertRaises(ValueError):
    eval_util.get_eval_metric_ops_for_evaluators(
        evaluation_metrics, categories, eval_dict)