This page collects typical usage examples of the Python method object_detection.utils.metrics.compute_precision_recall. If you have been wondering exactly what metrics.compute_precision_recall does, how to call it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples from its enclosing module, object_detection.utils.metrics.
Five code examples of metrics.compute_precision_recall are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
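Before the examples, here is a minimal call sketch. The array values and num_gt below are illustrative, not taken from any example on this page; the signature itself matches the calls shown in the examples. The function takes detection scores, per-detection true-positive labels, and the total number of ground-truth instances, and returns precision and recall arrays ordered by descending score.

import numpy as np
from object_detection.utils import metrics

scores = np.array([0.9, 0.7, 0.3], dtype=float)   # detection confidences
tp_fp_labels = np.array([1, 0, 1], dtype=bool)    # True marks a true positive
num_gt = 4                                        # total ground-truth instances

precision, recall = metrics.compute_precision_recall(scores, tp_fp_labels, num_gt)
# precision and recall are numpy arrays with one entry per detection,
# computed after sorting detections by descending score.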
Example 1: test_compute_precision_recall
# Required import: from object_detection.utils import metrics [as alias]
# Or: from object_detection.utils.metrics import compute_precision_recall [as alias]
# Also needed: import numpy as np; the assertions assume a tf.test.TestCase subclass.
def test_compute_precision_recall(self):
  num_gt = 10
  scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
  labels = np.array([0, 1, 1, 0, 0, 1], dtype=bool)
  labels_float_type = np.array([0, 1, 1, 0, 0, 1], dtype=float)
  accumulated_tp_count = np.array([0, 1, 1, 2, 2, 3], dtype=float)
  expected_precision = accumulated_tp_count / np.array([1, 2, 3, 4, 5, 6])
  expected_recall = accumulated_tp_count / num_gt
  precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
  # The same binary labels passed as floats must yield identical results.
  precision_float_type, recall_float_type = metrics.compute_precision_recall(
      scores, labels_float_type, num_gt)
  self.assertAllClose(precision, expected_precision)
  self.assertAllClose(recall, expected_recall)
  self.assertAllClose(precision_float_type, expected_precision)
  self.assertAllClose(recall_float_type, expected_recall)
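As a reading aid, here is a short sketch of the cumulative-sum computation this test verifies (illustrative, not the library's internal source). With boolean labels every detection is either a true or a false positive, so the precision denominator at rank i is simply i:

import numpy as np

scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1])
labels = np.array([0, 1, 1, 0, 0, 1], dtype=float)
num_gt = 10

order = np.argsort(-scores)            # rank detections by descending score
tp = np.cumsum(labels[order])          # accumulated true positives: [0, 1, 1, 2, 2, 3]
precision = tp / np.arange(1, len(tp) + 1)   # matches expected_precision above
recall = tp / num_gt                         # matches expected_recall above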
Example 2: test_compute_precision_recall
# Required import: from object_detection.utils import metrics [as alias]
# Or: from object_detection.utils.metrics import compute_precision_recall [as alias]
# Also needed: import numpy as np; the assertions assume a tf.test.TestCase subclass.
def test_compute_precision_recall(self):
  num_gt = 10
  scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
  labels = np.array([0, 1, 1, 0, 0, 1], dtype=bool)
  accumulated_tp_count = np.array([0, 1, 1, 2, 2, 3], dtype=float)
  expected_precision = accumulated_tp_count / np.array([1, 2, 3, 4, 5, 6])
  expected_recall = accumulated_tp_count / num_gt
  precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
  self.assertAllClose(precision, expected_precision)
  self.assertAllClose(recall, expected_recall)
Example 3: test_compute_precision_recall_and_ap_no_groundtruth
# Required import: from object_detection.utils import metrics [as alias]
# Or: from object_detection.utils.metrics import compute_precision_recall [as alias]
# Also needed: import numpy as np; the test runs inside a unittest/tf.test.TestCase subclass.
def test_compute_precision_recall_and_ap_no_groundtruth(self):
  num_gt = 0
  scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
  labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
  expected_precision = None
  expected_recall = None
  precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
  self.assertEqual(precision, expected_precision)
  self.assertEqual(recall, expected_recall)
  ap = metrics.compute_average_precision(precision, recall)
  self.assertTrue(np.isnan(ap))
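Note what the test pins down: with num_gt == 0 the function returns (None, None), and compute_average_precision tolerates that pair by returning NaN. A caller that prefers to skip undefined classes explicitly could guard like this (a sketch only, not required by the API):

precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
if precision is None:
  ap = float('nan')  # no ground truth: AP is undefined for this class
else:
  ap = metrics.compute_average_precision(precision, recall)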
Example 4: evaluate
# Required import: from object_detection.utils import metrics [as alias]
# Or: from object_detection.utils.metrics import compute_precision_recall [as alias]
# Also needed: import logging and import numpy as np.
def evaluate(self):
  """Compute evaluation result.

  Returns:
    average_precision_per_class: float numpy array of average precision for
      each class.
    mean_ap: mean average precision of all classes, float scalar.
    precisions_per_class: List of precisions, each precision is a float
      numpy array.
    recalls_per_class: List of recalls, each recall is a float numpy array.
    corloc_per_class: numpy float array of CorLoc scores, one per class.
    mean_corloc: mean CorLoc score over all classes, float scalar.
  """
  if (self.num_gt_instances_per_class == 0).any():
    logging.warning(
        'The following classes have no ground truth examples: %s',
        np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))
  for class_index in range(self.num_class):
    # Classes without ground truth keep their default (NaN) average precision.
    if self.num_gt_instances_per_class[class_index] == 0:
      continue
    scores = np.concatenate(self.scores_per_class[class_index])
    tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
    precision, recall = metrics.compute_precision_recall(
        scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
    self.precisions_per_class.append(precision)
    self.recalls_per_class.append(recall)
    average_precision = metrics.compute_average_precision(precision, recall)
    self.average_precision_per_class[class_index] = average_precision
  self.corloc_per_class = metrics.compute_cor_loc(
      self.num_gt_imgs_per_class,
      self.num_images_correctly_detected_per_class)
  mean_ap = np.nanmean(self.average_precision_per_class)
  mean_corloc = np.nanmean(self.corloc_per_class)
  return (self.average_precision_per_class, mean_ap,
          self.precisions_per_class, self.recalls_per_class,
          self.corloc_per_class, mean_corloc)
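One detail worth noting in evaluate: classes skipped by the continue (no ground-truth instances) keep a NaN average precision, and np.nanmean then averages only the classes that were actually evaluated. A tiny illustration with made-up values:

import numpy as np

average_precision_per_class = np.array([0.8, np.nan, 0.6])
mean_ap = np.nanmean(average_precision_per_class)  # 0.7: the NaN class is ignored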
Example 5: test_compute_precision_recall_float
# Required import: from object_detection.utils import metrics [as alias]
# Or: from object_detection.utils.metrics import compute_precision_recall [as alias]
# Also needed: import numpy as np; the assertions assume a tf.test.TestCase subclass.
def test_compute_precision_recall_float(self):
  num_gt = 10
  scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
  labels_float = np.array([0, 1, 1, 0.5, 0, 1], dtype=float)
  expected_precision = np.array(
      [0., 0.5, 0.33333333, 0.5, 0.55555556, 0.63636364], dtype=float)
  expected_recall = np.array([0., 0.1, 0.1, 0.2, 0.25, 0.35], dtype=float)
  precision, recall = metrics.compute_precision_recall(
      scores, labels_float, num_gt)
  self.assertAllClose(precision, expected_precision)
  self.assertAllClose(recall, expected_recall)
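The expected values above can be reproduced by hand. With fractional labels, each label contributes its value to the cumulative true-positive mass, while only labels <= 0 count as false positives. This is a sketch of that weighted logic as implied by the test's expected arrays, not the library's internal source:

import numpy as np

scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1])
labels = np.array([0, 1, 1, 0.5, 0, 1])
order = np.argsort(-scores)           # labels in score order: [0, 1, 0, 1, 0.5, 1]
tp = np.cumsum(labels[order])         # [0, 1, 1, 2, 2.5, 3.5]
fp = np.cumsum(labels[order] <= 0)    # [1, 1, 2, 2, 2, 2]
precision = tp / (tp + fp)            # [0, 0.5, 0.3333, 0.5, 0.5556, 0.6364]
recall = tp / 10                      # [0, 0.1, 0.1, 0.2, 0.25, 0.35]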