This page collects typical usage examples of the Python method object_detection.utils.metrics.compute_average_precision. If you are unsure what metrics.compute_average_precision does or how to call it, the curated examples below may help; you can also look further into the enclosing module object_detection.utils.metrics.
Three code examples of metrics.compute_average_precision are shown below, ordered by popularity by default.
Example 1: test_compute_average_precision
# Required import: from object_detection.utils import metrics [as alias]
# Or: from object_detection.utils.metrics import compute_average_precision [as alias]
def test_compute_average_precision(self):
  precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
  recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
  processed_precision = np.array([0.9, 0.9, 0.9, 0.7, 0.7, 0.55, 0.55, 0],
                                 dtype=float)
  recall_interval = np.array([0.3, 0, 0.1, 0, 0.05, 0, 0.05, 0], dtype=float)
  expected_mean_ap = np.sum(recall_interval * processed_precision)
  mean_ap = metrics.compute_average_precision(precision, recall)
  self.assertAlmostEqual(expected_mean_ap, mean_ap)
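The expected value in this test (approximately 0.4225) follows from the interpolated AP definition: precision is first made monotonically non-increasing in recall (the processed_precision array), and AP is the sum of precision times the width of each recall interval. The sketch below re-derives that number with plain NumPy; the helper name and the exact padding are assumptions made for illustration, not the library's implementation.

import numpy as np

def average_precision_sketch(precision, recall):
  # Pad the curve so the envelope is defined at recall 0 and 1.
  recall = np.concatenate([[0.0], recall, [1.0]])
  precision = np.concatenate([[0.0], precision, [0.0]])
  # Make precision monotonically non-increasing in recall: each point takes
  # the best precision achievable at an equal or higher recall.
  for i in range(len(precision) - 2, -1, -1):
    precision[i] = max(precision[i], precision[i + 1])
  # Integrate precision over the intervals where recall actually increases.
  moved = np.where(recall[1:] != recall[:-1])[0] + 1
  return np.sum((recall[moved] - recall[moved - 1]) * precision[moved])

precision = np.array([0.8, 0.76, 0.9, 0.65, 0.7, 0.5, 0.55, 0], dtype=float)
recall = np.array([0.3, 0.3, 0.4, 0.4, 0.45, 0.45, 0.5, 0.5], dtype=float)
print(average_precision_sketch(precision, recall))  # ~0.4225, matching expected_mean_ap above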
Example 2: test_compute_precision_recall_and_ap_no_groundtruth
# Required import: from object_detection.utils import metrics [as alias]
# Or: from object_detection.utils.metrics import compute_average_precision [as alias]
def test_compute_precision_recall_and_ap_no_groundtruth(self):
  num_gt = 0
  scores = np.array([0.4, 0.3, 0.6, 0.2, 0.7, 0.1], dtype=float)
  labels = np.array([0, 0, 0, 0, 0, 0], dtype=bool)
  expected_precision = None
  expected_recall = None
  precision, recall = metrics.compute_precision_recall(scores, labels, num_gt)
  self.assertEqual(precision, expected_precision)
  self.assertEqual(recall, expected_recall)
  ap = metrics.compute_average_precision(precision, recall)
  self.assertTrue(np.isnan(ap))
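This test pins down the edge case where a class has no ground-truth instances: compute_precision_recall returns (None, None) and compute_average_precision then returns NaN instead of raising. A minimal sketch of how a caller might guard against that case (the input values here are made up for illustration):

import numpy as np
from object_detection.utils import metrics

scores = np.array([0.7, 0.4, 0.1], dtype=float)
labels = np.zeros(3, dtype=bool)   # no detection is a true positive
precision, recall = metrics.compute_precision_recall(scores, labels, 0)
ap = metrics.compute_average_precision(precision, recall)
if np.isnan(ap):
  ap = 0.0  # or leave it NaN and average with np.nanmean, as evaluate() below does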
Example 3: evaluate
# Required import: from object_detection.utils import metrics [as alias]
# Or: from object_detection.utils.metrics import compute_average_precision [as alias]
def evaluate(self):
  """Compute evaluation result.

  Returns:
    average_precision_per_class: float numpy array of average precision for
      each class.
    mean_ap: mean average precision over all classes, float scalar.
    precisions_per_class: list of precisions, each precision is a float numpy
      array.
    recalls_per_class: list of recalls, each recall is a float numpy array.
    corloc_per_class: float numpy array of CorLoc scores for each class.
    mean_corloc: mean CorLoc score over all classes, float scalar.
  """
  if (self.num_gt_instances_per_class == 0).any():
    logging.warn(
        'The following classes have no ground truth examples: %s',
        np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))

  for class_index in range(self.num_class):
    if self.num_gt_instances_per_class[class_index] == 0:
      continue
    scores = np.concatenate(self.scores_per_class[class_index])
    tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
    precision, recall = metrics.compute_precision_recall(
        scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
    self.precisions_per_class.append(precision)
    self.recalls_per_class.append(recall)
    average_precision = metrics.compute_average_precision(precision, recall)
    self.average_precision_per_class[class_index] = average_precision

  self.corloc_per_class = metrics.compute_cor_loc(
      self.num_gt_imgs_per_class,
      self.num_images_correctly_detected_per_class)

  mean_ap = np.nanmean(self.average_precision_per_class)
  mean_corloc = np.nanmean(self.corloc_per_class)
  return (self.average_precision_per_class, mean_ap,
          self.precisions_per_class, self.recalls_per_class,
          self.corloc_per_class, mean_corloc)
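Stripped of the bookkeeping, evaluate() is a per-class loop over the same two metrics calls, followed by np.nanmean so that classes without ground truth do not distort the mean AP. The standalone sketch below mirrors that pattern on made-up inputs (the arrays and class count are illustrative only; a real evaluator builds them from matched detections):

import numpy as np
from object_detection.utils import metrics

# Illustrative per-class inputs: detection scores, true-positive flags and
# the number of ground-truth boxes for each class.
scores_per_class = [np.array([0.9, 0.8, 0.4]), np.array([0.7, 0.3])]
tp_fp_per_class = [np.array([True, False, True]), np.array([False, False])]
num_gt_per_class = np.array([2, 0])

average_precision_per_class = np.full(len(scores_per_class), np.nan)
for c in range(len(scores_per_class)):
  if num_gt_per_class[c] == 0:
    continue  # leave NaN so this class is skipped by np.nanmean below
  precision, recall = metrics.compute_precision_recall(
      scores_per_class[c], tp_fp_per_class[c], num_gt_per_class[c])
  average_precision_per_class[c] = metrics.compute_average_precision(
      precision, recall)

mean_ap = np.nanmean(average_precision_per_class)  # mAP over classes with ground truth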