本文整理汇总了Python中pycocotools.cocoeval.COCOeval类的典型用法代码示例。如果您正苦于以下问题:Python COCOeval类的具体用法?Python COCOeval怎么用?Python COCOeval使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了COCOeval类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _do_python_eval
def _do_python_eval(self, _coco):
    """Run bbox-only COCO evaluation on the stored result file and print metrics."""
    detections = _coco.loadRes(self._result_file)
    evaluator = COCOeval(_coco, detections)
    # Legacy flag on old pycocotools: False selects bounding-box evaluation.
    evaluator.params.useSegm = False
    evaluator.evaluate()
    evaluator.accumulate()
    self._print_detection_metrics(evaluator)
示例2: _do_segmentation_eval
def _do_segmentation_eval(json_dataset, res_file, output_dir):
    """Evaluate 'segm' results against the dataset's COCO ground truth.

    Logs per-class metrics and pickles the evaluator to
    <output_dir>/segmentation_results.pkl.
    """
    detections = json_dataset.COCO.loadRes(str(res_file))
    evaluator = COCOeval(json_dataset.COCO, detections, 'segm')
    evaluator.evaluate()
    evaluator.accumulate()
    _log_detection_eval_metrics(json_dataset, evaluator)
    eval_file = os.path.join(output_dir, 'segmentation_results.pkl')
    robust_pickle_dump(evaluator, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
示例3: _do_detection_eval
def _do_detection_eval(json_dataset, res_file, output_dir):
    """Evaluate 'bbox' results against the dataset's COCO ground truth.

    Logs per-class metrics, pickles the evaluator to
    <output_dir>/detection_results.pkl, and returns it.
    """
    detections = json_dataset.COCO.loadRes(str(res_file))
    evaluator = COCOeval(json_dataset.COCO, detections, 'bbox')
    evaluator.evaluate()
    evaluator.accumulate()
    _log_detection_eval_metrics(json_dataset, evaluator)
    eval_file = os.path.join(output_dir, 'detection_results.pkl')
    save_object(evaluator, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    return evaluator
示例4: _do_detection_eval
def _do_detection_eval(self, res_file, output_dir):
    """Run bbox COCO evaluation, print metrics, and pickle the evaluator."""
    ann_type = 'bbox'
    detections = self._COCO.loadRes(res_file)
    evaluator = COCOeval(self._COCO, detections)
    # Legacy flag: evaluates to False here, i.e. bounding-box evaluation.
    evaluator.params.useSegm = (ann_type == 'segm')
    evaluator.evaluate()
    evaluator.accumulate()
    self._print_detection_eval_metrics(evaluator)
    eval_file = osp.join(output_dir, 'detection_results.pkl')
    with open(eval_file, 'wb') as fid:
        pickle.dump(evaluator, fid, pickle.HIGHEST_PROTOCOL)
    print('Wrote COCO eval results to: {}'.format(eval_file))
示例5: evaluate_coco
def evaluate_coco(model, dataset, coco, config, eval_type="bbox", limit=None, image_ids=None):
    """Runs official COCO evaluation.

    Args:
        model: model passed through to `inference` to produce detections.
        dataset: A Dataset object with validation data.
        coco: pycocotools COCO object holding the ground-truth annotations.
        config: inference configuration passed through to `inference`.
        eval_type: "bbox" or "segm" for bounding box or segmentation evaluation.
        limit: if set, evaluate only the first `limit` images.
        image_ids: optional explicit list of dataset image ids; defaults to all.
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids
    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]
    # Map dataset image ids to the corresponding COCO image IDs.
    # (renamed loop variable: `id` shadowed the builtin)
    coco_image_ids = [dataset.image_info[img_id]["id"] for img_id in image_ids]
    t_prediction = 0
    t_start = time.time()
    results = []
    for i, image_id in enumerate(image_ids):
        if i % 10 == 0:
            print('Processed %d images' % i)
        # Load image
        image = dataset.load_image(image_id)
        # Run detection; time only the model call.
        t = time.time()
        r = inference(image, model, config)
        t_prediction += (time.time() - t)
        # Convert results to COCO format
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"], r["masks"])
        results.extend(image_results)
    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)
    # Evaluate
    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    # Only evaluate for person.
    cocoEval.params.catIds = coco.getCatIds(catNms=['person'])
    cocoEval.evaluate()
    # accumulate() and summarize() return None, so no point binding locals.
    cocoEval.accumulate()
    cocoEval.summarize()
    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
示例6: _do_eval
def _do_eval(res_file, output_dir, _COCO, classes):
    """Run bbox COCO evaluation and save precision/recall arrays as a .mat file.

    The function is borrowed from https://github.com/rbgirshick/fast-rcnn/ and changed.

    Args:
        res_file: path to the detection results json accepted by COCO.loadRes.
        output_dir: path used (as-is) for the output .mat file.
        _COCO: pycocotools COCO object with the ground truth.
        classes: class names forwarded to _print_eval_metrics.
    """
    ann_type = 'bbox'
    coco_dt = _COCO.loadRes(res_file)
    coco_eval = COCOeval(_COCO, coco_dt)
    # Legacy flag: evaluates to False here, i.e. bounding-box evaluation.
    coco_eval.params.useSegm = (ann_type == 'segm')
    coco_eval.evaluate()
    coco_eval.accumulate()
    _print_eval_metrics(coco_eval, classes)
    # Write the result file (precision/recall tensors) in MATLAB format.
    eval_file = osp.join(output_dir)
    eval_result = {
        'precision': coco_eval.eval['precision'],
        'recall': coco_eval.eval['recall'],
    }
    sio.savemat(eval_file, eval_result)
    # print() function instead of the original Python-2 print statement,
    # for Python 3 compatibility and consistency with the rest of the file.
    print('Wrote COCO eval results to: {}'.format(eval_file))
示例7: evaluate
def evaluate():
    """Evaluate 'detections.json' against 'annotations.json' with bbox metrics."""
    ground_truth = COCO('annotations.json')
    detections = ground_truth.loadRes('detections.json')
    evaluator = COCOeval(ground_truth, detections, 'bbox')
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
示例8: coco_evaluate
def coco_evaluate(json_dataset, res_file, image_ids):
    """Run bbox COCO evaluation restricted to `image_ids` and return the evaluator."""
    detections = json_dataset.COCO.loadRes(str(res_file))
    evaluator = COCOeval(json_dataset.COCO, detections, 'bbox')
    evaluator.params.imgIds = image_ids
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
    return evaluator
示例9: cocoval
def cocoval(detected_json):
    """Evaluate detections in `detected_json` against the configured ground truth."""
    ground_truth = COCO(config.eval_json)
    detections = ground_truth.loadRes(detected_json)
    evaluator = COCOeval(ground_truth, detections, iouType='bbox')
    # evaluator.params.imgIds = ground_truth.getImgIds()
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
示例10: __init__
def __init__(self, dataset_json, preds_json):
    """Load ground truth and predictions, and prepare a segmentation evaluator.

    Args:
        dataset_json: path to the COCO-format annotation file (ground truth).
        preds_json: predictions accepted by COCO.loadRes.
    """
    # load dataset ground truths
    self.dataset = COCO(dataset_json)
    cat_ids = self.dataset.getCatIds()
    cat_names = [cat['name'] for cat in self.dataset.loadCats(cat_ids)]
    self.category_to_id_map = dict(zip(cat_names, cat_ids))
    self.classes = ['__background__'] + cat_names
    self.num_classes = len(self.classes)
    # load predictions
    self.preds = self.dataset.loadRes(preds_json)
    self.coco_eval = COCOeval(self.dataset, self.preds, 'segm')
    # Report metrics at 1, 50 and 255 max detections per image.
    self.coco_eval.params.maxDets = [1, 50, 255]
示例11: compute_ap
def compute_ap(self):
    """Evaluate the saved detections over the loader's images and return the evaluator."""
    detections = self.loader.coco.loadRes(self.filename)
    evaluator = COCOeval(self.loader.coco, detections)
    evaluator.params.imgIds = self.loader.get_filenames()
    # Legacy flag on old pycocotools: False selects bounding-box evaluation.
    evaluator.params.useSegm = False
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
    return evaluator
示例12: _do_coco_eval
def _do_coco_eval(self, dtFile, output_dir):
    """
    Evaluate using COCO API
    """
    # Only train/val splits have ground truth to evaluate against.
    if self._image_set in ('train', 'val'):
        ground_truth = self._coco[0]
        detections = COCO(dtFile)
        evaluator = COCOeval(ground_truth, detections)
        evaluator.evaluate()
        evaluator.accumulate()
        evaluator.summarize()
示例13: evaluate_detections
def evaluate_detections(self, all_boxes, output_dir=None):
    """Write detections to a COCO results file and run the standard evaluation."""
    result_path = self._write_coco_results_file(all_boxes)
    ground_truth = self._annotations
    detections = ground_truth.loadRes(result_path)
    # running evaluation
    evaluator = COCOeval(ground_truth, detections)
    # useSegm should default to 0
    #evaluator.params.useSegm = 0
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
示例14: _update
def _update(self):
    """Use coco to get real scores.

    Dumps the recorded detections to the json results file, loads them
    through the COCO API, and runs bbox evaluation against the dataset's
    ground truth.

    Returns:
        The evaluated COCOeval object (also stored in self._coco_eval).

    Raises:
        RuntimeError: if the results file cannot be written.
    """
    # Warn (don't fail) when not every validation image was recorded.
    if self._current_id != len(self._img_ids):
        warnings.warn(
            'Recorded {} out of {} validation images, incomplete results'.format(
                self._current_id, len(self._img_ids)))
    import json
    try:
        with open(self._filename, 'w') as f:
            json.dump(self._results, f)
    except IOError as e:
        # Original message claimed the error was "ignored" although it is
        # re-raised here; evaluation cannot proceed without the file.
        raise RuntimeError("Unable to dump json file. What(): {}".format(str(e)))
    pred = self.dataset.coco.loadRes(self._filename)
    gt = self.dataset.coco
    # lazy import pycocotools
    try_import_pycocotools()
    from pycocotools.cocoeval import COCOeval
    coco_eval = COCOeval(gt, pred, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    self._coco_eval = coco_eval
    return coco_eval
示例15: _do_keypoint_eval
def _do_keypoint_eval(json_dataset, res_file, output_dir):
    """Run COCO keypoint evaluation, pickle the evaluator, and print the summary."""
    ann_type = 'keypoints'
    # Evaluate over all images, in sorted-id order.
    image_ids = sorted(json_dataset.COCO.getImgIds())
    detections = json_dataset.COCO.loadRes(res_file)
    evaluator = COCOeval(json_dataset.COCO, detections, ann_type)
    evaluator.params.imgIds = image_ids
    evaluator.evaluate()
    evaluator.accumulate()
    eval_file = os.path.join(output_dir, 'keypoint_results.pkl')
    robust_pickle_dump(evaluator, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    evaluator.summarize()