本文整理汇总了Python中pycocotools.cocoeval.COCOeval.summarize方法的典型用法代码示例。如果您正苦于以下问题:Python COCOeval.summarize方法的具体用法?Python COCOeval.summarize怎么用?Python COCOeval.summarize使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pycocotools.cocoeval.COCOeval的用法示例。
在下文中一共展示了COCOeval.summarize方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: evaluate
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
def evaluate():
    """Run the standard COCO bbox evaluation pipeline on stored JSON files."""
    ground_truth = COCO('annotations.json')
    detections = ground_truth.loadRes('detections.json')
    evaluator = COCOeval(ground_truth, detections, 'bbox')
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
示例2: coco_evaluate
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
def coco_evaluate(json_dataset, res_file, image_ids):
    """Evaluate bbox detections in *res_file* against *json_dataset*'s COCO GT.

    Returns the summarized COCOeval object for further inspection.
    """
    gt = json_dataset.COCO
    dt = gt.loadRes(str(res_file))
    evaluator = COCOeval(gt, dt, 'bbox')
    evaluator.params.imgIds = image_ids
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
    return evaluator
示例3: compute_ap
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
def compute_ap(self):
    """Score the written detection file with COCOeval and return the evaluator."""
    detections = self.loader.coco.loadRes(self.filename)
    evaluator = COCOeval(self.loader.coco, detections)
    evaluator.params.imgIds = self.loader.get_filenames()
    evaluator.params.useSegm = False  # bbox-style evaluation (legacy flag)
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
    return evaluator
示例4: _do_coco_eval
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
def _do_coco_eval(self, dtFile, output_dir):
    """Evaluate a detection file using the COCO API.

    Only runs for the 'train' and 'val' image sets; otherwise a no-op.
    """
    if self._image_set in ('train', 'val'):
        gt = self._coco[0]
        dt = COCO(dtFile)
        evaluator = COCOeval(gt, dt)
        evaluator.evaluate()
        evaluator.accumulate()
        evaluator.summarize()
示例5: evaluate_detections
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
def evaluate_detections(self, all_boxes, output_dir=None):
    """Write detections to a COCO results file and run the evaluation on it."""
    result_file = self._write_coco_results_file(all_boxes)
    gt = self._annotations
    dt = gt.loadRes(result_file)
    # Run the evaluation; useSegm defaults to 0 (bbox mode), so it is not set.
    evaluator = COCOeval(gt, dt)
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
示例6: cocoval
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
def cocoval(detected_json):
    """Score detections in *detected_json* against the configured GT annotations."""
    eval_gt = COCO(config.eval_json)
    eval_dt = eval_gt.loadRes(detected_json)
    evaluator = COCOeval(eval_gt, eval_dt, iouType='bbox')
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
示例7: _do_keypoint_eval
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
def _do_keypoint_eval(json_dataset, res_file, output_dir):
    """Run COCO keypoint evaluation and pickle the evaluator to *output_dir*."""
    image_ids = sorted(json_dataset.COCO.getImgIds())
    detections = json_dataset.COCO.loadRes(res_file)
    evaluator = COCOeval(json_dataset.COCO, detections, 'keypoints')
    evaluator.params.imgIds = image_ids
    evaluator.evaluate()
    evaluator.accumulate()
    # Persist the evaluator before printing the summary table.
    eval_file = os.path.join(output_dir, 'keypoint_results.pkl')
    robust_pickle_dump(evaluator, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    evaluator.summarize()
示例8: evaluate_coco
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
    """Run the official COCO evaluation on model detections.

    dataset: a Dataset object with validation data
    eval_type: "bbox" or "segm" for bounding-box vs. segmentation evaluation
    limit: if not 0, evaluate only the first `limit` images
    """
    # Pick the images to evaluate, optionally truncated to `limit`.
    image_ids = image_ids or dataset.image_ids
    if limit:
        image_ids = image_ids[:limit]
    # Map dataset ids to the matching COCO image ids.
    coco_image_ids = [dataset.image_info[img]["id"] for img in image_ids]

    t_start = time.time()
    t_prediction = 0
    results = []
    for idx, img_id in enumerate(image_ids):
        image = dataset.load_image(img_id)
        t0 = time.time()
        detection = model.detect([image], verbose=0)[0]
        t_prediction += time.time() - t0
        # Masks are cast to uint8 because the COCO tools error out on bool.
        results.extend(build_coco_results(
            dataset, coco_image_ids[idx:idx + 1],
            detection["rois"], detection["class_ids"], detection["scores"],
            detection["masks"].astype(np.uint8)))

    # loadRes attaches additional attributes to the result annotations.
    coco_results = coco.loadRes(results)

    evaluator = COCOeval(coco, coco_results, eval_type)
    evaluator.params.imgIds = coco_image_ids
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()

    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
示例9: validate
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
def validate(val_loader, model, i, silence=False):
    """Run one validation pass over *val_loader* and score it with COCOeval.

    Builds a prediction COCO object image-by-image, evaluates it against the
    loader's ground-truth annotations, and returns mAP @ IoU=0.5:0.95.

    Args:
        val_loader: DataLoader whose `.dataset.coco` is a ground-truth COCO object.
        model: detection model; called as model(input, im_info) and expected to
            provide interpret_outputs().
        i: iteration index, used only in the final log line.
        silence: unused here; kept for interface compatibility.

    Returns:
        float: coco_eval.stats[0] (mAP @ IoU=0.5:0.95).
    """
    batch_time = AverageMeter()
    coco_gt = val_loader.dataset.coco
    # Build an empty prediction set sharing images/categories with the GT.
    coco_pred = COCO()
    # BUG FIX: was `coco_gt.datasets['images']` (AttributeError) — the COCO
    # object stores its annotations under `.dataset`, as the next line shows.
    coco_pred.dataset['images'] = [img for img in coco_gt.dataset['images']]
    coco_pred.dataset['categories'] = copy.deepcopy(coco_gt.dataset['categories'])

    # switch to evaluate mode
    model.eval()

    end = time.time()
    # BUG FIX: the loop variable used to shadow the `i` parameter, so the
    # final log line printed the last batch index instead of the iteration.
    for batch_idx, (inputs, anns) in enumerate(val_loader):
        # forward images one by one (TODO: support batch mode later, or
        # multiprocess)
        for j, input in enumerate(inputs):
            input_anns = anns[j]  # anns of this input
            gt_bbox = np.vstack([ann['bbox'] + [ann['ordered_id']]
                                 for ann in input_anns])  # currently unused
            im_info = [[input.size(1), input.size(2),
                        input_anns[0]['scale_ratio']]]
            input_var = Variable(input.unsqueeze(0),
                                 requires_grad=False).cuda()
            cls_prob, bbox_pred, rois = model(input_var, im_info)
            scores, pred_boxes = model.interpret_outputs(
                cls_prob, bbox_pred, rois, im_info)
            print(scores, pred_boxes)
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    coco_pred.createIndex()
    coco_eval = COCOeval(coco_gt, coco_pred, 'bbox')
    coco_eval.params.imgIds = sorted(coco_gt.getImgIds())
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    print('iter: [{0}] '
          'Time {batch_time.avg:.3f} '
          'Val Stats: {1}'
          .format(i, coco_eval.stats,
                  batch_time=batch_time))
    return coco_eval.stats[0]
示例10: evaluate_predictions_on_coco
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
def evaluate_predictions_on_coco(
    coco_gt, coco_results, json_result_file, iou_type="bbox"
):
    """Serialize *coco_results* to *json_result_file*, then run COCOeval on it.

    Returns the summarized evaluator.
    """
    import json
    from pycocotools.cocoeval import COCOeval

    with open(json_result_file, "w") as f:
        json.dump(coco_results, f)

    coco_dt = coco_gt.loadRes(str(json_result_file))
    evaluator = COCOeval(coco_gt, coco_dt, iou_type)
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
    return evaluator
示例11: calc_coco_metrics
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
def calc_coco_metrics(coco_annotations, predictions, classes):
    """Convert toolbox predictions to COCO detections and compute COCO metrics.

    Returns a dict mapping each name in METRICS_NAMES to the corresponding
    entry of coco_eval.stats.
    """
    annotations = ObjectDetectorJson.convert_coco_to_toolbox_format(
        coco_annotations, classes)

    detections = []
    for annotation, prediction in zip(annotations, predictions):
        width, height = annotation['image_size']
        image_id = annotation['image_id']
        for obj_id, obj in enumerate(prediction):
            label = int(obj[1])
            score = float(obj[2])
            # The COCO API needs at least one detection per image, so the
            # first entry (obj_id == 0) is kept even with zero score.
            if obj_id != 0 and score == 0:
                continue
            # Boxes arrive normalized as [xmin, ymin, xmax, ymax]; scale to
            # pixels and convert to COCO's [x, y, w, h] rounded to 0.1 px.
            box = (obj[3:]).tolist()
            box[::2] = [width * v for v in box[::2]]
            box[1::2] = [height * v for v in box[1::2]]
            xmin, ymin, xmax, ymax = box
            detections.append({
                'image_id': image_id,
                'category_id': label,
                'bbox': [round(xmin, 1), round(ymin, 1),
                         round(xmax - xmin, 1), round(ymax - ymin, 1)],
                'score': score,
            })

    coco_dt = coco_annotations.loadRes(detections)
    coco_eval = COCOeval(coco_annotations, coco_dt, 'bbox')
    coco_eval.params.imgIds = sorted(coco_annotations.getImgIds())
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()

    return dict(zip(METRICS_NAMES, coco_eval.stats))
示例12: print_evaluation_scores
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
def print_evaluation_scores(json_file):
    """Return a dict of mAP scores for the detections in *json_file*.

    Always reports 'mAP(bbox)'; also reports 'mAP(segm)' when
    config.MODE_MASK is set.
    """
    assert config.BASEDIR and os.path.isdir(config.BASEDIR)
    annofile = os.path.join(
        config.BASEDIR, 'annotations',
        'instances_{}.json'.format(config.VAL_DATASET))
    gt = COCO(annofile)
    dt = gt.loadRes(json_file)

    def run_eval(iou_type):
        # Full evaluate/accumulate/summarize cycle; stats[0] is overall mAP.
        ev = COCOeval(gt, dt, iou_type)
        ev.evaluate()
        ev.accumulate()
        ev.summarize()
        return ev.stats[0]

    ret = {'mAP(bbox)': run_eval('bbox')}
    if config.MODE_MASK:
        ret['mAP(segm)'] = run_eval('segm')
    return ret
示例13: evaluate_coco
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
def evaluate_coco(model, dataset, coco, config, eval_type="bbox", limit=None, image_ids=None):
    """Run the official COCO evaluation, restricted to the 'person' category.

    Args:
        model: detection model forwarded to `inference`.
        dataset: a Dataset object with validation data.
        coco: pycocotools COCO object holding the ground truth.
        config: inference configuration forwarded to `inference`.
        eval_type: "bbox" or "segm" for bounding-box vs. segmentation evaluation.
        limit: if truthy, evaluate only the first `limit` images.
        image_ids: optional explicit image ids; defaults to dataset.image_ids.
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids
    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]
    # Get corresponding COCO image IDs (renamed loop var: `id` shadowed a builtin).
    coco_image_ids = [dataset.image_info[img]["id"] for img in image_ids]

    t_prediction = 0
    t_start = time.time()
    results = []
    for i, image_id in enumerate(image_ids):
        if i % 10 == 0:
            print('Processed %d images' % i)
        # Load image and run detection, timing only the model call.
        image = dataset.load_image(image_id)
        t = time.time()
        r = inference(image, model, config)
        t_prediction += (time.time() - t)
        # Convert results to COCO format
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"], r["masks"])
        results.extend(image_results)

    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)

    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    # Only evaluate for person.
    cocoEval.params.catIds = coco.getCatIds(catNms=['person'])
    cocoEval.evaluate()
    # FIX: accumulate()/summarize() mutate the evaluator and return None;
    # the old `a = ...` / `b = ...` bindings were dead and misleading.
    cocoEval.accumulate()
    cocoEval.summarize()

    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
示例14: print_evaluation_scores
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
def print_evaluation_scores(json_file):
    """Return per-IoU-threshold and per-size mAP scores for *json_file*.

    Keys are 'mAP(bbox)/<field>' and, when config.MODE_MASK is set,
    'mAP(segm)/<field>' for the six standard COCO summary fields.
    """
    assert config.BASEDIR and os.path.isdir(config.BASEDIR)
    annofile = os.path.join(
        config.BASEDIR, 'annotations',
        'instances_{}.json'.format(config.VAL_DATASET))
    gt = COCO(annofile)
    dt = gt.loadRes(json_file)

    fields = ['IoU=0.5:0.95', 'IoU=0.5', 'IoU=0.75', 'small', 'medium', 'large']

    def run_eval(iou_type):
        # Full evaluate/accumulate/summarize cycle; return the stats vector.
        ev = COCOeval(gt, dt, iou_type)
        ev.evaluate()
        ev.accumulate()
        ev.summarize()
        return ev.stats

    ret = {}
    # zip caps the pairing at the six named fields.
    for field, value in zip(fields, run_eval('bbox')):
        ret['mAP(bbox)/' + field] = value
    if config.MODE_MASK:
        for field, value in zip(fields, run_eval('segm')):
            ret['mAP(segm)/' + field] = value
    return ret
示例15: COCO
# 需要导入模块: from pycocotools.cocoeval import COCOeval [as 别名]
# 或者: from pycocotools.cocoeval.COCOeval import summarize [as 别名]
from fast_rcnn.nms_wrapper import nms
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import pylab

if __name__ == '__main__':
    pylab.rcParams['figure.figsize'] = (10.0, 8.0)
    annType = 'bbox'
    ground_truth = '/mnt/d/BigData/COCO/instances_train-val2014/annotations/instances_val2014.json'
    generated_result = '/mnt/c/Users/Lavenger/git/py-faster-rcnn/tools/result.json'
    # FIX: the ground truth must come from the annotation file; the old code
    # passed `generated_result` to COCO(), leaving `ground_truth` unused.
    cocoGt = COCO(ground_truth)
    cocoDt = cocoGt.loadRes(generated_result)
    # FIX: `imgIds` was undefined (NameError); evaluate over all GT images.
    imgIds = sorted(cocoGt.getImgIds())
    cocoEval = COCOeval(cocoGt, cocoDt)
    cocoEval.params.imgIds = imgIds
    cocoEval.params.useSegm = False  # bbox evaluation (legacy flag)
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()