This page collects typical usage examples of the Python method datasets.json_dataset_evaluator.evaluate_boxes. If you have been wondering what json_dataset_evaluator.evaluate_boxes does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing module, datasets.json_dataset_evaluator.
The following shows 10 code examples of json_dataset_evaluator.evaluate_boxes, ordered by popularity by default.
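Before the examples, here is a minimal sketch of how the evaluator is typically called, based only on the call signature that recurs in the examples below. The dataset name, the output directory, and the import path for JsonDataset are placeholders/assumptions, not taken from this page.

# A minimal, hedged usage sketch. Assumptions: a Detectron-style project in which
# JsonDataset lives in datasets.json_dataset, and all_boxes[cls][img] is an
# N x 5 array of [x1, y1, x2, y2, score] detections produced by the test loop.
from datasets import json_dataset_evaluator
from datasets.json_dataset import JsonDataset  # assumed module path

dataset = JsonDataset('coco_2014_minival')     # placeholder dataset name
coco_eval = json_dataset_evaluator.evaluate_boxes(
    dataset, all_boxes, '/tmp/detectron_output',  # placeholder output dir
    use_salt=True, cleanup=True
)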
Example 1: evaluate_all
# Required import: from datasets import json_dataset_evaluator [as alias]
# Or alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_all(
    dataset, all_boxes, all_segms, all_keyps, output_dir, use_matlab=False
):
    """Evaluate "all" tasks, where "all" includes box detection, instance
    segmentation, and keypoint detection.
    """
    all_results = evaluate_boxes(
        dataset, all_boxes, output_dir, use_matlab=use_matlab
    )
    logger.info('Evaluating bounding boxes is done!')
    if cfg.MODEL.MASK_ON:
        results = evaluate_masks(dataset, all_boxes, all_segms, output_dir)
        all_results[dataset.name].update(results[dataset.name])
        logger.info('Evaluating segmentations is done!')
    if cfg.MODEL.KEYPOINTS_ON:
        results = evaluate_keypoints(dataset, all_boxes, all_keyps, output_dir)
        all_results[dataset.name].update(results[dataset.name])
        logger.info('Evaluating keypoints is done!')
    return all_results
Example 2: evaluate_boxes
# Required import: from datasets import json_dataset_evaluator [as alias]
# Or alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_boxes(dataset, all_boxes, output_dir, test_corloc=False, use_matlab=False):
    """Evaluate bounding box detection."""
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_voc_evaluator(dataset):
        # For VOC, always use salt and always cleanup because results are
        # written to the shared VOCdevkit results directory
        voc_eval = voc_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, test_corloc=test_corloc,
            use_matlab=use_matlab
        )
        box_results = _voc_eval_to_box_results(voc_eval)
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name)
        )
    return OrderedDict([(dataset.name, box_results)])
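Note that this wrapper (and the variants in the later examples) returns an OrderedDict keyed by dataset.name, which is what allows evaluate_all in Example 1 to merge mask and keypoint results into the same dictionary. A small, hedged sketch of consuming that return value; the inner structure of box_results comes from _coco_eval_to_box_results / _voc_eval_to_box_results and is not shown on this page.

# Hedged sketch: iterating the OrderedDict returned by the wrapper above.
all_results = evaluate_boxes(dataset, all_boxes, output_dir)
for dataset_name, box_results in all_results.items():
    # box_results is whatever the per-dataset converter produced above
    logger.info('Box results for {}: {}'.format(dataset_name, box_results))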
Example 3: evaluate_all
# Required import: from datasets import json_dataset_evaluator [as alias]
# Or alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_all(
    dataset, all_boxes, all_segms, all_keyps, output_dir, use_matlab=False
):
    """Evaluate "all" tasks, where "all" includes box detection and instance
    segmentation.
    """
    all_results = evaluate_boxes(
        dataset, all_boxes, output_dir, use_matlab=use_matlab
    )
    logger.info('Evaluating bounding boxes is done!')
    if cfg.MODEL.MASK_ON:
        results = evaluate_masks(dataset, all_boxes, all_segms, output_dir)
        all_results[dataset.name].update(results[dataset.name])
        logger.info('Evaluating segmentations is done!')
    return all_results
Example 4: evaluate_boxes
# Required import: from datasets import json_dataset_evaluator [as alias]
# Or alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
    """Evaluate bounding box detection."""
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_cityscapes_evaluator(dataset):
        logger.warn('Cityscapes bbox evaluated using COCO metrics/conversions')
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_voc_evaluator(dataset):
        # For VOC, always use salt and always cleanup because results are
        # written to the shared VOCdevkit results directory
        voc_eval = voc_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_matlab=use_matlab
        )
        box_results = _voc_eval_to_box_results(voc_eval)
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name)
        )
    return OrderedDict([(dataset.name, box_results)])
Example 5: evaluate_all
# Required import: from datasets import json_dataset_evaluator [as alias]
# Or alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_all(
    dataset, all_boxes, output_dir, test_corloc=False, use_matlab=False
):
    """Evaluate "all" tasks, which in this variant reduces to box detection
    only.
    """
    all_results = evaluate_boxes(
        dataset, all_boxes, output_dir, test_corloc=test_corloc,
        use_matlab=use_matlab
    )
    logger.info('Evaluating bounding boxes is done!')
    return all_results
Example 6: evaluate_boxes
# Required import: from datasets import json_dataset_evaluator [as alias]
# Or alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
    """Evaluate bounding box detection."""
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_cityscapes_evaluator(dataset):
        logger.warn('Cityscapes bbox evaluated using COCO metrics/conversions')
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_vg_evaluator(dataset):
        logger.warn('Visual Genome bbox evaluated using COCO metrics/conversions')
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_voc_evaluator(dataset):
        # For VOC, always use salt and always cleanup because results are
        # written to the shared VOCdevkit results directory
        voc_eval = voc_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_matlab=use_matlab
        )
        box_results = _voc_eval_to_box_results(voc_eval)
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name)
        )
    return OrderedDict([(dataset.name, box_results)])
Example 7: evaluate_boxes
# Required import: from datasets import json_dataset_evaluator [as alias]
# Or alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
    """Evaluate bounding box detection."""
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_cityscapes_evaluator(dataset):
        logger.warn('Cityscapes bbox evaluated using COCO metrics/conversions')
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_voc_evaluator(dataset):
        # For VOC, always use salt and always cleanup because results are
        # written to the shared VOCdevkit results directory
        voc_eval = voc_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_matlab=use_matlab
        )
        box_results = _voc_eval_to_box_results(voc_eval)
    elif _use_no_evaluator(dataset):
        box_results = _empty_box_results()
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name)
        )
    return OrderedDict([(dataset.name, box_results)])
Example 8: evaluate_all
# Required import: from datasets import json_dataset_evaluator [as alias]
# Or alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_all(
    dataset, all_boxes, all_segms, all_keyps, all_hois, all_keyps_vcoco, output_dir, use_matlab=False
):
    """Evaluate "all" tasks, where "all" includes box detection, instance
    segmentation, and keypoint detection.
    """
    all_results = evaluate_boxes(
        dataset, all_boxes, output_dir, use_matlab=use_matlab
    )
    logger.info('Evaluating bounding boxes is done!')
    if cfg.MODEL.MASK_ON:
        results = evaluate_masks(dataset, all_boxes, all_segms, output_dir)
        all_results[dataset.name].update(results[dataset.name])
        logger.info('Evaluating segmentations is done!')
    if cfg.MODEL.KEYPOINTS_ON:
        results = evaluate_keypoints(dataset, all_boxes, all_keyps, output_dir)
        all_results[dataset.name].update(results[dataset.name])
        logger.info('Evaluating keypoints is done!')
    if cfg.MODEL.VCOCO_ON:
        results = evaluate_hoi_vcoco(dataset, all_hois, output_dir)
        # all_results[dataset.name].update(results[dataset.name])
        # if cfg.VCOCO.KEYPOINTS_ON:
        #     results = evaluate_keypoints(dataset, all_boxes, all_keyps_vcoco, output_dir)
        #     all_results[dataset.name].update(results[dataset.name])
        logger.info('Evaluating hois is done!')
    return all_results
Example 9: evaluate_boxes
# Required import: from datasets import json_dataset_evaluator [as alias]
# Or alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
    """Evaluate bounding box detection."""
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name)
        )
    return OrderedDict([(dataset.name, box_results)])
Example 10: eval_json
# Required import: from datasets import json_dataset_evaluator [as alias]
# Or alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def eval_json(det_json, gt_json):
    # Note: gt_dataset_name and output_dir are assumed to be module-level
    # globals; the gt_json argument is immediately overridden by the dataset
    # catalog's annotation file. Helpers such as empty_results, get_by_filename,
    # get_boxes_by_img_id and disp_detection_eval_metrics are defined elsewhere
    # in the same script.
    json_dataset = JsonDataset(gt_dataset_name)
    gt_json = dataset_catalog.DATASETS[gt_dataset_name]['annotation_file']
    with open(det_json, 'rb') as f:
        det = json.load(f)
    with open(gt_json, 'rb') as f:
        gt = json.load(f)
    # convert det to the all_boxes list
    num_images = len(gt['images'])
    num_classes = 2
    print('Total number of images:', len(det['images']))
    all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)
    for cls in range(num_classes):
        for image in range(num_images):
            filename = gt['images'][image]['file_name']
            fid = gt['images'][image]['id']
            img_prop = get_by_filename(det, filename)
            if img_prop is not None:
                img_id, det_prop = img_prop
                boxes = get_boxes_by_img_id(det, img_id)
                if image % 100 == 0:
                    print('Reading detections for:', filename, '--', det_prop['file_name'])
                    print('Det json:', det_json)
                if 'score' in boxes[0]:
                    boxes = np.array([b['bbox'] + [b['score']] for b in boxes])
                else:
                    boxes = np.array([b['bbox'] for b in boxes])
                if len(boxes) > 0:
                    # add w, h to get (x2, y2)
                    boxes[:, 2] += boxes[:, 0]
                    boxes[:, 3] += boxes[:, 1]
                all_boxes[cls][image] = boxes
            else:
                all_boxes[cls][image] = []
    # save detections
    with open(os.path.join(output_dir, 'detections.pkl'), 'wb') as f:
        pickle.dump(dict(all_boxes=all_boxes, all_segms=all_segms, all_keyps=all_keyps), f)
    # input(len(all_boxes[0]))
    coco_eval = evaluate_boxes(json_dataset, all_boxes, output_dir)
    # coco_eval = task_evaluation.evaluate_all(json_dataset, all_boxes, all_segms, all_keyps, output_dir)
    disp_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.5, iou_high=0.5, output_dir=output_dir)
    disp_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.75, iou_high=0.75, output_dir=output_dir)
    disp_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.5, iou_high=0.95, output_dir=output_dir)
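Example 10 rebuilds the all_boxes structure from a detection JSON and a COCO-style ground-truth JSON, then re-runs the box evaluation at several IoU ranges. A hedged sketch of how it might be invoked; the file paths are placeholders, and gt_dataset_name / output_dir are assumed to be set at module level before the call, as the snippet itself requires.

# Hypothetical invocation of eval_json; paths and names are placeholders.
gt_dataset_name = 'coco_2014_minival'   # assumed module-level global
output_dir = '/tmp/eval_output'         # assumed module-level global
eval_json('/path/to/detections.json', '/path/to/instances_gt.json')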