

Python json_dataset_evaluator.evaluate_boxes Method Code Examples

This article collects typical code examples of the Python method datasets.json_dataset_evaluator.evaluate_boxes. If you are unsure what json_dataset_evaluator.evaluate_boxes does or how to call it, the curated examples below should help. You can also explore further usage examples from the datasets.json_dataset_evaluator module.


The following presents 10 code examples of the json_dataset_evaluator.evaluate_boxes method, ordered by popularity by default.
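Before going through the examples, here is a minimal sketch of how json_dataset_evaluator.evaluate_boxes is typically called in a Detectron-style codebase. The dataset name 'coco_2014_minival', the JsonDataset import path, and the empty all_boxes placeholder are illustrative assumptions only; the examples below show the actual call sites from real projects.

import numpy as np
from datasets import json_dataset_evaluator
from datasets.json_dataset import JsonDataset  # assumed import path

# Assumed dataset name; any dataset registered in the dataset catalog works.
dataset = JsonDataset('coco_2014_minival')
num_images = len(dataset.get_roidb())
num_classes = dataset.num_classes

# all_boxes[cls][img] is an N x 5 float32 array of [x1, y1, x2, y2, score];
# it is filled with empty detections here only to show the expected shape.
all_boxes = [
    [np.empty((0, 5), dtype=np.float32) for _ in range(num_images)]
    for _ in range(num_classes)
]

coco_eval = json_dataset_evaluator.evaluate_boxes(
    dataset, all_boxes, '/tmp/eval_output', use_salt=True, cleanup=True
)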

Example 1: evaluate_all

# Required import: from datasets import json_dataset_evaluator [as alias]
# Alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_all(
    dataset, all_boxes, all_segms, all_keyps, output_dir, use_matlab=False
):
    """Evaluate "all" tasks, where "all" includes box detection, instance
    segmentation, and keypoint detection.
    """
    all_results = evaluate_boxes(
        dataset, all_boxes, output_dir, use_matlab=use_matlab
    )
    logger.info('Evaluating bounding boxes is done!')
    if cfg.MODEL.MASK_ON:
        results = evaluate_masks(dataset, all_boxes, all_segms, output_dir)
        all_results[dataset.name].update(results[dataset.name])
        logger.info('Evaluating segmentations is done!')
    if cfg.MODEL.KEYPOINTS_ON:
        results = evaluate_keypoints(dataset, all_boxes, all_keyps, output_dir)
        all_results[dataset.name].update(results[dataset.name])
        logger.info('Evaluating keypoints is done!')
    return all_results 
Developer ID: roytseng-tw, Project: Detectron.pytorch, Lines of code: 21, Source file: task_evaluation.py

Example 2: evaluate_boxes

# Required import: from datasets import json_dataset_evaluator [as alias]
# Alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_boxes(dataset, all_boxes, output_dir, test_corloc=False, use_matlab=False):
    """Evaluate bounding box detection."""
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_voc_evaluator(dataset):
        # For VOC, always use salt and always cleanup because results are
        # written to the shared VOCdevkit results directory
        voc_eval = voc_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, test_corloc=test_corloc,
            use_matlab=use_matlab
        )
        box_results = _voc_eval_to_box_results(voc_eval)
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name)
        )
    return OrderedDict([(dataset.name, box_results)]) 
Developer ID: ppengtang, Project: pcl.pytorch, Lines of code: 24, Source file: task_evaluation.py

Example 3: evaluate_all

# Required import: from datasets import json_dataset_evaluator [as alias]
# Alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_all(
    dataset, all_boxes, all_segms, all_keyps, output_dir, use_matlab=False
):
    """Evaluate "all" tasks, where "all" includes box detection, instance
    segmentation.
    """
    all_results = evaluate_boxes(
        dataset, all_boxes, output_dir, use_matlab=use_matlab
    )
    logger.info('Evaluating bounding boxes is done!')
    if cfg.MODEL.MASK_ON:
        results = evaluate_masks(dataset, all_boxes, all_segms, output_dir)
        all_results[dataset.name].update(results[dataset.name])
        logger.info('Evaluating segmentations is done!')
    
    return all_results 
Developer ID: jz462, Project: Large-Scale-VRD.pytorch, Lines of code: 18, Source file: task_evaluation.py

Example 4: evaluate_boxes

# Required import: from datasets import json_dataset_evaluator [as alias]
# Alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
    """Evaluate bounding box detection."""
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_cityscapes_evaluator(dataset):
        logger.warn('Cityscapes bbox evaluated using COCO metrics/conversions')
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_voc_evaluator(dataset):
        # For VOC, always use salt and always cleanup because results are
        # written to the shared VOCdevkit results directory
        voc_eval = voc_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_matlab=use_matlab
        )
        box_results = _voc_eval_to_box_results(voc_eval)
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name)
        )
    return OrderedDict([(dataset.name, box_results)]) 
Developer ID: roytseng-tw, Project: Detectron.pytorch, Lines of code: 29, Source file: task_evaluation.py

Example 5: evaluate_all

# Required import: from datasets import json_dataset_evaluator [as alias]
# Alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_all(
    dataset, all_boxes, output_dir, test_corloc=False, use_matlab=False
):
    """Evaluate "all" tasks, where "all" includes box detection, instance
    segmentation, and keypoint detection.
    """
    all_results = evaluate_boxes(
        dataset, all_boxes, output_dir, test_corloc=test_corloc,
        use_matlab=use_matlab
    )
    logger.info('Evaluating bounding boxes is done!')
    return all_results 
Developer ID: ppengtang, Project: pcl.pytorch, Lines of code: 14, Source file: task_evaluation.py

Example 6: evaluate_boxes

# Required import: from datasets import json_dataset_evaluator [as alias]
# Alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
    """Evaluate bounding box detection."""
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_cityscapes_evaluator(dataset):
        logger.warn('Cityscapes bbox evaluated using COCO metrics/conversions')
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_vg_evaluator(dataset):
        logger.warn('Visual Genome bbox evaluated using COCO metrics/conversions')
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_voc_evaluator(dataset):
        # For VOC, always use salt and always cleanup because results are
        # written to the shared VOCdevkit results directory
        voc_eval = voc_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_matlab=use_matlab
        )
        box_results = _voc_eval_to_box_results(voc_eval)
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name)
        )
    return OrderedDict([(dataset.name, box_results)]) 
Developer ID: ruotianluo, Project: Context-aware-ZSR, Lines of code: 35, Source file: task_evaluation.py

Example 7: evaluate_boxes

# Required import: from datasets import json_dataset_evaluator [as alias]
# Alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
    """Evaluate bounding box detection."""
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_cityscapes_evaluator(dataset):
        logger.warn('Cityscapes bbox evaluated using COCO metrics/conversions')
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    elif _use_voc_evaluator(dataset):
        # For VOC, always use salt and always cleanup because results are
        # written to the shared VOCdevkit results directory
        voc_eval = voc_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_matlab=use_matlab
        )
        box_results = _voc_eval_to_box_results(voc_eval)
    elif _use_no_evaluator(dataset):
        box_results = _empty_box_results()
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name)
        )
    return OrderedDict([(dataset.name, box_results)]) 
Developer ID: ronghanghu, Project: seg_every_thing, Lines of code: 31, Source file: task_evaluation.py

Example 8: evaluate_all

# Required import: from datasets import json_dataset_evaluator [as alias]
# Alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_all(
    dataset, all_boxes, all_segms, all_keyps, all_hois, all_keyps_vcoco, output_dir, use_matlab=False
):
    """Evaluate "all" tasks, where "all" includes box detection, instance
    segmentation, and keypoint detection.
    """
    all_results = evaluate_boxes(
        dataset, all_boxes, output_dir, use_matlab=use_matlab
    )
    logger.info('Evaluating bounding boxes is done!')
    if cfg.MODEL.MASK_ON:
        results = evaluate_masks(dataset, all_boxes, all_segms, output_dir)
        all_results[dataset.name].update(results[dataset.name])
        logger.info('Evaluating segmentations is done!')
    if cfg.MODEL.KEYPOINTS_ON:
        results = evaluate_keypoints(dataset, all_boxes, all_keyps, output_dir)
        all_results[dataset.name].update(results[dataset.name])
        logger.info('Evaluating keypoints is done!')
    if cfg.MODEL.VCOCO_ON:
        results = evaluate_hoi_vcoco(dataset, all_hois, output_dir)
        #all_results[dataset.name].update(results[dataset.name])
        # if cfg.VCOCO.KEYPOINTS_ON:
            # results = evaluate_keypoints(dataset, all_boxes, all_keyps_vcoco, output_dir)
            # all_results[dataset.name].update(results[dataset.name])
        logger.info('Evaluating hois is done!')
    return all_results 
Developer ID: bobwan1995, Project: PMFNet, Lines of code: 28, Source file: task_evaluation.py

Example 9: evaluate_boxes

# Required import: from datasets import json_dataset_evaluator [as alias]
# Alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
    """Evaluate bounding box detection."""
    logger.info('Evaluating detections')
    not_comp = not cfg.TEST.COMPETITION_MODE
    if _use_json_dataset_evaluator(dataset):
        coco_eval = json_dataset_evaluator.evaluate_boxes(
            dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp
        )
        box_results = _coco_eval_to_box_results(coco_eval)
    else:
        raise NotImplementedError(
            'No evaluator for dataset: {}'.format(dataset.name)
        )
    return OrderedDict([(dataset.name, box_results)]) 
Developer ID: jz462, Project: Large-Scale-VRD.pytorch, Lines of code: 16, Source file: task_evaluation.py

Example 10: eval_json

# Required import: from datasets import json_dataset_evaluator [as alias]
# Alternatively: from datasets.json_dataset_evaluator import evaluate_boxes [as alias]
def eval_json(det_json, gt_json):
    json_dataset = JsonDataset(gt_dataset_name)
    # NOTE: the gt_json argument is replaced by the annotation file registered
    # for gt_dataset_name in the dataset catalog.
    gt_json = dataset_catalog.DATASETS[gt_dataset_name]['annotation_file']
    with open(det_json, 'rb') as f:
        det = json.load(f)
    with open(gt_json, 'rb') as f:
        gt = json.load(f)

    # convert det to the all_boxes list
    num_images = len(gt['images'])
    num_classes = 2
    print('Total number of images:',len(det['images']))
    all_boxes, all_segms, all_keyps = empty_results(num_classes,num_images)
    for cls in range(num_classes):
        for image in range(num_images):
            filename = gt['images'][image]['file_name']
            fid = gt['images'][image]['id']
            img_prop = get_by_filename(det, filename)
            if img_prop is not None:
                img_id, det_prop = img_prop
                boxes = get_boxes_by_img_id(det, img_id)
                if image % 100 == 0:
                    print('Reading detections for:', filename, '--', det_prop['file_name'])
                    print('Det json:', det_json)
                if len(boxes) > 0:
                    if 'score' in boxes[0]:
                        boxes = np.array([b['bbox'] + [b['score']] for b in boxes])
                    else:
                        boxes = np.array([b['bbox'] for b in boxes])
                    # boxes are [x, y, w, h]; add w, h to get (x2, y2)
                    boxes[:, 2] += boxes[:, 0]
                    boxes[:, 3] += boxes[:, 1]
                    all_boxes[cls][image] = boxes
            else:
                all_boxes[cls][image] = []
    # save detections
    with open(os.path.join(output_dir, 'detections.pkl'), 'wb') as f:
        pickle.dump(dict(all_boxes=all_boxes, all_segms=all_segms, all_keyps=all_keyps), f)
    #input(len(all_boxes[0]))
    coco_eval = evaluate_boxes(json_dataset,all_boxes,output_dir)
    #coco_eval = task_evaluation.evaluate_all(json_dataset,all_boxes,all_segms,all_keyps,output_dir)

    disp_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.5, iou_high=0.5, output_dir=output_dir)
    disp_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.75, iou_high=0.75, output_dir=output_dir)
    disp_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.5, iou_high=0.95, output_dir=output_dir) 
Developer ID: AruniRC, Project: detectron-self-train, Lines of code: 50, Source file: evaluate_json.py


Note: The datasets.json_dataset_evaluator.evaluate_boxes examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; consult the corresponding project's license before distributing or using the code. Please do not reproduce this article without permission.