

Python task_evaluation.evaluate_all method code examples

This article collects typical usage examples of the Python method datasets.task_evaluation.evaluate_all. If you are wondering what task_evaluation.evaluate_all does and how to use it, the curated code examples below may help. You can also explore further usage examples from the datasets.task_evaluation module.


The following shows 7 code examples of task_evaluation.evaluate_all, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
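
All seven examples revolve around the same call pattern: build a JsonDataset, obtain detection results, and pass them to task_evaluation.evaluate_all together with an output directory. As a rough orientation, here is a minimal sketch of that pattern. The wrapper function evaluate_detections is hypothetical, and the exact import path of JsonDataset may differ slightly between the projects shown below.

# Minimal sketch of the shared call pattern (the wrapper itself is hypothetical;
# the JsonDataset import path may vary between the Detectron-style projects below).
from datasets import task_evaluation
from datasets.json_dataset import JsonDataset


def evaluate_detections(dataset_name, all_boxes, all_segms, all_keyps, output_dir):
    """Evaluate precomputed box/segmentation/keypoint results on a JSON dataset."""
    dataset = JsonDataset(dataset_name)
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
    return results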

Example 1: do_reval

# Required import: from datasets import task_evaluation [as alias]
# Or: from datasets.task_evaluation import evaluate_all [as alias]
def do_reval(dataset_name, output_dir, args):
    dataset = JsonDataset(dataset_name)
    with open(os.path.join(output_dir, 'detections.pkl'), 'rb') as f:
        dets = pickle.load(f)
    # Override config with the one saved in the detections file
    # (note: PyYAML >= 5.1 expects an explicit Loader, e.g.
    #  yaml.load(dets['cfg'], Loader=yaml.UnsafeLoader))
    if args.cfg_file is not None:
        core.config.merge_cfg_from_cfg(yaml.load(dets['cfg']))
    else:
        core.config._merge_a_into_b(yaml.load(dets['cfg']), cfg)
    results = task_evaluation.evaluate_all(
        dataset,
        dets['all_boxes'],
        dets['all_segms'],
        dets['all_keyps'],
        output_dir,
        use_matlab=args.matlab_eval
    )
    task_evaluation.log_copy_paste_friendly_results(results) 
Developer ID: ronghanghu, Project: seg_every_thing, Lines of code: 20, Source file: reval.py
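
In the seg_every_thing repository, do_reval is the body of a small re-evaluation script. Below is a hedged sketch of a command-line driver for it; the flag names (--dataset, --output_dir, --cfg, --matlab) are illustrative assumptions rather than the script's actual interface, but the attributes cfg_file and matlab_eval match what do_reval reads from args above.

# Hypothetical command-line driver for do_reval; flag names are assumptions.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description='Re-evaluate saved detections.pkl')
    parser.add_argument('--dataset', dest='dataset_name', required=True,
                        help='dataset name to evaluate on')
    parser.add_argument('--output_dir', required=True,
                        help='directory that contains detections.pkl')
    parser.add_argument('--cfg', dest='cfg_file', default=None,
                        help='optional config file (otherwise the saved cfg is merged)')
    parser.add_argument('--matlab', dest='matlab_eval', action='store_true',
                        help='use MATLAB for VOC-style evaluation')
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    do_reval(args.dataset_name, args.output_dir, args)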

Example 2: test_net_on_dataset

# Required import: from datasets import task_evaluation [as alias]
# Or: from datasets.task_evaluation import evaluate_all [as alias]
def test_net_on_dataset(output_dir, multi_gpu=False, gpu_id=0):
    """Run inference on a dataset."""
    dataset = JsonDataset(cfg.TEST.DATASET)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            num_images, output_dir
        )
    else:
        all_boxes, all_segms, all_keyps = test_net(output_dir, gpu_id=gpu_id)
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
    return results 
Developer ID: gangadhar-p, Project: NucleiDetectron, Lines of code: 20, Source file: test_engine.py

Example 3: test_net_on_dataset

# Required import: from datasets import task_evaluation [as alias]
# Or: from datasets.task_evaluation import evaluate_all [as alias]
def test_net_on_dataset(
        args,
        dataset_name,
        proposal_file,
        output_dir,
        multi_gpu=False,
        gpu_id=0):
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            args, dataset_name, proposal_file, num_images, output_dir
        )
    else:
        all_boxes, all_segms, all_keyps = test_net(
            args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
        )
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
    return results 
Developer ID: roytseng-tw, Project: Detectron.pytorch, Lines of code: 28, Source file: test_engine.py

Example 4: test_net_on_dataset

# Required import: from datasets import task_evaluation [as alias]
# Or: from datasets.task_evaluation import evaluate_all [as alias]
def test_net_on_dataset(
        args,
        dataset_name,
        proposal_file,
        output_dir,
        multi_gpu=False,
        gpu_id=0):
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes = multi_gpu_test_net_on_dataset(
            args, dataset_name, proposal_file, num_images, output_dir
        )
    else:
        all_boxes = test_net(
            args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
        )
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))

    roidb = dataset.get_roidb()
    num_images = len(roidb)
    num_classes = cfg.MODEL.NUM_CLASSES + 1
    final_boxes = empty_results(num_classes, num_images)
    test_corloc = 'train' in dataset_name
    for i, entry in enumerate(roidb):
        boxes = all_boxes[entry['image']]
        if test_corloc:
            _, _, cls_boxes_i = box_results_for_corloc(boxes['scores'], boxes['boxes'])
        else:
            _, _, cls_boxes_i = box_results_with_nms_and_limit(
                boxes['scores'], boxes['boxes']
            )
        extend_results(i, final_boxes, cls_boxes_i)
    results = task_evaluation.evaluate_all(
        dataset, final_boxes, output_dir, test_corloc
    )
    return results 
Developer ID: ppengtang, Project: pcl.pytorch, Lines of code: 42, Source file: test_engine.py
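
Note that this pcl.pytorch variant passes only box results plus a test_corloc flag to evaluate_all; the fork's evaluate_all signature evidently differs from the box/segmentation/keypoint variant used in the other examples.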

Example 5: test_net_on_dataset

# Required import: from datasets import task_evaluation [as alias]
# Or: from datasets.task_evaluation import evaluate_all [as alias]
def test_net_on_dataset(
        args,
        dataset_name,
        proposal_file,
        output_dir,
        ind_range=None,
        multi_gpu=False,
        gpu_id=0):
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            args, dataset_name, proposal_file, num_images, output_dir
        )
    else:
        all_boxes, all_segms, all_keyps = test_net(
            args, dataset_name, proposal_file, output_dir, ind_range=ind_range, gpu_id=gpu_id
        )
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))

    dataset.test_img_ids = sorted(dataset.COCO.getImgIds())
    if ind_range is not None:
        dataset.test_img_ids = dataset.test_img_ids[ind_range[0]:ind_range[1]]

    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
    return results 
Developer ID: ruotianluo, Project: Context-aware-ZSR, Lines of code: 34, Source file: test_engine.py

Example 6: test_net_on_dataset

# Required import: from datasets import task_evaluation [as alias]
# Or: from datasets.task_evaluation import evaluate_all [as alias]
def test_net_on_dataset(
    weights_file,
    dataset_name,
    proposal_file,
    output_dir,
    multi_gpu=False,
    gpu_id=0
):
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir
        )
    else:
        all_boxes, all_segms, all_keyps = test_net(
            weights_file, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
        )
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
    return results 
Developer ID: ronghanghu, Project: seg_every_thing, Lines of code: 29, Source file: test_engine.py

Example 7: eval_json

# Required import: from datasets import task_evaluation [as alias]
# Or: from datasets.task_evaluation import evaluate_all [as alias]
def eval_json(det_json, gt_json):
    # gt_dataset_name and output_dir are module-level globals in the original
    # script; the gt_json argument is overridden by the catalog lookup below.
    json_dataset = JsonDataset(gt_dataset_name)
    gt_json = dataset_catalog.DATASETS[gt_dataset_name]['annotation_file']
    with open(det_json, 'r') as f:
        det = json.load(f)
    with open(gt_json, 'r') as f:
        gt = json.load(f)

    # convert det to the all_boxes list
    num_images = len(gt['images'])
    num_classes = 2
    print('Total number of images:', len(det['images']))
    all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)
    for cls in range(num_classes):
        for image in range(num_images):
            filename = gt['images'][image]['file_name']
            fid = gt['images'][image]['id']
            img_prop = get_by_filename(det, filename)
            if img_prop is not None:
                img_id, det_prop = img_prop
                boxes = get_boxes_by_img_id(det, img_id)
                if image % 100 == 0:
                    print('Reading detections for:', filename, '--', det_prop['file_name'])
                    print('Det json:', det_json)
                # Guard against images that have no detections before probing boxes[0]
                if len(boxes) > 0 and 'score' in boxes[0]:
                    boxes = np.array([b['bbox'] + [b['score']] for b in boxes])
                else:
                    boxes = np.array([b['bbox'] for b in boxes])
                if len(boxes) > 0:
                    # boxes are (x, y, w, h); add w, h to get (x2, y2)
                    boxes[:, 2] += boxes[:, 0]
                    boxes[:, 3] += boxes[:, 1]
                    all_boxes[cls][image] = boxes
            else:
                all_boxes[cls][image] = []
    # save detections
    with open(os.path.join(output_dir, 'detections.pkl'), 'wb') as f:
        pickle.dump(dict(all_boxes=all_boxes, all_segms=all_segms, all_keyps=all_keyps), f)
    coco_eval = evaluate_boxes(json_dataset,all_boxes,output_dir)
    #coco_eval = task_evaluation.evaluate_all(json_dataset,all_boxes,all_segms,all_keyps,output_dir)

    disp_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.5, iou_high=0.5, output_dir=output_dir)
    disp_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.75, iou_high=0.75, output_dir=output_dir)
    disp_detection_eval_metrics(json_dataset, coco_eval, iou_low=0.5, iou_high=0.95, output_dir=output_dir) 
Developer ID: AruniRC, Project: detectron-self-train, Lines of code: 50, Source file: evaluate_json.py
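
In this last example, evaluate_all itself appears only in the commented-out line; the author evaluates boxes directly with evaluate_boxes. If you wanted to push the same saved detections through evaluate_all instead, a minimal sketch, assuming json_dataset and output_dir are the same objects used inside eval_json, could reuse the do_reval pattern from Example 1:

# Hypothetical follow-up: route the saved detections.pkl through evaluate_all.
# json_dataset and output_dir are assumed to match the objects used in eval_json.
import os
import pickle

from datasets import task_evaluation

with open(os.path.join(output_dir, 'detections.pkl'), 'rb') as f:
    dets = pickle.load(f)

results = task_evaluation.evaluate_all(
    json_dataset, dets['all_boxes'], dets['all_segms'], dets['all_keyps'], output_dir
)
task_evaluation.log_copy_paste_friendly_results(results)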


Note: The datasets.task_evaluation.evaluate_all method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to its original authors. Please refer to the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.