

Python boxes.nms Method Code Examples

This article collects typical usage examples of the Python method detectron.utils.boxes.nms. If you are unsure what boxes.nms does, how it is called, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the containing module, detectron.utils.boxes.


Four code examples of the boxes.nms method are presented below, sorted by popularity by default.
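Before the full examples, here is a minimal usage sketch of the call pattern they all share (it assumes Detectron and its Caffe2 dependencies are installed; the box coordinates and scores are made up for illustration). nms takes an (N, 5) float32 array whose rows are [x1, y1, x2, y2, score], plus an IoU threshold, and returns the indices of the boxes to keep.

import numpy as np
from detectron.utils import boxes as box_utils

# Each row is [x1, y1, x2, y2, score]; the values are illustrative only.
dets = np.array(
    [
        [10, 10, 50, 50, 0.90],      # highest-scoring box, always kept
        [12, 12, 52, 52, 0.80],      # overlaps the first box heavily -> suppressed
        [100, 100, 150, 150, 0.70],  # no overlap -> kept
    ],
    dtype=np.float32,
)

keep = box_utils.nms(dets, 0.5)  # 0.5 is the IoU threshold (cfg.TEST.NMS in the examples)
print(keep)                      # indices of the surviving boxes, e.g. [0, 2]

The examples below use exactly this call, either directly or alongside the soft-NMS and box-voting variants exposed by the same module.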

Example 1: detect_image

# Required import: from detectron.utils import boxes [as alias]
# Or: from detectron.utils.boxes import nms [as alias]
def detect_image(detectron_model, image, args):
    """Given an image and a detectron model, extract object boxes,
    classes, confidences and features from the image using the model.

    Parameters
    ----------
    detectron_model
        Detectron model.
    image : np.ndarray
        Image in BGR format.
    args : argparse.Namespace
        Parsed command-line arguments.

    Returns
    -------
    np.ndarray, np.ndarray, np.ndarray, np.ndarray
        Object bounding boxes, features, classes and confidence scores.
    """

    scores, cls_boxes, im_scale = detectron_test.im_detect_bbox(
        detectron_model,
        image,
        detectron_config.TEST.SCALE,
        detectron_config.TEST.MAX_SIZE,
        boxes=None,
    )
    num_proposals = scores.shape[0]

    rois = workspace.FetchBlob(f"gpu_{args.gpu_id}/rois")
    features = workspace.FetchBlob(
        f"gpu_{args.gpu_id}/{args.feat_name}"
    )

    cls_boxes = rois[:, 1:5] / im_scale
    max_conf = np.zeros((num_proposals,), dtype=np.float32)
    max_cls = np.zeros((num_proposals,), dtype=np.int32)
    max_box = np.zeros((num_proposals, 4), dtype=np.float32)

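    # For each foreground class, run NMS over all proposals; among the proposals
    # that survive, record this class wherever its score beats the best score seen
    # so far, so each proposal ends up labelled with its single highest-scoring class.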
    for cls_ind in range(1, detectron_config.MODEL.NUM_CLASSES):
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(
            np.float32
        )
        keep = np.array(detectron_nms(dets, detectron_config.TEST.NMS))
        idxs_update = np.where(cls_scores[keep] > max_conf[keep])
        keep_idxs = keep[idxs_update]
        max_conf[keep_idxs] = cls_scores[keep_idxs]
        max_cls[keep_idxs] = cls_ind
        max_box[keep_idxs] = dets[keep_idxs][:, :4]

    keep_boxes = np.argsort(max_conf)[::-1][:args.max_boxes]
    boxes = max_box[keep_boxes, :]
    classes = max_cls[keep_boxes]
    confidence = max_conf[keep_boxes]
    features = features[keep_boxes, :]
    return boxes, features, classes, confidence 
Developer: batra-mlp-lab, Project: visdial-challenge-starter-pytorch, Lines of code: 58, Source: extract_features_detectron.py

Example 2: mix_box_results_with_nms_and_limit

# Required import: from detectron.utils import boxes [as alias]
# Or: from detectron.utils.boxes import nms [as alias]
def mix_box_results_with_nms_and_limit(scores, boxes, mix_boxes):
    """Returns bounding-box detection results by thresholding on scores and
    applying non-maximum suppression (NMS).

    `boxes`      (#detections, 4 * #classes)
    `mix_boxes`  (#detections, 4 * #classes, mix)
    `scores`     (#detections, #classes)
    """
    num_classes = cfg.MODEL.NUM_CLASSES
    cls_boxes = [[] for _ in range(num_classes)]
    mix_dets = [[] for _ in range(num_classes)]
    # Apply threshold on detection probabilities and apply NMS
    # Skip j = 0, because it's the background class
    for j in range(1, num_classes):
        inds = np.where(scores[:, j] > cfg.TEST.SCORE_THRESH)[0]
        scores_j = scores[inds, j]
        boxes_j = boxes[inds, j * 4:(j + 1) * 4]
        mix_boxes_j = mix_boxes[inds, j * 4:(j + 1) * 4]
        dets_j = np.hstack((boxes_j, scores_j[:, np.newaxis])).astype(
            np.float32, copy=False
        )
        keep = box_utils.nms(dets_j, cfg.TEST.NMS)
        nms_dets = dets_j[keep, :]
        mix_dets[j] = mix_boxes_j[keep]
        cls_boxes[j] = nms_dets

    # Limit to max_per_image detections **over all classes**
    if cfg.TEST.DETECTIONS_PER_IM > 0:
        image_scores = np.hstack(
            [cls_boxes[j][:, -1] for j in range(1, num_classes)]
        )
        if len(image_scores) > cfg.TEST.DETECTIONS_PER_IM:
            image_thresh = np.sort(image_scores)[-cfg.TEST.DETECTIONS_PER_IM]
            for j in range(1, num_classes):
                keep = np.where(cls_boxes[j][:, -1] >= image_thresh)[0]
                cls_boxes[j] = cls_boxes[j][keep, :]
                mix_dets[j] = mix_dets[j][keep]

    im_results = np.vstack([cls_boxes[j] for j in range(1, num_classes)])
    boxes = im_results[:, :-1]
    scores = im_results[:, -1]
    return scores, boxes, cls_boxes, mix_dets 
Developer: yihui-he, Project: KL-Loss, Lines of code: 44, Source: test.py

Example 3: im_detect_all

# Required import: from detectron.utils import boxes [as alias]
# Or: from detectron.utils.boxes import nms [as alias]
def im_detect_all(model, im, box_proposals, timers=None):
    if timers is None:
        timers = defaultdict(Timer)

    # Handle RetinaNet testing separately for now
    if cfg.RETINANET.RETINANET_ON:
        cls_boxes = test_retinanet.im_detect_bbox(model, im, timers)
        return cls_boxes, None, None

    timers['im_detect_bbox'].tic()
    if cfg.TEST.BBOX_AUG.ENABLED:
        scores, boxes, im_scale = im_detect_bbox_aug(model, im, box_proposals)
    else:
        scores, boxes, im_scale = im_detect_bbox(
            model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals
        )
    timers['im_detect_bbox'].toc()

    # scores and boxes cover the whole image after score thresholding and NMS
    # (they are not separated by class).
    # cls_boxes contains boxes and scores separated by class, in the format
    # used for evaluating results.
    timers['misc_bbox'].tic()
    scores, boxes, cls_boxes = box_results_with_nms_and_limit(scores, boxes)
    timers['misc_bbox'].toc()

    if cfg.MODEL.MASK_ON and boxes.shape[0] > 0:
        timers['im_detect_mask'].tic()
        if cfg.TEST.MASK_AUG.ENABLED:
            masks = im_detect_mask_aug(model, im, boxes)
        else:
            masks = im_detect_mask(model, im_scale, boxes)
        timers['im_detect_mask'].toc()

        timers['misc_mask'].tic()
        cls_segms = segm_results(
            cls_boxes, masks, boxes, im.shape[0], im.shape[1]
        )
        timers['misc_mask'].toc()
    else:
        cls_segms = None

    if cfg.MODEL.KEYPOINTS_ON and boxes.shape[0] > 0:
        timers['im_detect_keypoints'].tic()
        if cfg.TEST.KPS_AUG.ENABLED:
            heatmaps = im_detect_keypoints_aug(model, im, boxes)
        else:
            heatmaps = im_detect_keypoints(model, im_scale, boxes)
        timers['im_detect_keypoints'].toc()

        timers['misc_keypoints'].tic()
        cls_keyps = keypoint_results(cls_boxes, heatmaps, boxes)
        timers['misc_keypoints'].toc()
    else:
        cls_keyps = None

    return cls_boxes, cls_segms, cls_keyps 
Developer: fyangneil, Project: Clustered-Object-Detection-in-Aerial-Image, Lines of code: 59, Source: test.py

Example 4: box_results_with_nms_and_limit

# Required import: from detectron.utils import boxes [as alias]
# Or: from detectron.utils.boxes import nms [as alias]
def box_results_with_nms_and_limit(scores, boxes):
    """Returns bounding-box detection results by thresholding on scores and
    applying non-maximum suppression (NMS).

    `boxes` has shape (#detections, 4 * #classes), where each row represents
    a list of predicted bounding boxes for each of the object classes in the
    dataset (including the background class). The detections in each row
    originate from the same object proposal.

    `scores` has shape (#detections, #classes), where each row represents a list
    of object detection confidence scores for each of the object classes in the
    dataset (including the background class). `scores[i, j]` corresponds to the
    box at `boxes[i, j * 4:(j + 1) * 4]`.
    """
    num_classes = cfg.MODEL.NUM_CLASSES
    cls_boxes = [[] for _ in range(num_classes)]
    # Apply threshold on detection probabilities and apply NMS
    # Skip j = 0, because it's the background class
    for j in range(1, num_classes):
        inds = np.where(scores[:, j] > cfg.TEST.SCORE_THRESH)[0]
        scores_j = scores[inds, j]
        boxes_j = boxes[inds, j * 4:(j + 1) * 4]
        dets_j = np.hstack((boxes_j, scores_j[:, np.newaxis])).astype(
            np.float32, copy=False
        )
        if cfg.TEST.SOFT_NMS.ENABLED:
            nms_dets, _ = box_utils.soft_nms(
                dets_j,
                sigma=cfg.TEST.SOFT_NMS.SIGMA,
                overlap_thresh=cfg.TEST.NMS,
                score_thresh=0.0001,
                method=cfg.TEST.SOFT_NMS.METHOD
            )
        else:
            keep = box_utils.nms(dets_j, cfg.TEST.NMS)
            nms_dets = dets_j[keep, :]
        # Refine the post-NMS boxes using bounding-box voting
        if cfg.TEST.BBOX_VOTE.ENABLED:
            nms_dets = box_utils.box_voting(
                nms_dets,
                dets_j,
                cfg.TEST.BBOX_VOTE.VOTE_TH,
                scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD
            )
        cls_boxes[j] = nms_dets

    # Limit to max_per_image detections **over all classes**
    if cfg.TEST.DETECTIONS_PER_IM > 0:
        image_scores = np.hstack(
            [cls_boxes[j][:, -1] for j in range(1, num_classes)]
        )
        if len(image_scores) > cfg.TEST.DETECTIONS_PER_IM:
            image_thresh = np.sort(image_scores)[-cfg.TEST.DETECTIONS_PER_IM]
            for j in range(1, num_classes):
                keep = np.where(cls_boxes[j][:, -1] >= image_thresh)[0]
                cls_boxes[j] = cls_boxes[j][keep, :]

    im_results = np.vstack([cls_boxes[j] for j in range(1, num_classes)])
    boxes = im_results[:, :-1]
    scores = im_results[:, -1]
    return scores, boxes, cls_boxes 
Developer: zhaoweicai, Project: Detectron-Cascade-RCNN, Lines of code: 63, Source: test.py
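To make the box/score layout described in Example 4's docstring concrete, the short sketch below uses randomly generated placeholder arrays rather than real detections: with num_classes = 3, each proposal contributes one row to scores and one 12-wide row to boxes, and class j's box for proposal i sits at boxes[i, j * 4:(j + 1) * 4], matching scores[i, j].

import numpy as np

num_classes = 3      # including the background class at index 0
num_proposals = 2

# Placeholder inputs with the shapes box_results_with_nms_and_limit expects.
scores = np.random.rand(num_proposals, num_classes).astype(np.float32)
boxes = np.random.rand(num_proposals, 4 * num_classes).astype(np.float32)

j = 1                                   # first foreground class
boxes_j = boxes[:, j * 4:(j + 1) * 4]   # (num_proposals, 4) boxes predicted for class j
scores_j = scores[:, j]                 # scores_j[i] is the confidence for boxes_j[i]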


Note: The detectron.utils.boxes.nms method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.