This article collects typical usage examples of the Python method detectron.utils.boxes.nms. If you are wondering what boxes.nms does, how it is called, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the module detectron.utils.boxes that this method belongs to.
Four code examples of the boxes.nms method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Python code examples.
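The examples below build a float32 array of shape (N, 5) whose rows are [x1, y1, x2, y2, score], pass it to nms together with an IoU threshold such as cfg.TEST.NMS, and index back into the array with the returned keep indices. As context, here is a plain-NumPy sketch of the greedy algorithm such a call performs; it is only a reference stand-in, not Detectron's Cython implementation.
import numpy as np

def nms_reference(dets, thresh):
    """Greedy NMS over `dets` of shape (N, 5): [x1, y1, x2, y2, score].

    Returns the indices of the kept boxes, highest score first. Reference
    stand-in for detectron.utils.boxes.nms, written only to show the
    expected input/output format.
    """
    if dets.shape[0] == 0:
        return []
    x1, y1, x2, y2, scores = (dets[:, k] for k in range(5))
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # proposal indices by descending score

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        # Intersection of the current top box with all remaining boxes.
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # Drop boxes whose overlap with the kept box exceeds the threshold.
        order = order[np.where(iou <= thresh)[0] + 1]
    return keep

dets = np.array([
    [10, 10, 50, 50, 0.9],
    [12, 12, 52, 52, 0.8],      # heavily overlaps the first box
    [100, 100, 150, 150, 0.7],
], dtype=np.float32)
print(nms_reference(dets, 0.5))  # -> [0, 2]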
Example 1: detect_image
# Required import: from detectron.utils import boxes [as an alias]
# or: from detectron.utils.boxes import nms [as an alias]
def detect_image(detectron_model, image, args):
    """Given an image and a detectron model, extract object boxes,
    classes, confidences and features from the image using the model.

    Parameters
    ----------
    detectron_model
        Detectron model.
    image : np.ndarray
        Image in BGR format.
    args : argparse.Namespace
        Parsed command-line arguments.

    Returns
    -------
    np.ndarray, np.ndarray, np.ndarray, np.ndarray
        Object bounding boxes, features, classes and confidences (in that order).
    """
    scores, cls_boxes, im_scale = detectron_test.im_detect_bbox(
        detectron_model,
        image,
        detectron_config.TEST.SCALE,
        detectron_config.TEST.MAX_SIZE,
        boxes=None,
    )
    num_proposals = scores.shape[0]
    rois = workspace.FetchBlob(f"gpu_{args.gpu_id}/rois")
    features = workspace.FetchBlob(f"gpu_{args.gpu_id}/{args.feat_name}")

    cls_boxes = rois[:, 1:5] / im_scale
    max_conf = np.zeros((num_proposals,), dtype=np.float32)
    max_cls = np.zeros((num_proposals,), dtype=np.int32)
    max_box = np.zeros((num_proposals, 4), dtype=np.float32)

    # For each foreground class, run NMS and record, per proposal, the
    # highest class score that survived it.
    for cls_ind in range(1, detectron_config.MODEL.NUM_CLASSES):
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = np.array(detectron_nms(dets, detectron_config.TEST.NMS))
        idxs_update = np.where(cls_scores[keep] > max_conf[keep])
        keep_idxs = keep[idxs_update]
        max_conf[keep_idxs] = cls_scores[keep_idxs]
        max_cls[keep_idxs] = cls_ind
        max_box[keep_idxs] = dets[keep_idxs][:, :4]

    # Keep the top-scoring proposals (at most args.max_boxes).
    keep_boxes = np.argsort(max_conf)[::-1][:args.max_boxes]
    boxes = max_box[keep_boxes, :]
    classes = max_cls[keep_boxes]
    confidence = max_conf[keep_boxes]
    features = features[keep_boxes, :]
    return boxes, features, classes, confidence
Author: batra-mlp-lab · Project: visdial-challenge-starter-pytorch · Lines: 58 · Source: extract_features_detectron.py
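Example 1 keeps a single, class-agnostic box per proposal: for every proposal it records the best class score that survived the per-class NMS, the class that produced it, and finally keeps the args.max_boxes highest-scoring proposals. The sketch below isolates that bookkeeping on made-up arrays (all names and sizes are illustrative; the NMS call itself is elided so the snippet runs without Detectron).
import numpy as np

# Dummy shapes only; class 0 is the background class, as in Detectron.
num_proposals, num_classes = 4, 3
scores = np.random.rand(num_proposals, num_classes).astype(np.float32)

max_conf = np.zeros((num_proposals,), dtype=np.float32)  # best surviving score
max_cls = np.zeros((num_proposals,), dtype=np.int32)     # class that produced it

for cls_ind in range(1, num_classes):
    cls_scores = scores[:, cls_ind]
    # In extract_features_detectron.py, `keep` comes from boxes.nms on the
    # class-specific dets; here every proposal "survives", to keep the
    # sketch standalone.
    keep = np.arange(num_proposals)
    idxs_update = np.where(cls_scores[keep] > max_conf[keep])
    keep_idxs = keep[idxs_update]
    max_conf[keep_idxs] = cls_scores[keep_idxs]
    max_cls[keep_idxs] = cls_ind

top = np.argsort(max_conf)[::-1][:2]  # analogous to args.max_boxes = 2
print(top, max_cls[top], max_conf[top])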
Example 2: mix_box_results_with_nms_and_limit
# Required import: from detectron.utils import boxes [as an alias]
# or: from detectron.utils.boxes import nms [as an alias]
def mix_box_results_with_nms_and_limit(scores, boxes, mix_boxes):
    """Returns bounding-box detection results by thresholding on scores and
    applying non-maximum suppression (NMS).

    `boxes` has shape (#detections, 4 * #classes).
    `mix_boxes` has shape (#detections, 4 * #classes, mix).
    `scores` has shape (#detections, #classes).
    """
    num_classes = cfg.MODEL.NUM_CLASSES
    cls_boxes = [[] for _ in range(num_classes)]
    mix_dets = [[] for _ in range(num_classes)]
    # Apply threshold on detection probabilities and apply NMS
    # Skip j = 0, because it's the background class
    for j in range(1, num_classes):
        inds = np.where(scores[:, j] > cfg.TEST.SCORE_THRESH)[0]
        scores_j = scores[inds, j]
        boxes_j = boxes[inds, j * 4:(j + 1) * 4]
        mix_boxes_j = mix_boxes[inds, j * 4:(j + 1) * 4]
        dets_j = np.hstack((boxes_j, scores_j[:, np.newaxis])).astype(
            np.float32, copy=False
        )
        keep = box_utils.nms(dets_j, cfg.TEST.NMS)
        nms_dets = dets_j[keep, :]
        mix_dets[j] = mix_boxes_j[keep]
        cls_boxes[j] = nms_dets

    # Limit to max_per_image detections **over all classes**
    if cfg.TEST.DETECTIONS_PER_IM > 0:
        image_scores = np.hstack(
            [cls_boxes[j][:, -1] for j in range(1, num_classes)]
        )
        if len(image_scores) > cfg.TEST.DETECTIONS_PER_IM:
            image_thresh = np.sort(image_scores)[-cfg.TEST.DETECTIONS_PER_IM]
            for j in range(1, num_classes):
                keep = np.where(cls_boxes[j][:, -1] >= image_thresh)[0]
                cls_boxes[j] = cls_boxes[j][keep, :]
                mix_dets[j] = mix_dets[j][keep]

    im_results = np.vstack([cls_boxes[j] for j in range(1, num_classes)])
    boxes = im_results[:, :-1]
    scores = im_results[:, -1]
    return scores, boxes, cls_boxes, mix_dets
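The DETECTIONS_PER_IM block above (and again in Example 4) caps the total number of detections across all classes by sorting every post-NMS score and using the k-th largest one as a global cut-off. A self-contained sketch of just that step, using dummy per-class scores:
import numpy as np

max_per_image = 3  # stand-in for cfg.TEST.DETECTIONS_PER_IM
# Post-NMS scores for two foreground classes (dummy values).
cls_scores = {1: np.array([0.9, 0.4, 0.2]), 2: np.array([0.8, 0.6])}

image_scores = np.hstack(list(cls_scores.values()))
if len(image_scores) > max_per_image:
    # The k-th largest score becomes the cut-off; ties survive the >= test.
    image_thresh = np.sort(image_scores)[-max_per_image]
    cls_scores = {j: s[s >= image_thresh] for j, s in cls_scores.items()}

print(cls_scores)  # class 1 keeps 0.9; class 2 keeps 0.8 and 0.6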
Example 3: im_detect_all
# Required import: from detectron.utils import boxes [as an alias]
# or: from detectron.utils.boxes import nms [as an alias]
def im_detect_all(model, im, box_proposals, timers=None):
    if timers is None:
        timers = defaultdict(Timer)

    # Handle RetinaNet testing separately for now
    if cfg.RETINANET.RETINANET_ON:
        cls_boxes = test_retinanet.im_detect_bbox(model, im, timers)
        return cls_boxes, None, None

    timers['im_detect_bbox'].tic()
    if cfg.TEST.BBOX_AUG.ENABLED:
        scores, boxes, im_scale = im_detect_bbox_aug(model, im, box_proposals)
    else:
        scores, boxes, im_scale = im_detect_bbox(
            model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals
        )
    timers['im_detect_bbox'].toc()

    # score and boxes are from the whole image after score thresholding and nms
    # (they are not separated by class)
    # cls_boxes boxes and scores are separated by class and in the format used
    # for evaluating results
    timers['misc_bbox'].tic()
    scores, boxes, cls_boxes = box_results_with_nms_and_limit(scores, boxes)
    timers['misc_bbox'].toc()

    if cfg.MODEL.MASK_ON and boxes.shape[0] > 0:
        timers['im_detect_mask'].tic()
        if cfg.TEST.MASK_AUG.ENABLED:
            masks = im_detect_mask_aug(model, im, boxes)
        else:
            masks = im_detect_mask(model, im_scale, boxes)
        timers['im_detect_mask'].toc()
        timers['misc_mask'].tic()
        cls_segms = segm_results(
            cls_boxes, masks, boxes, im.shape[0], im.shape[1]
        )
        timers['misc_mask'].toc()
    else:
        cls_segms = None

    if cfg.MODEL.KEYPOINTS_ON and boxes.shape[0] > 0:
        timers['im_detect_keypoints'].tic()
        if cfg.TEST.KPS_AUG.ENABLED:
            heatmaps = im_detect_keypoints_aug(model, im, boxes)
        else:
            heatmaps = im_detect_keypoints(model, im_scale, boxes)
        timers['im_detect_keypoints'].toc()
        timers['misc_keypoints'].tic()
        cls_keyps = keypoint_results(cls_boxes, heatmaps, boxes)
        timers['misc_keypoints'].toc()
    else:
        cls_keyps = None

    return cls_boxes, cls_segms, cls_keyps
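Example 3 threads a defaultdict(Timer) through every stage, so each phase (im_detect_bbox, misc_bbox, im_detect_mask, ...) accumulates its own wall-clock time via tic()/toc(). Below is a minimal stand-in for that pattern; it assumes nothing about Detectron's actual Timer class beyond the tic/toc calls visible above.
import time
from collections import defaultdict

class Timer:
    """Minimal tic/toc accumulator mirroring how the timers are used above."""
    def __init__(self):
        self.total_time = 0.0
        self.calls = 0
        self._start = None

    def tic(self):
        self._start = time.perf_counter()

    def toc(self):
        self.total_time += time.perf_counter() - self._start
        self.calls += 1
        return self.total_time

timers = defaultdict(Timer)
timers['im_detect_bbox'].tic()
time.sleep(0.01)                      # stand-in for the bbox forward pass
timers['im_detect_bbox'].toc()
print(f"im_detect_bbox: {timers['im_detect_bbox'].total_time:.3f}s "
      f"over {timers['im_detect_bbox'].calls} call(s)")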
Example 4: box_results_with_nms_and_limit
# Required import: from detectron.utils import boxes [as an alias]
# or: from detectron.utils.boxes import nms [as an alias]
def box_results_with_nms_and_limit(scores, boxes):
    """Returns bounding-box detection results by thresholding on scores and
    applying non-maximum suppression (NMS).

    `boxes` has shape (#detections, 4 * #classes), where each row represents
    a list of predicted bounding boxes for each of the object classes in the
    dataset (including the background class). The detections in each row
    originate from the same object proposal.

    `scores` has shape (#detections, #classes), where each row represents a
    list of object detection confidence scores for each of the object classes
    in the dataset (including the background class). `scores[i, j]` corresponds
    to the box at `boxes[i, j * 4:(j + 1) * 4]`.
    """
    num_classes = cfg.MODEL.NUM_CLASSES
    cls_boxes = [[] for _ in range(num_classes)]
    # Apply threshold on detection probabilities and apply NMS
    # Skip j = 0, because it's the background class
    for j in range(1, num_classes):
        inds = np.where(scores[:, j] > cfg.TEST.SCORE_THRESH)[0]
        scores_j = scores[inds, j]
        boxes_j = boxes[inds, j * 4:(j + 1) * 4]
        dets_j = np.hstack((boxes_j, scores_j[:, np.newaxis])).astype(
            np.float32, copy=False
        )
        if cfg.TEST.SOFT_NMS.ENABLED:
            nms_dets, _ = box_utils.soft_nms(
                dets_j,
                sigma=cfg.TEST.SOFT_NMS.SIGMA,
                overlap_thresh=cfg.TEST.NMS,
                score_thresh=0.0001,
                method=cfg.TEST.SOFT_NMS.METHOD
            )
        else:
            keep = box_utils.nms(dets_j, cfg.TEST.NMS)
            nms_dets = dets_j[keep, :]
        # Refine the post-NMS boxes using bounding-box voting
        if cfg.TEST.BBOX_VOTE.ENABLED:
            nms_dets = box_utils.box_voting(
                nms_dets,
                dets_j,
                cfg.TEST.BBOX_VOTE.VOTE_TH,
                scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD
            )
        cls_boxes[j] = nms_dets

    # Limit to max_per_image detections **over all classes**
    if cfg.TEST.DETECTIONS_PER_IM > 0:
        image_scores = np.hstack(
            [cls_boxes[j][:, -1] for j in range(1, num_classes)]
        )
        if len(image_scores) > cfg.TEST.DETECTIONS_PER_IM:
            image_thresh = np.sort(image_scores)[-cfg.TEST.DETECTIONS_PER_IM]
            for j in range(1, num_classes):
                keep = np.where(cls_boxes[j][:, -1] >= image_thresh)[0]
                cls_boxes[j] = cls_boxes[j][keep, :]

    im_results = np.vstack([cls_boxes[j] for j in range(1, num_classes)])
    boxes = im_results[:, :-1]
    scores = im_results[:, -1]
    return scores, boxes, cls_boxes
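When cfg.TEST.SOFT_NMS.ENABLED is set, Example 4 swaps hard NMS for box_utils.soft_nms, which decays the scores of overlapping boxes instead of deleting them outright (the example discards the call's second return value). The sketch below illustrates the Gaussian-decay variant of that idea in plain NumPy; it is not Detectron's implementation, and the parameter defaults are illustrative.
import numpy as np

def soft_nms_sketch(dets, sigma=0.5, score_thresh=1e-4):
    """Gaussian soft-NMS over (N, 5) dets of [x1, y1, x2, y2, score].

    Scores of boxes overlapping a higher-scoring box are multiplied by
    exp(-IoU^2 / sigma); boxes whose score drops below `score_thresh`
    are discarded. Returns the surviving dets with their decayed scores.
    """
    dets = dets.copy()
    out = []
    while dets.shape[0] > 0:
        # Pop the current highest-scoring box.
        top = dets[:, 4].argmax()
        best = dets[top].copy()
        out.append(best)
        dets = np.delete(dets, top, axis=0)
        if dets.shape[0] == 0:
            break
        # IoU of the popped box with every remaining box.
        xx1 = np.maximum(best[0], dets[:, 0])
        yy1 = np.maximum(best[1], dets[:, 1])
        xx2 = np.minimum(best[2], dets[:, 2])
        yy2 = np.minimum(best[3], dets[:, 3])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        area_best = (best[2] - best[0] + 1) * (best[3] - best[1] + 1)
        areas = (dets[:, 2] - dets[:, 0] + 1) * (dets[:, 3] - dets[:, 1] + 1)
        iou = inter / (area_best + areas - inter)
        # Decay overlapping scores instead of removing the boxes outright.
        dets[:, 4] *= np.exp(-(iou ** 2) / sigma)
        dets = dets[dets[:, 4] > score_thresh]
    return np.vstack(out) if out else dets

dets = np.array([[10, 10, 50, 50, 0.9],
                 [12, 12, 52, 52, 0.8],
                 [100, 100, 150, 150, 0.7]], dtype=np.float32)
print(soft_nms_sketch(dets))  # the overlapping box survives with a decayed score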