This page collects typical usage examples of the Python method utils.boxes.bbox_overlaps. If you have been wondering what exactly boxes.bbox_overlaps does, how to call it, or what real usage looks like, the hand-picked code examples below may help. You can also browse further usage examples of the module utils.boxes to which this method belongs.
A total of 13 code examples of boxes.bbox_overlaps are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
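All of the examples below revolve around the same operation: given N boxes and M boxes in (x1, y1, x2, y2) format, bbox_overlaps returns an N x M matrix of pairwise IoU values. As a point of reference before the examples, here is a minimal pure-NumPy sketch of that computation. It is not the cython implementation from utils.boxes, and it assumes the Detectron convention of inclusive pixel coordinates (hence the + 1 terms).

import numpy as np

def iou_matrix(boxes1, boxes2):
    """Pairwise IoU for boxes in (x1, y1, x2, y2) format; returns shape (N, M)."""
    # Box areas (inclusive coordinates, so width/height get a +1).
    area1 = (boxes1[:, 2] - boxes1[:, 0] + 1) * (boxes1[:, 3] - boxes1[:, 1] + 1)
    area2 = (boxes2[:, 2] - boxes2[:, 0] + 1) * (boxes2[:, 3] - boxes2[:, 1] + 1)
    # Intersection rectangle for every (i, j) pair via broadcasting.
    x1 = np.maximum(boxes1[:, None, 0], boxes2[None, :, 0])
    y1 = np.maximum(boxes1[:, None, 1], boxes2[None, :, 1])
    x2 = np.minimum(boxes1[:, None, 2], boxes2[None, :, 2])
    y2 = np.minimum(boxes1[:, None, 3], boxes2[None, :, 3])
    inter = np.clip(x2 - x1 + 1, 0, None) * np.clip(y2 - y1 + 1, 0, None)
    return inter / (area1[:, None] + area2[None, :] - inter)

a = np.array([[0, 0, 10, 10]], dtype=np.float32)
b = np.array([[0, 0, 10, 10], [20, 20, 30, 30]], dtype=np.float32)
print(iou_matrix(a, b))  # [[1. 0.]]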
Example 1: box_filter
# Required import: from utils import boxes [as alias]
# Or: from utils.boxes import bbox_overlaps [as alias]
def box_filter(boxes, must_overlap=False):
    """Only include boxes that overlap as possible relations.
    If there are no overlapping boxes, use all of them."""
    n_cands = boxes.shape[0]
    overlaps = box_utils.bbox_overlaps(boxes.astype(np.float32), boxes.astype(np.float32)) > 0
    np.fill_diagonal(overlaps, 0)
    all_possib = np.ones_like(overlaps, dtype=bool)
    np.fill_diagonal(all_possib, 0)

    if must_overlap:
        possible_boxes = np.column_stack(np.where(overlaps))

        if possible_boxes.size == 0:
            possible_boxes = np.column_stack(np.where(all_possib))
    else:
        possible_boxes = np.column_stack(np.where(all_possib))
    return possible_boxes
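A quick usage sketch for box_filter, with hypothetical inputs; it assumes the function above is in scope and that numpy and the repo's utils.boxes module are importable under the aliases shown in the comments.

import numpy as np
from utils import boxes as box_utils  # same alias the function above relies on

# Three candidate boxes: the first two overlap, the third is far away.
dets = np.array([[0, 0, 10, 10],
                 [5, 5, 15, 15],
                 [50, 50, 60, 60]], dtype=np.float32)

pairs = box_filter(dets, must_overlap=True)
print(pairs)  # -> array([[0, 1], [1, 0]]): index pairs of distinct, overlapping boxes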
Example 2: _compute_targets
# Required import: from utils import boxes [as alias]
# Or: from utils.boxes import bbox_overlaps [as alias]
def _compute_targets(entry):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    rois = entry['boxes']
    overlaps = entry['max_overlaps']
    labels = entry['max_classes']
    gt_inds = np.where((entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
    # Targets has format (class, tx, ty, tw, th)
    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return targets

    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = box_utils.bbox_overlaps(
        rois[ex_inds, :].astype(dtype=np.float32, copy=False),
        rois[gt_inds, :].astype(dtype=np.float32, copy=False))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]
    # Use class "1" for all boxes if using class_agnostic_bbox_reg
    targets[ex_inds, 0] = (
        1 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else labels[ex_inds])
    targets[ex_inds, 1:] = box_utils.bbox_transform_inv(
        ex_rois, gt_rois, cfg.MODEL.BBOX_REG_WEIGHTS)
    return targets
Example 3: _build_graph
# Required import: from utils import boxes [as alias]
# Or: from utils.boxes import bbox_overlaps [as alias]
def _build_graph(boxes, iou_threshold):
    """Build a graph based on box IoU."""
    overlaps = box_utils.bbox_overlaps(
        boxes.astype(dtype=np.float32, copy=False),
        boxes.astype(dtype=np.float32, copy=False))

    return (overlaps > iou_threshold).astype(np.float32)
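As a usage sketch (hypothetical inputs, assuming utils.boxes is importable), _build_graph returns a symmetric 0/1 adjacency matrix in which each box is also connected to itself, since every box has IoU 1.0 with itself. The commented output assumes standard IoU behaviour.

import numpy as np
from utils import boxes as box_utils  # repo module, aliased as in the example

dets = np.array([[0, 0, 10, 10],
                 [5, 5, 15, 15],
                 [50, 50, 60, 60]], dtype=np.float32)

adj = _build_graph(dets, iou_threshold=0.1)
print(adj)
# [[1. 1. 0.]
#  [1. 1. 0.]
#  [0. 0. 1.]]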
Example 4: test_cython_bbox_iou_against_coco_api_bbox_iou
# Required import: from utils import boxes [as alias]
# Or: from utils.boxes import bbox_overlaps [as alias]
def test_cython_bbox_iou_against_coco_api_bbox_iou(self):
    """Check that our cython implementation of bounding box IoU overlap
    matches the COCO API implementation.
    """

    def _do_test(b1, b2):
        # Compute IoU overlap with the cython implementation
        cython_iou = box_utils.bbox_overlaps(b1, b2)
        # Compute IoU overlap with the COCO API implementation
        # (requires converting boxes from xyxy to xywh format)
        xywh_b1 = box_utils.xyxy_to_xywh(b1)
        xywh_b2 = box_utils.xyxy_to_xywh(b2)
        not_crowd = [int(False)] * b2.shape[0]
        coco_ious = COCOmask.iou(xywh_b1, xywh_b2, not_crowd)
        # IoUs should be similar
        np.testing.assert_array_almost_equal(
            cython_iou, coco_ious, decimal=5
        )

    # Test small boxes
    b1 = random_boxes([10, 10, 20, 20], 5, 10)
    b2 = random_boxes([10, 10, 20, 20], 5, 10)
    _do_test(b1, b2)

    # Test bigger boxes
    b1 = random_boxes([10, 10, 110, 20], 20, 10)
    b2 = random_boxes([10, 10, 110, 20], 20, 10)
    _do_test(b1, b2)
Example 5: get_gt_keypoints
# Required import: from utils import boxes [as alias]
# Or: from utils.boxes import bbox_overlaps [as alias]
def get_gt_keypoints(entry, human_boxes, interaction_human_inds, im_scale):
    gt_human_inds = np.where(entry['gt_classes'] == 1)[0]
    gt_human_boxes = entry['boxes'][gt_human_inds]

    human_to_gt_ov = box_utils.bbox_overlaps(
        (human_boxes[:, 1:] / im_scale).astype(dtype=np.float32, copy=False),
        gt_human_boxes.astype(dtype=np.float32, copy=False))
    human_to_gt_inds = human_to_gt_ov.argmax(axis=1)

    human_to_gt_box_ind = gt_human_inds[human_to_gt_inds[interaction_human_inds]]
    gt_keypoints = entry['gt_keypoints'][human_to_gt_box_ind]
    return gt_keypoints
Example 6: _compute_targets
# Required import: from utils import boxes [as alias]
# Or: from utils.boxes import bbox_overlaps [as alias]
def _compute_targets(entry):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    rois = entry['boxes'][:, :4]
    overlaps = entry['max_overlaps']
    labels = entry['max_classes']
    gt_inds = np.where((entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
    # Targets has format (class, tx, ty, tw, th)
    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return targets

    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = box_utils.bbox_overlaps(
        rois[ex_inds, :].astype(dtype=np.float32, copy=False),
        rois[gt_inds, :].astype(dtype=np.float32, copy=False))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]
    # Use class "1" for all boxes if using class_agnostic_bbox_reg
    targets[ex_inds, 0] = (
        1 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else labels[ex_inds])
    targets[ex_inds, 1:] = box_utils.bbox_transform_inv(
        ex_rois, gt_rois, cfg.MODEL.BBOX_REG_WEIGHTS)
    return targets
Example 7: _compute_pred_matches
# Required import: from utils import boxes [as alias]
# Or: from utils.boxes import bbox_overlaps [as alias]
def _compute_pred_matches(gt_triplets, pred_triplets,
                          gt_boxes, pred_boxes, iou_thresh=0.5, phrdet=False):
    """
    Given a set of predicted triplets, return the list of matching GT triplets
    for each of the given predictions.
    :param gt_triplets:
    :param pred_triplets:
    :param gt_boxes:
    :param pred_boxes:
    :param iou_thresh:
    :return:
    """
    # This performs a matrix-multiplication-like operation between the two
    # arrays; instead of summing, we reduce with equality.
    # The rows correspond to GT triplets, the columns to predicted triplets.
    keeps = intersect_2d(gt_triplets, pred_triplets)
    gt_has_match = keeps.any(1)
    pred_to_gt = [[] for x in range(pred_boxes.shape[0])]
    for gt_ind, gt_box, keep_inds in zip(np.where(gt_has_match)[0],
                                         gt_boxes[gt_has_match],
                                         keeps[gt_has_match],
                                         ):
        boxes = pred_boxes[keep_inds]
        if phrdet:
            gt_box = gt_box.astype(dtype=np.float32, copy=False)
            boxes = boxes.astype(dtype=np.float32, copy=False)
            rel_iou = bbox_overlaps(gt_box[None, :], boxes)[0]

            inds = rel_iou >= iou_thresh
        else:
            gt_box = gt_box.astype(dtype=np.float32, copy=False)
            boxes = boxes.astype(dtype=np.float32, copy=False)
            sub_iou = bbox_overlaps(gt_box[None, :4], boxes[:, :4])[0]
            obj_iou = bbox_overlaps(gt_box[None, 4:], boxes[:, 4:])[0]

            inds = (sub_iou >= iou_thresh) & (obj_iou >= iou_thresh)

        for i in np.where(keep_inds)[0][inds]:
            pred_to_gt[i].append(int(gt_ind))
    return pred_to_gt
Example 8: _compute_targets
# Required import: from utils import boxes [as alias]
# Or: from utils.boxes import bbox_overlaps [as alias]
def _compute_targets(entry):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    rois = entry['boxes']
    overlaps = entry['max_overlaps']
    labels = entry['max_classes']
    gt_inds = np.where((entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
    # Targets has format (class, tx, ty, tw, th, tx2, ty2, tw2, th2...)
    # (for each time frame)
    targets = np.zeros((rois.shape[0], rois.shape[1] + 1), dtype=np.float32)
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return targets

    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = box_utils.bbox_overlaps(
        rois[ex_inds, :].astype(dtype=np.float32, copy=False),
        rois[gt_inds, :].astype(dtype=np.float32, copy=False))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]
    # Use class "1" for all boxes if using class_agnostic_bbox_reg
    targets[ex_inds, 0] = (
        1 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else labels[ex_inds])
    targets[ex_inds, 1:] = box_utils.bbox_transform_inv(
        ex_rois, gt_rois, cfg.MODEL.BBOX_REG_WEIGHTS)
    return targets
Example 9: _compute_pairwise_iou
# Required import: from utils import boxes [as alias]
# Or: from utils.boxes import bbox_overlaps [as alias]
def _compute_pairwise_iou(a, b):
    """
    a, b (np.ndarray) of shape Nx4T and Mx4T.
    The output is NxM, one IoU value for each combination of boxes.
    """
    return box_utils.bbox_overlaps(a, b)
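A usage sketch for the T = 1 case (plain Nx4 and Mx4 boxes), again with hypothetical inputs and assuming the repo's utils.boxes module is importable:

import numpy as np
from utils import boxes as box_utils  # repo module, aliased as in the example

a = np.array([[0, 0, 10, 10],
              [5, 5, 15, 15]], dtype=np.float32)  # N = 2 boxes
b = np.array([[0, 0, 10, 10]], dtype=np.float32)  # M = 1 box

iou = _compute_pairwise_iou(a, b)
print(iou.shape)  # (2, 1): one IoU value per (a, b) pair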
Example 10: _get_proposal_clusters
# Required import: from utils import boxes [as alias]
# Or: from utils.boxes import bbox_overlaps [as alias]
def _get_proposal_clusters(all_rois, proposals, im_labels, cls_prob):
    """Generate a random sample of RoIs comprising foreground and background
    examples.
    """
    num_images, num_classes = im_labels.shape
    assert num_images == 1, 'batch size should be equal to 1'
    # overlaps: (rois x gt_boxes)
    gt_boxes = proposals['gt_boxes']
    gt_labels = proposals['gt_classes']
    gt_scores = proposals['gt_scores']
    overlaps = box_utils.bbox_overlaps(
        all_rois.astype(dtype=np.float32, copy=False),
        gt_boxes.astype(dtype=np.float32, copy=False))
    gt_assignment = overlaps.argmax(axis=1)
    max_overlaps = overlaps.max(axis=1)
    labels = gt_labels[gt_assignment, 0]
    cls_loss_weights = gt_scores[gt_assignment, 0]

    # Select foreground RoIs as those with >= FG_THRESH overlap
    fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]

    # Select background RoIs as those with < FG_THRESH overlap
    bg_inds = np.where(max_overlaps < cfg.TRAIN.FG_THRESH)[0]

    ig_inds = np.where(max_overlaps < cfg.TRAIN.BG_THRESH)[0]
    cls_loss_weights[ig_inds] = 0.0

    labels[bg_inds] = 0
    gt_assignment[bg_inds] = -1

    img_cls_loss_weights = np.zeros(gt_boxes.shape[0], dtype=np.float32)
    pc_probs = np.zeros(gt_boxes.shape[0], dtype=np.float32)
    pc_labels = np.zeros(gt_boxes.shape[0], dtype=np.int32)
    pc_count = np.zeros(gt_boxes.shape[0], dtype=np.int32)

    for i in range(gt_boxes.shape[0]):
        po_index = np.where(gt_assignment == i)[0]
        img_cls_loss_weights[i] = np.sum(cls_loss_weights[po_index])
        pc_labels[i] = gt_labels[i, 0]
        pc_count[i] = len(po_index)
        pc_probs[i] = np.average(cls_prob[po_index, pc_labels[i]])

    return labels, cls_loss_weights, gt_assignment, pc_labels, pc_probs, pc_count, img_cls_loss_weights
Example 11: test_det_bbox_gt_action
# Required import: from utils import boxes [as alias]
# Or: from utils.boxes import bbox_overlaps [as alias]
def test_det_bbox_gt_action(hoi_blob_in, entry, im_info):
    # Check the interaction branch: bbox results come from testing, interactions from GT
    gt_human_inds = np.where(entry['gt_classes'] == 1)[0]
    gt_human_boxes = entry['boxes'][gt_human_inds]
    pred_human_boxes = hoi_blob_in['human_boxes'] / im_info[0, 2]
    human_pred_gt_overlaps = box_utils.bbox_overlaps(
        pred_human_boxes[:, 1:].astype(dtype=np.float32, copy=False),
        gt_human_boxes.astype(dtype=np.float32, copy=False))
    human_pred_to_gt_inds = np.argmax(human_pred_gt_overlaps, axis=1)
    human_ious = human_pred_gt_overlaps.max(axis=1)[:, None]
    human_score = np.zeros(human_ious.shape)
    human_score[np.where(human_ious > 0.5)] = 1

    # Assign gt interactions to the matching predicted bboxes
    human_action = entry['gt_actions'][gt_human_inds[human_pred_to_gt_inds]]
    # Multiply IoU into the human action: better localization, better action score
    # human_action = human_ious * human_action
    human_action = human_score * human_action

    # ------------------------------- Targets -----------------------------------
    # ipdb.set_trace()
    pred_target_boxes = hoi_blob_in['object_boxes'] / im_info[0, 2]
    target_pred_gt_overlaps = box_utils.bbox_overlaps(
        pred_target_boxes[:, 1:].astype(dtype=np.float32, copy=False),
        entry['boxes'].astype(dtype=np.float32, copy=False))
    target_pred_to_gt_inds = np.argmax(target_pred_gt_overlaps, axis=1)
    target_ious = target_pred_gt_overlaps.max(axis=1)[:, None]
    target_score = np.zeros(target_ious.shape)
    target_score[np.where(target_ious > 0.5)] = 1

    gt_action_mat = generate_action_mat(entry['gt_role_id'])
    # TODO: there is a problem here: we ignore `interaction triplets` whose
    # targets are invisible
    action_labels = gt_action_mat[
        gt_human_inds[human_pred_to_gt_inds[hoi_blob_in['interaction_human_inds']]],
        target_pred_to_gt_inds[hoi_blob_in['interaction_object_inds']]]

    # triplet_ious = human_ious[hoi_blob_in['interaction_human_inds']] * \
    #     target_ious[hoi_blob_in['interaction_object_inds']]
    # # multiply iou
    # action_labels = triplet_ious[:, None] * action_labels
    triplet_scores = human_score[hoi_blob_in['interaction_human_inds']] * \
        target_score[hoi_blob_in['interaction_object_inds']]
    action_labels = triplet_scores[:, None] * action_labels

    # Convert to 24-class
    interaction_action_mask = np.array(cfg.VCOCO.ACTION_MASK).T
    action_labels = action_labels[:, np.where(interaction_action_mask > 0)[0],
                                  np.where(interaction_action_mask > 0)[1]]

    hoi_blob_in['human_action_score'] = torch.from_numpy(human_action).float().cuda()
    hoi_blob_in['interaction_action_score'] = torch.from_numpy(action_labels).float().cuda()

    return hoi_blob_in
Example 12: remove_mis_group
# Required import: from utils import boxes [as alias]
# Or: from utils.boxes import bbox_overlaps [as alias]
def remove_mis_group(hoi_blob_in, entry, im_scale):
    gt_human_inds = np.where(entry['gt_classes'] == 1)[0]
    gt_human_boxes = entry['boxes'][gt_human_inds]
    pred_human_boxes = hoi_blob_in['human_boxes'][:, 1:] / im_scale
    # if len(pred_human_boxes[0]) == 0:
    #     return None
    human_pred_gt_overlaps = box_utils.bbox_overlaps(
        pred_human_boxes.astype(dtype=np.float32, copy=False),
        gt_human_boxes.astype(dtype=np.float32, copy=False))
    human_pred_to_gt_inds = np.argmax(human_pred_gt_overlaps, axis=1)
    human_ious = human_pred_gt_overlaps.max(axis=1)[:, None]
    valid_human_ind = np.where(human_ious > 0.5)[0]

    # ------------------------------- Targets -----------------------------------
    # ipdb.set_trace()
    pred_obj_boxes = hoi_blob_in['object_boxes'][:, 1:] / im_scale
    obj_pred_gt_overlaps = box_utils.bbox_overlaps(
        pred_obj_boxes.astype(dtype=np.float32, copy=False),
        entry['boxes'].astype(dtype=np.float32, copy=False))
    obj_pred_to_gt_inds = np.argmax(obj_pred_gt_overlaps, axis=1)
    obj_ious = obj_pred_gt_overlaps.max(axis=1)[:, None]
    valid_obj_ind = np.where(obj_ious > 0.5)[0]

    interact_matrix = np.zeros([pred_human_boxes.shape[0], pred_obj_boxes.shape[0]])
    interact_matrix[hoi_blob_in['interaction_human_inds'], hoi_blob_in['interaction_object_inds']] = 1
    valid_matrix = np.zeros([pred_human_boxes.shape[0], pred_obj_boxes.shape[0]]) - 1
    valid_matrix[valid_human_ind, :] += 1
    valid_matrix[:, valid_obj_ind] += 1
    valid_matrix = valid_matrix * interact_matrix
    valid_interaction_human_inds, valid_interaction_obj_inds = np.where(valid_matrix == 1)

    gt_action_mat = generate_action_mat(entry['gt_role_id'])
    # TODO: there is a problem here: we ignore `interaction triplets` whose
    # targets are invisible
    action_labels = gt_action_mat[
        gt_human_inds[human_pred_to_gt_inds[valid_interaction_human_inds]],
        obj_pred_to_gt_inds[valid_interaction_obj_inds]]
    # action_labels = action_labels.reshape(action_labels.shape[0], -1)
    no_gt_rel_ind = np.where(action_labels.sum(1).sum(1) == 0)

    ret = np.ones([pred_human_boxes.shape[0], pred_obj_boxes.shape[0]])
    ret[valid_interaction_human_inds[no_gt_rel_ind],
        valid_interaction_obj_inds[no_gt_rel_ind]] = 0
    return ret
Example 13: _merge_proposal_boxes_into_roidb
# Required import: from utils import boxes [as alias]
# Or: from utils.boxes import bbox_overlaps [as alias]
def _merge_proposal_boxes_into_roidb(roidb, box_list):
    assert len(box_list) == len(roidb)
    for i, entry in enumerate(roidb):
        boxes = box_list[i]
        num_boxes = boxes.shape[0]
        gt_overlaps = np.zeros(
            (num_boxes, entry['gt_overlaps'].shape[1]),
            dtype=entry['gt_overlaps'].dtype)
        box_to_gt_ind_map = -np.ones(
            (num_boxes), dtype=entry['box_to_gt_ind_map'].dtype)

        # Note: unlike in other places, here we intentionally include all gt
        # rois, even ones marked as crowd. Boxes that overlap with crowds will
        # be filtered out later (see: _filter_crowd_proposals).
        gt_inds = np.where(entry['gt_classes'] > 0)[0]
        if len(gt_inds) > 0:
            gt_boxes = entry['boxes'][gt_inds, :]
            gt_classes = entry['gt_classes'][gt_inds]
            proposal_to_gt_overlaps = box_utils.bbox_overlaps(
                boxes.astype(dtype=np.float32, copy=False),
                gt_boxes.astype(dtype=np.float32, copy=False))
            # Gt box that overlaps each input box the most
            # (ties are broken arbitrarily by class order)
            argmaxes = proposal_to_gt_overlaps.argmax(axis=1)
            # Amount of that overlap
            maxes = proposal_to_gt_overlaps.max(axis=1)
            # Those boxes with non-zero overlap with gt boxes
            I = np.where(maxes > 0)[0]
            # Record max overlaps with the class of the appropriate gt box
            gt_overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
            box_to_gt_ind_map[I] = gt_inds[argmaxes[I]]
        entry['boxes'] = np.append(
            entry['boxes'],
            boxes.astype(entry['boxes'].dtype, copy=False),
            axis=0)
        entry['gt_classes'] = np.append(
            entry['gt_classes'],
            np.zeros((num_boxes), dtype=entry['gt_classes'].dtype))
        entry['seg_areas'] = np.append(
            entry['seg_areas'],
            np.zeros((num_boxes), dtype=entry['seg_areas'].dtype))
        entry['gt_overlaps'] = np.append(
            entry['gt_overlaps'].toarray(), gt_overlaps, axis=0)
        entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps'])
        entry['is_crowd'] = np.append(
            entry['is_crowd'],
            np.zeros((num_boxes), dtype=entry['is_crowd'].dtype))
        entry['box_to_gt_ind_map'] = np.append(
            entry['box_to_gt_ind_map'],
            box_to_gt_ind_map.astype(
                entry['box_to_gt_ind_map'].dtype, copy=False))