This page collects typical usage examples of the Python function mmdet.core.bbox2roi. If you have been wondering how core.bbox2roi is used in practice, or are looking for working examples of it, the curated code samples below should help. You can also explore further usage examples from the containing module, mmdet.core.
The following shows 15 code examples of core.bbox2roi, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
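Before the examples, it helps to recall what bbox2roi produces: it takes a list of per-image box tensors of shape (n_i, 4) and concatenates them into a single (sum n_i, 5) RoI tensor whose first column is the image index within the batch. The snippet below is a minimal, self-contained sketch of that behaviour written for illustration (the name bbox2roi_sketch is ours); it approximates, rather than reproduces, the actual mmdet.core implementation.

import torch


def bbox2roi_sketch(bbox_list):
    """Approximate sketch of mmdet.core.bbox2roi.

    Args:
        bbox_list (list[Tensor]): one (n_i, 4) box tensor per image.

    Returns:
        Tensor: (sum(n_i), 5) RoIs as [batch_index, x1, y1, x2, y2].
    """
    rois_list = []
    for img_id, bboxes in enumerate(bbox_list):
        if bboxes.size(0) > 0:
            img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
            rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1)
        else:
            rois = bboxes.new_zeros((0, 5))
        rois_list.append(rois)
    return torch.cat(rois_list, 0)


# Two images with 2 and 1 proposals respectively.
boxes_img0 = torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 30.]])
boxes_img1 = torch.tensor([[2., 3., 8., 9.]])
print(bbox2roi_sketch([boxes_img0, boxes_img1]))
# -> 3 rows; the first column is 0, 0, 1 (the image index)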
Example 1: _mask_point_forward_train
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def _mask_point_forward_train(self, x, sampling_results, mask_pred,
                              gt_masks, img_metas):
    """Run forward function and calculate loss for point head in
    training."""
    pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
    rel_roi_points = self.point_head.get_roi_rel_points_train(
        mask_pred, pos_labels, cfg=self.train_cfg)
    rois = bbox2roi([res.pos_bboxes for res in sampling_results])
    fine_grained_point_feats = self._get_fine_grained_point_feats(
        x, rois, rel_roi_points, img_metas)
    coarse_point_feats = point_sample(mask_pred, rel_roi_points)
    mask_point_pred = self.point_head(fine_grained_point_feats,
                                      coarse_point_feats)
    mask_point_target = self.point_head.get_targets(
        rois, rel_roi_points, sampling_results, gt_masks, self.train_cfg)
    loss_mask_point = self.point_head.loss(mask_point_pred,
                                           mask_point_target, pos_labels)
    return loss_mask_point
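The coarse_point_feats line above relies on point_sample, a helper that evaluates a feature map at normalised (x, y) point coordinates in [0, 1]. As a hedged illustration of the idea (not the library source; point_sample_sketch is a made-up name), the sampling can be emulated with F.grid_sample after shifting the coordinates into the [-1, 1] range grid_sample expects:

import torch
import torch.nn.functional as F


def point_sample_sketch(feats, points, align_corners=False):
    """Sample `feats` (N, C, H, W) at `points` (N, P, 2) given in [0, 1]."""
    # grid_sample expects a (N, H_out, W_out, 2) grid in [-1, 1].
    grid = 2.0 * points.unsqueeze(2) - 1.0            # (N, P, 1, 2)
    out = F.grid_sample(feats, grid, align_corners=align_corners)
    return out.squeeze(3)                             # (N, C, P)


mask_pred = torch.randn(4, 1, 14, 14)                 # coarse mask logits
rel_roi_points = torch.rand(4, 196, 2)                # normalised point coords
coarse_point_feats = point_sample_sketch(mask_pred, rel_roi_points)
print(coarse_point_feats.shape)                       # torch.Size([4, 1, 196])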
Example 2: forward_dummy
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def forward_dummy(self, x, proposals):
    """Dummy forward function."""
    # bbox head
    outs = ()
    rois = bbox2roi([proposals])
    if self.with_bbox:
        bbox_results = self._bbox_forward(x, rois)
        outs = outs + (bbox_results['cls_score'],
                       bbox_results['bbox_pred'])
    # grid head
    grid_rois = rois[:100]
    grid_feats = self.grid_roi_extractor(
        x[:self.grid_roi_extractor.num_inputs], grid_rois)
    if self.with_shared_head:
        grid_feats = self.shared_head(grid_feats)
    grid_pred = self.grid_head(grid_feats)
    outs = outs + (grid_pred, )
    # mask head
    if self.with_mask:
        mask_rois = rois[:100]
        mask_results = self._mask_forward(x, mask_rois)
        outs = outs + (mask_results['mask_pred'], )
    return outs
Example 3: _bbox_forward_train
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
                        img_metas):
    num_imgs = len(img_metas)
    rois = bbox2roi([res.bboxes for res in sampling_results])
    bbox_results = self._bbox_forward(x, rois)
    bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
                                              gt_labels, self.train_cfg)
    # record the `beta_topk`-th smallest target
    # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets
    # and bbox_weights, respectively
    pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)
    num_pos = len(pos_inds)
    cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)
    beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,
                    num_pos)
    cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()
    self.beta_history.append(cur_target)
    loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
                                    bbox_results['bbox_pred'], rois,
                                    *bbox_targets)
    bbox_results.update(loss_bbox=loss_bbox)
    return bbox_results
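Example 3 comes from a Dynamic R-CNN style RoI head: each iteration it appends the beta_topk-th smallest regression error to self.beta_history, and that history is later consumed to shrink the SmoothL1 beta as localisation quality improves. The snippet below is only a schematic of that later update step, with variable names chosen for illustration rather than taken from the library:

import numpy as np

# Hypothetical state gathered during training (see the method above).
beta_history = [0.42, 0.35, 0.31, 0.28]   # recorded per-iteration statistics
initial_beta = 1.0                        # starting SmoothL1 beta

# Periodically replace beta with a robust statistic of the recorded errors,
# never letting it grow beyond its initial value.
new_beta = min(initial_beta, float(np.median(beta_history)))
beta_history.clear()                      # start a fresh window
print(new_beta)                           # 0.33 with the numbers above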
Example 4: forward_dummy
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def forward_dummy(self, img):
    outs = ()
    # backbone
    x = self.extract_feat(img)
    # rpn
    if self.with_rpn:
        rpn_outs = self.rpn_head(x)
        outs = outs + (rpn_outs, )
    proposals = torch.randn(1000, 4).cuda()
    # bbox head
    rois = bbox2roi([proposals])
    bbox_feats = self.bbox_roi_extractor(
        x[:self.bbox_roi_extractor.num_inputs], rois)
    if self.with_shared_head:
        bbox_feats = self.shared_head(bbox_feats)
    cls_score, bbox_pred = self.bbox_head(bbox_feats)
    # grid head
    grid_rois = rois[:100]
    grid_feats = self.grid_roi_extractor(
        x[:self.grid_roi_extractor.num_inputs], grid_rois)
    if self.with_shared_head:
        grid_feats = self.shared_head(grid_feats)
    grid_pred = self.grid_head(grid_feats)
    return rpn_outs, cls_score, bbox_pred, grid_pred
Example 5: forward_dummy
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def forward_dummy(self, img):
    outs = ()
    # backbone
    x = self.extract_feat(img)
    # rpn
    if self.with_rpn:
        rpn_outs = self.rpn_head(x)
        outs = outs + (rpn_outs, )
    proposals = torch.randn(1000, 4).cuda()
    # bbox head
    rois = bbox2roi([proposals])
    bbox_cls_feats = self.bbox_roi_extractor(
        x[:self.bbox_roi_extractor.num_inputs], rois)
    bbox_reg_feats = self.bbox_roi_extractor(
        x[:self.bbox_roi_extractor.num_inputs],
        rois,
        roi_scale_factor=self.reg_roi_scale_factor)
    if self.with_shared_head:
        bbox_cls_feats = self.shared_head(bbox_cls_feats)
        bbox_reg_feats = self.shared_head(bbox_reg_feats)
    cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
    outs += (cls_score, bbox_pred)
    return outs
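In Example 5 the same RoI extractor is queried twice; the second call passes roi_scale_factor so that the regression branch pools features from an enlarged region around each box. As a hedged sketch of what such rescaling amounts to (the helper name rescale_rois is ours, not the library's): enlarge each RoI about its centre while leaving the batch-index column untouched.

import torch


def rescale_rois(rois, scale_factor):
    """Scale (N, 5) RoIs [batch_idx, x1, y1, x2, y2] about their centres."""
    cx = (rois[:, 1] + rois[:, 3]) * 0.5
    cy = (rois[:, 2] + rois[:, 4]) * 0.5
    w = (rois[:, 3] - rois[:, 1]) * scale_factor
    h = (rois[:, 4] - rois[:, 2]) * scale_factor
    x1, y1 = cx - w * 0.5, cy - h * 0.5
    x2, y2 = cx + w * 0.5, cy + h * 0.5
    return torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)


rois = torch.tensor([[0., 10., 10., 30., 50.]])
print(rescale_rois(rois, 1.3))   # wider and taller box, same centre (20, 30)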
Example 6: simple_test_mask
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def simple_test_mask(self,
                     x,
                     img_metas,
                     det_bboxes,
                     det_labels,
                     rescale=False):
    """Obtain mask prediction without augmentation."""
    # image shape of the first image in the batch (only one)
    ori_shape = img_metas[0]['ori_shape']
    scale_factor = img_metas[0]['scale_factor']
    if det_bboxes.shape[0] == 0:
        segm_result = [[] for _ in range(self.mask_head.num_classes)]
    else:
        # if det_bboxes is rescaled to the original image size, we need to
        # rescale it back to the testing scale to obtain RoIs.
        if rescale and not isinstance(scale_factor, float):
            scale_factor = det_bboxes.new_tensor(scale_factor)
        _bboxes = (
            det_bboxes[:, :4] * scale_factor if rescale else det_bboxes)
        mask_rois = bbox2roi([_bboxes])
        mask_results = self._mask_forward(x, mask_rois)
        mask_results['mask_pred'] = self._mask_point_forward_test(
            x, mask_rois, det_labels, mask_results['mask_pred'], img_metas)
        segm_result = self.mask_head.get_seg_masks(
            mask_results['mask_pred'], _bboxes, det_labels, self.test_cfg,
            ori_shape, scale_factor, rescale)
    return segm_result
Example 7: aug_test_mask
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
    """Test for mask head with test time augmentation."""
    if det_bboxes.shape[0] == 0:
        segm_result = [[] for _ in range(self.mask_head.num_classes)]
    else:
        aug_masks = []
        for x, img_meta in zip(feats, img_metas):
            img_shape = img_meta[0]['img_shape']
            scale_factor = img_meta[0]['scale_factor']
            flip = img_meta[0]['flip']
            _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
                                   scale_factor, flip)
            mask_rois = bbox2roi([_bboxes])
            mask_results = self._mask_forward(x, mask_rois)
            mask_results['mask_pred'] = self._mask_point_forward_test(
                x, mask_rois, det_labels, mask_results['mask_pred'],
                img_metas)
            # convert to numpy array to save memory
            aug_masks.append(
                mask_results['mask_pred'].sigmoid().cpu().numpy())
        merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)
        ori_shape = img_metas[0][0]['ori_shape']
        segm_result = self.mask_head.get_seg_masks(
            merged_masks,
            det_bboxes,
            det_labels,
            self.test_cfg,
            ori_shape,
            scale_factor=1.0,
            rescale=False)
    return segm_result
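bbox_mapping in Example 7 moves detections from the original image frame into the frame of each augmented test image: it rescales the boxes and, if that augmentation was flipped, mirrors them. A small, self-contained sketch of the idea (horizontal flip only; the real helper also handles other flip directions, and bbox_mapping_sketch is an illustrative name):

import torch


def bbox_mapping_sketch(bboxes, img_shape, scale_factor, flip):
    """Map (N, 4) boxes into a rescaled (and optionally h-flipped) image."""
    new_bboxes = bboxes * bboxes.new_tensor(scale_factor)
    if flip:
        h, w = img_shape[:2]
        flipped = new_bboxes.clone()
        flipped[:, 0] = w - new_bboxes[:, 2]   # new x1 from old x2
        flipped[:, 2] = w - new_bboxes[:, 0]   # new x2 from old x1
        new_bboxes = flipped
    return new_bboxes


boxes = torch.tensor([[10., 20., 50., 80.]])
print(bbox_mapping_sketch(boxes, img_shape=(600, 800), scale_factor=1.0,
                          flip=True))
# -> [[750., 20., 790., 80.]]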
Example 8: _bbox_forward_train
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
                        img_metas):
    """Run forward function and calculate loss for box head in training."""
    bbox_results = super(GridRoIHead,
                         self)._bbox_forward_train(x, sampling_results,
                                                   gt_bboxes, gt_labels,
                                                   img_metas)
    # Grid head forward and loss
    sampling_results = self._random_jitter(sampling_results, img_metas)
    pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
    # GN in head does not support zero shape input
    if pos_rois.shape[0] == 0:
        return bbox_results
    grid_feats = self.grid_roi_extractor(
        x[:self.grid_roi_extractor.num_inputs], pos_rois)
    if self.with_shared_head:
        grid_feats = self.shared_head(grid_feats)
    # Accelerate training
    max_sample_num_grid = self.train_cfg.get('max_num_grid', 192)
    sample_idx = torch.randperm(
        grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid)]
    grid_feats = grid_feats[sample_idx]
    grid_pred = self.grid_head(grid_feats)
    grid_targets = self.grid_head.get_targets(sampling_results,
                                              self.train_cfg)
    grid_targets = grid_targets[sample_idx]
    loss_grid = self.grid_head.loss(grid_pred, grid_targets)
    bbox_results['loss_bbox'].update(loss_grid)
    return bbox_results
Example 9: forward_dummy
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def forward_dummy(self, x, proposals):
    """Dummy forward function."""
    # bbox head
    outs = ()
    rois = bbox2roi([proposals])
    if self.with_bbox:
        bbox_results = self._bbox_forward(x, rois)
        outs = outs + (bbox_results['cls_score'],
                       bbox_results['bbox_pred'])
    # mask head
    if self.with_mask:
        mask_rois = rois[:100]
        mask_results = self._mask_forward(x, mask_rois)
        outs = outs + (mask_results['mask_pred'], )
    return outs
Example 10: _bbox_forward_train
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
                        img_metas):
    """Run forward function and calculate loss for box head in training."""
    rois = bbox2roi([res.bboxes for res in sampling_results])
    bbox_results = self._bbox_forward(x, rois)
    bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
                                              gt_labels, self.train_cfg)
    loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
                                    bbox_results['bbox_pred'], rois,
                                    *bbox_targets)
    bbox_results.update(loss_bbox=loss_bbox)
    return bbox_results
Example 11: _mask_forward_train
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
                        img_metas):
    """Run forward function and calculate loss for mask head in
    training."""
    if not self.share_roi_extractor:
        pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
        if pos_rois.shape[0] == 0:
            return dict(loss_mask=None)
        mask_results = self._mask_forward(x, pos_rois)
    else:
        pos_inds = []
        device = bbox_feats.device
        for res in sampling_results:
            pos_inds.append(
                torch.ones(
                    res.pos_bboxes.shape[0],
                    device=device,
                    dtype=torch.uint8))
            pos_inds.append(
                torch.zeros(
                    res.neg_bboxes.shape[0],
                    device=device,
                    dtype=torch.uint8))
        pos_inds = torch.cat(pos_inds)
        if pos_inds.shape[0] == 0:
            return dict(loss_mask=None)
        mask_results = self._mask_forward(
            x, pos_inds=pos_inds, bbox_feats=bbox_feats)
    mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,
                                              self.train_cfg)
    pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
    loss_mask = self.mask_head.loss(mask_results['mask_pred'],
                                    mask_targets, pos_labels)
    mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets)
    return mask_results
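When the mask head shares RoI features with the bbox head (the else branch in Example 11), positive samples must be selected out of the already-pooled bbox_feats. The pattern boils down to building a per-RoI mask that is 1 for every positive box and 0 for every negative box, image by image, and indexing with it. A tiny standalone illustration with made-up sizes (a bool mask is used here, whereas the excerpt uses uint8):

import torch

# Hypothetical sampling outcome for two images:
# image 0 -> 3 positives, 5 negatives; image 1 -> 2 positives, 6 negatives.
num_pos = [3, 2]
num_neg = [5, 6]

pos_inds = []
for n_pos, n_neg in zip(num_pos, num_neg):
    pos_inds.append(torch.ones(n_pos, dtype=torch.bool))
    pos_inds.append(torch.zeros(n_neg, dtype=torch.bool))
pos_inds = torch.cat(pos_inds)            # (16,) mask, positives first per image

bbox_feats = torch.randn(16, 256, 7, 7)   # pooled features for all 16 RoIs
pos_feats = bbox_feats[pos_inds]          # (5, 256, 7, 7) fed to the mask head
print(pos_feats.shape)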
Example 12: simple_test_mask
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def simple_test_mask(self,
                     x,
                     img_metas,
                     det_bboxes,
                     det_labels,
                     rescale=False):
    """Obtain mask prediction without augmentation."""
    # image shape of the first image in the batch (only one)
    ori_shape = img_metas[0]['ori_shape']
    scale_factor = img_metas[0]['scale_factor']
    if det_bboxes.shape[0] == 0:
        segm_result = [[] for _ in range(self.mask_head.num_classes)]
        mask_scores = [[] for _ in range(self.mask_head.num_classes)]
    else:
        # if det_bboxes is rescaled to the original image size, we need to
        # rescale it back to the testing scale to obtain RoIs.
        _bboxes = (
            det_bboxes[:, :4] *
            det_bboxes.new_tensor(scale_factor) if rescale else det_bboxes)
        mask_rois = bbox2roi([_bboxes])
        mask_results = self._mask_forward(x, mask_rois)
        segm_result = self.mask_head.get_seg_masks(
            mask_results['mask_pred'], _bboxes, det_labels, self.test_cfg,
            ori_shape, scale_factor, rescale)
        # get mask scores with mask iou head
        mask_iou_pred = self.mask_iou_head(
            mask_results['mask_feats'],
            mask_results['mask_pred'][range(det_labels.size(0)),
                                      det_labels])
        mask_scores = self.mask_iou_head.get_mask_scores(
            mask_iou_pred, det_bboxes, det_labels)
    return segm_result, mask_scores
Example 13: forward_dummy
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def forward_dummy(self, img):
    outs = ()
    # backbone
    x = self.extract_feat(img)
    # rpn
    if self.with_rpn:
        rpn_outs = self.rpn_head(x)
        outs = outs + (rpn_outs, )
    proposals = torch.randn(1000, 4).cuda()
    # bbox head
    rois = bbox2roi([proposals])
    if self.with_bbox:
        bbox_feats = self.bbox_roi_extractor(
            x[:self.bbox_roi_extractor.num_inputs], rois)
        if self.with_shared_head:
            bbox_feats = self.shared_head(bbox_feats)
        cls_score, bbox_pred = self.bbox_head(bbox_feats)
        outs = outs + (cls_score, bbox_pred)
    # mask head
    if self.with_mask:
        mask_rois = rois[:100]
        mask_feats = self.mask_roi_extractor(
            x[:self.mask_roi_extractor.num_inputs], mask_rois)
        if self.with_shared_head:
            mask_feats = self.shared_head(mask_feats)
        mask_pred = self.mask_head(mask_feats)
        outs = outs + (mask_pred, )
    return outs
Example 14: simple_test
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def simple_test(self, img, img_meta, proposals=None, rescale=False):
    """Test without augmentation."""
    assert self.with_bbox, "Bbox head must be implemented."
    x = self.extract_feat(img)
    proposal_list = self.simple_test_rpn(
        x, img_meta, self.test_cfg.rpn) if proposals is None else proposals
    det_bboxes, det_labels = self.simple_test_bboxes(
        x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=False)
    # pack rois into bboxes
    grid_rois = bbox2roi([det_bboxes[:, :4]])
    grid_feats = self.grid_roi_extractor(
        x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois)
    if grid_rois.shape[0] != 0:
        self.grid_head.test_mode = True
        grid_pred = self.grid_head(grid_feats)
        det_bboxes = self.grid_head.get_bboxes(det_bboxes,
                                               grid_pred['fused'],
                                               img_meta)
        if rescale:
            det_bboxes[:, :4] /= img_meta[0]['scale_factor']
    else:
        det_bboxes = torch.Tensor([])
    bbox_results = bbox2result(det_bboxes, det_labels,
                               self.bbox_head.num_classes)
    return bbox_results
Example 15: simple_test_mask
# Required module import: from mmdet import core [as alias]
# Or: from mmdet.core import bbox2roi [as alias]
def simple_test_mask(self,
                     x,
                     img_meta,
                     det_bboxes,
                     det_labels,
                     rescale=False):
    # image shape of the first image in the batch (only one)
    ori_shape = img_meta[0]['ori_shape']
    scale_factor = img_meta[0]['scale_factor']
    if det_bboxes.shape[0] == 0:
        segm_result = [[] for _ in range(self.mask_head.num_classes - 1)]
        mask_scores = [[] for _ in range(self.mask_head.num_classes - 1)]
    else:
        # if det_bboxes is rescaled to the original image size, we need to
        # rescale it back to the testing scale to obtain RoIs.
        _bboxes = (
            det_bboxes[:, :4] * scale_factor if rescale else det_bboxes)
        mask_rois = bbox2roi([_bboxes])
        mask_feats = self.mask_roi_extractor(
            x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois)
        if self.with_shared_head:
            mask_feats = self.shared_head(mask_feats)
        mask_pred = self.mask_head(mask_feats)
        segm_result = self.mask_head.get_seg_masks(mask_pred, _bboxes,
                                                   det_labels,
                                                   self.test_cfg.rcnn,
                                                   ori_shape, scale_factor,
                                                   rescale)
        # get mask scores with mask iou head
        mask_iou_pred = self.mask_iou_head(
            mask_feats,
            mask_pred[range(det_labels.size(0)), det_labels + 1])
        mask_scores = self.mask_iou_head.get_mask_scores(
            mask_iou_pred, det_bboxes, det_labels)
    return segm_result, mask_scores