This article collects typical usage examples of the Python method model.bbox_transform.clip_boxes. If you are wondering what bbox_transform.clip_boxes does, how to call it, or where to find examples of its use, the curated code samples below should help. You can also explore further usage examples of the enclosing module, model.bbox_transform.
The following shows 12 code examples of the bbox_transform.clip_boxes method, sorted by popularity by default.
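The examples below only call clip_boxes; the helper itself lives in model/bbox_transform.py of each project. For orientation, here is a minimal sketch of what this function typically does in Faster R-CNN-style codebases (an assumption for reference, not the verbatim implementation of any project listed here):
import numpy as np

def clip_boxes(boxes, im_shape):
    """Clip (x1, y1, x2, y2) boxes to lie inside an image of shape (height, width)."""
    # x1 in [0, width - 1]
    boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
    # y1 in [0, height - 1]
    boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
    # x2 in [0, width - 1]
    boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
    # y2 in [0, height - 1]
    boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
    return boxes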
Example 1: proposal_top_layer
# Required import: from model import bbox_transform [as alias]
# Or alternatively: from model.bbox_transform import clip_boxes [as alias]
def proposal_top_layer(rpn_cls_prob, rpn_bbox_pred, im_info, _feat_stride, anchors, num_anchors):
    """A layer that just selects the top region proposals
    without using non-maximal suppression.
    For details please see the technical report.
    """
    rpn_top_n = cfg.TEST.RPN_TOP_N
    scores = rpn_cls_prob[:, :, :, num_anchors:]
    rpn_bbox_pred = rpn_bbox_pred.view(-1, 4)
    scores = scores.contiguous().view(-1, 1)
    length = scores.size(0)
    if length < rpn_top_n:
        # Random selection, maybe unnecessary and loses good proposals,
        # but such a case rarely happens
        top_inds = torch.from_numpy(npr.choice(length, size=rpn_top_n, replace=True)).long().cuda()
    else:
        top_inds = scores.sort(0, descending=True)[1]
        top_inds = top_inds[:rpn_top_n]
        top_inds = top_inds.view(rpn_top_n)
    # Do the selection here
    anchors = anchors[top_inds, :].contiguous()
    rpn_bbox_pred = rpn_bbox_pred[top_inds, :].contiguous()
    scores = scores[top_inds].contiguous()
    # Convert anchors into proposals via bbox transformations
    proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
    # Clip predicted boxes to image
    proposals = clip_boxes(proposals, im_info[:2])
    # Output rois blob
    # Our RPN implementation only supports a single input image, so all
    # batch inds are 0
    batch_inds = proposals.data.new(proposals.size(0), 1).zero_()
    blob = torch.cat([batch_inds, proposals], 1)
    return blob, scores
Developer: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines of code: 41, Source file: proposal_top_layer.py
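Examples 1 and 2 pass torch tensors to clip_boxes, so the imported helper must also handle tensors. A minimal sketch of such a tensor version, assuming a clamp-based implementation (an illustration, not the project's verbatim code):
import torch

def clip_boxes(boxes, im_shape):
    """Clamp (x1, y1, x2, y2) tensor boxes to an image of shape (height, width)."""
    boxes[:, 0::4] = boxes[:, 0::4].clamp(0, float(im_shape[1]) - 1)  # x1
    boxes[:, 1::4] = boxes[:, 1::4].clamp(0, float(im_shape[0]) - 1)  # y1
    boxes[:, 2::4] = boxes[:, 2::4].clamp(0, float(im_shape[1]) - 1)  # x2
    boxes[:, 3::4] = boxes[:, 3::4].clamp(0, float(im_shape[0]) - 1)  # y2
    return boxes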
Example 2: proposal_layer
# Required import: from model import bbox_transform [as alias]
# Or alternatively: from model.bbox_transform import clip_boxes [as alias]
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride, anchors, num_anchors):
    """A simplified version compared to fast/er RCNN.
    For details please see the technical report.
    """
    if type(cfg_key) == bytes:
        cfg_key = cfg_key.decode('utf-8')
    pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
    nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
    # Get the scores and bounding boxes
    scores = rpn_cls_prob[:, :, :, num_anchors:]
    rpn_bbox_pred = rpn_bbox_pred.view((-1, 4))
    scores = scores.contiguous().view(-1, 1)
    proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
    proposals = clip_boxes(proposals, im_info[:2])
    # Pick the top region proposals
    scores, order = scores.view(-1).sort(descending=True)
    if pre_nms_topN > 0:
        order = order[:pre_nms_topN]
        scores = scores[:pre_nms_topN].view(-1, 1)
    proposals = proposals[order.data, :]
    # Non-maximal suppression
    keep = nms(torch.cat((proposals, scores), 1).data, nms_thresh)
    # Pick the top region proposals after NMS
    if post_nms_topN > 0:
        keep = keep[:post_nms_topN]
    proposals = proposals[keep, :]
    scores = scores[keep,]
    # Only support single image as input
    batch_inds = Variable(proposals.data.new(proposals.size(0), 1).zero_())
    blob = torch.cat((batch_inds, proposals), 1)
    return blob, scores
Developer: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines of code: 40, Source file: proposal_layer.py
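Every example decodes the RPN regression deltas with bbox_transform_inv before clipping. For context, a sketch of the standard Faster R-CNN decoding in NumPy (assumed to be close to the helper these projects import; model/bbox_transform.py in each repository is authoritative):
import numpy as np

def bbox_transform_inv(boxes, deltas):
    """Apply (dx, dy, dw, dh) deltas to anchor boxes and return predicted boxes."""
    widths = boxes[:, 2] - boxes[:, 0] + 1.0
    heights = boxes[:, 3] - boxes[:, 1] + 1.0
    ctr_x = boxes[:, 0] + 0.5 * widths
    ctr_y = boxes[:, 1] + 0.5 * heights
    dx = deltas[:, 0::4]
    dy = deltas[:, 1::4]
    dw = deltas[:, 2::4]
    dh = deltas[:, 3::4]
    pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
    pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
    pred_w = np.exp(dw) * widths[:, np.newaxis]
    pred_h = np.exp(dh) * heights[:, np.newaxis]
    pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
    pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w  # x1
    pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h  # y1
    pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w  # x2
    pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h  # y2
    return pred_boxes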
Example 3: proposal_top_layer
# Required import: from model import bbox_transform [as alias]
# Or alternatively: from model.bbox_transform import clip_boxes [as alias]
def proposal_top_layer(rpn_cls_prob, rpn_bbox_pred, im_info, _feat_stride, anchors, num_anchors):
    """A layer that just selects the top region proposals
    without using non-maximal suppression.
    For details please see the technical report.
    """
    rpn_top_n = cfg.TEST.RPN_TOP_N
    scores = rpn_cls_prob[:, :, :, num_anchors:]
    rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
    scores = scores.reshape((-1, 1))
    length = scores.shape[0]
    if length < rpn_top_n:
        # Random selection, maybe unnecessary and loses good proposals,
        # but such a case rarely happens
        top_inds = npr.choice(length, size=rpn_top_n, replace=True)
    else:
        top_inds = scores.argsort(0)[::-1]
        top_inds = top_inds[:rpn_top_n]
        top_inds = top_inds.reshape(rpn_top_n, )
    # Do the selection here
    anchors = anchors[top_inds, :]
    rpn_bbox_pred = rpn_bbox_pred[top_inds, :]
    scores = scores[top_inds]
    # Convert anchors into proposals via bbox transformations
    proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
    # Clip predicted boxes to image
    proposals = clip_boxes(proposals, im_info[:2])
    # Output rois blob
    # Our RPN implementation only supports a single input image, so all
    # batch inds are 0
    batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
    blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
    return blob, scores
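A hypothetical call sketch for the NumPy variant above, with dummy shapes, just to show what the inputs look like (the feature-map size, stride, anchor count, and im_info values are illustrative assumptions):
# H, W: feature-map size; A: anchors per location (e.g. 9)
# rpn_cls_prob:  np.ndarray, shape (1, H, W, 2 * A)   -- softmax scores (bg, fg)
# rpn_bbox_pred: np.ndarray, shape (1, H, W, 4 * A)   -- regression deltas
# im_info:       e.g. np.array([600., 800., 1.6])     -- (height, width, scale)
# anchors:       np.ndarray, shape (H * W * A, 4)     -- (x1, y1, x2, y2)
# rois, roi_scores = proposal_top_layer(rpn_cls_prob, rpn_bbox_pred, im_info,
#                                       16, anchors, 9)
# rois has shape (cfg.TEST.RPN_TOP_N, 5): a zero batch index plus the clipped box.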
Example 4: proposal_top_layer
# Required import: from model import bbox_transform [as alias]
# Or alternatively: from model.bbox_transform import clip_boxes [as alias]
def proposal_top_layer(rpn_cls_prob, rpn_bbox_pred, im_info, anchors, num_anchors):
    """A layer that just selects the top region proposals
    without using non-maximal suppression.
    For details please see the technical report.
    """
    rpn_top_n = cfg.TEST.RPN_TOP_N
    scores = rpn_cls_prob[:, :, :, num_anchors:]
    rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
    scores = scores.reshape((-1, 1))
    length = scores.shape[0]
    if length < rpn_top_n:
        # Random selection, maybe unnecessary and loses good proposals,
        # but such a case rarely happens
        top_inds = npr.choice(length, size=rpn_top_n, replace=True)
    else:
        top_inds = scores.argsort(0)[::-1]
        top_inds = top_inds[:rpn_top_n]
        top_inds = top_inds.reshape(rpn_top_n, )
    # Do the selection here
    anchors = anchors[top_inds, :]
    rpn_bbox_pred = rpn_bbox_pred[top_inds, :]
    scores = scores[top_inds]
    # Convert anchors into proposals via bbox transformations
    proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
    # Clip predicted boxes to image
    proposals = clip_boxes(proposals, im_info[:2])
    # Output rois blob
    # Our RPN implementation only supports a single input image, so all
    # batch inds are 0
    batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
    blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
    return blob, scores
Example 5: proposal_top_layer
# Required import: from model import bbox_transform [as alias]
# Or alternatively: from model.bbox_transform import clip_boxes [as alias]
def proposal_top_layer(rpn_cls_prob, rpn_bbox_pred, im_info, _feat_stride, anchors, num_anchors):
    """A layer that just selects the top region proposals
    without using non-maximal suppression.
    For details please see the technical report.
    """
    rpn_top_n = cfg.TEST.RPN_TOP_N
    im_info = im_info[0]
    scores = rpn_cls_prob[:, :, :, num_anchors:]
    rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
    scores = scores.reshape((-1, 1))
    length = scores.shape[0]
    if length < rpn_top_n:
        # Random selection, maybe unnecessary and loses good proposals,
        # but such a case rarely happens
        top_inds = npr.choice(length, size=rpn_top_n, replace=True)
    else:
        top_inds = scores.argsort(0)[::-1]
        top_inds = top_inds[:rpn_top_n]
        top_inds = top_inds.reshape(rpn_top_n, )
    # Do the selection here
    anchors = anchors[top_inds, :]
    rpn_bbox_pred = rpn_bbox_pred[top_inds, :]
    scores = scores[top_inds]
    # Convert anchors into proposals via bbox transformations
    proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
    # Clip predicted boxes to image
    proposals = clip_boxes(proposals, im_info[:2])
    # Output rois blob
    # Our RPN implementation only supports a single input image, so all
    # batch inds are 0
    batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
    blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
    return blob, scores
Example 6: proposal_layer
# Required import: from model import bbox_transform [as alias]
# Or alternatively: from model.bbox_transform import clip_boxes [as alias]
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride, anchors, num_anchors):
    """A simplified version compared to fast/er RCNN.
    For details please see the technical report.
    """
    if type(cfg_key) == bytes:
        cfg_key = cfg_key.decode('utf-8')
    pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
    nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
    im_info = im_info[0]
    # Get the scores and bounding boxes
    scores = rpn_cls_prob[:, :, :, num_anchors:]
    rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
    scores = scores.reshape((-1, 1))
    proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
    proposals = clip_boxes(proposals, im_info[:2])
    # Pick the top region proposals
    order = scores.ravel().argsort()[::-1]
    if pre_nms_topN > 0:
        order = order[:pre_nms_topN]
    proposals = proposals[order, :]
    scores = scores[order]
    # Non-maximal suppression
    keep = nms(np.hstack((proposals, scores)), nms_thresh)
    # Pick the top region proposals after NMS
    if post_nms_topN > 0:
        keep = keep[:post_nms_topN]
    proposals = proposals[keep, :]
    scores = scores[keep]
    # Only support single image as input
    batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
    blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
    return blob, scores
Example 7: proposal_layer
# Required import: from model import bbox_transform [as alias]
# Or alternatively: from model.bbox_transform import clip_boxes [as alias]
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride, anchors, num_anchors):
    """A simplified version compared to fast/er RCNN.
    For details please see the technical report.
    """
    if type(cfg_key) == bytes:
        cfg_key = cfg_key.decode('utf-8')
    pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
    nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
    # Get the scores and bounding boxes
    scores = rpn_cls_prob[:, :, :, num_anchors:]
    rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
    scores = scores.reshape((-1, 1))
    proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
    proposals = clip_boxes(proposals, im_info[:2])
    # Pick the top region proposals
    order = scores.ravel().argsort()[::-1]
    if pre_nms_topN > 0:
        order = order[:pre_nms_topN]
    proposals = proposals[order, :]
    scores = scores[order]
    # Non-maximal suppression
    keep = nms(np.hstack((proposals, scores)), nms_thresh)
    # Pick the top region proposals after NMS
    if post_nms_topN > 0:
        keep = keep[:post_nms_topN]
    proposals = proposals[keep, :]
    scores = scores[keep]
    # Only support single image as input
    batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
    blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
    return blob, scores
Example 8: pad_rois
# Required import: from model import bbox_transform [as alias]
# Or alternatively: from model.bbox_transform import clip_boxes [as alias]
def pad_rois(rois, im_info, is_training):
    """Pad RoIs to utilize contextual information and to alleviate truncation."""
    nroi = rois.shape[0]
    proposals = np.zeros((nroi, 4), dtype=np.float32)
    w = rois[:, 3] - rois[:, 1]
    h = rois[:, 4] - rois[:, 2]
    dw = cfg.POOL_PAD_RATIO * w
    dh = cfg.POOL_PAD_RATIO * h
    if is_training:
        nw = npr.rand(nroi)
        nh = npr.rand(nroi)
    else:
        nw = np.ones(nroi) * 0.5
        nh = np.ones(nroi) * 0.5
    proposals[:, 0] = rois[:, 1] - (dw - nw * (1 + 2 * cfg.POOL_PAD_RATIO) / 15 * w)
    proposals[:, 1] = rois[:, 2] - (dh - nh * (1 + 2 * cfg.POOL_PAD_RATIO) / 15 * h)
    proposals[:, 2] = rois[:, 3] + (dw - (1 - nw) * (1 + 2 * cfg.POOL_PAD_RATIO) / 15 * w)
    proposals[:, 3] = rois[:, 4] + (dh - (1 - nh) * (1 + 2 * cfg.POOL_PAD_RATIO) / 15 * h)
    proposals = clip_boxes(proposals, im_info[:2])
    batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
    blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
    return blob
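Here the input rois columns are (batch_ind, x1, y1, x2, y2), hence the 1..4 indexing. A worked example of the padding arithmetic under a hypothetical cfg.POOL_PAD_RATIO of 0.1 (the value is an assumption for illustration):
# At test time nw = nh = 0.5, so for the x direction:
#   dw                = 0.1 * w
#   centering term    = 0.5 * (1 + 2 * 0.1) / 15 * w = 0.04 * w
#   per-side widening = 0.1 * w - 0.04 * w = 0.06 * w
# Each RoI is therefore widened by 0.06 * w on the left and right (and analogously
# in height), then clipped back inside the image by clip_boxes.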
Example 9: proposal_layer
# Required import: from model import bbox_transform [as alias]
# Or alternatively: from model.bbox_transform import clip_boxes [as alias]
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride, anchors, num_anchors):
    """A simplified version compared to fast/er RCNN.
    For details please see the technical report.
    """
    if type(cfg_key) == bytes:
        cfg_key = cfg_key.decode('utf-8')
    pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
    nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
    # Get the scores and bounding boxes
    scores = rpn_cls_prob[:, :, :, num_anchors:]
    rpn_bbox_pred = rpn_bbox_pred.view((-1, 4))
    scores = scores.contiguous().view(-1, 1)
    proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
    proposals = clip_boxes(proposals, im_info[:2])
    # Pick the top region proposals
    scores, order = scores.view(-1).sort(descending=True)
    if pre_nms_topN > 0:
        order = order[:pre_nms_topN]
        scores = scores[:pre_nms_topN].view(-1, 1)
    proposals = proposals[order.data, :]
    # Non-maximal suppression
    keep = nms(torch.cat((proposals, scores), 1).data, nms_thresh)
    # Pick the top region proposals after NMS
    if post_nms_topN > 0:
        keep = keep[:post_nms_topN]
    proposals = proposals[keep, :]  # at test time: (300, 4)
    scores = scores[keep,]
    # Our RPN implementation only supports a single input image,
    # so all batch inds are 0.
    # That is, all of these RoIs belong to one image; if multiple input images
    # are supported later, each RoI must record which image (i.e. which batch
    # index) it belongs to.
    batch_inds = Variable(proposals.data.new(proposals.size(0), 1).zero_())
    blob = torch.cat((batch_inds, proposals), 1)
    return blob, scores
Example 10: proposal_layer_fpn
# Required import: from model import bbox_transform [as alias]
# Or alternatively: from model.bbox_transform import clip_boxes [as alias]
def proposal_layer_fpn(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride, anchors, num_anchors):
    """A simplified version compared to fast/er RCNN.
    For details please see the technical report.
    """
    if type(cfg_key) == bytes:
        cfg_key = cfg_key.decode('utf-8')
    pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
    nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
    proposals_total = []
    scores_total = []
    for idx in range(len(rpn_cls_prob)):
        # Get the scores and bounding boxes
        scores = rpn_cls_prob[idx][:, :, :, num_anchors:]
        rpn_bbox_pred[idx] = rpn_bbox_pred[idx].view((-1, 4))
        scores = scores.contiguous().view(-1, 1)
        proposals = bbox_transform_inv(anchors[idx], rpn_bbox_pred[idx])
        proposals = clip_boxes(proposals, im_info[:2])
        # Pick the top region proposals
        scores, order = scores.view(-1).sort(descending=True)
        if pre_nms_topN > 0:
            order = order[:pre_nms_topN]
            scores = scores[:pre_nms_topN].view(-1, 1)
        proposals = proposals[order.data, :]
        proposals_total.append(proposals)
        scores_total.append(scores)
    proposals = torch.cat(proposals_total)
    scores = torch.cat(scores_total)
    # Non-maximal suppression
    keep = nms(torch.cat((proposals, scores), 1).data, nms_thresh)
    # Pick the top region proposals after NMS
    if post_nms_topN > 0:
        keep = keep[:post_nms_topN]
    proposals = proposals[keep, :]
    scores = scores[keep,]
    # Only support single image as input
    batch_inds = Variable(proposals.data.new(proposals.size(0), 1).zero_())
    blob = torch.cat((batch_inds, proposals), 1)
    return blob, scores
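In this FPN variant the inputs are per-level lists rather than single tensors. A hypothetical shape sketch (the level names and sizes are illustrative assumptions):
# rpn_cls_prob  = [p2_prob, p3_prob, ...]    each of shape (1, H_l, W_l, 2 * A)
# rpn_bbox_pred = [p2_pred, p3_pred, ...]    each of shape (1, H_l, W_l, 4 * A)
# anchors       = [p2_anchors, p3_anchors, ...]  each of shape (H_l * W_l * A, 4)
# Proposals are decoded, clipped, and pre-NMS-trimmed per level, then concatenated
# across levels before a single NMS pass selects the final post_nms_topN proposals.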
Example 11: proposal_layer
# Required import: from model import bbox_transform [as alias]
# Or alternatively: from model.bbox_transform import clip_boxes [as alias]
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride, anchors, num_anchors):
    """A simplified version compared to fast/er RCNN.
    For details please see the technical report.
    """
    if type(cfg_key) == bytes:
        cfg_key = cfg_key.decode('utf-8')
    pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
    nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
    min_size = cfg[cfg_key].ANCHOR_MIN_SIZE
    # Get the scores and bounding boxes
    scores = rpn_cls_prob[:, :, :, num_anchors:]
    rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
    scores = scores.reshape((-1, 1))
    proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
    proposals = clip_boxes(proposals, im_info[:2])
    # Remove predicted boxes with either height or width < threshold
    # (NOTE: convert min_size to input image scale stored in im_info[2])
    # keep = _filter_boxes(proposals, min_size * im_info[2])
    # proposals = proposals[keep, :]
    # scores = scores[keep]
    # Pick the top region proposals
    order = scores.ravel().argsort()[::-1]
    if pre_nms_topN > 0:
        order = order[:pre_nms_topN]
    proposals = proposals[order, :]
    scores = scores[order]
    # Non-maximal suppression
    keep = nms(np.hstack((proposals, scores)), nms_thresh)
    # Pick the top region proposals after NMS
    if post_nms_topN > 0:
        keep = keep[:post_nms_topN]
    proposals = proposals[keep, :]
    scores = scores[keep]
    # Only support single image as input
    batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
    blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
    return blob, scores
Example 12: proposal_layer
# Required import: from model import bbox_transform [as alias]
# Or alternatively: from model.bbox_transform import clip_boxes [as alias]
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, anchors, num_anchors):
    """
    A simplified version compared to fast/er RCNN.
    For details please see the technical report.
    :param
        rpn_cls_prob: (1, H, W, Ax2) softmax result of rpn scores
        rpn_bbox_pred: (1, H, W, Ax4) 1x1 conv result for rpn bbox
    """
    if type(cfg_key) == bytes:
        cfg_key = cfg_key.decode('utf-8')
    pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
    nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
    # Get the scores and bounding boxes for foreground (text)
    # The order in the last dim is related to network.py:
    #   self._reshape_layer(rpn_cls_prob_reshape, self._num_anchors * 2, "rpn_cls_prob")
    # scores = rpn_cls_prob[:, :, :, num_anchors:]  # old
    height, width = rpn_cls_prob.shape[1:3]  # height and width of the feature map
    scores = np.reshape(np.reshape(rpn_cls_prob, [1, height, width, num_anchors, 2])[:, :, :, :, 1],
                        [1, height, width, num_anchors])
    rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
    scores = scores.reshape((-1, 1))
    proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
    proposals = clip_boxes(proposals, im_info[:2])
    # Pick the top region proposals
    order = scores.ravel().argsort()[::-1]
    if pre_nms_topN > 0:
        order = order[:pre_nms_topN]
    proposals = proposals[order, :]
    scores = scores[order]
    # Non-maximal suppression
    keep = nms(np.hstack((proposals, scores)), nms_thresh, not cfg.USE_GPU_NMS)
    # Pick the top region proposals after NMS
    if post_nms_topN > 0:
        keep = keep[:post_nms_topN]
    proposals = proposals[keep, :]
    scores = scores[keep]
    # Only support single image as input
    blob = np.hstack((scores.astype(np.float32, copy=False), proposals.astype(np.float32, copy=False)))
    return blob, scores
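Note that this last variant returns a differently laid-out blob than the earlier examples; a short column sketch derived from the code above:
# Examples 1-11:  blob[:, 0]   -> batch index (always 0, single-image input)
#                 blob[:, 1:5] -> clipped proposal (x1, y1, x2, y2)
# Example 12:     blob[:, 0]   -> foreground (text) score of the proposal
#                 blob[:, 1:5] -> clipped proposal (x1, y1, x2, y2)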