This article collects typical usage examples of the Python method utils.nms_wrapper.nms. If you are wondering how exactly nms_wrapper.nms is used in Python, what it does, or where to find working examples, the curated code samples below may help. You can also explore further usage examples from the utils.nms_wrapper module in which the method is defined.
The following presents 7 code examples of the nms_wrapper.nms method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
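For orientation, here is a minimal pure-NumPy sketch of what a typical nms(dets, thresh) call computes: greedy suppression of boxes whose IoU with an already-kept, higher-scoring box exceeds thresh. This is an illustrative stand-in, not the compiled CPU/GPU kernel behind utils.nms_wrapper.nms; dets is assumed to be an (N, 5) float array of [x1, y1, x2, y2, score].

import numpy as np

def greedy_nms(dets, thresh):
    """Greedy NMS over an (N, 5) array of [x1, y1, x2, y2, score]; returns kept indices."""
    x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]          # process boxes from highest to lowest score
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # intersection of the current box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # drop the boxes that overlap the current box by more than thresh
        order = order[np.where(iou <= thresh)[0] + 1]
    return keep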
Example 1: nms_process
# Required import: from utils import nms_wrapper [as alias]
# Or: from utils.nms_wrapper import nms [as alias]
def nms_process(num_classes, i, scores, boxes, cfg, min_thresh, all_boxes, max_per_image):
    for j in range(1, num_classes):  # ignore the background class (category_id = 0)
        inds = np.where(scores[:, j] > min_thresh)[0]
        if len(inds) == 0:
            all_boxes[j][i] = np.empty([0, 5], dtype=np.float32)
            continue
        c_bboxes = boxes[inds]
        c_scores = scores[inds, j]
        c_dets = np.hstack((c_bboxes, c_scores[:, np.newaxis])).astype(np.float32, copy=False)
        soft_nms = cfg.test_cfg.soft_nms
        keep = nms(c_dets, cfg.test_cfg.iou, force_cpu=soft_nms)
        keep = keep[:cfg.test_cfg.keep_per_class]  # keep only the highest-scoring boxes
        c_dets = c_dets[keep, :]
        all_boxes[j][i] = c_dets
    if max_per_image > 0:
        image_scores = np.hstack([all_boxes[j][i][:, -1] for j in range(1, num_classes)])
        if len(image_scores) > max_per_image:
            image_thresh = np.sort(image_scores)[-max_per_image]
            for j in range(1, num_classes):
                keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                all_boxes[j][i] = all_boxes[j][i][keep, :]
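A hypothetical driver for nms_process on synthetic data, only to make the expected shapes explicit. This call site is an assumption, not part of the original project, and it presumes the compiled utils.nms_wrapper extension is built and nms_process above is in scope.

import numpy as np
from types import SimpleNamespace

num_classes, num_images, num_boxes = 21, 1, 200
scores = np.random.rand(num_boxes, num_classes).astype(np.float32)      # per-class scores (N, C)
xy1 = np.random.rand(num_boxes, 2).astype(np.float32) * 300             # top-left corners
wh = np.random.rand(num_boxes, 2).astype(np.float32) * 50               # widths / heights
boxes = np.hstack((xy1, xy1 + wh)).astype(np.float32)                   # xyxy boxes (N, 4)
all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
cfg = SimpleNamespace(test_cfg=SimpleNamespace(soft_nms=False, iou=0.45, keep_per_class=50))

nms_process(num_classes, 0, scores, boxes, cfg, min_thresh=0.01,
            all_boxes=all_boxes, max_per_image=100)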
Example 2: im_detect
# Required import: from utils import nms_wrapper [as alias]
# Or: from utils.nms_wrapper import nms [as alias]
def im_detect(img, net, detector, transform, thresh=0.01):
    with torch.no_grad():
        t0 = time.time()
        w, h = img.shape[1], img.shape[0]
        x = transform(img)[0].unsqueeze(0)
        x = x.cuda()
        t1 = time.time()
        output = net(x)
        boxes, scores = detector.forward(output)
        t2 = time.time()
        max_conf, max_id = scores[0].topk(1, 1, True, True)
        pos = max_id > 0
        if len(pos) == 0:
            return np.empty((0, 6))
        boxes = boxes[0][pos.view(-1, 1).expand(len(pos), 4)].view(-1, 4)
        scores = max_conf[pos].view(-1, 1)
        max_id = max_id[pos].view(-1, 1)
        inds = scores > thresh
        if len(inds) == 0:
            return np.empty((0, 6))
        boxes = boxes[inds.view(-1, 1).expand(len(inds), 4)].view(-1, 4)
        scores = scores[inds].view(-1, 1)
        max_id = max_id[inds].view(-1, 1)
        c_dets = torch.cat((boxes, scores, max_id.float()), 1).cpu().numpy()
        img_classes = np.unique(c_dets[:, -1])
        output = None
        flag = False
        for cls in img_classes:
            cls_mask = np.where(c_dets[:, -1] == cls)[0]
            image_pred_class = c_dets[cls_mask, :]
            keep = nms(image_pred_class, cfg.TEST.NMS_OVERLAP, force_cpu=True)
            keep = keep[:50]
            image_pred_class = image_pred_class[keep, :]
            if not flag:
                output = image_pred_class
                flag = True
            else:
                output = np.concatenate((output, image_pred_class), axis=0)
        output[:, 0:2][output[:, 0:2] < 0] = 0
        output[:, 2:4][output[:, 2:4] > 1] = 1
        scale = np.array([w, h, w, h])
        output[:, :4] = output[:, :4] * scale
        t3 = time.time()
        print("transform_t:", round(t1 - t0, 3), "detect_time:",
              round(t2 - t1, 3), "nms_time:", round(t3 - t2, 3))
        return output
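The per-class Python loop above calls the compiled nms once per detected class. If torchvision is available, an alternative (not what this snippet uses) is torchvision.ops.batched_nms, which performs class-aware NMS in a single call. A minimal sketch, assuming boxes is an (N, 4) xyxy tensor and scores/labels are (N,) tensors:

import torchvision

def classwise_nms(boxes, scores, labels, iou_thresh=0.45):
    """Class-aware NMS in one call; returns kept indices sorted by decreasing score."""
    # batched_nms offsets boxes per class internally, so boxes of different
    # classes never suppress each other.
    return torchvision.ops.batched_nms(boxes, scores, labels, iou_thresh)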
Example 3: detect_face
# Required import: from utils import nms_wrapper [as alias]
# Or: from utils.nms_wrapper import nms [as alias]
def detect_face(net, img, resize):
    if resize != 1:
        img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
    im_height, im_width, _ = img.shape
    scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
    img -= (104, 117, 123)
    img = img.transpose(2, 0, 1)
    #img = img[[2, 1, 0], :, :]
    img = torch.from_numpy(img).unsqueeze(0)
    if args.cuda:
        img = img.cuda()
        scale = scale.cuda()
    out = net(img)  # forward pass
    priorbox = PriorBox(cfg, out[2], (im_height, im_width), phase='test')
    priors = priorbox.forward()
    if args.cuda:
        priors = priors.cuda()
    loc, conf, _ = out
    print(loc.size(), conf.size())
    prior_data = priors.data
    boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
    boxes = boxes * scale / resize
    boxes = boxes.cpu().numpy()
    scores = conf.data.cpu().numpy()[:, 1]
    # ignore low scores
    inds = np.where(scores > args.confidence_threshold)[0]
    boxes = boxes[inds]
    scores = scores[inds]
    # keep top-K before NMS
    order = scores.argsort()[::-1][:args.top_k]
    boxes = boxes[order]
    scores = scores[order]
    #print(boxes)
    # do NMS
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
    keep = nms(dets, args.nms_threshold, force_cpu=args.cpu)
    dets = dets[keep, :]
    #print(dets)
    # keep top-K after NMS
    dets = dets[:args.keep_top_k, :]
    return dets
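detect_face reads its thresholds from a module-level args object. The sketch below shows a minimal argparse setup for the flags the function expects; the flag names are taken from the code above, while the default values are placeholders, not the project's actual defaults.

import argparse

parser = argparse.ArgumentParser(description='Face detection with utils.nms_wrapper.nms')
parser.add_argument('--confidence_threshold', type=float, default=0.05, help='drop detections below this score')
parser.add_argument('--top_k', type=int, default=5000, help='keep at most this many boxes before NMS')
parser.add_argument('--nms_threshold', type=float, default=0.3, help='IoU threshold passed to nms()')
parser.add_argument('--keep_top_k', type=int, default=750, help='keep at most this many boxes after NMS')
parser.add_argument('--cuda', action='store_true', help='run the network on GPU')
parser.add_argument('--cpu', action='store_true', help='force the CPU NMS kernel (force_cpu=True)')
args = parser.parse_args()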
Example 4: detect_face
# Required import: from utils import nms_wrapper [as alias]
# Or: from utils.nms_wrapper import nms [as alias]
def detect_face(net, img, resize):
    if resize != 1:
        img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
    im_height, im_width, _ = img.shape
    scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
    img -= (104, 117, 123)
    img = img.transpose(2, 0, 1)
    img = torch.from_numpy(img).unsqueeze(0)
    if args.cuda:
        img = img.cuda()
        scale = scale.cuda()
    out = net(img)  # forward pass
    priorbox = PriorBox(cfg, out[2], (im_height, im_width), phase='test')
    priors = priorbox.forward()
    if args.cuda:
        priors = priors.cuda()
    loc, conf, _ = out
    prior_data = priors.data
    boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
    boxes = boxes * scale / resize
    boxes = boxes.cpu().numpy()
    scores = conf.data.cpu().numpy()[:, 1]
    # ignore low scores
    inds = np.where(scores > args.confidence_threshold)[0]
    boxes = boxes[inds]
    scores = scores[inds]
    # keep top-K before NMS
    order = scores.argsort()[::-1][:args.top_k]
    boxes = boxes[order]
    scores = scores[order]
    #print(boxes)
    # do NMS
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
    keep = nms(dets, args.nms_threshold, force_cpu=args.cpu)
    dets = dets[keep, :]
    #print(dets)
    # keep top-K after NMS
    dets = dets[:args.keep_top_k, :]
    return dets
Example 5: test_net
# Required import: from utils import nms_wrapper [as alias]
# Or: from utils.nms_wrapper import nms [as alias]
def test_net(save_folder, net, detector, cuda, testset, transform, top_k=300, thresh=0.005):
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    num_images = len(testset)
    num_classes = 81
    all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    det_file = os.path.join(save_folder, 'detections.pkl')
    for i in range(num_images):
        img = testset.pull_image(i)
        x = Variable(transform(img).unsqueeze(0), volatile=True)
        x = x.cuda() if cuda else x
        out = net(x)
        boxes, scores = detector.forward(out, priors)
        boxes = boxes[0]
        scores = scores[0]
        boxes = boxes.cpu().numpy()
        scores = scores.cpu().numpy()
        # scale back up to the image
        scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]]).cpu().numpy()
        boxes *= scale
        for j in range(1, num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            if len(inds) == 0:
                all_boxes[j][i] = np.empty([0, 5], dtype=np.float32)
                continue
            c_bboxes = boxes[inds]
            c_scores = scores[inds, j]
            c_dets = np.hstack((c_bboxes, c_scores[:, np.newaxis])).astype(np.float32, copy=False)
            cpu = False
            keep = nms(c_dets, 0.45, force_cpu=cpu)
            keep = keep[:50]
            c_dets = c_dets[keep, :]
            all_boxes[j][i] = c_dets
        if top_k > 0:
            image_scores = np.hstack([all_boxes[j][i][:, -1] for j in range(1, num_classes)])
            if len(image_scores) > top_k:
                image_thresh = np.sort(image_scores)[-top_k]
                for j in range(1, num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, protocol=2)
    print('Evaluating detections')
    testset.evaluate_detections(all_boxes, save_folder)
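Variable(..., volatile=True) is the pre-0.4 PyTorch inference idiom; on current PyTorch versions volatile has no effect and the forward pass is wrapped in torch.no_grad() instead. A minimal sketch of the equivalent step, reusing the names from the function above (img, transform, net, detector, priors, cuda):

with torch.no_grad():  # replaces Variable(..., volatile=True) on PyTorch >= 0.4
    x = transform(img).unsqueeze(0)
    x = x.cuda() if cuda else x
    out = net(x)
    boxes, scores = detector.forward(out, priors)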
Example 6: test_net
# Required import: from utils import nms_wrapper [as alias]
# Or: from utils.nms_wrapper import nms [as alias]
def test_net(save_folder, net, detector, cuda, testset, transform, top_k=300, thresh=0.005):
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    num_images = len(testset)
    num_classes = (21, 81)[args.dataset == 'COCO']
    all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    det_file = os.path.join(save_folder, 'detections.pkl')
    for i in range(num_images):
        img = testset.pull_image(i)
        x = Variable(transform(img).unsqueeze(0), volatile=True)
        x = x.cuda() if cuda else x
        out = net(x)
        boxes, scores = detector.forward(out, priors)
        boxes = boxes[0]
        scores = scores[0]
        boxes = boxes.cpu().numpy()
        scores = scores.cpu().numpy()
        # scale back up to the image
        scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]]).cpu().numpy()
        boxes *= scale
        _t['nms'].tic()
        for j in range(1, num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            if len(inds) == 0:
                all_boxes[j][i] = np.empty([0, 5], dtype=np.float32)
                continue
            c_bboxes = boxes[inds]
            c_scores = scores[inds, j]
            c_dets = np.hstack((c_bboxes, c_scores[:, np.newaxis])).astype(np.float32, copy=False)
            cpu = True if args.dataset == 'VOC' else False
            keep = nms(c_dets, 0.45, force_cpu=cpu)
            keep = keep[:50]
            c_dets = c_dets[keep, :]
            all_boxes[j][i] = c_dets
        if top_k > 0:
            image_scores = np.hstack([all_boxes[j][i][:, -1] for j in range(1, num_classes)])
            if len(image_scores) > top_k:
                image_thresh = np.sort(image_scores)[-top_k]
                for j in range(1, num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, protocol=2)
    print('Evaluating detections')
    testset.evaluate_detections(all_boxes, save_folder)
Example 7: detection_postprecess
# Required import: from utils import nms_wrapper [as alias]
# Or: from utils.nms_wrapper import nms [as alias]
def detection_postprecess(detection, iou_thresh, num_classes, input_wh, ori_wh, use_pad=False, nms_conf=0.4):
    assert detection.size(0) == 1, "only support batch_size == 1"
    conf_mask = (detection[:, :, 4] > iou_thresh).float().unsqueeze(2)
    detection = detection * conf_mask
    try:
        ind_nz = torch.nonzero(detection[:, :, 4]).transpose(0, 1).contiguous()
    except:
        print("detect no results")
        return np.empty([0, 5], dtype=np.float32)
    bbox_pred = point_form(detection[:, :, :4].view(-1, 4))
    conf_pred = detection[:, :, 4].view(-1, 1)
    cls_pred = detection[:, :, 5:].view(-1, num_classes)
    max_conf, max_conf_idx = torch.max(cls_pred, 1)
    max_conf = max_conf.float().unsqueeze(1)
    max_conf_idx = max_conf_idx.float().unsqueeze(1)
    # score = (conf_pred * max_conf).view(-1, 1)
    score = conf_pred
    image_pred = torch.cat((bbox_pred, score, max_conf, max_conf_idx), 1)
    non_zero_ind = torch.nonzero(image_pred[:, 4])
    image_pred_ = image_pred[non_zero_ind.squeeze(), :].view(-1, 7)
    try:
        img_classes = unique(image_pred_[:, -1])
    except:
        print("no class found")
        return np.empty([0, 7], dtype=np.float32)
    flag = False
    out_put = None
    for cls in img_classes:
        cls_mask = image_pred_ * (image_pred_[:, -1] == cls).float().unsqueeze(1)
        class_mask_ind = torch.nonzero(cls_mask[:, -2]).squeeze()
        image_pred_class = image_pred_[class_mask_ind].view(-1, 7)
        keep = nms(image_pred_class.cpu().numpy(), nms_conf, force_cpu=True)
        image_pred_class = image_pred_class[keep]
        if not flag:
            out_put = image_pred_class
            flag = True
        else:
            out_put = torch.cat((out_put, image_pred_class), 0)
    image_pred_class = out_put
    if use_pad:
        scaling_factor = min(input_wh[0] / ori_wh[0], input_wh[1] / ori_wh[1])
        image_pred_class[:, [0, 2]] -= (input_wh[0] - scaling_factor * ori_wh[0]) / 2
        image_pred_class[:, [1, 3]] -= (input_wh[1] - scaling_factor * ori_wh[1]) / 2
        image_pred_class[:, :4] /= scaling_factor
    else:
        image_pred_class[:, [0, 2]] /= input_wh[0]
        image_pred_class[:, [1, 3]] /= input_wh[1]
        image_pred_class[:, [0, 2]] *= ori_wh[0]
        image_pred_class[:, [1, 3]] *= ori_wh[1]
    for i in range(image_pred_class.shape[0]):
        image_pred_class[i, [0, 2]] = torch.clamp(image_pred_class[i, [0, 2]], 0.0, ori_wh[0])
        image_pred_class[i, [1, 3]] = torch.clamp(image_pred_class[i, [1, 3]], 0.0, ori_wh[1])
    return image_pred_class.cpu().numpy()
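The point_form and unique helpers used above are defined elsewhere in that repository and are not shown on this page. As a rough guide, here is a minimal sketch of what such helpers commonly look like in SSD/YOLO-style codebases; this is an assumption, not the exact code used by the project.

import torch

def point_form(boxes):
    """Convert (cx, cy, w, h) boxes to corner form (x1, y1, x2, y2)."""
    return torch.cat((boxes[:, :2] - boxes[:, 2:] / 2,
                      boxes[:, :2] + boxes[:, 2:] / 2), 1)

def unique(tensor):
    """Return the unique values of a 1-D tensor (the class ids present in the image)."""
    return torch.unique(tensor)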