This article collects typical usage examples of the Python method mmdet.apis.inference_detector. If you have been wondering how apis.inference_detector is used in practice, how to call it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples from the containing module, mmdet.apis.
The following shows 9 code examples of apis.inference_detector, sorted by popularity by default.
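Before the examples, it helps to know what inference_detector returns. Below is a minimal sketch, assuming the mmdet 2.x API and a plain bbox detector (mask models return a tuple instead); the config and checkpoint paths are placeholders, not taken from the examples below:

from mmdet.apis import init_detector, inference_detector

# placeholder paths; substitute a real config/checkpoint pair
model = init_detector('configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
                      'checkpoints/faster_rcnn_r50_fpn_1x_coco.pth',
                      device='cuda:0')
result = inference_detector(model, 'demo.jpg')

# for a bbox-only detector, `result` is a list with one (N, 5) float array
# per class, each row being [x1, y1, x2, y2, score]
for class_name, dets in zip(model.CLASSES, result):
    for *bbox, score in dets:
        if score >= 0.3:
            print(class_name, bbox, float(score))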
Example 1: main
# Required import: from mmdet import apis [as alias]
# Or: from mmdet.apis import inference_detector [as alias]
from argparse import ArgumentParser

from mmdet.apis import inference_detector, init_detector, show_result_pyplot


def main():
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.3, help='bbox score threshold')
    args = parser.parse_args()
    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)
    # test a single image
    result = inference_detector(model, args.img)
    # show the results
    show_result_pyplot(model, args.img, result, score_thr=args.score_thr)
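show_result_pyplot opens a matplotlib window, which suits notebooks and desktop sessions. To write the visualization to disk instead, the detector's own show_result method (mmdet 2.x) accepts an out_file argument. A short sketch reusing model, result and args from the example above, with 'result.jpg' as a placeholder path:

# save the visualization instead of displaying it
model.show_result(args.img, result,
                  score_thr=args.score_thr,
                  show=False,
                  out_file='result.jpg')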
Example 2: main
# Required import: from mmdet import apis [as alias]
# Or: from mmdet.apis import inference_detector [as alias]
import cv2
import torch

from mmdet.apis import inference_detector, init_detector


def main():
    args = parse_args()  # parse_args() is defined elsewhere in the demo script

    device = torch.device(args.device)
    model = init_detector(args.config, args.checkpoint, device=device)
    camera = cv2.VideoCapture(args.camera_id)

    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        ret_val, img = camera.read()
        result = inference_detector(model, img)

        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break

        model.show_result(
            img, result, score_thr=args.score_thr, wait_time=1, show=True)
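parse_args is not shown in this snippet. A minimal sketch consistent with the attributes the loop uses (config, checkpoint, device, camera_id, score_thr); the help strings and defaults are assumptions:

import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='MMDetection webcam demo')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--device', type=str, default='cuda:0',
                        help='device used for inference')
    parser.add_argument('--camera-id', type=int, default=0,
                        help='camera device id')
    parser.add_argument('--score-thr', type=float, default=0.5,
                        help='bbox score threshold')
    return parser.parse_args()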
Example 3: main
# Required import: from mmdet import apis [as alias]
# Or: from mmdet.apis import inference_detector [as alias]
import cv2
import torch

from mmdet.apis import inference_detector, init_detector, show_result


def main():
    args = parse_args()  # parse_args() is defined elsewhere; args.device is a GPU id

    model = init_detector(
        args.config, args.checkpoint, device=torch.device('cuda', args.device))
    camera = cv2.VideoCapture(args.camera_id)

    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        ret_val, img = camera.read()
        result = inference_detector(model, img)

        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break

        show_result(
            img, result, model.CLASSES, score_thr=args.score_thr, wait_time=1)
Example 4: inference_model
# Required import: from mmdet import apis [as alias]
# Or: from mmdet.apis import inference_detector [as alias]
from mmdet.apis import inference_detector

# bbox_result_filter, preprocess_skeleton_inputs and inference_twodimestimator
# are helpers defined elsewhere in the same project.


def inference_model(images, detection_model, skeleton_model):
    batch_size = images.size()[0]
    skeleton_results = dict()
    # process the batch image by image
    for idx in range(batch_size):
        # get person bboxes for the current image
        image = images[idx, :, :, :]
        bbox_result = inference_detector(detection_model, image)
        person_bboxes = bbox_result_filter(bbox_result)
        # run skeleton estimation only when at least one person was found
        if person_bboxes.shape[0] > 0:
            image, meta = preprocess_skeleton_inputs(image, person_bboxes)
            skeleton_result, maxval = inference_twodimestimator(skeleton_model, image)
            skeleton_results[str(idx)] = skeleton_result
    return skeleton_results
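bbox_result_filter is referenced above but not defined in the snippet. A hypothetical sketch of what it might do, assuming a COCO-style model where class 0 is 'person' and each detection row ends with a score:

import numpy as np

def bbox_result_filter(bbox_result, score_thr=0.5):
    # hypothetical helper: keep person boxes above the score threshold
    person_dets = bbox_result[0]           # class 0 is 'person' in COCO
    keep = person_dets[:, 4] >= score_thr  # column 4 holds the score
    return person_dets[keep, :4]           # (N, 4) boxes in xyxy order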
Example 5: inference_single
# Required import: from mmdet import apis [as alias]
# Or: from mmdet.apis import inference_detector [as alias]
import mmcv
import numpy as np
from tqdm import tqdm

from mmdet.apis import inference_detector

# py_cpu_nms_poly_fast_np is a polygon-NMS helper defined elsewhere in the project.


def inference_single(self, imagname, slide_size, chip_size):
    # slide an (hn, wn) window over the image with a (slide_h, slide_w) stride
    img = mmcv.imread(imagname)
    height, width, channel = img.shape
    slide_h, slide_w = slide_size
    hn, wn = chip_size
    # TODO: check the corner case
    # each detection row holds 8 polygon coordinates plus a score
    total_detections = [np.zeros((0, 9)) for _ in range(len(self.classnames))]
    for i in tqdm(range(int(width / slide_w) + 1)):
        for j in range(int(height / slide_h) + 1):
            # pad border chips to the full window size
            subimg = np.zeros((hn, wn, channel))
            chip = img[j * slide_h:j * slide_h + hn, i * slide_w:i * slide_w + wn, :3]
            subimg[:chip.shape[0], :chip.shape[1], :] = chip

            chip_detections = inference_detector(self.model, subimg)
            for cls_id, name in enumerate(self.classnames):
                # shift chip-local polygon coordinates to full-image coordinates
                chip_detections[cls_id][:, 0:8:2] += i * slide_w
                chip_detections[cls_id][:, 1:8:2] += j * slide_h
                total_detections[cls_id] = np.concatenate(
                    (total_detections[cls_id], chip_detections[cls_id]))
    # polygon NMS over the merged detections of each class
    for i in range(len(self.classnames)):
        keep = py_cpu_nms_poly_fast_np(total_detections[i], 0.1)
        total_detections[i] = total_detections[i][keep]
    return total_detections
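The coordinate arithmetic above is the heart of the sliding-window merge: detections come back in chip-local coordinates and must be shifted by the chip's offset in the full image. A minimal, self-contained illustration (the polygon and stride values are made up):

import numpy as np

# one detection row: 8 polygon coordinates (x1, y1, ..., x4, y4) + a score
det = np.array([[10., 20., 110., 20., 110., 60., 10., 60., 0.9]])
i, j = 2, 1                   # chip position in the grid (assumed values)
slide_w, slide_h = 512, 512   # stride between chips (assumed values)
det[:, 0:8:2] += i * slide_w  # shift every x coordinate
det[:, 1:8:2] += j * slide_h  # shift every y coordinate
print(det)                    # polygon in full-image coordinates; score untouched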
Example 6: main
# Required import: from mmdet import apis [as alias]
# Or: from mmdet.apis import inference_detector [as alias]
from mmdet.apis import inference_detector, init_detector, show_result


def main():
    args = parse_args()  # parse_args() is defined elsewhere in the script
    # build the model from a config file and a checkpoint file
    model = init_detector(args.config_file, args.checkpoint, device='cuda:0')
    # test a single image and show the results
    img = args.input
    result = inference_detector(model, img)
    # visualize the results in a new window
    # or save the visualization results to image files
    show_result(
        img, result, model.CLASSES, out_file=img.split('.')[0] + '_result.jpg')
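One caveat: img.split('.')[0] truncates paths that contain extra dots, while os.path.splitext only strips the final extension. A small illustration with a made-up path:

import os.path as osp

img = 'data/img.v2.jpg'                      # example path, not from the source
print(img.split('.')[0] + '_result.jpg')     # data/img_result.jpg (".v2" lost)
print(osp.splitext(img)[0] + '_result.jpg')  # data/img.v2_result.jpg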
Example 7: main
# Required import: from mmdet import apis [as alias]
# Or: from mmdet.apis import inference_detector [as alias]
from mmdet.apis import inference_detector, init_detector, show_result

# parse_args and CLASS_NAMES are defined elsewhere in the original script.


def main():
    args = parse_args()
    model = init_detector(args.config, args.checkpoint, device='cuda:0')
    result = inference_detector(model, args.input)
    result = result[:-1]  # ignore the dummy class entry
    show_result(
        args.input,
        result,
        CLASS_NAMES,
        show=False,
        out_file=args.output,
    )
Example 8: human_boxes_get
# Required import: from mmdet import apis [as alias]
# Or: from mmdet.apis import inference_detector [as alias]
import mmcv

from mmdet.apis import inference_detector

# `cfg` is a module-level config object and `re_result` a helper defined
# elsewhere in the original script; note that this call uses an older
# mmdet API in which inference_detector also took cfg and device.


def human_boxes_get(model, img, score_thr=0.5):
    if isinstance(img, str):
        img = mmcv.imread(img)
    result = inference_detector(model, img, cfg, device='cuda:0')
    bboxes, scores = re_result(result, score_thr=score_thr)
    return bboxes, scores
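re_result is not defined in the snippet. A hypothetical sketch matching how it is called, assuming a per-class result list where class 0 is 'person' and column 4 is the score:

import numpy as np

def re_result(result, score_thr=0.5):
    # hypothetical helper: split person detections into boxes and scores
    dets = result[0]                   # person class (assumption)
    keep = dets[:, 4] >= score_thr
    return dets[keep, :4], dets[keep, 4]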
Example 9: worker
# Required import: from mmdet import apis [as alias]
# Or: from mmdet.apis import inference_detector [as alias]
import os

import mmcv

from mmdet.apis import inference_detector, init_detector

# get_mmskeleton_url, init_twodimestimator, inference_twodimestimator,
# VideoDemo and save are helpers from the surrounding project.


def worker(video_file, index, detection_cfg, skeleton_cfg, skeleon_data_cfg,
           device, result_queue):
    # pin this worker to a single GPU
    os.environ["CUDA_VISIBLE_DEVICES"] = str(device)
    video_frames = mmcv.VideoReader(video_file)

    # load the detection and skeleton models on CPU first, then move them to GPU
    detection_model_file = detection_cfg.model_cfg
    detection_checkpoint_file = get_mmskeleton_url(
        detection_cfg.checkpoint_file)
    detection_model = init_detector(detection_model_file,
                                    detection_checkpoint_file,
                                    device='cpu')
    skeleton_model_file = skeleton_cfg.model_cfg
    skeletion_checkpoint_file = skeleton_cfg.checkpoint_file
    skeleton_model = init_twodimestimator(skeleton_model_file,
                                          skeletion_checkpoint_file,
                                          device='cpu')
    detection_model = detection_model.cuda()
    skeleton_model = skeleton_model.cuda()

    for idx in index:
        skeleton_result = dict()
        image = video_frames[idx]
        draw_image = image.copy()
        # detect people in the frame, then keep boxes above the threshold
        bbox_result = inference_detector(detection_model, image)
        person_bbox, labels = VideoDemo.bbox_filter(bbox_result,
                                                    detection_cfg.bbox_thre)

        if len(person_bbox) > 0:
            # estimate skeletons for the detected people (BGR -> RGB)
            person, meta = VideoDemo.skeleton_preprocess(
                image[:, :, ::-1], person_bbox, skeleon_data_cfg)
            preds, maxvals = inference_twodimestimator(skeleton_model,
                                                       person.cuda(), meta,
                                                       True)
            results = VideoDemo.skeleton_postprocess(preds, maxvals, meta)
            if skeleon_data_cfg.save_video:
                file = os.path.join(skeleon_data_cfg.img_dir,
                                    '{}.png'.format(idx))
                mmcv.imshow_det_bboxes(draw_image,
                                       person_bbox,
                                       labels,
                                       detection_model.CLASSES,
                                       score_thr=detection_cfg.bbox_thre,
                                       show=False,
                                       wait_time=0)
                save(image, draw_image, results, file)
        else:
            preds, maxvals = None, None
            if skeleon_data_cfg.save_video:
                file = os.path.join(skeleon_data_cfg.img_dir,
                                    '{}.png'.format(idx))
                mmcv.imwrite(image, file)

        skeleton_result['frame_index'] = idx
        skeleton_result['position_preds'] = preds
        skeleton_result['position_maxvals'] = maxvals
        result_queue.put(skeleton_result)
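worker is written to be driven from multiple processes, one per GPU, with results collected through result_queue. A hedged driver sketch (the process count, frame partitioning and config objects are assumptions, not from the source):

import multiprocessing as mp

def run(video_file, detection_cfg, skeleton_cfg, skeleton_data_cfg,
        num_gpus=2, num_frames=100):
    result_queue = mp.Queue()
    # give each GPU an interleaved slice of the frame indices
    chunks = [range(gpu, num_frames, num_gpus) for gpu in range(num_gpus)]
    procs = [mp.Process(target=worker,
                        args=(video_file, chunk, detection_cfg, skeleton_cfg,
                              skeleton_data_cfg, gpu, result_queue))
             for gpu, chunk in enumerate(chunks)]
    for p in procs:
        p.start()
    # drain the queue before joining to avoid blocking on a full queue
    results = [result_queue.get() for _ in range(num_frames)]
    for p in procs:
        p.join()
    return sorted(results, key=lambda r: r['frame_index'])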