This page collects typical usage examples of the Python method model.test.im_detect. If you are unsure what test.im_detect does or how to call it, the curated code examples below should help; you can also explore the enclosing module, model.test, for further usage examples.
Below are 9 code examples of test.im_detect, sorted by popularity by default; upvoting the examples you like or find useful helps the system recommend better Python code samples.
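All of the examples share the same basic pattern: load an image (usually with OpenCV), call im_detect on an already-initialized session and network, then filter the raw scores and boxes with a confidence threshold and non-maximum suppression. The sketch below condenses that shared pattern; the import paths for nms and Timer (model.nms_wrapper, utils.timer), the single-class slicing, and the helper name detect_one_class are assumptions in the style of tf-faster-rcnn-like projects and may differ in your codebase.

import cv2
import numpy as np
from model.test import im_detect          # the method documented on this page
from model.nms_wrapper import nms         # assumed import path for NMS
from utils.timer import Timer             # assumed timer utility

def detect_one_class(sess, net, im_path, cls_ind=1, conf_thresh=0.8, nms_thresh=0.3):
    """Run im_detect on a single image and return NMS-filtered, high-confidence boxes."""
    im = cv2.imread(im_path)
    timer = Timer()
    timer.tic()
    # scores: (N, num_classes), boxes: (N, 4 * num_classes) in (x1, y1, x2, y2) order
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    cls_scores = scores[:, cls_ind]
    cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
    dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
    keep = nms(dets, nms_thresh)
    dets = dets[keep, :]
    return dets[dets[:, -1] > conf_thresh]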
Example 1: video_demo
# Required module import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def video_demo(sess, net, image):
    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, image)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.85
    NMS_THRESH = 0.3
    inds = np.where(scores[:, 0] > CONF_THRESH)[0]
    scores = scores[inds, 0]
    boxes = boxes[inds, :]
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
    keep = nms(dets, NMS_THRESH)
    dets = dets[keep, :]
    return dets
    # vis_detections(image, CLASSES[1], dets, thresh=CONF_THRESH)
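For context, video_demo above could be driven by a simple capture loop like the following sketch; sess and net are assumed to be a TensorFlow session and a detection network that have already been restored from a checkpoint, and the drawing step uses plain OpenCV.

# Hypothetical driver for video_demo: sess and net are assumed to be already set up.
cap = cv2.VideoCapture(0)                     # or a video file path
while True:
    ok, frame = cap.read()
    if not ok:
        break
    dets = video_demo(sess, net, frame)       # rows of (x1, y1, x2, y2, score)
    for x1, y1, x2, y2, score in dets:
        cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
    cv2.imshow('detections', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()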
Example 2: run_on_fddb
# Required module import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def run_on_fddb(sess, net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""
    # Load the demo image
    im = cv2.imread(image_name)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.5
    NMS_THRESH = 0.3
    inds = np.where(scores[:, 0] > CONF_THRESH)[0]
    scores = scores[inds, 0]
    boxes = boxes[inds, :]
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
    keep = nms(dets, NMS_THRESH)
    dets = dets[keep, :]
    return dets
Example 3: demo
# Required module import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def demo(sess, net, im_file, icdar_dir, oriented=False, ltrb=False):
    """Detect object classes in an image using pre-computed object proposals."""
    # Load the demo image
    im = helper.read_rgb_img(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes, resized_im_shape, im_scale = im_detect(sess, net, im)
    timer.toc()

    # Run TextDetector to merge small boxes into text lines
    line_detector = TextDetector(oriented)
    # text_lines point order: left-top, right-top, left-bottom, right-bottom
    text_lines = line_detector.detect(boxes, scores[:, np.newaxis], resized_im_shape)
    print("Image %s, detect %d text lines in %.3fs" % (im_file, len(text_lines), timer.diff))

    if len(text_lines) != 0:
        text_lines = recover_scale(text_lines, im_scale)

    return save_result_txt(text_lines, icdar_dir, im_file, ltrb)
Example 4: demo
# Required module import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def demo(net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""
    # Load the demo image
    im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time(), boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(torch.from_numpy(dets), NMS_THRESH)
        dets = dets[keep.numpy(), :]
        vis_detections(im, cls, dets, thresh=CONF_THRESH)
Developer: Sunarker | Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection | Lines: 28 | Source file: demo.py
Example 5: demo
# Required module import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def demo(sess, net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""
    # Load the demo image
    im = cv2.imread(image_name)
    # print('>>>>>>>', im.shape[0], im.shape[1])

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    once_time = timer.total_time
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.85
    NMS_THRESH = 0.3
    inds = np.where(scores[:, 0] > CONF_THRESH)[0]
    scores = scores[inds, 0]
    boxes = boxes[inds, :]
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
    keep = nms(dets, NMS_THRESH)
    dets = dets[keep, :]
    print('>>>>>num_faces:', dets.shape[0])
    cv2_vis(im, CLASSES[1], dets)
    return once_time
Example 6: demo
# Required module import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def demo(sess, net, im_file, result_dir, viz=False, oriented=False):
    """Detect object classes in an image using pre-computed object proposals."""
    # Load the demo image
    im = helper.read_rgb_img(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes, resized_im_shape, im_scale = im_detect(sess, net, im)
    timer.toc()

    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    img_name = im_file.split('/')[-1]
    draw_rpn_boxes(im, img_name, boxes, scores[:, np.newaxis], im_scale, True, result_dir)
    draw_rpn_boxes(im, img_name, boxes, scores[:, np.newaxis], im_scale, False, result_dir)

    # Run TextDetector to merge small boxes into text lines
    line_detector = TextDetector(oriented)
    # line_detector must be fed boxes on the resized (scaled) image!
    # Building text lines after recovering to the original scale can make the image too large,
    # so each anchor's width becomes large and MAX_HORIZONTAL_GAP is effectively too small.
    # text_lines point order: left-top, right-top, left-bottom, right-bottom
    text_lines = line_detector.detect(boxes, scores[:, np.newaxis], resized_im_shape)
    print("Image %s, detect %d text lines in %.3fs" % (im_file, len(text_lines), timer.diff))

    if len(text_lines) != 0:
        text_lines = recover_scale(text_lines, im_scale)

    save_result(im, img_name, text_lines, result_dir)

    # Visualize detections
    if viz:
        vis_detections(im, CLASSES[1], text_lines)
Example 7: demo
# Required module import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def demo(sess, net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""
    # Load the demo image
    im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        vis_detections(im, cls, dets, thresh=CONF_THRESH)
Example 8: demo
# Required module import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def demo(sess, net, image_name, bbox):
    """Detect object classes in an image using pre-computed object proposals."""
    # Load the demo image
    # im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    if os.path.isfile(os.path.join(data_dir, image_name)):
        im_file = os.path.join(data_dir, image_name)
    else:
        im_file = os.path.join(data_dir_2, image_name)
    revise = 40
    im = cv2.imread(im_file)
    pixel_means = np.array([[[102, 115, 122]]])

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    try:
        scores, boxes, _, _ = im_detect(sess, net, im)
    except Exception as e:
        print(e)
        return
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.0
    NMS_THRESH = 1.0
    for cls_ind, cls in enumerate(CLASSES[1:]):
        if cls == 'authentic':
            continue
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        im_score = vis_detections(im, cls, dets, image_name, thresh=CONF_THRESH)
    return im_score
Example 9: demo
# Required module import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def demo(sess, net, im_file, RCNN):
    """Detect object classes in an image using pre-computed object proposals."""
    image_name = im_file.split('/')[-1]
    tmp = []

    # Load the demo image
    im = cv2.imread(im_file)
    im = im[:, :, (2, 1, 0)]  # BGR -> RGB

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    # print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.3
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        for det_inst in dets:
            if det_inst[4] > CONF_THRESH:
                inst_tmp = [image_name]
                if cls_ind == 1:
                    inst_tmp.append('Human')
                else:
                    inst_tmp.append('Object')
                inst_tmp.append(det_inst[:4])
                inst_tmp.append(np.nan)
                inst_tmp.append(cls_ind)
                inst_tmp.append(det_inst[4])
                tmp.append(inst_tmp)
    RCNN[image_name] = tmp
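Example 9 accumulates its detections into the RCNN dictionary passed in by the caller. A hypothetical driver might loop over an image folder and then persist the dictionary, for instance with pickle; the folder path and output filename below are placeholders, and sess and net are again assumed to be already set up.

# Hypothetical driver for Example 9: collect detections for a folder and save them.
import glob
import pickle

RCNN = {}
for im_file in sorted(glob.glob('images/*.jpg')):   # placeholder image directory
    demo(sess, net, im_file, RCNN)
with open('rcnn_detections.pkl', 'wb') as f:        # placeholder output path
    pickle.dump(RCNN, f)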