

Python test.im_detect Method Code Examples

This article collects and summarizes typical usage examples of the model.test.im_detect method in Python. If you are wondering how exactly to use test.im_detect, how to call it, or what it looks like in practice, the curated code examples here may help. You can also explore further usage examples of model.test, the module in which the method is defined.


Nine code examples of the test.im_detect method are shown below, sorted by popularity by default.
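Before the individual examples, here is a minimal sketch, written for this article rather than taken from any of the projects below, of the call pattern they all share: run im_detect on an image, keep the proposals whose score passes a confidence threshold, and apply non-maximum suppression. It is modelled on the single-class face-detection examples (1, 2 and 5); the multi-class examples instead loop over CLASSES and slice boxes per class. The sess and net objects are assumed to have been created beforehand, and the nms import path is an assumption that varies between projects (tf-faster-rcnn-style repositories expose model.nms_wrapper.nms).

import cv2
import numpy as np

from model.test import im_detect
from model.nms_wrapper import nms  # assumed import path; adjust to your project

def detect_single_class(sess, net, image_path, conf_thresh=0.8, nms_thresh=0.3):
    # Run the detector once: scores holds per-proposal class scores, boxes the proposal coordinates
    im = cv2.imread(image_path)
    scores, boxes = im_detect(sess, net, im)

    # Keep proposals whose score in column 0 passes the confidence threshold
    inds = np.where(scores[:, 0] > conf_thresh)[0]
    dets = np.hstack((boxes[inds, :], scores[inds, 0:1])).astype(np.float32, copy=False)

    # Non-maximum suppression discards heavily overlapping boxes
    keep = nms(dets, nms_thresh)
    return dets[keep, :]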

Example 1: video_demo

# Required import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def video_demo(sess, net, image):
    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, image)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.85
    NMS_THRESH = 0.3

    inds = np.where(scores[:, 0] > CONF_THRESH)[0]
    scores = scores[inds, 0]
    boxes = boxes[inds, :]
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
    keep = nms(dets, NMS_THRESH)
    dets = dets[keep, :]
    return dets
    # vis_detections(image, CLASSES[1], dets, thresh=CONF_THRESH) 
Developer: wanjinchang, Project: SSH-TensorFlow, Lines: 22, Source: demo.py
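A hypothetical usage of video_demo, not part of the SSH-TensorFlow demo shown above: driving it from an OpenCV capture loop and drawing the returned face boxes. The sess and net objects are assumed to have been restored from a checkpoint beforehand.

cap = cv2.VideoCapture(0)  # webcam index; a video file path also works
while True:
    ret, frame = cap.read()
    if not ret:
        break
    dets = video_demo(sess, net, frame)  # (M, 5) array: x1, y1, x2, y2, score
    for x1, y1, x2, y2, score in dets:
        cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
    cv2.imshow('detections', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()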

Example 2: run_on_fddb

# Required import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def run_on_fddb(sess, net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    im = cv2.imread(image_name)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.5
    NMS_THRESH = 0.3

    inds = np.where(scores[:, 0] > CONF_THRESH)[0]
    scores = scores[inds, 0]
    boxes = boxes[inds, :]
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
    keep = nms(dets, NMS_THRESH)
    dets = dets[keep, :]
    return dets 
Developer: wanjinchang, Project: SSH-TensorFlow, Lines: 26, Source: RunOnFDDB.py

Example 3: demo

# Required import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def demo(sess, net, im_file, icdar_dir, oriented=False, ltrb=False):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    im = helper.read_rgb_img(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes, resized_im_shape, im_scale = im_detect(sess, net, im)
    timer.toc()

    # Run TextDetector to merge small box
    line_detector = TextDetector(oriented)

    # text_lines point order: left-top, right-top, left-bottom, right-bottom
    text_lines = line_detector.detect(boxes, scores[:, np.newaxis], resized_im_shape)
    print("Image %s, detect %d text lines in %.3fs" % (im_file, len(text_lines), timer.diff))

    if len(text_lines) != 0:
        text_lines = recover_scale(text_lines, im_scale)

    return save_result_txt(text_lines, icdar_dir, im_file, ltrb) 
Developer: Sanster, Project: tf_ctpn, Lines: 25, Source: icdar.py

Example 4: demo

# Required import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def demo(net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time(), boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1 # because we skipped background
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(torch.from_numpy(dets), NMS_THRESH)
        dets = dets[keep.numpy(), :]
        vis_detections(im, cls, dets, thresh=CONF_THRESH) 
Developer: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 28, Source: demo.py

Example 5: demo

# Required import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def demo(sess, net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""

    once_time = 0

    # Load the demo image
    im = cv2.imread(image_name)
    # print('>>>>>>>', im.shape[0], im.shape[1])

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    once_time = timer.total_time
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.85
    NMS_THRESH = 0.3


    inds = np.where(scores[:, 0] > CONF_THRESH)[0]
    scores = scores[inds, 0]
    boxes = boxes[inds, :]
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
    keep = nms(dets, NMS_THRESH)
    dets = dets[keep, :]
    print('>>>>>num_faces:', dets.shape[0])
    cv2_vis(im, CLASSES[1], dets)
    return once_time 
Developer: wanjinchang, Project: SSH-TensorFlow, Lines: 34, Source: demo.py

Example 6: demo

# Required import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def demo(sess, net, im_file, result_dir, viz=False, oriented=False):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    im = helper.read_rgb_img(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes, resized_im_shape, im_scale = im_detect(sess, net, im)
    timer.toc()

    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    img_name = im_file.split('/')[-1]

    draw_rpn_boxes(im, img_name, boxes, scores[:, np.newaxis], im_scale, True, result_dir)
    draw_rpn_boxes(im, img_name, boxes, scores[:, np.newaxis], im_scale, False, result_dir)

    # Run TextDetector to merge small box
    line_detector = TextDetector(oriented)

    # The input to line_detector must be the image *after* scaling!
    # If line construction were done after restoring the original scale, the original image could be
    # too large, making each anchor's width very large and MAX_HORIZONTAL_GAP effectively too small.
    # text_lines point order: left-top, right-top, left-bottom, right-bottom
    text_lines = line_detector.detect(boxes, scores[:, np.newaxis], resized_im_shape)
    print("Image %s, detect %d text lines in %.3fs" % (im_file, len(text_lines), timer.diff))

    if len(text_lines) != 0:
        text_lines = recover_scale(text_lines, im_scale)
        save_result(im, img_name, text_lines, result_dir)

    # Visualize detections
    if viz:
        vis_detections(im, CLASSES[1], text_lines) 
Developer: Sanster, Project: tf_ctpn, Lines: 36, Source: demo.py

Example 7: demo

# Required import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def demo(sess, net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1 # because we skipped background
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        vis_detections(im, cls, dets, thresh=CONF_THRESH) 
Developer: endernewton, Project: tf-faster-rcnn, Lines: 28, Source: demo.py

Example 8: demo

# Required import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def demo(sess, net, image_name,bbox):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    #im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    if os.path.isfile(os.path.join(data_dir, image_name)):
        im_file = os.path.join(data_dir, image_name)

    else:
        im_file = os.path.join(data_dir_2, image_name)
    revise=40
    im = cv2.imread(im_file) 
    pixel_means=np.array([[[102, 115, 122]]])

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    try:
        scores, boxes,_,_ = im_detect(sess, net, im)
    except Exception as e:
        print(e)
        return
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.0
    NMS_THRESH = 1.0
    
    for cls_ind, cls in enumerate(CLASSES[1:]):
        if cls=='authentic':
            continue
        cls_ind += 1 # because we skipped background

        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        im_score=vis_detections(im, cls, dets,image_name, thresh=CONF_THRESH)
    return im_score 
Developer: pengzhou1108, Project: RGB-N, Lines: 44, Source: demo.py

Example 9: demo

# Required import: from model import test [as alias]
# Or: from model.test import im_detect [as alias]
def demo(sess, net, im_file, RCNN):
    """Detect object classes in an image using pre-computed object proposals."""
    
    image_name = im_file.split('/')[-1]
    tmp = []
    
    # Load the demo image
    im = cv2.imread(im_file)
    im = im[:, :, (2, 1, 0)]
    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    #print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.3
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
       
        cls_ind += 1 # because we skipped background
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        
        for det_inst in dets:
            if det_inst[4] > CONF_THRESH:
                inst_tmp = [image_name]
                if cls_ind == 1:
                    inst_tmp.append('Human')
                else:
                    inst_tmp.append('Object')
                inst_tmp.append(det_inst[:4])
                inst_tmp.append(np.nan)
                inst_tmp.append(cls_ind)
                inst_tmp.append(det_inst[4])
                tmp.append(inst_tmp)
                    
                    
    RCNN[image_name] = tmp 
Developer: vt-vl-lab, Project: iCAN, Lines: 46, Source: Object_Detector.py
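Each entry appended above is a list of the form [image_name, 'Human' or 'Object', box (x1, y1, x2, y2), NaN, class index, score]. A hypothetical driver, not from the iCAN repository, that collects detections for a directory of images into the RCNN dictionary and pickles the result might look like this (the input directory and output filename are placeholders):

import glob
import pickle

RCNN = {}
for im_file in sorted(glob.glob('demo_images/*.jpg')):  # placeholder input directory
    demo(sess, net, im_file, RCNN)

with open('detections.pkl', 'wb') as f:
    pickle.dump(RCNN, f)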


Note: The model.test.im_detect examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code; do not reproduce this article without permission.