

Python cv2.FlannBasedMatcher Method Code Examples

This article collects typical usage examples of the cv2.FlannBasedMatcher method in Python. If you are wondering what cv2.FlannBasedMatcher does, how to use it, or what working examples look like, the selected code examples below should help. You can also explore further usage examples from the cv2 module.


The following presents 15 code examples of the cv2.FlannBasedMatcher method, sorted by popularity by default.
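
Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: build a KD-tree FLANN index for float (SIFT) descriptors, run knnMatch with k=2, and keep only the matches that pass Lowe's ratio test. The image file names are placeholders, and cv2.SIFT_create assumes OpenCV 4.4 or newer, where SIFT lives in the main module.

import cv2

# Placeholder input images (assumed file names).
img1 = cv2.imread('query.png', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)

# SIFT produces float descriptors, so a KD-tree index is appropriate.
sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)

flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# Lowe's ratio test: keep a match only if it is clearly better
# than the second-best candidate.
good = [m for m, n in matches if m.distance < 0.7 * n.distance]
print('good matches:', len(good))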

Example 1: knn_match

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def knn_match(des1, des2, nn_ratio=0.7):
  
  # FLANN parameters
  index_params = dict(algorithm = 0, trees = 5)
  search_params = dict(checks = 50)

  flann = cv2.FlannBasedMatcher(index_params, search_params)
  
  # Match features from each image
  matches = flann.knnMatch(des1, des2, k=2)

  # store only the good matches as per Lowe's ratio test.
  good = []
  for m, n in matches:
    if m.distance < nn_ratio * n.distance:
      good.append(m)

  return good

# calculate the angle with the horizontal 
Author: dmartinalbo, Project: image-matching, Lines: 22, Source: image-matching.py

Example 2: init_detector

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def init_detector(self):
        """Init keypoint detector object."""
        # BRIEF is a feature descriptor; CenSurE is recommended as a fast detector:
        if check_cv_version_is_new():
            # OpenCV 3/4: SIFT lives in the contrib module, which must be compiled separately.
            try:
                self.detector = cv2.xfeatures2d.SIFT_create(edgeThreshold=10)
            except Exception:
                import traceback
                traceback.print_exc()
                raise NoModuleError("There is no %s module in your OpenCV environment, the contrib module is required!" % self.METHOD_NAME)
        else:
            # OpenCV2.x
            self.detector = cv2.SIFT(edgeThreshold=10)

        # create FLANN matcher object:
        self.matcher = cv2.FlannBasedMatcher({'algorithm': self.FLANN_INDEX_KDTREE, 'trees': 5}, dict(checks=50)) 
Author: AirtestProject, Project: Airtest, Lines: 19, Source: keypoint_matching_contrib.py

Example 3: init_feature

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Author: NetEase, Project: airtest, Lines: 27, Source: findobj.py
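
Note: FLANN_INDEX_KDTREE and FLANN_INDEX_LSH are module-level constants in the original findobj.py and are not shown in the snippet above (the same names are referenced in Examples 7, 9, 12, and 13). A minimal assumed definition using the standard FLANN algorithm indices is:

FLANN_INDEX_KDTREE = 1  # KD-tree index for float descriptors (SIFT/SURF)
FLANN_INDEX_LSH = 6     # locality-sensitive hashing for binary descriptors (ORB)

Also note that this particular example uses the OpenCV 2.x detector API (cv2.SIFT(), cv2.SURF(), cv2.ORB()); Example 9 shows the equivalent written against the OpenCV 3.x API with contrib modules.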

Example 4: _searchAndmatch

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def _searchAndmatch(image_1_descriptors, image_2_descriptors, threshold=0.7,
                    image_2_keypoint=None):
    """KNN Match"""
    Good_match_keypoints, kp2_xy = [], []
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(image_1_descriptors, image_2_descriptors, k=2)
    """Lower's threshold"""
    for m,n in matches:
        if image_2_keypoint: kp2_xy.append(image_2_keypoint[m.trainIdx].pt)
        if m.distance < threshold*n.distance: Good_match_keypoints.append(m)
    return Good_match_keypoints, kp2_xy

#refine center 
Author: NetEase, Project: airtest, Lines: 18, Source: image_SIFT.py

Example 5: flann_matching

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def flann_matching(orb_match1, orb_match2):
    kp1, des1 = orb_match1
    kp2, des2 = orb_match2

    # FLANN parameters
    index_params = dict(algorithm=6,  # FLANN_INDEX_LSH
                        table_number=12,
                        key_size=12,
                        multi_probe_level=2)
    search_params = dict(checks=100)  # or pass empty dictionary
    flann_matcher = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann_matcher.knnMatch(des1, des2, k=2)

    cor = []
    # ratio test as per Lowe's paper
    for m_n in matches:
        if len(m_n) != 2:
            continue
        elif m_n[0].distance < 0.80 * m_n[1].distance:
            cor.append([kp1[m_n[0].queryIdx].pt[0], kp1[m_n[0].queryIdx].pt[1],
                        kp2[m_n[0].trainIdx].pt[0], kp2[m_n[0].trainIdx].pt[1],
                        m_n[0].distance])

    return np.array(cor) 
Author: jagin, Project: detectron2-pipeline, Lines: 26, Source: pose_flow.py

Example 6: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def __init__(self, norm_type=cv2.NORM_HAMMING, cross_check = False, ratio_test=kRatioTest, type = FeatureMatcherTypes.FLANN):
        super().__init__(norm_type=norm_type, cross_check=cross_check, ratio_test=ratio_test, type=type)
        if norm_type == cv2.NORM_HAMMING:
            # FLANN parameters for binary descriptors 
            FLANN_INDEX_LSH = 6
            self.index_params= dict(algorithm = FLANN_INDEX_LSH,   # Multi-Probe LSH: Efficient Indexing for High-Dimensional Similarity Search
                        table_number = 6,      # 12
                        key_size = 12,         # 20
                        multi_probe_level = 1) # 2            
        if norm_type == cv2.NORM_L2: 
            # FLANN parameters for float descriptors 
            FLANN_INDEX_KDTREE = 1
            self.index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 4)  
        self.search_params = dict(checks=32)   # or pass empty dictionary                 
        self.matcher = cv2.FlannBasedMatcher(self.index_params, self.search_params)  
        self.matcher_name = 'FlannFeatureMatcher' 
Author: luigifreda, Project: pyslam, Lines: 18, Source: feature_matcher.py

Example 7: init_feature

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(400)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Author: UASLab, Project: ImageAnalysis, Lines: 27, Source: find_obj.py

Example 8: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def __init__(self):
        self.detector = cv2.ORB_create( nfeatures = 1000 )
        self.matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
        self.targets = []
        self.frame_points = [] 
Author: makelove, Project: OpenCV-Python-Tutorial, Lines: 7, Source: plane_tracker.py

Example 9: init_feature

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.xfeatures2d.SIFT_create()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Author: makelove, Project: OpenCV-Python-Tutorial, Lines: 33, Source: find_obj.py
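
A hedged usage sketch for this init_feature, assuming it sits in the same module as an existing import cv2, that the FLANN constants from the note after Example 3 are defined, and that the image file names below are placeholders:

FLANN_INDEX_KDTREE = 1
FLANN_INDEX_LSH = 6

detector, matcher = init_feature('orb-flann')
img1 = cv2.imread('box.png', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('box_in_scene.png', cv2.IMREAD_GRAYSCALE)
kp1, des1 = detector.detectAndCompute(img1, None)
kp2, des2 = detector.detectAndCompute(img2, None)
raw_matches = matcher.knnMatch(des1, des2, k=2)
# With the LSH index some queries may return fewer than two neighbours.
good = [pair[0] for pair in raw_matches
        if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance]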

Example 10: FlannMatch_SIFT

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def FlannMatch_SIFT(img1, img2):
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)   # or pass empty dictionary

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1, des2, k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in xrange(len(matches))]

    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.7 * n.distance:
            matchesMask[i] = [1, 0]

    return (kp1, kp2, matches, matchesMask) 
Author: cynricfu, Project: dual-fisheye-video-stitching, Lines: 28, Source: feature_match.py

Example 11: matchKeypoints

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
                       ratio, reprojThresh):
        # FLANN parameters
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)   # or pass empty dictionary

        # compute the raw matches
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        rawMatches = flann.knnMatch(featuresA, featuresB, k=2)

        # perform Lowe's ratio test to get actual matches
        matches = []
        for m, n in rawMatches:
            # ensure the distance is within a certain ratio of each
            # other (i.e. Lowe's ratio test)
            if m.distance < ratio * n.distance:
                # here queryIdx corresponds to kpsA
                # trainIdx corresponds to kpsB
                matches.append((m.trainIdx, m.queryIdx))

        # computing a homography requires at least 4 matches
        if len(matches) > 4:
            # construct the two sets of points
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])

            # compute the homography between the two sets of points
            (H, status) = cv2.findHomography(
                ptsB, ptsA, cv2.RANSAC, reprojThresh)

            # return the matches along with the homography matrix
            # and status of each matched point
            return (matches, H, status)
        else:
            # otherwise, no homography could be computed
            return None 
Author: cynricfu, Project: dual-fisheye-video-stitching, Lines: 39, Source: stitcher.py

Example 12: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def __init__(self, templates, ratio=0.75):
        self.templates = templates
        self.ratio = ratio

        flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        self.matcher = cv2.FlannBasedMatcher(flann_params, {})
        self.pool = ThreadPool(processes=cv2.getNumberOfCPUs()) 
Author: AVGInnovationLabs, Project: DoNotSnap, Lines: 9, Source: TemplateMatcher.py

Example 13: __setstate__

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def __setstate__(self, state):
        self.templates = state['templates']
        self.ratio = state['ratio']

        flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        self.matcher = cv2.FlannBasedMatcher(flann_params, {})
        self.pool = ThreadPool(processes=1)  # cv2.getNumberOfCPUs()) 
Author: AVGInnovationLabs, Project: DoNotSnap, Lines: 9, Source: TemplateMatcher.py

Example 14: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def __init__(self): 
        # Use locality sensitive hashing algorithm 
        flann_params = dict(algorithm = 6, table_number = 6, key_size = 12, multi_probe_level = 1) 
 
        self.min_matches = 10 
        self.cur_target = namedtuple('Current', 'image, rect, keypoints, descriptors, data')
        self.tracked_target = namedtuple('Tracked', 'target, points_prev, points_cur, H, quad') 
 
        self.feature_detector = cv2.ORB_create()
        self.feature_detector.setMaxFeatures(1000)
        self.feature_matcher = cv2.FlannBasedMatcher(flann_params, {}) 
        self.tracking_targets = [] 
 
    # Function to add a new target for tracking 
Author: PacktPublishing, Project: OpenCV-3-x-with-Python-By-Example, Lines: 16, Source: pose_estimation.py
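
For binary ORB descriptors, the LSH index used above (algorithm=6) is the usual choice. Below is a minimal standalone sketch under the same parameters; the image file names are placeholders, not part of the original project:

import cv2

orb = cv2.ORB_create(nfeatures=1000)
kp1, des1 = orb.detectAndCompute(cv2.imread('target.png', cv2.IMREAD_GRAYSCALE), None)
kp2, des2 = orb.detectAndCompute(cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE), None)

flann_params = dict(algorithm=6, table_number=6, key_size=12, multi_probe_level=1)
matcher = cv2.FlannBasedMatcher(flann_params, {})  # empty search params, see issue #1329
pairs = matcher.knnMatch(des1, des2, k=2)

# LSH may return fewer than two neighbours for some queries, so guard the unpack.
good = [p[0] for p in pairs if len(p) == 2 and p[0].distance < 0.75 * p[1].distance]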

Example 15: find

# Required import: import cv2 [as alias]
# Or: from cv2 import FlannBasedMatcher [as alias]
def find(search_file, image_file, threshold=None):
    '''
    The threshold parameter is not used in SIFT matching.
    '''
    sch = _cv2open(search_file, 0)
    img = _cv2open(image_file, 0)

    kp_sch, des_sch = sift.detectAndCompute(sch, None)
    kp_img, des_img = sift.detectAndCompute(img, None)

    if len(kp_sch) < MIN_MATCH_COUNT or len(kp_img) < MIN_MATCH_COUNT:
        return None

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des_sch, des_img, k=2)

    good = []
    for m,n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)

    if len(good) > MIN_MATCH_COUNT:
        sch_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        img_pts = np.float32([kp_img[m.trainIdx].pt for m in good]).reshape(-1, 1, 2) 

        M, mask = cv2.findHomography(sch_pts, img_pts, cv2.RANSAC, 5.0)
        # matchesMask = mask.ravel().tolist()

        h, w = sch.shape
        pts = np.float32([ [0, 0], [0, h-1], [w-1, h-1], [w-1, 0] ]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        lt, br = dst[0][0], dst[2][0]
        return map(int, (lt[0]+w/2, lt[1]+h/2))
    else:
        return None 
Author: NetEase, Project: airtest, Lines: 42, Source: sift.py


Note: The cv2.FlannBasedMatcher examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use should follow each project's license. Do not republish without permission.