

Python cv2.BFMatcher Method Code Examples

This article collects typical usage examples of the cv2.BFMatcher method in Python. If you are wondering what cv2.BFMatcher does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the cv2 module.


The following presents 15 code examples of the cv2.BFMatcher method, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code examples.
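
Before the collected examples, here is a minimal self-contained sketch of the typical cv2.BFMatcher workflow: detect keypoints, compute descriptors, then brute-force match with a norm suited to the descriptor type. The image paths are placeholders.

import cv2

img1 = cv2.imread('query.png', cv2.IMREAD_GRAYSCALE)  # placeholder paths
img2 = cv2.imread('train.png', cv2.IMREAD_GRAYSCALE)

# ORB yields binary descriptors, so NORM_HAMMING is the right distance;
# crossCheck=True keeps only mutual best matches.
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)
print('%d matches' % len(matches))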

Example 1: baseline_sift_matching

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def baseline_sift_matching(img1, img2):
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    matches = cv2.BFMatcher().knnMatch(des1, des2, k=2)

    good = [[m] for m, n in matches if m.distance < 0.7*n.distance]
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None,
                              matchColor=(0, 255, 0), matchesMask=None,
                              singlePointColor=(255, 0, 0), flags=0)
    return img3 
Developer: ethz-asl | Project: hfnet | Lines: 13 | Source: frame_matching.py

Example 2: init_detector

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def init_detector(self):
    """Init keypoint detector object."""
    # BRIEF is a feature descriptor; CenSurE (STAR) is recommended as a fast detector.
    if check_cv_version_is_new():
        # In OpenCV 3/4, STAR/BRIEF live in the contrib module, which must be built separately.
        try:
            self.star_detector = cv2.xfeatures2d.StarDetector_create()
            self.brief_extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create()
        except Exception:
            import traceback
            traceback.print_exc()
            print("to use %s, you should build contrib with opencv3.0" % self.METHOD_NAME)
            raise NoModuleError("There is no %s module in your OpenCV environment !" % self.METHOD_NAME)
    else:
        # OpenCV 2.x
        self.star_detector = cv2.FeatureDetector_create("STAR")
        self.brief_extractor = cv2.DescriptorExtractor_create("BRIEF")

    # create BFMatcher object:
    self.matcher = cv2.BFMatcher(cv2.NORM_L1)  # options: cv2.NORM_L1, cv2.NORM_L2; cv2.NORM_HAMMING (not usable here)
Developer: AirtestProject | Project: Airtest | Lines: 22 | Source: keypoint_matching_contrib.py
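
The trailing comment in the example above lists the norms the Airtest authors considered. As a general rule from the OpenCV documentation, the norm should match the descriptor type. The helper below is a hypothetical illustration of that pairing, not part of Airtest:

import cv2

def make_matcher(descriptor_name):
    # Binary descriptors (ORB, BRIEF, BRISK, AKAZE) use bitwise Hamming distance;
    # float descriptors (SIFT, SURF, KAZE) use Euclidean (L2) or Manhattan (L1).
    binary = descriptor_name.lower() in ('orb', 'brief', 'brisk', 'akaze')
    norm = cv2.NORM_HAMMING if binary else cv2.NORM_L2
    return cv2.BFMatcher(norm, crossCheck=True)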

Example 3: __init__

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def __init__(self, fe_conf: feature_extractor_conf.Default_feature_extractor_conf):
    # STD attributes
    self.fe_conf: feature_extractor_conf.Default_feature_extractor_conf = fe_conf
    self.logger = logging.getLogger(__name__)
    self.logger.info("Creation of a Picture BoW Orber")

    self.algo = cv2.ORB_create(nfeatures=fe_conf.ORB_KEYPOINTS_NB)
    # TODO: Dictionary path / Vocabulary
    self.bow_descriptor = cv2.BOWImgDescriptorExtractor(self.algo, cv2.BFMatcher(cv2.NORM_HAMMING))
    self.vocab_loaded = False
    try:
        vocab = BoWOrb_Vocabulary_Creator.load_vocab_from_file(fe_conf.BOW_VOCAB_PATH)
        self.bow_descriptor.setVocabulary(vocab)
        self.vocab_loaded = True
    except Exception as e:
        self.logger.error(f"No vocabulary file provided. Not possible to use BoW-ORB: {e}")
Developer: CIRCL | Project: douglas-quaid | Lines: 18 | Source: picture_bow_orber.py

Example 4: BFMatch_SIFT

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def BFMatch_SIFT(img1, img2):
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    # Apply ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append([m])

    return (kp1, kp2, good) 
Developer: cynricfu | Project: dual-fisheye-video-stitching | Lines: 21 | Source: feature_match.py
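
A hypothetical follow-up showing how the returned keypoints and ratio-test survivors could be visualized; img1, img2, and the output filename are placeholders:

kp1, kp2, good = BFMatch_SIFT(img1, img2)
# 'good' is a list of single-element lists, the format cv2.drawMatchesKnn expects.
vis = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imwrite('sift_matches.png', vis)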

Example 5: evalSift

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def evalSift():
    # SIFT
    print('SIFT')
    sift = cv2.xfeatures2d.SIFT_create()
    matcher = cv2.BFMatcher()
    times = []
    for pair in eval_set:
        t = time.time()
        kpts = [sift.detect(im) for im in pair.im]
        srt = [sorted(kpt, key=lambda x: x.response, reverse=True) for kpt in kpts]
        srt128 = [s[:128] for s in srt]
        d = [sift.compute(im, s)[1] for im, s in zip(pair.im, srt128)]
        matches = matcher.match(d[0], d[1])
        times.append(time.time() - t)
        print(times[-1])
    return times 
Developer: uzh-rpg | Project: imips_open | Lines: 18 | Source: timing.py

Example 6: evalSurf

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def evalSurf():
    # SURF
    print('SURF')
    surf = cv2.xfeatures2d.SURF_create()
    matcher = cv2.BFMatcher()
    times = []
    for pair in eval_set:
        t = time.time()
        kpts = [surf.detect(im) for im in pair.im]
        srt = [sorted(kpt, key=lambda x: x.response, reverse=True) for kpt in kpts]
        srt128 = [s[:128] for s in srt]
        d = [surf.compute(im, s)[1] for im, s in zip(pair.im, srt128)]
        matches = matcher.match(d[0], d[1])
        times.append(time.time() - t)
        print(times[-1])
    return times 
Developer: uzh-rpg | Project: imips_open | Lines: 18 | Source: timing.py

Example 7: evalLfNet

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def evalLfNet():
    # lf_net took 2:49 for 2761 images according to its own progress bar timer
    # 169/2761*2 -> 0.122 for two images
    # Add to that the following time for matching (0.48ms -> negligible):
    seq_fps = baselines.parseLFNetOuts(
        eval_set, FLAGS.baseline_num_ips)
    matcher = cv2.BFMatcher()
    times = []
    for pair in eval_set:
        folder = pair.seqname
        [a, b] = pair.indices
        forward_passes = [seq_fps['%s%s' % (folder, i)] for i in [a, b]]
        t = time.time()
        matches = matcher.match(
            forward_passes[0].descriptors, forward_passes[1].descriptors)
        times.append(time.time() - t)
        print(times[-1])
    return times 
Developer: uzh-rpg | Project: imips_open | Lines: 20 | Source: timing.py

Example 8: init_feature

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params = dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Developer: NetEase | Project: airtest | Lines: 27 | Source: findobj.py
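
init_feature relies on two module-level FLANN constants that are defined elsewhere in findobj.py; in the standard OpenCV find_obj.py sample they have the values below. The last line is a hypothetical usage:

FLANN_INDEX_KDTREE = 1  # values as defined in the OpenCV find_obj.py sample
FLANN_INDEX_LSH = 6

# e.g. ORB keypoints matched via a FLANN LSH index; dropping '-flann'
# from the name falls back to cv2.BFMatcher.
detector, matcher = init_feature('orb-flann')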

Example 9: matchAB

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def matchAB(fileA, fileB):
    # Read the image data
    imgA = cv2.imread(fileA)
    imgB = cv2.imread(fileB)

    # Convert to grayscale
    grayA = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(imgB, cv2.COLOR_BGR2GRAY)

    # Extract AKAZE features
    akaze = cv2.AKAZE_create()
    kpA, desA = akaze.detectAndCompute(grayA, None)
    kpB, desB = akaze.detectAndCompute(grayB, None)

    # Define the BFMatcher and visualize the matches
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(desA, desB)
    matches = sorted(matches, key=lambda x: x.distance)
    matched_image = cv2.drawMatches(imgA, kpA, imgB, kpB, matches, None, flags=2)

    plt.imshow(cv2.cvtColor(matched_image, cv2.COLOR_BGR2RGB))
    plt.show()
Developer: cangyan | Project: image-detect | Lines: 24 | Source: image_detect_01.py

Example 10: matching

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def matching(desc1, desc2, do_ratio_test=False, cross_check=True):
    if desc1.dtype == np.bool_ and desc2.dtype == np.bool_:  # np.bool alias was removed in NumPy 1.24
        desc1, desc2 = np.packbits(desc1, axis=1), np.packbits(desc2, axis=1)
        norm = cv2.NORM_HAMMING
    else:
        desc1, desc2 = np.float32(desc1), np.float32(desc2)
        norm = cv2.NORM_L2

    if do_ratio_test:
        matches = []
        matcher = cv2.BFMatcher(norm)
        for m, n in matcher.knnMatch(desc1, desc2, k=2):
            m.distance = 1.0 if (n.distance == 0) else m.distance / n.distance
            matches.append(m)
    else:
        matcher = cv2.BFMatcher(norm, crossCheck=cross_check)
        matches = matcher.match(desc1, desc2)
    return matches_cv2np(matches) 
Developer: ethz-asl | Project: hfnet | Lines: 20 | Source: descriptors.py
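
The np.packbits conversion above is what lets cv2.NORM_HAMMING work on boolean descriptors: BFMatcher counts differing bits in uint8 data, so packing eight booleans per byte makes that count equal the Hamming distance between the original bit vectors. A small self-contained check with made-up descriptors:

import numpy as np

a = np.array([[True, False, True, True, False, False, True, False]])
b = np.array([[True, True, True, False, False, False, True, True]])
pa, pb = np.packbits(a, axis=1), np.packbits(b, axis=1)
# The rows differ at indices 1, 3, and 7, so the Hamming distance is 3.
print(bin(int(pa[0, 0]) ^ int(pb[0, 0])).count('1'))  # prints 3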

Example 11: init_feature

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(400)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params = dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Developer: UASLab | Project: ImageAnalysis | Lines: 27 | Source: find_obj.py

Example 12: init_feature

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.xfeatures2d.SIFT_create()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params = dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Developer: makelove | Project: OpenCV-Python-Tutorial | Lines: 33 | Source: find_obj.py

Example 13: get_match_coords

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def get_match_coords(keyPoints1, descriptors1,
                     keyPoints2, descriptors2,
                     matcher=cv2.BFMatcher,
                     norm=cv2.NORM_HAMMING,
                     ratio_test=0.7,
                     verbose=True,
                     **kwargs):
    ''' Filter matching keypoints and convert to X,Y coordinates
    Parameters
    ----------
        keyPoints1 : list - keypoints on img1 from find_key_points()
        descriptors1 : list - descriptors on img1 from find_key_points()
        keyPoints2 : list - keypoints on img2 from find_key_points()
        descriptors2 : list - descriptors on img2 from find_key_points()
        matcher : matcher from CV2
        norm : int - type of distance
        ratio_test : float - Lowe ratio
        verbose : bool - print some output ?
    Returns
    -------
        x1, y1, x2, y2 : coordinates of start and end of displacement [pixels]
    '''
    matches = _get_matches(descriptors1,
                           descriptors2, matcher, norm, verbose)
    x1, y1, x2, y2 = _filter_matches(matches, ratio_test,
                                     keyPoints1, keyPoints2, verbose)
    return x1, y1, x2, y2 
Developer: nansencenter | Project: sea_ice_drift | Lines: 29 | Source: ftlib.py
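
A hypothetical invocation, assuming find_key_points() from the same ftlib.py module returns a (keypoints, descriptors) pair for an input image; img1 and img2 stand for two co-registered images:

keyPoints1, descriptors1 = find_key_points(img1)
keyPoints2, descriptors2 = find_key_points(img2)
x1, y1, x2, y2 = get_match_coords(keyPoints1, descriptors1,
                                  keyPoints2, descriptors2,
                                  ratio_test=0.7)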

Example 14: _get_matches

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def _get_matches(descriptors1, descriptors2, matcher, norm, verbose):
    ''' Match keypoints using the given matcher and norm (by default BFMatcher with cv2.NORM_HAMMING) '''
    t0 = time.time()
    bf = matcher(norm)
    matches = bf.knnMatch(descriptors1, descriptors2, k=2)
    t1 = time.time()
    if verbose:
        print('Keypoints matched', t1 - t0)
    return matches 
Developer: nansencenter | Project: sea_ice_drift | Lines: 11 | Source: ftlib.py

Example 15: init_detector

# Required import: import cv2 [as alias]
# Alternatively: from cv2 import BFMatcher [as alias]
def init_detector(self):
    """Init keypoint detector object."""
    self.detector = cv2.KAZE_create()
    # create BFMatcher object:
    self.matcher = cv2.BFMatcher(cv2.NORM_L1)  # options: cv2.NORM_L1, cv2.NORM_L2; cv2.NORM_HAMMING (not usable here)
Developer: AirtestProject | Project: Airtest | Lines: 7 | Source: keypoint_base.py


Note: The cv2.BFMatcher examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this compilation without permission.