

Python cv2.drawMatchesKnn Method Code Examples

This article collects typical usage examples of the Python method cv2.drawMatchesKnn. If you are unsure what cv2.drawMatchesKnn does or how to call it, the curated code examples below should help. You can also explore further usage examples from the cv2 module where this method lives.


The sections below present 7 code examples of cv2.drawMatchesKnn, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system surface better Python code examples.
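Before the collected examples, here is a minimal, self-contained sketch of the typical drawMatchesKnn pipeline. It uses ORB (which ships with core OpenCV and needs no contrib build) and the hypothetical file names img1.png and img2.png; any two grayscale images of the same scene will do.

import cv2

# Hypothetical input files; substitute your own images.
img1 = cv2.imread("img1.png", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("img2.png", cv2.IMREAD_GRAYSCALE)

# Detect keypoints and compute binary descriptors.
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

# k=2 returns the two nearest neighbors per query descriptor,
# which is exactly what Lowe's ratio test needs.
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = bf.knnMatch(des1, des2, k=2)

# Keep a match only when it is clearly better than its runner-up.
# drawMatchesKnn expects a list of lists of DMatch, hence [[m], ...].
# (Assumes every query descriptor has two neighbors, true for images with enough features.)
good = [[m] for m, n in matches if m.distance < 0.75 * n.distance]

out = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imshow("knn matches", out)
cv2.waitKey(0)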

Example 1: baseline_sift_matching

# Required import: import cv2 [as alias]
# Or: from cv2 import drawMatchesKnn [as alias]
def baseline_sift_matching(img1, img2):
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    matches = cv2.BFMatcher().knnMatch(des1, des2, k=2)

    # Lowe's ratio test: keep a match only when clearly better than its runner-up
    good = [[m] for m, n in matches if m.distance < 0.7*n.distance]
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None,
                              matchColor=(0, 255, 0), matchesMask=None,
                              singlePointColor=(255, 0, 0), flags=0)
    return img3 
Author ID: ethz-asl, Project: hfnet, Lines of code: 13, Source file: frame_matching.py

Example 2: sift_pred

# Required import: import cv2 [as alias]
# Or: from cv2 import drawMatchesKnn [as alias]
# Also requires: import numpy as np
def sift_pred(cv2_sift, bf, query_kp, query_des, patch,
              patch_kp=None, patch_des=None,
              template_img=None, draw_matches=False, ratio=0.6, fp=False):

    if patch_kp is None or patch_des is None:
        patch_kp, patch_des = get_keypoints(cv2_sift, patch)

    if patch_des is None:
        match_list = []
    else:
        match_list = bf.knnMatch(query_des, patch_des, k=2)
        match_list = [m for m in match_list if len(m) == 2]

    # Apply ratio test
    good = []
    score = 0.0
    for m, n in match_list:
        if m.distance < ratio * n.distance:
            good.append([m])
            if not fp:
                score += n.distance / np.maximum(m.distance, 0.01)
        else:
            if fp:
                score += np.sqrt((m.distance / n.distance - ratio))

    if draw_matches:
        template_img = resize(template_img.copy())
        if has_alpha(template_img):
            template_img = blend_white(template_img)
        if has_alpha(patch):
            patch = blend_white(patch)

        drawn_matches = cv2.drawMatchesKnn(template_img,
                                           query_kp,
                                           resize(patch),
                                           patch_kp,
                                           good, None, flags=2)

        return score, len(good), drawn_matches

    return score, len(good) 
Author ID: ftramer, Project: ad-versarial, Lines of code: 43, Source file: model.py

Example 3: match_img_knn

# Required import: import cv2 [as alias]
# Or: from cv2 import drawMatchesKnn [as alias]
def match_img_knn(queryImage, trainingImage, thread=0):  # 'thread' is the minimum good-match count
    sift = cv2.xfeatures2d.SIFT_create()  # create the SIFT detector
    kp1, des1 = sift.detectAndCompute(queryImage, None)
    kp2, des2 = sift.detectAndCompute(trainingImage, None)
    #print(len(kp1))
    # set the FLANN parameters
    FLANN_INDEX_KDTREE = 1
    indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    searchParams = dict(checks=50)
    flann = cv2.FlannBasedMatcher(indexParams, searchParams)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []

    # initialize the match mask (one [0, 0] entry per knn pair)
    matchesMask = [[0, 0] for i in range(len(matches))]
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.7*n.distance:  # keep only matches passing the 0.7 ratio test
            matchesMask[i] = [1, 0]
            good.append(m)

    s = sorted(good, key=lambda x: x.distance)
    '''
    drawParams=dict(matchColor=(0,0,255),singlePointColor=(255,0,0),matchesMask=matchesMask,flags=0) # colors for keypoints and match lines
    resultimage=cv2.drawMatchesKnn(queryImage,kp1,trainingImage,kp2,matches,None,**drawParams) # draw the matching result
    cv2.imshow('res',resultimage)
    cv2.waitKey(0)
    '''
    #print(len(good))
    if len(good) > thread:
        maxLoc = kp2[s[0].trainIdx].pt
        #print(maxLoc)
        return (int(maxLoc[0]), int(maxLoc[1]))
    else:
        return (0, 0) 
Author ID: AcademicDog, Project: onmyoji_bot, Lines of code: 37, Source file: image_proc.py

Example 4: debug_matching

# Required import: import cv2 [as alias]
# Or: from cv2 import drawMatchesKnn [as alias]
# Also requires: import matplotlib.pyplot as plt
def debug_matching(frame1, frame2, path_image1, path_image2, matches,
                   matches_mask, num_points, use_ratio_test):
    img1 = cv2.imread(path_image1, 0)
    img2 = cv2.imread(path_image2, 0)

    kp1 = get_ocv_kpts_from_np(frame1['keypoints'][:num_points, :])
    kp2 = get_ocv_kpts_from_np(frame2['keypoints'][:num_points, :])

    if use_ratio_test:
        img = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None,
                                 matchColor=(0, 255, 0),
                                 matchesMask=matches_mask,
                                 singlePointColor=(255, 0, 0), flags=0)
    else:
        img = cv2.drawMatches(img1, kp1, img2, kp2, matches, None,
                              matchColor=(0, 255, 0),
                              singlePointColor=(255, 0, 0), flags=0)

    img_sift = baseline_sift_matching(img1, img2)

    fig = plt.figure(figsize=(2, 1))
    fig.add_subplot(2, 1, 1)
    plt.imshow(img)
    plt.title('Custom features')
    fig.add_subplot(2, 1, 2)
    plt.imshow(img_sift)
    plt.title('SIFT')
    plt.show() 
Author ID: ethz-asl, Project: hfnet, Lines of code: 30, Source file: frame_matching.py

Example 5: matchSift

# Required import: import cv2 [as alias]
# Or: from cv2 import drawMatchesKnn [as alias]
# Also requires: import matplotlib.pyplot as plt
def matchSift(imgA,imgB):   
    img1 = cv2.imread(imgA, 0) 
    img2 = cv2.imread(imgB, 0)  
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)  # get the SIFT keypoints and descriptors
    kp2, des2 = sift.detectAndCompute(img2, None)  
    bf = cv2.BFMatcher()  
    matches = bf.knnMatch(des1, des2, k=2)  # match descriptors; returns the k best matches per query descriptor
    """
    .   @param k Count of best matches found per each query descriptor or less if a query descriptor has less than k possible matches in total.
    The result of matches = bf.match(des1,des2) line is a list of DMatch objects. This DMatch object has following attributes:
    DMatch.distance - Distance between descriptors. The lower, the better it is.
    DMatch.trainIdx - Index of the descriptor in train descriptors
    DMatch.queryIdx - Index of the descriptor in query descriptors
    DMatch.imgIdx - Index of the train image.
    參看:https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
    """
    print(type(matches),matches[:2],(matches[0][0].distance,matches[0][1].distance))
    good = []  
    for m, n in matches:  
        if m.distance < 0.75 * n.distance:  # with k=2 we get the nearest and second-nearest neighbors; accept a match only if nearest/second-nearest < ratio (the ratio test described by D. Lowe in his paper)
            good.append([m])  
    
    # subsample roughly every tenth good match; max(1, ...) guards against a zero slice step when len(good) < 10
    imgM = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good[::max(1, len(good) // 10)], None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    fig, ax=plt.subplots(figsize=(50,30))
    ax.imshow(imgM), plt.show()    
#    cv2.imshow('matchSift',imgM)
#    cv2.waitKey() 
Author ID: richieBao, Project: python-urbanPlanning, Lines of code: 30, Source file: opencv_py.py
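
As a quick illustration of the DMatch attributes documented in the docstring above, here is a small sketch, assuming kp1/des1 and kp2/des2 were computed exactly as in matchSift:

bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
best, second = matches[0]  # nearest and second-nearest neighbor of query descriptor 0 (assumes both exist)
print(best.distance)       # descriptor distance; lower is better
print(best.queryIdx)       # index into kp1/des1 (the query set)
print(best.trainIdx)       # index into kp2/des2 (the train set)
print(best.imgIdx)         # train image index (0 when matching a single image pair)
print(best.distance < 0.75 * second.distance)  # True when the pair passes the ratio test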

Example 6: get_matches

# Required import: import cv2 [as alias]
# Or: from cv2 import drawMatchesKnn [as alias]
# Also requires: from sklearn.cluster import KMeans
def get_matches(self, train, corr):
        train_img = cv2.imread(train, 0)
        query_img = self.query
        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(train_img, None)
        kp2, des2 = sift.detectAndCompute(query_img, None)
        if des1 is None or des2 is None:
            return False
        # create BFMatcher object
        bf = cv2.BFMatcher()
        try:
            matches = bf.knnMatch(des1, des2, k=2)
        except cv2.error:
            return False
        good_matches = []
        cluster = []
        for m, n in matches:
            img2_idx = m.trainIdx
            img1_idx = m.queryIdx
            (x1, y1) = kp1[img1_idx].pt
            (x2, y2) = kp2[img2_idx].pt
            # print("Comare %d to %d and %d to %d" % (x1,x2,y1,y2))
            if m.distance < 0.8 * n.distance and y2 > self.yThreshold and x2 < self.xThreshold:
                good_matches.append([m])
                cluster.append([int(x2), int(y2)])
        if len(cluster) <= corr:
            return False
        self.kmeans = KMeans(n_clusters=1, random_state=0).fit(cluster)
        new_cluster, new_matches = self.compare_distances(train_img, cluster, good_matches)
        if len(new_cluster) == 0 or len(new_cluster) / len(cluster) < .5:
            return False
        img3 = cv2.drawMatchesKnn(
            train_img, kp1, query_img, kp2, new_matches, None, flags=2)
        if self._debug:
            self.images.append(img3)
            self.debug_matcher(img3)
        return True 
Author ID: will7200, Project: Yugioh-bot, Lines of code: 42, Source file: trainer_matches.py

Example 7: get_feature_point_list

# Required import: import cv2 [as alias]
# Or: from cv2 import drawMatchesKnn [as alias]
# Also requires: import numpy as np, import typing, plus the project's logger and Point helpers
def get_feature_point_list(
        self, template_pic_object: np.ndarray, target_pic_object: np.ndarray
    ) -> typing.Sequence[Point]:
        """
        compare via feature matching

        :param template_pic_object:
        :param target_pic_object:
        :return:
        """
        # IMPORTANT
        # SIFT and SURF cannot be used on Python >= 3.8,
        # so we switch to the ORB detector;
        # it may be less precise for now

        # Initiate ORB detector
        orb = cv2.ORB_create()

        # find the keypoints and descriptors with ORB
        template_kp, template_desc = orb.detectAndCompute(template_pic_object, None)
        target_kp, target_desc = orb.detectAndCompute(target_pic_object, None)

        # key points count
        logger.debug(f"template key point count: {len(template_kp)}")
        logger.debug(f"target key point count: {len(target_kp)}")

        # find the 2 closest points
        # Establishing consistency between frames means finding, for each descriptor in one set (the query set), its nearest neighbor in another set (the train set). Here we find the nearest and second-nearest neighbor of each descriptor.
        # A correct match is much closer to its first neighbor; for an incorrect match, the two neighbors lie at similar distances. We can therefore judge match quality by comparing the two distances.
        # more details: https://blog.csdn.net/liangjiubujiu/article/details/80418079
        # flann = cv2.FlannBasedMatcher()
        # matches = flann.knnMatch(template_desc, target_desc, k=2)

        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        # match the feature descriptors
        matches = bf.knnMatch(template_desc, target_desc, k=1)

        # matches are something like:
        # [[<DMatch 0x12400a350>, <DMatch 0x12400a430>], [<DMatch 0x124d6a170>, <DMatch 0x124d6a450>]]

        logger.debug(f"matches num: {len(matches)}")

        # TODO here is a sample to show feature points
        # temp = cv2.drawMatchesKnn(template_pic_object, kp1, target_pic_object, kp2, matches, None, flags=2)
        # cv2.imshow('feature_points', temp)
        # cv2.waitKey(0)

        good = list()
        if matches:
            # with k=1 each entry holds at most one DMatch; only the first
            # query descriptor's match list is used here
            good = matches[0]

        # get positions
        point_list = list()
        for each in good:
            target_idx = each.trainIdx
            each_point = Point(*target_kp[target_idx].pt)
            point_list.append(each_point)

        return point_list 
Author ID: williamfzc, Project: findit, Lines of code: 61, Source file: feature.py
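
The comments inside this example describe the nearest/second-nearest ratio test, yet the code requests only one neighbor (k=1) because crossCheck is enabled. For reference, here is a sketch of the k=2 ratio-test variant, assuming template_desc, target_desc, and target_kp were computed as in the function above (crossCheck must stay disabled for knnMatch to return two neighbors):

bf = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = bf.knnMatch(template_desc, target_desc, k=2)

good = []
for pair in matches:
    if len(pair) < 2:  # a query descriptor may have fewer than k neighbors
        continue
    m, n = pair
    if m.distance < 0.75 * n.distance:  # Lowe's ratio test
        good.append(m)

point_list = [Point(*target_kp[m.trainIdx].pt) for m in good]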


Note: the cv2.drawMatchesKnn examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from community open-source projects, and copyright remains with the original authors; for redistribution and use, refer to each project's License. Do not reproduce without permission.