

Python cv2.KeyPoint Method Code Examples

This article collects typical usage examples of the cv2.KeyPoint method in Python. If you are wondering how cv2.KeyPoint is used in practice, or are looking for concrete cv2.KeyPoint examples, the curated code samples below may help. You can also explore further usage examples of the cv2 module that this method belongs to.


The following shows 15 code examples of the cv2.KeyPoint method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
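Before the project snippets, here is a minimal self-contained sketch of constructing a cv2.KeyPoint and reading its attributes. Note that the Python constructor's keyword names changed around OpenCV 4.5.3 (size, angle, ... instead of _size, _angle, ...), which is why several snippets below use the underscore form; positional arguments work across versions.

import cv2

# Minimal sketch: x, y, size passed positionally (portable across OpenCV versions).
kp = cv2.KeyPoint(10.0, 20.0, 5.0)

print(kp.pt)        # (10.0, 20.0) -- sub-pixel coordinates as (x, y)
print(kp.size)      # diameter of the meaningful keypoint neighborhood
print(kp.angle)     # orientation in degrees, -1 if not computed
print(kp.response)  # detector response, useful for ranking/filtering
print(kp.octave)    # pyramid octave (packed by SIFT, see Example 12)
print(kp.class_id)  # object id, -1 by default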

Example 1: __init__

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
# (this snippet also uses: from threading import Lock)
def __init__(self, image, params):
        # TODO: pyramid representation
        self.image = image 
        self.height, self.width = image.shape[:2]

        self.keypoints = []      # list of cv2.KeyPoint
        self.descriptors = []    # numpy.ndarray

        self.detector = params.feature_detector
        self.extractor = params.descriptor_extractor
        self.matcher = params.descriptor_matcher

        self.cell_size = params.matching_cell_size
        self.matching_distance = params.matching_distance
        self.neighborhood = (
            params.matching_cell_size * params.matching_neighborhood)

        self._lock = Lock() 
Developer: uoip, Project: rgbd_ptam, Lines of code: 20, Source file: feature.py
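The params object above is defined elsewhere in rgbd_ptam and is not shown here. As a hedged sketch, the detector/extractor/matcher it carries could be plain OpenCV objects, for example ORB plus a brute-force matcher (these particular choices are an assumption, not the project's actual configuration):

import cv2
from threading import Lock

# Hypothetical stand-ins for params.feature_detector / descriptor_extractor /
# descriptor_matcher; the real Params class may configure them differently.
feature_detector = cv2.ORB_create(nfeatures=1000)
descriptor_extractor = feature_detector                       # ORB both detects and describes
descriptor_matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

img = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)           # hypothetical input image
keypoints = feature_detector.detect(img, None)                # list of cv2.KeyPoint
keypoints, descriptors = descriptor_extractor.compute(img, keypoints)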

Example 2: patch_Keypoint_pickiling

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
# (this snippet also uses: import copyreg)
def patch_Keypoint_pickiling():
        # Create the bundling between the class and its constructor arguments to save for the KeyPoint class
        # See : https://stackoverflow.com/questions/50337569/pickle-exception-for-cv2-boost-when-using-multiprocessing/50394788#50394788
        def _pickle_keypoint(keypoint):  # : cv2.KeyPoint
            return cv2.KeyPoint, (
                keypoint.pt[0],
                keypoint.pt[1],
                keypoint.size,
                keypoint.angle,
                keypoint.response,
                keypoint.octave,
                keypoint.class_id,
            )

        # C++ : KeyPoint (float x, float y, float _size, float _angle=-1, float _response=0, int _octave=0, int _class_id=-1)
        # Python: cv2.KeyPoint([x, y, _size[, _angle[, _response[, _octave[, _class_id]]]]]) → <KeyPoint object>

        # Apply the bundling to pickle
        copyreg.pickle(cv2.KeyPoint().__class__, _pickle_keypoint)

    # not static, to make sure the patch is applied before use, and only once 
Developer: CIRCL, Project: douglas-quaid, Lines of code: 23, Source file: pickle_import_export.py
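A minimal round-trip sketch of what the patch above enables: once copyreg knows how to reduce a cv2.KeyPoint to its constructor arguments, keypoints (which are otherwise unpicklable) survive pickle.dumps/pickle.loads. The patch body is reproduced here as a free function for brevity; in douglas-quaid it lives inside a class.

import copyreg
import pickle
import cv2

def _pickle_keypoint(keypoint):
    # Reduce a cv2.KeyPoint to (constructor, args) so pickle can rebuild it.
    return cv2.KeyPoint, (keypoint.pt[0], keypoint.pt[1], keypoint.size,
                          keypoint.angle, keypoint.response,
                          keypoint.octave, keypoint.class_id)

copyreg.pickle(cv2.KeyPoint().__class__, _pickle_keypoint)

kp = cv2.KeyPoint(10.0, 20.0, 5.0)
restored = pickle.loads(pickle.dumps(kp))
print(restored.pt, restored.size)   # (10.0, 20.0) 5.0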

Example 3: detect

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
# (this snippet also uses: import numpy as np; sample_by_octave is a project helper)
def detect(self, gray_img):
        """Detect keypoints in the gray-scale image.
        Args:
            gray_img: The input gray-scale image.
        Returns:
            npy_kpts: (n_kpts, 6) Keypoints represented as NumPy array.
            cv_kpts: A list of keypoints represented as cv2.KeyPoint.
        """

        cv_kpts = self.sift.detect(gray_img, None)
        
        if self.ori_off:
            tmp_npy_kpts = [np.array([tmp_cv_kpt.pt[0], tmp_cv_kpt.pt[1], tmp_cv_kpt.size])
                for i, tmp_cv_kpt in enumerate(cv_kpts)]
            tmp_npy_kpts = np.stack(tmp_npy_kpts, axis=0)
            _, unique = np.unique(tmp_npy_kpts, axis=0, return_index=True)
            cv_kpts = [cv_kpts[i] for i in unique]

        all_octaves = [np.int8(i.octave & 0xFF) for i in cv_kpts]
        self.first_octave = int(np.min(all_octaves))
        self.max_octave = int(np.max(all_octaves))

        npy_kpts, cv_kpts = sample_by_octave(cv_kpts, self.n_sample, self.down_octave)
        return npy_kpts, cv_kpts 
Developer: lzx551402, Project: geodesc, Lines of code: 26, Source file: opencvhelper.py
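self.sift and sample_by_octave are members of geodesc's opencvhelper and are not shown. Assuming a stock OpenCV SIFT detector (cv2.SIFT_create needs OpenCV >= 4.4; older builds expose cv2.xfeatures2d.SIFT_create), the detection and octave-range computation alone look roughly like this:

import cv2

sift = cv2.SIFT_create()                                 # hypothetical detector settings
gray_img = cv2.imread('image.png', cv2.IMREAD_GRAYSCALE) # hypothetical input

cv_kpts = sift.detect(gray_img, None)                    # list of cv2.KeyPoint

# Same idea as np.int8(kp.octave & 0xFF) in the snippet: the low byte of
# .octave holds the signed octave index (-1 for SIFT's upsampled base octave).
def signed_octave(kp):
    o = kp.octave & 0xFF
    return o - 256 if o >= 128 else o

octaves = [signed_octave(kp) for kp in cv_kpts]
print(min(octaves), max(octaves))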

Example 4: __init__

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
# (this snippet also uses: from threading import Lock)
def __init__(self, image, params):
        # TODO: pyramid representation
        self.image = image 
        self.height, self.width = image.shape[:2]

        self.keypoints = []      # list of cv2.KeyPoint
        self.descriptors = []    # numpy.ndarray

        self.detector = params.feature_detector
        self.extractor = params.descriptor_extractor
        self.matcher = params.descriptor_matcher

        self.cell_size = params.matching_cell_size
        self.distance = params.matching_distance
        self.neighborhood = (
            params.matching_cell_size * params.matching_neighborhood)

        self._lock = Lock() 
Developer: uoip, Project: stereo_ptam, Lines of code: 20, Source file: feature.py

Example 5: _unpickle_keypoints

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
# (this snippet also uses: import numpy as np)
def _unpickle_keypoints(array, region_center, region_width,
                        region_height, image_width, image_height):
    keypoints, descriptors = [], []
    [center_x,center_y] = region_center
    top_left_x = int(center_x - region_width)
    top_left_y = int(center_y - region_height)
    bottom_right_x = int(center_x + region_width)
    bottom_right_y = int(center_y + region_height)
    if top_left_x < 0: top_left_x = 0
    if top_left_y < 0: top_left_y = 0
    if image_width < bottom_right_x: bottom_right_x = image_width - 1
    if image_height < bottom_right_y: bottom_right_y = image_height - 1
    for point in array:
        [x, y] = [int(point[0][0]), int(point[0][1])]
        if (x < top_left_x or y < top_left_y or 
            bottom_right_x < x or bottom_right_y < y):
            temp_keypoint = cv2.KeyPoint(x=point[0][0],y=point[0][1],_size=point[1],
                                        _angle=point[2],_response=point[3],
                                        _octave=point[4],_class_id=point[5])
            temp_descriptor = point[6]
            keypoints.append(temp_keypoint)
            descriptors.append(temp_descriptor)
    return keypoints, np.array(descriptors)

#zero the pixel in the image's given region 
Developer: NetEase, Project: airtest, Lines of code: 27, Source file: auto.py
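The point tuples consumed above follow a fixed layout: point[0] is the (x, y) pair, point[1] through point[5] are size, angle, response, octave and class_id, and point[6] is the descriptor row. A hedged sketch of the matching serialization direction (not part of the airtest snippet) could be:

import cv2

def _pickle_keypoints(keypoints, descriptors):
    # Pack each cv2.KeyPoint plus its descriptor row into a plain tuple,
    # mirroring the layout that _unpickle_keypoints above expects.
    return [(kp.pt, kp.size, kp.angle, kp.response, kp.octave, kp.class_id, desc)
            for kp, desc in zip(keypoints, descriptors)]

orb = cv2.ORB_create()
img = cv2.imread('image.png', cv2.IMREAD_GRAYSCALE)   # hypothetical input image
kpts, descs = orb.detectAndCompute(img, None)
array = _pickle_keypoints(kpts, descs)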

Example 6: draw_skel_and_kp

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
# (this snippet also uses: import numpy as np; get_adjacent_keypoints is a project helper)
def draw_skel_and_kp(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_score=0.5, min_part_score=0.5):
    out_img = img
    adjacent_keypoints = []
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_score:
            continue

        new_keypoints = get_adjacent_keypoints(
            keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score)
        adjacent_keypoints.extend(new_keypoints)

        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_score:
                continue
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))

    out_img = cv2.drawKeypoints(
        out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),
        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))
    return out_img 
Developer: rwightman, Project: posenet-python, Lines of code: 26, Source file: utils.py
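One detail worth noting in the snippet above: keypoint_coords stores (row, column) pairs, while cv2.KeyPoint expects image coordinates as (x, y) = (column, row), hence the kc[1], kc[0] swap. The same conversion in isolation:

import cv2

row, col, score = 120.0, 45.0, 0.8          # hypothetical (row, col) keypoint with score
kp = cv2.KeyPoint(col, row, 10.0 * score)   # KeyPoint takes x (= column) first, then y (= row)
print(kp.pt)                                # (45.0, 120.0)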

Example 7: detect

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
# (this snippet also uses: import numpy as np; sample_by_octave is a project helper)
def detect(self, gray_img):
        """Detect keypoints in the gray-scale image.
        Args:
            gray_img: The input gray-scale image.
        Returns:
            npy_kpts: (n_kpts, 6) Keypoints represented as NumPy array.
            cv_kpts: A list of keypoints represented as cv2.KeyPoint.
        """

        cv_kpts = self.sift.detect(gray_img, None)
        if len(cv_kpts)==0:
            return np.zeros([0,6]), []
        all_octaves = [np.int8(i.octave & 0xFF) for i in cv_kpts]
        self.first_octave = int(np.min(all_octaves))
        self.max_octave = int(np.max(all_octaves))

        npy_kpts, cv_kpts = sample_by_octave(cv_kpts, self.n_sample, self.down_octave)
        return npy_kpts, cv_kpts 
Developer: zju3dv, Project: GIFT, Lines of code: 20, Source file: opencvhelper.py

Example 8: detect

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
# (this snippet also uses: import numpy as np)
def detect(self, gray_img):
        """Detect keypoints in the gray-scale image.
        Args:
            gray_img: The input gray-scale image.
        Returns:
            npy_kpts: (n_kpts, 6) Keypoints represented as NumPy array.
            cv_kpts: A list of keypoints represented as cv2.KeyPoint.
        """

        cv_kpts = self.sift.detect(gray_img, None)
        response = np.array([kp.response for kp in cv_kpts])
        resp_sort = np.argsort(response)[::-1][0:self.n_feature].tolist()
        cv_kpts = [cv_kpts[i] for i in resp_sort]
        if self.n_feature > 0 and len(cv_kpts) > self.n_feature:
            cv_kpts = cv_kpts[0:self.n_feature]

        if len(cv_kpts) > 0:
            all_octaves = [np.int8(i.octave & 0xFF) for i in cv_kpts]
            self.first_octave = int(np.min(all_octaves))
            self.max_octave = int(np.max(all_octaves))

            npy_kpts, cv_kpts = self.sample_by_octave(cv_kpts, self.n_sample, self.down_octave)
        else:
            npy_kpts = np.zeros((0, 0))
        return npy_kpts, cv_kpts 
Developer: luigifreda, Project: pyslam, Lines of code: 27, Source file: opencvhelper.py
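The argsort step above is a standard top-k selection by detector response; for a positive n_feature the follow-up length check is effectively a no-op, since the slice already truncated the list. The same idea can be written more directly (a sketch, not the pyslam implementation):

# Assumes cv_kpts is a list of cv2.KeyPoint and n_feature > 0.
def top_k_by_response(cv_kpts, n_feature):
    return sorted(cv_kpts, key=lambda kp: kp.response, reverse=True)[:n_feature]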

Example 9: detect

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
# (this snippet also uses: import numpy as np; sample_by_octave is a project helper)
def detect(self, gray_img):
        """Detect keypoints in the gray-scale image.
        Args:
            gray_img: The input gray-scale image.
        Returns:
            npy_kpts: (n_kpts, 6) Keypoints represented as NumPy array.
            cv_kpts: A list of keypoints represented as cv2.KeyPoint.
        """

        cv_kpts = self.sift.detect(gray_img, None)

        all_octaves = [np.int8(i.octave & 0xFF) for i in cv_kpts]
        self.first_octave = int(np.min(all_octaves))
        self.max_octave = int(np.max(all_octaves))

        npy_kpts, cv_kpts = sample_by_octave(cv_kpts, self.n_sample, self.down_octave)
        return npy_kpts, cv_kpts 
Developer: luigifreda, Project: pyslam, Lines of code: 19, Source file: opencvhelper.py

Example 10: __init__

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
# (this snippet also uses: from threading import RLock; SuperPointOptions and SuperPointFrontend are project modules)
def __init__(self, do_cuda=True): 
        self.lock = RLock()
        self.opts = SuperPointOptions(do_cuda)
        print(self.opts)        
        
        print('SuperPointFeature2D')
        print('==> Loading pre-trained network.')
        # This class runs the SuperPoint network and processes its outputs.
        self.fe = SuperPointFrontend(weights_path=self.opts.weights_path,
                                nms_dist=self.opts.nms_dist,
                                conf_thresh=self.opts.conf_thresh,
                                nn_thresh=self.opts.nn_thresh,
                                cuda=self.opts.cuda)
        print('==> Successfully loaded pre-trained network.')
                        
        self.pts = []
        self.kps = []        
        self.des = []
        self.heatmap = [] 
        self.frame = None 
        self.frameFloat = None 
        self.keypoint_size = 20  # just a representative size for visualization and in order to convert extracted points to cv2.KeyPoint 
          
    # compute both keypoints and descriptors 
Developer: luigifreda, Project: pyslam, Lines of code: 26, Source file: feature_superpoint.py
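SuperPointFrontend returns bare point arrays rather than cv2.KeyPoint objects, which is what keypoint_size is for: each extracted point later gets that fixed representative size when converted for OpenCV-based visualization or matching. A hedged sketch of that conversion, assuming pts is a 3xN array of (x, y, confidence) as in the original SuperPoint demo code:

import cv2

def superpoint_pts_to_keypoints(pts, keypoint_size=20):
    # pts: 3xN array with rows (x, y, confidence); the confidence goes into
    # KeyPoint.response.  This layout is an assumption, not shown in the snippet.
    return [cv2.KeyPoint(float(x), float(y), keypoint_size, -1, float(conf))
            for x, y, conf in pts.T]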

Example 11: convert_pts_to_keypoints

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
def convert_pts_to_keypoints(pts, size=1): 
    kps = []
    if pts is not None: 
        if pts.ndim > 2:
            # convert matrix [Nx1x2] of pts into list of keypoints  
            kps = [ cv2.KeyPoint(p[0][0], p[0][1], _size=size) for p in pts ]          
        else: 
            # convert matrix [Nx2] of pts into list of keypoints  
            kps = [ cv2.KeyPoint(p[0], p[1], _size=size) for p in pts ]                      
    return kps         


# from https://stackoverflow.com/questions/48385672/opencv-python-unpack-sift-octave
# from https://gist.github.com/lxc-xx/7088609 (SIFT implementation)
# from https://stackoverflow.com/questions/17015995/opencv-sift-descriptor-keypoint-radius
# from https://github.com/vlfeat/vlfeat/blob/38a03e12daf50ee98633de06120834d0d1d87e23/vl/sift.c#L1948  (vlfeat SIFT implementation)
# see also https://www.vlfeat.org/api/sift.html (documentation of vlfeat SIFT implementation)
# N.B.: the opencv SIFT implementation uses a negative first octave (int firstOctave = -1) to work with a higher-resolution image (scale=2.0, double size) 
Developer: luigifreda, Project: pyslam, Lines of code: 20, Source file: utils_features.py
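For the plain Nx2 case, OpenCV also ships a built-in converter, cv2.KeyPoint_convert, which goes both ways between point arrays and keypoint lists (if your build exposes it); a short sketch:

import cv2
import numpy as np

pts = np.array([[10.0, 20.0], [30.5, 40.5]], dtype=np.float32)  # Nx2 points

kps = cv2.KeyPoint_convert(pts)      # list of cv2.KeyPoint (default size)
back = cv2.KeyPoint_convert(kps)     # back to an array of (x, y) points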

Example 12: unpackSiftOctave

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
def unpackSiftOctave(kpt):
    """unpackSIFTOctave(kpt)->(octave,layer,scale)
    @brief Unpack Sift Keypoint
    @param kpt: cv2.KeyPoint (of SIFT)
    """
    _octave = kpt.octave
    octave = int(_octave)&0xFF
    layer  = (_octave>>8)&0xFF
    if octave>=128:
        octave |= -128
    if octave>=0:
        scale = float(1.0/(1<<octave))
    else:
        scale = float(1<<(-octave))
    #print('sift octave: ', octave,' layer: ', layer, ' scale: ', scale, 'size: ', kpt.size)
    return (octave, layer, scale) 
Developer: luigifreda, Project: pyslam, Lines of code: 18, Source file: utils_features.py
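A quick sanity check of the unpacking above, using a hand-packed octave value instead of running SIFT: OpenCV's SIFT stores the signed octave index in the low byte and the layer in the next byte, so octave -1 (the upsampled base octave) with layer 2 packs to 0x2FF.

import cv2

# (2 << 8) | (-1 & 0xFF) == 0x2FF: low byte = octave (two's complement), next byte = layer.
kpt = cv2.KeyPoint(10.0, 20.0, 5.0, -1, 0.0, 0x2FF)
print(unpackSiftOctave(kpt))   # uses the function above; expected (-1, 2, 2.0)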

Example 13: draw_match_2_side

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
# (this snippet also uses: import numpy as np)
def draw_match_2_side(img1, kp1, img2, kp2, N):
    """Draw matches on 2 sides
    Args:
        img1 (HxW(xC) array): image 1
        kp1 (Nx2 array): keypoint for image 1
        img2 (HxW(xC) array): image 2
        kp2 (Nx2 array): keypoint for image 2
        N (int): number of matches to draw
    Returns:
        out_img (Hx2W(xC) array): output image with drawn matches
    """
    kp_list = np.linspace(0, min(kp1.shape[0], kp2.shape[0])-1, N,
                                            dtype=np.int
                                            )

    # Convert keypoints to cv2.Keypoint object
    cv_kp1 = [cv2.KeyPoint(x=pt[0], y=pt[1], _size=1) for pt in kp1[kp_list]]
    cv_kp2 = [cv2.KeyPoint(x=pt[0], y=pt[1], _size=1) for pt in kp2[kp_list]]

    out_img = np.array([])
    good_matches = [cv2.DMatch(_imgIdx=0, _queryIdx=idx, _trainIdx=idx,_distance=0) for idx in range(N)]
    out_img = cv2.drawMatches(img1, cv_kp1, img2, cv_kp2, matches1to2=good_matches, outImg=out_img)

    return out_img 
Developer: Huangying-Zhan, Project: DF-VO, Lines of code: 26, Source file: frame_drawer.py
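A hedged usage sketch of draw_match_2_side above, with two hypothetical equal-size images and pre-matched Nx2 keypoint arrays (row i of kp1 corresponds to row i of kp2). Note that the _size keyword and dtype=np.int tie the snippet to older OpenCV/NumPy releases; on current versions they typically need to become size and plain int.

import cv2
import numpy as np

img1 = cv2.imread('left.png')            # hypothetical images of equal height
img2 = cv2.imread('right.png')

kp1 = np.random.rand(100, 2) * 200       # hypothetical matched (x, y) keypoints
kp2 = kp1 + np.random.randn(100, 2)      # row i of kp1 matches row i of kp2

vis = draw_match_2_side(img1, kp1, img2, kp2, N=30)   # function defined above
cv2.imwrite('matches.png', vis)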

Example 14: load_features

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
# (this snippet also uses: import os, sys, gzip, pickle)
def load_features(self):
        if os.path.exists(self.features_file):
            #print "Loading " + self.features_file
            try:
                fp = gzip.open(self.features_file, "rb")
                feature_list = pickle.load(fp)
                fp.close()
                self.kp_list = []
                for point in feature_list:
                    kp = cv2.KeyPoint(x=point[0][0], y=point[0][1],
                                      _size=point[1], _angle=point[2],
                                      _response=point[3], _octave=point[4],
                                      _class_id=point[5])
                    self.kp_list.append(kp)
                return True
            except:
                print(self.features_file + ":\n" + "  feature load error: " \
                      + str(sys.exc_info()[0]) + ": " + str(sys.exc_info()[1]))
        return False 
Developer: UASLab, Project: ImageAnalysis, Lines of code: 21, Source file: image.py

Example 15: draw_keypoints

# Required imports: import cv2 [as alias]
# Or: from cv2 import KeyPoint [as alias]
# (this snippet also uses: import numpy as np)
def draw_keypoints(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_confidence=0.5, min_part_confidence=0.5):
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_confidence:
            continue
        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_confidence:
                continue
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
    out_img = cv2.drawKeypoints(img, cv_keypoints, outImage=np.array([]))
    return out_img 
Developer: rwightman, Project: posenet-pytorch, Lines of code: 15, Source file: utils.py


Note: the cv2.KeyPoint examples in this article were compiled by 純淨天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and the copyright of the source code remains with the original authors. Please consult each project's License before redistributing or using the code, and do not republish this article without permission.