

Python cv2.drawKeypoints Method Code Examples

This article collects typical usage examples of the cv2.drawKeypoints method in Python. If you are wondering what cv2.drawKeypoints does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples from the cv2 module.


The following presents 15 code examples of the cv2.drawKeypoints method, sorted by popularity by default.
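
Before the individual examples, here is a minimal, self-contained sketch of the basic call pattern (the image path and ORB settings are illustrative assumptions, not taken from any of the projects listed below):

import cv2

# Minimal sketch: detect ORB keypoints and render them with cv2.drawKeypoints.
# "image.jpg" is a placeholder path; replace it with a real file.
img = cv2.imread("image.jpg")
orb = cv2.ORB_create(nfeatures=500)
kp = orb.detect(img, None)

# Passing None as outImage lets OpenCV allocate and return a new image;
# DRAW_RICH_KEYPOINTS draws circles whose size and orientation match each keypoint.
vis = cv2.drawKeypoints(img, kp, None, color=(0, 255, 0),
                        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

cv2.imshow("drawKeypoints demo", vis)
cv2.waitKey(0)
cv2.destroyAllWindows()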

Example 1: compute_fast_det

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def compute_fast_det(filename, is_nms=True, thresh = 10):

    img = cv2.imread(filename)
    
    # Initiate FAST object with default values
    fast = cv2.FastFeatureDetector_create() #FastFeatureDetector()

    # find and draw the keypoints
    if not is_nms:
        fast.setNonmaxSuppression(0)

    fast.setThreshold(thresh)

    kp = fast.detect(img,None)
    cv2.drawKeypoints(img, kp, img, color=(255,0,0))
    
    return img 
Author: PacktPublishing, Project: Practical-Computer-Vision, Lines: 19, Source: 04_fast_feature.py
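
A possible way to call this helper, assuming a local image file (the path below is a placeholder, not part of the original project):

import cv2

# Hypothetical usage of compute_fast_det defined above; "corridor.jpg" is a placeholder.
result = compute_fast_det("corridor.jpg", is_nms=True, thresh=20)
cv2.imshow("FAST keypoints", result)
cv2.waitKey(0)
cv2.destroyAllWindows()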

Example 2: postprocessing_image

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def postprocessing_image(self, frame):
        # Detect blobs.
        keypoints = self.detector.detect(frame)

        # Draw detected blobs as red circles.
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the
        # circle corresponds to the size of blob
        im_with_keypoints = cv2.drawKeypoints(frame,
                                              keypoints,
                                              np.array([]),
                                              (0, 0, 255),
                                              cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        leaves_data = self.k_means(keypoints)
        frame = self.print_number_of_leaves(im_with_keypoints, leaves_data)
        return frame 
Author: OpenAgricultureFoundation, Project: openag_cv, Lines: 18, Source: BlobDetector.py

Example 3: draw_skel_and_kp

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def draw_skel_and_kp(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_score=0.5, min_part_score=0.5):
    out_img = img
    adjacent_keypoints = []
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_score:
            continue

        new_keypoints = get_adjacent_keypoints(
            keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score)
        adjacent_keypoints.extend(new_keypoints)

        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_score:
                continue
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))

    out_img = cv2.drawKeypoints(
        out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),
        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))
    return out_img 
Author: rwightman, Project: posenet-python, Lines: 26, Source: utils.py

Example 4: draw_keypoints

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def draw_keypoints(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_confidence=0.5, min_part_confidence=0.5):
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_confidence:
            continue
        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_confidence:
                continue
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
    out_img = cv2.drawKeypoints(img, cv_keypoints, outImage=np.array([]))
    return out_img 
Author: rwightman, Project: posenet-pytorch, Lines: 15, Source: utils.py

Example 5: draw_skel_and_kp

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def draw_skel_and_kp(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_score=0.5, min_part_score=0.5):

    out_img = img
    adjacent_keypoints = []
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_score:
            continue

        new_keypoints = get_adjacent_keypoints(
            keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score)
        adjacent_keypoints.extend(new_keypoints)

        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_score:
                continue
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))

    if cv_keypoints:
        out_img = cv2.drawKeypoints(
            out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),
            flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))
    return out_img 
Author: rwightman, Project: posenet-pytorch, Lines: 28, Source: utils.py

Example 6: draw_keypoints

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def draw_keypoints(self, name='keypoints', delay=1):
        if self.image.ndim == 2:
            image = np.repeat(self.image[..., np.newaxis], 3, axis=2)
        else:
            image = self.image
        img = cv2.drawKeypoints(image, self.keypoints, None, flags=0)
        cv2.imshow(name, img)
        cv2.waitKey(delay)
Author: uoip, Project: rgbd_ptam, Lines: 9, Source: feature.py

Example 7: show_blobs_in_heatmap

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def show_blobs_in_heatmap(heatmap, blobs):
    heatmap_with_blobs = cv2.drawKeypoints(heatmap, blobs, np.array([]),
                                           (0,0,255),
                                           cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    [i,j] = np.unravel_index(heatmap.argmin(), heatmap.shape)
    cv2.circle(heatmap_with_blobs, (j,i), 3, (0,255,0))
    cv2.imshow("Heatmap Blobs", heatmap_with_blobs)
    cv2.waitKey(0) 
Author: Guanghan, Project: lighttrack, Lines: 11, Source: utils_nms.py

Example 8: main

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def main():
    org_image = cv2.imread("../data/house.tiff", 1)
    '''
    SURF is generally faster than SIFT at detecting and computing features,
    but both algorithms were patent-encumbered and live in the non-free
    xfeatures2d contrib module.

    As a free, open-source alternative, OpenCV provides ORB.
    PARAM: nfeatures : maximum number of features to retain
                       (the ORB default is 500).
    '''

    sift = cv2.xfeatures2d.SIFT_create()
    surf = cv2.xfeatures2d.SURF_create()
    orb = cv2.ORB_create(nfeatures=1000)

    kp_sift, des_sift = sift.detectAndCompute(org_image, None)
    kp_surf, des_surf = surf.detectAndCompute(org_image, None)
    kp_orb, des_orb = orb.detectAndCompute(org_image, None)

    org_image_sift = cv2.drawKeypoints(org_image, kp_sift, None)
    org_image_surf = cv2.drawKeypoints(org_image, kp_surf, None)
    org_image_orb = cv2.drawKeypoints(org_image, kp_orb, None)

    cv2.imshow("SIFT Features Detected", org_image_sift)
    cv2.imshow("SURF Features Detected", org_image_surf)
    cv2.imshow("ORB Features Detected", org_image_orb)

    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Author: amarlearning, Project: Finger-Detection-and-Tracking, Lines: 31, Source: featureDetection.py
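
Note that SIFT_create and SURF_create in the example above come from the opencv-contrib xfeatures2d module, and SURF additionally requires a non-free build. A hedged sketch of a guarded setup follows (the fallback behaviour is an assumption, not part of the original project):

import cv2

# Pick whichever detectors this OpenCV build actually provides.
# Skipping SURF silently when it is unavailable is an illustrative choice.
def make_detectors():
    detectors = {"orb": cv2.ORB_create(nfeatures=1000)}
    if hasattr(cv2, "SIFT_create"):            # OpenCV >= 4.4: SIFT in the main namespace
        detectors["sift"] = cv2.SIFT_create()
    elif hasattr(cv2, "xfeatures2d"):          # older opencv-contrib builds
        detectors["sift"] = cv2.xfeatures2d.SIFT_create()
    if hasattr(cv2, "xfeatures2d"):
        try:
            detectors["surf"] = cv2.xfeatures2d.SURF_create()  # needs OPENCV_ENABLE_NONFREE
        except cv2.error:
            pass                               # SURF not enabled in this build
    return detectors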

Example 9: add_blobs

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def add_blobs(crop_frame):
    frame=cv2.GaussianBlur(crop_frame, (3, 3), 0)
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of green color in HSV
    lower_green = np.array([70,50,50])
    upper_green = np.array([85,255,255])
    # Threshold the HSV image to keep only the green colors
    mask = cv2.inRange(hsv, lower_green, upper_green)
    mask = cv2.erode(mask, None, iterations=1)
    mask = cv2.dilate(mask, None, iterations=1)    
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame,frame, mask= mask)
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs.
    reversemask=255-mask
    keypoints = detector.detect(reversemask)
    if keypoints:
        print "found blobs"
        if len(keypoints) > 4:
            keypoints.sort(key=(lambda s: s.size))
            keypoints=keypoints[0:3]
        # Draw detected blobs as red circles.
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
        im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    else:
        print "no blobs"
        im_with_keypoints=crop_frame
        
    return im_with_keypoints #, max_blob_dist, blob_center, keypoint_in_orders 
Author: perrytsao, Project: pc-drone, Lines: 32, Source: webcam_track_blobs.py
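
The snippet relies on a module-level `params` object configured elsewhere in the original script. A minimal sketch of how such a configuration might look (the threshold and filter values are illustrative assumptions, not the original project's settings):

import cv2

# Hypothetical blob-detector configuration; values are for illustration only.
params = cv2.SimpleBlobDetector_Params()
params.minThreshold = 10
params.maxThreshold = 200
params.filterByArea = True
params.minArea = 50           # drop tiny specks
params.filterByCircularity = True
params.minCircularity = 0.5   # keep roughly round blobs
detector = cv2.SimpleBlobDetector_create(params)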

Example 10: detect

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def detect(self, frame, mask=None, filter=True): 
        if not self.need_color_image and frame.ndim>2:                                    # check if we have to convert to gray image 
            frame = cv2.cvtColor(frame,cv2.COLOR_RGB2GRAY)                    
        if self.use_pyramid_adaptor:  
            # detection with pyramid adaptor (it can optionally include a block adaptor per level)
            kps = self.pyramid_adaptor.detect(frame, mask)            
        elif self.use_bock_adaptor:   
            # detection with block adaptor 
            kps = self.block_adaptor.detect(frame, mask)            
        else:                         
            # standard detection      
            kps = self._feature_detector.detect(frame, mask)  
        # filter keypoints    
        filter_name = 'NONE'   
        if filter:   
            kps, _, filter_name  = self.filter_keypoints(self.keypoint_filter_type, frame, kps) 
        # if keypoints are FAST, etc. give them a decent size in order to properly compute the descriptors       
        if self.do_keypoints_size_rescaling:
            self.rescale_keypoint_size(kps)             
        if kDrawOriginalExtractedFeatures: # draw the original features
            imgDraw = cv2.drawKeypoints(frame, kps, None, color=(0,255,0), flags=0)
            cv2.imshow('detected keypoints',imgDraw)            
        if kVerbose:
            print('detector:',self.detector_type.name,', #features:', len(kps),', [kp-filter:',filter_name,']')    
        return kps        
    
    
    # compute the descriptors once given the keypoints 
Author: luigifreda, Project: pyslam, Lines: 30, Source: feature_manager.py

Example 11: func2

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def func2(path):    
    frame = cv2.imread(path)
    frame = cv2.resize(frame,(128,128))
    converted2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Convert from BGR to HSV
    #cv2.imshow("original",converted2)

    lowerBoundary = np.array([0,40,30],dtype="uint8")
    upperBoundary = np.array([43,255,254],dtype="uint8")
    skinMask = cv2.inRange(converted, lowerBoundary, upperBoundary)
    skinMask = cv2.addWeighted(skinMask,0.5,skinMask,0.5,0.0)
    #cv2.imshow("masked",skinMask)
    
    skinMask = cv2.medianBlur(skinMask, 5)
    
    skin = cv2.bitwise_and(converted2, converted2, mask = skinMask)
    
    #cv2.imshow("masked2",skin)
    img2 = cv2.Canny(skin,60,60)
    #cv2.imshow("edge detection",img2)
    img2 = cv2.resize(img2,(256,256))
    orb = cv2.ORB_create()
    kp, des = orb.detectAndCompute(img2, None)

    #print(len(des))
    img2 = cv2.drawKeypoints(img2, kp, None, color=(0,255,0), flags=0)
    #plt.imshow(img2),plt.show()

    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return des
#func("001.jpg") 
Author: imRishabhGupta, Project: Indian-Sign-Language-Recognition, Lines: 34, Source: surf_image_processing.py

Example 12: feature_detection

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def feature_detection(img1, points1): 
    fast = cv2.FastFeatureDetector_create(threshold=20)    # set the detection threshold
    fast.setNonmaxSuppression(True)                         # enable non-max suppression
    kp = fast.detect(img1, None)
    cd_x = np.array([k.pt[0] for k in kp])
    cd_y = np.array([k.pt[1] for k in kp])
    for i in range(len(cd_x)):
        points1.append([[cd_x[i], cd_y[i]]])
    #img1 = cv2.drawKeypoints(img1, kp, img1)    # for testing keypoint generation
    #cv2.imwrite('kp_test.png', img1)

#test feature detection
#points1 = []
#feature_detection(img, points1)
#print(points1)
Author: karanchawla, Project: Monocular-Visual-Inertial-Odometry, Lines: 17, Source: vo.py

Example 13: draw_keyp

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def draw_keyp(img, kp):
    """
    Takes an image and keypoints and draws the keypoints on that same image.
    Does not display it.
    """
    cv2.drawKeypoints(img,kp,img, color=(255,0,0), flags=2) 
    return img 
Author: PacktPublishing, Project: Practical-Computer-Vision, Lines: 9, Source: 04_orb_detections.py

Example 14: compute_sift_features

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def compute_sift_features(img):
    gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create()
    kp = sift.detect(gray,None) 
    img = cv2.drawKeypoints(gray, kp, None)
    plt.figure(figsize=(12, 8))
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.show() 
Author: PacktPublishing, Project: Practical-Computer-Vision, Lines: 11, Source: 04_sift_features.py

Example 15: compute_fast_det

# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def compute_fast_det(img, is_nms=True, thresh = 10):
    gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    # Initiate FAST object with default values
    fast = cv2.FastFeatureDetector_create() #FastFeatureDetector()

    # find and draw the keypoints
    if not is_nms:
        fast.setNonmaxSuppression(0)

    fast.setThreshold(thresh)

    kp = fast.detect(img,None)
    cv2.drawKeypoints(img, kp, img, color=(255,0,0))

    sift = cv2.xfeatures2d.SIFT_create()
    kp = sift.detect(gray, None)

    img = cv2.drawKeypoints(gray, kp, None)

    plt.figure(figsize=(12, 8))
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.show() 
Author: PacktPublishing, Project: Practical-Computer-Vision, Lines: 28, Source: 04_sift_features.py


Note: The cv2.drawKeypoints examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; please consult each project's License before redistributing or using the code. Do not reproduce this article without permission.