This article collects typical usage examples of the Python method cv2.drawKeypoints. If you are wondering what exactly cv2.drawKeypoints does, or how to use it, the curated code examples below may help. You can also explore further usage examples of the cv2 module to which this method belongs.
The following shows 15 code examples of cv2.drawKeypoints, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
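For reference, the Python binding of this function has the signature cv2.drawKeypoints(image, keypoints, outImage[, color[, flags]]) -> outImage. The short sketch below is not taken from any of the examples that follow; the file names are placeholders, and ORB is used only because it ships with the core cv2 module:

import cv2

# Minimal, self-contained sketch of cv2.drawKeypoints usage.
# "input.jpg" and "keypoints.jpg" are placeholder file names.
img = cv2.imread("input.jpg")
orb = cv2.ORB_create()                     # any keypoint detector works here
kp = orb.detect(img, None)
# Passing None as outImage lets OpenCV allocate the output image.
out = cv2.drawKeypoints(img, kp, None, color=(0, 255, 0),
                        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite("keypoints.jpg", out)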
Example 1: compute_fast_det
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def compute_fast_det(filename, is_nms=True, thresh=10):
    img = cv2.imread(filename)
    # Initiate FAST object with default values
    fast = cv2.FastFeatureDetector_create()
    # find and draw the keypoints
    if not is_nms:
        fast.setNonmaxSuppression(0)
    fast.setThreshold(thresh)
    kp = fast.detect(img, None)
    cv2.drawKeypoints(img, kp, img, color=(255, 0, 0))
    return img
Example 2: postprocessing_image
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
# Also needed: import numpy as np
def postprocessing_image(self, frame):
    # Detect blobs.
    keypoints = self.detector.detect(frame)
    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the
    # circle corresponds to the size of the blob.
    im_with_keypoints = cv2.drawKeypoints(frame,
                                          keypoints,
                                          np.array([]),
                                          (0, 0, 255),
                                          cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    leaves_data = self.k_means(keypoints)
    frame = self.print_number_of_leaves(im_with_keypoints, leaves_data)
    return frame
Example 3: draw_skel_and_kp
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
# Also needed: import numpy as np
def draw_skel_and_kp(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_score=0.5, min_part_score=0.5):
    out_img = img
    adjacent_keypoints = []
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_score:
            continue
        new_keypoints = get_adjacent_keypoints(
            keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score)
        adjacent_keypoints.extend(new_keypoints)
        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_score:
                continue
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
    out_img = cv2.drawKeypoints(
        out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),
        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))
    return out_img
Example 4: draw_keypoints
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
# Also needed: import numpy as np
def draw_keypoints(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_confidence=0.5, min_part_confidence=0.5):
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_confidence:
            continue
        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_confidence:
                continue
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
    out_img = cv2.drawKeypoints(img, cv_keypoints, outImage=np.array([]))
    return out_img
Example 5: draw_skel_and_kp
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
# Also needed: import numpy as np
def draw_skel_and_kp(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_score=0.5, min_part_score=0.5):
    out_img = img
    adjacent_keypoints = []
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_score:
            continue
        new_keypoints = get_adjacent_keypoints(
            keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score)
        adjacent_keypoints.extend(new_keypoints)
        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_score:
                continue
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
    if cv_keypoints:
        out_img = cv2.drawKeypoints(
            out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),
            flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))
    return out_img
Example 6: draw_keypoints
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
# Also needed: import numpy as np
def draw_keypoints(self, name='keypoints', delay=1):
    if self.image.ndim == 2:
        image = np.repeat(self.image[..., np.newaxis], 3, axis=2)
    else:
        image = self.image
    img = cv2.drawKeypoints(image, self.keypoints, None, flags=0)
    cv2.imshow(name, img)
    cv2.waitKey(delay)
Example 7: show_blobs_in_heatmap
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
# Also needed: import numpy as np
def show_blobs_in_heatmap(heatmap, blobs):
    heatmap_with_blobs = cv2.drawKeypoints(heatmap, blobs, np.array([]),
                                           (0, 0, 255),
                                           cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    [i, j] = np.unravel_index(heatmap.argmin(), heatmap.shape)
    cv2.circle(heatmap_with_blobs, (j, i), 3, (0, 255, 0))
    cv2.imshow("Heatmap Blobs", heatmap_with_blobs)
    cv2.waitKey(0)
Example 8: main
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def main():
    org_image = cv2.imread("../data/house.tiff", 1)
    '''
    SURF is considered better than SIFT and detects and computes features
    faster, but unfortunately both algorithms were patented (non-free).
    As an alternative, OpenCV provides ORB: free and open source.
    PARAM: nfeatures : number of features to be detected
    (ORB_create defaults to 500).
    '''
    sift = cv2.xfeatures2d.SIFT_create()
    surf = cv2.xfeatures2d.SURF_create()
    orb = cv2.ORB_create(nfeatures=1000)
    kp_sift, desc_sift = sift.detectAndCompute(org_image, None)
    kp_surf, desc_surf = surf.detectAndCompute(org_image, None)
    kp_orb, desc_orb = orb.detectAndCompute(org_image, None)
    org_image_sift = cv2.drawKeypoints(org_image, kp_sift, None)
    org_image_surf = cv2.drawKeypoints(org_image, kp_surf, None)
    org_image_orb = cv2.drawKeypoints(org_image, kp_orb, None)
    cv2.imshow("SIFT Features Detected", org_image_sift)
    cv2.imshow("SURF Features Detected", org_image_surf)
    cv2.imshow("ORB Features Detected", org_image_orb)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example 9: add_blobs
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
# Also needed: import numpy as np
def add_blobs(crop_frame):
    frame = cv2.GaussianBlur(crop_frame, (3, 3), 0)
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of green color in HSV
    lower_green = np.array([70, 50, 50])
    upper_green = np.array([85, 255, 255])
    # Threshold the HSV image to get only green colors
    mask = cv2.inRange(hsv, lower_green, upper_green)
    mask = cv2.erode(mask, None, iterations=1)
    mask = cv2.dilate(mask, None, iterations=1)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs on the inverted mask.
    reversemask = 255 - mask
    keypoints = detector.detect(reversemask)
    if keypoints:
        print("found blobs")
        if len(keypoints) > 4:
            # keep only the three smallest blobs
            keypoints.sort(key=(lambda s: s.size))
            keypoints = keypoints[0:3]
        # Draw detected blobs as red circles.
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the
        # circle corresponds to the size of the blob.
        im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0, 0, 255),
                                              cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    else:
        print("no blobs")
        im_with_keypoints = crop_frame
    return im_with_keypoints  # , max_blob_dist, blob_center, keypoint_in_orders
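Note that `params` in the example above is defined elsewhere in its repository and is not shown here. A minimal sketch of how such a SimpleBlobDetector parameter set could be built follows; the concrete thresholds are illustrative assumptions, not values from the original code:

import cv2

# Hypothetical blob-detector parameters; tune them for your own data.
params = cv2.SimpleBlobDetector_Params()
params.filterByColor = True
params.blobColor = 255            # bright blobs on the inverted mask
params.filterByArea = True
params.minArea = 20               # assumed minimum blob size in pixels
params.filterByCircularity = False
params.filterByConvexity = False
params.filterByInertia = False
detector = cv2.SimpleBlobDetector_create(params)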
Example 10: detect
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def detect(self, frame, mask=None, filter=True):
    if not self.need_color_image and frame.ndim > 2:  # check if we have to convert to gray image
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    if self.use_pyramid_adaptor:
        # detection with pyramid adaptor (it can optionally include a block adaptor per level)
        kps = self.pyramid_adaptor.detect(frame, mask)
    elif self.use_bock_adaptor:
        # detection with block adaptor
        kps = self.block_adaptor.detect(frame, mask)
    else:
        # standard detection
        kps = self._feature_detector.detect(frame, mask)
    # filter keypoints
    filter_name = 'NONE'
    if filter:
        kps, _, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps)
    # if keypoints are FAST, etc., give them a decent size in order to properly compute the descriptors
    if self.do_keypoints_size_rescaling:
        self.rescale_keypoint_size(kps)
    if kDrawOriginalExtractedFeatures:  # draw the original features
        imgDraw = cv2.drawKeypoints(frame, kps, None, color=(0, 255, 0), flags=0)
        cv2.imshow('detected keypoints', imgDraw)
    if kVerbose:
        print('detector:', self.detector_type.name, ', #features:', len(kps), ', [kp-filter:', filter_name, ']')
    return kps
Example 11: func2
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
# Also needed: import numpy as np
def func2(path):
    frame = cv2.imread(path)
    frame = cv2.resize(frame, (128, 128))
    converted2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # Convert from BGR to HSV
    #cv2.imshow("original", converted2)
    lowerBoundary = np.array([0, 40, 30], dtype="uint8")
    upperBoundary = np.array([43, 255, 254], dtype="uint8")
    skinMask = cv2.inRange(converted, lowerBoundary, upperBoundary)
    skinMask = cv2.addWeighted(skinMask, 0.5, skinMask, 0.5, 0.0)
    #cv2.imshow("masked", skinMask)
    skinMask = cv2.medianBlur(skinMask, 5)
    skin = cv2.bitwise_and(converted2, converted2, mask=skinMask)
    #cv2.imshow("masked2", skin)
    img2 = cv2.Canny(skin, 60, 60)
    #cv2.imshow("edge detection", img2)
    img2 = cv2.resize(img2, (256, 256))
    orb = cv2.ORB_create()  # ORB lives in the main cv2 namespace, not in xfeatures2d
    kp, des = orb.detectAndCompute(img2, None)
    #print(len(des))
    img2 = cv2.drawKeypoints(img2, kp, None, color=(0, 255, 0), flags=0)
    #plt.imshow(img2), plt.show()
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return des
#func("001.jpg")
Example 12: feature_detection
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
# Also needed: import numpy as np
def feature_detection(img1, points1):
    fast = cv2.FastFeatureDetector_create(threshold=20)  # sets the threshold (OpenCV 3+ API)
    fast.setNonmaxSuppression(True)  # makes non-max suppression true
    kp = fast.detect(img1, None)
    cd_x = np.array([k.pt[0] for k in kp])
    cd_y = np.array([k.pt[1] for k in kp])
    for i in range(len(cd_x)):
        points1.append([[cd_x[i], cd_y[i]]])
    #img1 = cv2.drawKeypoints(img1, kp, img1)  # for testing keypoint generation
    #cv2.imwrite('kp_test.png', img1)

#test feature detection
#points1 = []
#feature_detection(img, points1)
#print(points1)
Example 13: draw_keyp
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
def draw_keyp(img, kp):
    """
    Takes an image and keypoints and draws the keypoints on the same image.
    Does not display it.
    """
    cv2.drawKeypoints(img, kp, img, color=(255, 0, 0), flags=2)
    return img
Example 14: compute_sift_features
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
# Also needed: import matplotlib.pyplot as plt
def compute_sift_features(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create()
    kp = sift.detect(gray, None)
    img = cv2.drawKeypoints(gray, kp, None)  # outImage is a required argument
    plt.figure(figsize=(12, 8))
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.show()
Example 15: compute_fast_det
# Required module: import cv2 [as alias]
# Or: from cv2 import drawKeypoints [as alias]
# Also needed: import matplotlib.pyplot as plt
def compute_fast_det(img, is_nms=True, thresh=10):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Initiate FAST object with default values
    fast = cv2.FastFeatureDetector_create()
    # find and draw the keypoints
    if not is_nms:
        fast.setNonmaxSuppression(0)
    fast.setThreshold(thresh)
    kp = fast.detect(img, None)
    cv2.drawKeypoints(img, kp, img, color=(255, 0, 0))
    sift = cv2.xfeatures2d.SIFT_create()  # cv2.SIFT() is the removed OpenCV 2.x API
    kp = sift.detect(gray, None)
    img = cv2.drawKeypoints(gray, kp, None)  # outImage is a required argument
    plt.figure(figsize=(12, 8))
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.show()