This article collects typical usage examples of the cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS attribute in Python. If you are wondering what cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS does and how to use it, the selected code examples below may help. You can also explore further usage examples of the cv2 module in which this attribute is defined.
The following presents 9 code examples of cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
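Before the examples, a minimal, self-contained sketch (not taken from any of the projects below) may help illustrate what the flag changes: with cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, drawKeypoints renders each keypoint as a circle whose radius reflects the keypoint size, plus a line indicating its orientation, instead of a plain dot. ORB is used here only because it ships with the default OpenCV build, and 'example.jpg' is a placeholder path.
import cv2
img = cv2.imread('example.jpg')            # placeholder path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
orb = cv2.ORB_create()                     # any keypoint detector works here
keypoints = orb.detect(gray, None)
# Default flag: every keypoint is drawn as a small dot.
plain = cv2.drawKeypoints(img, keypoints, None, (0, 255, 0),
                          cv2.DRAW_MATCHES_FLAGS_DEFAULT)
# Rich keypoints: circle radius encodes keypoint size, a line shows orientation.
rich = cv2.drawKeypoints(img, keypoints, None, (0, 255, 0),
                         cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('default', plain)
cv2.imshow('rich keypoints', rich)
cv2.waitKey(0)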
Example 1: postprocessing_image
# Required module: import cv2 [as alias]
# Or: from cv2 import DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS [as alias]
def postprocessing_image(self, frame):
# Detect blobs.
keypoints = self.detector.detect(frame)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the
# circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(frame,
keypoints,
np.array([]),
(0, 0, 255),
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
leaves_data = self.k_means(keypoints)
frame = self.print_number_of_leaves(im_with_keypoints, leaves_data)
return frame
Example 2: draw_skel_and_kp
# Required module: import cv2 [as alias]
# Or: from cv2 import DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS [as alias]
def draw_skel_and_kp(
img, instance_scores, keypoint_scores, keypoint_coords,
min_pose_score=0.5, min_part_score=0.5):
out_img = img
adjacent_keypoints = []
cv_keypoints = []
for ii, score in enumerate(instance_scores):
if score < min_pose_score:
continue
new_keypoints = get_adjacent_keypoints(
keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score)
adjacent_keypoints.extend(new_keypoints)
for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
if ks < min_part_score:
continue
cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
out_img = cv2.drawKeypoints(
out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))
return out_img
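The helper get_adjacent_keypoints used above is not included in this snippet. A plausible sketch, assuming a CONNECTED_PART_INDICES list of (left, right) keypoint-index pairs as in the posenet utilities this example appears to come from, would build the int32 point arrays that cv2.polylines expects (keypoint_coords are stored as (y, x), so they are reversed for drawing):
def get_adjacent_keypoints(keypoint_scores, keypoint_coords, min_confidence=0.1):
    results = []
    for left, right in CONNECTED_PART_INDICES:  # assumed list of joint index pairs
        if keypoint_scores[left] < min_confidence or keypoint_scores[right] < min_confidence:
            continue
        # reverse (y, x) to (x, y) and cast to int32 for cv2.polylines
        results.append(np.array([keypoint_coords[left][::-1],
                                 keypoint_coords[right][::-1]]).astype(np.int32))
    return results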
Example 3: draw_skel_and_kp
# Required module: import cv2 [as alias]
# Or: from cv2 import DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS [as alias]
def draw_skel_and_kp(
img, instance_scores, keypoint_scores, keypoint_coords,
min_pose_score=0.5, min_part_score=0.5):
out_img = img
adjacent_keypoints = []
cv_keypoints = []
for ii, score in enumerate(instance_scores):
if score < min_pose_score:
continue
new_keypoints = get_adjacent_keypoints(
keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score)
adjacent_keypoints.extend(new_keypoints)
for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
if ks < min_part_score:
continue
cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
if cv_keypoints:
out_img = cv2.drawKeypoints(
out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))
return out_img
Example 4: show_blobs_in_heatmap
# Required module: import cv2 [as alias]
# Or: from cv2 import DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS [as alias]
def show_blobs_in_heatmap(heatmap, blobs):
heatmap_with_blobs = cv2.drawKeypoints(heatmap, blobs, np.array([]),
(0,0,255),
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
[i,j] = np.unravel_index(heatmap.argmin(), heatmap.shape)
cv2.circle(heatmap_with_blobs, (j,i), 3, (0,255,0))
cv2.imshow("Heatmap Blobs", heatmap_with_blobs)
cv2.waitKey(0)
Example 5: add_blobs
# Required module: import cv2 [as alias]
# Or: from cv2 import DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS [as alias]
def add_blobs(crop_frame):
frame=cv2.GaussianBlur(crop_frame, (3, 3), 0)
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of green color in HSV
lower_green = np.array([70,50,50])
upper_green = np.array([85,255,255])
# Threshold the HSV image to keep only green pixels
mask = cv2.inRange(hsv, lower_green, upper_green)
mask = cv2.erode(mask, None, iterations=1)
mask = cv2.dilate(mask, None, iterations=1)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(frame,frame, mask= mask)
detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs.
reversemask=255-mask
keypoints = detector.detect(reversemask)
if keypoints:
print "found blobs"
if len(keypoints) > 4:
keypoints.sort(key=(lambda s: s.size))
keypoints=keypoints[0:3]
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
else:
print "no blobs"
im_with_keypoints=crop_frame
return im_with_keypoints #, max_blob_dist, blob_center, keypoint_in_orders
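The `params` object passed to cv2.SimpleBlobDetector_create above is defined elsewhere in the original project. A typical setup looks like the following; the threshold and area values are illustrative, not the project's actual configuration:
params = cv2.SimpleBlobDetector_Params()
params.minThreshold = 10
params.maxThreshold = 200
params.filterByArea = True
params.minArea = 20            # ignore very small specks
params.filterByCircularity = False
params.filterByConvexity = False
params.filterByInertia = False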
Example 6: show_blobs_in_heatmap
# Required module: import cv2 [as alias]
# Or: from cv2 import DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS [as alias]
def show_blobs_in_heatmap(heatmap, blobs):
heatmap_with_blobs = cv2.drawKeypoints(heatmap, blobs, np.array([]),
(0, 0, 255),
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
[i, j] = np.unravel_index(heatmap.argmin(), heatmap.shape)
cv2.circle(heatmap_with_blobs, (j, i), 3, (0, 255, 0))
cv2.imshow("Heatmap Blobs", heatmap_with_blobs)
cv2.waitKey(0)
Example 7: starDetection
# Required module: import cv2 [as alias]
# Or: from cv2 import DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS [as alias]
def starDetection(inputImg_edge):
imgStar=cv2.imread(inputImg_edge)
# imgGray=cv2.cvtColor(imgStar,cv2.COLOR_BGR2GRAY)
star=cv2.xfeatures2d.StarDetector_create()
keypoints=star.detect(imgStar)
# print(len(keypoints),keypoints)
cv2.drawKeypoints(imgStar,keypoints,imgStar,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('star features',imgStar)
cv2.imwrite(os.path.join(rootDirectory,'star features.jpg'),imgStar)
cv2.waitKey()
# SIFT image matching
Example 8: matchSift
# Required module: import cv2 [as alias]
# Or: from cv2 import DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS [as alias]
def matchSift(imgA,imgB):
img1 = cv2.imread(imgA, 0)
img2 = cv2.imread(imgB, 0)
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None) # get SIFT keypoints and descriptors
kp2, des2 = sift.detectAndCompute(img2, None)
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2) # match descriptors between the two images, returning the k best matches per query descriptor
"""
. @param k Count of best matches found per each query descriptor or less if a query descriptor has less than k possible matches in total.
The result of matches = bf.match(des1,des2) line is a list of DMatch objects. This DMatch object has following attributes:
DMatch.distance - Distance between descriptors. The lower, the better it is.
DMatch.trainIdx - Index of the descriptor in train descriptors
DMatch.queryIdx - Index of the descriptor in query descriptors
DMatch.imgIdx - Index of the train image.
See: https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
"""
print(type(matches),matches[:2],(matches[0][0].distance,matches[0][1].distance))
good = []
for m, n in matches:
if m.distance < 0.75 * n.distance: # with k=2 the nearest and second-nearest matches are returned; keep a match only if nearest/second-nearest < 0.75 (the ratio test explained by D. Lowe in his paper)
good.append([m])
step = max(1, int(0.1 * len(good)))  # draw roughly every 10th good match; guard against a slice step of 0 when few matches survive
imgM = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good[::step], None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
fig, ax=plt.subplots(figsize=(50,30))
ax.imshow(imgM), plt.show()
# cv2.imshow('matchSift',imgM)
# cv2.waitKey()
Example 9: siftDetection
# Required module: import cv2 [as alias]
# Or: from cv2 import DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS [as alias]
def siftDetection(inputImg_edge):
imgSift=cv2.imread(inputImg_edge)
imgGray=cv2.cvtColor(imgSift,cv2.COLOR_BGR2GRAY)
print(imgGray.shape)
sift=cv2.xfeatures2d.SIFT_create() # instantiate the SIFT feature detector
keypoints=sift.detect(imgGray,None) # detect SIFT keypoints
print(keypoints[:3],len(keypoints))
for k in keypoints[:3]:
print(k.pt,k.size,k.octave,k.response,k.class_id,k.angle)
"""
關鍵點信息包含:
k.pt關鍵點點的坐標(圖像像素位置)
k.size該點直徑的大小
k.octave從高斯金字塔的哪一層提取得到的數據
k.response響應程度,代表該點強壯大小,即角點的程度。角點:極值點,某方麵屬性特別突出的點(最大或最小)。
k.class_id對圖像進行分類時,可以用class_id對每個特征點進行區分,未設置時為-1
k.angle角度,關鍵點的方向。SIFT算法通過對鄰域做梯度運算,求得該方向。-1為初始值
"""
des = sift.compute(imgGray,keypoints) # compute the SIFT descriptors; returns a (keypoints, descriptors) tuple
print(type(keypoints),type(des))
print(des[0][:2]) # keypoints
print(des[1][:2]) # descriptors (built from the pixels around each keypoint that contribute to it)
print(des[1].shape)
imgSift=np.copy(imgSift)
cv2.drawKeypoints(imgSift,keypoints,imgSift,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
"""
help(cv2.drawKeypoints)
Help on built-in function drawKeypoints:
drawKeypoints(...)
drawKeypoints(image, keypoints, outImage[, color[, flags]]) -> outImage
. @brief Draws keypoints.
.
. @param image Source image. 原始圖像(3通道或單通道)
. @param keypoints Keypoints from the source image. 關鍵點(特征點向量),向量內每一個元素是一個keypoint對象,包含特征點的各種屬性特征
. @param outImage Output image. Its content depends on the flags value defining what is drawn in the. output image. See possible flags bit values below.
特征點繪製的畫布圖像(可以是原始圖像)。標記類型,參看@note部分
. @param color Color of keypoints. 顯示顏色,默認隨機彩色
. @param flags Flags setting drawing features. Possible flags bit values are defined by.DrawMatchesFlags. See details above in drawMatches .
.
. @note 特征點的 繪製模式,即繪製特征點的哪些信息
. For Python API, flags are modified as cv2.DRAW_MATCHES_FLAGS_DEFAULT,
. cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,
. cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
"""
cv2.imshow('sift features',imgSift)
cv2.imwrite(os.path.join(rootDirectory,'sift features.jpg'),imgSift)
cv2.waitKey()
# STAR feature detector