This article collects typical usage examples of the feature_extractor.FeatureExtractor.compute_descriptors method in Python. If you are wondering what FeatureExtractor.compute_descriptors does, how to use it, or what real-world calls to it look like, the curated example below may help. You can also learn more about the class this method belongs to, feature_extractor.FeatureExtractor.
The following shows 1 code example of the FeatureExtractor.compute_descriptors method, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Python code examples.
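For quick orientation, here is a minimal sketch of the call pattern used in the example below. It assumes the project-local FeatureExtractor API shown there (get_keypoints and compute_descriptors); the descriptor type, the parent argument, and the image path are placeholders, not part of the original example.

# Minimal sketch (assumed API): detect keypoints on a grayscale image,
# then compute descriptors for them.
import cv2
from feature_extractor import FeatureExtractor

extractor = FeatureExtractor(type="ORB", parent=None)  # descriptor type and parent are assumptions
gray = cv2.imread("hand.png", cv2.IMREAD_GRAYSCALE)    # placeholder image path
kp = extractor.get_keypoints(gray)
kp, des = extractor.compute_descriptors(gray, kp)      # keypoints plus an N x D descriptor array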
Example 1: Tester
# Required import: from feature_extractor import FeatureExtractor [as alias]
# Or: from feature_extractor.FeatureExtractor import compute_descriptors [as alias]
# Imports used by this example (added for completeness; the hand_tracker
# module path is assumed from the class name):
import sys
import cv2
import numpy as np
from scipy.cluster.vq import vq
from feature_extractor import FeatureExtractor
from hand_tracker import HandTracker  # import path assumed
class Tester(object):
    def __init__(self, numGestures, minDescriptorsPerFrame, numWords, descType, numPredictions, parent):
        self.numGestures = numGestures
        self.numWords = numWords
        self.minDescriptorsPerFrame = minDescriptorsPerFrame
        self.parent = parent
        self.classifier = None
        self.windowName = "Testing preview"
        self.handWindowName = "Cropped hand"
        self.binaryWindowName = "Binary frames"
        self.predictionList = [-1]*numPredictions
        self.handTracker = HandTracker(kernelSize=7, thresholdAngle=0.4, defectDistFromHull=30, parent=self)
        self.featureExtractor = FeatureExtractor(type=descType, parent=self)
        self.numSideFrames = 10
        # integer division keeps the frame-buffer shape integral
        self.prevFrameList = np.zeros((self.numSideFrames, self.parent.imHeight//self.numSideFrames, self.parent.imWidth//self.numSideFrames, 3), "uint8")
        self.numPrevFrames = 0
        self.predictionScoreThreshold = 0.2
        self.learningRate = 0.01
        self.numReinforce = 1
    def initialize(self, clf):
        self.classifier = clf
        self.numWords = self.classifier.voc.shape[0]
        self.prevStates = np.zeros((self.numSideFrames, self.numWords), "float32")
        self.prevLabels = [0]*self.numSideFrames
        self.prevScores = [0]*self.numSideFrames
    def test_on_video(self):
        vc = self.parent.vc
        # Color-calibration loop: show the sampling windows until the user presses space
        while vc.isOpened():
            ret, im = vc.read()
            im = cv2.flip(im, 1)
            imhsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
            self.handTracker.colorProfiler.draw_color_windows(im, imhsv)
            cv2.imshow(self.windowName, im)
            k = cv2.waitKey(1)
            if k == 32:  # space: finish calibration
                break
            elif k == 27:  # escape: quit
                sys.exit(0)
        self.handTracker.colorProfiler.run()
        binaryIm = self.handTracker.get_binary_image(imhsv)
        cnt, hull, centroid, defects = self.handTracker.initialize_contour(binaryIm)
        cv2.namedWindow(self.binaryWindowName)
        cv2.namedWindow(self.handWindowName)
        cv2.namedWindow(self.windowName)
        cv2.setMouseCallback(self.windowName, self.reinforce)
        # Main testing loop: track the hand, extract descriptors, and classify each frame
        while vc.isOpened():
            ret, im = vc.read()
            im = cv2.flip(im, 1)
            imhsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
            imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            binaryIm = self.handTracker.get_binary_image(imhsv)
            cnt, hull, centroid, defects = self.handTracker.get_contour(binaryIm)
            imCopy = 1*im
            testData = None
            prediction = -1
            score = -1
            update = False
            if cnt is not None:
                numDefects = defects.shape[0]
                cropImage, cropPoints = self.handTracker.get_cropped_image_from_cnt(im, cnt, 0.05)
                cropImageGray = self.handTracker.get_cropped_image_from_points(imgray, cropPoints)
                #cv2.fillPoly(binaryIm, cnt, 255)
                #cropImageBinary = self.handTracker.get_cropped_image_from_points(binaryIm, cropPoints)
                #cropImageGray = self.apply_binary_mask(cropImageGray, cropImageBinary, 5)
                #kp,des = self.featureExtractor.get_keypoints_and_descriptors(cropImageGray)
                # Detect keypoints on the cropped hand, keep those inside the hand contour,
                # and compute their descriptors
                kp = self.featureExtractor.get_keypoints(cropImageGray)
                cropCnt = self.handTracker.get_cropped_contour(cnt, cropPoints)
                kp = self.featureExtractor.get_keypoints_in_contour(kp, cropCnt)
                kp, des = self.featureExtractor.compute_descriptors(cropImageGray, kp)
                if des is not None and des.shape[0] >= 0:
                    self.featureExtractor.draw_keypoints(cropImage, kp)
                if des is not None and des.shape[0] >= self.minDescriptorsPerFrame and self.is_hand(defects):
                    # Quantize descriptors against the visual vocabulary and build an
                    # L2-normalized bag-of-words histogram
                    words, distance = vq(des, self.classifier.voc)
                    testData = np.zeros(self.numWords, "float32")
                    for w in words:
                        testData[w] += 1
                    normTestData = np.linalg.norm(testData, ord=2) * np.ones(self.numWords)
                    testData = np.divide(testData, normTestData)
                    prediction, score = self.predict(testData)
                    sortedScores = np.sort(score)
                    #if max(score) > self.predictionScoreThreshold:
                    # Accept the prediction only if the top score beats the runner-up by a margin
                    if sortedScores[-1] - sortedScores[-2] >= self.predictionScoreThreshold:
                        self.handTracker.draw_on_image(imCopy, cnt=False, hullColor=(0, 255, 0))
                    else:
                        self.handTracker.draw_on_image(imCopy, cnt=False, hullColor=(255, 0, 0))
                        prediction = -1
                        update = True
                else:
                    self.handTracker.draw_on_image(imCopy, cnt=False, hullColor=(0, 0, 255))
                    prediction = -1
                cv2.imshow(self.handWindowName, cropImage)
            else:
                prediction = -1
            #self.insert_to_prediction_list(prediction)
            #prediction,predictionCount = self.most_common(self.predictionList)
            #if prediction>=0:
#......... the rest of this example is omitted here .........
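The core of the prediction step above is a bag-of-words encoding: each descriptor returned by compute_descriptors is assigned to its nearest visual word with scipy's vq, the word counts are accumulated into a histogram, and the histogram is L2-normalized before being passed to the classifier. Here is a self-contained sketch of just that step, using random arrays in place of the frame descriptors and the trained vocabulary classifier.voc.

# Standalone sketch of the bag-of-words encoding used above; the descriptor
# matrix and vocabulary here are random placeholders.
import numpy as np
from scipy.cluster.vq import vq

numWords = 100
voc = np.random.rand(numWords, 64).astype("float32")  # placeholder vocabulary (numWords x descriptor length)
des = np.random.rand(250, 64).astype("float32")       # placeholder descriptors for one frame

words, distance = vq(des, voc)                # index of the nearest visual word per descriptor
testData = np.zeros(numWords, "float32")
for w in words:
    testData[w] += 1                          # raw word counts
testData /= np.linalg.norm(testData, ord=2)   # L2-normalize, as in the example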