This article collects typical usage examples of the cv2.FeatureDetector_create method in Python. If you are wondering how cv2.FeatureDetector_create works or how to call it in practice, the curated code examples below may help. You can also explore further usage examples from the cv2 module.
Eight code examples of cv2.FeatureDetector_create are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
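Before the examples, here is a minimal, hedged sketch of the version split they all deal with: cv2.FeatureDetector_create only exists in OpenCV 2.4.x, while OpenCV 3/4 use dedicated per-algorithm constructors such as cv2.ORB_create (the dummy image below is only there to keep the sketch runnable).
import cv2
import numpy as np

# a minimal sketch of the API split the examples below handle
if int(cv2.__version__.split('.')[0]) >= 3:
    # OpenCV 3/4: each algorithm has its own factory function
    detector = cv2.ORB_create(nfeatures=500)
else:
    # OpenCV 2.4.x: a single string-based factory
    detector = cv2.FeatureDetector_create("ORB")

image = np.zeros((480, 640), dtype=np.uint8)  # placeholder grayscale image
keypoints = detector.detect(image, None)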
Example 1: detectAndDescribe
# Required module: import cv2 [as alias]
# Or: from cv2 import FeatureDetector_create [as alias]
def detectAndDescribe(self, image):
    # check to see if we are using OpenCV 3.X or newer
    if int(cv2.__version__[0]) >= 3:
        # detect and extract features from the image
        descriptor = cv2.xfeatures2d.SIFT_create()
        (kps, features) = descriptor.detectAndCompute(image, None)
    # otherwise, we are using OpenCV 2.4.X
    else:
        # convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # detect keypoints in the image
        detector = cv2.FeatureDetector_create("SIFT")
        kps = detector.detect(gray)
        # extract features from the image
        extractor = cv2.DescriptorExtractor_create("SIFT")
        (kps, features) = extractor.compute(gray, kps)
    # convert the keypoints from KeyPoint objects to a NumPy array
    kps = np.float32([kp.pt for kp in kps])
    # return a tuple of keypoints and features
    return (kps, features)
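For context, a hedged sketch (not part of the original example) of how the features returned by detectAndDescribe for two images might be matched with Lowe's ratio test; the "BruteForce" matcher name is accepted by both OpenCV 2.4 and 3.x+.
def matchKeypoints(featuresA, featuresB, ratio=0.75):
    # brute-force matcher that works across OpenCV versions
    matcher = cv2.DescriptorMatcher_create("BruteForce")
    rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
    matches = []
    for m in rawMatches:
        # keep a match only if its distance is clearly below the second-best distance
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            matches.append((m[0].trainIdx, m[0].queryIdx))
    return matches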
Example 2: init_detector
# Required module: import cv2 [as alias]
# Or: from cv2 import FeatureDetector_create [as alias]
def init_detector(self):
    """Init keypoint detector object."""
    # BRIEF is a feature descriptor; CenSurE (STAR) is recommended as a fast detector:
    if check_cv_version_is_new():
        # OpenCV 3/4: star/brief live in the contrib module, which must be compiled separately.
        try:
            self.star_detector = cv2.xfeatures2d.StarDetector_create()
            self.brief_extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create()
        except Exception:
            import traceback
            traceback.print_exc()
            print("to use %s, you should build contrib with opencv3.0" % self.METHOD_NAME)
            raise NoModuleError("There is no %s module in your OpenCV environment !" % self.METHOD_NAME)
    else:
        # OpenCV 2.x
        self.star_detector = cv2.FeatureDetector_create("STAR")
        self.brief_extractor = cv2.DescriptorExtractor_create("BRIEF")
    # create BFMatcher object:
    self.matcher = cv2.BFMatcher(cv2.NORM_L1)  # cv2.NORM_L1 or cv2.NORM_L2; cv2.NORM_HAMMING is not usable here
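A hedged sketch of the detect/compute pattern these objects are typically used in; the method name below is illustrative and not taken from the original class.
def get_keypoints_and_descriptors(self, gray_image):
    # detect CenSurE/STAR keypoints, then compute BRIEF descriptors for them
    keypoints = self.star_detector.detect(gray_image, None)
    keypoints, descriptors = self.brief_extractor.compute(gray_image, keypoints)
    return keypoints, descriptors

# illustrative matching of a template against a scene with the BFMatcher created above
# matches = self.matcher.knnMatch(template_descriptors, scene_descriptors, k=2)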
Example 3: FeatureDetector_create
# Required module: import cv2 [as alias]
# Or: from cv2 import FeatureDetector_create [as alias]
def FeatureDetector_create(detector, *args, **kw_args):
    """
    :param detector: string naming the type of keypoint detector to return
    :param args: positional arguments for the detector
    :param kw_args: keyword arguments for the detector
    :return: the keypoint detector object
    """
    try:
        detr = _DETECTOR_FACTORY[detector.upper()]
    except KeyError:
        if detector.upper() in _CONTRIB_FUNCS:
            msg = "OpenCV needs to be compiled with opencv_contrib to support {}".format(detector)
            raise AttributeError(msg)
        raise AttributeError("{} not a supported detector".format(detector))
    return detr(*args, **kw_args)
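The _DETECTOR_FACTORY and _CONTRIB_FUNCS tables are not shown in this example. A hypothetical sketch of what they could look like on OpenCV 3/4, together with a usage of the wrapper above (the exact mapping in the original project may differ):
import cv2
import numpy as np

# hypothetical factory tables mapping detector names to OpenCV 3/4 constructors
_DETECTOR_FACTORY = {
    "FAST": cv2.FastFeatureDetector_create,
    "ORB": cv2.ORB_create,
    "BRISK": cv2.BRISK_create,
    "MSER": cv2.MSER_create,
}
# names that would additionally require an opencv_contrib build
_CONTRIB_FUNCS = {"SIFT", "SURF", "STAR", "FREAK"}

# usage with the FeatureDetector_create wrapper defined above
orb = FeatureDetector_create("ORB", nfeatures=500)
keypoints = orb.detect(np.zeros((480, 640), dtype=np.uint8), None)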
Example 4: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import FeatureDetector_create [as alias]
def __init__(self):
    """
    PUBLIC: Constructor
    -------------------
    board_image: BoardImage object, the first frame
    """
    #=====[ Step 1: set up feature extractors ]=====
    '''
    self.corner_detector = cv2.FeatureDetector_create('HARRIS')
    '''
    self.sift_descriptor = cv2.xfeatures2d.SIFT_create()

####################################################################################################
##############################[ --- FIND BOARD CORNER CORRESPONDENCES --- ]#########################
####################################################################################################
Example 5: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import FeatureDetector_create [as alias]
def __init__(self):
    """
    PUBLIC: Constructor
    -------------------
    board_image: BoardImage object, the first frame
    """
    #=====[ Step 1: set up feature extractors ]=====
    self.corner_detector = cv2.FeatureDetector_create('HARRIS')
    self.sift_descriptor = cv2.DescriptorExtractor_create('SIFT')

####################################################################################################
##############################[ --- FIND BOARD CORNER CORRESPONDENCES --- ]#########################
####################################################################################################
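A short, hedged usage sketch for the objects set up in Examples 4 and 5, assuming the OpenCV 2.4.x string-based API; the input file name is hypothetical.
corner_detector = cv2.FeatureDetector_create('HARRIS')
sift_descriptor = cv2.DescriptorExtractor_create('SIFT')
frame = cv2.imread('board.jpg')                      # hypothetical board image
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners = corner_detector.detect(gray)               # list of cv2.KeyPoint objects
corners, descriptors = sift_descriptor.compute(gray, corners)  # one SIFT descriptor per corner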
Example 6: get_harris_corners
# Required module: import cv2 [as alias]
# Or: from cv2 import FeatureDetector_create [as alias]
def get_harris_corners(image):
    """
    Function: get_harris_corners
    ----------------------------
    Given an image, returns a list of cv2.KeyPoint objects representing
    the Harris corners.
    """
    corner_detector = cv2.FeatureDetector_create('HARRIS')
    return corner_detector.detect(image)
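A hedged usage sketch: visualizing the keypoints returned by get_harris_corners with cv2.drawKeypoints (the file names are hypothetical).
image = cv2.imread('board.jpg', cv2.IMREAD_GRAYSCALE)        # hypothetical input file
keypoints = get_harris_corners(image)
annotated = cv2.drawKeypoints(image, keypoints, None, color=(0, 255, 0))
cv2.imwrite('board_corners.jpg', annotated)                   # hypothetical output file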
Example 7: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import FeatureDetector_create [as alias]
def __init__(self, storage):
    super(SIFT_SIFT_Extractor, self).__init__(storage)
    self.STORAGE_SUB_NAME = 'sift_sift'
    self.sub_folder = self.storage.get_sub_folder(
        self.STORAGE_SUPER_NAME, self.STORAGE_SUB_NAME)
    self.storage.ensure_dir(self.sub_folder)
    self._keypoint_detector = cv2.FeatureDetector_create("SIFT")
    self._keypoint_extractor = cv2.DescriptorExtractor_create("SIFT")
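A hedged sketch of how this extractor class might detect, describe, and persist features; the method name and file layout below are assumptions, not taken from the original class, and rely on import os and import numpy as np.
def extract_and_store(self, gray_image, name):
    # detect SIFT keypoints, then compute their descriptors (OpenCV 2.4.x API)
    keypoints = self._keypoint_detector.detect(gray_image)
    keypoints, descriptors = self._keypoint_extractor.compute(gray_image, keypoints)
    # persist the descriptors under this extractor's sub folder (hypothetical layout)
    np.save(os.path.join(self.sub_folder, name + '.npy'), descriptors)
    return keypoints, descriptors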
Example 8: main
# Required module: import cv2 [as alias]
# Or: from cv2 import FeatureDetector_create [as alias]
def main(image_file):
    image = Image.open(image_file)
    if image is None:
        print('Could not load image "%s"' % image_file)
        return
    image = np.array(image.convert('RGB'), dtype=np.uint8)
    # convert RGB (PIL) to BGR (OpenCV) channel order
    image = image[:, :, ::-1].copy()
    winSize = (200, 200)
    stepSize = 32
    roi = extractRoi(image, winSize, stepSize)
    weight_map, mask_scale = next(roi)
    samples = [(rect, scale, cv2.cvtColor(window, cv2.COLOR_BGR2GRAY))
               for rect, scale, window in roi]
    X_test = [window for rect, scale, window in samples]
    coords = [(rect, scale) for rect, scale, window in samples]
    detector = cv2.FeatureDetector_create('SURF')
    extractor = cv2.DescriptorExtractor_create('SURF')
    affine = AffineInvariant(detector, extractor)
    saved = pickle.load(open('classifier.pkl', 'rb'))
    feature_transform = saved['pipe']
    model = saved['model']
    print('Extracting Affine transform invariant features')
    affine_invariant_features = affine.transform(X_test)
    print('Matching features with template')
    features = feature_transform.transform(affine_invariant_features)
    rects = classify(model, features, coords, weight_map, mask_scale)
    for (left, top, right, bottom) in non_max_suppression_fast(rects, 0.4):
        cv2.rectangle(image, (left, top), (right, bottom), (0, 0, 0), 10)
        cv2.rectangle(image, (left, top), (right, bottom), (32, 32, 255), 5)
    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    plt.show()
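A minimal sketch of how this main function might be invoked from the command line; the entry-point handling below is an assumption, not part of the original example, and depends on the script's other imports (sys, pickle, numpy, PIL.Image, matplotlib.pyplot) being present.
if __name__ == '__main__':
    import sys
    if len(sys.argv) != 2:
        print('usage: python detect.py <image-file>')  # script name is hypothetical
        sys.exit(1)
    main(sys.argv[1])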