This article collects typical usage examples of the cv2.NORM_HAMMING attribute in Python. If you are unsure what cv2.NORM_HAMMING is for, how to use it, or want to see it in real code, the curated attribute examples below may help. You can also explore further usage examples of the cv2 module to which this attribute belongs.
The following shows 14 code examples of cv2.NORM_HAMMING, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: init_detector
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_HAMMING [as alias]
def init_detector(self):
    """Init keypoint detector object."""
    # BRIEF is a feature descriptor; CenSurE (STAR) is recommended as the fast detector to pair with it:
    if check_cv_version_is_new():
        # In OpenCV 3/4, STAR/BRIEF live in the contrib module, which must be built separately.
        try:
            self.star_detector = cv2.xfeatures2d.StarDetector_create()
            self.brief_extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create()
        except Exception:
            import traceback
            traceback.print_exc()
            print("to use %s, you should build contrib with opencv3.0" % self.METHOD_NAME)
            raise NoModuleError("There is no %s module in your OpenCV environment !" % self.METHOD_NAME)
    else:
        # OpenCV 2.x
        self.star_detector = cv2.FeatureDetector_create("STAR")
        self.brief_extractor = cv2.DescriptorExtractor_create("BRIEF")
    # create BFMatcher object:
    self.matcher = cv2.BFMatcher(cv2.NORM_L1)  # cv2.NORM_L1 cv2.NORM_L2; cv2.NORM_HAMMING (not usable here)
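For context on the attribute this article covers: OpenCV's documentation recommends cv2.NORM_HAMMING for binary descriptors such as ORB, BRIEF and BRISK. Below is a minimal, hedged sketch of that combination; it is not taken from the example above, and the image file names are placeholders:

import cv2

img1 = cv2.imread("query.png", cv2.IMREAD_GRAYSCALE)   # placeholder images
img2 = cv2.imread("train.png", cv2.IMREAD_GRAYSCALE)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
# binary descriptors -> Hamming distance; crossCheck keeps only mutual best matches
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)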
Example 2: init_feature
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_HAMMING [as alias]
# FLANN_INDEX_KDTREE and FLANN_INDEX_LSH are module-level constants in the original script (1 and 6).
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()       # OpenCV 2.x API
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING     # binary descriptors are compared with the Hamming distance
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        else:
            flann_params = dict(algorithm=FLANN_INDEX_LSH,
                                table_number=6,       # 12
                                key_size=12,          # 20
                                multi_probe_level=1)  # 2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug: need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher
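A possible way to use the returned pair, sketched here under the assumption that img1 and img2 are grayscale images (placeholders, not part of the original sample). With the LSH index, knnMatch may return fewer than two neighbours per query, so the ratio test guards against that:

detector, matcher = init_feature('orb-flann')
kp1, desc1 = detector.detectAndCompute(img1, None)
kp2, desc2 = detector.detectAndCompute(img2, None)
good = []
for pair in matcher.knnMatch(desc1, desc2, k=2):
    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
        good.append(pair[0])   # Lowe's ratio test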
Example 3: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_HAMMING [as alias]
def __init__(self, norm_type=cv2.NORM_HAMMING, cross_check=False, ratio_test=kRatioTest, type=FeatureMatcherTypes.FLANN):
    super().__init__(norm_type=norm_type, cross_check=cross_check, ratio_test=ratio_test, type=type)
    if norm_type == cv2.NORM_HAMMING:
        # FLANN parameters for binary descriptors
        FLANN_INDEX_LSH = 6
        self.index_params = dict(algorithm=FLANN_INDEX_LSH,   # Multi-Probe LSH: Efficient Indexing for High-Dimensional Similarity Search
                                 table_number=6,       # 12
                                 key_size=12,          # 20
                                 multi_probe_level=1)  # 2
    if norm_type == cv2.NORM_L2:
        # FLANN parameters for float descriptors
        FLANN_INDEX_KDTREE = 1
        self.index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=4)
    self.search_params = dict(checks=32)  # or pass an empty dictionary
    self.matcher = cv2.FlannBasedMatcher(self.index_params, self.search_params)
    self.matcher_name = 'FlannFeatureMatcher'
Example 4: matching
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_HAMMING [as alias]
def matching(desc1, desc2, do_ratio_test=False, cross_check=True):
    if desc1.dtype == np.bool_ and desc2.dtype == np.bool_:  # np.bool_ (the np.bool alias was removed from NumPy)
        # pack boolean descriptors into uint8 rows so that the Hamming distance can be used
        desc1, desc2 = np.packbits(desc1, axis=1), np.packbits(desc2, axis=1)
        norm = cv2.NORM_HAMMING
    else:
        desc1, desc2 = np.float32(desc1), np.float32(desc2)
        norm = cv2.NORM_L2
    if do_ratio_test:
        matches = []
        matcher = cv2.BFMatcher(norm)
        for m, n in matcher.knnMatch(desc1, desc2, k=2):
            # store the ratio to the second-best match as the match "distance"
            m.distance = 1.0 if (n.distance == 0) else m.distance / n.distance
            matches.append(m)
    else:
        matcher = cv2.BFMatcher(norm, crossCheck=cross_check)
        matches = matcher.match(desc1, desc2)
    return matches_cv2np(matches)
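To illustrate why the boolean branch calls np.packbits first: cv2.BFMatcher with cv2.NORM_HAMMING expects uint8 descriptor rows and counts differing bits, so 256-bit boolean descriptors must be packed into 32 bytes. A small self-contained sketch with random data, purely illustrative and not from the original module:

import numpy as np
import cv2

rng = np.random.default_rng(0)
d1 = rng.integers(0, 2, size=(500, 256)).astype(bool)      # 500 binary descriptors, 256 bits each
d2 = rng.integers(0, 2, size=(500, 256)).astype(bool)
p1, p2 = np.packbits(d1, axis=1), np.packbits(d2, axis=1)  # shape (500, 32), dtype uint8
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
print(len(bf.match(p1, p2)), "mutual matches")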
Example 5: init_feature
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_HAMMING [as alias]
# FLANN_INDEX_KDTREE and FLANN_INDEX_LSH are module-level constants in the original script (1 and 6).
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()       # OpenCV 2.x API
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(400)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        else:
            flann_params = dict(algorithm=FLANN_INDEX_LSH,
                                table_number=6,       # 12
                                key_size=12,          # 20
                                multi_probe_level=1)  # 2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug: need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher
Example 6: init_feature
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_HAMMING [as alias]
# FLANN_INDEX_KDTREE and FLANN_INDEX_LSH are module-level constants in the original script (1 and 6).
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.xfeatures2d.SIFT_create()   # OpenCV 3/4 contrib API
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        else:
            flann_params = dict(algorithm=FLANN_INDEX_LSH,
                                table_number=6,       # 12
                                key_size=12,          # 20
                                multi_probe_level=1)  # 2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug: need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher
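A hedged usage sketch for the OpenCV 3/4 variant above, assuming two placeholder grayscale images img1 and img2; cv2.drawMatches renders a side-by-side visualisation of the surviving matches:

detector, matcher = init_feature('akaze-flann')
kp1, desc1 = detector.detectAndCompute(img1, None)
kp2, desc2 = detector.detectAndCompute(img2, None)
good = []
for pair in matcher.knnMatch(desc1, desc2, k=2):
    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
        good.append(pair[0])
vis = cv2.drawMatches(img1, kp1, img2, kp2, good, None)
cv2.imwrite("matches.png", vis)   # placeholder output path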
Example 7: get_match_coords
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_HAMMING [as alias]
def get_match_coords(keyPoints1, descriptors1,
                     keyPoints2, descriptors2,
                     matcher=cv2.BFMatcher,
                     norm=cv2.NORM_HAMMING,
                     ratio_test=0.7,
                     verbose=True,
                     **kwargs):
    ''' Filter matching keypoints and convert to X,Y coordinates

    Parameters
    ----------
    keyPoints1 : list - keypoints on img1 from find_key_points()
    descriptors1 : list - descriptors on img1 from find_key_points()
    keyPoints2 : list - keypoints on img2 from find_key_points()
    descriptors2 : list - descriptors on img2 from find_key_points()
    matcher : matcher class from cv2 (e.g. cv2.BFMatcher)
    norm : int - type of distance (e.g. cv2.NORM_HAMMING)
    ratio_test : float - Lowe ratio
    verbose : bool - print some output?

    Returns
    -------
    x1, y1, x2, y2 : coordinates of start and end of displacement [pixels]
    '''
    matches = _get_matches(descriptors1,
                           descriptors2, matcher, norm, verbose)
    x1, y1, x2, y2 = _filter_matches(matches, ratio_test,
                                     keyPoints1, keyPoints2, verbose)
    return x1, y1, x2, y2
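_filter_matches itself is not included in this excerpt. Under the assumption that it applies Lowe's ratio test and then extracts keypoint coordinates, a hypothetical reconstruction might look roughly like this (the body below is a sketch, not the original implementation):

def _filter_matches(matches, ratio_test, keyPoints1, keyPoints2, verbose):
    ''' Hypothetical sketch: keep matches passing the Lowe ratio test and return their coordinates '''
    good = [m for m, n in matches if m.distance < ratio_test * n.distance]
    if verbose:
        print('Keypoints filtered', len(good))
    x1 = np.array([keyPoints1[m.queryIdx].pt[0] for m in good])
    y1 = np.array([keyPoints1[m.queryIdx].pt[1] for m in good])
    x2 = np.array([keyPoints2[m.trainIdx].pt[0] for m in good])
    y2 = np.array([keyPoints2[m.trainIdx].pt[1] for m in good])
    return x1, y1, x2, y2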
Example 8: _get_matches
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_HAMMING [as alias]
def _get_matches(descriptors1, descriptors2, matcher, norm, verbose):
    ''' Match keypoints using BFMatcher with cv2.NORM_HAMMING '''
    t0 = time.time()
    bf = matcher(norm)
    matches = bf.knnMatch(descriptors1, descriptors2, k=2)
    t1 = time.time()
    if verbose:
        print('Keypoints matched', t1 - t0)
    return matches
Example 9: init_detector
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_HAMMING [as alias]
def init_detector(self):
    """Init keypoint detector object."""
    self.detector = cv2.KAZE_create()
    # create BFMatcher object:
    self.matcher = cv2.BFMatcher(cv2.NORM_L1)  # cv2.NORM_L1 cv2.NORM_L2; cv2.NORM_HAMMING is not usable (KAZE descriptors are floating point)
Example 10: init_detector
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_HAMMING [as alias]
def init_detector(self):
    """Init keypoint detector object."""
    self.detector = cv2.BRISK_create()
    # create BFMatcher object:
    self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)  # BRISK descriptors are binary, so cv2.NORM_HAMMING is the appropriate norm
Example 11: feature_matcher_factory
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_HAMMING [as alias]
def feature_matcher_factory(norm_type=cv2.NORM_HAMMING, cross_check=False, ratio_test=kRatioTest, type=FeatureMatcherTypes.FLANN):
    if type == FeatureMatcherTypes.BF:
        return BfFeatureMatcher(norm_type=norm_type, cross_check=cross_check, ratio_test=ratio_test, type=type)
    if type == FeatureMatcherTypes.FLANN:
        return FlannFeatureMatcher(norm_type=norm_type, cross_check=cross_check, ratio_test=ratio_test, type=type)
    return None
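A brief usage note, hedged because only the factory signature is shown here: passing cv2.NORM_HAMMING selects the binary-descriptor configuration (brute force, or the LSH-based FLANN matcher from Example 3), for instance:

matcher = feature_matcher_factory(norm_type=cv2.NORM_HAMMING,
                                  cross_check=False,
                                  type=FeatureMatcherTypes.FLANN)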
Example 12: init_feature
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_HAMMING [as alias]
# FLANN_INDEX_KDTREE and FLANN_INDEX_LSH are module-level constants in the original script (1 and 6).
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.xfeatures2d.SIFT_create()   # the *_create factory functions are required by the OpenCV 3/4 Python bindings
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        else:
            flann_params = dict(algorithm=FLANN_INDEX_LSH,
                                table_number=6,       # 12
                                key_size=12,          # 20
                                multi_probe_level=1)  # 2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug: need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher
Example 13: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_HAMMING [as alias]
def __init__(self):
    if hasattr(self, 'trained') and self.trained:
        return
    super(WarpFilterModel, self).__init__()

    for lang in Localization.get_game_languages():
        model_filename = IkaUtils.get_path(
            'data', 'webcam_calibration.%s.model' % lang)
        if os.path.exists(model_filename):
            break

    self.detector = cv2.AKAZE_create()
    self.norm = cv2.NORM_HAMMING  # AKAZE's default descriptor (MLDB) is binary
    self.matcher = cv2.BFMatcher(self.norm)

    try:
        self.loadModelFromFile(model_filename)
        num_keypoints = len(self.calibration_image_keypoints)
        IkaUtils.dprint('%s: Loaded model data\n %s (%d keypoints)' % (self, model_filename, num_keypoints))
    except:
        IkaUtils.dprint('%s: Could not load model data. Trying to rebuild...' % self)
        calibration_image = cv2.imread('camera/ika_usbcam/Pause.png', 0)
        self.calibration_image_size = calibration_image.shape[:2]
        calibration_image_height, calibration_image_width = \
            calibration_image.shape[:2]
        self.calibration_image_keypoints, self.calibration_image_descriptors = \
            self.detector.detectAndCompute(calibration_image, None)
        print(self.calibration_image_keypoints)
        print(self.calibration_image_descriptors)

        model_filename = IkaUtils.get_path(
            'data',
            'webcam_calibration.%s.model' % Localization.get_game_languages()[0]
        )
        self.saveModelToFile(model_filename)
        IkaUtils.dprint('%s: Created model %s' % (self, model_filename))

    self.trained = True
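Once matches between the calibration image and a camera frame have yielded a 3x3 homography (for example via cv2.findHomography), a warp filter of this kind typically rectifies the frame with cv2.warpPerspective. A hedged sketch, assuming M is such a homography mapping the frame onto the calibration image and frame is a placeholder capture; this is not the project's own model code:

# M: 3x3 homography estimated from matched keypoints (assumption, not shown in the excerpt above)
h, w = self.calibration_image_size
rectified = cv2.warpPerspective(frame, M, (w, h))   # warp the camera frame into calibration-image coordinates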
Example 14: configure
# Required module: import cv2 [as alias]
# Or: from cv2 import NORM_HAMMING [as alias]
def configure():
    global detect_scale
    global the_matcher
    global max_distance
    global min_pairs

    detect_scale = detector_node.getFloat('scale')
    detector_str = detector_node.getString('detector')
    if detector_str == 'SIFT' or detector_str == 'SURF':
        norm = cv2.NORM_L2
        max_distance = 270.0
    elif detector_str == 'ORB' or detector_str == 'Star':
        norm = cv2.NORM_HAMMING
        max_distance = 64
    else:
        log("Detector not specified or not known:", detector_str)
        quit()

    # work around a feature/bug: the FLANN enums are not exposed by cv2
    FLANN_INDEX_KDTREE = 1
    FLANN_INDEX_LSH = 6
    if norm == cv2.NORM_L2:
        flann_params = {
            'algorithm': FLANN_INDEX_KDTREE,
            'trees': 5
        }
    else:
        flann_params = {
            'algorithm': FLANN_INDEX_LSH,
            'table_number': 6,       # 12
            'key_size': 12,          # 20
            'multi_probe_level': 1   # 2
        }
    search_params = {
        'checks': 100
    }
    the_matcher = cv2.FlannBasedMatcher(flann_params, search_params)
    min_pairs = matcher_node.getFloat('min_pairs')
# Iterate through all the matches for the specified image and
# delete keypoints that don't satisfy the homography (or
# fundamental) relationship. Returns true if match set is clean, false
# if keypoints were removed.
#
# Notice: this tends to eliminate matches that aren't all on the
# same plane, so if the scene has a lot of depth, this could knock
# out a lot of good matches.
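The comment above describes pruning matches that violate a homography (or fundamental-matrix) constraint. A generic, hedged sketch of that idea using standard OpenCV calls, not the project's actual function; matches, kp1 and kp2 are assumed inputs:

src = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
H, mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
inliers = [m for m, keep in zip(matches, mask.ravel().tolist()) if keep]
clean = (len(inliers) == len(matches))   # True only if no matches had to be removed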