This page collects typical usage examples of the Python method cv2.findFundamentalMat. If you have been wondering what exactly cv2.findFundamentalMat does and how to use it, the curated code examples below may help. You can also explore further usage examples from the cv2 module that this method belongs to.
Below are 15 code examples of cv2.findFundamentalMat, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
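Before the examples, here is a minimal, self-contained sketch of the basic call pattern (the point coordinates are synthetic and purely illustrative): pass two N x 2 arrays of corresponding points plus an estimation method such as cv2.FM_RANSAC, and receive the 3 x 3 fundamental matrix together with an inlier mask.
import cv2
import numpy as np

# Synthetic corresponding points; at least 8 pairs are required for FM_RANSAC / FM_8POINT.
pts1 = np.float32(np.random.rand(20, 2) * 640)
pts2 = pts1 + np.float32(np.random.randn(20, 2))

# F is a 3x3 matrix (or None on failure); mask marks inliers with 1 and outliers with 0.
F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC, 3.0, 0.99)
if F is not None:
    inlier_pts1 = pts1[mask.ravel() == 1]
    inlier_pts2 = pts2[mask.ravel() == 1]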
Example 1: computeFundamentalMatrix
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def computeFundamentalMatrix(self, kps_ref, kps_cur):
    F, mask = cv2.findFundamentalMat(kps_ref, kps_cur, cv2.FM_RANSAC, param1=kRansacThresholdPixels, param2=kRansacProb)
    if F is None or F.shape == (1, 1):
        # no fundamental matrix found
        raise Exception('No fundamental matrix found')
    elif F.shape[0] > 3:
        # more than one matrix found, just pick the first
        F = F[0:3, 0:3]
    return np.matrix(F), mask
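Note: the param1/param2 keyword names above come from the older OpenCV 2.4/3.x Python binding; newer OpenCV 4.x builds expose the same positions as ransacReprojThreshold and confidence. Passing them positionally, as in the sketch below, should work on either (kRansacThresholdPixels and kRansacProb are project-level constants assumed to be defined elsewhere, as in the example above):
# (points1, points2, method, ransacReprojThreshold/param1, confidence/param2)
F, mask = cv2.findFundamentalMat(kps_ref, kps_cur, cv2.FM_RANSAC,
                                 kRansacThresholdPixels, kRansacProb)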
Example 2: eval_model
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def eval_model(pts, side_info, model, device, postprocess=True):
    pts_orig = pts.copy()
    pts = torch.from_numpy(pts).to(device).unsqueeze(0)
    side_info = torch.from_numpy(side_info).to(torch.float).to(device).unsqueeze(0)
    F_est, rescaling_1, rescaling_2, weights = model(pts, side_info)
    F_est = rescaling_1.permute(0, 2, 1).bmm(F_est[-1].bmm(rescaling_2))
    F_est = F_est / F_est[:, -1, -1].unsqueeze(-1).unsqueeze(-1)
    F = F_est[0].data.cpu().numpy()
    weights = weights[0, 0].data.cpu().numpy()
    F_best = F
    if postprocess:
        inliers_best = np.sum(compute_residual(pts_orig, F) <= 1.0)
        for th in [25, 50, 75]:
            perc = np.percentile(weights, th)
            good = np.where(weights > perc)[0]
            if len(good) < 9:
                continue
            pts_ = pts_orig[good]
            F, _ = cv2.findFundamentalMat(pts_[:, 2:], pts_[:, :2], cv2.FM_LMEDS)
            inliers = np.sum(compute_residual(pts_orig, F) <= 1.0)
            if inliers > inliers_best:
                F_best = F
                inliers_best = inliers
    return F_best
Example 3: close_loop
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def close_loop(db, dbkp, descr, kp):
    matcher = cv2.BFMatcher(cv2.NORM_L2)
    kp, kp_d = kp
    db = np.concatenate(tuple(db), axis=0)
    sim = np.sum(descr * db, axis=-1)
    top_k_sim_ind = np.argpartition(sim, -K)[-K:]
    max_sim = -1.0
    i_max_sim = -1
    best_match_tuple = None
    for k in top_k_sim_ind:
        db_kp, db_kp_d = dbkp[k]
        matches = matcher.knnMatch(kp_d, db_kp_d, 2)
        good = []
        pts1 = []
        pts2 = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)
                pts1.append(db_kp[m.trainIdx].pt)
                pts2.append(kp[m.queryIdx].pt)
        if len(good) > 7:
            pts1 = np.int32(pts1)
            pts2 = np.int32(pts2)
            curr_sim = sim[k]
            if curr_sim > max_sim:
                max_sim = curr_sim
                i_max_sim = k
                best_match_tuple = (kp, db_kp, good, pts1, pts2)
    if i_max_sim > -1:
        F, mask = cv2.findFundamentalMat(best_match_tuple[3],
                                         best_match_tuple[4], cv2.FM_RANSAC)
        if F is None:
            max_sim = -1.0
            i_max_sim = -1
    return i_max_sim
Example 4: main
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def main():
    scale = 1.
    # cv2.imread returns only the image (or None on failure), not a (retval, image) pair
    img_1 = cv2.imread("", 0)  # read images as grayscale; the image paths are elided in the original
    img_2 = cv2.imread("", 0)
    if img_1 is None or img_2 is None:
        print("Reading image failed. Please try again")
        return -1
    points1 = np.array([[]])  # arrays to store feature points
    points2 = np.array([[]])
    feature_detection(img_1, points1)
    status = np.array([])
    feature_tracking(img_1, img_2, points1, points2, status)
    F, mask = cv2.findFundamentalMat(points1, points2, cv2.FM_RANSAC, 0.1, 0.99)
    # E = K^T * F * K; with a single camera the intrinsic matrix K is used on both sides
    E = K.T.dot(F).dot(K)
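The snippet above stops after forming the essential matrix E. As a hedged continuation (assuming the camera intrinsic matrix K and the tracked point arrays points1/points2 from above), the relative pose between the two views could then be recovered with cv2.recoverPose:
# Decomposes E into a rotation R and a unit-length translation t between the views,
# using a cheirality check to pick the physically valid solution.
retval, R, t, pose_mask = cv2.recoverPose(E, points1, points2, K)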
Example 5: compute_fundamental_matrix
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def compute_fundamental_matrix(filename1, filename2):
    """
    Takes in filenames of two input images.
    Returns the fundamental matrix computed
    using the 8-point algorithm.
    """
    # compute ORB keypoints and descriptor for each image
    img1, kp1, des1 = compute_orb_keypoints(filename1)
    img2, kp2, des2 = compute_orb_keypoints(filename2)
    # compute keypoint matches using descriptor
    matches = brute_force_matcher(des1, des2)
    # extract points
    pts1 = []
    pts2 = []
    for m in matches:
        if m.distance < 20:
            #print(m.distance)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
    pts1 = np.asarray(pts1)
    pts2 = np.asarray(pts2)
    # Compute fundamental matrix
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_8POINT)
    return F
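A common next step with a fundamental matrix estimated this way is to visualize the epipolar geometry. The following sketch assumes img1, pts1, pts2 and F come from a function like compute_fundamental_matrix above, and uses cv2.computeCorrespondEpilines to draw, in the first image, the epipolar lines of the points from the second image:
# Each epipolar line comes back as (a, b, c) with a*x + b*y + c = 0.
lines1 = cv2.computeCorrespondEpilines(np.float32(pts2).reshape(-1, 1, 2), 2, F)
lines1 = lines1.reshape(-1, 3)
h, w = img1.shape[:2]
for a, b, c in lines1:
    if abs(b) < 1e-9:
        continue  # skip (nearly) vertical lines in this simple sketch
    p_left = (0, int(round(-c / b)))
    p_right = (w, int(round(-(c + a * w) / b)))
    cv2.line(img1, p_left, p_right, 255, 1)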
Example 6: projectPoint
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def projectPoint(self, point, H, whichImage):
    """
    **SUMMARY**
    This method returns the corresponding point (x, y)
    **PARAMETERS**
    * *point* - Input point (x, y)
    * *whichImage* - Index of the image (1 or 2) that contains the point
    * *H* - Homography that can be estimated
            using StereoCamera.findHomography()
    **RETURNS**
    Corresponding point (x, y) as tuple
    **EXAMPLE**
    >>> img1 = Image("sampleimages/stereo_view1.png")
    >>> img2 = Image("sampleimages/stereo_view2.png")
    >>> stereoImg = StereoImage(img1,img2)
    >>> F,pts1,pts2 = stereoImg.findFundamentalMat()
    >>> point = pts2[0]
    >>> projectPoint = stereoImg.projectPoint(point,H ,1) #finds corresponding point in the left image.
    """
    H = np.matrix(H)
    point = np.matrix((point[1], point[0], 1.0))
    if whichImage == 1:
        corres_pt = H * point.T
    else:
        corres_pt = np.linalg.inv(H) * point.T
    corres_pt = corres_pt / corres_pt[2]
    return (float(corres_pt[1]), float(corres_pt[0]))
Example 7: filterFeatures
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def filterFeatures(p1, p2, K, method):
    inliers = 0
    total = len(p1)
    space = ""
    status = []
    M = None
    if len(p1) < 7:
        # not enough points
        return None, np.zeros(total), [], []
    if method == 'homography':
        M, status = cv2.findHomography(p1, p2, cv2.LMEDS, tol)
    elif method == 'fundamental':
        M, status = cv2.findFundamentalMat(p1, p2, cv2.LMEDS, tol)
    elif method == 'essential':
        M, status = cv2.findEssentialMat(p1, p2, K, cv2.LMEDS, threshold=tol)
    elif method == 'none':
        M = None
        status = np.ones(total)
    newp1 = []
    newp2 = []
    for i, flag in enumerate(status):
        if flag:
            newp1.append(p1[i])
            newp2.append(p2[i])
    p1 = np.float32(newp1)
    p2 = np.float32(newp2)
    inliers = np.sum(status)
    total = len(status)
    #print '%s%d / %d inliers/matched' % (space, np.sum(status), len(status))
    return M, status, np.float32(newp1), np.float32(newp2)
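A hedged usage sketch for a filter of this shape (in the original project tol is a module-level pixel threshold; the threshold, intrinsics, and random points below are made up purely for illustration):
tol = 3.0  # assumed module-level threshold in pixels
K = np.array([[700.0, 0.0, 320.0],
              [0.0, 700.0, 240.0],
              [0.0, 0.0, 1.0]])  # made-up intrinsics, only used for method='essential'
p1 = np.float32(np.random.rand(50, 2) * 640)
p2 = p1 + np.float32(np.random.randn(50, 2))
M, status, in1, in2 = filterFeatures(p1, p2, K, 'fundamental')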
Example 8: filterFeatures
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def filterFeatures(p1, p2, K, method):
    inliers = 0
    total = len(p1)
    space = ""
    status = []
    while inliers < total and total >= 7:
        if method == 'homography':
            M, status = cv2.findHomography(p1, p2, cv2.LMEDS, tol)
        elif method == 'fundamental':
            M, status = cv2.findFundamentalMat(p1, p2, cv2.LMEDS, tol)
        elif method == 'essential':
            M, status = cv2.findEssentialMat(p1, p2, K, cv2.LMEDS, threshold=tol)
        elif method == 'none':
            M = None
            status = np.ones(total)
        newp1 = []
        newp2 = []
        for i, flag in enumerate(status):
            if flag:
                newp1.append(p1[i])
                newp2.append(p2[i])
        p1 = np.float32(newp1)
        p2 = np.float32(newp2)
        inliers = np.sum(status)
        total = len(status)
        #print '%s%d / %d inliers/matched' % (space, np.sum(status), len(status))
        space += " "
    return M, status, np.float32(newp1), np.float32(newp2)
Example 9: get_matches
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def get_matches(self, feat1, feat2, cv_kpts1, cv_kpts2, ratio=None, cross_check=True, err_thld=4, info=''):
    """Compute putative and inlier matches.
    Args:
        feat: (n_kpts, 128) Local features.
        cv_kpts: A list of keypoints represented as cv2.KeyPoint.
        ratio: The threshold to apply ratio test.
        cross_check: (True by default) Whether to apply cross check.
        err_thld: Epipolar error threshold.
        info: Info to print out.
    Returns:
        good_matches: Putative matches.
        mask: The mask to distinguish inliers/outliers on putative matches.
    """
    init_matches1 = self.matcher.knnMatch(feat1, feat2, k=2)
    init_matches2 = self.matcher.knnMatch(feat2, feat1, k=2)
    good_matches = []
    for i in range(len(init_matches1)):
        cond = True
        if cross_check:
            cond1 = cross_check and init_matches2[init_matches1[i][0].trainIdx][0].trainIdx == i
            cond *= cond1
        if ratio is not None:
            cond2 = init_matches1[i][0].distance <= ratio * init_matches1[i][1].distance
            cond *= cond2
        if cond:
            good_matches.append(init_matches1[i][0])
    if type(cv_kpts1) is list and type(cv_kpts2) is list:
        good_kpts1 = np.array([cv_kpts1[m.queryIdx].pt for m in good_matches])
        good_kpts2 = np.array([cv_kpts2[m.trainIdx].pt for m in good_matches])
    elif type(cv_kpts1) is np.ndarray and type(cv_kpts2) is np.ndarray:
        good_kpts1 = np.array([cv_kpts1[m.queryIdx] for m in good_matches])
        good_kpts2 = np.array([cv_kpts2[m.trainIdx] for m in good_matches])
    else:
        raise Exception("Keypoint type error!")
    _, mask = cv2.findFundamentalMat(good_kpts1, good_kpts2, cv2.RANSAC, err_thld, confidence=0.999)
    n_inlier = np.count_nonzero(mask)
    print(info, 'n_putative', len(good_matches), 'n_inlier', n_inlier)
    return good_matches, mask
Example 10: get_matches
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def get_matches(self, feat1, feat2, cv_kpts1, cv_kpts2, ratio=None, cross_check=True, err_thld=4, ransac=True, info=''):
    """Compute putative and inlier matches.
    Args:
        feat: (n_kpts, 128) Local features.
        cv_kpts: A list of keypoints represented as cv2.KeyPoint.
        ratio: The threshold to apply ratio test.
        cross_check: (True by default) Whether to apply cross check.
        err_thld: Epipolar error threshold.
        info: Info to print out.
    Returns:
        good_matches: Putative matches.
        mask: The mask to distinguish inliers/outliers on putative matches.
    """
    init_matches1 = self.matcher.knnMatch(feat1, feat2, k=2)
    init_matches2 = self.matcher.knnMatch(feat2, feat1, k=2)
    good_matches = []
    for i in range(len(init_matches1)):
        cond = True
        if cross_check:
            cond1 = cross_check and init_matches2[init_matches1[i][0].trainIdx][0].trainIdx == i
            cond *= cond1
        if ratio is not None:
            cond2 = init_matches1[i][0].distance <= ratio * init_matches1[i][1].distance
            cond *= cond2
        if cond:
            good_matches.append(init_matches1[i][0])
    if type(cv_kpts1) is list and type(cv_kpts2) is list:
        good_kpts1 = np.array([cv_kpts1[m.queryIdx].pt for m in good_matches])
        good_kpts2 = np.array([cv_kpts2[m.trainIdx].pt for m in good_matches])
    elif type(cv_kpts1) is np.ndarray and type(cv_kpts2) is np.ndarray:
        good_kpts1 = np.array([cv_kpts1[m.queryIdx] for m in good_matches])
        good_kpts2 = np.array([cv_kpts2[m.trainIdx] for m in good_matches])
    else:
        raise Exception("Keypoint type error!")
    if ransac:
        _, mask = cv2.findFundamentalMat(
            good_kpts1, good_kpts2, cv2.RANSAC, err_thld, confidence=0.999)
        n_inlier = np.count_nonzero(mask)
        print(info, 'n_putative', len(good_matches), 'n_inlier', n_inlier)
    else:
        mask = np.ones((len(good_matches), ))
        print(info, 'n_putative', len(good_matches))
    return good_matches, mask
Example 11: matchWithCrossCheckAndModelFit
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def matchWithCrossCheckAndModelFit(self, des1, des2, kps1, kps2, ratio_test=None, cross_check=True, err_thld=1, info=''):
    """Compute putative and inlier matches.
    Args:
        feat: (n_kpts, 128) Local features.
        cv_kpts: A list of keypoints represented as cv2.KeyPoint.
        ratio_test: The threshold to apply ratio test.
        cross_check: (True by default) Whether to apply cross check.
        err_thld: Epipolar error threshold.
        info: Info to print out.
    Returns:
        good_matches: Putative matches.
        mask: The mask to distinguish inliers/outliers on putative matches.
    """
    idx1, idx2 = [], []
    if ratio_test is None:
        ratio_test = self.ratio_test
    init_matches1 = self.matcher.knnMatch(des1, des2, k=2)
    init_matches2 = self.matcher.knnMatch(des2, des1, k=2)
    good_matches = []
    for i, (m1, n1) in enumerate(init_matches1):
        cond = True
        if cross_check:
            cond1 = cross_check and init_matches2[m1.trainIdx][0].trainIdx == i
            cond *= cond1
        if ratio_test is not None:
            cond2 = m1.distance <= ratio_test * n1.distance
            cond *= cond2
        if cond:
            good_matches.append(m1)
            idx1.append(m1.queryIdx)
            idx2.append(m1.trainIdx)
    if type(kps1) is list and type(kps2) is list:
        good_kps1 = np.array([kps1[m.queryIdx].pt for m in good_matches])
        good_kps2 = np.array([kps2[m.trainIdx].pt for m in good_matches])
    elif type(kps1) is np.ndarray and type(kps2) is np.ndarray:
        good_kps1 = np.array([kps1[m.queryIdx] for m in good_matches])
        good_kps2 = np.array([kps2[m.trainIdx] for m in good_matches])
    else:
        raise Exception("Keypoint type error!")
    _, mask = cv2.findFundamentalMat(good_kps1, good_kps2, cv2.RANSAC, err_thld, confidence=0.999)
    n_inlier = np.count_nonzero(mask)
    print(info, 'n_putative', len(good_matches), 'n_inlier', n_inlier)
    return idx1, idx2, good_matches, mask
# input: des1 = query-descriptors, des2 = train-descriptors
# output: idx1, idx2 (vectors of corresponding indexes in des1 and des2, respectively)
# N.B.: this returns matches where each trainIdx index is associated to only one queryIdx index
Example 12: match_rpc
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def match_rpc(rpc1, rpc2, rows, columns, x_km=1.0, z_km=0.5, num_samples=100):
    # get UTM zone
    clat, clon, cheight = rpc1.approximate_wgs84()
    easting, northing, zone_number, zone_letter = wgs84_to_utm(clat, clon)
    # sample local world coordinates around the center coordinate
    print('finding virtual xyz correspondences...')
    np.random.seed(0)
    dlat = dlon = (x_km / 2.0) / 111.0
    dheight = (z_km / 2.0) * 1000.0
    lat = np.random.uniform(clat - dlat, clat + dlat, num_samples)
    lon = np.random.uniform(clon - dlon, clon + dlon, num_samples)
    z = np.random.uniform(cheight - dheight, cheight + dheight, num_samples)
    # project into both images
    i1, j1 = rpc1.forward_array(lon, lat, z)
    i1 = np.int32(np.round(i1))
    j1 = np.int32(np.round(j1))
    i2, j2 = rpc2.forward_array(lon, lat, z)
    i2 = np.int32(np.round(i2))
    j2 = np.int32(np.round(j2))
    # remove invalid image coordinates
    keep = (i1 > 0) & (i1 < columns - 1) & (j1 > 0) & (j1 < rows - 1)
    lat = lat[keep]
    lon = lon[keep]
    i1 = i1[keep]
    j1 = j1[keep]
    i2 = i2[keep]
    j2 = j2[keep]
    keep = (i2 > 0) & (i2 < columns - 1) & (j2 > 0) & (j2 < rows - 1)
    lat = lat[keep]
    lon = lon[keep]
    i1 = i1[keep]
    j1 = j1[keep]
    i2 = i2[keep]
    j2 = j2[keep]
    print(np.asarray(i1).shape)
    count = np.asarray(i1).shape[0]
    pts1 = np.asarray([(i1, j1)])
    pts1 = pts1[0, :, :].transpose()
    pts2 = np.asarray([(i2, j2)])
    pts2 = pts2[0, :, :].transpose()
    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    print('Points: ', len(pts1))
    print('Fundamental matrix = ')
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_8POINT)
    print(F)
    return F, pts1, pts2
# get epipolar rectification matrices
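The trailing comment refers to a rectification step that follows in the original project. One hedged way to obtain rectifying homographies from just the correspondences and F returned above is cv2.stereoRectifyUncalibrated (rows and columns are the image dimensions passed into match_rpc):
# H1 and H2 warp the two images so that corresponding epipolar lines become horizontal.
retval, H1, H2 = cv2.stereoRectifyUncalibrated(
    np.float32(pts1), np.float32(pts2), F, (columns, rows))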
Example 13: Eline
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def Eline(self, point, F, whichImage):
    """
    **SUMMARY**
    This method returns the epipolar line as a line feature object.
    **PARAMETERS**
    * *point* - Input point (x, y)
    * *F* - Fundamental matrix.
    * *whichImage* - Index of the image (1 or 2) that contains the point
    **RETURNS**
    epipolar line, in the form of a line feature object.
    **EXAMPLE**
    >>> img1 = Image("sampleimages/stereo_view1.png")
    >>> img2 = Image("sampleimages/stereo_view2.png")
    >>> stereoImg = StereoImage(img1,img2)
    >>> F,pts1,pts2 = stereoImg.findFundamentalMat()
    >>> point = pts2[0]
    >>> epiline = mapper.Eline(point,F, 1) #find corresponding Epipolar line in the left image.
    """
    from SimpleCV.Features.Detection import Line
    pts1 = (0, 0)
    pts2 = self.size
    pt_cvmat = cv.CreateMat(1, 1, cv.CV_32FC2)
    pt_cvmat[0, 0] = (point[1], point[0])  # OpenCV seems to use (y, x) coordinates here.
    line = cv.CreateMat(1, 1, cv.CV_32FC3)
    cv.ComputeCorrespondEpilines(pt_cvmat, whichImage, npArray2cvMat(F), line)
    line_npArray = np.array(line).squeeze()
    line_npArray = line_npArray[[1, 0, 2]]
    pts1 = (pts1[0], (-line_npArray[2] - line_npArray[0] * pts1[0]) / line_npArray[1])
    pts2 = (pts2[0], (-line_npArray[2] - line_npArray[0] * pts2[0]) / line_npArray[1])
    if whichImage == 1:
        return Line(self.ImageLeft, [pts1, pts2])
    elif whichImage == 2:
        return Line(self.ImageRight, [pts1, pts2])
Example 14: filter_by_transform
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def filter_by_transform(K, i1, i2, transform):
    clean = True
    # tol = float(i1.width) / 200.0 # rejection range in pixels
    tol = math.pow(i1.width, 0.25)
    if tol < 1.0:
        tol = 1.0
    # print "tol = %.4f" % tol
    matches = i1.match_list[i2.name]
    if len(matches) < min_pairs:
        i1.match_list[i2.name] = []
        return True
    p1 = []
    p2 = []
    for k, pair in enumerate(matches):
        use_raw_uv = False
        if use_raw_uv:
            p1.append(i1.kp_list[pair[0]].pt)
            p2.append(i2.kp_list[pair[1]].pt)
        else:
            # undistorted uv points should be better if the camera
            # calibration is known, right?
            p1.append(i1.uv_list[pair[0]])
            p2.append(i2.uv_list[pair[1]])
    p1 = np.float32(p1)
    p2 = np.float32(p2)
    #print "p1 = %s" % str(p1)
    #print "p2 = %s" % str(p2)
    method = cv2.RANSAC
    #method = cv2.LMEDS
    if transform == "homography":
        M, status = cv2.findHomography(p1, p2, method, tol)
    elif transform == "fundamental":
        M, status = cv2.findFundamentalMat(p1, p2, method, tol)
    elif transform == "essential":
        M, status = cv2.findEssentialMat(p1, p2, K, method, threshold=tol)
    elif transform == "none":
        status = np.ones(len(matches))
    else:
        # fail
        M, status = None, None
    log(" %s vs %s: %d / %d inliers/matched" % (i1.name, i2.name, np.sum(status), len(status)))
    # remove outliers
    for k, flag in enumerate(status):
        if not flag:
            # print("  deleting: " + str(matches[k]))
            clean = False
            matches[k] = (-1, -1)
    for pair in reversed(matches):
        if pair == (-1, -1):
            matches.remove(pair)
    return clean
# Filter duplicate features. SIFT (for example) can detect the same
# feature at different scales/orientations which can lead to duplicate
# match pairs, or possibly one feature in image1 matching two or more
# features in image2. Find and remove these from the set.
Example 15: reviewFundamentalErrors
# Required import: import cv2 [as alias]
# Or: from cv2 import findFundamentalMat [as alias]
def reviewFundamentalErrors(self, fuzz_factor=1.0, interactive=True):
    total_removed = 0
    # test the fundamental matrix constraint
    for i, i1 in enumerate(self.image_list):
        # rejection range in pixels
        tol = float(i1.width) / 800.0 + fuzz_factor
        print("tol = %.4f" % tol)
        if tol < 0.0:
            tol = 0.0
        for key in i1.match_list:
            matches = i1.match_list[key]
            i2 = self.findImageByName[key]
            if i1.name == i2.name:
                continue
            if len(matches) < min_pairs:
                i1.match_list[i2.name] = []
                continue
            p1 = []
            p2 = []
            for k, pair in enumerate(matches):
                p1.append(i1.kp_list[pair[0]].pt)
                p2.append(i2.kp_list[pair[1]].pt)
            p1 = np.float32(p1)
            p2 = np.float32(p2)
            #print "p1 = %s" % str(p1)
            #print "p2 = %s" % str(p2)
            M, status = cv2.findFundamentalMat(p1, p2, cv2.RANSAC, tol)
            size = len(status)
            inliers = np.sum(status)
            print('  %s vs %s: %d / %d inliers/matched'
                  % (i1.name, i2.name, inliers, size))
            if inliers < size:
                total_removed += (size - inliers)
                if interactive:
                    status = self.showMatch(i1, i2, matches, status)
                delete_list = []
                for k, flag in enumerate(status):
                    if not flag:
                        print("    deleting: " + str(matches[k]))
                        #match[i] = (-1, -1)
                        delete_list.append(matches[k])
                for pair in delete_list:
                    self.deletePair(i, j, pair)
    return total_removed
# return true if point set is pretty close to linear