本文整理匯總了Python中cv2.undistortPoints方法的典型用法代碼示例。如果您正苦於以下問題:Python cv2.undistortPoints方法的具體用法?Python cv2.undistortPoints怎麽用?Python cv2.undistortPoints使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在模塊cv2的用法示例。
在下文中一共展示了cv2.undistortPoints方法的12個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: undistort_image_bounds
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import undistortPoints [as 別名]
def undistort_image_bounds(self):
    """Undistort the four image corners and refresh self.u_min/u_max/v_min/v_max.

    When the camera has no distortion the bounds are left unchanged.
    """
    corners = np.array([[self.u_min, self.v_min],
                        [self.u_min, self.v_max],
                        [self.u_max, self.v_min],
                        [self.u_max, self.v_max]], dtype=np.float32).reshape(4, 2)
    if self.is_distorted:
        # cv2.undistortPoints expects (N, 1, 2); P=self.K keeps pixel scale
        undistorted = cv2.undistortPoints(np.expand_dims(corners, axis=1),
                                          self.K, self.D, None, self.K)
        undistorted = undistorted.ravel().reshape(undistorted.shape[0], 2)
    else:
        undistorted = corners
    # Rows: 0=(u_min,v_min) 1=(u_min,v_max) 2=(u_max,v_min) 3=(u_max,v_max)
    self.u_min = min(undistorted[0][0], undistorted[1][0])
    self.u_max = max(undistorted[2][0], undistorted[3][0])
    self.v_min = min(undistorted[0][1], undistorted[2][1])
    self.v_max = max(undistorted[1][1], undistorted[3][1])
示例2: undistort_uvlist
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import undistortPoints [as 別名]
def undistort_uvlist(self, image, uv_orig):
    """Undistort a list of (u, v) pixel coordinates.

    Uses the module-level `camera` object for intrinsics; since K is also
    passed as P, the output stays in pixel coordinates.  Returns a list of
    2-element arrays (empty list for empty input).
    """
    if len(uv_orig) == 0:
        return []
    # camera parameters from the module-level camera object
    dist_coeffs = np.array(camera.get_dist_coeffs())
    K = camera.get_K()
    # cv2.undistortPoints wants an (N, 1, 2) float32 array
    uv_raw = np.zeros((len(uv_orig), 1, 2), dtype=np.float32)
    for idx, pt in enumerate(uv_orig):
        uv_raw[idx][0] = (pt[0], pt[1])
    uv_new = cv2.undistortPoints(uv_raw, K, dist_coeffs, P=K)
    # flatten (N, 1, 2) back into a plain list of 2-vectors
    return [row[0] for row in uv_new]
# for each feature in each image, compute the undistorted pixel
# location (from the calibrated distortion parameters)
示例3: estimate_relative_pose_from_correspondence
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import undistortPoints [as 別名]
def estimate_relative_pose_from_correspondence(pts1, pts2, K1, K2):
    """Estimate the relative pose (R, t) between two views from correspondences.

    Points are normalized with each camera's intrinsics, the essential
    matrix is estimated with RANSAC, and the pose is recovered via
    cv2.recoverPose.

    :param pts1: (N, 2) image points in view 1
    :param pts2: (N, 2) corresponding image points in view 2
    :param K1: (3, 3) intrinsic matrix of view 1
    :param K2: (3, 3) intrinsic matrix of view 2
    :return: (inlier_mask, R_est, t_est) — boolean mask of RANSAC inliers,
        estimated rotation and (unit-scale) translation
    """
    f_avg = (K1[0, 0] + K2[0, 0]) / 2
    pts1, pts2 = np.ascontiguousarray(pts1, np.float32), np.ascontiguousarray(pts2, np.float32)
    # Normalize to the image plane (intrinsics removed, no distortion model)
    pts_l_norm = cv2.undistortPoints(np.expand_dims(pts1, axis=1), cameraMatrix=K1, distCoeffs=None)
    pts_r_norm = cv2.undistortPoints(np.expand_dims(pts2, axis=1), cameraMatrix=K2, distCoeffs=None)
    # RANSAC threshold is 3 px, scaled into normalized units by the average focal length
    E, mask = cv2.findEssentialMat(pts_l_norm, pts_r_norm, focal=1.0, pp=(0., 0.),
                                   method=cv2.RANSAC, prob=0.999, threshold=3.0 / f_avg)
    points, R_est, t_est, mask_pose = cv2.recoverPose(E, pts_l_norm, pts_r_norm)
    # Fix: np.bool was deprecated and removed in NumPy 1.24; builtin bool is equivalent
    return mask[:, 0].astype(bool), R_est, t_est
示例4: undistort_points
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import undistortPoints [as 別名]
def undistort_points(self, uvs):
    """Undistort an (N, >=2) array of pixel coordinates; returns an (N, 2) array.

    If the camera has no distortion, the input is returned unchanged.
    """
    if not self.is_distorted:
        return uvs
    # cv2.undistortPoints asserts src.isContinuous(); a sliced view may not be
    # contiguous, so force a contiguous copy shaped (N, 1, 2) first.
    pts = np.ascontiguousarray(uvs[:, :2]).reshape((uvs.shape[0], 1, 2))
    undistorted = cv2.undistortPoints(pts, self.K, self.D, None, self.K)
    return undistorted.ravel().reshape(undistorted.shape[0], 2)
# update image bounds
示例5: check_calibration
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import undistortPoints [as 別名]
def check_calibration(self, calibration):
    """
    Check calibration quality by computing average reprojection error.

    First, undistort detected points and compute epilines for each side.
    Then compute the error between the computed epipolar lines and the
    position of the points detected on the other side for each point and
    return the average error.
    """
    sides = "left", "right"
    # cv2.computeCorrespondEpilines numbers the source image 1 (left) or 2 (right)
    which_image = {sides[0]: 1, sides[1]: 2}
    undistorted, lines = {}, {}
    for side in sides:
        # Undistort all detected points for this side; P=cam_mats keeps the
        # result in pixel coordinates so it is comparable to the epilines.
        undistorted[side] = cv2.undistortPoints(
            np.concatenate(self.image_points[side]).reshape(-1,
                                                            1, 2),
            calibration.cam_mats[side],
            calibration.dist_coefs[side],
            P=calibration.cam_mats[side])
        # Epilines in the *other* image induced by this side's points
        lines[side] = cv2.computeCorrespondEpilines(undistorted[side],
                                                    which_image[side],
                                                    calibration.f_mat)
    total_error = 0
    this_side, other_side = sides
    for side in sides:
        for i in range(len(undistorted[side])):
            # |a*x + b*y + c|: residual of point (x, y) against the epiline
            # (a, b, c) computed from the opposite image's detection.
            total_error += abs(undistorted[this_side][i][0][0] *
                               lines[other_side][i][0][0] +
                               undistorted[this_side][i][0][1] *
                               lines[other_side][i][0][1] +
                               lines[other_side][i][0][2])
        # Swap roles so the second pass accumulates the symmetric error
        other_side, this_side = sides
    total_points = self.image_count * len(self.object_points)
    return total_error / total_points
示例6: undistort
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import undistortPoints [as 別名]
def undistort(self, distorted_image_coords, Kundistortion=None):
    """
    Remove distortion from image coordinates.

    Dispatches on self.calibration_type: 'division' applies the division
    distortion model via self._undistort_division, 'opencv' and
    'opencv_fisheye' delegate to OpenCV; any other value returns the input
    unchanged after emitting a warning.

    :param distorted_image_coords: real image coordinates
    :type distorted_image_coords: numpy.ndarray, shape=(2, n)
    :param Kundistortion: camera matrix for undistorted view, None for self.K
    :type Kundistortion: array-like, shape=(3, 3)
    :return: linear image coordinates
    :rtype: numpy.ndarray, shape=(2, n)
    """
    assert distorted_image_coords.shape[0] == 2
    assert distorted_image_coords.ndim == 2
    if Kundistortion is None:
        Kundistortion = self.K
    if self.calibration_type == 'division':
        # Back-project with A^-1 into normalized coordinates, undistort with
        # the division model, then re-project with A (e2p/p2e convert between
        # Euclidean and projective point representations).
        A = self.get_A(Kundistortion)
        Ainv = np.linalg.inv(A)
        undistorted_image_coords = p2e(A.dot(e2p(self._undistort_division(p2e(Ainv.dot(e2p(distorted_image_coords)))))))
    elif self.calibration_type == 'opencv':
        # OpenCV expects points shaped (1, n, 2); transpose the result back to (2, n)
        undistorted_image_coords = cv2.undistortPoints(distorted_image_coords.T.reshape((1, -1, 2)),
                                                       self.K, self.opencv_dist_coeff,
                                                       P=Kundistortion).reshape(-1, 2).T
    elif self.calibration_type == 'opencv_fisheye':
        undistorted_image_coords = cv2.fisheye.undistortPoints(distorted_image_coords.T.reshape((1, -1, 2)),
                                                               self.K, self.opencv_dist_coeff,
                                                               P=Kundistortion).reshape(-1, 2).T
    else:
        warn('undistortion not implemented')
        undistorted_image_coords = distorted_image_coords
    assert undistorted_image_coords.shape[0] == 2
    assert undistorted_image_coords.ndim == 2
    return undistorted_image_coords
示例7: localize
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import undistortPoints [as 別名]
def localize(self, query_info, query_item, global_transf, local_transf):
    """Localize a query image via the C++ backend and return (LocResult, stats).

    Global and local descriptors are transformed with the given callables,
    keypoints are undistorted, and everything is handed to
    self.cpp_backend.localize, whose 12-tuple result is unpacked into a
    LocResult plus a statistics dict.
    """
    global_desc = global_transf(query_item.global_desc[np.newaxis])[0]
    local_desc = local_transf(query_item.local_desc)
    # Undistort keypoints; only a single distortion coefficient is used
    # (presumably radial k1 — dist vector is [dist, 0, 0, 0]; confirm with
    # the calibration source).  No P argument, so the result is in
    # normalized camera coordinates.
    keypoints = cv2.undistortPoints(
        query_item.keypoints[np.newaxis], query_info.K,
        np.array([query_info.dist, 0, 0, 0]))[0]
    logging.info('Localizing image %s', query_info.name)
    # Backend expects float32, column-major (D, N) layout — hence .T.copy()
    ret = self.cpp_backend.localize(
        global_desc.astype(np.float32),
        keypoints.astype(np.float32).T.copy(),
        local_desc.astype(np.float32).T.copy())
    (success, num_components_total, num_components_tested,
     last_component_size, num_db_landmarks, num_matches,
     num_inliers, num_iters, global_ms, covis_ms, local_ms, pnp_ms) = ret
    # Pose is not returned by this backend: placeholder identity and 0 error
    result = LocResult(success, num_inliers, 0, np.eye(4))
    stats = {
        'success': success,
        'num_components_total': num_components_total,
        'num_components_tested': num_components_tested,
        'last_component_size': last_component_size,
        'num_db_landmarks': num_db_landmarks,
        'num_matches': num_matches,
        'num_inliers': num_inliers,
        'num_ransac_iters': num_iters,
        'timings': {
            'global': global_ms,
            'covis': covis_ms,
            'local': local_ms,
            'pnp': pnp_ms,
        }
    }
    return (result, stats)
示例8: undistort
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import undistortPoints [as 別名]
def undistort(uv_orig):
    """Undistort a single (u, v) pixel using the module-level K / dist_coeffs.

    P=K keeps the result in pixel coordinates; returns a 2-element array.
    """
    # OpenCV wants an (N, 1, 2) float32 array; here N == 1
    pt = np.array([[[uv_orig[0], uv_orig[1]]]], dtype=np.float32)
    undistorted = cv2.undistortPoints(pt, K, dist_coeffs, P=K)
    return undistorted[0][0]
# cull points from the per-image pool that project outside the grid boundaries
示例9: undistort
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import undistortPoints [as 別名]
def undistort(uv_orig):
    """Map one raw (u, v) pixel to its undistorted location.

    Relies on module-level globals K and dist_coeffs; since K is passed as
    the projection matrix P, the output stays in pixel coordinates.
    """
    # Build the (1, 1, 2) float32 array cv2.undistortPoints requires
    uv_raw = np.zeros((1, 1, 2), dtype=np.float32)
    uv_raw[0, 0, 0] = uv_orig[0]
    uv_raw[0, 0, 1] = uv_orig[1]
    result = cv2.undistortPoints(uv_raw, K, dist_coeffs, P=K)
    return result[0][0]
示例10: undistort_features
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import undistortPoints [as 別名]
def undistort_features(self, optimized=False):
    """Undistort every keypoint in self.kp_list and store the results in self.uv_list.

    Fixes the original, which raised NameError by referencing the undefined
    names `image` and `optimized`: the method guards on self.kp_list, so it
    clearly operates on `self`, and `optimized` is now an explicit keyword
    argument selecting the optimized camera calibration (default False keeps
    the call signature backward-compatible).
    """
    if not len(self.kp_list):
        return
    K = self.cam.get_K(optimized)
    dist_coeffs = self.cam.get_dist_coeffs(optimized)
    # Assemble keypoints as the (N, 1, 2) float32 array OpenCV expects
    uv_raw = np.zeros((len(self.kp_list), 1, 2), dtype=np.float32)
    for i, kp in enumerate(self.kp_list):
        uv_raw[i][0] = (kp.pt[0], kp.pt[1])
    # P=K keeps the undistorted points in pixel coordinates
    uv_new = cv2.undistortPoints(uv_raw, K, np.array(dist_coeffs), P=K)
    self.uv_list = [uv_new[i][0] for i in range(len(uv_new))]
示例11: undistort_image_keypoints
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import undistortPoints [as 別名]
def undistort_image_keypoints(self, image, optimized=False):
    """Compute the undistorted pixel location of every keypoint of `image`.

    Results are stored on image.uv_list as 2-element arrays.  Intrinsics and
    distortion coefficients come from the module-level `camera` object;
    `optimized` selects the optimized calibration.
    """
    if len(image.kp_list) == 0:
        return
    K = camera.get_K(optimized)
    dist_coeffs = camera.get_dist_coeffs(optimized)
    # Pack keypoints into the (N, 1, 2) float32 layout OpenCV requires
    raw = np.zeros((len(image.kp_list), 1, 2), dtype=np.float32)
    for idx, kp in enumerate(image.kp_list):
        raw[idx][0] = (kp.pt[0], kp.pt[1])
    # P=K keeps the output in pixel coordinates
    new = cv2.undistortPoints(raw, K, np.array(dist_coeffs), P=K)
    image.uv_list = [pt[0] for pt in new]
# for each feature in each image, compute the undistorted pixel
# location (from the calibrated distortion parameters)
示例12: cpmtriangulate
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import undistortPoints [as 別名]
def cpmtriangulate(pts):
    """Triangulate 3D points from paired 2D detections of a calibrated stereo rig.

    Camera intrinsics and the relative pose are hard-coded from a prior
    calibration (the commented-out numbers are an alternative calibration
    run).  Relies on the helpers Camera, coordinate_recover and triangulate
    defined elsewhere in this project.

    :param pts: array of shape (N, 2, 2) — for each point, one 2D detection
        per camera; axis 1 is reversed below, so presumably the input
        arrives with the views in the opposite order — TODO confirm with
        the caller.
    :return: (N, 3) numpy array of triangulated 3D points
    """
    # Swap the two views along axis 1
    pts = pts[:,::-1,:]
    c1 = Camera(Camera.buildK(
        #[564.5793378468188, 562.7507396707426, 807.514870534443, 638.3417715516073]),
        [517.2287393382929, 525.0704075144106, 774.5928420208769, 591.6267497011125]),
        np.eye(3),
        np.zeros((3,1)))
    # [R | t] of camera 2 relative to camera 1
    P2 = np.array([
        #[0.9987049032311739, 0.005161677353747297, -0.05061495183159303, 0.0975936934184045],
        #[-0.004173863762698966, 0.9997991391796881, 0.01960255485522677, 0.00181642123998563],
        #[0.05070596733431972, -0.01936590773647232, 0.9985258466831194, 0.006270242291420671]
        [0.9997257921076083, -0.002649760120974218, -0.023266270996836397, 0.09259191413077857],
        [0.0027696869905852674, 0.9999830374718406, 0.005123826943546446, -0.0014153393536146166],
        [0.02325229942975788, -0.005186862237858692, 0.9997161732368524, -0.0005078842007711909]
    ])
    c2 = Camera(Camera.buildK(
        [521.1484829793496, 526.8842673949462, 789.4993718170895, 576.4476020205435]),
        P2[:,:3],
        P2[:,3])
    #c1, c2 = read_temple_camera()
    npts = pts.shape[0]
    # Recover full-resolution image coordinates for each view
    pts_coord = [[], []]
    for p in pts:
        p1, p2 = p[0], p[1]
        p1, p2 = coordinate_recover(p1), coordinate_recover(p2)
        pts_coord[0].append(p1)
        pts_coord[1].append(p2)
    # (N, 1, 2) float32 layout required by cv2.undistortPoints
    pts1 = np.asarray(pts_coord[0]).reshape((npts,1,2)).astype('float32')
    pts2 = np.asarray(pts_coord[1]).reshape((npts,1,2)).astype('float32')
    if True: # do undistort:
        # No P argument: undistorted points come back in normalized camera
        # coordinates, so the cameras are rebuilt below with identity K.
        pts1 = cv2.undistortPoints(pts1, c1.K,
            np.array([-0.23108204 ,0.03321534, 0.00227184 ,0.00240575]))
        #pts1 = cv2.undistortPoints(pts1, c1.K,
        #np.array([0,0,0,0]))
        pts1 = pts1.reshape((npts,2))
        pts2 = cv2.undistortPoints(pts2, c2.K,
            np.array([-0.23146758 ,0.03342091 ,0.00133691 ,0.00034652]))
        #pts2 = cv2.undistortPoints(pts2, c2.K,
        #np.array([0,0,0,0]))
        pts2 = pts2.reshape((npts,2))
        c1 = Camera(np.eye(3),c1.R,c1.t)
        c2 = Camera(np.eye(3),c2.R,c2.t)
    else:
        pts1 = pts1[:,0,:]
        pts2 = pts2[:,0,:]
    # Triangulate each correspondence pair into a 3D point
    pts3d = []
    for p1, p2 in zip(pts1, pts2):
        p3d = triangulate(c1, c2, p1, p2)
        pts3d.append(p3d)
    pts3d = np.array(pts3d)
    return pts3d