This article collects typical usage examples of the cv2.recoverPose method in Python. If you are wondering what cv2.recoverPose does and how to use it, the curated code examples below may help. You can also explore further usage examples of the containing module, cv2.
Six code examples of cv2.recoverPose are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: estimate_relative_pose_from_correspondence
# Module to import: import cv2 [as alias]
# Or: from cv2 import recoverPose [as alias]
def estimate_relative_pose_from_correspondence(pts1, pts2, K1, K2):
    f_avg = (K1[0, 0] + K2[0, 0]) / 2
    pts1, pts2 = np.ascontiguousarray(pts1, np.float32), np.ascontiguousarray(pts2, np.float32)
    pts_l_norm = cv2.undistortPoints(np.expand_dims(pts1, axis=1), cameraMatrix=K1, distCoeffs=None)
    pts_r_norm = cv2.undistortPoints(np.expand_dims(pts2, axis=1), cameraMatrix=K2, distCoeffs=None)
    E, mask = cv2.findEssentialMat(pts_l_norm, pts_r_norm, focal=1.0, pp=(0., 0.),
                                   method=cv2.RANSAC, prob=0.999, threshold=3.0 / f_avg)
    points, R_est, t_est, mask_pose = cv2.recoverPose(E, pts_l_norm, pts_r_norm)
    return mask[:, 0].astype(bool), R_est, t_est  # use builtin bool (np.bool was removed in recent NumPy)
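As a quick, self-contained way to try the function from Example 1, the sketch below projects random 3D points through two hypothetical cameras and feeds the resulting correspondences back in; the intrinsics, point count, and ground-truth pose are assumptions chosen for the demo, not part of the original example.

import numpy as np
import cv2

# Hypothetical intrinsics and a known relative pose (pure x-translation);
# the specific values are assumptions made for this demo.
K1 = np.array([[800., 0., 320.],
               [0., 800., 240.],
               [0., 0., 1.]])
K2 = K1.copy()
R_gt = np.eye(3)
t_gt = np.array([[0.1], [0.0], [0.0]])

# Random 3D points well in front of both cameras.
pts3d = np.random.uniform(-1.0, 1.0, (100, 3)) + np.array([0.0, 0.0, 5.0])

def project(points, K, R, t):
    cam = R @ points.T + t          # 3xN points in camera coordinates
    pix = (K @ cam).T               # Nx3 homogeneous pixel coordinates
    return pix[:, :2] / pix[:, 2:3]

pts1 = project(pts3d, K1, np.eye(3), np.zeros((3, 1)))
pts2 = project(pts3d, K2, R_gt, t_gt)

inliers, R_est, t_est = estimate_relative_pose_from_correspondence(pts1, pts2, K1, K2)
print("inlier ratio:", inliers.mean())   # should be close to 1.0 on noise-free data
print("R_est:\n", R_est)                 # close to the identity
print("t_est:\n", t_est)                 # unit-norm direction along the x axis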
Example 2: estimate_pose_ess_mat
# Module to import: import cv2 [as alias]
# Or: from cv2 import recoverPose [as alias]
def estimate_pose_ess_mat(kpn_ref, kpn_cur, method=cv2.RANSAC, prob=0.999, threshold=0.0003):
    # here, the essential matrix algorithm uses the five-point algorithm solver by D. Nister (see the notes and paper above)
    E, mask_match = cv2.findEssentialMat(kpn_cur, kpn_ref, focal=1, pp=(0., 0.), method=method, prob=prob, threshold=threshold)
    _, R, t, mask = cv2.recoverPose(E, kpn_cur, kpn_ref, focal=1, pp=(0., 0.))
    return poseRt(R, t.T), mask_match  # Trc, mask_match
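Example 2 (and Example 3 below) returns poseRt(R, t.T), a helper that is not included on this page. Judging from the comment describing its result as the homogeneous transformation Trc, it presumably packs R and t into a 4x4 matrix; a minimal sketch of such a helper, as an assumption rather than the project's actual code, might be:

import numpy as np

def poseRt(R, t):
    # Pack a 3x3 rotation and a translation vector into a 4x4 homogeneous
    # transform (assumed behaviour of the helper used above).
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = t.ravel()
    return T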
Example 3: estimatePose
# Module to import: import cv2 [as alias]
# Or: from cv2 import recoverPose [as alias]
def estimatePose(self, kpn_ref, kpn_cur):
    # here, the essential matrix algorithm uses the five-point algorithm solver by D. Nister (see the notes and paper above)
    E, self.mask_match = cv2.findEssentialMat(kpn_cur, kpn_ref, focal=1, pp=(0., 0.), method=cv2.RANSAC, prob=kRansacProb, threshold=kRansacThresholdNormalized)
    _, R, t, mask = cv2.recoverPose(E, kpn_cur, kpn_ref, focal=1, pp=(0., 0.))
    return poseRt(R, t.T)  # Trc homogeneous transformation matrix with respect to 'ref' frame, pr_ = Trc * pc_
Example 4: estimatePose
# Module to import: import cv2 [as alias]
# Or: from cv2 import recoverPose [as alias]
def estimatePose(self, kps_ref, kps_cur):
    kp_ref_u = self.cam.undistort_points(kps_ref)
    kp_cur_u = self.cam.undistort_points(kps_cur)
    self.kpn_ref = self.cam.unproject_points(kp_ref_u)
    self.kpn_cur = self.cam.unproject_points(kp_cur_u)
    if kUseEssentialMatrixEstimation:
        # the essential matrix algorithm is more robust since it uses the five-point algorithm solver by D. Nister (see the notes and paper above)
        E, self.mask_match = cv2.findEssentialMat(self.kpn_cur, self.kpn_ref, focal=1, pp=(0., 0.), method=cv2.RANSAC, prob=kRansacProb, threshold=kRansacThresholdNormalized)
    else:
        # just for the hell of testing fundamental matrix fitting ;-)
        F, self.mask_match = self.computeFundamentalMatrix(kp_cur_u, kp_ref_u)
        E = self.cam.K.T @ F @ self.cam.K  # E = K.T * F * K
    #self.removeOutliersFromMask(self.mask)  # do not remove outliers: features flagged as outliers here may still be matched and recognized as inliers in subsequent frames
    _, R, t, mask = cv2.recoverPose(E, self.kpn_cur, self.kpn_ref, focal=1, pp=(0., 0.))
    return R, t  # Rrc, trc (with respect to 'ref' frame)
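The names kUseEssentialMatrixEstimation, kRansacProb and kRansacThresholdNormalized used in Examples 3 and 4 are module-level constants that this excerpt does not show. Plausible values, consistent with the defaults in Example 2's signature, could look like this (an assumption, not the original project's settings):

# Plausible module-level settings (assumed):
kUseEssentialMatrixEstimation = True     # prefer the five-point essential-matrix path
kRansacProb = 0.999                      # RANSAC confidence
kRansacThresholdNormalized = 0.0003      # RANSAC threshold in normalized image coordinates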
Example 5: processSecondFrame
# Module to import: import cv2 [as alias]
# Or: from cv2 import recoverPose [as alias]
def processSecondFrame(self):
    self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
    E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
    _, self.cur_R, self.cur_t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp=self.pp)
    self.frame_stage = STAGE_DEFAULT_FRAME
    self.px_ref = self.px_cur
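Example 5 above and Example 6 below both call a featureTracking helper that is not reproduced here. In this style of monocular visual odometry it is typically a pyramidal Lucas-Kanade tracker; a hedged sketch of what it might look like (the lk_params values are assumptions) is:

import cv2

def featureTracking(image_ref, image_cur, px_ref):
    # Track px_ref from the previous frame into the current one with pyramidal
    # Lucas-Kanade optical flow and keep only the successfully tracked pairs.
    lk_params = dict(winSize=(21, 21), maxLevel=3,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))
    px_cur, status, _err = cv2.calcOpticalFlowPyrLK(image_ref, image_cur, px_ref, None, **lk_params)
    status = status.reshape(-1)
    return px_ref[status == 1], px_cur[status == 1]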
Example 6: processFrame
# Module to import: import cv2 [as alias]
# Or: from cv2 import recoverPose [as alias]
def processFrame(self, frame_id):
    self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
    E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
    _, R, t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp=self.pp)
    absolute_scale = self.getAbsoluteScale(frame_id)
    if absolute_scale > 0.1:
        self.cur_t = self.cur_t + absolute_scale * self.cur_R.dot(t)
        self.cur_R = R.dot(self.cur_R)
    if self.px_ref.shape[0] < kMinNumFeature:
        self.px_cur = self.detector.detect(self.new_frame)
        self.px_cur = np.array([x.pt for x in self.px_cur], dtype=np.float32)
    self.px_ref = self.px_cur
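Note that cv2.recoverPose returns the translation only up to scale, which is why Example 6 rescales it with getAbsoluteScale before integrating. A common sanity check after recovering a pose is to triangulate the correspondences and confirm they lie in front of both cameras; the sketch below is illustrative only, assuming px1 and px2 are the first and second point arrays passed to recoverPose and K is the shared intrinsic matrix.

import numpy as np
import cv2

def fraction_in_front(K, R, t, px1, px2):
    # Triangulate pixel correspondences with the pose from recoverPose and
    # return the fraction of points that have positive depth in both cameras.
    P1 = K @ np.hstack([np.eye(3), np.zeros((3, 1))])   # first camera,  [I | 0]
    P2 = K @ np.hstack([R, t.reshape(3, 1)])            # second camera, [R | t]
    pts4d = cv2.triangulatePoints(P1, P2, px1.T.astype(np.float64), px2.T.astype(np.float64))
    pts3d = (pts4d[:3] / pts4d[3]).T                     # Nx3 Euclidean points
    depth1 = pts3d[:, 2]
    depth2 = (R @ pts3d.T + t.reshape(3, 1))[2]
    return float(np.mean((depth1 > 0) & (depth2 > 0)))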