This page collects typical usage examples of the cv2.recoverPose method in Python. If you are wondering what cv2.recoverPose does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the cv2 module that the method belongs to.
Six code examples of cv2.recoverPose are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
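Before the examples, here is a minimal, self-contained sketch of the usual call pattern: estimate the essential matrix from matched points with cv2.findEssentialMat, then recover the relative rotation and translation with cv2.recoverPose. The intrinsics and the synthetic scene below are made up purely for illustration and are not taken from the examples that follow.

import cv2
import numpy as np

rng = np.random.default_rng(0)

# Hypothetical pinhole intrinsics (placeholder values).
K = np.array([[700.0, 0.0, 320.0],
              [0.0, 700.0, 240.0],
              [0.0, 0.0, 1.0]])

# Synthesize a 3D point cloud and view it from two camera poses.
X1 = rng.uniform([-1.0, -1.0, 4.0], [1.0, 1.0, 8.0], size=(200, 3))  # points in front of camera 1
R_gt, _ = cv2.Rodrigues(np.array([0.0, 0.1, 0.0]))                   # small ground-truth rotation
t_gt = np.array([[0.5], [0.0], [0.0]])                               # ground-truth baseline
X2 = X1 @ R_gt.T + t_gt.T                                            # same points in camera-2 coordinates

p1 = X1 @ K.T
pts1 = p1[:, :2] / p1[:, 2:]   # projections in view 1 (Nx2, pixels)
p2 = X2 @ K.T
pts2 = p2[:, :2] / p2[:, 2:]   # projections in view 2 (Nx2, pixels)

# Essential matrix with RANSAC, then pose recovery restricted to the RANSAC inliers.
E, mask = cv2.findEssentialMat(pts1, pts2, K, method=cv2.RANSAC, prob=0.999, threshold=1.0)
retval, R, t, mask_pose = cv2.recoverPose(E, pts1, pts2, K, mask=mask)
print(retval, "points passed the cheirality check")
print("R =\n", R, "\nt (unit norm, scale is unobservable) =\n", t)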
Example 1: estimate_relative_pose_from_correspondence
# Required module: import cv2 [as alias]
# Or: from cv2 import recoverPose [as alias]
# Also requires: import numpy as np
def estimate_relative_pose_from_correspondence(pts1, pts2, K1, K2):
    # Average focal length, used to express the RANSAC threshold in normalized coordinates.
    f_avg = (K1[0, 0] + K2[0, 0]) / 2
    pts1, pts2 = np.ascontiguousarray(pts1, np.float32), np.ascontiguousarray(pts2, np.float32)
    # Normalize the correspondences with the respective intrinsics (no lens distortion assumed).
    pts_l_norm = cv2.undistortPoints(np.expand_dims(pts1, axis=1), cameraMatrix=K1, distCoeffs=None)
    pts_r_norm = cv2.undistortPoints(np.expand_dims(pts2, axis=1), cameraMatrix=K2, distCoeffs=None)
    # Essential matrix on normalized points: focal=1 and principal point at the origin.
    E, mask = cv2.findEssentialMat(pts_l_norm, pts_r_norm, focal=1.0, pp=(0., 0.),
                                   method=cv2.RANSAC, prob=0.999, threshold=3.0 / f_avg)
    points, R_est, t_est, mask_pose = cv2.recoverPose(E, pts_l_norm, pts_r_norm)
    return mask[:, 0].astype(bool), R_est, t_est
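Continuing with the synthetic correspondences and intrinsics from the sketch above (the same K used for both cameras, which is an assumption), the function in Example 1 could be exercised like this:

inliers, R_est, t_est = estimate_relative_pose_from_correspondence(pts1, pts2, K, K)
print(inliers.sum(), "RANSAC inliers; t_est is recovered only up to scale")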
Example 2: estimate_pose_ess_mat
# Required module: import cv2 [as alias]
# Or: from cv2 import recoverPose [as alias]
def estimate_pose_ess_mat(kpn_ref, kpn_cur, method=cv2.RANSAC, prob=0.999, threshold=0.0003):
    # The essential-matrix estimation uses the five-point algorithm solver by D. Nister
    # (see the notes and paper referenced in the original source).
    # kpn_ref and kpn_cur are normalized image coordinates, hence focal=1 and pp=(0., 0.).
    E, mask_match = cv2.findEssentialMat(kpn_cur, kpn_ref, focal=1, pp=(0., 0.),
                                         method=method, prob=prob, threshold=threshold)
    _, R, t, mask = cv2.recoverPose(E, kpn_cur, kpn_ref, focal=1, pp=(0., 0.))
    return poseRt(R, t.T), mask_match  # Trc (pose of 'cur' expressed in the 'ref' frame), match mask
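A note on the inputs of Example 2: kpn_ref and kpn_cur are already normalized image coordinates, which is why the camera is passed as focal=1 and pp=(0., 0.). One common way to obtain such normalized points from pixel keypoints kp (an Nx2 array) and an intrinsic matrix K is the following one-liner (kp and K are placeholders here):

kpn = cv2.undistortPoints(kp.reshape(-1, 1, 2).astype(np.float32), K, None).reshape(-1, 2)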
Example 3: estimatePose
# Required module: import cv2 [as alias]
# Or: from cv2 import recoverPose [as alias]
def estimatePose(self, kpn_ref, kpn_cur):
    # The essential-matrix estimation uses the five-point algorithm solver by D. Nister
    # (see the notes and paper referenced in the original source).
    E, self.mask_match = cv2.findEssentialMat(kpn_cur, kpn_ref, focal=1, pp=(0., 0.),
                                              method=cv2.RANSAC, prob=kRansacProb,
                                              threshold=kRansacThresholdNormalized)
    _, R, t, mask = cv2.recoverPose(E, kpn_cur, kpn_ref, focal=1, pp=(0., 0.))
    return poseRt(R, t.T)  # Trc: homogeneous transform w.r.t. the 'ref' frame, p_ref = Trc * p_cur
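The poseRt helper called in Examples 2 and 3 is not shown on this page. A minimal helper with the same shape, assuming it assembles a 4x4 homogeneous transform from R and t (an assumption about its behavior, not the original implementation), could be:

def poseRt(R, t):
    # Assumed behavior: build the 4x4 homogeneous transform [R | t; 0 0 0 1].
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = np.asarray(t).ravel()  # accepts t as 3x1, 1x3, or flat
    return T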
Example 4: estimatePose
# Required module: import cv2 [as alias]
# Or: from cv2 import recoverPose [as alias]
def estimatePose(self, kps_ref, kps_cur):
    # Undistort the pixel keypoints and back-project them onto the normalized image plane.
    kp_ref_u = self.cam.undistort_points(kps_ref)
    kp_cur_u = self.cam.undistort_points(kps_cur)
    self.kpn_ref = self.cam.unproject_points(kp_ref_u)
    self.kpn_cur = self.cam.unproject_points(kp_cur_u)
    if kUseEssentialMatrixEstimation:
        # The essential-matrix estimation is more robust since it uses the five-point algorithm
        # solver by D. Nister (see the notes and paper referenced in the original source).
        E, self.mask_match = cv2.findEssentialMat(self.kpn_cur, self.kpn_ref, focal=1, pp=(0., 0.),
                                                  method=cv2.RANSAC, prob=kRansacProb,
                                                  threshold=kRansacThresholdNormalized)
    else:
        # Just for the hell of testing fundamental-matrix fitting ;-)
        F, self.mask_match = self.computeFundamentalMatrix(kp_cur_u, kp_ref_u)
        E = self.cam.K.T @ F @ self.cam.K  # E = K.T * F * K
    # self.removeOutliersFromMask(self.mask)  # do not remove outliers: currently unmatched/outlier
    # features can still be matched and recognized as inliers in subsequent frames
    _, R, t, mask = cv2.recoverPose(E, self.kpn_cur, self.kpn_ref, focal=1, pp=(0., 0.))
    return R, t  # Rrc, trc (with respect to the 'ref' frame)
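The computeFundamentalMatrix method used in the else branch of Example 4 is not included on this page. A plausible stand-in built on cv2.findFundamentalMat (an assumption, not the original method) might look like:

def computeFundamentalMatrix(kps_cur, kps_ref, reproj_threshold=1.0, confidence=0.999):
    # RANSAC-based fundamental matrix between two sets of undistorted pixel keypoints.
    F, mask = cv2.findFundamentalMat(kps_cur, kps_ref, cv2.FM_RANSAC, reproj_threshold, confidence)
    return F, mask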
Example 5: processSecondFrame
# Required module: import cv2 [as alias]
# Or: from cv2 import recoverPose [as alias]
def processSecondFrame(self):
    # Track the reference keypoints into the new frame, then estimate the relative pose
    # between the first two frames; this initializes the visual-odometry trajectory.
    self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
    E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp,
                                   method=cv2.RANSAC, prob=0.999, threshold=1.0)
    _, self.cur_R, self.cur_t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref,
                                                      focal=self.focal, pp=self.pp)
    self.frame_stage = STAGE_DEFAULT_FRAME
    self.px_ref = self.px_cur
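Note that cv2.recoverPose returns a translation of unit norm, so the pose estimated here on the second frame fixes the (arbitrary) scale of the monocular trajectory; Example 6 then rescales each subsequent relative translation with getAbsoluteScale before composing it into the running pose.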
Example 6: processFrame
# Required module: import cv2 [as alias]
# Or: from cv2 import recoverPose [as alias]
# Also requires: import numpy as np
def processFrame(self, frame_id):
    # Track keypoints from the last frame into the new one and estimate the relative pose.
    self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
    E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp,
                                   method=cv2.RANSAC, prob=0.999, threshold=1.0)
    _, R, t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp=self.pp)
    # Compose the scaled relative motion into the running absolute pose.
    absolute_scale = self.getAbsoluteScale(frame_id)
    if absolute_scale > 0.1:
        self.cur_t = self.cur_t + absolute_scale * self.cur_R.dot(t)
        self.cur_R = R.dot(self.cur_R)
    # Re-detect features when too few survive the tracking step.
    if self.px_ref.shape[0] < kMinNumFeature:
        self.px_cur = self.detector.detect(self.new_frame)
        self.px_cur = np.array([x.pt for x in self.px_cur], dtype=np.float32)
    self.px_ref = self.px_cur
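As a closing illustration: among the four possible decompositions of E, cv2.recoverPose selects the (R, t) pair for which triangulated points lie in front of both cameras, and its first return value is the number of inliers passing that cheirality check. The check can be reproduced by hand with cv2.triangulatePoints; the sketch below reuses the synthetic K, pts1, pts2 and the recovered R, t from the first sketch at the top of this page (so all names are assumptions, not part of the examples):

# Projection matrices for the two views: [I | 0] for view 1 and [R | t] for view 2.
P1 = K @ np.hstack([np.eye(3), np.zeros((3, 1))])
P2 = K @ np.hstack([R, t])
X_h = cv2.triangulatePoints(P1, P2, pts1.T, pts2.T)   # 4xN homogeneous points
X = (X_h[:3] / X_h[3]).T                              # Nx3 points in view-1 coordinates
depth1 = X[:, 2]                                      # depth in camera 1
depth2 = (X @ R.T + t.T)[:, 2]                        # depth in camera 2
print(int(np.sum((depth1 > 0) & (depth2 > 0))), "triangulated points lie in front of both cameras")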