

Python cv2.solvePnPRansac Method Code Examples

This article collects typical usage examples of the cv2.solvePnPRansac method in Python. If you are wondering what cv2.solvePnPRansac does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore other usage examples from the cv2 module.


The following presents 15 code examples of cv2.solvePnPRansac, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
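For reference, here is a minimal self-contained sketch of the modern (OpenCV 3.x/4.x) calling convention, using synthetic data rather than code from any of the projects below:

import cv2
import numpy as np

# Build a synthetic scene: six known 3D points projected through a known pose.
obj_pts = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0],
                    [0, 0, 1], [1, 0, 1]], dtype=np.float32)
K = np.array([[800, 0, 320], [0, 800, 240], [0, 0, 1]], dtype=np.float32)
rvec_true = np.array([[0.1], [0.2], [0.3]], dtype=np.float32)
tvec_true = np.array([[0.5], [-0.5], [5.0]], dtype=np.float32)
img_pts, _ = cv2.projectPoints(obj_pts, rvec_true, tvec_true, K, None)

# OpenCV >= 3 returns four values; OpenCV 2.4 returned only (rvec, tvec, inliers).
ok, rvec, tvec, inliers = cv2.solvePnPRansac(
    obj_pts, img_pts, K, distCoeffs=None,
    iterationsCount=100, reprojectionError=8.0, confidence=0.99)

R, _ = cv2.Rodrigues(rvec)  # axis-angle -> 3x3 rotation matrix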

Example 1: compute_pose_error

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def compute_pose_error(kpts1, kpts2_3d_2, matches, vis1, vis2, T_2to1, K1,
                       reproj_thresh):
    valid = vis1[matches[:, 0]] & vis2[matches[:, 1]]
    matches = matches[valid]
    failure = (None, None)

    if len(matches) < 4:
        return failure

    kpts1 = kpts1[matches[:, 0]].astype(np.float32).reshape((-1, 1, 2))
    kpts2_3d_2 = kpts2_3d_2[matches[:, 1]].reshape((-1, 1, 3))
    success, R_vec, t, inliers = cv2.solvePnPRansac(
        kpts2_3d_2, kpts1, K1, np.zeros(4), flags=cv2.SOLVEPNP_P3P,
        iterationsCount=1000, reprojectionError=reproj_thresh)
    if not success:
        return failure

    R, _ = cv2.Rodrigues(R_vec)
    t = t[:, 0]

    error_t = np.linalg.norm(t - T_2to1[:3, 3])
    error_R = angle_error(R, T_2to1[:3, :3])
    return error_t, error_R 
Developer: ethz-asl, Project: hfnet, Lines: 25, Source: local_descriptors.py
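The angle_error helper is defined elsewhere in hfnet; a common implementation of this rotation metric is sketched below, under the assumption that errors are reported in degrees:

import numpy as np

def angle_error(R1, R2):
    # Geodesic distance between two rotation matrices:
    # the angle of the relative rotation R1^T @ R2.
    cos = np.clip((np.trace(R1.T @ R2) - 1) / 2, -1.0, 1.0)
    return np.degrees(np.arccos(cos))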

Example 2: get_vectors

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def get_vectors(image, points, mtx, dist):
    
    # order points
    points = _order_points(points)

    # set up criteria, image, points and axis
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    imgp = np.array(points, dtype='float32')

    objp = np.array([[0.,0.,0.],[1.,0.,0.],
                        [1.,1.,0.],[0.,1.,0.]], dtype='float32')  

    # calculate rotation and translation vectors
    cv2.cornerSubPix(gray,imgp,(11,11),(-1,-1),criteria)
    # OpenCV >= 3 returns (retval, rvec, tvec, inliers); the original
    # three-value unpacking followed the OpenCV 2.4 API.
    _, rvecs, tvecs, _ = cv2.solvePnPRansac(objp, imgp, mtx, dist)

    return rvecs, tvecs 
Developer: rdmilligan, Project: SaltwashAR, Lines: 22, Source: markerfunctions.py
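As the comment above notes, older OpenCV builds return three values here. A hedged compatibility helper (hypothetical, not part of SaltwashAR) that normalizes the return value across OpenCV generations:

import cv2

def solve_pnp_ransac_compat(obj_pts, img_pts, K, dist):
    # Normalize cv2.solvePnPRansac output across OpenCV 2.4 and >= 3.0.
    ret = cv2.solvePnPRansac(obj_pts, img_pts, K, dist)
    if len(ret) == 4:   # OpenCV >= 3: (retval, rvec, tvec, inliers)
        _, rvec, tvec, inliers = ret
    else:               # OpenCV 2.4: (rvec, tvec, inliers)
        rvec, tvec, inliers = ret
    return rvec, tvec, inliers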

Example 3: forward

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def forward(ctx, pts2d, pts3d, K, ini_pose=None):
    bs = pts2d.size(0)
    n = pts2d.size(1)
    device = pts2d.device
    pts3d_np = np.array(pts3d.detach().cpu())
    K_np = np.array(K.detach().cpu())
    P_6d = torch.zeros(bs, 6, device=device)

    for i in range(bs):
        pts2d_i_np = np.ascontiguousarray(pts2d[i].detach().cpu()).reshape((n, 1, 2))
        if ini_pose is None:
            # RANSAC provides a robust initial pose estimate...
            _, rvec0, T0, _ = cv.solvePnPRansac(
                objectPoints=pts3d_np, imagePoints=pts2d_i_np, cameraMatrix=K_np,
                distCoeffs=None, flags=cv.SOLVEPNP_ITERATIVE,
                confidence=0.9999, reprojectionError=3)
        else:
            rvec0 = np.array(ini_pose[i, 0:3].cpu().view(3, 1))
            T0 = np.array(ini_pose[i, 3:6].cpu().view(3, 1))
        # ...which the iterative solver then refines with useExtrinsicGuess=True.
        _, rvec, T = cv.solvePnP(
            objectPoints=pts3d_np, imagePoints=pts2d_i_np, cameraMatrix=K_np,
            distCoeffs=None, flags=cv.SOLVEPNP_ITERATIVE,
            useExtrinsicGuess=True, rvec=rvec0, tvec=T0)
        angle_axis = torch.tensor(rvec, device=device, dtype=torch.float).view(1, 3)
        T = torch.tensor(T, device=device, dtype=torch.float).view(1, 3)
        P_6d[i, :] = torch.cat((angle_axis, T), dim=-1)

    ctx.save_for_backward(pts2d, P_6d, pts3d, K)
    return P_6d
Developer: BoChenYS, Project: BPnP, Lines: 24, Source: BPnP.py
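In the source repository this forward() belongs to a torch.autograd.Function subclass named BPnP, so it is invoked through .apply rather than called directly; a usage sketch under that assumption:

# pts2d: (bs, n, 2), pts3d: (n, 3), K: (3, 3) torch tensors
pose_6d = BPnP.apply(pts2d, pts3d, K)  # (bs, 6): axis-angle rotation + translation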

Example 4: pnp_ransac

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def pnp_ransac(self, rgb_aug_test, img_prob_ori, non_zero, v1, v2, u1, u2):
    rgb_aug_crop = rgb_aug_test[v1:v2, u1:u2]
    # Decode the predicted color crop back into object-space XYZ coordinates.
    xyz = np.copy(rgb_aug_crop)
    xyz = xyz / 255
    xyz = xyz * 2 - 1
    xyz[:, :, 0] = xyz[:, :, 0] * self.obj_scale[0] + self.obj_ct[0]
    xyz[:, :, 1] = xyz[:, :, 1] * self.obj_scale[1] + self.obj_ct[1]
    xyz[:, :, 2] = xyz[:, :, 2] * self.obj_scale[2] + self.obj_ct[2]
    confidence_mask = img_prob_ori < self.th_i
    valid_mask = np.logical_and(non_zero, confidence_mask)

    vu_list_s = np.where(valid_mask == 1)
    n_pts_s = len(vu_list_s[0])
    img_pts_s = np.zeros((n_pts_s, 2))
    obj_pts_s = xyz[vu_list_s[0], vu_list_s[1]]
    img_pts_s[:] = np.stack((vu_list_s[1], vu_list_s[0]), axis=1)  # (u, v) order
    img_pts_s[:, 0] = img_pts_s[:, 0] + u1
    img_pts_s[:, 1] = img_pts_s[:, 1] + v1
    img_pts_s = np.ascontiguousarray(img_pts_s[:, :2]).reshape((n_pts_s, 1, 2))
    if n_pts_s < 6:
        return np.eye(3), np.array([0, 0, 0]), valid_mask, -1
    ret, rvec, tvec, inliers = cv2.solvePnPRansac(
        obj_pts_s, img_pts_s, self.camK, None,
        flags=cv2.SOLVEPNP_EPNP, reprojectionError=3, iterationsCount=100)
    if inliers is None:
        return np.eye(3), np.array([0, 0, 0]), -1, -1
    else:
        rot_pred = np.eye(3)
        tra_pred = tvec[:, 0]
        cv2.Rodrigues(rvec, rot_pred)
        return rot_pred, tra_pred, valid_mask, len(inliers)
Developer: kirumang, Project: Pix2Pose, Lines: 32, Source: recognition.py

Example 5: solve_pnp_ransac

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def solve_pnp_ransac(pts3d, pts, intrinsic_matrix):
    # Positional arguments after the camera matrix follow the OpenCV order:
    # distCoeffs=None, rvec=None, tvec=None, useExtrinsicGuess=False,
    # iterationsCount=50, reprojectionError=2.0, confidence=0.99, inliers=None.
    val, rvec, tvec, inliers = cv2.solvePnPRansac(
            np.array(pts3d), np.array(pts),
            intrinsic_matrix, None, None, None,
            False, 50, 2.0, 0.99, None)
    if inliers is None or len(inliers) < 5:
        return None, None

    T = g2o.Isometry3d(cv2.Rodrigues(rvec)[0], tvec)
    return T, inliers.ravel() 
Developer: uoip, Project: rgbd_ptam, Lines: 12, Source: loopclosing.py

Example 6: pnp

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def pnp(points_3d, points_2d, camera_matrix, method=cv2.SOLVEPNP_ITERATIVE):
    try:
        dist_coeffs = pnp.dist_coeffs  # optional override via function attribute
    except AttributeError:
        dist_coeffs = np.zeros(shape=[8, 1], dtype='float64')

    assert points_3d.shape[0] == points_2d.shape[0], 'points 3D and points 2D must have same number of vertices'
    if method==cv2.SOLVEPNP_EPNP:
        points_3d=np.expand_dims(points_3d, 0)
        points_2d=np.expand_dims(points_2d, 0)

    points_2d = np.ascontiguousarray(points_2d.astype(np.float64))
    points_3d = np.ascontiguousarray(points_3d.astype(np.float64))
    camera_matrix = camera_matrix.astype(np.float64)
    # _, R_exp, t = cv2.solvePnP(points_3d,
    #                            points_2d,
    #                            camera_matrix,
    #                            dist_coeffs,
    #                            flags=method)
    #                           # , None, None, False, cv2.SOLVEPNP_UPNP)

    _, R_exp, t, _ = cv2.solvePnPRansac(points_3d,
                               points_2d,
                               camera_matrix,
                               dist_coeffs,
                               )

    R, _ = cv2.Rodrigues(R_exp)
    # trans_3d=np.matmul(points_3d,R.transpose())+t.transpose()
    # if np.max(trans_3d[:,2]<0):
    #     R=-R
    #     t=-t

    return np.concatenate([R, t], axis=-1) 
Developer: ethnhe, Project: PVN3D, Lines: 36, Source: evaluation_utils.py

Example 7: find_current_pose

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def find_current_pose(self, object_points, intrinsics):
    """
    Find the camera pose relative to the object using the current image
    point set; object_points are treated as world coordinates.
    """
    success, rotation_vector, translation_vector = cv2.solvePnPRansac(
        object_points, self.current_image_points,
        intrinsics.intrinsic_mat,
        intrinsics.distortion_coeffs,
        flags=cv2.SOLVEPNP_ITERATIVE)[0:3]
    if success:
        self.poses.append(Pose(rotation=rotation_vector, translation_vector=translation_vector))
    else:
        self.poses.append(None)
    return success
Developer: Algomorph, Project: cvcalib, Lines: 16, Source: video.py

Example 8: solve_pnp

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def solve_pnp(K: torch.Tensor, x_2d: torch.Tensor, X_3d_w: torch.Tensor, reproj_thres=2.0):
    """
    Solve the PnP problem with OpenCV.
    :param K: camera intrinsic matrix, dim: (N, 3, 3) or (3, 3)
    :param x_2d: 2D coordinates, dim: (N, H, W, 2) or (H, W, 2)
    :param X_3d_w: 3D world coordinates, dim: (N, H, W, 3) or (H, W, 3)
    :return: camera poses, dim: (N, 4, 4), or (4, 4) if the batch dim was omitted
    """
    keep_dim_n = False
    if K.dim() == 2:
        keep_dim_n = True
        K = K.unsqueeze(0)
        x_2d = x_2d.unsqueeze(0)
        X_3d_w = X_3d_w.unsqueeze(0)

    N, H, W = x_2d.shape[:3]
    K = K.detach().cpu().numpy()
    x_2d = x_2d.detach().cpu().numpy()
    X_3d_w = X_3d_w.view(N, -1, 3).detach().cpu().numpy()

    poses = []
    dist = np.zeros(4)
    for n in range(N):
        k = K[n]
        x_2d_n = x_2d[n].reshape(1, H * W, 2)
        X_3d = X_3d_w[n].reshape(1, H * W, 3)
        _, R_res, t_res, _ = cv2.solvePnPRansac(X_3d, x_2d_n, k, dist, reprojectionError=reproj_thres)
        R_res, _ = cv2.Rodrigues(R_res)

        pnp_pose = np.eye(4, dtype=np.float32)
        pnp_pose[:3, :3] = R_res
        pnp_pose[:3, 3] = t_res.ravel()
        poses.append(pnp_pose)

    # stack so the result has shape (N, 4, 4)
    poses = torch.stack([torch.from_numpy(pose) for pose in poses])

    if keep_dim_n:
        poses = poses.squeeze(0)

    return poses
Developer: sfu-gruvi-3dv, Project: sanet_relocal_demo, Lines: 43, Source: ransc_pnp.py

Example 9: do_pnp

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def do_pnp(kpts, lms, query_info, config):
    kpts = kpts.astype(np.float32).reshape((-1, 1, 2))
    lms = lms.astype(np.float32).reshape((-1, 1, 3))

    success, R_vec, t, inliers = cv2.solvePnPRansac(
        lms, kpts, query_info.K, np.array([query_info.dist, 0, 0, 0]),
        iterationsCount=5000, reprojectionError=config['reproj_error'],
        flags=cv2.SOLVEPNP_P3P)

    if success:
        inliers = inliers[:, 0]
        num_inliers = len(inliers)
        inlier_ratio = len(inliers) / len(kpts)
        success &= num_inliers >= config['min_inliers']

        ret, R_vec, t = cv2.solvePnP(
                lms[inliers], kpts[inliers], query_info.K,
                np.array([query_info.dist, 0, 0, 0]), rvec=R_vec, tvec=t,
                useExtrinsicGuess=True, flags=cv2.SOLVEPNP_ITERATIVE)
        assert ret

        query_T_w = np.eye(4)
        query_T_w[:3, :3] = cv2.Rodrigues(R_vec)[0]
        query_T_w[:3, 3] = t[:, 0]
        w_T_query = np.linalg.inv(query_T_w)

        ret = LocResult(success, num_inliers, inlier_ratio, w_T_query)
    else:
        inliers = np.empty((0,), np.int32)
        ret = loc_failure

    return ret, inliers 
Developer: ethz-asl, Project: hfnet, Lines: 34, Source: localization.py
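LocResult and loc_failure are defined elsewhere in hfnet; a plausible minimal definition, assumed here only so the snippet reads self-contained:

from collections import namedtuple

LocResult = namedtuple('LocResult', ['success', 'num_inliers', 'inlier_ratio', 'T'])
loc_failure = LocResult(False, 0, 0.0, None)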

Example 10: cube

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def cube(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (8, 7), None)

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    objp = np.zeros((7 * 8, 3), np.float32)
    objp[:, :2] = np.mgrid[0:8, 0:7].T.reshape(-1, 2)

    # the eight corners of a 3x3x3 cube resting on the chessboard plane
    axis = np.float32([[0, 0, 0], [0, 3, 0], [3, 3, 0], [3, 0, 0],
                       [0, 0, -3], [0, 3, -3], [3, 3, -3], [3, 0, -3]])

    if ret:
        cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)

        # Find the rotation and translation vectors; OpenCV >= 3 also returns
        # a leading retval (OpenCV 2.4 returned only three values here).
        _, rvecs, tvecs, inliers = cv2.solvePnPRansac(objp, corners, mtx, dist)

        # project 3D points to the image plane
        imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
        img = draw2(img, corners, imgpts)

    return img
Developer: fatcloud, Project: PyCV-time, Lines: 32, Source: 3D_Cube.py
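Here, draw2, mtx and dist are defined elsewhere in the script (mtx and dist typically come from a prior cv2.calibrateCamera run). The cube-drawing helper from the OpenCV pose-estimation tutorial, which this example appears to follow, looks roughly like this (a sketch, not the project's exact code):

import numpy as np
import cv2

def draw_cube(img, corners, imgpts):
    imgpts = np.int32(imgpts).reshape(-1, 2)
    # draw the ground floor in green, the pillars in blue, the top in red
    img = cv2.drawContours(img, [imgpts[:4]], -1, (0, 255, 0), -3)
    for i, j in zip(range(4), range(4, 8)):
        img = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]), (255, 0, 0), 3)
    img = cv2.drawContours(img, [imgpts[4:]], -1, (0, 0, 255), 3)
    return img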

Example 11: flow2se3

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def flow2se3(depth_object, flow, mask_image, K):
    """
    Given the flow from the object image to the real image, calculate the pose.

    :param depth_object: height x width ndarray, the depth map of the object image
    :param flow: height x width x 2 flow (u, v) from the object image to the real image
    :param mask_image: height x width, the mask of the real image
    :param K: 3x3 intrinsic matrix
    :return: (success, se3_q): se3_q is a 7-vector, quaternion followed by translation
    """
    height = depth_object.shape[0]
    width = depth_object.shape[1]
    assert mask_image.shape == (height, width)
    valid_in_object = (depth_object != 0).flatten()
    all_op = backproject_camera(depth_object, intrinsic_matrix=K)
    # all_op = all_op.reshape((3, width, height))

    x, y = np.meshgrid(np.arange(width), np.arange(height))
    x = x.astype(np.float64)
    y = y.astype(np.float64)
    x += flow[:, :, 0]
    y += flow[:, :, 1]
    x = x.flatten()
    y = y.flatten()
    all_ip = np.vstack((x, y))

    valid_in_image = (mask_image != 0).flatten()

    valid = np.where(np.logical_and(valid_in_object, valid_in_image))[0]
    objectPoints = all_op[:, valid].astype(np.float64).transpose()
    imagePoints = all_ip[:, valid].astype(np.float64).transpose()
    convex, rvec, tvec, inliers = cv2.solvePnPRansac(objectPoints, imagePoints, K, np.zeros(4))

    se3_q = np.zeros(7)
    if convex:
        R, _ = cv2.Rodrigues(rvec)
        se3_q[:4] = RT_transform.mat2quat(R)
        se3_q[4:] = tvec.flatten()
        return convex, se3_q
    else:
        se3_q[0] = 1
        return convex, se3_q 
Developer: liyi14, Project: mx-DeepIM, Lines: 44, Source: flow2se3.py

Example 12: get_pose_pnp

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def get_pose_pnp(rgb_curr, rgb_near, depth_curr, K):
    gray_curr = rgb2gray(rgb_curr).astype(np.uint8)
    gray_near = rgb2gray(rgb_near).astype(np.uint8)
    height, width = gray_curr.shape

    pts2d_curr, pts2d_near = feature_match(gray_curr,
                                           gray_near)  # feature matching

    # dilation of depth
    kernel = np.ones((4, 4), np.uint8)
    depth_curr_dilated = cv2.dilate(depth_curr, kernel)

    # extract 3d pts
    pts3d_curr = []
    pts2d_near_filtered = []  # keep only feature points with depth in the current frame
    for i, pt2d in enumerate(pts2d_curr):
        # print(pt2d)
        u, v = pt2d[0], pt2d[1]
        z = depth_curr_dilated[v, u]
        if z > 0:
            xyz_curr = convert_2d_to_3d(u, v, z, K)
            pts3d_curr.append(xyz_curr)
            pts2d_near_filtered.append(pts2d_near[i])

    # the minimal number of points accepted by solvePnP is 4:
    if len(pts3d_curr) >= 4 and len(pts2d_near_filtered) >= 4:
        pts3d_curr = np.expand_dims(np.array(pts3d_curr).astype(np.float32),
                                    axis=1)
        pts2d_near_filtered = np.expand_dims(
            np.array(pts2d_near_filtered).astype(np.float32), axis=1)

        # ransac
        ret = cv2.solvePnPRansac(pts3d_curr,
                                 pts2d_near_filtered,
                                 K,
                                 distCoeffs=None)
        success = ret[0]
        rotation_vector = ret[1]
        translation_vector = ret[2]
        return (success, rotation_vector, translation_vector)
    else:
        return (0, None, None) 
Developer: fangchangma, Project: self-supervised-depth-completion, Lines: 45, Source: pose_estimator.py
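convert_2d_to_3d is a project helper; under the usual pinhole model it backprojects a pixel with known depth into camera coordinates. A sketch under that assumption:

def convert_2d_to_3d(u, v, z, K):
    # Pinhole backprojection: pixel (u, v) at depth z -> camera-frame XYZ.
    fx, fy = K[0, 0], K[1, 1]
    cx, cy = K[0, 2], K[1, 2]
    return ((u - cx) * z / fx, (v - cy) * z / fy, z)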

Example 13: getP

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def getP(self, dst):
    """
    dst: marker keypoints

    return self.MTX, self.DIST, self.RVEC, self.TVEC:
    the intrinsic matrix, distortion coefficients, rotation vector and translation vector
    """
    if self.SceneImage is None:
        return None

    corners = np.float32([dst[1], dst[0], dst[2], dst[3]])
    gray = cv2.cvtColor(self.SceneImage, cv2.COLOR_BGR2GRAY)
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (0,1,0), (1,1,0)
    objp = np.zeros((2 * 2, 3), np.float32)
    objp[:, :2] = np.mgrid[0:2, 0:2].T.reshape(-1, 2)

    corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)

    if self.PTimes < self.PCount or self.PCount == 0:
        # Arrays to store object points and image points from all the images.
        objpoints = self.OBJPoints  # 3d points in real world space
        imgpoints = self.IMGPoints  # 2d points in image plane

        if len(imgpoints) == 0 or np.sum(np.abs(imgpoints[-1] - corners2)) != 0:
            objpoints.append(objp)
            imgpoints.append(corners2)

        # Find mtx, dist, rvecs, tvecs
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
        if not ret:
            self.PTimes += 1
            return None
        self.OBJPoints = objpoints
        self.IMGPoints = imgpoints
        self.MTX = mtx
        self.DIST = dist
        self.RVEC = rvecs[0]
        self.TVEC = tvecs[0]
    else:
        # Find the rotation and translation vectors.
        _, rvec, tvec, _ = cv2.solvePnPRansac(objp, corners2, self.MTX, self.DIST)
        self.RVEC = rvec
        self.TVEC = tvec
    self.PTimes += 1

    return self.MTX, self.DIST, self.RVEC, self.TVEC
Developer: GeekLiB, Project: AR-BXT-AR4Python, Lines: 52, Source: getPMatrix.py

Example 14: compute_pose_pnp_from_valid_pixels

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def compute_pose_pnp_from_valid_pixels(gt_Tcws, query_X_w, rand_R, scene_center, query_K, valid_pix_idx, pnp_x_2d,
                                       repro_thres):
    N, _, H, W = query_X_w.shape

    # recover original scene coordinates (undo the random rotation and re-centering)
    query_X_3d_w = recover_original_scene_coordinates(query_X_w, rand_R, scene_center)
    query_X_3d_w = query_X_3d_w.view(N, H, W, 3).squeeze(0).detach().cpu().numpy()

    # select valid pixels with input index
    x, y = valid_pix_idx
    x_2d_valid = pnp_x_2d[y, x, :]
    query_X_3d_valid = query_X_3d_w[y, x, :]
    selected_pixels = query_X_3d_valid.shape[0]

    query_X_3d_valid = query_X_3d_valid.reshape(1, selected_pixels, 3)
    x_2d_valid = x_2d_valid.reshape(1, selected_pixels, 2)

    # run Ransac PnP
    dist = np.zeros(4)
    k = query_K.squeeze(0).detach().cpu().numpy()
    retval, R_res, t_res, ransc_inlier = cv2.solvePnPRansac(query_X_3d_valid, x_2d_valid, k, dist,
                                                            reprojectionError=repro_thres)

    R_res, _ = cv2.Rodrigues(R_res)
    pnp_pose = np.eye(4, dtype=np.float32)
    pnp_pose[:3, :3] = R_res
    pnp_pose[:3, 3] = t_res.ravel()

    # measure accuracy
    gt_pose = gt_Tcws.squeeze(0).detach().cpu().numpy()

    R_acc = rel_rot_angle(pnp_pose, gt_pose)
    t_acc = rel_distance(pnp_pose, gt_pose)

    return R_acc, t_acc, pnp_pose, ransc_inlier
Developer: sfu-gruvi-3dv, Project: sanet_relocal_demo, Lines: 44, Source: util_func.py

Example 15: update_pose

# Module to import: import cv2 [as alias]
# Or: from cv2 import solvePnPRansac [as alias]
def update_pose(matches, new_index):
    new_image = proj.image_list[new_index]
    
    # Build a list of existing 3d ned vs. 2d uv coordinates for the
    # new image so we can run solvepnp() and derive an initial pose
    # estimate relative to the already placed group.
    new_ned_list = []
    new_uv_list = []
    for i, match in enumerate(matches):
        # only proceed with 'located' features
        if match[0] is not None:
            # check if this match refers to the new image
            for m in match[1:]:
                if m[0] == new_index:
                    new_ned_list.append(match[0])
                    new_uv_list.append(new_image.uv_list[m[1]])
                    break
    print "Number of solvepnp coordinates:", len(new_ned_list)

    # debug
    # f = open('ned.txt', 'wb')
    # for ned in new_ned_list:
    #     f.write("%.2f %.2f %.2f\n" % (ned[0], ned[1], ned[2]))

    # f = open('uv.txt', 'wb')
    # for uv in new_uv_list:
    #     f.write("%.1f %.1f\n" % (uv[0], uv[1]))

    # pose new image here:
    rvec, tvec = new_image.get_proj()
    #print 'new_ned_list', new_ned_list
    #print 'new_uv_list', new_uv_list
    (result, rvec, tvec, inliers) \
        = cv2.solvePnPRansac(np.float32(new_ned_list), np.float32(new_uv_list),
                             proj.cam.get_K(scale), None,
                             rvec, tvec, useExtrinsicGuess=True)
    print('solvePnPRansac:', result)
    if result:
        Rned2cam, jac = cv2.Rodrigues(rvec)
        pos = -np.matrix(Rned2cam[:3,:3]).T * np.matrix(tvec)
        newned = pos.T[0].tolist()[0]

        # Our Rcam matrix (in our ned coordinate system) is body2cam * Rned,
        # so solvePnP returns this combination.  We can extract Rned by
        # premultiplying by cam2body aka inv(body2cam).
        cam2body = new_image.get_cam2body()
        Rned2body = cam2body.dot(Rned2cam)
        Rbody2ned = np.matrix(Rned2body).T
        (yaw, pitch, roll) = transformations.euler_from_matrix(Rbody2ned, 'rzyx')

        print "original pose:", new_image.get_camera_pose()
        #print "original pose:", proj.image_list[30].get_camera_pose()
        new_image.set_camera_pose_sba(ned=newned,
                                      ypr=[yaw*r2d, pitch*r2d, roll*r2d])
        new_image.save_meta()
        print "solvepnp() pose:", new_image.get_camera_pose_sba()
    return result 
Developer: UASLab, Project: ImageAnalysis, Lines: 59, Source: 5a-sba3.py


Note: the cv2.solvePnPRansac method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of each fragment remains with its original author. Before distributing or reusing the code, please consult the corresponding project's license; do not republish without permission.