

Python cv2.projectPoints Method Code Examples

This article collects typical usage examples of the Python cv2.projectPoints method. If you are wondering what cv2.projectPoints does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further usage examples from the cv2 module.


The following presents 15 code examples of the cv2.projectPoints method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
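
Before turning to the project examples, here is a minimal, self-contained sketch of a basic cv2.projectPoints call, for orientation only. The camera intrinsics, pose, and object points below are made-up illustrative values and are not taken from any of the projects listed later.

import numpy as np
import cv2

# Hypothetical pinhole camera: fx = fy = 800, principal point at (320, 240).
camera_matrix = np.array([[800.0,   0.0, 320.0],
                          [  0.0, 800.0, 240.0],
                          [  0.0,   0.0,   1.0]])
dist_coeffs = np.zeros(5)  # assume no lens distortion

# Eight corners of a unit cube, expressed in the object coordinate frame.
object_points = np.float32([[x, y, z] for x in (0, 1) for y in (0, 1) for z in (0, 1)])

rvec = np.zeros((3, 1))                 # no rotation between object and camera frames
tvec = np.array([[0.0], [0.0], [5.0]])  # cube sits 5 units in front of the camera

# projectPoints returns the projected 2D points and the Jacobian of the projection.
image_points, jacobian = cv2.projectPoints(object_points, rvec, tvec,
                                           camera_matrix, dist_coeffs)
print(image_points.reshape(-1, 2))  # (N, 2) array of pixel coordinates

Every example that follows uses the same pattern: a set of 3D points and a rotation/translation estimate (usually obtained from cv2.solvePnP or a marker detector) are projected to 2D pixel coordinates, typically so they can be drawn on the frame with cv2.line or cv2.polylines.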

Example 1: solve_head_pose

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def solve_head_pose(self, face_landmarks):
        indices = [17, 21, 22, 26, 36, 39, 42, 45, 31, 35]
        image_pts = np.zeros((len(indices), 2))
        for i in range(len(indices)):
            part = face_landmarks.part(indices[i])
            image_pts[i, 0] = part.x
            image_pts[i, 1] = part.y

        _, rotation_vec, translation_vec = cv2.solvePnP(self.face_model_points,
                                                        image_pts,
                                                        self.camera_matrix,
                                                        self.distortion_coeffs)
        projected_head_pose_box_points, _ = cv2.projectPoints(self.head_pose_box_points,
                                                              rotation_vec,
                                                              translation_vec,
                                                              self.camera_matrix,
                                                              self.distortion_coeffs)
        projected_head_pose_box_points = tuple(map(tuple, projected_head_pose_box_points.reshape(8, 2)))

        # Calculate euler angle
        rotation_mat, _ = cv2.Rodrigues(rotation_vec)
        pose_mat = cv2.hconcat((rotation_mat, translation_vec))
        _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
        return projected_head_pose_box_points, euler_angles 
Developer: pkhungurn, Project: talking-head-anime-demo, Lines of code: 26, Source file: head_pose_solver.py

Example 2: drawBox

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def drawBox(camera_parameters, markers, frame):
    objpts = np.float32([[0,0,0], [1,0,0], [1,1,0], [0,1,0],
                         [0,0,1], [1,0,1], [1,1,1], [0,1,1]]).reshape(-1,3)
    mtx, dist = camera_parameters.camera_matrix, camera_parameters.dist_coeff

    for marker in markers:
        rvec, tvec = marker.rvec, marker.tvec
        imgpts, jac = cv2.projectPoints(objpts, rvec, tvec, mtx, dist)

        cv2.line(frame, tuple(imgpts[0].ravel()), tuple(imgpts[1].ravel()), (0,0,255), 2)
        cv2.line(frame, tuple(imgpts[1].ravel()), tuple(imgpts[2].ravel()), (0,0,255), 2)
        cv2.line(frame, tuple(imgpts[2].ravel()), tuple(imgpts[3].ravel()), (0,0,255), 2)
        cv2.line(frame, tuple(imgpts[3].ravel()), tuple(imgpts[0].ravel()), (0,0,255), 2)

        cv2.line(frame, tuple(imgpts[0].ravel()), tuple(imgpts[0+4].ravel()), (0,0,255), 2)
        cv2.line(frame, tuple(imgpts[1].ravel()), tuple(imgpts[1+4].ravel()), (0,0,255), 2)
        cv2.line(frame, tuple(imgpts[2].ravel()), tuple(imgpts[2+4].ravel()), (0,0,255), 2)
        cv2.line(frame, tuple(imgpts[3].ravel()), tuple(imgpts[3+4].ravel()), (0,0,255), 2)

        cv2.line(frame, tuple(imgpts[0+4].ravel()), tuple(imgpts[1+4].ravel()), (0,0,255), 2)
        cv2.line(frame, tuple(imgpts[1+4].ravel()), tuple(imgpts[2+4].ravel()), (0,0,255), 2)
        cv2.line(frame, tuple(imgpts[2+4].ravel()), tuple(imgpts[3+4].ravel()), (0,0,255), 2)
        cv2.line(frame, tuple(imgpts[3+4].ravel()), tuple(imgpts[0+4].ravel()), (0,0,255), 2) 
Developer: bxtkezhan, Project: BAR4Py, Lines of code: 25, Source file: debugtools.py

Example 3: drawAxis

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def drawAxis(camera_parameters, markers, frame):
    axis = np.float32([[1,0,0], [0,1,0], [0,0,1]]).reshape(-1,3)
    mtx, dist = camera_parameters.camera_matrix, camera_parameters.dist_coeff

    for marker in markers:
        rvec, tvec = marker.rvec, marker.tvec
        imgpts, jac = cv2.projectPoints(axis, rvec, tvec, mtx, dist)
        corners = marker.corners
        corner = tuple(corners[0].ravel())
        cv2.line(frame, corner, tuple(imgpts[0].ravel()), (0,0,255), 2)
        cv2.line(frame, corner, tuple(imgpts[1].ravel()), (0,255,0), 2)
        cv2.line(frame, corner, tuple(imgpts[2].ravel()), (255,0,0), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, 'X', tuple(imgpts[0].ravel()), font, 0.5, (0,0,255), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Y', tuple(imgpts[1].ravel()), font, 0.5, (0,255,0), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Z', tuple(imgpts[2].ravel()), font, 0.5, (255,0,0), 2, cv2.LINE_AA) 
Developer: bxtkezhan, Project: BAR4Py, Lines of code: 18, Source file: debugtools.py

Example 4: phyPosGround2PixelPos

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def phyPosGround2PixelPos(self, pos_coord_ground, return_distort_image_pos=False):
        """
        Transform the physical position in ground coordinate to pixel position
        """
        pos_coord_ground = np.array(pos_coord_ground)
        if len(pos_coord_ground.shape) == 1:
            pos_coord_ground = pos_coord_ground.reshape(-1, 1)

        assert pos_coord_ground.shape == (
            3, 1) or pos_coord_ground.shape == (2, 1)

        homo_pos = np.ones((4, 1), np.float32)
        if pos_coord_ground.shape == (2, 1):
            # by default, z =0 since it's on the ground
            homo_pos[0:2, :] = pos_coord_ground
            
            # (np.random.randn() - 0.5) * 0.05 # add noise to the z-axis
            homo_pos[2, :] = 0
        else:
            homo_pos[0:3, :] = pos_coord_ground
        homo_pos = np.matmul(self.ground_2_camera_trans, homo_pos)
        
        pixel_points, _ = cv2.projectPoints(homo_pos[0:3, :].reshape(1, 1, 3), np.zeros((3, 1)), np.zeros((3, 1)),
                                            self.camera_mat, self.dist_coeffs if return_distort_image_pos else None)
        return pixel_points.reshape((2, 1)) 
Developer: araffin, Project: robotics-rl-srl, Lines of code: 27, Source file: utils.py

Example 5: drawBox

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def drawBox(self, img):
        axis = np.float32([[0,0,0], [0,1,0], [1,1,0], [1,0,0],
                          [0,0,-1],[0,1,-1],[1,1,-1],[1,0,-1] ])
        imgpts, jac = cv2.projectPoints(axis, self.RVEC, self.TVEC, self.MTX, self.DIST)
        imgpts = np.int32(imgpts).reshape(-1,2)

        # draw pillars in blue color
        for i,j in zip(range(4),range(4,8)):
            img2 = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(255,0,0),3)

        # draw top layer in red color
        outImg = cv2.drawContours(img2, [imgpts[4:]],-1,(0,0,255),3)

        return outImg

# Debug Code. 
Developer: GeekLiB, Project: AR-BXT-AR4Python, Lines of code: 18, Source file: getPMatrix.py

Example 6: draw_overlay

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def draw_overlay(self, vis, tracked):
        x0, y0, x1, y1 = tracked.target.rect
        quad_3d = np.float32([[x0, y0, 0], [x1, y0, 0], [x1, y1, 0], [x0, y1, 0]])
        fx = 0.5 + cv2.getTrackbarPos('focal', 'plane') / 50.0
        h, w = vis.shape[:2]
        K = np.float64([[fx*w, 0, 0.5*(w-1)],
                        [0, fx*w, 0.5*(h-1)],
                        [0.0,0.0,      1.0]])
        dist_coef = np.zeros(4)
        ret, rvec, tvec = cv2.solvePnP(quad_3d, tracked.quad, K, dist_coef)
        verts = ar_verts * [(x1-x0), (y1-y0), -(x1-x0)*0.3] + (x0, y0, 0)
        verts = cv2.projectPoints(verts, rvec, tvec, K, dist_coef)[0].reshape(-1, 2)
        for i, j in ar_edges:
            (x0, y0), (x1, y1) = verts[i], verts[j]
            cv2.line(vis, (int(x0), int(y0)), (int(x1), int(y1)), (255, 255, 0), 2) 
Developer: makelove, Project: OpenCV-Python-Tutorial, Lines of code: 17, Source file: plane_ar.py

Example 7: get_head_pose

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def get_head_pose(shape,img):
    h,w,_=img.shape
    K = [w, 0.0, w//2,
         0.0, w, h//2,
         0.0, 0.0, 1.0]
    # Assuming no lens distortion
    D = [0, 0, 0.0, 0.0, 0]

    cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
    dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)



    # image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
    #                         shape[39], shape[42], shape[45], shape[31], shape[35],
    #                         shape[48], shape[54], shape[57], shape[8]])
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35]])
    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # calc euler angle
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, euler_angle 
Developer: 610265158, Project: face_landmark, Lines of code: 33, Source file: headpose.py

Example 8: draw_annotation_box

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def draw_annotation_box(self, image, rotation_vector, translation_vector, color=(255, 255, 255), line_width=2):
        """Draw a 3D box as annotation of pose"""
        point_3d = []
        rear_size = 75
        rear_depth = 0
        point_3d.append((-rear_size, -rear_size, rear_depth))
        point_3d.append((-rear_size, rear_size, rear_depth))
        point_3d.append((rear_size, rear_size, rear_depth))
        point_3d.append((rear_size, -rear_size, rear_depth))
        point_3d.append((-rear_size, -rear_size, rear_depth))

        front_size = 100
        front_depth = 100
        point_3d.append((-front_size, -front_size, front_depth))
        point_3d.append((-front_size, front_size, front_depth))
        point_3d.append((front_size, front_size, front_depth))
        point_3d.append((front_size, -front_size, front_depth))
        point_3d.append((-front_size, -front_size, front_depth))
        point_3d = np.array(point_3d, dtype=np.float64).reshape(-1, 3)

        # Map to 2d image points
        (point_2d, _) = cv2.projectPoints(point_3d,
                                          rotation_vector,
                                          translation_vector,
                                          self.camera_matrix,
                                          self.dist_coefs)
        point_2d = np.int32(point_2d.reshape(-1, 2))

        # Draw all the lines
        cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)
        cv2.line(image, tuple(point_2d[1]), tuple(
            point_2d[6]), color, line_width, cv2.LINE_AA)
        cv2.line(image, tuple(point_2d[2]), tuple(
            point_2d[7]), color, line_width, cv2.LINE_AA)
        cv2.line(image, tuple(point_2d[3]), tuple(
            point_2d[8]), color, line_width, cv2.LINE_AA) 
Developer: kwea123, Project: VTuber_Unity, Lines of code: 38, Source file: pose_estimator.py

Example 9: draw_axis

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def draw_axis(self, img, R, t):
        points = np.float32(
            [[30, 0, 0], [0, 30, 0], [0, 0, 30], [0, 0, 0]]).reshape(-1, 3)

        axisPoints, _ = cv2.projectPoints(
            points, R, t, self.camera_matrix, self.dist_coefs)

        img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(
            axisPoints[0].ravel()), (255, 0, 0), 3)
        img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(
            axisPoints[1].ravel()), (0, 255, 0), 3)
        img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(
            axisPoints[2].ravel()), (0, 0, 255), 3) 
Developer: kwea123, Project: VTuber_Unity, Lines of code: 15, Source file: pose_estimator.py

Example 10: draw_quads

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def draw_quads(self, img, quads, color = (0, 255, 0)):
        img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0]
        img_quads.shape = quads.shape[:2] + (2,)
        for q in img_quads:
            cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.LINE_AA, shift=2) 
Developer: makelove, Project: OpenCV-Python-Tutorial, Lines of code: 7, Source file: video.py

Example 11: draw_axis

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def draw_axis(img, R, t, K):
    # unit is mm
    rotV, _ = cv2.Rodrigues(R)
    points = np.float32([[100, 0, 0], [0, 100, 0], [0, 0, 100], [0, 0, 0]]).reshape(-1, 3)
    axisPoints, _ = cv2.projectPoints(points, rotV, t, K, (0, 0, 0, 0))
    img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (255,0,0), 3)
    img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0,255,0), 3)
    img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (0,0,255), 3)
    return img 
Developer: meiqua, Project: patch_linemod, Lines of code: 11, Source file: patch_linemod_test.py

Example 12: draw_quads

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def draw_quads(self, img, quads, color=(0, 255, 0)):
        img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef)[0]
        img_quads.shape = quads.shape[:2] + (2,)
        for q in img_quads:
            cv2.fillConvexPoly(img, np.int32(q * 4), color, cv2.LINE_AA, shift=2) 
Developer: mengli, Project: MachineLearning, Lines of code: 7, Source file: video.py

Example 13: project_points

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def project_points(self,
                       points3d: np.ndarray,
                       rvec: Optional[np.ndarray] = None,
                       tvec: Optional[np.ndarray] = None) -> np.ndarray:
        assert points3d.shape[1] == 3
        if rvec is None:
            rvec = np.zeros(3, dtype=np.float64)
        if tvec is None:
            tvec = np.zeros(3, dtype=np.float64)
        points2d, _ = cv2.projectPoints(points3d, rvec, tvec,
                                        self.camera_matrix,
                                        self.dist_coefficients)
        return points2d.reshape(-1, 2) 
Developer: hysts, Project: pytorch_mpiigaze, Lines of code: 15, Source file: camera.py

Example 14: draw_annotation_box

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def draw_annotation_box(self, image, rotation_vector, translation_vector, color=(255, 255, 255), line_width=2):
        """Draw a 3D box as annotation of pose"""
        point_3d = []
        rear_size = 75
        rear_depth = 0
        point_3d.append((-rear_size, -rear_size, rear_depth))
        point_3d.append((-rear_size, rear_size, rear_depth))
        point_3d.append((rear_size, rear_size, rear_depth))
        point_3d.append((rear_size, -rear_size, rear_depth))
        point_3d.append((-rear_size, -rear_size, rear_depth))

        front_size = 100
        front_depth = 100
        point_3d.append((-front_size, -front_size, front_depth))
        point_3d.append((-front_size, front_size, front_depth))
        point_3d.append((front_size, front_size, front_depth))
        point_3d.append((front_size, -front_size, front_depth))
        point_3d.append((-front_size, -front_size, front_depth))
        point_3d = np.array(point_3d, dtype=np.float64).reshape(-1, 3)

        # Map to 2d image points
        (point_2d, _) = cv2.projectPoints(point_3d,
                                          rotation_vector,
                                          translation_vector,
                                          self.camera_matrix,
                                          self.dist_coeefs)
        point_2d = np.int32(point_2d.reshape(-1, 2))

        # Draw all the lines
        cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)
        cv2.line(image, tuple(point_2d[1]), tuple(
            point_2d[6]), color, line_width, cv2.LINE_AA)
        cv2.line(image, tuple(point_2d[2]), tuple(
            point_2d[7]), color, line_width, cv2.LINE_AA)
        cv2.line(image, tuple(point_2d[3]), tuple(
            point_2d[8]), color, line_width, cv2.LINE_AA) 
Developer: yinguobing, Project: image_utility, Lines of code: 38, Source file: pose_estimator.py

Example 15: draw_axis

# Required import: import cv2 [as alias]
# Or: from cv2 import projectPoints [as alias]
def draw_axis(self, img, R, t):
        points = np.float32(
            [[30, 0, 0], [0, 30, 0], [0, 0, 30], [0, 0, 0]]).reshape(-1, 3)

        axisPoints, _ = cv2.projectPoints(
            points, R, t, self.camera_matrix, self.dist_coeefs)

        img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(
            axisPoints[0].ravel()), (255, 0, 0), 3)
        img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(
            axisPoints[1].ravel()), (0, 255, 0), 3)
        img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(
            axisPoints[2].ravel()), (0, 0, 255), 3) 
Developer: yinguobing, Project: image_utility, Lines of code: 15, Source file: pose_estimator.py


Note: The cv2.projectPoints examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and the source code copyright remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this article without permission.