This article collects typical usage examples of the cv2.decomposeProjectionMatrix method in Python. If you are wondering what cv2.decomposeProjectionMatrix does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the cv2 module.
The following presents 3 code examples of cv2.decomposeProjectionMatrix, sorted by popularity by default.
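Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what cv2.decomposeProjectionMatrix returns when given a 3x4 projection matrix. The intrinsics, identity rotation, and translation used here are placeholder values chosen only for illustration.

import numpy as np
import cv2

# Build a toy 3x4 projection matrix P = K [R | t] from placeholder values.
K = np.array([[800.0, 0.0, 320.0],
              [0.0, 800.0, 240.0],
              [0.0, 0.0, 1.0]])
R = np.eye(3)                         # identity rotation
t = np.array([[0.0], [0.0], [10.0]])  # camera 10 units from the origin
P = K @ np.hstack((R, t))

# decomposeProjectionMatrix returns 7 values; the last one is the vector of
# Euler angles (in degrees) that the examples below rely on.
camera_matrix, rot_matrix, trans_vect, rot_x, rot_y, rot_z, euler_angles = \
    cv2.decomposeProjectionMatrix(P)
print(euler_angles)  # pitch / yaw / roll, in degrees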
Example 1: solve_head_pose
# Required import: import cv2 [as alias]
# Or: from cv2 import decomposeProjectionMatrix [as alias]
def solve_head_pose(self, face_landmarks):
    # dlib 68-landmark indices used for PnP: eyebrow corners, eye corners,
    # and the sides of the nose.
    indices = [17, 21, 22, 26, 36, 39, 42, 45, 31, 35]
    image_pts = np.zeros((len(indices), 2))
    for i in range(len(indices)):
        part = face_landmarks.part(indices[i])
        image_pts[i, 0] = part.x
        image_pts[i, 1] = part.y
    # Solve for the head pose (rotation and translation of the 3D face model).
    _, rotation_vec, translation_vec = cv2.solvePnP(self.face_model_points,
                                                    image_pts,
                                                    self.camera_matrix,
                                                    self.distortion_coeffs)
    # Project the corners of the 3D head-pose box onto the image plane.
    projected_head_pose_box_points, _ = cv2.projectPoints(self.head_pose_box_points,
                                                          rotation_vec,
                                                          translation_vec,
                                                          self.camera_matrix,
                                                          self.distortion_coeffs)
    projected_head_pose_box_points = tuple(map(tuple, projected_head_pose_box_points.reshape(8, 2)))
    # Calculate the Euler angles from the [R | t] pose matrix.
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
    return projected_head_pose_box_points, euler_angles
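The method above belongs to a class that already stores face_model_points, head_pose_box_points, camera_matrix, and distortion_coeffs. A hedged usage sketch follows, assuming a hypothetical HeadPoseSolver wrapper class and the standard dlib detector/predictor pipeline; the model and image file names are placeholders.

import cv2
import dlib

# HeadPoseSolver is a hypothetical class exposing solve_head_pose above.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
solver = HeadPoseSolver()

frame = cv2.imread("face.jpg")
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
for rect in detector(gray, 0):
    landmarks = predictor(gray, rect)
    box_points, euler_angles = solver.solve_head_pose(landmarks)
    pitch, yaw, roll = euler_angles.flatten()
    print(pitch, yaw, roll)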
Example 2: get_head_pose
# Required import: import cv2 [as alias]
# Or: from cv2 import decomposeProjectionMatrix [as alias]
def get_head_pose(shape, img):
    h, w, _ = img.shape
    # Approximate the intrinsics from the image size:
    # focal length ~ image width, principal point at the image center.
    K = [w, 0.0, w // 2,
         0.0, w, h // 2,
         0.0, 0.0, 1.0]
    # Assuming no lens distortion
    D = [0, 0, 0.0, 0.0, 0]
    cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
    dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)
    # image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
    #                         shape[39], shape[42], shape[45], shape[31], shape[35],
    #                         shape[48], shape[54], shape[57], shape[8]])
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35]])
    # object_pts (3D model points) and reprojectsrc (3D box corners) are
    # module-level arrays defined elsewhere in the source file.
    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)
    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix,
                                        dist_coeffs)
    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))
    # Calculate the Euler angles from the [R | t] pose matrix.
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)
    return reprojectdst, euler_angle
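get_head_pose returns the eight reprojected corners of a 3D box plus the Euler angles. Below is a hedged sketch of how a caller might draw that box; it assumes shape (the 68 landmark coordinates) and img come from the surrounding detection code, and the edge list reflects the conventional cube wiring, which is an assumption about how reprojectsrc is ordered. Reading euler_angle[1, 0] as yaw follows the common pitch/yaw/roll interpretation of the returned x/y/z angles.

# Edges of the cube, assuming the 4 "front" corners come first and
# the 4 "back" corners second.
line_pairs = [(0, 1), (1, 2), (2, 3), (3, 0),
              (4, 5), (5, 6), (6, 7), (7, 4),
              (0, 4), (1, 5), (2, 6), (3, 7)]

reprojectdst, euler_angle = get_head_pose(shape, img)
for start, end in line_pairs:
    p1 = tuple(int(v) for v in reprojectdst[start])
    p2 = tuple(int(v) for v in reprojectdst[end])
    cv2.line(img, p1, p2, (0, 255, 0), 1)
cv2.putText(img, "yaw: %.1f" % euler_angle[1, 0], (20, 20),
            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)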
Example 3: face_orientation
# Required import: import cv2 [as alias]
# Or: from cv2 import decomposeProjectionMatrix [as alias]
def face_orientation(frame, landmarks):
    size = frame.shape  # (height, width, color_channel)
    image_points = np.array([
        (landmarks[4], landmarks[5]),    # Nose tip
        (landmarks[10], landmarks[11]),  # Chin
        (landmarks[0], landmarks[1]),    # Left eye left corner
        (landmarks[2], landmarks[3]),    # Right eye right corner
        (landmarks[6], landmarks[7]),    # Left mouth corner
        (landmarks[8], landmarks[9])     # Right mouth corner
    ], dtype="double")
    # Generic 3D face model points (arbitrary units) matching the 2D points above.
    model_points = np.array([
        (0.0, 0.0, 0.0),          # Nose tip
        (0.0, -330.0, -65.0),     # Chin
        (-165.0, 170.0, -135.0),  # Left eye left corner
        (165.0, 170.0, -135.0),   # Right eye right corner
        (-150.0, -150.0, -125.0), # Left mouth corner
        (150.0, -150.0, -125.0)   # Right mouth corner
    ])
    # Camera internals: principal point at the image center, focal length
    # derived from an assumed 60-degree field of view.
    center = (size[1] / 2, size[0] / 2)
    focal_length = center[0] / np.tan(60 / 2 * np.pi / 180)
    camera_matrix = np.array(
        [[focal_length, 0, center[0]],
         [0, focal_length, center[1]],
         [0, 0, 1]], dtype="double"
    )
    dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
    # SOLVEPNP_ITERATIVE is the OpenCV 3+/4 name of the old CV_ITERATIVE flag.
    (success, rotation_vector, translation_vector) = cv2.solvePnP(
        model_points, image_points, camera_matrix, dist_coeffs,
        flags=cv2.SOLVEPNP_ITERATIVE)
    # 3D axes used to visualize the head orientation.
    axis = np.float32([[500, 0, 0],
                       [0, 500, 0],
                       [0, 0, 500]])
    imgpts, jac = cv2.projectPoints(axis, rotation_vector, translation_vector, camera_matrix, dist_coeffs)
    modelpts, jac2 = cv2.projectPoints(model_points, rotation_vector, translation_vector, camera_matrix, dist_coeffs)
    # Euler angles from the [R | t] pose matrix, wrapped to [-90, 90] degrees.
    rvec_matrix = cv2.Rodrigues(rotation_vector)[0]
    proj_matrix = np.hstack((rvec_matrix, translation_vector))
    eulerAngles = cv2.decomposeProjectionMatrix(proj_matrix)[6]
    pitch, yaw, roll = [math.radians(_) for _ in eulerAngles.flatten()]
    pitch = math.degrees(math.asin(math.sin(pitch)))
    roll = -math.degrees(math.asin(math.sin(roll)))
    yaw = math.degrees(math.asin(math.sin(yaw)))
    return imgpts, modelpts, (str(int(roll)), str(int(pitch)), str(int(yaw))), (landmarks[4], landmarks[5])
Author: jerryhouuu, Project: Face-Yaw-Roll-Pitch-from-Pose-Estimation-using-OpenCV, Lines: 60, Source file: pose_estimation.py
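A hedged sketch of how a caller might use face_orientation follows. It assumes the function and its imports (cv2, numpy, math) are available, and that landmarks is a flat list laid out as left eye, right eye, nose tip, left mouth corner, right mouth corner, chin, each as an (x, y) pair, to match the indexing above; the coordinates, colors, and file paths are placeholders.

import cv2

frame = cv2.imread("face.jpg")  # placeholder path
# Made-up landmark coordinates, for illustration only.
landmarks = [220, 240, 320, 240, 270, 300, 240, 360, 300, 360, 270, 420]

imgpts, modelpts, angles, nose = face_orientation(frame, landmarks)
nose = (int(nose[0]), int(nose[1]))
# Draw the projected x/y/z axes from the nose tip.
cv2.line(frame, nose, tuple(int(v) for v in imgpts[0].ravel()), (0, 0, 255), 3)  # x axis
cv2.line(frame, nose, tuple(int(v) for v in imgpts[1].ravel()), (0, 255, 0), 3)  # y axis
cv2.line(frame, nose, tuple(int(v) for v in imgpts[2].ravel()), (255, 0, 0), 3)  # z axis
roll, pitch, yaw = angles  # returned as strings, in this order
cv2.putText(frame, "roll %s pitch %s yaw %s" % (roll, pitch, yaw), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
cv2.imwrite("pose.jpg", frame)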