

Python cv2.hconcat Method Code Examples

This article collects typical usage examples of the Python method cv2.hconcat. If you are unsure what cv2.hconcat does, how to call it, or where it is useful, the curated examples below should help. You can also explore further usage examples from the cv2 module.


Six code examples of cv2.hconcat are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
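Before the project examples, here is a minimal, self-contained sketch of the basic behavior (not taken from any of the projects below): cv2.hconcat concatenates a list of images side by side, and requires every input to share the same height, dtype, and channel count.

import cv2
import numpy as np

# Two synthetic images with identical height, dtype, and channel count
left = np.full((100, 120, 3), 255, dtype=np.uint8)   # white block, 120 px wide
right = np.zeros((100, 80, 3), dtype=np.uint8)       # black block, 80 px wide

combined = cv2.hconcat([left, right])
print(combined.shape)  # (100, 200, 3): widths add up, height is unchanged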

Example 1: solve_head_pose

# Required module: import cv2 [as alias]
# Or: from cv2 import hconcat [as alias]
def solve_head_pose(self, face_landmarks):
        # Indices of the dlib 68-point landmarks used as 2D-3D correspondences for PnP
        indices = [17, 21, 22, 26, 36, 39, 42, 45, 31, 35]
        image_pts = np.zeros((len(indices), 2))
        for i, idx in enumerate(indices):
            part = face_landmarks.part(idx)
            image_pts[i, 0] = part.x
            image_pts[i, 1] = part.y

        _, rotation_vec, translation_vec = cv2.solvePnP(self.face_model_points,
                                                        image_pts,
                                                        self.camera_matrix,
                                                        self.distortion_coeffs)
        projected_head_pose_box_points, _ = cv2.projectPoints(self.head_pose_box_points,
                                                              rotation_vec,
                                                              translation_vec,
                                                              self.camera_matrix,
                                                              self.distortion_coeffs)
        projected_head_pose_box_points = tuple(map(tuple, projected_head_pose_box_points.reshape(8, 2)))

        # Recover Euler angles by decomposing the projection matrix [R | t]
        rotation_mat, _ = cv2.Rodrigues(rotation_vec)
        pose_mat = cv2.hconcat((rotation_mat, translation_vec))
        _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
        return projected_head_pose_box_points, euler_angles 
Developer: pkhungurn, Project: talking-head-anime-demo, Lines: 26, Source: head_pose_solver.py
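A usage note, not part of the original project: cv2.decomposeProjectionMatrix returns the Euler angles as a 3x1 array of degrees (rotations about x, y, z), which callers commonly read as pitch, yaw, and roll. The solver variable below is hypothetical.

projected_box_points, euler_angles = solver.solve_head_pose(face_landmarks)
pitch, yaw, roll = euler_angles.flatten()  # degrees, x/y/z rotation order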

Example 2: get_head_pose

# Required module: import cv2 [as alias]
# Or: from cv2 import hconcat [as alias]
def get_head_pose(shape, img):
    h, w, _ = img.shape
    # Approximate intrinsics: focal length ~= image width, principal point at the image center
    K = [w, 0.0, w // 2,
         0.0, w, h // 2,
         0.0, 0.0, 1.0]
    # Assuming no lens distortion
    D = [0, 0, 0.0, 0.0, 0]

    cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
    dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)
    # image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
    #                         shape[39], shape[42], shape[45], shape[31], shape[35],
    #                         shape[48], shape[54], shape[57], shape[8]])
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35]])
    # object_pts (3D landmarks of a generic face model) and reprojectsrc (3D box
    # corners; see the sketch after this example) are module-level constants
    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # Calculate Euler angles by decomposing the projection matrix [R | t]
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, euler_angle 
Developer: 610265158, Project: face_landmark, Lines: 33, Source: headpose.py
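The example above references two module-level arrays defined elsewhere in the project: object_pts, the 3D positions of the same ten landmarks on a generic face model, and reprojectsrc, the eight 3D corners of the box that gets reprojected and drawn. A hedged sketch of their shapes, using generic-face values that circulate in many head-pose demos (the project's actual numbers may differ):

import numpy as np

# 3D model points matching landmark indices 17, 21, 22, 26, 36, 39, 42, 45, 31, 35
object_pts = np.float32([[6.825897, 6.760612, 4.402142],
                         [1.330353, 7.122144, 6.903745],
                         [-1.330353, 7.122144, 6.903745],
                         [-6.825897, 6.760612, 4.402142],
                         [5.311432, 5.485328, 3.987654],
                         [1.789930, 5.393625, 4.413414],
                         [-1.789930, 5.393625, 4.413414],
                         [-5.311432, 5.485328, 3.987654],
                         [2.005628, 1.409845, 6.165652],
                         [-2.005628, 1.409845, 6.165652]])

# Eight corners of a cube centered on the head, reprojected into the image
reprojectsrc = np.float32([[10.0, 10.0, 10.0], [10.0, 10.0, -10.0],
                           [10.0, -10.0, -10.0], [10.0, -10.0, 10.0],
                           [-10.0, 10.0, 10.0], [-10.0, 10.0, -10.0],
                           [-10.0, -10.0, -10.0], [-10.0, -10.0, 10.0]])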

Example 3: image_hcombine

# Required module: import cv2 [as alias]
# Or: from cv2 import hconcat [as alias]
def image_hcombine(im_info1, im_info2):
    img1 = im_info1[0]
    img2 = im_info2[0]
    color_flag1 = im_info1[1]
    color_flag2 = im_info2[1]
 
    if color_flag1 == 1:
        h1, w1, ch1 = img1.shape[:3]
    else:
        h1, w1 = img1.shape[:2]
 
    if color_flag2 == 1:
        h2, w2, ch2 = img2.shape[:3]
    else:
        h2, w2 = img2.shape[:2]
 
    if h1 < h2:
        # Scale the shorter image, preserving its aspect ratio, so both share the same height
        w1 = int(w1 * (h2 / h1))
        h1 = h2
        img1 = cv2.resize(img1, (w1, h1))
    else:
        w2 = int(w2 * (h1 / h2))
        h2 = h1
        img2 = cv2.resize(img2, (w2, h2))
 
    # cv2.hconcat also requires both images to have the same dtype and channel count
    img = cv2.hconcat([img1, img2])
    return img
Developer: PINTO0309, Project: PINTO_model_zoo, Lines: 29, Source: movie_concat.py
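A hedged usage sketch (the file names are placeholders): each im_info argument is an (image, color_flag) pair, where color_flag == 1 marks a 3-channel color image.

img_a = cv2.imread('a.jpg')   # 3-channel BGR image
img_b = cv2.imread('b.jpg')   # 3-channel BGR image, possibly a different height
combined = image_hcombine((img_a, 1), (img_b, 1))
cv2.imwrite('combined.jpg', combined)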

Example 4: detectTime

# Required module: import cv2 [as alias]
# Or: from cv2 import hconcat [as alias]
def detectTime(self, time_binary):
#        img_gray = cv2.cvtColor(time_img, cv2.COLOR_BGR2GRAY)
#        ret, thresh1 = cv2.threshold(img_gray, 230, 255, cv2.THRESH_BINARY_INV)
        final_img = np.zeros((time_binary.shape[0], int(time_binary.shape[1] * 0.25)), np.uint8)
        right_img = np.zeros((time_binary.shape[0], int(time_binary.shape[1] * 0.15)), np.uint8)
        separate_img = np.zeros((time_binary.shape[0], int(time_binary.shape[1] * 0.1)), np.uint8)
        profile = []
        letter_start = []
        letter_end = []
        count = 0
        valley_threshold = 256  # a column belongs to a glyph if it holds at least two white (255) pixels
        # get letters separation pixels
        for i in range(time_binary.shape[1]):
            sum_vertical = sum(time_binary[:, i])
            profile.append(sum_vertical)
            if len(letter_start) == len(letter_end):
                if sum_vertical > valley_threshold:
                    letter_start.append(i)
            else:
                if sum_vertical <= valley_threshold:
                    letter_end.append(i)
                    count = count + 1
        # Add blank(black) space between letters
        for i in range(count):
            final_img = cv2.hconcat([final_img, time_binary[0:time_binary.shape[0], letter_start[i]:letter_end[i]]])
            final_img = cv2.hconcat([final_img, separate_img])
        final_img = cv2.hconcat([final_img, right_img])
        kernel = np.ones((2, 2), np.uint8)
        final_img = cv2.dilate(final_img, kernel, iterations=1)
        cv2.imwrite(self.timefile, final_img)
        # '-psm 7' treats the image as a single text line ('--psm 7' on Tesseract 4+)
        text = pytesseract.image_to_string(Image.open(self.timefile),
                                           config='-c tessedit_char_whitelist=1234567890:~-AMP -psm 7')
        return text
Developer: mzsmakr, Project: PGSS, Lines: 35, Source: raidnearby.py

Example 5: iterative_contour_alignment

# Required module: import cv2 [as alias]
# Or: from cv2 import hconcat [as alias]
def iterative_contour_alignment(self, refinable, max_iterations=3,
                                    min_rotation_displacement=0.5,
                                    min_translation_displacement=0.0025, display=False):
        assert refinable is not None

        last_pose = np.copy(refinable.hypo_pose)
        for i in range(max_iterations):

            refinable = self.refine(refinable=refinable)

            last_trans = last_pose[:3, 3]
            last_rot = Quaternion(matrix2quaternion(last_pose[:3, :3]))

            cur_trans = refinable.hypo_pose[:3, 3]
            cur_rot = Quaternion(matrix2quaternion(refinable.hypo_pose[:3, :3]))

            trans_diff = np.linalg.norm(cur_trans - last_trans)
            # Relative rotation between consecutive hypotheses, expressed as an angle in degrees
            update_q = cur_rot * last_rot.inverse
            angular_diff = np.abs(update_q.degrees)

            last_pose = np.copy(refinable.hypo_pose)

            if display:
                concat = cv2.hconcat([refinable.input_col, refinable.hypo_col])
                cv2.imshow('test', concat)
                cv2.waitKey(500)

            if angular_diff <= min_rotation_displacement and trans_diff <= min_translation_displacement:
                refinable.iterations = i+1
                return refinable

        refinable.iterations = max_iterations
        return refinable 
Developer: fabi92, Project: eccv18-rgb_pose_refinement, Lines: 35, Source: refiner.py

Example 6: generate_training_output

# Required module: import cv2 [as alias]
# Or: from cv2 import hconcat [as alias]
def generate_training_output(colors_1, scaled_depth_maps_1, boundaries, intrinsic_matrices, is_hsv, epoch,
                             results_root):
    color_inputs_cpu = colors_1.data.cpu().numpy()
    pred_depths_cpu = scaled_depth_maps_1.data.cpu().numpy()
    boundaries_cpu = boundaries.data.cpu().numpy()
    intrinsics_cpu = intrinsic_matrices.data.cpu().numpy()
    color_imgs = []
    pred_depth_imgs = []

    for j in range(colors_1.shape[0]):
        color_img = color_inputs_cpu[j]
        pred_depth_img = pred_depths_cpu[j]

        color_img = np.moveaxis(color_img, source=[0, 1, 2], destination=[2, 0, 1])
        color_img = color_img * 0.5 + 0.5
        color_img[color_img < 0.0] = 0.0
        color_img[color_img > 1.0] = 1.0
        color_img = np.uint8(255 * color_img)
        if is_hsv:
            color_img = cv2.cvtColor(color_img, cv2.COLOR_HSV2BGR_FULL)

        pred_depth_img = np.moveaxis(pred_depth_img, source=[0, 1, 2], destination=[2, 0, 1])

        if j == 0:
            # Write point cloud
            boundary = boundaries_cpu[j]
            intrinsic = intrinsics_cpu[j]
            boundary = np.moveaxis(boundary, source=[0, 1, 2], destination=[2, 0, 1])
            point_cloud = point_cloud_from_depth(pred_depth_img, color_img, boundary,
                                                 intrinsic,
                                                 point_cloud_downsampling=1)
            write_point_cloud(
                str(results_root / "point_cloud_epoch_{epoch}_index_{index}.ply".format(epoch=epoch,
                                                                                        index=j)),
                point_cloud)

        color_img = cv2.resize(color_img, dsize=(300, 300))
        pred_depth_img = cv2.resize(pred_depth_img, dsize=(300, 300))
        color_imgs.append(color_img)

        if j == 0:
            histr = cv2.calcHist([pred_depth_img], [0], None, histSize=[100], ranges=[0, 1000])
            plt.plot(histr, color='b')
            plt.xlim([0, 40])
            plt.savefig(
                str(results_root / 'generated_depth_hist_{epoch}.jpg'.format(epoch=epoch)))
            plt.clf()
        display_depth_img = display_depth_map(pred_depth_img)
        pred_depth_imgs.append(display_depth_img)

    final_color = color_imgs[0]
    final_pred_depth = pred_depth_imgs[0]
    for j in range(colors_1.shape[0] - 1):
        final_color = cv2.hconcat((final_color, color_imgs[j + 1]))
        final_pred_depth = cv2.hconcat((final_pred_depth, pred_depth_imgs[j + 1]))

    final = cv2.vconcat((final_color, final_pred_depth))
    cv2.imwrite(str(results_root / 'generated_mask_{epoch}.jpg'.format(epoch=epoch)),
                final) 
Developer: lppllppl920, Project: EndoscopyDepthEstimation-Pytorch, Lines: 61, Source: utils.py
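A design note on the final loop in Example 6: since cv2.hconcat and cv2.vconcat both accept a list of images directly, the pairwise concatenation could be collapsed into one call per row (a sketch, assuming all tiles already share the same size, dtype, and channel count):

final = cv2.vconcat([cv2.hconcat(color_imgs), cv2.hconcat(pred_depth_imgs)])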


Note: the cv2.hconcat examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Refer to each project's License before redistributing or reusing the code, and do not reproduce this article without permission.