Python cv2.hconcat Method Code Examples

This article collects typical usage examples of the cv2.hconcat method in Python. If you have been wondering how exactly to use cv2.hconcat, what it does, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the cv2 module.


The following presents 6 code examples of the cv2.hconcat method, sorted by popularity by default.
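
Before the examples, a minimal self-contained sketch of what cv2.hconcat does may be useful: it stacks images side by side, and all inputs must share the same height, dtype, and channel count (the arrays below are synthetic placeholders).

import cv2
import numpy as np

# Two images with the same height, dtype and channel count
left = np.zeros((100, 150, 3), dtype=np.uint8)        # black 150x100 image
right = np.full((100, 200, 3), 255, dtype=np.uint8)   # white 200x100 image

combined = cv2.hconcat([left, right])
print(combined.shape)  # (100, 350, 3)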

Example 1: solve_head_pose

# Required import: import cv2 [as alias]
# Or alternatively: from cv2 import hconcat [as alias]
def solve_head_pose(self, face_landmarks):
        indices = [17, 21, 22, 26, 36, 39, 42, 45, 31, 35]  # dlib 68-point landmarks: eyebrow, eye and nose corners
        image_pts = np.zeros((len(indices), 2))
        for i in range(len(indices)):
            part = face_landmarks.part(indices[i])
            image_pts[i, 0] = part.x
            image_pts[i, 1] = part.y

        # Recover the head's rotation and translation from 3D-2D correspondences
        _, rotation_vec, translation_vec = cv2.solvePnP(self.face_model_points,
                                                        image_pts,
                                                        self.camera_matrix,
                                                        self.distortion_coeffs)
        projected_head_pose_box_points, _ = cv2.projectPoints(self.head_pose_box_points,
                                                              rotation_vec,
                                                              translation_vec,
                                                              self.camera_matrix,
                                                              self.distortion_coeffs)
        projected_head_pose_box_points = tuple(map(tuple, projected_head_pose_box_points.reshape(8, 2)))

        # Build the 3x4 pose matrix [R|t] and decompose it into Euler angles
        rotation_mat, _ = cv2.Rodrigues(rotation_vec)
        pose_mat = cv2.hconcat((rotation_mat, translation_vec))
        _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
        return projected_head_pose_box_points, euler_angles 
Developer: pkhungurn, Project: talking-head-anime-demo, Lines of code: 26, Source: head_pose_solver.py
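
Examples 1 and 2 both use the same idiom around cv2.hconcat: convert the rotation vector to a 3x3 matrix with cv2.Rodrigues, concatenate it horizontally with the 3x1 translation vector into a 3x4 pose matrix [R|t], and pass that to cv2.decomposeProjectionMatrix to read off Euler angles. A minimal self-contained sketch of just this idiom, using a synthetic pose (the values are illustrative only):

import cv2
import numpy as np

rotation_vec = np.array([[0.1], [0.2], [0.3]])      # synthetic Rodrigues rotation vector
translation_vec = np.array([[0.0], [0.0], [10.0]])  # synthetic translation

rotation_mat, _ = cv2.Rodrigues(rotation_vec)            # 3x3 rotation matrix
pose_mat = cv2.hconcat((rotation_mat, translation_vec))  # 3x4 [R|t] pose matrix
_, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
print(euler_angles.flatten())  # pitch, yaw, roll in degrees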

Example 2: get_head_pose

# Required import: import cv2 [as alias]
# Or alternatively: from cv2 import hconcat [as alias]
def get_head_pose(shape,img):
    h,w,_=img.shape
    # Approximate pinhole intrinsics: focal length ~ image width, principal point at center
    K = [w, 0.0, w//2,
         0.0, w, h//2,
         0.0, 0.0, 1.0]
    # Assuming no lens distortion
    D = [0, 0, 0.0, 0.0, 0]

    cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
    dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)

    # image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
    #                         shape[39], shape[42], shape[45], shape[31], shape[35],
    #                         shape[48], shape[54], shape[57], shape[8]])
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35]])
    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # calc euler angle
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, euler_angle 
Developer: 610265158, Project: face_landmark, Lines of code: 33, Source: headpose.py

Example 3: image_hcombine

# Required import: import cv2 [as alias]
# Or alternatively: from cv2 import hconcat [as alias]
def image_hcombine(im_info1, im_info2):
    img1 = im_info1[0]
    img2 = im_info2[0]
    color_flag1 = im_info1[1]
    color_flag2 = im_info2[1]
 
    if color_flag1 == 1:
        h1, w1, ch1 = img1.shape[:3]
    else:
        h1, w1 = img1.shape[:2]
 
    if color_flag2 == 1:
        h2, w2, ch2 = img2.shape[:3]
    else:
        h2, w2 = img2.shape[:2]
 
    if h1 < h2:
        # Scale img1 to img2's height, preserving its aspect ratio
        w1 = int((h2 / h1) * w1)
        h1 = h2
        img1 = cv2.resize(img1, (w1, h1))
    else:
        # Scale img2 to img1's height, preserving its aspect ratio
        w2 = int((h1 / h2) * w2)
        h2 = h1
        img2 = cv2.resize(img2, (w2, h2))
 
    # cv2.hconcat requires matching channel counts; promote grayscale to BGR if mixed
    if color_flag1 != color_flag2:
        if color_flag1 == 1:
            img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
        else:
            img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
    img = cv2.hconcat([img1, img2])
    return img 
Developer: PINTO0309, Project: PINTO_model_zoo, Lines of code: 29, Source: movie_concat.py
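
A hypothetical usage sketch for image_hcombine as defined above, pairing each image with its color flag (the file names are placeholders):

import cv2

img_color = cv2.imread('left.jpg')                         # 3-channel BGR image
img_gray = cv2.imread('right.jpg', cv2.IMREAD_GRAYSCALE)   # single-channel image

# Each argument is an (image, color_flag) pair: 1 for color, 0 for grayscale
combined = image_hcombine((img_color, 1), (img_gray, 0))
cv2.imwrite('combined.jpg', combined)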

Example 4: detectTime

# Required import: import cv2 [as alias]
# Or alternatively: from cv2 import hconcat [as alias]
def detectTime(self, time_binary):
#        img_gray = cv2.cvtColor(time_img, cv2.COLOR_BGR2GRAY)
#        ret, thresh1 = cv2.threshold(img_gray, 230, 255, cv2.THRESH_BINARY_INV)
        final_img = np.zeros((time_binary.shape[0], int(time_binary.shape[1] * 0.25)), np.uint8)
        right_img = np.zeros((time_binary.shape[0], int(time_binary.shape[1] * 0.15)), np.uint8)
        separate_img = np.zeros((time_binary.shape[0], int(time_binary.shape[1] * 0.1)), np.uint8)
        profile = []
        letter_start = []
        letter_end = []
        count = 0
        valley_threshold = 256  # a column needs more than one white (255) pixel to belong to a letter
        # Locate letter boundaries from the vertical projection profile
        for i in range(time_binary.shape[1]):
            sum_vertical = sum(time_binary[:, i])
            profile.append(sum_vertical)
            if len(letter_start) == len(letter_end):
                if sum_vertical > valley_threshold:
                    letter_start.append(i)
            else:
                if sum_vertical <= valley_threshold:
                    letter_end.append(i)
                    count = count + 1
        # Add blank(black) space between letters
        for i in range(count):
            final_img = cv2.hconcat([final_img, time_binary[0:time_binary.shape[0], letter_start[i]:letter_end[i]]])
            final_img = cv2.hconcat([final_img, separate_img])
        final_img = cv2.hconcat([final_img, right_img])
        kernel = np.ones((2, 2), np.uint8)
        final_img = cv2.dilate(final_img, kernel, iterations=1)
        cv2.imwrite(self.timefile, final_img)
        text = pytesseract.image_to_string(Image.open(self.timefile),
                                           config='-c tessedit_char_whitelist=1234567890:~-AMP -psm 7')
        return text 
Developer: mzsmakr, Project: PGSS, Lines of code: 35, Source: raidnearby.py
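
detectTime grows final_img by calling cv2.hconcat repeatedly inside the loop. An equivalent pattern that avoids the repeated reallocation is to collect the segments in a list and concatenate once at the end; a small sketch with synthetic segment arrays (shapes are illustrative only):

import cv2
import numpy as np

h = 32
segments = [np.zeros((h, 8), np.uint8)]              # leading black padding
for _ in range(3):                                   # stand-ins for cropped letters
    segments.append(np.full((h, 12), 255, np.uint8))
    segments.append(np.zeros((h, 4), np.uint8))      # black separator between letters
final_img = cv2.hconcat(segments)                    # single concatenation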

Example 5: iterative_contour_alignment

# Required import: import cv2 [as alias]
# Or alternatively: from cv2 import hconcat [as alias]
def iterative_contour_alignment(self, refinable, max_iterations=3,
                                    min_rotation_displacement=0.5,
                                    min_translation_displacement=0.0025, display=False):
        assert refinable is not None

        last_pose = np.copy(refinable.hypo_pose)
        for i in range(max_iterations):

            refinable = self.refine(refinable=refinable)

            last_trans = last_pose[:3, 3]
            last_rot = Quaternion(matrix2quaternion(last_pose[:3, :3]))

            cur_trans = refinable.hypo_pose[:3, 3]
            cur_rot = Quaternion(matrix2quaternion(refinable.hypo_pose[:3, :3]))

            trans_diff = np.linalg.norm(cur_trans - last_trans)
            update_q = cur_rot * last_rot.inverse
            angular_diff = np.abs((update_q).degrees)

            last_pose = np.copy(refinable.hypo_pose)

            if display:
                concat = cv2.hconcat([refinable.input_col, refinable.hypo_col])
                cv2.imshow('test', concat)
                cv2.waitKey(500)

            if angular_diff <= min_rotation_displacement and trans_diff <= min_translation_displacement:
                refinable.iterations = i+1
                return refinable

        refinable.iterations = max_iterations
        return refinable 
Developer: fabi92, Project: eccv18-rgb_pose_refinement, Lines of code: 35, Source: refiner.py

Example 6: generate_training_output

# Required import: import cv2 [as alias]
# Or alternatively: from cv2 import hconcat [as alias]
def generate_training_output(colors_1, scaled_depth_maps_1, boundaries, intrinsic_matrices, is_hsv, epoch,
                             results_root):
    color_inputs_cpu = colors_1.data.cpu().numpy()
    pred_depths_cpu = scaled_depth_maps_1.data.cpu().numpy()
    boundaries_cpu = boundaries.data.cpu().numpy()
    intrinsics_cpu = intrinsic_matrices.data.cpu().numpy()
    color_imgs = []
    pred_depth_imgs = []

    for j in range(colors_1.shape[0]):
        color_img = color_inputs_cpu[j]
        pred_depth_img = pred_depths_cpu[j]

        color_img = np.moveaxis(color_img, source=[0, 1, 2], destination=[2, 0, 1])
        color_img = color_img * 0.5 + 0.5
        color_img[color_img < 0.0] = 0.0
        color_img[color_img > 1.0] = 1.0
        color_img = np.uint8(255 * color_img)
        if is_hsv:
            color_img = cv2.cvtColor(color_img, cv2.COLOR_HSV2BGR_FULL)

        pred_depth_img = np.moveaxis(pred_depth_img, source=[0, 1, 2], destination=[2, 0, 1])

        if j == 0:
            # Write point cloud
            boundary = boundaries_cpu[j]
            intrinsic = intrinsics_cpu[j]
            boundary = np.moveaxis(boundary, source=[0, 1, 2], destination=[2, 0, 1])
            point_cloud = point_cloud_from_depth(pred_depth_img, color_img, boundary,
                                                 intrinsic,
                                                 point_cloud_downsampling=1)
            write_point_cloud(
                str(results_root / "point_cloud_epoch_{epoch}_index_{index}.ply".format(epoch=epoch,
                                                                                        index=j)),
                point_cloud)

        color_img = cv2.resize(color_img, dsize=(300, 300))
        pred_depth_img = cv2.resize(pred_depth_img, dsize=(300, 300))
        color_imgs.append(color_img)

        if j == 0:
            histr = cv2.calcHist([pred_depth_img], [0], None, histSize=[100], ranges=[0, 1000])
            plt.plot(histr, color='b')
            plt.xlim([0, 40])
            plt.savefig(
                str(results_root / 'generated_depth_hist_{epoch}.jpg'.format(epoch=epoch)))
            plt.clf()
        display_depth_img = display_depth_map(pred_depth_img)
        pred_depth_imgs.append(display_depth_img)

    final_color = color_imgs[0]
    final_pred_depth = pred_depth_imgs[0]
    for j in range(colors_1.shape[0] - 1):
        final_color = cv2.hconcat((final_color, color_imgs[j + 1]))
        final_pred_depth = cv2.hconcat((final_pred_depth, pred_depth_imgs[j + 1]))

    final = cv2.vconcat((final_color, final_pred_depth))
    cv2.imwrite(str(results_root / 'generated_mask_{epoch}.jpg'.format(epoch=epoch)),
                final) 
Developer: lppllppl920, Project: EndoscopyDepthEstimation-Pytorch, Lines of code: 61, Source: utils.py
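
Example 6 ends with a common tiling idiom: build each row with repeated cv2.hconcat calls, then stack the rows with cv2.vconcat. A minimal sketch of the same grid pattern using synthetic tiles:

import cv2
import numpy as np

# Four synthetic 100x100 tiles of increasing brightness
tiles = [np.full((100, 100, 3), v, dtype=np.uint8) for v in (0, 85, 170, 255)]

top_row = cv2.hconcat(tiles[:2])            # 100x200 row
bottom_row = cv2.hconcat(tiles[2:])         # 100x200 row
grid = cv2.vconcat([top_row, bottom_row])   # 200x200 grid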


Note: the cv2.hconcat method examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors; for distribution and use, please refer to the corresponding project's License. Do not reproduce without permission.