

Python cv2.vconcat Method Code Examples

This article collects typical usage examples of the cv2.vconcat method in Python. If you are wondering what cv2.vconcat does, how to call it, or what its usage looks like in practice, the selected examples below should help. You can also explore further usage examples from the cv2 module.


Two code examples of the cv2.vconcat method are shown below, listed in order of popularity by default.
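Before the full examples, here is a minimal sketch of what cv2.vconcat does: it stacks images vertically, so all inputs must share the same width and dtype (the arrays below are synthetic placeholders):

import cv2
import numpy as np

# Two images with the same width and dtype can be stacked vertically.
top = np.full((100, 400, 3), 255, dtype=np.uint8)   # white instruction strip
bottom = np.zeros((300, 400, 3), dtype=np.uint8)    # black frame
stacked = cv2.vconcat([top, bottom])                 # result shape: (400, 400, 3)
print(stacked.shape)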

Example 1: define_new_pose_configuration

# Required module: import cv2 [as alias]
# Or: from cv2 import vconcat [as alias]
# This example also uses: numpy (np), imutils, random, os, glob
def define_new_pose_configuration(configName, noAnimals, noBps, Imagepath, BpNameList, animalNumber):
    global ix, iy
    global centerCordStatus

    def draw_circle(event,x,y,flags,param):
        global ix,iy
        global centerCordStatus
        if (event == cv2.EVENT_LBUTTONDBLCLK):
            if centerCordStatus == False:
                # The click arrives in the coordinates of the concatenated image
                # (instruction strip on top), so the strip height is subtracted from y.
                cv2.circle(overlay,(x,y-sideImageHeight),10,colorList[-i],-1)
                cv2.putText(overlay,str(bpNumber+1), (x+4,y-sideImageHeight), cv2.FONT_HERSHEY_SIMPLEX, 0.7, colorList[i], 2)
                cv2.imshow('Define pose', overlay)
                centerCordStatus = True
    im = cv2.imread(Imagepath)
    imHeight, imWidth = im.shape[0], im.shape[1]
    if imWidth < 300:
        im = imutils.resize(im, width=800)
        imHeight, imWidth = im.shape[0], im.shape[1]
        im = np.uint8(im)
    fontScale = max(imWidth, imHeight) / (max(imWidth, imHeight) * 1.2)
    cv2.namedWindow('Define pose', cv2.WINDOW_NORMAL)
    overlay = im.copy()
    colorList = []
    for color in range(len(BpNameList)):
        r, g, b = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
        colorTuple = (r, g, b)
        colorList.append(colorTuple)
    for i in range(len(BpNameList)):
        cv2.namedWindow('Define pose', cv2.WINDOW_NORMAL)
        centerCordStatus = False
        bpNumber = i
        # Black strip holding the instruction text; it is stacked above the frame with cv2.vconcat.
        sideImage = np.zeros((100, imWidth, 3), np.uint8)
        sideImageHeight, sideImageWidth = sideImage.shape[0], sideImage.shape[1]
        cv2.putText(sideImage, 'Double left click ' + BpNameList[i] + '. Press ESC to continue.', (10, 50), cv2.FONT_HERSHEY_SIMPLEX, fontScale, colorList[i], 2)
        ix, iy = -1, -1
        while (1):
            cv2.setMouseCallback('Define pose', draw_circle)
            # Stack the instruction strip on top of the (possibly annotated) frame.
            imageConcat = cv2.vconcat([sideImage, overlay])
            cv2.imshow('Define pose', imageConcat)
            k = cv2.waitKey(20) & 0xFF
            if k == 27:  # ESC closes the window and moves on to the next body part
                cv2.destroyWindow('Define pose')
                break

    overlay = cv2.resize(overlay, (250,300))
    imagePath = os.path.join(os.getcwd(), 'pose_configurations', 'schematics')
    namePath = os.path.join(os.getcwd(), 'pose_configurations', 'configuration_names', 'pose_config_names.csv')
    bpPath = os.path.join(os.getcwd(), 'pose_configurations', 'bp_names', 'bp_names.csv')
    noAnimalsPath = os.path.join(os.getcwd(), 'pose_configurations', 'no_animals', 'no_animals.csv')
    imageNos = len(glob.glob(imagePath + '/*.png'))
    newImageName = 'Picture' + str(imageNos+1) + '.png'
    imageOutPath = os.path.join(imagePath, newImageName)
    BpNameList = ','.join(BpNameList)

    with open(namePath, 'a') as fd:
        fd.write(configName + '\n')
    with open(bpPath, 'a') as fd:
        fd.write(BpNameList + '\n')
    with open(noAnimalsPath, 'a') as fd:
        fd.write(str(animalNumber) + '\n')
    cv2.imwrite(imageOutPath, overlay) 
Developer: sgoldenlab, Project: simba, Source file: define_new_pose_config.py
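A hedged usage sketch for Example 1, assuming the function is importable from SimBA's define_new_pose_config module; every argument value below is a placeholder, not taken from the project:

# Hypothetical call with placeholder values; the window is interactive, so each
# body part is marked by double-clicking and confirmed with ESC.
define_new_pose_configuration(
    configName='my_pose_config',
    noAnimals=1,
    noBps=3,
    Imagepath='example_frame.png',
    BpNameList=['Nose', 'Left_ear', 'Right_ear'],
    animalNumber='1')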

Example 2: generate_training_output

# Required module: import cv2 [as alias]
# Or: from cv2 import vconcat [as alias]
# This example also uses: numpy (np), matplotlib.pyplot (plt), and the module's own
# point_cloud_from_depth, write_point_cloud and display_depth_map helpers
def generate_training_output(colors_1, scaled_depth_maps_1, boundaries, intrinsic_matrices, is_hsv, epoch,
                             results_root):
    color_inputs_cpu = colors_1.data.cpu().numpy()
    pred_depths_cpu = scaled_depth_maps_1.data.cpu().numpy()
    boundaries_cpu = boundaries.data.cpu().numpy()
    intrinsics_cpu = intrinsic_matrices.data.cpu().numpy()
    color_imgs = []
    pred_depth_imgs = []

    for j in range(colors_1.shape[0]):
        color_img = color_inputs_cpu[j]
        pred_depth_img = pred_depths_cpu[j]

        color_img = np.moveaxis(color_img, source=[0, 1, 2], destination=[2, 0, 1])
        # Undo the [-1, 1] normalisation, clamp to [0, 1] and convert to 8-bit below.
        color_img = color_img * 0.5 + 0.5
        color_img[color_img < 0.0] = 0.0
        color_img[color_img > 1.0] = 1.0
        color_img = np.uint8(255 * color_img)
        if is_hsv:
            color_img = cv2.cvtColor(color_img, cv2.COLOR_HSV2BGR_FULL)

        pred_depth_img = np.moveaxis(pred_depth_img, source=[0, 1, 2], destination=[2, 0, 1])

        if j == 0:
            # Write point cloud
            boundary = boundaries_cpu[j]
            intrinsic = intrinsics_cpu[j]
            boundary = np.moveaxis(boundary, source=[0, 1, 2], destination=[2, 0, 1])
            point_cloud = point_cloud_from_depth(pred_depth_img, color_img, boundary,
                                                 intrinsic,
                                                 point_cloud_downsampling=1)
            write_point_cloud(
                str(results_root / "point_cloud_epoch_{epoch}_index_{index}.ply".format(epoch=epoch,
                                                                                        index=j)),
                point_cloud)

        color_img = cv2.resize(color_img, dsize=(300, 300))
        pred_depth_img = cv2.resize(pred_depth_img, dsize=(300, 300))
        color_imgs.append(color_img)

        if j == 0:
            histr = cv2.calcHist([pred_depth_img], [0], None, histSize=[100], ranges=[0, 1000])
            plt.plot(histr, color='b')
            plt.xlim([0, 40])
            plt.savefig(
                str(results_root / 'generated_depth_hist_{epoch}.jpg'.format(epoch=epoch)))
            plt.clf()
        display_depth_img = display_depth_map(pred_depth_img)
        pred_depth_imgs.append(display_depth_img)

    # Build one row of colour frames and one row of depth maps with hconcat,
    # then stack the two rows with vconcat.
    final_color = color_imgs[0]
    final_pred_depth = pred_depth_imgs[0]
    for j in range(colors_1.shape[0] - 1):
        final_color = cv2.hconcat((final_color, color_imgs[j + 1]))
        final_pred_depth = cv2.hconcat((final_pred_depth, pred_depth_imgs[j + 1]))

    final = cv2.vconcat((final_color, final_pred_depth))
    cv2.imwrite(str(results_root / 'generated_mask_{epoch}.jpg'.format(epoch=epoch)),
                final) 
Developer: lppllppl920, Project: EndoscopyDepthEstimation-Pytorch, Source file: utils.py
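The montage step at the end of Example 2 is a general pattern: cv2.hconcat builds each row and cv2.vconcat stacks the rows. A standalone sketch of just that pattern with synthetic images (all file names and values here are illustrative):

import cv2
import numpy as np

# Synthetic stand-ins for a batch of colour frames and depth visualisations.
color_imgs = [np.full((300, 300, 3), 50 * (i + 1), dtype=np.uint8) for i in range(4)]
depth_imgs = [np.full((300, 300, 3), 255 - 50 * i, dtype=np.uint8) for i in range(4)]

color_row = cv2.hconcat(color_imgs)          # one row of colour frames
depth_row = cv2.hconcat(depth_imgs)          # one row of depth maps
grid = cv2.vconcat([color_row, depth_row])   # stack the two rows into a 2x4 montage
cv2.imwrite('montage_preview.jpg', grid)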


Note: The cv2.vconcat examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by various developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.