

Python cv2.COLOR_Lab2BGR Code Examples

This article collects typical usage examples of cv2.COLOR_Lab2BGR in Python (a color-conversion flag passed to cv2.cvtColor). If you are unsure what cv2.COLOR_Lab2BGR does, how to use it, or what real code that uses it looks like, the hand-picked examples below should help. You can also browse further usage examples from the cv2 module.


Three code examples of cv2.COLOR_Lab2BGR are shown below, sorted by popularity by default.
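
Before the individual examples, here is a minimal round-trip sketch (the file name is just an illustration): COLOR_Lab2BGR maps an image from the CIE Lab color space back to OpenCV's BGR channel order, i.e. the inverse of cv2.COLOR_BGR2Lab.

import cv2

# Hypothetical input file; any 8-bit BGR image will do.
img_bgr = cv2.imread('input.jpg')

# BGR -> Lab, then Lab -> BGR again using COLOR_Lab2BGR.
img_lab = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2Lab)
img_back = cv2.cvtColor(img_lab, cv2.COLOR_Lab2BGR)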

Example 1: equalize_clahe_color_lab

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_Lab2BGR [as alias]
def equalize_clahe_color_lab(img):
    """Equalize the image splitting it after conversion to LAB and applying CLAHE
    to the L channel and merging the channels and convert back to BGR
    """

    # CLAHE with a clip limit of 4.0 (and the default 8x8 tile grid).
    cla = cv2.createCLAHE(clipLimit=4.0)
    # Convert to Lab and equalize only the lightness channel.
    L, a, b = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2Lab))
    eq_L = cla.apply(L)
    # Merge the equalized L back with the a/b channels and return a BGR image.
    eq_image = cv2.cvtColor(cv2.merge([eq_L, a, b]), cv2.COLOR_Lab2BGR)
    return eq_image
Author: PacktPublishing  Project: Mastering-OpenCV-4-with-Python  Lines: 12  Source: clahe_histogram_equalization.py
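
A minimal usage sketch for the function above; the file names are placeholders, not part of the original example:

import cv2

img = cv2.imread('photo.jpg')          # any 8-bit BGR image
eq = equalize_clahe_color_lab(img)     # CLAHE applied to the L channel only
cv2.imwrite('photo_clahe.jpg', eq)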

Example 2: postprocessing

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_Lab2BGR [as alias]
def postprocessing(self, res, img_l, output_blob, img_size):
    # Weight the network output by the color coefficients and sum over the
    # color-bin axis to obtain the predicted a/b channel values.
    update_res = (res[output_blob] * self.color_coeff.transpose()[:, :, np.newaxis, np.newaxis]).sum(1)

    # Channels-first -> channels-last, then resize to the target image size.
    out = update_res.transpose((1, 2, 0)).astype(np.float32)
    out = cv2.resize(out, img_size)
    # Reattach the original L channel and convert the Lab result back to BGR.
    img_lab_out = np.concatenate((img_l[:, :, np.newaxis], out), axis=2)
    new_result = [np.clip(cv2.cvtColor(img_lab_out, cv2.COLOR_Lab2BGR), 0, 1)]
    return new_result
Author: opencv  Project: open_model_zoo  Lines: 10  Source: colorization_evaluator.py
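
The essential step in this example is recombining the predicted a/b channels with the original L channel and converting the float32 Lab image back to BGR via cv2.COLOR_Lab2BGR. A standalone sketch of that recombination (the array shapes and value ranges are assumptions, not taken from the evaluator):

import cv2
import numpy as np

def lab_to_bgr(img_l, ab_pred):
    # img_l: (H, W) float32 lightness in [0, 100]
    # ab_pred: (H, W, 2) float32 a/b channels, roughly in [-127, 127]
    lab = np.concatenate((img_l[:, :, np.newaxis], ab_pred), axis=2).astype(np.float32)
    # For float32 input, cv2.cvtColor returns BGR values in [0, 1].
    return np.clip(cv2.cvtColor(lab, cv2.COLOR_Lab2BGR), 0, 1)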

Example 3: hair

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_Lab2BGR [as alias]
def hair(image, parsing, part=17, color=[230, 50, 20]):
    # Build a solid image filled with the target color (BGR order).
    b, g, r = color      # e.g. [10, 50, 250] or [10, 250, 10]
    tar_color = np.zeros_like(image)
    tar_color[:, :, 0] = b
    tar_color[:, :, 1] = g
    tar_color[:, :, 2] = r

    image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    tar_hsv = cv2.cvtColor(tar_color, cv2.COLOR_BGR2HSV)

    # For the lip labels (12, 13) replace hue and saturation; otherwise replace hue only.
    if part == 12 or part == 13:
        image_hsv[:, :, 0:2] = tar_hsv[:, :, 0:2]
    else:
        image_hsv[:, :, 0:1] = tar_hsv[:, :, 0:1]

    changed = cv2.cvtColor(image_hsv, cv2.COLOR_HSV2BGR)

    # Hair (label 17) gets an extra sharpening pass.
    if part == 17:
        changed = sharpen(changed)

    # Keep the original pixels everywhere outside the selected parsing label.
    changed[parsing != part] = image[parsing != part]
    # changed = cv2.resize(changed, (512, 512))
    return changed

#
# def lip(image, parsing, part=17, color=[230, 50, 20]):
#     b, g, r = color      #[10, 50, 250]       # [10, 250, 10]
#     tar_color = np.zeros_like(image)
#     tar_color[:, :, 0] = b
#     tar_color[:, :, 1] = g
#     tar_color[:, :, 2] = r
#
#     image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
#     il, ia, ib = cv2.split(image_lab)
#
#     tar_lab = cv2.cvtColor(tar_color, cv2.COLOR_BGR2Lab)
#     tl, ta, tb = cv2.split(tar_lab)
#
#     image_lab[:, :, 0] = np.clip(il - np.mean(il) + tl, 0, 100)
#     image_lab[:, :, 1] = np.clip(ia - np.mean(ia) + ta, -127, 128)
#     image_lab[:, :, 2] = np.clip(ib - np.mean(ib) + tb, -127, 128)
#
#
#     changed = cv2.cvtColor(image_lab, cv2.COLOR_Lab2BGR)
#
#     if part == 17:
#         changed = sharpen(changed)
#
#     changed[parsing != part] = image[parsing != part]
#     # changed = cv2.resize(changed, (512, 512))
#     return changed 
Author: zllrunning  Project: face-parsing.PyTorch  Lines: 53  Source: makeup.py
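
A hedged usage sketch for hair(): the file paths are placeholders, the parsing array is assumed to be an integer label map from the face-parsing network (label 17 = hair), and sharpen() is defined elsewhere in makeup.py rather than in the snippet above.

import cv2
import numpy as np

image = cv2.imread('face.jpg')           # hypothetical BGR photo
parsing = np.load('face_parsing.npy')    # hypothetical precomputed label map, same H x W

recolored = hair(image, parsing, part=17, color=[230, 50, 20])
cv2.imwrite('face_recolored.jpg', recolored)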


Note: The cv2.COLOR_Lab2BGR examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are taken from open-source projects contributed by their authors; copyright remains with the original authors, and distribution or use should follow each project's license. Please do not reproduce without permission.