This article collects typical usage examples of cv2.COLOR_Lab2BGR in Python. If you have been wondering what cv2.COLOR_Lab2BGR is for, how to use it, or what it looks like in practice, the curated examples below may help. Strictly speaking, COLOR_Lab2BGR is a color-conversion flag (an attribute of the cv2 module) passed to cv2.cvtColor rather than a method; you can also explore further usage examples from the cv2 module it belongs to.
The following shows 3 code examples using cv2.COLOR_Lab2BGR, sorted by popularity by default.
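Before the examples, a minimal round-trip sketch may help orient readers (the file names are placeholders): convert a BGR image to Lab, modify the channels, and convert back with cv2.COLOR_Lab2BGR.

import cv2

img = cv2.imread("input.jpg")                # placeholder path; 8-bit BGR image
lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)   # BGR -> Lab
# ... modify the L, a, b channels here ...
bgr = cv2.cvtColor(lab, cv2.COLOR_Lab2BGR)   # Lab -> BGR
cv2.imwrite("output.jpg", bgr)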
Example 1: equalize_clahe_color_lab
# Required import: import cv2 [as alias]
# Alternatively: from cv2 import COLOR_Lab2BGR [as alias]
def equalize_clahe_color_lab(img):
    """Equalize a BGR image by converting it to Lab, applying CLAHE to the
    L (lightness) channel, merging the channels, and converting back to BGR.
    """
    cla = cv2.createCLAHE(clipLimit=4.0)
    L, a, b = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2Lab))
    eq_L = cla.apply(L)  # contrast-limited equalization on lightness only
    eq_image = cv2.cvtColor(cv2.merge([eq_L, a, b]), cv2.COLOR_Lab2BGR)
    return eq_image
Developer: PacktPublishing | Project: Mastering-OpenCV-4-with-Python | Lines: 12 | Source: clahe_histogram_equalization.py
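A usage sketch for Example 1 (the image path is a placeholder; assumes an 8-bit BGR input):

import cv2

image = cv2.imread("portrait.jpg")           # placeholder path
equalized = equalize_clahe_color_lab(image)
cv2.imwrite("portrait_clahe.jpg", equalized)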
Example 2: postprocessing
# Required import: import cv2 [as alias]
# Alternatively: from cv2 import COLOR_Lab2BGR [as alias]
def postprocessing(self, res, img_l, output_blob, img_size):
    # Weighted sum of the network output and the color coefficients over
    # axis 1 yields the predicted color channels in CHW layout.
    update_res = (res[output_blob] * self.color_coeff.transpose()[:, :, np.newaxis, np.newaxis]).sum(1)
    out = update_res.transpose((1, 2, 0)).astype(np.float32)  # CHW -> HWC
    out = cv2.resize(out, img_size)
    # Reattach the original L channel to the predicted a/b channels to form a Lab image.
    img_lab_out = np.concatenate((img_l[:, :, np.newaxis], out), axis=2)
    new_result = [np.clip(cv2.cvtColor(img_lab_out, cv2.COLOR_Lab2BGR), 0, 1)]
    return new_result
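To illustrate the final step in isolation, a minimal sketch with synthetic data (shapes and values are illustrative, not from the original project): OpenCV's floating-point Lab convention expects L in 0..100 and a/b roughly in -127..127, and COLOR_Lab2BGR then yields float BGR nominally in [0, 1], which is why the result is clipped.

import cv2
import numpy as np

h, w = 224, 224
img_l = np.full((h, w), 50.0, dtype=np.float32)   # mid-gray L channel
ab = np.zeros((h, w, 2), dtype=np.float32)        # neutral a/b channels
img_lab = np.concatenate((img_l[:, :, np.newaxis], ab), axis=2)
bgr = np.clip(cv2.cvtColor(img_lab, cv2.COLOR_Lab2BGR), 0, 1)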
Example 3: hair
# Required import: import cv2 [as alias]
# Alternatively: from cv2 import COLOR_Lab2BGR [as alias]
def hair(image, parsing, part=17, color=[230, 50, 20]):
    b, g, r = color  # alternative example colors: [10, 50, 250], [10, 250, 10]
    # Build a solid image filled with the target color (BGR order).
    tar_color = np.zeros_like(image)
    tar_color[:, :, 0] = b
    tar_color[:, :, 1] = g
    tar_color[:, :, 2] = r
    image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    tar_hsv = cv2.cvtColor(tar_color, cv2.COLOR_BGR2HSV)
    if part == 12 or part == 13:
        # Parts 12/13 (lip labels in common face-parsing schemes): replace hue and saturation.
        image_hsv[:, :, 0:2] = tar_hsv[:, :, 0:2]
    else:
        # Other parts (e.g. 17, hair): replace hue only, keeping saturation and value.
        image_hsv[:, :, 0:1] = tar_hsv[:, :, 0:1]
    changed = cv2.cvtColor(image_hsv, cv2.COLOR_HSV2BGR)
    if part == 17:
        changed = sharpen(changed)  # sharpen() is defined elsewhere in the original file
    # Restore the original pixels everywhere outside the target part.
    changed[parsing != part] = image[parsing != part]
    # changed = cv2.resize(changed, (512, 512))
    return changed
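A usage sketch for Example 3. The image path and parsing map are placeholders, and sharpen() below is a hypothetical stand-in (a simple unsharp mask) for the helper defined elsewhere in the original file:

import cv2
import numpy as np

def sharpen(img):
    # Hypothetical stand-in for the repo's sharpen(): basic unsharp masking.
    blur = cv2.GaussianBlur(img, (0, 0), 3)
    return cv2.addWeighted(img, 1.5, blur, -0.5, 0)

image = cv2.imread("face.png")            # placeholder path
parsing = np.load("face_parsing.npy")     # placeholder per-pixel label map, same H x W as image
recolored = hair(image, parsing, part=17, color=[230, 50, 20])
cv2.imwrite("face_recolored.png", recolored)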
#
# Alternative `lip` variant, commented out in the original source: it recolors
# by shifting each Lab channel's mean toward the target color, then converts
# back with cv2.COLOR_Lab2BGR.
#
# def lip(image, parsing, part=17, color=[230, 50, 20]):
#     b, g, r = color  # alternative example colors: [10, 50, 250], [10, 250, 10]
#     tar_color = np.zeros_like(image)
#     tar_color[:, :, 0] = b
#     tar_color[:, :, 1] = g
#     tar_color[:, :, 2] = r
#
#     image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
#     il, ia, ib = cv2.split(image_lab)
#
#     tar_lab = cv2.cvtColor(tar_color, cv2.COLOR_BGR2Lab)
#     tl, ta, tb = cv2.split(tar_lab)
#
#     # Shift each channel's mean toward the target color's Lab value.
#     image_lab[:, :, 0] = np.clip(il - np.mean(il) + tl, 0, 100)
#     image_lab[:, :, 1] = np.clip(ia - np.mean(ia) + ta, -127, 128)
#     image_lab[:, :, 2] = np.clip(ib - np.mean(ib) + tb, -127, 128)
#
#     changed = cv2.cvtColor(image_lab, cv2.COLOR_Lab2BGR)
#
#     if part == 17:
#         changed = sharpen(changed)
#
#     changed[parsing != part] = image[parsing != part]
#     # changed = cv2.resize(changed, (512, 512))
#     return changed
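One caveat on this commented-out variant: the clip ranges (0..100 for L, -127..128 for a/b) match OpenCV's floating-point Lab convention, but cv2.COLOR_BGR2Lab on an 8-bit image returns L scaled to 0..255 and a/b offset by +128. If reviving this code, converting via float first would be a minimal fix, e.g.:

image_lab = cv2.cvtColor(image.astype(np.float32) / 255.0, cv2.COLOR_BGR2Lab)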