

Python cv2.COLOR_LAB2BGR Attribute Code Examples

This article collects typical usage examples of the cv2.COLOR_LAB2BGR attribute in Python. If you are wondering what exactly cv2.COLOR_LAB2BGR does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the cv2 module.


The following presents 15 code examples of the cv2.COLOR_LAB2BGR attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
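
As a quick orientation before the examples: cv2.COLOR_LAB2BGR is the conversion code passed to cv2.cvtColor to map an image from the CIE LAB color space back to OpenCV's default BGR channel order. A minimal round-trip sketch (the file name is a hypothetical placeholder):

import cv2

img_bgr = cv2.imread('photo.jpg')                # 8-bit BGR input (hypothetical file)
lab = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LAB)   # forward: BGR -> LAB
# ... edit lab[:, :, 0] (lightness) or lab[:, :, 1:] (color) here ...
img_out = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)   # inverse: LAB -> BGR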

Example 1: convert_to_original_colors

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def convert_to_original_colors(content_img, stylized_img):
  content_img  = postprocess(content_img)
  stylized_img = postprocess(stylized_img)
  if args.color_convert_type == 'yuv':
    cvt_type = cv2.COLOR_BGR2YUV
    inv_cvt_type = cv2.COLOR_YUV2BGR
  elif args.color_convert_type == 'ycrcb':
    cvt_type = cv2.COLOR_BGR2YCR_CB
    inv_cvt_type = cv2.COLOR_YCR_CB2BGR
  elif args.color_convert_type == 'luv':
    cvt_type = cv2.COLOR_BGR2LUV
    inv_cvt_type = cv2.COLOR_LUV2BGR
  elif args.color_convert_type == 'lab':
    cvt_type = cv2.COLOR_BGR2LAB
    inv_cvt_type = cv2.COLOR_LAB2BGR
  content_cvt = cv2.cvtColor(content_img, cvt_type)
  stylized_cvt = cv2.cvtColor(stylized_img, cvt_type)
  c1, _, _ = cv2.split(stylized_cvt)
  _, c2, c3 = cv2.split(content_cvt)
  merged = cv2.merge((c1, c2, c3))
  dst = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
  dst = preprocess(dst)
  return dst 
Developer: cysmith, Project: neural-style-tf, Lines of code: 25, Source file: neural_style.py

Example 2: renderEnvLuminosityNoise

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def renderEnvLuminosityNoise(self, origin_image, noise_var=0.1, in_RGB=False, out_RGB=False):
    """
    Render varying environment luminosity by scaling each LAB channel
    with multiplicative Gaussian noise.
    """
    # vary luminosity and color
    origin_image_LAB = cv2.cvtColor(
        origin_image, cv2.COLOR_RGB2LAB if in_RGB else cv2.COLOR_BGR2LAB)
    origin_image_LAB[:, :, 0] = origin_image_LAB[:, :, 0] * (np.random.randn() * noise_var + 1.0)
    origin_image_LAB[:, :, 1] = origin_image_LAB[:, :, 1] * (np.random.randn() * noise_var + 1.0)
    origin_image_LAB[:, :, 2] = origin_image_LAB[:, :, 2] * (np.random.randn() * noise_var + 1.0)
    out_image = cv2.cvtColor(
        origin_image_LAB, cv2.COLOR_LAB2RGB if out_RGB else cv2.COLOR_LAB2BGR)
    return out_image
Developer: araffin, Project: robotics-rl-srl, Lines of code: 18, Source file: omnirobot_simulator_server.py

Example 3: equalize_light

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def equalize_light(image, limit=3, grid=(7,7), gray=False):
    if (len(image.shape) == 2):
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        gray = True
    
    clahe = cv2.createCLAHE(clipLimit=limit, tileGridSize=grid)
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)

    cl = clahe.apply(l)
    limg = cv2.merge((cl,a,b))

    image = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
    if gray: 
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    return np.uint8(image) 
Developer: arthurflor23, Project: surface-crack-detection, Lines of code: 19, Source file: image.py
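
A minimal usage sketch for equalize_light above (the file names are hypothetical placeholders):

import cv2

img = cv2.imread('surface.png')                       # hypothetical input image
enhanced = equalize_light(img, limit=2, grid=(8, 8))  # CLAHE on the L channel
cv2.imwrite('surface_equalized.png', enhanced)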

Example 4: histogram_equalization

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def histogram_equalization(img):

    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)

    # -----Splitting the LAB image to different channels-------------------------
    l, a, b = cv2.split(lab)

    # -----Applying CLAHE to L-channel-------------------------------------------
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    cl = clahe.apply(l)

    # -----Merge the CLAHE enhanced L-channel with the a and b channel-----------
    limg = cv2.merge((cl, a, b))

    # -----Converting image from LAB color model back to BGR---------------------
    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)

    return final 
Developer: baumgach, Project: PHiSeg-code, Lines of code: 20, Source file: phiseg_makegif_samples.py

Example 5: transfer_avg_color

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def transfer_avg_color(img_old,img_new):
  assert(img_old.shape==img_new.shape) 
  source = cv2.cvtColor(img_old, cv2.COLOR_BGR2LAB).astype("float32")
  target = cv2.cvtColor(img_new, cv2.COLOR_BGR2LAB).astype("float32")

  (lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source)
  (lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target)

  (l, a, b) = cv2.split(target)

  l -= lMeanTar
  a -= aMeanTar
  b -= bMeanTar

  l = (lStdTar / lStdSrc) * l
  a = (aStdTar / aStdSrc) * a
  b = (bStdTar / bStdSrc) * b

  l += lMeanSrc
  a += aMeanSrc
  b += bMeanSrc

  l = numpy.clip(l, 0, 255)
  a = numpy.clip(a, 0, 255)
  b = numpy.clip(b, 0, 255)

  transfer = cv2.merge([l, a, b])
  transfer = cv2.cvtColor(transfer.astype("uint8"), cv2.COLOR_LAB2BGR)

  return transfer 
Developer: dfaker, Project: df, Lines of code: 32, Source file: merge_faces_larger.py
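
Examples 5 and 13 both rely on an image_stats helper that is not shown on this page. A plausible sketch of it, inferred from how its six return values are used (per-channel mean and standard deviation of a float32 LAB image):

import cv2

def image_stats(image):
    # split the LAB image and return (mean, std) for each channel
    (l, a, b) = cv2.split(image)
    return (l.mean(), l.std(), a.mean(), a.std(), b.mean(), b.std())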

Example 6: lightness

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def lightness(img, amount=0.25):

    try:
        # Only works with BGR images; RANDOM is the project's module-level RNG.
        # Work in float32 so scaling a uint8 channel does not raise a casting error.
        lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB).astype(np.float32)
        lab[:, :, 0] *= RANDOM.uniform(1 - amount, 1 + amount)
        lab[:, :, 0] = lab[:, :, 0].clip(0, 255)  # clip() returns a copy, so assign it back
        img = cv2.cvtColor(lab.astype(np.uint8), cv2.COLOR_LAB2BGR)
    except Exception:
        pass

    return img
Developer: kahst, Project: BirdCLEF-Baseline, Lines of code: 14, Source file: image.py

Example 7: correct_lightness

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def correct_lightness(img: np.ndarray):
    if len(np.shape(img)) == 3:
        img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
        l, a, b = cv2.split(img_lab)
        clahe = cv2.createCLAHE(clipLimit=40.0, tileGridSize=(4, 4))
        l = clahe.apply(l)
        img = cv2.merge((l, a, b))
        img = cv2.cvtColor(img, cv2.COLOR_LAB2BGR)
    return img
Developer: haruiz, Project: CvStudio, Lines of code: 11, Source file: img_util.py

Example 8: light

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def light(im1_name, im2_name):
    # im1
    im = cv2.imread(im1_name)
    im = im.astype(np.float32)
    im /= 255.
    im_lab = cv2.cvtColor(im, cv2.COLOR_BGR2LAB)
    l = im_lab[:, :, 0]
    L1_mean = np.mean(l)
    L1_std = np.std(l)

    # im2
    im = cv2.imread(im2_name)
    im = im.astype(np.float32)
    im /= 255.
    im_lab = cv2.cvtColor(im, cv2.COLOR_BGR2LAB)
    l = im_lab[:, :, 0]
    L2_mean = np.mean(l)
    L2_std = np.std(l)

    if L2_std != 0:
        l = (l - L2_mean) / L2_std * L1_std + L1_mean
    l = l[:, :, np.newaxis]
    im_lab = np.concatenate((l, im_lab[:, :, 1:]), axis=2)
    im = cv2.cvtColor(im_lab, cv2.COLOR_LAB2BGR)
    im *= 255.
    return im 
Developer: Sundrops, Project: pytorch-faster-rcnn, Lines of code: 28, Source file: common.py
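
A usage sketch for light above, which relights the second image to match the lightness statistics of the first (file names are hypothetical); note that the function returns a float32 BGR image scaled back to the [0, 255] range:

import cv2
import numpy as np

out = light('reference.jpg', 'target.jpg')   # hypothetical file names
cv2.imwrite('target_relit.jpg', np.clip(out, 0, 255).astype(np.uint8))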

Example 9: increase_contrast

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def increase_contrast(img):
    """
    Increase contrast of a BGR image
    @Author: Appcell
    @param img: image to be processed
    @return: a numpy.ndarray object of this image
    """
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(4, 4))
    cl = clahe.apply(l)
    limg = cv2.merge((cl,a,b))
    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
    return final 
Developer: appcell, Project: OverwatchDataAnalysis, Lines of code: 16, Source file: image.py

Example 10: color_transfer_mix

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def color_transfer_mix(img_src, img_trg):
    # linear_color_transfer and color_transfer_sot are helpers defined
    # elsewhere in this module
    img_src = np.clip(img_src * 255.0, 0, 255).astype(np.uint8)
    img_trg = np.clip(img_trg * 255.0, 0, 255).astype(np.uint8)

    img_src_lab = cv2.cvtColor(img_src, cv2.COLOR_BGR2LAB)
    img_trg_lab = cv2.cvtColor(img_trg, cv2.COLOR_BGR2LAB)

    rct_light = np.clip(linear_color_transfer(img_src_lab[..., 0:1].astype(np.float32) / 255.0,
                                              img_trg_lab[..., 0:1].astype(np.float32) / 255.0)[..., 0] * 255.0,
                        0, 255).astype(np.uint8)

    img_src_lab[..., 0] = (np.ones_like(rct_light) * 100).astype(np.uint8)
    img_src_lab = cv2.cvtColor(img_src_lab, cv2.COLOR_LAB2BGR)

    img_trg_lab[..., 0] = (np.ones_like(rct_light) * 100).astype(np.uint8)
    img_trg_lab = cv2.cvtColor(img_trg_lab, cv2.COLOR_LAB2BGR)

    img_rct = color_transfer_sot(img_src_lab.astype(np.float32), img_trg_lab.astype(np.float32))
    img_rct = np.clip(img_rct, 0, 255).astype(np.uint8)

    img_rct = cv2.cvtColor(img_rct, cv2.COLOR_BGR2LAB)
    img_rct[..., 0] = rct_light
    img_rct = cv2.cvtColor(img_rct, cv2.COLOR_LAB2BGR)

    return (img_rct / 255.0).astype(np.float32)
Developer: iperov, Project: DeepFaceLab, Lines of code: 28, Source file: color_transfer.py

Example 11: upsample_color_image

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def upsample_color_image(grayscale_highres, color_lowres_bgr, colorspace='LAB'):
    """
    Generate a high res color image from a high res grayscale image, and a low res color image,
    using the trick described in:
    http://www.planetary.org/blogs/emily-lakdawalla/2013/04231204-image-processing-colorizing-images.html
    """
    assert(len(grayscale_highres.shape) == 2)
    assert(len(color_lowres_bgr.shape) == 3 and color_lowres_bgr.shape[2] == 3)

    if colorspace == 'LAB':
        # convert color image to LAB space
        lab = cv2.cvtColor(src=color_lowres_bgr, code=cv2.COLOR_BGR2LAB)
        # replace lightness channel with the highres image
        lab[:, :, 0] = grayscale_highres
        # convert back to BGR
        color_highres_bgr = cv2.cvtColor(src=lab, code=cv2.COLOR_LAB2BGR)
    elif colorspace == 'HSV':
        # convert color image to HSV space
        hsv = cv2.cvtColor(src=color_lowres_bgr, code=cv2.COLOR_BGR2HSV)
        # replace value channel with the highres image
        hsv[:, :, 2] = grayscale_highres
        # convert back to BGR
        color_highres_bgr = cv2.cvtColor(src=hsv, code=cv2.COLOR_HSV2BGR)
    elif colorspace == 'HLS':
        # convert color image to HLS space
        hls = cv2.cvtColor(src=color_lowres_bgr, code=cv2.COLOR_BGR2HLS)
        # replace lightness channel with the highres image
        hls[:, :, 1] = grayscale_highres
        # convert back to BGR
        color_highres_bgr = cv2.cvtColor(src=hls, code=cv2.COLOR_HLS2BGR)
    else:
        raise ValueError('Unsupported colorspace: {}'.format(colorspace))

    return color_highres_bgr 
Developer: uzh-rpg, Project: rpg_e2vid, Lines of code: 34, Source file: inference_utils.py
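
A usage sketch for upsample_color_image, simulating the low-res color / high-res grayscale pair from a single image (the file name and the 4x factor are arbitrary choices for illustration):

import cv2

bgr = cv2.imread('frame.png')                          # hypothetical input
gray_hi = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)        # full-resolution luminance
h, w = gray_hi.shape
color_lo = cv2.resize(bgr, (w // 4, h // 4))           # simulated low-res color
color_lo = cv2.resize(color_lo, (w, h))                # stretch back to full size
result = upsample_color_image(gray_hi, color_lo, colorspace='LAB')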

Example 12: clahe

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def clahe(img, clip=2, grid=8):
    # convert to LAB and equalize the L (lightness) channel
    img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    _clahe = cv2.createCLAHE(clipLimit=clip, tileGridSize=(grid, grid))
    img_lab[:, :, 0] = _clahe.apply(img_lab[:, :, 0])
    img_equ = cv2.cvtColor(img_lab, cv2.COLOR_LAB2BGR)
    return img_equ
Developer: nyoki-mtl, Project: pytorch-segmentation, Lines of code: 8, Source file: preprocess.py

Example 13: color_transfer

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def color_transfer(source, target):

    source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
    target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32")

    (lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source)
    (lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target)

    (l, a, b) = cv2.split(target)
    l -= lMeanTar
    a -= aMeanTar
    b -= bMeanTar

    l = (lStdTar / lStdSrc) * l
    a = (aStdTar / aStdSrc) * a
    b = (bStdTar / bStdSrc) * b

    l += lMeanSrc
    a += aMeanSrc
    b += bMeanSrc

    l = np.clip(l, 0, 255)
    a = np.clip(a, 0, 255)
    b = np.clip(b, 0, 255)

    transfer = cv2.merge([l, a, b])
    transfer = cv2.cvtColor(transfer.astype("uint8"), cv2.COLOR_LAB2BGR)

    return transfer 
Developer: meizhoubao, Project: pyimagesearch, Lines of code: 31, Source file: color_transfer.py
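
A usage sketch for color_transfer above (Reinhard-style statistics matching; file names are hypothetical, and image_stats is assumed to behave as sketched under Example 5):

import cv2

source = cv2.imread('palette.jpg')   # hypothetical: image whose colors to borrow
target = cv2.imread('photo.jpg')     # hypothetical: image to recolor
result = color_transfer(source, target)
cv2.imwrite('photo_recolored.jpg', result)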

Example 14: rgb_clahe

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def rgb_clahe(in_rgb_img):
    bgr = in_rgb_img[:,:,[2,1,0]] # flip r and b
    lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    lab[:,:,0] = clahe.apply(lab[:,:,0])
    bgr = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
    return bgr[:,:,[2,1,0]] 
Developer: nicolefinnie, Project: kaggle-dsb2018, Lines of code: 9, Source file: image_processing.py

Example 15: distort_color

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_LAB2BGR [as alias]
def distort_color(im):
    # distort brightness
    hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    h_, s_, v_ = cv2.split(hsv)
    # use a signed dtype so the noise can go negative without uint8 wrap-around
    v_ = v_.astype(np.int32) + np.random.randint(-16, 16)
    v_[v_ > 255] = 255
    v_[v_ < 0] = 0
    hsv = cv2.merge((h_, s_, v_.astype(np.uint8)))
    im = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    # distort contrast
    # """
    # From TF source code
    #   For each channel, this Op computes the mean of the image pixels in the
    # channel and then adjusts each component `x` of each pixel to
    # `(x - mean) * contrast_factor + mean`.
    # """
    im = im.astype(np.float32)
    b, g, r = cv2.split(im)
    # factor = (np.random.rand() + 0.5)
    factor = np.random.uniform(0.75, 1.25)
    b = (b - b.mean()) * factor + b.mean()
    b[b > 255] = 255
    b[b < 0] = 0
    # factor = (np.random.rand() + 0.5)
    g = (g - g.mean()) * factor + g.mean()
    g[g > 255] = 255
    g[g < 0] = 0
    # factor = (np.random.rand() + 0.5)
    r = (r - r.mean()) * factor + r.mean()
    r[r > 255] = 255
    r[r < 0] = 0
    im = cv2.merge((b, g, r))

    # im = im.astype(np.uint8)
    # clip_value = np.random.rand() * 3.0
    # clahe = cv2.createCLAHE(clipLimit=clip_value, tileGridSize=(8, 8))
    # lab = cv2.cvtColor(im, cv2.COLOR_BGR2LAB)  # convert from BGR to LAB color space
    # l, a, b = cv2.split(lab)  # split on 3 different channels
    # l2 = clahe.apply(l)  # apply CLAHE to the L-channel
    # lab = cv2.merge((l2, a, b))  # merge channels
    # im = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)  # convert from LAB to BGR
    # im = im.astype(np.float32)

    return im 
Developer: CharlesShang, Project: Detectron-PYTORCH, Lines of code: 47, Source file: fixed_size.py
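
Because distort_color returns a float32 image whose values are clamped to [0, 255] but not cast back, a caller would typically convert to uint8 before saving or display; a minimal sketch with a hypothetical file name:

import cv2
import numpy as np

im = cv2.imread('sample.jpg')                  # hypothetical input
out = distort_color(im)
out = np.clip(out, 0, 255).astype(np.uint8)    # back to displayable 8-bit BGR
cv2.imwrite('sample_distorted.jpg', out)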


Note: The cv2.COLOR_LAB2BGR attribute examples on this page were collected by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers, and the source code remains the copyright of its original authors. Please consult each project's license before distributing or using the code, and do not republish without permission.