

Python cv2.COLOR_BGR2Lab Method Code Examples

This article collects typical usage examples of the cv2.COLOR_BGR2Lab method in Python. If you are wondering what exactly cv2.COLOR_BGR2Lab does, or how to use it, the curated code examples below may help. You can also explore further usage examples from the cv2 module this constant belongs to.


The following shows 5 code examples of the cv2.COLOR_BGR2Lab method, ordered by popularity by default.
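Before the examples, here is a minimal, self-contained sketch (the input path is a placeholder) of what the conversion does for an 8-bit image: OpenCV scales L from its nominal [0, 100] range to [0, 255] and offsets a/b by 128, so all three channels fit in uint8.

import cv2

# Placeholder path; any 8-bit BGR image works.
img = cv2.imread('input.jpg')

# For uint8 input, OpenCV scales L to [0, 255] and offsets a/b by 128.
lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
L, a, b = cv2.split(lab)
print(L.min(), L.max())                 # luminance, 0..255
print(a.mean() - 128, b.mean() - 128)   # chroma, centered on 0 after the offset

# The conversion round-trips back to BGR.
bgr = cv2.cvtColor(lab, cv2.COLOR_Lab2BGR)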

Example 1: create_mask

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2Lab [as alias]
# This snippet also needs: import numpy as np
def create_mask(self, img, color):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)

    # Thresholds are in 8-bit Lab, where OpenCV offsets a/b by 128.
    if color == 'green':
        # a in [0, 128] (i.e. a <= 0 after the offset) selects the green side.
        threshold = [(20, 0, 128), (235, 128, 255)]
    elif color == 'white':
        # Mid-to-high L with a/b near 128, i.e. neutral chroma.
        threshold = [(100, 110, 110), (200, 140, 140)]
    else:
        raise ValueError('Color undefined')

    mask = cv2.inRange(img, threshold[0], threshold[1])
    # Optional morphological cleanup:
    # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    # mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    # mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

    mask = mask > 0

    # 8-bit version of the mask, kept from the original as a debugging aid;
    # the method itself returns the boolean mask.
    binary_img = np.zeros((img.shape[0], img.shape[1]), np.uint8)
    binary_img[mask] = 255

    return mask
Developer: zuoym15 | Project: craves.ai | Lines: 33 | Source: imutils.py
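A hedged usage sketch (the frame path is a placeholder, and `self` is passed as None only because in craves.ai this is a method on an image-utility class): the boolean mask can be used to blank out everything outside the chosen color range.

import cv2
import numpy as np

frame = cv2.imread('frame.jpg')                  # placeholder BGR frame
mask = create_mask(None, frame, 'green')         # boolean HxW array
isolated = np.where(mask[..., None], frame, 0)   # zero out non-matching pixels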

Example 2: init

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2Lab [as alias]
# This snippet also needs: numpy (np), copy, and the pyCFTrackers helpers
# pos2rect, get_sub_window, get_bin_mapping, get_foreground_background_probs,
# and get_adaptive_threshold
def init(self, first_frame, bbox):
    bbox = np.array(bbox).astype(np.int64)
    x, y, w, h = tuple(bbox)
    # Downscale so the target diagonal is close to the configured size.
    self._scale_factor = min(1, round(10 * self.config.img_scale_target_diagonal / cv2.norm(np.array([w, h]))) / 10.)
    self._center = (self._scale_factor * (x + (w - 1) / 2), self._scale_factor * (y + (h - 1) / 2))
    self.w, self.h = int(w * self._scale_factor), int(h * self._scale_factor)
    self._target_sz = (self.w, self.h)

    img = cv2.resize(first_frame, None, fx=self._scale_factor, fy=self._scale_factor)
    if self.config.color_space == 'lab':
        img = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    elif self.config.color_space == 'hsv':
        img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # OpenCV packs 8-bit hue into [0, 179]; stretch it to [0, 255] so all
        # three channels share the same histogram binning range.
        img[:, :, 0] = (img[:, :, 0] * 256 / 180)
        img = img.astype(np.uint8)
    else:
        pass  # keep BGR as-is

    # Surrounding window: an enlarged box around the target, used to model
    # foreground vs. background color statistics.
    surr_sz = (int(np.floor(self.config.surr_win_factor * self.w)),
               int(np.floor(self.config.surr_win_factor * self.h)))
    surr_rect = pos2rect(self._center, surr_sz, (img.shape[1], img.shape[0]))
    obj_rect_surr = pos2rect(self._center, self._target_sz, (img.shape[1], img.shape[0]))
    # Express the object rectangle in the surrounding window's coordinates.
    obj_rect_surr = (obj_rect_surr[0] - surr_rect[0],
                     obj_rect_surr[1] - surr_rect[1],
                     obj_rect_surr[2], obj_rect_surr[3])
    surr_win = get_sub_window(img, self._center, surr_sz)
    self.bin_mapping = get_bin_mapping(self.config.num_bins)
    # Color-histogram lookup table: P(foreground | color bin).
    self.prob_lut_, prob_map = get_foreground_background_probs(surr_win, obj_rect_surr,
                                                               self.config.num_bins, self.bin_mapping)
    self._prob_lut_distractor = copy.deepcopy(self.prob_lut_)
    self._prob_lut_masked = copy.deepcopy(self.prob_lut_)
    self.adaptive_threshold_ = get_adaptive_threshold(prob_map, obj_rect_surr)
    # Histories are stored in full (unscaled) image coordinates.
    self.target_pos_history.append((self._center[0] / self._scale_factor, self._center[1] / self._scale_factor))
    self.target_sz_history.append((self._target_sz[0] / self._scale_factor, self._target_sz[1] / self._scale_factor))
Developer: fengyang95 | Project: pyCFTrackers | Lines: 35 | Source: dat.py
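One detail worth unpacking: OpenCV packs 8-bit hue into [0, 179], so the `* 256 / 180` line stretches hue to [0, 255] before quantization into num_bins bins. A small sketch of the same rescaling (the frame path is a placeholder; casting to float first is a defensive addition to avoid uint8 dtype-promotion surprises):

import cv2
import numpy as np

img = cv2.imread('frame0.jpg')              # placeholder BGR frame
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
assert hsv[:, :, 0].max() <= 179            # 8-bit hue lives in [0, 179]

# Stretch hue to [0, 255] so all three channels share one binning range.
hue = hsv[:, :, 0].astype(np.float32) * 256 / 180
hsv[:, :, 0] = hue.astype(np.uint8)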

Example 3: equalize_clahe_color_lab

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2Lab [as alias]
def equalize_clahe_color_lab(img):
    """Equalize a BGR image by converting it to Lab, applying CLAHE to the
    L channel only, then merging the channels and converting back to BGR.
    """
    cla = cv2.createCLAHE(clipLimit=4.0)
    L, a, b = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2Lab))
    eq_L = cla.apply(L)
    eq_image = cv2.cvtColor(cv2.merge([eq_L, a, b]), cv2.COLOR_Lab2BGR)
    return eq_image
Developer: PacktPublishing | Project: Mastering-OpenCV-4-with-Python | Lines: 12 | Source: clahe_histogram_equalization.py
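A hedged usage sketch (paths are placeholders): equalizing only the L channel preserves chroma, avoiding the hue shifts you would get from running CLAHE on each BGR channel independently.

import cv2

img = cv2.imread('photo.jpg')              # placeholder BGR image
eq = equalize_clahe_color_lab(img)
cv2.imwrite('photo_clahe_lab.jpg', eq)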

Example 4: convert_rgb2lab

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2Lab [as alias]
# This snippet also needs: import numpy as np, import torch
def convert_rgb2lab(images, batch_size):  # e.g. images shape [128, 3, 32, 32]
    """
    INPUT: images should be NCHW float tensors.
    For float input, OpenCV returns L values in [0, 100] and a/b values
    roughly in [-127, 127]. Note: despite the name, this uses
    cv2.COLOR_BGR2Lab, i.e. it treats the channel order as BGR.
    batch_size is unused; the loop reads the batch dimension from the array.
    """
    images_np = images.numpy()
    images_np_nhwc = np.rollaxis(images_np, 1, 4)  # NCHW to NHWC
    images_LAB = torch.FloatTensor(images.size()).zero_()  # empty NCHW tensor to hold Lab
    for i in range(images_np_nhwc.shape[0]):
        img_lab = cv2.cvtColor(images_np_nhwc[i], cv2.COLOR_BGR2Lab)  # HWC
        images_LAB[i] = torch.from_numpy(np.rollaxis(img_lab, 2, 0))  # to CHW
    images_L = images_LAB[:, 0, :, :].contiguous().view(images.size(0), 1, images.size(2), images.size(3))  # channel 0 (L)
    images_AB = images_LAB[:, 1:, :, :]  # channels 1 and 2 (a, b)
    return images_L, images_AB
Developer: johnwlambert | Project: dlupi-heteroscedastic-dropout | Lines: 17 | Source: img_proc.py
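A hedged usage sketch with a synthetic batch: cv2.cvtColor expects float32 input scaled to [0, 1] to produce the ranges described in the docstring.

import torch

images = torch.rand(8, 3, 32, 32)          # synthetic NCHW float batch in [0, 1]
images_L, images_AB = convert_rgb2lab(images, batch_size=8)
print(images_L.shape)   # torch.Size([8, 1, 32, 32])
print(images_AB.shape)  # torch.Size([8, 2, 32, 32])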

Example 5: hair

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2Lab [as alias]
# This snippet also needs: import numpy as np, plus the project's sharpen helper
def hair(image, parsing, part=17, color=[230, 50, 20]):
    b, g, r = color  # e.g. [10, 50, 250] or [10, 250, 10]
    # Build a solid image of the target color so it can be converted to HSV.
    tar_color = np.zeros_like(image)
    tar_color[:, :, 0] = b
    tar_color[:, :, 1] = g
    tar_color[:, :, 2] = r

    image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    tar_hsv = cv2.cvtColor(tar_color, cv2.COLOR_BGR2HSV)

    if part == 12 or part == 13:
        # Lip labels: replace both hue and saturation.
        image_hsv[:, :, 0:2] = tar_hsv[:, :, 0:2]
    else:
        # Other labels (e.g. hair): replace hue only, keeping saturation.
        image_hsv[:, :, 0:1] = tar_hsv[:, :, 0:1]

    changed = cv2.cvtColor(image_hsv, cv2.COLOR_HSV2BGR)

    if part == 17:  # hair
        changed = sharpen(changed)

    # Restore the original pixels everywhere outside the target part.
    changed[parsing != part] = image[parsing != part]
    return changed
#
# def lip(image, parsing, part=17, color=[230, 50, 20]):
#     b, g, r = color  # e.g. [10, 50, 250] or [10, 250, 10]
#     tar_color = np.zeros_like(image)
#     tar_color[:, :, 0] = b
#     tar_color[:, :, 1] = g
#     tar_color[:, :, 2] = r
#
#     image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
#     il, ia, ib = cv2.split(image_lab)
#
#     tar_lab = cv2.cvtColor(tar_color, cv2.COLOR_BGR2Lab)
#     tl, ta, tb = cv2.split(tar_lab)
#
#     # Shift each channel so its mean matches the target color's Lab value.
#     # Note: these clip ranges assume float Lab; for uint8 input, OpenCV
#     # scales L to [0, 255] and offsets a/b by 128.
#     image_lab[:, :, 0] = np.clip(il - np.mean(il) + tl, 0, 100)
#     image_lab[:, :, 1] = np.clip(ia - np.mean(ia) + ta, -127, 128)
#     image_lab[:, :, 2] = np.clip(ib - np.mean(ib) + tb, -127, 128)
#
#     changed = cv2.cvtColor(image_lab, cv2.COLOR_Lab2BGR)
#
#     if part == 17:
#         changed = sharpen(changed)
#
#     changed[parsing != part] = image[parsing != part]
#     return changed
Developer: zllrunning | Project: face-parsing.PyTorch | Lines: 53 | Source: makeup.py
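A hedged usage sketch (paths and the parsing map are placeholders; in face-parsing.PyTorch the parsing map comes from the segmentation network, with label 17 marking hair, and sharpen is another helper in makeup.py):

import cv2
import numpy as np

image = cv2.imread('face.jpg')             # placeholder BGR portrait
parsing = np.load('parsing.npy')           # placeholder HxW integer label map
recolored = hair(image, parsing, part=17, color=[230, 50, 20])
cv2.imwrite('face_recolored.jpg', recolored)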


Note: The cv2.COLOR_BGR2Lab method examples in this article were compiled by 純淨天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this compilation without permission.