

Python cv2.COLOR_RGB2LAB Attribute Code Examples

This article collects typical usage examples of the Python attribute cv2.COLOR_RGB2LAB. If you are wondering what cv2.COLOR_RGB2LAB is for, how to use it, or want to see it in real code, the curated examples below should help. You can also explore further usage examples from the cv2 module to which this attribute belongs.


The following 15 code examples of the cv2.COLOR_RGB2LAB attribute are presented, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
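Before the examples, here is a minimal illustration of the conversion itself (the input array is made up). For uint8 inputs, OpenCV rescales L from [0, 100] to [0, 255] and offsets the a and b channels by 128, so all three channels fit in uint8:

import cv2
import numpy as np

rgb = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)  # illustrative RGB image
lab = cv2.cvtColor(rgb, cv2.COLOR_RGB2LAB)       # uint8 LAB: L in [0, 255], a/b offset by +128
L, a, b = cv2.split(lab)
rgb_back = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)  # approximate inverse (small rounding error)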

Example 1: get_tissue_mask

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def get_tissue_mask(I, luminosity_threshold=0.8):
        """
        Get a binary mask where true denotes pixels with a luminosity less than the specified threshold.
        Typically we use this to identify tissue in the image and exclude the bright white background.

        :param I: RGB uint8 image.
        :param luminosity_threshold: Luminosity threshold.
        :return: Binary mask.
        """
        assert is_uint8_image(I), "Image should be RGB uint8."
        I_LAB = cv.cvtColor(I, cv.COLOR_RGB2LAB)
        L = I_LAB[:, :, 0] / 255.0  # Convert to range [0,1].
        mask = L < luminosity_threshold

        # Check it's not empty
        if mask.sum() == 0:
            raise TissueMaskException("Empty tissue mask computed")

        return mask 
Author: Peter554, Project: StainTools, Lines: 21, Source: luminosity_threshold_tissue_locator.py
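A hypothetical call site for the function above, assuming cv2 is imported as cv as in the snippet (is_uint8_image and TissueMaskException are StainTools helpers not shown here, and the file name is invented):

img_bgr = cv.imread("slide.png")              # hypothetical path; OpenCV loads images as BGR
img = cv.cvtColor(img_bgr, cv.COLOR_BGR2RGB)  # the function expects RGB uint8
mask = get_tissue_mask(img, luminosity_threshold=0.8)
tissue_pixels = img[mask]                     # the boolean mask selects only tissue pixels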

Example 2: standardize

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def standardize(I, percentile=95):
        """
        Transform image I to standard brightness.
        Modifies the luminosity channel such that a fixed percentile is saturated.

        :param I: Image uint8 RGB.
        :param percentile: Percentile for luminosity saturation. At least (100 - percentile)% of pixels should be fully luminous (white).
        :return: Image uint8 RGB with standardized brightness.
        """
        assert is_uint8_image(I), "Image should be RGB uint8."
        I_LAB = cv.cvtColor(I, cv.COLOR_RGB2LAB)
        L_float = I_LAB[:, :, 0].astype(float)
        p = np.percentile(L_float, percentile)
        I_LAB[:, :, 0] = np.clip(255 * L_float / p, 0, 255).astype(np.uint8)
        I = cv.cvtColor(I_LAB, cv.COLOR_LAB2RGB)
        return I 
Author: Peter554, Project: StainTools, Lines: 18, Source: luminosity_standardizer.py
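As a quick illustration of the behaviour above (with a made-up input): in a flat mid-gray image every pixel has the same luminosity, so that value is also the 95th percentile and gets stretched to full brightness:

img = np.full((10, 10, 3), 160, dtype=np.uint8)  # hypothetical flat gray image
out = standardize(img, percentile=95)            # the single L value is scaled up to 255
# out should come back (near-)white everywhere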

Example 3: renderEnvLuminosityNoise

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def renderEnvLuminosityNoise(self, origin_image, noise_var=0.1, in_RGB=False, out_RGB=False):
        """
        Render the image under randomly varied environment luminosity and color.
        """
        # Convert to LAB as float32 so the per-channel scaling below cannot overflow uint8
        origin_image_LAB = cv2.cvtColor(
            origin_image, cv2.COLOR_RGB2LAB if in_RGB else cv2.COLOR_BGR2LAB).astype(np.float32)
        # Scale each channel by a random factor centered on 1.0
        origin_image_LAB[:, :, 0] *= np.random.randn() * noise_var + 1.0
        origin_image_LAB[:, :, 1] *= np.random.randn() * noise_var + 1.0
        origin_image_LAB[:, :, 2] *= np.random.randn() * noise_var + 1.0
        # Clip back into uint8 range before converting out of LAB
        out_image = cv2.cvtColor(
            np.clip(origin_image_LAB, 0, 255).astype(np.uint8),
            cv2.COLOR_LAB2RGB if out_RGB else cv2.COLOR_LAB2BGR)
        return out_image
Author: araffin, Project: robotics-rl-srl, Lines: 18, Source: omnirobot_simulator_server.py

Example 4: clahe

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def clahe(img, clip_limit=2.0, tile_grid_size=(8, 8)):
    if img.dtype != np.uint8:
        raise TypeError("clahe supports only uint8 inputs")

    clahe_mat = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid_size)

    if len(img.shape) == 2 or img.shape[2] == 1:
        img = clahe_mat.apply(img)
    else:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
        img[:, :, 0] = clahe_mat.apply(img[:, :, 0])
        img = cv2.cvtColor(img, cv2.COLOR_LAB2RGB)

    return img 
Author: albumentations-team, Project: albumentations, Lines: 16, Source: functional.py
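Converting to LAB first means CLAHE touches only the lightness channel, leaving hue and saturation alone. A minimal, hypothetical usage of the helper above:

img = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)  # stand-in RGB image
enhanced = clahe(img, clip_limit=2.0, tile_grid_size=(8, 8))
assert enhanced.shape == img.shape and enhanced.dtype == np.uint8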

Example 5: rgb2Lab

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def rgb2Lab(img):
    img_rgb = rgb(img)  # rgb() is a helper defined elsewhere in image.py that normalizes the input to 3-channel RGB
    Lab = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2LAB)
    return Lab
Author: tody411, Project: ColorHistogram, Lines: 9, Source: image.py

Example 6: lab_split

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def lab_split(I):
        """
        Convert from RGB uint8 to LAB and split into channels.

        :param I: Image RGB uint8.
        :return: Tuple (I1, I2, I3) of the L, A and B channels as float32 arrays.
        """
        assert is_uint8_image(I), "Should be an RGB uint8 image"
        I = cv.cvtColor(I, cv.COLOR_RGB2LAB)
        I_float = I.astype(np.float32)
        I1, I2, I3 = cv.split(I_float)
        I1 /= 2.55  # should now be in range [0,100]
        I2 -= 128.0  # should now be in range [-127,127]
        I3 -= 128.0  # should now be in range [-127,127]
        return I1, I2, I3 
Author: Peter554, Project: StainTools, Lines: 17, Source: reinhard_color_normalizer.py
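The Reinhard normalizer pairs this split with a merge step that reverses the scaling. A minimal sketch of that inverse, assuming the same channel conventions as lab_split (not the verbatim StainTools code):

def lab_merge(I1, I2, I3):
    # Undo the scaling applied by lab_split, then convert back to RGB uint8
    I = cv.merge((I1 * 2.55, I2 + 128.0, I3 + 128.0))
    I = np.clip(I, 0, 255).astype(np.uint8)
    return cv.cvtColor(I, cv.COLOR_LAB2RGB)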

Example 7: clahe

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def clahe(img, clipLimit=2.0, tileGridSize=(5,5)):
    img_lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)  # LAB, not YUV: CLAHE runs on the L channel
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    img_lab[:, :, 0] = clahe.apply(img_lab[:, :, 0])
    img_output = cv2.cvtColor(img_lab, cv2.COLOR_LAB2RGB)
    return img_output
Author: SpaceNetChallenge, Project: SpaceNet_Off_Nadir_Solutions, Lines: 8, Source: train154_9ch_fold.py

Example 8: __MR_readimg

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def __MR_readimg(self,img):
        if isinstance(img,str): # an image path
            # _cv2_LOAD_IMAGE_COLOR is a module-level compatibility alias in MR.py
            # (presumably cv2.IMREAD_COLOR on newer OpenCV versions)
            img = cv2.imread(img, _cv2_LOAD_IMAGE_COLOR)
        img = cv2.cvtColor(img,cv2.COLOR_RGB2LAB).astype(float)/255
        h = 100
        w = int(float(h)/float(img.shape[0])*float(img.shape[1]))
        return cv2.resize(img,(w,h))
Author: ruanxiang, Project: mr_saliency, Lines: 9, Source: MR.py

Example 9: clahe

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def clahe(img, clipLimit=2.0, tileGridSize=(8,8)):
    img_lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)  # LAB, not YUV: CLAHE runs on the L channel
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    img_lab[:, :, 0] = clahe.apply(img_lab[:, :, 0])
    img_output = cv2.cvtColor(img_lab, cv2.COLOR_LAB2RGB)
    return img_output
Author: selimsef, Project: dsb2018_topcoders, Lines: 8, Source: functional.py

Example 10: add_channel

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def add_channel(img):
    lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(21, 21))
    lab = clahe.apply(lab[:, :, 0])  # contrast-equalized lightness channel
    if lab.mean() > 127:  # invert predominantly bright images for consistency
        lab = 255 - lab
    return np.dstack((img, lab))  # append as a fourth channel: HxWx4
Author: selimsef, Project: dsb2018_topcoders, Lines: 9, Source: functional.py

Example 11: process

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def process(self, cv_before, name):

        k = self.k[0]
        kernel = np.ones((k, k), np.uint8)

        if name == 'Invert':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.bitwise_not(cv_before)
        elif name == 'Histogram Equalization':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
            cv_after = clahe.apply(cv_before)
        elif name == 'Threshold':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            ret, cv_after = cv2.threshold(
                cv_before, k, 255, cv2.THRESH_BINARY)
        elif name == 'Gaussian Threshold':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.adaptiveThreshold(cv_before, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                             cv2.THRESH_BINARY, k, 2)
        elif name == 'HSV':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2HSV)
            lower_color = np.array([k - 35, 0, 0])
            upper_color = np.array([k + 35, 255, 255])
            cv_after = cv2.inRange(cv_before, lower_color, upper_color)
        elif name == 'LAB':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2LAB)
            L, a, b = cv2.split(cv_before)
            ret, cv_after = cv2.threshold(L, k, 255, cv2.THRESH_BINARY)
        elif name == 'Erosion':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.erode(cv_before, kernel, iterations=1)
        elif name == 'Dilation':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.dilate(cv_before, kernel, iterations=1)
        elif name == 'Opening':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.morphologyEx(
                cv_before, cv2.MORPH_OPEN, kernel)
        elif name == 'Closing':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.morphologyEx(
                cv_before, cv2.MORPH_CLOSE, kernel)
        elif name == 'Top Hat':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.morphologyEx(
                cv_before, cv2.MORPH_TOPHAT, kernel)
        elif name == 'Black Hat':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.morphologyEx(
                cv_before, cv2.MORPH_BLACKHAT, kernel)
        elif name == 'Canny':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.Canny(cv_before, 100, k)
        elif name == 'Laplacian':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.Laplacian(cv_before, cv2.CV_64F)
            cv_after = np.absolute(cv_after)
            cv_after = np.uint8(cv_after)

        return cv_after 
Author: anonymouslycn, Project: bjtu_BinocularCameraRecord, Lines: 63, Source: FilterCvQtContainer.py

Example 12: test_every_colorspace

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def test_every_colorspace(self):
        def _image_to_channel(image, cspace):
            if cspace == iaa.CSPACE_YCrCb:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2YCR_CB)
                return image_cvt[:, :, 0:0+1]
            elif cspace == iaa.CSPACE_HSV:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
                return image_cvt[:, :, 2:2+1]
            elif cspace == iaa.CSPACE_HLS:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
                return image_cvt[:, :, 1:1+1]
            elif cspace == iaa.CSPACE_Lab:
                if hasattr(cv2, "COLOR_RGB2Lab"):
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2Lab)
                else:
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
                return image_cvt[:, :, 0:0+1]
            elif cspace == iaa.CSPACE_Luv:
                if hasattr(cv2, "COLOR_RGB2Luv"):
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2Luv)
                else:
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
                return image_cvt[:, :, 0:0+1]
            else:
                assert cspace == iaa.CSPACE_YUV
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
                return image_cvt[:, :, 0:0+1]

        # Max differences between input image and image after augmentation
        # when no child augmenter is used (for the given example image below).
        # For some colorspaces the conversion to input colorspace isn't
        # perfect.
        # Values were manually checked.
        max_diff_expected = {
            iaa.CSPACE_YCrCb: 1,
            iaa.CSPACE_HSV: 0,
            iaa.CSPACE_HLS: 0,
            iaa.CSPACE_Lab: 2,
            iaa.CSPACE_Luv: 4,
            iaa.CSPACE_YUV: 1
        }

        image = np.arange(6*6*3).astype(np.uint8).reshape((6, 6, 3))

        for cspace in self.valid_colorspaces:
            with self.subTest(colorspace=cspace):
                child = _BatchCapturingDummyAugmenter()
                aug = iaa.WithBrightnessChannels(
                    children=child,
                    to_colorspace=cspace)

                image_aug = aug(image=image)

                expected = _image_to_channel(image, cspace)
                diff = np.abs(
                    image.astype(np.int32) - image_aug.astype(np.int32))
                assert np.all(diff <= max_diff_expected[cspace])
                assert np.array_equal(child.last_batch.images[0], expected) 
Author: aleju, Project: imgaug, Lines: 60, Source: test_color.py

Example 13: __getitem__

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def __getitem__(self, index):

        if self.is_train:
            ids = self.train[index]
        else:
            ids = self.valid[index]

        images = self.dataset.get_image([self.cam_name], [ids])
        img_path = images[0]

        img = load_image(img_path)  #CxHxW
        target = self.load_angles(img_path)

        original_size = np.array((img.shape[2], img.shape[1]))

        segmasks = self.dataset.get_seg([self.cam_name], [ids])
        segmask = io.imread(segmasks[0])

        binary_arm = vdb.get_obj_mask(segmask, self.color)
        bb = vdb.seg2bb(binary_arm)
        x0, x1, y0, y1 = bb
        

        c = np.array([(x0+x1), (y0+y1)])/2
        #s = np.sqrt((y1-y0)*(x1-x0))/120.0
        s = np.sqrt((y1-y0)*(x1-x0))/60.0
        r = 0

        #s = max(x1-x0, y1-y0)/125
        if self.is_train:
            c = c + np.array([-30 + 60*random.random(), -30 + 60*random.random()])  # random translation
            s *= 0.6*(1 + 2*random.random())  # random scale

            rf = 15
            r = -rf + 2*random.random()*rf  # random rotation
            #r = torch.randn(1).mul_(rf).clamp(-2*rf, 2*rf)[0] if random.random() <= 0.6 else 0

            # Color
            im_rgb = im_to_numpy(img)
            im_lab = cv2.cvtColor(im_rgb, cv2.COLOR_RGB2LAB)
            im_lab[:,:,0] = np.clip(im_lab[:,:,0]*(random.uniform(0.3, 1.3)), 0, 255)
            img = im_to_torch(cv2.cvtColor(im_lab, cv2.COLOR_LAB2RGB))

            if random.random() <= 0.5:
                img = torch.from_numpy(fliplr(img.numpy())).float()

        inp = crop(img, c, s, [self.inp_res, self.inp_res], rot=r)
        inp = color_normalize(inp, self.mean, self.std)

        return inp, target 
Author: zuoym15, Project: craves.ai, Lines: 52, Source: arm_resnet.py

Example 14: filter_lane_points

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def filter_lane_points(self,
                           img,
                           filter_type='bilateral',
                           ksize_r=25,
                           C_r=8,
                           ksize_b=35,
                           C_b=5,
                           mask_noise=False,
                           ksize_noise=65,
                           C_noise=10,
                           noise_thresh=135):
        '''
        Filter an image to isolate lane lines and return a binary version.

        All image color space conversion, thresholding, filtering and morphing
        happens inside this method. It takes an RGB color image as input and
        returns a binary filtered version.
        '''

        # Define structuring elements for cv2 functions
        strel_lab_b = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(55,55))
        strel_rgb_r = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(29,29))
        strel_open = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(5,5))
        # Extract RGB R-channel and LAB B-channel
        rgb_r_channel = img[:,:,0]
        lab_b_channel = (cv2.cvtColor(img, cv2.COLOR_RGB2LAB))[:,:,2]
        # Apply tophat morphology
        rgb_r_tophat = cv2.morphologyEx(rgb_r_channel, cv2.MORPH_TOPHAT, strel_rgb_r, iterations=1)
        lab_b_tophat = cv2.morphologyEx(lab_b_channel, cv2.MORPH_TOPHAT, strel_lab_b, iterations=1)
        if filter_type == 'bilateral':
            # Apply bilateral adaptive color thresholding
            rgb_r_thresh = bilateral_adaptive_threshold(rgb_r_tophat, ksize=ksize_r, C=C_r)
            lab_b_thresh = bilateral_adaptive_threshold(lab_b_tophat, ksize=ksize_b, C=C_b)
        elif filter_type == 'neighborhood':
            rgb_r_thresh = cv2.adaptiveThreshold(rgb_r_channel, 255, adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C, thresholdType=cv2.THRESH_BINARY, blockSize=ksize_r, C=-C_r)
            lab_b_thresh = cv2.adaptiveThreshold(lab_b_channel, 255, adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C, thresholdType=cv2.THRESH_BINARY, blockSize=ksize_b, C=-C_b)
        else:
            raise ValueError("Unexpected filter mode. Expected modes are 'bilateral' or 'neighborhood'.")
        if mask_noise: # Merge both color channels and the noise mask
            # Create a mask to filter out noise such as trees and other greenery based on the LAB B-channel
            noise_mask_part1 = cv2.inRange(lab_b_channel, noise_thresh, 255) # This catches the noise, but unfortunately also the yellow line, therefore...
            noise_mask_part2 = bilateral_adaptive_threshold(lab_b_channel, ksize=ksize_noise, C=C_noise) # ...this brings the yellow line back...
            noise_bool = np.logical_or(np.logical_not(noise_mask_part1), noise_mask_part2) # ...once we combine the two.
            noise_mask = np.zeros_like(rgb_r_channel, dtype=np.uint8)
            noise_mask[noise_bool] = 255

            merged_bool = np.logical_and(np.logical_or(rgb_r_thresh, lab_b_thresh), noise_mask)
            merged = np.zeros_like(rgb_r_channel, dtype=np.uint8)
            merged[merged_bool] = 255
        else: # Only merge the two color channels
            merged_bool = np.logical_or(rgb_r_thresh, lab_b_thresh)
            merged = np.zeros_like(rgb_r_channel, dtype=np.uint8)
            merged[merged_bool] = 255

        # Apply open morphology
        opened = cv2.morphologyEx(merged, cv2.MORPH_OPEN, strel_open, iterations=1)

        return opened 
Author: pierluigiferrari, Project: lane_tracker, Lines: 60, Source: lane_tracker.py

Example 15: returnMask

# Required module: import cv2 [as alias]
# Or alternatively: from cv2 import COLOR_RGB2LAB [as alias]
def returnMask(self, image, tot_bins=8, format='BGR2LAB'):
        """ Return the saliency mask of the input image.
        
        @param: image the image to process
        @param: tot_bins the number of bins used in the histogram
        @param: format conversion, it can be one of the following:
            BGR2LAB, BGR2RGB, RGB2LAB, RGB, BGR, LAB
        @return: the saliency mask
        """
        if format == 'BGR2LAB':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
        elif format == 'BGR2RGB':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        elif format == 'RGB2LAB':
            image = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
        elif format == 'RGB' or format == 'BGR' or format == 'LAB':
            pass
        else:
            raise ValueError('[DEEPGAZE][SALIENCY-MAP][ERROR] the input format of the image is not supported.')
        if DEBUG: start = timer()
        self._calculate_histogram(image, tot_bins=tot_bins)
        if DEBUG: end = timer()
        if DEBUG: print("--- %s calculate_histogram seconds ---" % (end - start))
        if DEBUG: start = timer()
        number_of_colors = self._precompute_parameters()
        if DEBUG: end = timer()
        if DEBUG: print("--- number of colors: " + str(number_of_colors) + " ---")
        if DEBUG: print("--- %s precompute_paramters seconds ---" % (end - start))
        if DEBUG: start = timer()
        self._bilateral_filtering()
        if DEBUG: end = timer()
        if DEBUG: print("--- %s bilateral_filtering seconds ---" % (end - start))
        if DEBUG: start = timer()
        self._calculate_probability()
        if DEBUG: end = timer()
        if DEBUG: print("--- %s calculate_probability seconds ---" % (end - start))
        if DEBUG: start = timer()
        self._compute_saliency_map()
        if DEBUG: end = timer()
        if DEBUG: print("--- %s compute_saliency_map seconds ---" % (end - start))
        if DEBUG: start = timer()
        it = np.nditer(self.salient_image, flags=['multi_index'], op_flags=['writeonly'])
        while not it.finished:
            # This part takes 0.1 seconds
            y = it.multi_index[0]
            x = it.multi_index[1]
            #L_id = self.L_id_matrix[y, x]
            #A_id = self.A_id_matrix[y, x]
            #B_id = self.B_id_matrix[y, x]
            index = self.image_quantized[y, x]
            # These operations take 0.1 seconds
            index = self.map_3d_1d[index[0], index[1], index[2]]
            it[0] = self.saliency[index]
            it.iternext()

        if DEBUG: end = timer()
        # ret, self.salient_image = cv2.threshold(self.salient_image, 150, 255, cv2.THRESH_BINARY)
        if DEBUG: print("--- %s returnMask 'iteration part' seconds ---" % (end - start))
        return self.salient_image 
Author: mpatacchiola, Project: deepgaze, Lines: 61, Source: saliency_map.py


Note: The cv2.COLOR_RGB2LAB attribute examples in this article were compiled by 純淨天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Please do not reproduce without permission.