

Python cv2.COLOR_RGB2HLS Attribute Code Examples

This article collects and summarizes typical usage examples of the cv2.COLOR_RGB2HLS attribute in Python. If you are wondering exactly what cv2.COLOR_RGB2HLS does or how to use it, the curated examples below may help. You can also explore further usage examples from the cv2 module.


The following shows 15 code examples of the cv2.COLOR_RGB2HLS attribute, ordered by popularity by default.
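
Before diving into the examples, here is a minimal sketch of the basic usage: converting an 8-bit RGB image to HLS with cv2.cvtColor and picking out the individual channels. The file path is hypothetical, and since cv2.imread returns images in BGR order, an extra conversion to RGB is done first.

import cv2

# Minimal sketch: convert an 8-bit RGB image to HLS and split the channels.
# "example.jpg" is a hypothetical file path; cv2.imread returns BGR order.
image_bgr = cv2.imread("example.jpg")
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)

hls = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2HLS)
h_channel = hls[:, :, 0]  # hue, 0-179 for uint8 input
l_channel = hls[:, :, 1]  # lightness, 0-255
s_channel = hls[:, :, 2]  # saturation, 0-255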

Example 1: color_grid_thresh

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def color_grid_thresh(img, s_thresh=(170,255), sx_thresh=(20, 100)):
	img = np.copy(img)
	# Convert to HLS color space and separate the L and S channels
	hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
	l_channel = hls[:,:,1]
	s_channel = hls[:,:,2]
	# Sobel x
	sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
	abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines
	scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))

	# Threshold x gradient
	sxbinary = np.zeros_like(scaled_sobel)
	sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1

	# Threshold color channel
	s_binary = np.zeros_like(s_channel)
	s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1

	# Combine the two binary masks
	binary = sxbinary | s_binary

	# Stack each channel (to visually check the pixel source)
	# color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary,s_binary)) * 255
	return binary 
Author: ChengZhongShen, Project: Advanced_Lane_Lines, Lines: 27, Source: image_process.py
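
As a quick illustration, color_grid_thresh above could be called as sketched below, assuming an 8-bit RGB road image (for example a JPEG read with matplotlib.image.imread, as the test helper in Example 8 does). The file path is made up for illustration.

import matplotlib.image as mpimg
import matplotlib.pyplot as plt

# Hypothetical usage sketch: apply the combined color/gradient threshold
# and inspect the resulting binary lane mask.
img = mpimg.imread("test_images/test1.jpg")  # hypothetical path; JPEGs load as uint8 RGB
binary = color_grid_thresh(img, s_thresh=(170, 255), sx_thresh=(20, 100))
plt.imshow(binary, cmap="gray")
plt.show()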

Example 2: __call__

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def __call__(self, data):
        h, w, c = data.shape
        
        assert c%3 == 0, "input channel = %d, illegal"%c
        random_vars = [int(round(self.rng.uniform(-x, x))) for x in self.vars]

        base = len(random_vars)
        augmented_data = np.zeros(data.shape, )
        for i_im in range(0, int(c/3)):
            augmented_data[:,:,3*i_im:(3*i_im+3)] = \
                    cv2.cvtColor(data[:,:,3*i_im:(3*i_im+3)], cv2.COLOR_RGB2HLS)

        hls_limits = [180, 255, 255]
        for ic in range(0, c):
            var = random_vars[ic%base]
            limit = hls_limits[ic%base]
            augmented_data[:,:,ic] = np.minimum(np.maximum(augmented_data[:,:,ic] + var, 0), limit)

        for i_im in range(0, int(c/3)):
            augmented_data[:,:,3*i_im:(3*i_im+3)] = \
                    cv2.cvtColor(augmented_data[:,:,3*i_im:(3*i_im+3)].astype(np.uint8), \
                        cv2.COLOR_HLS2RGB)

        return augmented_data 
Author: facebookresearch, Project: dmc-net, Lines: 26, Source: image_transforms.py

Example 3: __call__

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def __call__(self, data, idx=None, copy_id=0):
        h, w, c = data.shape
        assert c%3 == 0, "input channel = %d, illegal"%c

        random_vars = [int(self.rng.uniform(-x, x)) for x in self.vars]

        base = len(random_vars)
        augmented_data = np.zeros(data.shape, )

        for i_im in range(0, int(c/3)):
            augmented_data[:,:,3*i_im:(3*i_im+3)] = \
                    cv2.cvtColor(data[:,:,3*i_im:(3*i_im+3)], cv2.COLOR_RGB2HLS)

        hls_limits = [180, 255, 255]
        for ic in range(0, c):
            var = random_vars[ic%base]
            limit = hls_limits[ic%base]
            augmented_data[:,:,ic] = np.minimum(np.maximum(augmented_data[:,:,ic] + var, 0), limit)

        for i_im in range(0, int(c/3)):
            augmented_data[:,:,3*i_im:(3*i_im+3)] = \
                    cv2.cvtColor(augmented_data[:,:,3*i_im:(3*i_im+3)].astype(np.uint8), \
                        cv2.COLOR_HLS2RGB)

        return augmented_data 
Author: facebookresearch, Project: GloRe, Lines: 27, Source: image_transforms.py

Example 4: __call__

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def __call__(self, data):
        h, w, c = data.shape
        assert c%3 == 0, "input channel = %d, illegal"%c

        random_vars = [int(round(self.rng.uniform(-x, x))) for x in self.vars]

        base = len(random_vars)
        augmented_data = np.zeros(data.shape, )

        for i_im in range(0, int(c/3)):
            augmented_data[:,:,3*i_im:(3*i_im+3)] = \
                    cv2.cvtColor(data[:,:,3*i_im:(3*i_im+3)], cv2.COLOR_RGB2HLS)

        hls_limits = [180, 255, 255]
        for ic in range(0, c):
            var = random_vars[ic%base]
            limit = hls_limits[ic%base]
            augmented_data[:,:,ic] = np.minimum(np.maximum(augmented_data[:,:,ic] + var, 0), limit)

        for i_im in range(0, int(c/3)):
            augmented_data[:,:,3*i_im:(3*i_im+3)] = \
                    cv2.cvtColor(augmented_data[:,:,3*i_im:(3*i_im+3)].astype(np.uint8), \
                        cv2.COLOR_HLS2RGB)

        return augmented_data 
Author: cypw, Project: PyTorch-MFNet, Lines: 27, Source: image_transforms.py
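
Examples 2-4 above are near-identical variants of the same per-channel HLS jitter augmentation from different repositories; the constructors that set self.rng and self.vars are not shown. A self-contained sketch of the same idea for a single RGB image, with hypothetical jitter magnitudes, might look like this:

import cv2
import numpy as np

rng = np.random.RandomState(0)
hls_vars = (10, 25, 25)        # hypothetical maximum jitter per H, L, S channel
hls_limits = (180, 255, 255)   # upper bounds used for clipping, matching the examples above

def jitter_hls(rgb_uint8):
    # Convert to HLS, add one random offset per channel, clip, convert back.
    hls = cv2.cvtColor(rgb_uint8, cv2.COLOR_RGB2HLS).astype(np.int32)
    for ch, (var, limit) in enumerate(zip(hls_vars, hls_limits)):
        offset = int(round(rng.uniform(-var, var)))
        hls[:, :, ch] = np.clip(hls[:, :, ch] + offset, 0, limit)
    return cv2.cvtColor(hls.astype(np.uint8), cv2.COLOR_HLS2RGB)

image = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # stand-in image
augmented = jitter_hls(image)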

Example 5: get_statistics

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def get_statistics(img):
    # Expects a float RGB image; values are clipped into [0, 1]
    img = np.clip(img, a_min=0.0, a_max=1.0)
    HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    # Luminance from classic NTSC-style channel weights
    lum = img[:, :, 0] * 0.27 + img[:, :, 1] * 0.67 + img[:, :, 2] * 0.06
    # Mean saturation, taken from the S channel of HLS
    sat = HLS[:, :, 2].mean()
    return [lum.mean(), lum.std() * 2, sat] 
Author: yuanming-hu, Project: exposure, Lines: 8, Source: histogram_intersection.py

Example 6: color_grid_thresh_dynamic

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def color_grid_thresh_dynamic(img, s_thresh=(170,255), sx_thresh=(20, 100)):
	img = np.copy(img)
	height = img.shape[0]
	width = img.shape[1]
	# Convert to HLS color space and separate the L and S channels
	hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
	l_channel = hls[:,:,1]
	s_channel = hls[:,:,2]
	# Sobel x
	sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
	abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines
	scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))

	# Threshold x gradient
	sxbinary = np.zeros_like(scaled_sobel)
	sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1

	# Threshold color channel
	s_binary = np.zeros_like(s_channel)
	s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1

	sxbinary[:, :width//2] = 0	# keep only the right half of the gradient mask
	s_binary[:,width//2:] = 0 # keep only the left half of the color mask

	# Combine the two binary masks
	binary = sxbinary | s_binary

	# Stack each channel (to visually check the pixel source)
	# color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary,s_binary)) * 255
	return binary 
Author: ChengZhongShen, Project: Advanced_Lane_Lines, Lines: 32, Source: image_process.py

Example 7: yellow_grid_thresh

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def yellow_grid_thresh(img, y_low=(10,50,0), y_high=(30,255,255), sx_thresh=(20, 100)):
	img = np.copy(img)
	# Convert to HLS color space and separate the L and S channels
	hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
	l_channel = hls[:,:,1]
	s_channel = hls[:,:,2]
	# Sobel x
	sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
	abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines
	scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))

	# Threshold x gradient
	sxbinary = np.zeros_like(scaled_sobel)
	sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1

	# # Threshold color channel
	# s_binary = np.zeros_like(s_channel)
	# s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
	
	yellow_filtered = yellow_filter(img, y_low, y_high)
	yellow_filtered[yellow_filtered > 0] = 1 # convert to binary

	# Combine the two binary masks: gradient on the right, yellow filter on the left
	sxbinary[:,:640] = 0 # use right side of sxbinary
	yellow_filtered[:,640:] = 0 # use left side of yellow filtered


	binary = sxbinary | yellow_filtered

	# Stack each channel (to visually check the pixel source)
	# color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary,s_binary)) * 255
	return binary 
Author: ChengZhongShen, Project: Advanced_Lane_Lines, Lines: 34, Source: image_process.py

Example 8: test_thresh_image

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def test_thresh_image(image, s_thresh, sx_thresh):
	"""
	Visualize intermediate results to help tune the threshold parameters.
	"""
	img = mpimg.imread(image)
	img_threshed = color_grid_thresh(img, s_thresh=s_thresh, sx_thresh=sx_thresh)

	# Convert to HLS color space and separate the L and S channels
	hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
	l_channel = hls[:,:,1]
	s_channel = hls[:,:,2]
	# Sobel x
	sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
	abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines
	scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))

	# Threshold x gradient
	sxbinary = np.zeros_like(scaled_sobel)
	sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1

	# Threshold color channel
	s_binary = np.zeros_like(s_channel)
	s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1

	# Combine the two binary masks
	binary = sxbinary | s_binary

	
	plt.figure(),plt.imshow(img),plt.title("original")
	plt.figure(),plt.imshow(sxbinary, cmap='gray'),plt.title("x-gradient")
	plt.figure(),plt.imshow(s_binary, cmap='gray'),plt.title("color-threshed")
	plt.figure(),plt.imshow(s_channel, cmap='gray'),plt.title("s_channel")

	plt.figure(),plt.imshow(img_threshed, cmap='gray'),plt.title("combined-threshed")
	plt.show() 
Author: ChengZhongShen, Project: Advanced_Lane_Lines, Lines: 37, Source: image_process.py

Example 9: test_random_colorspace

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def test_random_colorspace(self):
        def _images_to_cspaces(images, choices):
            result = np.full((len(images),), -1, dtype=np.int32)
            for i, image_aug in enumerate(images):
                for j, choice in enumerate(choices):
                    if np.array_equal(image_aug, choice):
                        result[i] = j
                        break
            assert np.all(result != -1)
            return result

        image = np.arange(6*6*3).astype(np.uint8).reshape((6, 6, 3))
        expected_hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)[:, :, 2:2+1]
        expected_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)[:, :, 1:1+1]

        child = _BatchCapturingDummyAugmenter()
        aug = iaa.WithBrightnessChannels(
            children=child,
            to_colorspace=[iaa.CSPACE_HSV, iaa.CSPACE_HLS])

        images = [np.copy(image) for _ in sm.xrange(100)]

        _ = aug(images=images)
        images_aug1 = child.last_batch.images

        _ = aug(images=images)
        images_aug2 = child.last_batch.images

        cspaces1 = _images_to_cspaces(images_aug1, [expected_hsv, expected_hls])
        cspaces2 = _images_to_cspaces(images_aug2, [expected_hsv, expected_hls])

        assert np.any(cspaces1 != cspaces2)
        assert len(np.unique(cspaces1)) > 1
        assert len(np.unique(cspaces2)) > 1 
Author: aleju, Project: imgaug, Lines: 36, Source: test_color.py

Example 10: test_basic_functionality

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def test_basic_functionality(self):
        # basic functionality test
        aug = iaa.FastSnowyLandscape(
            lightness_threshold=100,
            lightness_multiplier=2.0)
        image = np.arange(0, 6*6*3).reshape((6, 6, 3)).astype(np.uint8)
        image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
        mask = (image_hls[..., 1] < 100)
        expected = np.copy(image_hls).astype(np.float32)
        expected[..., 1][mask] *= 2.0
        expected = np.clip(np.round(expected), 0, 255).astype(np.uint8)
        expected = cv2.cvtColor(expected, cv2.COLOR_HLS2RGB)
        observed = aug.augment_image(image)
        assert np.array_equal(observed, expected) 
Author: aleju, Project: imgaug, Lines: 16, Source: test_weather.py

Example 11: test_vary_lightness_threshold

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def test_vary_lightness_threshold(self):
        # test when varying lightness_threshold between images
        image = np.arange(0, 6*6*3).reshape((6, 6, 3)).astype(np.uint8)
        image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)

        aug = iaa.FastSnowyLandscape(
            lightness_threshold=_TwoValueParam(75, 125),
            lightness_multiplier=2.0)

        mask = (image_hls[..., 1] < 75)
        expected1 = np.copy(image_hls).astype(np.float64)
        expected1[..., 1][mask] *= 2.0
        expected1 = np.clip(np.round(expected1), 0, 255).astype(np.uint8)
        expected1 = cv2.cvtColor(expected1, cv2.COLOR_HLS2RGB)

        mask = (image_hls[..., 1] < 125)
        expected2 = np.copy(image_hls).astype(np.float64)
        expected2[..., 1][mask] *= 2.0
        expected2 = np.clip(np.round(expected2), 0, 255).astype(np.uint8)
        expected2 = cv2.cvtColor(expected2, cv2.COLOR_HLS2RGB)

        observed = aug.augment_images([image] * 4)

        assert np.array_equal(observed[0], expected1)
        assert np.array_equal(observed[1], expected2)
        assert np.array_equal(observed[2], expected1)
        assert np.array_equal(observed[3], expected2) 
Author: aleju, Project: imgaug, Lines: 29, Source: test_weather.py

Example 12: test_vary_lightness_multiplier

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def test_vary_lightness_multiplier(self):
        # test when varying lightness_multiplier between images
        image = np.arange(0, 6*6*3).reshape((6, 6, 3)).astype(np.uint8)
        image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)

        aug = iaa.FastSnowyLandscape(
            lightness_threshold=100,
            lightness_multiplier=_TwoValueParam(1.5, 2.0))

        mask = (image_hls[..., 1] < 100)
        expected1 = np.copy(image_hls).astype(np.float64)
        expected1[..., 1][mask] *= 1.5
        expected1 = np.clip(np.round(expected1), 0, 255).astype(np.uint8)
        expected1 = cv2.cvtColor(expected1, cv2.COLOR_HLS2RGB)

        mask = (image_hls[..., 1] < 100)
        expected2 = np.copy(image_hls).astype(np.float64)
        expected2[..., 1][mask] *= 2.0
        expected2 = np.clip(np.round(expected2), 0, 255).astype(np.uint8)
        expected2 = cv2.cvtColor(expected2, cv2.COLOR_HLS2RGB)

        observed = aug.augment_images([image] * 4)

        assert np.array_equal(observed[0], expected1)
        assert np.array_equal(observed[1], expected2)
        assert np.array_equal(observed[2], expected1)
        assert np.array_equal(observed[3], expected2) 
Author: aleju, Project: imgaug, Lines: 29, Source: test_weather.py

Example 13: random_shadow

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def random_shadow(image):
    """
    Generates and adds random shadow
    """
    # (x1, y1) and (x2, y2) forms a line
    # xm, ym gives all the locations of the image
    x1, y1 = IMAGE_WIDTH * np.random.rand(), 0
    x2, y2 = IMAGE_WIDTH * np.random.rand(), IMAGE_HEIGHT
    xm, ym = np.mgrid[0:IMAGE_HEIGHT, 0:IMAGE_WIDTH]

    # Mathematically speaking, we want to set 1 below the line and 0 otherwise.
    # Our coordinate system is upside down, so "above the line" means:
    # (ym-y1)/(xm-x1) > (y2-y1)/(x2-x1)
    # Since x2 == x1 would cause a division by zero, we rewrite it as:
    # (ym-y1)*(x2-x1) - (y2-y1)*(xm-x1) > 0
    mask = np.zeros_like(image[:, :, 1])
    mask[(ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0] = 1

    # choose which side of the line gets the shadow and how strongly to darken it
    cond = mask == np.random.randint(2)
    s_ratio = np.random.uniform(low=0.2, high=0.5)

    # scale the lightness (L) channel in HLS (Hue, Lightness, Saturation); index 1 is L
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio
    return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB) 
Author: BerkeleyLearnVerify, Project: VerifAI, Lines: 28, Source: utils.py

Example 14: random_color_warp

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def random_color_warp(image, d_h=None, d_s=None, d_l=None):
    """ Given an RGB image [H x W x 3], add random hue, saturation and luminosity to the image

        Code adapted from: https://github.com/yuxng/PoseCNN/blob/master/lib/utils/blob.py
    """
    H, W, _ = image.shape

    image_color_warped = np.zeros_like(image)

    # Random hue, luminosity and saturation offsets, each roughly within ±10% of the 8-bit range
    if d_h is None:
        d_h = (random.random() - 0.5) * 0.2 * 256
    if d_l is None:
        d_l = (random.random() - 0.5) * 0.2 * 256
    if d_s is None:
        d_s = (random.random() - 0.5) * 0.2 * 256

    # Convert the RGB to HLS
    hls = cv2.cvtColor(image.round().astype(np.uint8), cv2.COLOR_RGB2HLS)
    h, l, s = cv2.split(hls)

    # Add the values to the image H, L, S
    new_h = (np.round((h + d_h)) % 256).astype(np.uint8)
    new_l = np.round(np.clip(l + d_l, 0, 255)).astype(np.uint8)
    new_s = np.round(np.clip(s + d_s, 0, 255)).astype(np.uint8)

    # Convert the HLS to RGB
    new_hls = cv2.merge((new_h, new_l, new_s)).astype(np.uint8)
    new_im = cv2.cvtColor(new_hls, cv2.COLOR_HLS2RGB)

    image_color_warped = new_im.astype(np.float32)

    return image_color_warped 
Author: chrisdxie, Project: uois, Lines: 35, Source: data_augmentation.py
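
A small usage sketch for random_color_warp: it accepts a float RGB image (it rounds and casts to uint8 internally) and returns a float32 result. The stand-in image and the fixed hue offset below are made up for illustration.

import numpy as np

# Hypothetical usage sketch: jitter the colors of an RGB image.
rgb = np.random.randint(0, 256, size=(480, 640, 3)).astype(np.float32)  # stand-in image
warped = random_color_warp(rgb)                       # random hue/lightness/saturation offsets
warped_fixed_hue = random_color_warp(rgb, d_h=10.0)   # fix the hue shift, randomize the rest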

Example 15: random_shadow

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2HLS [as alias]
def random_shadow(image):
    """
    Generates and adds random shadow
    """
    # (x1, y1) and (x2, y2) forms a line
    # xm, ym gives all the locations of the image
    x1, y1 = IMAGE_WIDTH * np.random.rand(), 0
    x2, y2 = IMAGE_WIDTH * np.random.rand(), IMAGE_HEIGHT
    xm, ym = np.mgrid[0:IMAGE_HEIGHT, 0:IMAGE_WIDTH]

    # Mathematically speaking, we want to set 1 below the line and 0 otherwise.
    # Our coordinate system is upside down, so "above the line" means:
    # (ym-y1)/(xm-x1) > (y2-y1)/(x2-x1)
    # Since x2 == x1 would cause a division by zero, we rewrite it as:
    # (ym-y1)*(x2-x1) - (y2-y1)*(xm-x1) > 0
    mask = np.zeros_like(image[:, :, 1])
    mask[(ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0] = 1

    # choose which side of the line gets the shadow and how strongly to darken it
    cond = mask == np.random.randint(2)
    s_ratio = np.random.uniform(low=0.2, high=0.5)

    # scale the lightness (L) channel in HLS (Hue, Lightness, Saturation); index 1 is L
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio
    return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB) 
Author: naokishibuya, Project: car-behavioral-cloning, Lines: 28, Source: utils.py
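
For completeness, random_shadow relies on module-level IMAGE_WIDTH and IMAGE_HEIGHT constants that are not part of the snippet; a hedged usage sketch with made-up values might look like this:

import numpy as np

# Hypothetical usage sketch: the constant values below are made up and must
# match the shape of the image passed in.
IMAGE_WIDTH, IMAGE_HEIGHT = 320, 160
image = np.random.randint(0, 256, (IMAGE_HEIGHT, IMAGE_WIDTH, 3), dtype=np.uint8)  # stand-in image
shadowed = random_shadow(image)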


Note: The cv2.COLOR_RGB2HLS attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as Github/MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. For redistribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.