This article collects typical usage examples of the cv2.COLOR_BGR2HLS attribute in Python. If you are wondering what cv2.COLOR_BGR2HLS does, how to use it, or want to see it in context, the curated code examples below may help. You can also explore further usage examples of cv2, the module this attribute belongs to.
The following shows 7 code examples of the cv2.COLOR_BGR2HLS attribute, sorted by popularity by default.
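Before the examples, a minimal sketch of what the attribute does: it is the conversion code passed to cv2.cvtColor to convert a BGR image to the HLS color space (cv2.COLOR_HLS2BGR converts back). The file name below is a placeholder:

import cv2

img_bgr = cv2.imread("example.jpg")                  # placeholder path; OpenCV loads images as BGR
img_hls = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HLS)   # for 8-bit images: H in [0, 180), L and S in [0, 255]
img_back = cv2.cvtColor(img_hls, cv2.COLOR_HLS2BGR)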
Example 1: color_aug
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2HLS [as alias]
# Also uses: import random; import numpy as np
def color_aug(img, random_h=36, random_l=50, random_s=50):
    # Randomly shift the H, L and S channels of a BGR image, clamping to valid ranges.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2HLS).astype(float)
    h = (random.random() * 2 - 1.0) * random_h
    l = (random.random() * 2 - 1.0) * random_l
    s = (random.random() * 2 - 1.0) * random_s
    img[..., 0] += h
    img[..., 0] = np.minimum(img[..., 0], 180)  # OpenCV stores hue in [0, 180) for 8-bit images
    img[..., 1] += l
    img[..., 1] = np.minimum(img[..., 1], 255)
    img[..., 2] += s
    img[..., 2] = np.minimum(img[..., 2], 255)
    img = np.maximum(img, 0)
    img = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_HLS2BGR)
    return img
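The function above can be applied to any BGR image loaded with OpenCV; a minimal usage sketch (the file names are placeholders):

import cv2

img = cv2.imread("sample.jpg")            # placeholder path; loaded as BGR
augmented = color_aug(img)                # random hue/lightness/saturation jitter
cv2.imwrite("sample_aug.jpg", augmented)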
Example 2: find_edges
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2HLS [as alias]
# Also uses: import numpy as np
def find_edges(img, s_thresh=s_thresh, sx_thresh=sx_thresh, dir_thresh=dir_thresh):
    # s_thresh, sx_thresh and dir_thresh are module-level defaults defined elsewhere in the project.
    img = np.copy(img)
    # Convert to HLS color space and threshold the S channel
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS).astype(float)
    s_channel = hls[:, :, 2]
    s_binary = threshold_col_channel(s_channel, thresh=s_thresh)
    # Sobel x
    sxbinary = abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=sx_thresh)
    # mag_binary = mag_thresh(img, sobel_kernel=3, thresh=m_thresh)
    # gradient direction
    dir_binary = dir_threshold(img, sobel_kernel=3, thresh=dir_thresh)
    # output mask
    combined_binary = np.zeros_like(s_channel)
    combined_binary[((sxbinary == 1) & (dir_binary == 1)) | ((s_binary == 1) & (dir_binary == 1))] = 1
    # give extra weight to pixels that pass both the Sobel-x and S-channel thresholds
    c_bi = np.zeros_like(s_channel)
    c_bi[(sxbinary == 1) & (s_binary == 1)] = 2
    ave_binary = combined_binary + c_bi
    return ave_binary
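The helpers threshold_col_channel, abs_sobel_thresh and dir_threshold belong to the same project and are not shown here. A minimal sketch of what threshold_col_channel might look like, based only on how it is called above (the body is an assumption):

import numpy as np

def threshold_col_channel(channel, thresh=(0, 255)):
    # Hypothetical helper: 1 where the channel value falls inside (low, high], else 0.
    binary = np.zeros_like(channel)
    binary[(channel > thresh[0]) & (channel <= thresh[1])] = 1
    return binary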
Example 3: change_saturation
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2HLS [as alias]
def change_saturation(self):
    if self.raw_image is None:
        return 0
    value = self.ui.horizontalSlider.value()
    # The image is converted to HLS, so channel index 2 is saturation.
    img_hls = cv2.cvtColor(self.current_img, cv2.COLOR_BGR2HLS)
    if value > 2:
        img_hls[:, :, 2] = np.log(img_hls[:, :, 2] / 255 * (value - 1) + 1) / np.log(value + 1) * 255
    if value < 0:
        img_hls[:, :, 2] = np.uint8(img_hls[:, :, 2] / np.log(-value + np.e))
    self.current_img = cv2.cvtColor(img_hls, cv2.COLOR_HLS2BGR)

# Lightness adjustment (the next method in the source file)
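The method above depends on a Qt widget class, but the saturation curve itself can be tried standalone. A minimal sketch of the same log mapping outside the GUI (the function name, slider value and file path are made up for illustration):

import cv2
import numpy as np

def adjust_saturation(img_bgr, value):
    # Same log-curve mapping as above, applied to the S channel (index 2) of an HLS image.
    hls = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HLS)
    if value > 2:
        hls[:, :, 2] = np.log(hls[:, :, 2] / 255 * (value - 1) + 1) / np.log(value + 1) * 255
    elif value < 0:
        hls[:, :, 2] = np.uint8(hls[:, :, 2] / np.log(-value + np.e))
    return cv2.cvtColor(hls, cv2.COLOR_HLS2BGR)

img = cv2.imread("input.jpg")             # placeholder path
saturated = adjust_saturation(img, 5)     # value > 2 boosts saturation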
Example 4: test_from_colorspace
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2HLS [as alias]
def test_from_colorspace(self):
    # test BGR colorspace
    aug = iaa.FastSnowyLandscape(
        lightness_threshold=100,
        lightness_multiplier=2.0,
        from_colorspace="BGR")
    image = np.arange(0, 6*6*3).reshape((6, 6, 3)).astype(np.uint8)
    image_hls = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)
    mask = (image_hls[..., 1] < 100)
    expected = np.copy(image_hls).astype(np.float32)
    expected[..., 1][mask] *= 2.0
    expected = np.clip(np.round(expected), 0, 255).astype(np.uint8)
    expected = cv2.cvtColor(expected, cv2.COLOR_HLS2BGR)
    observed = aug.augment_image(image)
    assert np.array_equal(observed, expected)
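For reference, the augmenter under test can also be applied directly; a minimal usage sketch built from the same calls that appear in the test (the input image here is a placeholder):

import numpy as np
import imgaug.augmenters as iaa

aug = iaa.FastSnowyLandscape(
    lightness_threshold=100,     # L-channel values below this get multiplied
    lightness_multiplier=2.0,
    from_colorspace="BGR")

image_bgr = np.zeros((64, 64, 3), dtype=np.uint8)  # placeholder BGR image
snowy = aug.augment_image(image_bgr)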
Example 5: test_detect
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2HLS [as alias]
# Also uses: import time (AndroidDeviceMinicap and SceneDetector come from the surrounding project)
def test_detect():
    dev = AndroidDeviceMinicap()
    dev._adb.start_minitouch()
    time.sleep(3)

    d = SceneDetector('txxscene')
    old, new = None, None
    while True:
        # time.sleep(0.3)
        screen = dev.screenshot_cv2()
        h, w = screen.shape[:2]
        img = cv2.resize(screen, (w // 2, h // 2))  # cv2.resize needs integer sizes

        # threshold the V channel (HSV) and the L channel (HLS)
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
        _, _, V = cv2.split(hsv)
        V[V < 150] = 0
        cv2.imshow('V', V)
        _, L, _ = cv2.split(hls)  # HLS channel order is H, L, S
        L[L < 150] = 0
        cv2.imshow('L', L)

        tic = time.time()
        new = str(d.detect(img))
        t = time.time() - tic
        if new != old:
            print('change to', new)
            print('cost time', t)
            old = new

        for _, r in d.current_scene:
            x, y, x1, y1 = r
            cv2.rectangle(img, (x, y), (x1, y1), (0, 255, 0), 2)
        cv2.imshow('test', img)
        cv2.waitKey(1)
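AndroidDeviceMinicap and SceneDetector come from the surrounding project and are not runnable on their own; the part relevant to cv2.COLOR_BGR2HLS is the channel split and lightness masking, which can be tried on any local image. A minimal sketch (the file name is a placeholder):

import cv2

img = cv2.imread("frame.png")              # placeholder input
hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
H, L, S = cv2.split(hls)
L[L < 150] = 0                             # keep only the brighter regions of the lightness channel
cv2.imshow('L', L)
cv2.waitKey(0)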
Example 6: change_darker
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2HLS [as alias]
def change_darker(self):
    if self.raw_image is None:
        return 0
    value = self.ui.horizontalSlider_4.value()
    # The image is converted to HLS, so channel index 1 is lightness.
    img_hls = cv2.cvtColor(self.current_img, cv2.COLOR_BGR2HLS)
    if value > 3:
        img_hls[:, :, 1] = np.log(img_hls[:, :, 1] / 255 * (value - 1) + 1) / np.log(value + 1) * 255
    if value < 0:
        img_hls[:, :, 1] = np.uint8(img_hls[:, :, 1] / np.log(-value + np.e))
    self.last_image = self.current_img
    self.current_img = cv2.cvtColor(img_hls, cv2.COLOR_HLS2BGR)

# Face recognition (the next method in the source file)
Example 7: upsample_color_image
# Required imports: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2HLS [as alias]
def upsample_color_image(grayscale_highres, color_lowres_bgr, colorspace='LAB'):
    """
    Generate a high-res color image from a high-res grayscale image and a low-res color image,
    using the trick described in:
    http://www.planetary.org/blogs/emily-lakdawalla/2013/04231204-image-processing-colorizing-images.html
    """
    assert len(grayscale_highres.shape) == 2
    assert len(color_lowres_bgr.shape) == 3 and color_lowres_bgr.shape[2] == 3
    if colorspace == 'LAB':
        # convert color image to LAB space
        lab = cv2.cvtColor(src=color_lowres_bgr, code=cv2.COLOR_BGR2LAB)
        # replace lightness channel with the highres image
        lab[:, :, 0] = grayscale_highres
        # convert back to BGR
        color_highres_bgr = cv2.cvtColor(src=lab, code=cv2.COLOR_LAB2BGR)
    elif colorspace == 'HSV':
        # convert color image to HSV space
        hsv = cv2.cvtColor(src=color_lowres_bgr, code=cv2.COLOR_BGR2HSV)
        # replace value channel with the highres image
        hsv[:, :, 2] = grayscale_highres
        # convert back to BGR
        color_highres_bgr = cv2.cvtColor(src=hsv, code=cv2.COLOR_HSV2BGR)
    elif colorspace == 'HLS':
        # convert color image to HLS space
        hls = cv2.cvtColor(src=color_lowres_bgr, code=cv2.COLOR_BGR2HLS)
        # replace lightness channel with the highres image
        hls[:, :, 1] = grayscale_highres
        # convert back to BGR
        color_highres_bgr = cv2.cvtColor(src=hls, code=cv2.COLOR_HLS2BGR)
    else:
        raise ValueError("colorspace must be 'LAB', 'HSV' or 'HLS'")
    return color_highres_bgr
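The channel replacement requires both inputs to have the same height and width, so the low-res color image has to be upsampled to the grayscale image's size first. A usage sketch under that assumption (file names are placeholders):

import cv2

gray_high = cv2.imread("pan_highres.png", cv2.IMREAD_GRAYSCALE)  # placeholder path
color_low = cv2.imread("color_lowres.png", cv2.IMREAD_COLOR)     # placeholder path

h, w = gray_high.shape
color_up = cv2.resize(color_low, (w, h), interpolation=cv2.INTER_CUBIC)

result = upsample_color_image(gray_high, color_up, colorspace='HLS')
cv2.imwrite("colorized_highres.png", result)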