本文整理汇总了Python中cv2.COLOR_RGB2YUV属性的典型用法代码示例。如果您正苦于以下问题:Python cv2.COLOR_RGB2YUV属性的具体用法?Python cv2.COLOR_RGB2YUV怎么用?Python cv2.COLOR_RGB2YUV使用的例子?那么恭喜您,这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类cv2的用法示例。
在下文中一共展示了cv2.COLOR_RGB2YUV属性的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_img_random_crop
# 需要导入模块: import cv2 [as 别名]
# 或者: from cv2 import COLOR_RGB2YUV [as 别名]
def get_img_random_crop(src, resize=512, crop=256):
    """Load an image from *src*, resize it, and return a random square crop.

    Args:
        src: image source accepted by ``get_img`` (presumably a path or
            URL — confirm against the helper's definition).
        resize: target size passed through to ``resize_to``.
        crop: side length of the random crop taken from the resized image.

    Returns:
        A ``crop`` x ``crop`` slice of the resized image (all channels).
    """
    image = resize_to(get_img(src), resize=resize)
    max_top = image.shape[0] - crop
    max_left = image.shape[1] - crop
    top = random.randint(0, max_top)
    left = random.randint(0, max_left)
    return image[top:top + crop, left:left + crop, :]
# def preserve_colors(content_rgb, styled_rgb):
# """Extract luminance from styled image and apply colors from content"""
# if content_rgb.shape != styled_rgb.shape:
# new_shape = (content_rgb.shape[1], content_rgb.shape[0])
# styled_rgb = cv2.resize(styled_rgb, new_shape)
# styled_yuv = cv2.cvtColor(styled_rgb, cv2.COLOR_RGB2YUV)
# Y_s, U_s, V_s = cv2.split(styled_yuv)
# image_YUV = cv2.cvtColor(content_rgb, cv2.COLOR_RGB2YUV)
# Y_i, U_i, V_i = cv2.split(image_YUV)
# styled_rgb = cv2.cvtColor(np.stack([Y_s, U_i, V_i], axis=-1), cv2.COLOR_YUV2RGB)
# return styled_rgb
示例2: schedule_frame
# 需要导入模块: import cv2 [as 别名]
# 或者: from cv2 import COLOR_RGB2YUV [as 别名]
def schedule_frame(self, frame):
    """Convert an RGB frame to packed YUYV (YUV 4:2:2) and write it to the webcam device.

    Args:
        frame: HxWxC uint8 array whose dimensions must match the device's
            configured height, width, and channel count.

    Raises:
        Exception: if any frame dimension does not match the device settings.
    """
    if frame.shape[0] != self._settings.fmt.pix.height:
        raise Exception('frame height does not match the height of webcam device: {}!={}\n'.format(self._settings.fmt.pix.height, frame.shape[0]))
    if frame.shape[1] != self._settings.fmt.pix.width:
        raise Exception('frame width does not match the width of webcam device: {}!={}\n'.format(self._settings.fmt.pix.width, frame.shape[1]))
    if frame.shape[2] != self._channels:
        raise Exception('num frame channels does not match the num channels of webcam device: {}!={}\n'.format(self._channels, frame.shape[2]))
    if cv2_imported:
        # Fast path: OpenCV performs the RGB->YUV conversion in native code.
        self._yuv = cv2.cvtColor(frame, cv2.COLOR_RGB2YUV)
    else:
        # Fallback: affine RGB->YUV via matrix multiply. self._ones appends a
        # constant column so self._rgb2yuv can carry the additive offsets;
        # results are clamped into the valid byte range.
        frame = np.concatenate((frame, self._ones), axis=2)
        frame = np.dot(frame, self._rgb2yuv.T)
        self._yuv[:, :, :] = np.clip(frame, 0, 255)
    # Interleave rows into YUYV: a Y sample for every pixel, with each
    # horizontal pixel pair sharing one U and one V sample.
    for i in range(self._settings.fmt.pix.height):
        self._buffer[i, ::2] = self._yuv[i, :, 0]
        self._buffer[i, 1::4] = self._yuv[i, ::2, 1]
        self._buffer[i, 3::4] = self._yuv[i, ::2, 2]
    # ndarray.tostring() was deprecated in NumPy 1.19 and removed in 2.0;
    # tobytes() is the drop-in replacement.
    os.write(self._video_device, self._buffer.tobytes())
示例3: rgb2yuv
# 需要导入模块: import cv2 [as 别名]
# 或者: from cv2 import COLOR_RGB2YUV [as 别名]
def rgb2yuv(image):
    """Return *image* converted from RGB to YUV.

    This matches the input colorspace used by the NVIDIA steering model.
    """
    converted = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
    return converted
示例4: predict_steering
# 需要导入模块: import cv2 [as 别名]
# 或者: from cv2 import COLOR_RGB2YUV [as 别名]
def predict_steering(self, data):
    """Predict a steering angle for the frame in ``data['image']``.

    The image is converted to YUV, cropped to the region of interest,
    expanded to a batch of one, and fed to the global ``model``.
    """
    yuv_image = cv2.cvtColor(data['image'], cv2.COLOR_RGB2YUV)
    cropped = self.roi(yuv_image)
    batch = cropped[None, :, :, :]
    prediction = model.predict(batch, batch_size=1)
    return float(prediction)
# Callback functions triggered by ControlServer
示例5: preprocess_input
# 需要导入模块: import cv2 [as 别名]
# 或者: from cv2 import COLOR_RGB2YUV [as 别名]
def preprocess_input(img):
    """Convert *img* from RGB to YUV and crop it to the region of interest."""
    yuv_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
    return roi(yuv_image)
示例6: preprocess_input
# 需要导入模块: import cv2 [as 别名]
# 或者: from cv2 import COLOR_RGB2YUV [as 别名]
def preprocess_input(self, img):
    """Convert *img* from RGB to YUV and crop it via ``self.roi``."""
    yuv_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
    return self.roi(yuv_image)
示例7: test_every_colorspace
# 需要导入模块: import cv2 [as 别名]
# 或者: from cv2 import COLOR_RGB2YUV [as 别名]
def test_every_colorspace(self):
    """Verify WithBrightnessChannels round-trips every supported colorspace.

    For each valid target colorspace, the augmenter (with a pass-through
    child) should leave the image nearly unchanged, and the child should
    receive exactly the brightness channel of the converted image.
    """
    def _image_to_channel(image, cspace):
        # Reference implementation: extract the single brightness channel
        # (kept as HxWx1) that WithBrightnessChannels is expected to hand
        # to its children for the given target colorspace.
        if cspace == iaa.CSPACE_YCrCb:
            image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2YCR_CB)
            return image_cvt[:, :, 0:0+1]
        elif cspace == iaa.CSPACE_HSV:
            # Brightness lives in V, the third HSV channel.
            image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
            return image_cvt[:, :, 2:2+1]
        elif cspace == iaa.CSPACE_HLS:
            # Brightness lives in L, the second HLS channel.
            image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
            return image_cvt[:, :, 1:1+1]
        elif cspace == iaa.CSPACE_Lab:
            # The constant's casing differs across OpenCV versions.
            if hasattr(cv2, "COLOR_RGB2Lab"):
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2Lab)
            else:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
            return image_cvt[:, :, 0:0+1]
        elif cspace == iaa.CSPACE_Luv:
            # Same version-dependent casing as Lab above.
            if hasattr(cv2, "COLOR_RGB2Luv"):
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2Luv)
            else:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
            return image_cvt[:, :, 0:0+1]
        else:
            assert cspace == iaa.CSPACE_YUV
            image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
            return image_cvt[:, :, 0:0+1]

    # Max differences between input image and image after augmentation
    # when no child augmenter is used (for the given example image below).
    # For some colorspaces the conversion to input colorspace isn't
    # perfect.
    # Values were manually checked.
    max_diff_expected = {
        iaa.CSPACE_YCrCb: 1,
        iaa.CSPACE_HSV: 0,
        iaa.CSPACE_HLS: 0,
        iaa.CSPACE_Lab: 2,
        iaa.CSPACE_Luv: 4,
        iaa.CSPACE_YUV: 1
    }

    # Deterministic 6x6 RGB gradient image covering values 0..107.
    image = np.arange(6*6*3).astype(np.uint8).reshape((6, 6, 3))
    for cspace in self.valid_colorspaces:
        with self.subTest(colorspace=cspace):
            child = _BatchCapturingDummyAugmenter()
            aug = iaa.WithBrightnessChannels(
                children=child,
                to_colorspace=cspace)
            image_aug = aug(image=image)
            expected = _image_to_channel(image, cspace)
            # Round-trip error must stay within the per-colorspace bound.
            diff = np.abs(
                image.astype(np.int32) - image_aug.astype(np.int32))
            assert np.all(diff <= max_diff_expected[cspace])
            # The child must have seen exactly the brightness channel.
            assert np.array_equal(child.last_batch.images[0], expected)
示例8: schedule_frame
# 需要导入模块: import cv2 [as 别名]
# 或者: from cv2 import COLOR_RGB2YUV [as 别名]
def schedule_frame(self, frame):
    """Convert an RGB frame to packed YUYV (YUV 4:2:2) and write it to the webcam device.

    Args:
        frame: HxWxC uint8 array whose dimensions must match the device's
            configured height, width, and channel count.

    Raises:
        Exception: if any frame dimension does not match the device settings.
    """
    if frame.shape[0] != self._settings.fmt.pix.height:
        raise Exception(
            "frame height does not match the height of webcam device: {}!={}\n".format(
                self._settings.fmt.pix.height, frame.shape[0]
            )
        )
    if frame.shape[1] != self._settings.fmt.pix.width:
        raise Exception(
            "frame width does not match the width of webcam device: {}!={}\n".format(
                self._settings.fmt.pix.width, frame.shape[1]
            )
        )
    if frame.shape[2] != self._channels:
        raise Exception(
            "num frame channels does not match the num channels of webcam device: {}!={}\n".format(
                self._channels, frame.shape[2]
            )
        )
    if cv2_imported:
        # Fast path: OpenCV performs the RGB->YUV conversion in native code.
        self._yuv = cv2.cvtColor(frame, cv2.COLOR_RGB2YUV)
    else:
        # Fallback: affine RGB->YUV via matrix multiply. self._ones appends a
        # constant column so self._rgb2yuv can carry the additive offsets;
        # results are clamped into the valid byte range.
        frame = np.concatenate((frame, self._ones), axis=2)
        frame = np.dot(frame, self._rgb2yuv.T)
        self._yuv[:, :, :] = np.clip(frame, 0, 255)
    # Interleave rows into YUYV: a Y sample for every pixel, with each
    # horizontal pixel pair sharing one U and one V sample.
    for i in range(self._settings.fmt.pix.height):
        self._buffer[i, ::2] = self._yuv[i, :, 0]
        self._buffer[i, 1::4] = self._yuv[i, ::2, 1]
        self._buffer[i, 3::4] = self._yuv[i, ::2, 2]
    # ndarray.tostring() was deprecated in NumPy 1.19 and removed in 2.0;
    # tobytes() is the drop-in replacement.
    os.write(self._video_device, self._buffer.tobytes())