

Python cv2.COLOR_RGB2YUV Attribute Code Examples

This article collects typical usage examples of the cv2.COLOR_RGB2YUV attribute in Python. If you are wondering exactly how cv2.COLOR_RGB2YUV is used, or are looking for real examples of it in practice, the curated code examples below may help. You can also explore further usage examples from cv2, the module this attribute belongs to.


The following presents 8 code examples of the cv2.COLOR_RGB2YUV attribute, sorted by popularity by default.
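
Before turning to the examples, here is a minimal stand-alone sketch of the basic call: cv2.COLOR_RGB2YUV is a conversion code passed to cv2.cvtColor. The 64x64 random image below is only an illustrative placeholder.

# Minimal sketch: convert an RGB uint8 image to YUV and back.
import cv2
import numpy as np

rgb = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)  # placeholder image
yuv = cv2.cvtColor(rgb, cv2.COLOR_RGB2YUV)   # same shape; channels become Y, U, V
back = cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB)  # approximate inverse conversion
print(yuv.shape, back.shape)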

Example 1: get_img_random_crop

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2YUV [as alias]
def get_img_random_crop(src, resize=512, crop=256):
    '''Load an image, resize it, and take a random square crop'''
    # get_img and resize_to are helper functions defined elsewhere in the same utils.py
    img = get_img(src)
    img = resize_to(img, resize=resize)

    # Pick a random top-left offset so the crop stays inside the resized image
    offset_h = random.randint(0, (img.shape[0]-crop))
    offset_w = random.randint(0, (img.shape[1]-crop))

    img = img[offset_h:offset_h+crop, offset_w:offset_w+crop, :]

    return img

# def preserve_colors(content_rgb, styled_rgb):
#     """Extract luminance from styled image and apply colors from content"""
#     if content_rgb.shape != styled_rgb.shape:
#       new_shape = (content_rgb.shape[1], content_rgb.shape[0])
#       styled_rgb = cv2.resize(styled_rgb, new_shape)
#     styled_yuv = cv2.cvtColor(styled_rgb, cv2.COLOR_RGB2YUV)
#     Y_s, U_s, V_s = cv2.split(styled_yuv)
#     image_YUV = cv2.cvtColor(content_rgb, cv2.COLOR_RGB2YUV)
#     Y_i, U_i, V_i = cv2.split(image_YUV)
#     styled_rgb = cv2.cvtColor(np.stack([Y_s, U_i, V_i], axis=-1), cv2.COLOR_YUV2RGB)
#     return styled_rgb 
Developer: eridgd, Project: AdaIN-TF, Lines: 25, Source file: utils.py
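
The commented-out preserve_colors helper above illustrates a common color-preserving trick in style transfer: keep the stylized image's luminance (Y) while reusing the content image's chrominance (U, V). A minimal runnable sketch of that idea, assuming both inputs are RGB uint8 arrays (the function name and structure here are illustrative, not the project's API):

# Sketch of luminance-preserving recombination, based on the commented-out helper above.
import cv2
import numpy as np

def preserve_colors_sketch(content_rgb, styled_rgb):
    # Resize the stylized image to the content image's size if needed (cv2.resize takes (width, height))
    if content_rgb.shape != styled_rgb.shape:
        styled_rgb = cv2.resize(styled_rgb, (content_rgb.shape[1], content_rgb.shape[0]))
    # Take Y from the stylized image, U and V from the content image
    y_styled = cv2.cvtColor(styled_rgb, cv2.COLOR_RGB2YUV)[:, :, 0]
    content_yuv = cv2.cvtColor(content_rgb, cv2.COLOR_RGB2YUV)
    merged = np.dstack([y_styled, content_yuv[:, :, 1], content_yuv[:, :, 2]])
    return cv2.cvtColor(merged, cv2.COLOR_YUV2RGB)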

Example 2: schedule_frame

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2YUV [as alias]
def schedule_frame(self, frame):

        if frame.shape[0] != self._settings.fmt.pix.height:
            raise Exception('frame height does not match the height of webcam device: {}!={}\n'.format(self._settings.fmt.pix.height, frame.shape[0]))
        if frame.shape[1] != self._settings.fmt.pix.width:
            raise Exception('frame width does not match the width of webcam device: {}!={}\n'.format(self._settings.fmt.pix.width, frame.shape[1]))
        if frame.shape[2] != self._channels:
            raise Exception('num frame channels does not match the num channels of webcam device: {}!={}\n'.format(self._channels, frame.shape[2]))

        if cv2_imported:
            # t1 = timeit.default_timer()
            self._yuv = cv2.cvtColor(frame, cv2.COLOR_RGB2YUV)
            # t2 = timeit.default_timer()
            # sys.stderr.write('conversion time: {}\n'.format(t2-t1))                    
        else:
            # t1 = timeit.default_timer()
            frame = np.concatenate((frame, self._ones), axis=2)
            frame = np.dot(frame, self._rgb2yuv.T)
            self._yuv[:,:,:] = np.clip(frame, 0, 255)
            # t2 = timeit.default_timer()
            # sys.stderr.write('conversion time: {}\n'.format(t2-t1))                    
            
        # t1 = timeit.default_timer()
        # Pack into an interleaved YUYV (4:2:2) buffer: every pixel keeps its Y value,
        # while U and V are taken from every other pixel and shared by each horizontal pair
        for i in range(self._settings.fmt.pix.height):
            self._buffer[i,::2] = self._yuv[i,:,0]
            self._buffer[i,1::4] = self._yuv[i,::2,1]
            self._buffer[i,3::4] = self._yuv[i,::2,2]
        # t2 = timeit.default_timer()
        # sys.stderr.write('pack time: {}\n'.format(t2-t1))
            
        os.write(self._video_device, self._buffer.tobytes())  # tostring() is deprecated/removed in newer NumPy
Developer: jremmons, Project: pyfakewebcam, Lines: 34, Source file: pyfakewebcam.py
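
When cv2 is not importable, the snippet falls back to a matrix multiply using self._rgb2yuv and self._ones, neither of which is shown here. A hypothetical construction, assuming a 3x4 affine matrix applied to [R, G, B, 1] vectors with the BT.601-style coefficients that cv2.COLOR_RGB2YUV is documented to use, could look like this:

# Hypothetical setup for the NumPy fallback above; the frame size and coefficients are assumptions.
import numpy as np

height, width = 480, 640  # illustrative frame size
_ones = np.ones((height, width, 1), dtype=np.float64)
_rgb2yuv = np.array([
    [ 0.299,  0.587,  0.114,   0.0],   # Y
    [-0.147, -0.289,  0.436, 128.0],   # U, offset by 128 for 8-bit images
    [ 0.615, -0.515, -0.100, 128.0],   # V, offset by 128 for 8-bit images
])

frame = np.random.randint(0, 256, size=(height, width, 3), dtype=np.uint8)
yuv = np.clip(np.dot(np.concatenate((frame, _ones), axis=2), _rgb2yuv.T), 0, 255)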

Example 3: rgb2yuv

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2YUV [as alias]
def rgb2yuv(image):
    """
    Convert the image from RGB to YUV (This is what the NVIDIA model does)
    """
    return cv2.cvtColor(image, cv2.COLOR_RGB2YUV) 
Developer: BerkeleyLearnVerify, Project: VerifAI, Lines: 7, Source file: utils.py
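
A quick usage sketch for the helper above; the 66x200 input size matches the NVIDIA end-to-end driving model referenced in the docstring, and the all-zero image is just a placeholder:

# Usage sketch, assuming the rgb2yuv helper defined above is in scope.
import numpy as np

image = np.zeros((66, 200, 3), dtype=np.uint8)  # placeholder; 66x200x3 is the NVIDIA model's input size
yuv_image = rgb2yuv(image)
print(yuv_image.shape)  # (66, 200, 3)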

Example 4: predict_steering

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2YUV [as alias]
def predict_steering(self, data):
        # self.roi crops the region of interest; model is the steering model loaded elsewhere in hybrid_driver.py
        image_array = self.roi(cv2.cvtColor(data['image'], cv2.COLOR_RGB2YUV))
        transformed_image_array = image_array[None, :, :, :]

        return float(model.predict(transformed_image_array, batch_size=1))

    # Callback functions triggered by ControlServer 
Developer: thomasantony, Project: sdc-live-trainer, Lines: 9, Source file: hybrid_driver.py
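
Examples 4 through 6 all call a roi helper that is not included in these snippets. A hypothetical version, assuming it simply crops away the sky and hood rows of the YUV frame before it is fed to the model, might look like this (the row range is an illustrative guess, not the repository's actual values):

# Hypothetical roi helper assumed by Examples 4-6: crop away the top (sky) and
# bottom (car hood) rows. The 60:140 range is illustrative only.
def roi(image):
    return image[60:140, :, :]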

Example 5: preprocess_input

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2YUV [as alias]
def preprocess_input(img):
    return roi(cv2.cvtColor(img, cv2.COLOR_RGB2YUV)) 
Developer: thomasantony, Project: sdc-live-trainer, Lines: 4, Source file: drive.py

Example 6: preprocess_input

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2YUV [as alias]
def preprocess_input(self, img):
        return self.roi(cv2.cvtColor(img, cv2.COLOR_RGB2YUV)) 
Developer: thomasantony, Project: sdc-live-trainer, Lines: 4, Source file: live_trainer.py

Example 7: test_every_colorspace

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2YUV [as alias]
def test_every_colorspace(self):
        def _image_to_channel(image, cspace):
            if cspace == iaa.CSPACE_YCrCb:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2YCR_CB)
                return image_cvt[:, :, 0:0+1]
            elif cspace == iaa.CSPACE_HSV:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
                return image_cvt[:, :, 2:2+1]
            elif cspace == iaa.CSPACE_HLS:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
                return image_cvt[:, :, 1:1+1]
            elif cspace == iaa.CSPACE_Lab:
                if hasattr(cv2, "COLOR_RGB2Lab"):
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2Lab)
                else:
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
                return image_cvt[:, :, 0:0+1]
            elif cspace == iaa.CSPACE_Luv:
                if hasattr(cv2, "COLOR_RGB2Luv"):
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2Luv)
                else:
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
                return image_cvt[:, :, 0:0+1]
            else:
                assert cspace == iaa.CSPACE_YUV
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
                return image_cvt[:, :, 0:0+1]

        # Max differences between input image and image after augmentation
        # when no child augmenter is used (for the given example image below).
        # For some colorspaces the conversion to input colorspace isn't
        # perfect.
        # Values were manually checked.
        max_diff_expected = {
            iaa.CSPACE_YCrCb: 1,
            iaa.CSPACE_HSV: 0,
            iaa.CSPACE_HLS: 0,
            iaa.CSPACE_Lab: 2,
            iaa.CSPACE_Luv: 4,
            iaa.CSPACE_YUV: 1
        }

        image = np.arange(6*6*3).astype(np.uint8).reshape((6, 6, 3))

        for cspace in self.valid_colorspaces:
            with self.subTest(colorspace=cspace):
                child = _BatchCapturingDummyAugmenter()
                aug = iaa.WithBrightnessChannels(
                    children=child,
                    to_colorspace=cspace)

                image_aug = aug(image=image)

                expected = _image_to_channel(image, cspace)
                diff = np.abs(
                    image.astype(np.int32) - image_aug.astype(np.int32))
                assert np.all(diff <= max_diff_expected[cspace])
                assert np.array_equal(child.last_batch.images[0], expected) 
Developer: aleju, Project: imgaug, Lines: 60, Source file: test_color.py
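
The test above treats the first channel of the COLOR_RGB2YUV output as the brightness channel and expects only small differences after converting back to RGB. A minimal stand-alone sketch of that round trip (exact per-pixel differences depend on the OpenCV build):

# Round-trip sketch: extract the brightness (Y) channel and convert back with COLOR_YUV2RGB.
import cv2
import numpy as np

image = np.arange(6 * 6 * 3).astype(np.uint8).reshape((6, 6, 3))
yuv = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
y_channel = yuv[:, :, 0:1]                      # brightness channel, shape (6, 6, 1)
restored = cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB)
diff = np.abs(image.astype(np.int32) - restored.astype(np.int32))
print(y_channel.shape, diff.max())              # small rounding differences are expected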

Example 8: schedule_frame

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2YUV [as alias]
def schedule_frame(self, frame):

        if frame.shape[0] != self._settings.fmt.pix.height:
            raise Exception(
                "frame height does not match the height of webcam device: {}!={}\n".format(
                    self._settings.fmt.pix.height, frame.shape[0]
                )
            )
        if frame.shape[1] != self._settings.fmt.pix.width:
            raise Exception(
                "frame width does not match the width of webcam device: {}!={}\n".format(
                    self._settings.fmt.pix.width, frame.shape[1]
                )
            )
        if frame.shape[2] != self._channels:
            raise Exception(
                "num frame channels does not match the num channels of webcam device: {}!={}\n".format(
                    self._channels, frame.shape[2]
                )
            )

        if cv2_imported:
            # t1 = timeit.default_timer()
            self._yuv = cv2.cvtColor(frame, cv2.COLOR_RGB2YUV)
            # t2 = timeit.default_timer()
            # sys.stderr.write('conversion time: {}\n'.format(t2-t1))
        else:
            # t1 = timeit.default_timer()
            frame = np.concatenate((frame, self._ones), axis=2)
            frame = np.dot(frame, self._rgb2yuv.T)
            self._yuv[:, :, :] = np.clip(frame, 0, 255)
            # t2 = timeit.default_timer()
            # sys.stderr.write('conversion time: {}\n'.format(t2-t1))

        # t1 = timeit.default_timer()
        # Pack into an interleaved YUYV (4:2:2) buffer: every pixel keeps its Y value,
        # while U and V are taken from every other pixel and shared by each horizontal pair
        for i in range(self._settings.fmt.pix.height):
            self._buffer[i, ::2] = self._yuv[i, :, 0]
            self._buffer[i, 1::4] = self._yuv[i, ::2, 1]
            self._buffer[i, 3::4] = self._yuv[i, ::2, 2]
        # t2 = timeit.default_timer()
        # sys.stderr.write('pack time: {}\n'.format(t2-t1))

        os.write(self._video_device, self._buffer.tobytes())  # tostring() is deprecated/removed in newer NumPy
Developer: charlielito, Project: snapchat-filters-opencv, Lines: 45, Source file: pyfakewebcam.py


Note: The cv2.COLOR_RGB2YUV examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please follow the corresponding project's license when redistributing or using the code. Do not reproduce this article without permission.