

Python cv2.COLOR_BGR2YCR_CB Attribute Code Examples

This article collects and summarizes typical usage examples of the cv2.COLOR_BGR2YCR_CB attribute in Python. If you are wondering how exactly to use cv2.COLOR_BGR2YCR_CB, what it is for, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples of the cv2 module to which this attribute belongs.


Below are 10 code examples of the cv2.COLOR_BGR2YCR_CB attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
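Before the project-level examples, here is a minimal self-contained sketch of what the constant does: it tells cv2.cvtColor to convert an image from OpenCV's default BGR layout to the YCrCb color space, with channels ordered Y (luma), Cr, Cb; COLOR_YCR_CB2BGR converts back. The file name below is only a placeholder.

import cv2

# Read an image in OpenCV's default BGR channel order ('sample.jpg' is a placeholder path).
bgr = cv2.imread('sample.jpg')

# BGR -> YCrCb; the result has channels ordered Y (luma), Cr, Cb.
ycrcb = cv2.cvtColor(bgr, cv2.COLOR_BGR2YCR_CB)
y, cr, cb = cv2.split(ycrcb)

# YCrCb -> BGR round trip.
restored = cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR)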

Example 1: prepare_raw

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def prepare_raw(path):
	# Settings.
	data = []
	color = []
	# Read in image and convert to ycrcb color space.
	img = cv.imread(path)
	im = cv.cvtColor(img, cv.COLOR_BGR2YCR_CB)
	img = im2double(im) # Only use the luminance value.

	size = img.shape
	img_temp = scipy.misc.imresize(img, [size[0] * multiplier, size[1] * multiplier], interp='bicubic')
	color_temp = scipy.misc.imresize(im, [size[0] * multiplier, size[1] * multiplier], interp='bicubic')
	im_label = img_temp[:, :, 0]
	im_color = color_temp[:, :, 1:3]

	data = np.array(im_label).reshape([1, img.shape[0] * multiplier, img.shape[1] * multiplier, 1])
	color = np.array(im_color)

	return data, color


# Use the trained model to generate the super-resolved image.
Developer: Edwardlzy, Project: SRCNN, Lines of code: 24, Source: use_SRCNN.py
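The snippet above also relies on an im2double helper and a module-level multiplier (the upscaling factor), neither of which is part of the excerpt. A minimal sketch of what such a helper typically looks like, offered only as an assumption about the missing pieces (it presumes an integer input image):

import numpy as np

def im2double(img):
    # MATLAB-style conversion: scale an integer image to floating point in [0, 1].
    # Assumes an 8-bit (or other integer) input; the project's actual helper may differ.
    return img.astype(np.float64) / np.iinfo(img.dtype).max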

Example 2: convert_to_original_colors

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def convert_to_original_colors(content_img, stylized_img):
  content_img  = postprocess(content_img)
  stylized_img = postprocess(stylized_img)
  if args.color_convert_type == 'yuv':
    cvt_type = cv2.COLOR_BGR2YUV
    inv_cvt_type = cv2.COLOR_YUV2BGR
  elif args.color_convert_type == 'ycrcb':
    cvt_type = cv2.COLOR_BGR2YCR_CB
    inv_cvt_type = cv2.COLOR_YCR_CB2BGR
  elif args.color_convert_type == 'luv':
    cvt_type = cv2.COLOR_BGR2LUV
    inv_cvt_type = cv2.COLOR_LUV2BGR
  elif args.color_convert_type == 'lab':
    cvt_type = cv2.COLOR_BGR2LAB
    inv_cvt_type = cv2.COLOR_LAB2BGR
  content_cvt = cv2.cvtColor(content_img, cvt_type)
  stylized_cvt = cv2.cvtColor(stylized_img, cvt_type)
  c1, _, _ = cv2.split(stylized_cvt)
  _, c2, c3 = cv2.split(content_cvt)
  merged = cv2.merge((c1, c2, c3))
  dst = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
  dst = preprocess(dst)
  return dst 
Developer: cysmith, Project: neural-style-tf, Lines of code: 25, Source: neural_style.py

Example 3: hist

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def hist(img):
    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    channels = cv2.split(ycrcb)
    cv2.equalizeHist(channels[0], channels[0])  # input and output channel matrices
    cv2.merge(channels, ycrcb)  # merge the resulting channels
    cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR, img)
    return img 
Developer: vipstone, Project: faceai, Lines of code: 9, Source: eye2.py
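A short usage sketch for the hist helper above: equalizing only the Y channel of the YCrCb image boosts contrast while leaving the chroma (and therefore the hues) untouched. The file names are placeholders.

import cv2

img = cv2.imread('portrait.jpg')      # placeholder input path
out = hist(img)                       # equalize brightness via the Y channel only
cv2.imwrite('portrait_eq.jpg', out)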

Example 4: get_embeddings

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def get_embeddings(self, frame, face):
        (x, y, w, h) = face
        img = frame[y:y+h, x:x+w]
        img_ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
        img_luv = cv2.cvtColor(img, cv2.COLOR_BGR2LUV)
        hist_ycrcb = self.calc_hist(img_ycrcb)
        hist_luv = self.calc_hist(img_luv)
        feature_vector = np.append(hist_ycrcb.ravel(), hist_luv.ravel())
        return feature_vector.reshape(1, len(feature_vector))

    # private function 
Developer: richmondu, Project: libfaceid, Lines of code: 13, Source: liveness.py
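The excerpt calls a self.calc_hist helper that is not shown. Purely as an illustration, and as an assumption rather than libfaceid's actual implementation, such a helper could build normalized per-channel histograms with cv2.calcHist:

import cv2
import numpy as np

def calc_hist(img):
    # Hypothetical helper: an 8-bin normalized histogram per channel of a 3-channel image.
    hists = []
    for ch in range(img.shape[2]):
        h = cv2.calcHist([img], [ch], None, [8], [0, 256])
        hists.append(cv2.normalize(h, h).flatten())
    return np.array(hists)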

Example 5: __call__

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def __call__(self, image):
        img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        img_ycc = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)
        img = np.concatenate((img_hsv, img_ycc), 2)

        return img 
Developer: starimeL, Project: PytorchConverter, Lines of code: 8, Source: test.py

Example 6: skin_detector_ycrcb

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def skin_detector_ycrcb(bgr_image):
    """Skin segmentation algorithm based on the YCrCb color space.
    See 'Face Segmentation Using Skin-Color Map in Videophone Applications'"""

    # Convert image from BGR to YCrCb color space:
    ycrcb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2YCR_CB)

    # Find region with skin tone in YCrCb image
    skin_region = cv2.inRange(ycrcb_image, lower_ycrcb, upper_ycrcb)
    return skin_region


# Values are taken from: 'RGB-H-CbCr Skin Colour Model for Human Face Detection'
# (R > 95) AND (G > 40) AND (B > 20) AND (max{R, G, B} − min{R, G, B} > 15) AND (|R − G| > 15) AND (R > G) AND (R > B)
# (R > 220) AND (G > 210) AND (B > 170) AND (|R − G| ≤ 15) AND (R > B) AND (G > B) 
Developer: PacktPublishing, Project: Mastering-OpenCV-4-with-Python, Lines of code: 17, Source: skin_segmentation.py
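The lower_ycrcb and upper_ycrcb bounds are defined elsewhere in the source file. The values below are the ranges commonly quoted from the cited paper (roughly 133 ≤ Cr ≤ 173 and 77 ≤ Cb ≤ 127), given here only as a plausible sketch; remember that COLOR_BGR2YCR_CB produces channels in Y, Cr, Cb order.

import cv2
import numpy as np

# Assumed skin-tone box in (Y, Cr, Cb) order; the book's exact values may differ.
lower_ycrcb = np.array([0, 133, 77], dtype=np.uint8)
upper_ycrcb = np.array([255, 173, 127], dtype=np.uint8)

skin_mask = skin_detector_ycrcb(cv2.imread('hand.jpg'))  # placeholder image path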

Example 7: modcrop_color

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def modcrop_color(image, scale=3):
	size = image.shape[0:2]
	size -= np.mod(size, scale)
	image = image[0:size[0], 0:size[1], :]
	return image

# Load and preprocess the training images.
# dirpath = './Test/Set5/'
# for root, dirs, files in os.walk(dirpath):
# 	for file in files:
# 		# Read in image and convert to ycrcb color space.
# 		img = cv.imread(dirpath + file)
# 		# cv.imshow('image',img)
# 		# cv.waitKey(0)
# 		# cv.destroyAllWindows()
# 		img = cv.cvtColor(img, cv.COLOR_BGR2YCR_CB)
# 		img = im2double(img) # Only use the luminance value.

# 		# Create groundtruth and baseline image.
# 		im_label = modcrop(img)
# 		size = im_label.shape
# 		h = size[0]
# 		w = size[1]
# 		im_temp = scipy.misc.imresize(im_label, 1/scale, interp='bicubic')
# 		im_input = scipy.misc.imresize(im_temp, size, interp='bicubic')

# 		# Generate subimages for training.
# 		for x in range(0, h - size_input, stride):
# 			for y in range(0, w - size_input, stride):
# 				subim_input = im_input[x : x + size_input, y : y + size_input]
# 				subim_label = im_label[int(x + padding) : int(x + padding + size_label), int(y + padding) : int(y + padding + size_label)]
				
# 				subim_input = subim_input.reshape([size_input, size_input, 1])
# 				subim_label = subim_label.reshape([size_label, size_label, 1])

# 				data.append(subim_input)
# 				label.append(subim_label)
# 				counter += 1

# # Shuffle the data pairs.
# order = np.random.choice(counter, counter, replace=False)
# data = np.array([data[i] for i in order])
# label = np.array([label[i] for i in order])

# print('data shape is', data.shape)
# print('label shape is', label.shape)

# # Write to HDF5 file.
# savepath = os.path.join(os.getcwd(), 'checkpoint/test.h5')
# with h5py.File(savepath, 'w') as hf:
#     hf.create_dataset('data', data=data)
#     hf.create_dataset('label', data=label) 
Developer: Edwardlzy, Project: SRCNN, Lines of code: 54, Source: prepare_test.py

Example 8: prepare_data

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def prepare_data(path):
	# Settings.
	data = []
	label = []
	padding = abs(size_input - size_label) / 2
	stride = 21
	# Read in image and convert to ycrcb color space.
	img_input = cv.imread(path)
	im = cv.cvtColor(img_input, cv.COLOR_BGR2YCR_CB)
	img = im2double(im) # Only use the luminance value.

	# Create groundtruth and baseline image.
	im_label = modcrop_color(img, scale=multiplier)
	color_base = modcrop_color(im, scale=multiplier)
	size = im_label.shape
	h = size[0]
	w = size[1]
	im_blur = scipy.misc.imresize(im_label, 1 / multiplier, interp='bicubic')
	im_input = scipy.misc.imresize(im_blur, multiplier * 1.0, interp='bicubic')

	# print('im_temp shape:', im_temp.shape)
	# print('im_input shape:', im_input.shape)

	# Generate subimages.
	# for x in range(0, h - size_input, stride):
	# 	for y in range(0, w - size_input, stride):
	# 		subim_input = im_input[x : x + size_input, y : y + size_input]
	# 		subim_label = im_label[int(x + padding) : int(x + padding + size_label), int(y + padding) : int(y + padding + size_label)]
			
	# 		subim_input = subim_input.reshape([size_input, size_input, 1])
	# 		subim_label = subim_label.reshape([size_label, size_label, 1])

	# 		data.append(subim_input)
	# 		label.append(subim_label)

	data = np.array(im_input[:,:,0]).reshape([1, h, w, 1])
	color = np.array(color_base[:,:,1:3])
	label = np.array(modcrop_color(img_input))

	# Write to HDF5 file.
	# savepath = os.path.join(os.getcwd(), 'checkpoint/test_image.h5')
	# with h5py.File(savepath, 'w') as hf:
	# 	hf.create_dataset('data', data=data)
	# 	hf.create_dataset('label', data=label)

	return data, label, color

# Prepare original data without blurring. 
Developer: Edwardlzy, Project: SRCNN, Lines of code: 50, Source: use_SRCNN.py

Example 9: houghCircles

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def houghCircles(path, counter):
    img = cv2.imread(path, 0)
    # img = cv2.medianBlur(img, 5)

    x = cv2.Sobel(img, -1, 1, 0, ksize=3)
    y = cv2.Sobel(img, -1, 0, 1, ksize=3)
    absx = cv2.convertScaleAbs(x)
    absy = cv2.convertScaleAbs(y)
    img = cv2.addWeighted(absx, 0.5, absy, 0.5, 0)

    # ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    # channels = cv2.split(ycrcb)
    # cv2.equalizeHist(channels[0], channels[0])  # input and output channel matrices
    # cv2.merge(channels, ycrcb)  # merge the resulting channels
    # cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR, img)

    # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

    # cv2.imshow("img2", img)
    # cv2.imshow("grayimg", grayimg)

    circles = cv2.HoughCircles(
        img,
        cv2.HOUGH_GRADIENT,
        1,
        50,
        param1=50,
        param2=10,
        minRadius=2,
        maxRadius=0)

    circles = np.uint16(np.around(circles))
    for i in circles[0, :]:
        # draw the outer circle
        # cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 1)
        # draw the center of the circle
        cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 2)
    # cv2.imshow("img" + str(counter), cimg)
    return (i[0] + 3, i[1] + 3)


# Color histogram equalization
Developer: vipstone, Project: faceai, Lines of code: 46, Source: eye.py

Example 10: make_sub_data

# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def make_sub_data(data, config):
    if config.matlab_bicubic:
        import matlab.engine
        eng = matlab.engine.start_matlab()
        mdouble = matlab.double
    else:
        eng = None
        mdouble = None

    times = 0
    for i in range(len(data)):
        input_, label_, = preprocess(data[i], config.scale, eng, mdouble)
        if len(input_.shape) == 3:
            h, w, c = input_.shape
        else:
            h, w = input_.shape

        for x in range(0, h * config.scale - config.image_size * config.scale + 1, config.stride * config.scale):
            for y in range(0, w * config.scale - config.image_size * config.scale + 1, config.stride * config.scale):
                sub_label = label_[x: x + config.image_size * config.scale, y: y + config.image_size * config.scale]
                
                sub_label = sub_label.reshape([config.image_size * config.scale , config.image_size * config.scale, config.c_dim])

                t = cv2.cvtColor(sub_label, cv2.COLOR_BGR2YCR_CB)
                t = t[:, :, 0]
                gx = t[1:, 0:-1] - t[0:-1, 0:-1]
                gy = t[0:-1, 1:] - t[0:-1, 0:-1]
                Gxy = (gx**2 + gy**2)**0.5
                r_gxy = float((Gxy > 10).sum()) / ((config.image_size*config.scale)**2) * 100
                if r_gxy < 10:
                    continue

                sub_label =  sub_label / 255.0

                x_i = int(x / config.scale)
                y_i = int(y / config.scale)
                sub_input = input_[x_i: x_i + config.image_size, y_i: y_i + config.image_size]
                sub_input = sub_input.reshape([config.image_size, config.image_size, config.c_dim])
                sub_input = sub_input / 255.0

                # checkimage(sub_input)
                # checkimage(sub_label)

                save_flag = make_data_hf(sub_input, sub_label, config, times)
                if not save_flag:
                    return
                times += 1

        print("image: [%2d], total: [%2d]"%(i, len(data)))

    if config.matlab_bicubic:
        eng.quit() 
Developer: hengchuan, Project: RDN-TensorFlow, Lines of code: 54, Source: utils.py
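In this last example, COLOR_BGR2YCR_CB is used only to take the luminance channel of each candidate patch and reject flat patches whose gradient magnitude is small almost everywhere. The same filter, isolated into a standalone sketch with the thresholds copied from the snippet:

import cv2
import numpy as np

def is_textured_patch(patch_bgr, grad_thresh=10, ratio_thresh=10.0):
    # Luminance channel of the patch (YCrCb order in OpenCV is Y, Cr, Cb).
    t = cv2.cvtColor(patch_bgr, cv2.COLOR_BGR2YCR_CB)[:, :, 0].astype(np.float32)
    # Forward differences in x and y, and their magnitude.
    gx = t[1:, :-1] - t[:-1, :-1]
    gy = t[:-1, 1:] - t[:-1, :-1]
    gxy = np.sqrt(gx ** 2 + gy ** 2)
    # Percentage of pixels with a noticeable gradient; keep only sufficiently textured patches.
    r_gxy = float((gxy > grad_thresh).sum()) / t.size * 100
    return r_gxy >= ratio_thresh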


Note: The cv2.COLOR_BGR2YCR_CB attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to the License of the corresponding project before distributing or using the code, and do not reproduce this article without permission.