This article collects typical usage examples of the cv2.COLOR_BGR2YCR_CB attribute in Python. If you are wondering what cv2.COLOR_BGR2YCR_CB does or how to use it, the curated attribute examples below may help. You can also explore further usage examples from the cv2 module, where this attribute is defined.
The following presents 10 code examples of cv2.COLOR_BGR2YCR_CB, sorted by popularity by default.
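Before the examples, here is a minimal round-trip sketch: cv2.COLOR_BGR2YCR_CB converts a BGR image to the YCrCb color space, and cv2.COLOR_YCR_CB2BGR converts it back. This assumes a readable image at the placeholder path 'input.jpg', which is not taken from any example below.
import cv2

bgr = cv2.imread('input.jpg')  # OpenCV loads images in BGR channel order; returns None if the path is wrong
ycrcb = cv2.cvtColor(bgr, cv2.COLOR_BGR2YCR_CB)
y, cr, cb = cv2.split(ycrcb)   # note the channel order: Y, Cr, Cb
back = cv2.cvtColor(cv2.merge((y, cr, cb)), cv2.COLOR_YCR_CB2BGR)
print(bgr.shape, ycrcb.shape, back.shape)  # all three arrays share the same shape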
Example 1: prepare_raw
# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def prepare_raw(path):
    # Settings.
    data = []
    color = []
    # Read in image and convert to ycrcb color space.
    img = cv.imread(path)
    im = cv.cvtColor(img, cv.COLOR_BGR2YCR_CB)
    img = im2double(im)  # Only use the luminance value.
    size = img.shape
    img_temp = scipy.misc.imresize(img, [size[0] * multiplier, size[1] * multiplier], interp='bicubic')
    color_temp = scipy.misc.imresize(im, [size[0] * multiplier, size[1] * multiplier], interp='bicubic')
    im_label = img_temp[:, :, 0]
    im_color = color_temp[:, :, 1:3]
    data = np.array(im_label).reshape([1, img.shape[0] * multiplier, img.shape[1] * multiplier, 1])
    color = np.array(im_color)
    return data, color
# Use the trained model to generate the super-resolved image.
Example 2: convert_to_original_colors
# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def convert_to_original_colors(content_img, stylized_img):
    content_img = postprocess(content_img)
    stylized_img = postprocess(stylized_img)
    if args.color_convert_type == 'yuv':
        cvt_type = cv2.COLOR_BGR2YUV
        inv_cvt_type = cv2.COLOR_YUV2BGR
    elif args.color_convert_type == 'ycrcb':
        cvt_type = cv2.COLOR_BGR2YCR_CB
        inv_cvt_type = cv2.COLOR_YCR_CB2BGR
    elif args.color_convert_type == 'luv':
        cvt_type = cv2.COLOR_BGR2LUV
        inv_cvt_type = cv2.COLOR_LUV2BGR
    elif args.color_convert_type == 'lab':
        cvt_type = cv2.COLOR_BGR2LAB
        inv_cvt_type = cv2.COLOR_LAB2BGR
    content_cvt = cv2.cvtColor(content_img, cvt_type)
    stylized_cvt = cv2.cvtColor(stylized_img, cvt_type)
    c1, _, _ = cv2.split(stylized_cvt)
    _, c2, c3 = cv2.split(content_cvt)
    merged = cv2.merge((c1, c2, c3))
    dst = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
    dst = preprocess(dst)
    return dst
Example 3: hist
# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def hist(img):
    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    channels = cv2.split(ycrcb)
    cv2.equalizeHist(channels[0], channels[0])  # input channel, output channel matrices
    cv2.merge(channels, ycrcb)  # merge the resulting channels
    cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR, img)
    return img
Example 4: get_embeddings
# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def get_embeddings(self, frame, face):
    (x, y, w, h) = face
    img = frame[y:y+h, x:x+w]
    img_ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    img_luv = cv2.cvtColor(img, cv2.COLOR_BGR2LUV)
    hist_ycrcb = self.calc_hist(img_ycrcb)
    hist_luv = self.calc_hist(img_luv)
    feature_vector = np.append(hist_ycrcb.ravel(), hist_luv.ravel())
    return feature_vector.reshape(1, len(feature_vector))
# private function
Example 5: __call__
# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def __call__(self, image):
    img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    img_ycc = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)
    img = np.concatenate((img_hsv, img_ycc), 2)
    return img
Example 6: skin_detector_ycrcb
# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def skin_detector_ycrcb(bgr_image):
    """Skin segmentation algorithm based on the YCrCb color space.
    See 'Face Segmentation Using Skin-Color Map in Videophone Applications'"""
    # Convert image from BGR to YCrCb color space:
    ycrcb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2YCR_CB)
    # Find the region with skin tone in the YCrCb image:
    skin_region = cv2.inRange(ycrcb_image, lower_ycrcb, upper_ycrcb)
    return skin_region
# Values are taken from: 'RGB-H-CbCr Skin Colour Model for Human Face Detection'
# (R > 95) AND (G > 40) AND (B > 20) AND (max{R, G, B} − min{R, G, B} > 15) AND (|R − G| > 15) AND (R > G) AND (R > B)
# (R > 220) AND (G > 210) AND (B > 170) AND (|R − G| ≤ 15) AND (R > B) AND (G > B)
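For illustration only, the two RGB rules quoted in the comments above could be expressed with NumPy roughly as follows. The helper name skin_mask_rgb is hypothetical (it is not part of the example's repository); the thresholds are copied verbatim from the comments.
import numpy as np

def skin_mask_rgb(bgr_image):
    # Hypothetical sketch of the quoted RGB rules, not taken from the example's source.
    img = bgr_image.astype(np.int32)  # widen from uint8 so the differences do not wrap around
    b, g, r = img[..., 0], img[..., 1], img[..., 2]
    rule_a = ((r > 95) & (g > 40) & (b > 20) &
              (img.max(axis=2) - img.min(axis=2) > 15) &
              (np.abs(r - g) > 15) & (r > g) & (r > b))
    rule_b = ((r > 220) & (g > 210) & (b > 170) &
              (np.abs(r - g) <= 15) & (r > b) & (g > b))
    return np.where(rule_a | rule_b, 255, 0).astype(np.uint8)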
Example 7: modcrop_color
# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def modcrop_color(image, scale=3):
    size = image.shape[0:2]
    size -= np.mod(size, scale)
    image = image[0:size[0], 0:size[1], :]
    return image
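As a quick check of what modcrop_color does (using a zero-filled stand-in array rather than a real image), both spatial dimensions are trimmed down to the nearest multiple of scale:
import numpy as np

dummy = np.zeros((100, 67, 3), dtype=np.uint8)  # stand-in image, not real data
print(modcrop_color(dummy, scale=3).shape)      # (99, 66, 3)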
# Load and preprocess the training images.
# dirpath = './Test/Set5/'
# for root, dirs, files in os.walk(dirpath):
#     for file in files:
#         # Read in image and convert to ycrcb color space.
#         img = cv.imread(dirpath + file)
#         # cv.imshow('image',img)
#         # cv.waitKey(0)
#         # cv.destroyAllWindows()
#         img = cv.cvtColor(img, cv.COLOR_BGR2YCR_CB)
#         img = im2double(img)  # Only use the luminance value.
#         # Create groundtruth and baseline image.
#         im_label = modcrop(img)
#         size = im_label.shape
#         h = size[0]
#         w = size[1]
#         im_temp = scipy.misc.imresize(im_label, 1/scale, interp='bicubic')
#         im_input = scipy.misc.imresize(im_temp, size, interp='bicubic')
#         # Generate subimages for training.
#         for x in range(0, h - size_input, stride):
#             for y in range(0, w - size_input, stride):
#                 subim_input = im_input[x : x + size_input, y : y + size_input]
#                 subim_label = im_label[int(x + padding) : int(x + padding + size_label), int(y + padding) : int(y + padding + size_label)]
#                 subim_input = subim_input.reshape([size_input, size_input, 1])
#                 subim_label = subim_label.reshape([size_label, size_label, 1])
#                 data.append(subim_input)
#                 label.append(subim_label)
#                 counter += 1
# # Shuffle the data pairs.
# order = np.random.choice(counter, counter, replace=False)
# data = np.array([data[i] for i in order])
# label = np.array([label[i] for i in order])
# print('data shape is', data.shape)
# print('label shape is', label.shape)
# # Write to HDF5 file.
# savepath = os.path.join(os.getcwd(), 'checkpoint/test.h5')
# with h5py.File(savepath, 'w') as hf:
#     hf.create_dataset('data', data=data)
#     hf.create_dataset('label', data=label)
Example 8: prepare_data
# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def prepare_data(path):
    # Settings.
    data = []
    label = []
    padding = abs(size_input - size_label) / 2
    stride = 21
    # Read in image and convert to ycrcb color space.
    img_input = cv.imread(path)
    im = cv.cvtColor(img_input, cv.COLOR_BGR2YCR_CB)
    img = im2double(im)  # Only use the luminance value.
    # Create groundtruth and baseline image.
    im_label = modcrop_color(img, scale=multiplier)
    color_base = modcrop_color(im, scale=multiplier)
    size = im_label.shape
    h = size[0]
    w = size[1]
    im_blur = scipy.misc.imresize(im_label, 1 / multiplier, interp='bicubic')
    im_input = scipy.misc.imresize(im_blur, multiplier * 1.0, interp='bicubic')
    # print('im_temp shape:', im_temp.shape)
    # print('im_input shape:', im_input.shape)
    # Generate subimages.
    # for x in range(0, h - size_input, stride):
    #     for y in range(0, w - size_input, stride):
    #         subim_input = im_input[x : x + size_input, y : y + size_input]
    #         subim_label = im_label[int(x + padding) : int(x + padding + size_label), int(y + padding) : int(y + padding + size_label)]
    #         subim_input = subim_input.reshape([size_input, size_input, 1])
    #         subim_label = subim_label.reshape([size_label, size_label, 1])
    #         data.append(subim_input)
    #         label.append(subim_label)
    data = np.array(im_input[:, :, 0]).reshape([1, h, w, 1])
    color = np.array(color_base[:, :, 1:3])
    label = np.array(modcrop_color(img_input))
    # Write to HDF5 file.
    # savepath = os.path.join(os.getcwd(), 'checkpoint/test_image.h5')
    # with h5py.File(savepath, 'w') as hf:
    #     hf.create_dataset('data', data=data)
    #     hf.create_dataset('label', data=label)
    return data, label, color
# Prepare original data without blurring.
Example 9: houghCircles
# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def houghCircles(path, counter):
    img = cv2.imread(path, 0)
    # img = cv2.medianBlur(img, 5)
    x = cv2.Sobel(img, -1, 1, 0, ksize=3)
    y = cv2.Sobel(img, -1, 0, 1, ksize=3)
    absx = cv2.convertScaleAbs(x)
    absy = cv2.convertScaleAbs(y)
    img = cv2.addWeighted(absx, 0.5, absy, 0.5, 0)
    # ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    # channels = cv2.split(ycrcb)
    # cv2.equalizeHist(channels[0], channels[0])  # input channel, output channel matrices
    # cv2.merge(channels, ycrcb)  # merge the resulting channels
    # cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR, img)
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    # cv2.imshow("img2", img)
    # cv2.imshow("grayimg", grayimg)
    circles = cv2.HoughCircles(
        img,
        cv2.HOUGH_GRADIENT,
        1,
        50,
        param1=50,
        param2=10,
        minRadius=2,
        maxRadius=0)
    circles = np.uint16(np.around(circles))
    for i in circles[0, :]:
        # draw the outer circle
        # cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 1)
        # draw the center of the circle
        cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 2)
    # cv2.imshow("img" + str(counter), cimg)
    # Return the (offset) center of the last detected circle.
    return (i[0] + 3, i[1] + 3)
# Color histogram equalization.
Example 10: make_sub_data
# Required module: import cv2 [as alias]
# Or: from cv2 import COLOR_BGR2YCR_CB [as alias]
def make_sub_data(data, config):
    if config.matlab_bicubic:
        import matlab.engine
        eng = matlab.engine.start_matlab()
        mdouble = matlab.double
    else:
        eng = None
        mdouble = None
    times = 0
    for i in range(len(data)):
        input_, label_ = preprocess(data[i], config.scale, eng, mdouble)
        if len(input_.shape) == 3:
            h, w, c = input_.shape
        else:
            h, w = input_.shape
        for x in range(0, h * config.scale - config.image_size * config.scale + 1, config.stride * config.scale):
            for y in range(0, w * config.scale - config.image_size * config.scale + 1, config.stride * config.scale):
                sub_label = label_[x: x + config.image_size * config.scale, y: y + config.image_size * config.scale]
                sub_label = sub_label.reshape([config.image_size * config.scale, config.image_size * config.scale, config.c_dim])
                # Skip flat patches: keep a patch only if enough of its luminance-gradient
                # magnitudes exceed the threshold.
                t = cv2.cvtColor(sub_label, cv2.COLOR_BGR2YCR_CB)
                t = t[:, :, 0]
                gx = t[1:, 0:-1] - t[0:-1, 0:-1]
                gy = t[0:-1, 1:] - t[0:-1, 0:-1]
                Gxy = (gx**2 + gy**2)**0.5
                r_gxy = float((Gxy > 10).sum()) / ((config.image_size * config.scale)**2) * 100
                if r_gxy < 10:
                    continue
                sub_label = sub_label / 255.0
                x_i = int(x / config.scale)
                y_i = int(y / config.scale)
                sub_input = input_[x_i: x_i + config.image_size, y_i: y_i + config.image_size]
                sub_input = sub_input.reshape([config.image_size, config.image_size, config.c_dim])
                sub_input = sub_input / 255.0
                # checkimage(sub_input)
                # checkimage(sub_label)
                save_flag = make_data_hf(sub_input, sub_label, config, times)
                if not save_flag:
                    return
                times += 1
        print("image: [%2d], total: [%2d]" % (i, len(data)))
    if config.matlab_bicubic:
        eng.quit()