本文整理匯總了Python中cv2.COLOR_RGB2YCR_CB屬性的典型用法代碼示例。如果您正苦於以下問題:Python cv2.COLOR_RGB2YCR_CB屬性的具體用法?Python cv2.COLOR_RGB2YCR_CB怎麽用?Python cv2.COLOR_RGB2YCR_CB使用的例子?那麽, 這裏精選的屬性代碼示例或許可以為您提供幫助。您也可以進一步了解該屬性所在類cv2
的用法示例。
在下文中一共展示了cv2.COLOR_RGB2YCR_CB屬性的4個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: get_ycrcb_mask
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import COLOR_RGB2YCR_CB [as 別名]
def get_ycrcb_mask(img, debug=False):
    """Return a binary (0/1) skin mask computed in the YCrCb colorspace.

    ``img`` must be a 3-channel color image (assumed RGB) as a numpy
    array.  The result is a float array with 1.0 where the pixel falls
    inside fixed YCrCb skin-tone thresholds and 0.0 elsewhere.  With
    ``debug=True`` the input and the mask are shown via ``scripts.display``.
    """
    assert isinstance(img, numpy.ndarray), 'image must be a numpy array'
    assert img.ndim == 3, 'skin detection can only work on color images'
    logger.debug('getting ycrcb mask')
    # Fixed skin-tone bounds, one entry per (Y, Cr, Cb) channel.
    low = numpy.array([90, 100, 130], dtype=numpy.uint8)
    high = numpy.array([230, 120, 180], dtype=numpy.uint8)
    converted = cv2.cvtColor(img, cv2.COLOR_RGB2YCR_CB)
    mask = cv2.inRange(converted, low, high)
    # cv2.inRange yields 0/255; collapse that to a strict 0/1 mask.
    mask = (mask >= 128).astype(numpy.uint8)
    if debug:
        scripts.display('input', img)
        scripts.display('mask_ycrcb', mask)
    return mask.astype(float)
示例2: normalize4gan
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import COLOR_RGB2YCR_CB [as 別名]
def normalize4gan(im):
    """Scale an image into the [-1, 1] float32 range, as described in ganhacks.

    A colorspace conversion was tried and abandoned (see note below); the
    input is only cast and rescaled, never modified in place.
    """
    # im = cv2.cvtColor(im, cv2.COLOR_RGB2YCR_CB).astype(np.float32)
    # NOTE: HSV was also tried... not helpful.
    return im.astype(np.float32) / 128.0 - 1.0
示例3: test_every_colorspace
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import COLOR_RGB2YCR_CB [as 別名]
def test_every_colorspace(self):
    def _brightness_channel(image, cspace):
        # Convert the RGB image to `cspace` and return its brightness
        # channel as an (H, W, 1) slice.
        if cspace == iaa.CSPACE_YCrCb:
            cvt = cv2.cvtColor(image, cv2.COLOR_RGB2YCR_CB)
            idx = 0
        elif cspace == iaa.CSPACE_HSV:
            cvt = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
            idx = 2
        elif cspace == iaa.CSPACE_HLS:
            cvt = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
            idx = 1
        elif cspace == iaa.CSPACE_Lab:
            # Some OpenCV builds only expose the all-caps constant name.
            if hasattr(cv2, "COLOR_RGB2Lab"):
                cvt = cv2.cvtColor(image, cv2.COLOR_RGB2Lab)
            else:
                cvt = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
            idx = 0
        elif cspace == iaa.CSPACE_Luv:
            if hasattr(cv2, "COLOR_RGB2Luv"):
                cvt = cv2.cvtColor(image, cv2.COLOR_RGB2Luv)
            else:
                cvt = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
            idx = 0
        else:
            assert cspace == iaa.CSPACE_YUV
            cvt = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
            idx = 0
        return cvt[:, :, idx:idx + 1]

    # Max differences between input image and image after augmentation
    # when no child augmenter is used (for the given example image below).
    # For some colorspaces the conversion back to the input colorspace
    # isn't perfect.  Values were manually checked.
    max_diff_expected = {
        iaa.CSPACE_YCrCb: 1,
        iaa.CSPACE_HSV: 0,
        iaa.CSPACE_HLS: 0,
        iaa.CSPACE_Lab: 2,
        iaa.CSPACE_Luv: 4,
        iaa.CSPACE_YUV: 1
    }

    image = np.arange(6 * 6 * 3).astype(np.uint8).reshape((6, 6, 3))
    for cspace in self.valid_colorspaces:
        with self.subTest(colorspace=cspace):
            child = _BatchCapturingDummyAugmenter()
            aug = iaa.WithBrightnessChannels(
                children=child,
                to_colorspace=cspace)
            image_aug = aug(image=image)
            expected = _brightness_channel(image, cspace)
            diff = np.abs(
                image.astype(np.int32) - image_aug.astype(np.int32))
            # Round-trip error stays within the hand-checked tolerance...
            assert np.all(diff <= max_diff_expected[cspace])
            # ...and the child augmenter saw exactly the brightness channel.
            assert np.array_equal(child.last_batch.images[0], expected)
示例4: compute_hist
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import COLOR_RGB2YCR_CB [as 別名]
def compute_hist(image, mode = 'ltc'):
    """Build a normalized texture-code histogram feature vector for an image.

    The image is converted to YCrCb (after scaling by 255, so the input is
    presumably in [0, 1] -- TODO confirm against callers) and a 3x3 coding
    function ``compute_code`` is evaluated at every interior pixel of the
    first two channels.  Per-code counts are normalized by the number of
    coded positions and interleaved into the returned feature vector.

    Parameters:
        image: color image array; assumed RGB float in [0, 1] -- verify.
        mode: 'lbp' (one code per window) or 'ltc' (a pair of codes per
            window); passed through to ``compute_code``.

    Returns:
        numpy array of normalized counts, two entries (channel 0, then
        channel 1) per key of the module-level ``classes`` dict, in the
        dict's key-iteration order.
    """
    # One histogram per channel; the *_error variants belong to a
    # commented-out experiment on the error image and stay all-zero.
    hist_1 = dict()
    hist_2 = dict()
    hist_error_1 = dict()
    hist_error_2 = dict()
    # Initialize every known code bin to 0 so missing codes still appear
    # in the output vector.
    for i in classes.keys():
        hist_1[i] = 0
        hist_2[i] = 0
        hist_error_1[i] = 0
        hist_error_2[i] = 0
    image = cv2.cvtColor(image*255, cv2.COLOR_RGB2YCR_CB)
    # image = compute_jpeg_coef(image)
    # error_clock = time.clock()
    # NOTE(review): `error` is only consumed by commented-out lines below,
    # so this call is currently dead work unless compute_error_image has
    # side effects -- confirm before removing.
    error = compute_error_image(image)
    # error_dur = time.clock() - error_clock
    # print('Error image computation time : ' + str(error_dur) + 'ms')
    # code_1_dur = 0
    # Slide a 3x3 window over the interior pixels (borders are skipped so
    # the i-1:i+2 / j-1:j+2 slices stay in bounds).
    for i in range(1, image.shape[0] - 2):
        for j in range(1, image.shape[1] - 2):
            if mode == 'lbp':
                # 'lbp' mode: compute_code returns a single code per window.
                # code_1_clock = time.clock()
                b = compute_code(image[i-1:i+2, j-1:j+2,0], mode)
                hist_1[b] += 1
                # code_1_dur += time.clock() - code_1_clock
                b = compute_code(image[i-1:i+2, j-1:j+2,1], mode)
                hist_2[b] += 1
                # b = compute_code(error[i-1:i+2, j-1:j+2,0], mode)
                # hist_error_1[b] += 1
                # b = compute_code(error[i-1:i+2, j-1:j+2,1], mode)
                # hist_error_2[b] += 1
            if mode == 'ltc':
                # 'ltc' mode: compute_code returns a pair of codes; both
                # are counted in the channel's histogram.
                b = compute_code(image[i-1:i+2, j-1:j+2,0], mode)
                hist_1[b[0]] += 1
                hist_1[b[1]] += 1
                b = compute_code(image[i-1:i+2, j-1:j+2,1], mode)
                hist_2[b[0]] += 1
                hist_2[b[1]] += 1
                # b_error = compute_code(error[i-1:i+2, j-1:j+2])
                # hist_error[b_error] += 1
    # print('Code 1 computation time : ' + str(code_1_dur/((image.shape[0] - 3)*(image.shape[1] - 3))) + 'ms')
    # Normalize by the number of coded window positions and interleave
    # channel-0 / channel-1 counts per code.
    F = []
    N = (image.shape[0] - 3)*(image.shape[1] - 3)
    for i in hist_1.keys():
        F.append(hist_1[i]/N)
        F.append(hist_2[i]/N)
        # F.append(hist_error_1[i]/N)
        # F.append(hist_error_2[i]/N)
    return(np.array(F))