This article collects typical usage examples of the cv2.merge method in Python. If you have been wondering what exactly cv2.merge does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the cv2 module to which this method belongs.
The following shows 15 code examples of cv2.merge, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
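As a quick refresher before the examples: cv2.merge stacks single-channel arrays of the same size and dtype into one multi-channel image, and is the inverse of cv2.split. A minimal, self-contained sketch:

import cv2
import numpy as np

# Split a BGR image into its channels, then merge them back together.
img = np.zeros((4, 4, 3), dtype=np.uint8)
b, g, r = cv2.split(img)
restored = cv2.merge((b, g, r))   # same layout as img
swapped = cv2.merge((r, g, b))    # channel order is whatever you pass in
assert restored.shape == img.shape == swapped.shape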
Example 1: write_song

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def write_song(piano_roll, filename):
    """ Save the song on disk
    Args:
        piano_roll (np.array): a song object containing the tracks and melody
        filename (str): the path where to save the song (don't add the file extension)
    """
    note_played = piano_roll > 0.5
    piano_roll_int = np.uint8(piano_roll * 255)
    b = piano_roll_int * (~note_played).astype(np.uint8)  # Notes silenced
    g = np.zeros(piano_roll_int.shape, dtype=np.uint8)    # Empty channel
    r = piano_roll_int * note_played.astype(np.uint8)     # Notes played
    img = cv.merge((b, g, r))
    # TODO: We could insert a first column indicating the piano keys (black/white key)
    cv.imwrite(filename + '.png', img)
Example 2: extract_grayscale

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def extract_grayscale(img, srgb=False):
    dw = img.header()['dataWindow']
    size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
    precision = Imath.PixelType(Imath.PixelType.FLOAT)
    R = img.channel('R', precision)
    G = img.channel('G', precision)
    B = img.channel('B', precision)
    r = np.fromstring(R, dtype=np.float32)
    g = np.fromstring(G, dtype=np.float32)
    b = np.fromstring(B, dtype=np.float32)
    r.shape = (size[1], size[0])
    g.shape = (size[1], size[0])
    b.shape = (size[1], size[0])
    rgb = cv2.merge([b, g, r])
    grayscale = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
    if srgb:
        grayscale = lin2srgb(grayscale)
    return grayscale
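Example 2 calls a lin2srgb helper that is not shown here. As an assumption about what it does, a minimal sketch of the standard linear-to-sRGB transfer curve (the name and exact behavior are guesses, not part of the original source):

import numpy as np

def lin2srgb(lin):
    # Hypothetical helper: standard linear-to-sRGB encoding, applied
    # element-wise to float values in [0, 1].
    lin = np.clip(lin, 0.0, 1.0)
    return np.where(lin <= 0.0031308,
                    lin * 12.92,
                    1.055 * np.power(lin, 1.0 / 2.4) - 0.055).astype(np.float32)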
Example 3: overlay_transparent_image

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def overlay_transparent_image(bg, fg, x1, y1):
    # bg is a 3-channel RGB image
    # fg is a 4-channel RGBA image
    bg = bg.copy()
    fg = fg.copy()
    h, w = fg.shape[:2]
    t = bg[y1:y1 + h, x1:x1 + w]
    b, g, r, a = cv2.split(fg)
    mask = cv2.merge((a, a, a))
    fg = cv2.merge((b, g, r))
    overlaid = alpha_blend(t, fg, mask)
    bg[y1:y1 + h, x1:x1 + w] = overlaid
    return bg
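The alpha_blend helper used above is not part of the snippet. A plausible minimal implementation, assuming all three inputs are uint8 images of the same shape (this is a sketch, not the original author's code):

import numpy as np

def alpha_blend(background, foreground, mask):
    # Hypothetical helper: per-pixel linear blend, mask values in [0, 255].
    alpha = mask.astype(np.float32) / 255.0
    blended = (foreground.astype(np.float32) * alpha
               + background.astype(np.float32) * (1.0 - alpha))
    return blended.astype(np.uint8)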
Example 4: get_image

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def get_image():
    image = request.files.get('image')
    if not image:
        raise ValueError
    img = Image.open(image.stream).convert('RGB')
    img = np.asarray(img)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    b = cv2.distanceTransform(img, distanceType=cv2.DIST_L2, maskSize=5)
    g = cv2.distanceTransform(img, distanceType=cv2.DIST_L1, maskSize=5)
    r = cv2.distanceTransform(img, distanceType=cv2.DIST_C, maskSize=5)
    # merge the transformed channels back into an image
    transformed_image = cv2.merge((b, g, r))
    return transformed_image
Example 5: read_image_by_index

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def read_image_by_index(self, index):
    train_image_filepath = os.path.join(self.data_dir, self.x_train[index])
    train_image_label = np.zeros((self.nb_classes,))
    train_image_label[self.y_train[index]] = 1
    train_image = io.imread(train_image_filepath)
    # in case of a single-channel image
    if len(train_image.shape) == 2:
        train_image = cv2.merge([train_image, train_image, train_image])
    # in case of an RGBA image
    if train_image.shape[2] == 4:
        train_image = train_image[:, :, 0:3]
    # other cases
    if len(train_image.shape) != 3 or train_image.shape[2] != 3:
        return None, None
    train_image = cv2.resize(train_image, (self.input_shape[1], self.input_shape[0])).astype(np.float32) / 255.0
    return train_image, train_image_label
Example 6: convert_to_original_colors

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def convert_to_original_colors(content_img, stylized_img):
    content_img = postprocess(content_img)
    stylized_img = postprocess(stylized_img)
    if args.color_convert_type == 'yuv':
        cvt_type = cv2.COLOR_BGR2YUV
        inv_cvt_type = cv2.COLOR_YUV2BGR
    elif args.color_convert_type == 'ycrcb':
        cvt_type = cv2.COLOR_BGR2YCR_CB
        inv_cvt_type = cv2.COLOR_YCR_CB2BGR
    elif args.color_convert_type == 'luv':
        cvt_type = cv2.COLOR_BGR2LUV
        inv_cvt_type = cv2.COLOR_LUV2BGR
    elif args.color_convert_type == 'lab':
        cvt_type = cv2.COLOR_BGR2LAB
        inv_cvt_type = cv2.COLOR_LAB2BGR
    content_cvt = cv2.cvtColor(content_img, cvt_type)
    stylized_cvt = cv2.cvtColor(stylized_img, cvt_type)
    c1, _, _ = cv2.split(stylized_cvt)
    _, c2, c3 = cv2.split(content_cvt)
    merged = cv2.merge((c1, c2, c3))
    dst = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
    dst = preprocess(dst)
    return dst
Example 7: random_hue_saturation_value

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def random_hue_saturation_value(image,
                                hue_shift_limit=(-180, 180),
                                sat_shift_limit=(-255, 255),
                                val_shift_limit=(-255, 255)):
    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(image)
    hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
    h = cv2.add(h, hue_shift)
    sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
    s = cv2.add(s, sat_shift)
    val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
    v = cv2.add(v, val_shift)
    image = cv2.merge((h, s, v))
    image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    return image
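This is a typical colour-jitter augmentation helper. An illustrative call, assuming a uint8 BGR input (the array below is made up for demonstration):

import numpy as np

# Illustrative usage of the function defined above.
bgr = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
augmented = random_hue_saturation_value(bgr,
                                        hue_shift_limit=(-10, 10),
                                        sat_shift_limit=(-30, 30),
                                        val_shift_limit=(-30, 30))
print(augmented.shape, augmented.dtype)  # (64, 64, 3) uint8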
Example 8: normalized

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def normalized(self):
    # t1 = time.time()
    b = self.down[:, :, 0]
    g = self.down[:, :, 1]
    r = self.down[:, :, 2]
    sum = b + g + r  # per-pixel channel sum (assumes float channels; uint8 would overflow here)
    self.norm[:, :, 0] = b / sum * 255.0
    self.norm[:, :, 1] = g / sum * 255.0
    self.norm[:, :, 2] = r / sum * 255.0
    # print "conversion time", time.time() - t1
    # self.norm = cv2.merge([self.norm1, self.norm2, self.norm3])
    self.norm_rgb = cv2.convertScaleAbs(self.norm)
    # self.norm.dtype = np.uint8
    return self.norm_rgb
Example 9: get_image

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def get_image():
    image = request.files.get('image')
    if not image:
        raise ValueError
    basewidth = 300
    # wpercent = (basewidth / float(Image.open(image.stream).size[0]))
    # hsize = int((float(Image.open(image.stream).size[1]) * float(wpercent)))
    img = Image.open(image.stream).convert('RGB')
    img = np.asarray(img)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    b = cv2.distanceTransform(img, distanceType=cv2.DIST_L2, maskSize=5)
    g = cv2.distanceTransform(img, distanceType=cv2.DIST_L1, maskSize=5)
    r = cv2.distanceTransform(img, distanceType=cv2.DIST_C, maskSize=5)
    # merge the transformed channels back into an image
    transformed_image = cv2.merge((b, g, r))
    return transformed_image
Example 10: backprojection

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def backprojection(target, roihist):
    '''Image preprocessing'''
    hsvt = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsvt], [0, 1], roihist, [0, 180, 0, 256], 1)
    # Now convolve with a circular disc
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    cv2.filter2D(dst, -1, disc, dst)
    # threshold and binary AND
    ret, binary = cv2.threshold(dst, 80, 255, 0)
    # create the kernel
    kernel = np.ones((5, 5), np.uint8)
    iter_time = 1
    # morphological closing
    binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel, iterations=iter_time)
    thresh = cv2.merge((binary, binary, binary))
    target_filter = cv2.bitwise_and(target, thresh)
    return binary, target_filter
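The roihist argument is expected to be a 2D hue/saturation histogram of a region of interest. A minimal sketch of how such a histogram is typically built with standard OpenCV calls (the ROI file name is an assumption):

import cv2
import numpy as np

# Build the H-S histogram that backprojection() expects.
roi = cv2.imread('roi.png')  # assumed sample image of the target colour
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
roihist = cv2.calcHist([hsv_roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)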
Example 11: main

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def main():
    image = cv2.imread("../data/house.tiff", 1)
    blue, green, red = cv2.split(image)
    rows, columns, channels = image.shape
    output = np.empty((rows, columns * 3, 3), np.uint8)
    output[:, 0:columns] = cv2.merge([blue, blue, blue])
    output[:, columns:columns * 2] = cv2.merge([green, green, green])
    output[:, columns * 2:columns * 3] = cv2.merge([red, red, red])
    hsvimage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hue, satr, vlue = cv2.split(hsvimage)
    hsvoutput = np.concatenate((hue, satr, vlue), axis=1)
    cv2.imshow("Sample Image", image)
    cv2.imshow("Output Image", output)
    cv2.imshow("HSV Image", hsvoutput)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example 12: main

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def main():
    imageOne = cv2.imread("../data/house.tiff", 1)
    imageOne = cv2.cvtColor(imageOne, cv2.COLOR_BGR2RGB)
    red, green, blue = cv2.split(imageOne)
    images = [cv2.merge((red, green, blue)), red, green, blue]
    titles = ["Default RGB Image", "Only Red", "Only Green", "Only Blue"]
    cmaps = ["gray", "Reds", "Greens", "Blues"]
    for i in range(4):
        plt.subplot(2, 2, i + 1)
        plt.imshow(images[i], cmap=cmaps[i])
        plt.title(titles[i])
        plt.xticks([])
        plt.yticks([])
    plt.show()
Example 13: randomHueSaturationValue

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    return image
Example 14: get_alpha

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def get_alpha(imtmp, bgval=1.):
    h, w = imtmp.shape[:2]
    alpha = (~np.all(imtmp == bgval, axis=2)).astype(imtmp.dtype)
    b_channel, g_channel, r_channel = cv2.split(imtmp)
    im_RGBA = cv2.merge((b_channel, g_channel, r_channel,
                         alpha.astype(imtmp.dtype)))
    return im_RGBA
Example 15: append_alpha

# Required module: import cv2 [as alias]
# Or: from cv2 import merge [as alias]
def append_alpha(imtmp):
    alpha = np.ones_like(imtmp[:, :, 0]).astype(imtmp.dtype)
    if np.issubdtype(imtmp.dtype, np.uint8):
        alpha = alpha * 255
    b_channel, g_channel, r_channel = cv2.split(imtmp)
    im_RGBA = cv2.merge((b_channel, g_channel, r_channel, alpha))
    return im_RGBA