This article collects typical usage examples of the cv2.NORMAL_CLONE attribute in Python: what cv2.NORMAL_CLONE is for, how it is used, and what working code looks like. The curated examples below may help; you can also explore further usage examples of the cv2 module in which this attribute is defined.
Five code examples that use cv2.NORMAL_CLONE are shown below, ordered roughly by popularity.
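Before the project examples, here is a minimal, self-contained sketch of how cv2.NORMAL_CLONE is typically passed to cv2.seamlessClone; the image sizes, mask region, and center point below are placeholder assumptions, not taken from any of the examples that follow.

import cv2
import numpy as np

# Hypothetical source patch and destination image; in practice these would be
# loaded with cv2.imread. The sizes are arbitrary for this sketch.
src = np.full((100, 100, 3), 200, dtype=np.uint8)   # patch to be cloned
dst = np.full((300, 300, 3), 80, dtype=np.uint8)    # background image

# 8-bit mask selecting which pixels of src to clone (white = clone).
mask = np.zeros(src.shape[:2], dtype=np.uint8)
mask[20:80, 20:80] = 255

# Point in dst where the center of the cloned region should land.
center = (150, 150)

# cv2.NORMAL_CLONE blends the masked region of src into dst using Poisson
# (gradient-domain) editing, preserving the surrounding appearance of dst.
blended = cv2.seamlessClone(src, dst, mask, center, cv2.NORMAL_CLONE)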
Example 1: merge_img
# Required module: import cv2 [as alias]
# Or: from cv2 import NORMAL_CLONE [as alias]
import cv2
import numpy as np
import core  # project-local module providing OVERLAY_POINTS and FACE_END

def merge_img(src_img, dst_img, dst_matrix, dst_points, blur_detail_x=None, blur_detail_y=None, mat_multiple=None):
    # Build a white convex-hull mask over each facial landmark group.
    face_mask = np.zeros(src_img.shape, dtype=src_img.dtype)
    for group in core.OVERLAY_POINTS:
        cv2.fillConvexPoly(face_mask, cv2.convexHull(dst_matrix[group]), (255, 255, 255))
    # Center of the face bounding box, used as the clone anchor.
    r = cv2.boundingRect(np.float32([dst_points[:core.FACE_END]]))
    center = (r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))
    # Optionally scale the mask around the center before blending.
    if mat_multiple:
        mat = cv2.getRotationMatrix2D(center, 0, mat_multiple)
        face_mask = cv2.warpAffine(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))
    # Optionally soften the mask edges (note: the third positional argument of
    # cv2.blur is the optional dst buffer, not the anchor point).
    if blur_detail_x and blur_detail_y:
        face_mask = cv2.blur(face_mask, (blur_detail_x, blur_detail_y), center)
    return cv2.seamlessClone(np.uint8(dst_img), src_img, face_mask, center, cv2.NORMAL_CLONE)
Example 2: merge_img
# Required module: import cv2 [as alias]
# Or: from cv2 import NORMAL_CLONE [as alias]
import cv2
import numpy as np
import core  # project-local module providing OVERLAY_POINTS and FACE_END

def merge_img(src_img, dst_img, dst_matrix, dst_points, k_size=None, mat_multiple=None):
    # Same as Example 1, except the blur kernel is given as a single (width, height) tuple.
    face_mask = np.zeros(src_img.shape, dtype=src_img.dtype)
    for group in core.OVERLAY_POINTS:
        cv2.fillConvexPoly(face_mask, cv2.convexHull(dst_matrix[group]), (255, 255, 255))
    r = cv2.boundingRect(np.float32([dst_points[:core.FACE_END]]))
    center = (r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))
    if mat_multiple:
        mat = cv2.getRotationMatrix2D(center, 0, mat_multiple)
        face_mask = cv2.warpAffine(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))
    if k_size:
        face_mask = cv2.blur(face_mask, k_size, center)
    return cv2.seamlessClone(np.uint8(dst_img), src_img, face_mask, center, cv2.NORMAL_CLONE)
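A hedged usage sketch for the two merge_img variants above: the core module is project-specific, so it is mocked here, and the landmark coordinates, image sizes, and scale factor are made up purely to exercise the function.

import cv2
import numpy as np

# Stand-in for the project's `core` module: one landmark group covering all
# eight points, with FACE_END marking how many landmarks outline the face.
class core:
    OVERLAY_POINTS = [list(range(8))]
    FACE_END = 8

# Eight hypothetical landmarks forming a rough face region.
dst_matrix = np.array([[60, 40], [120, 40], [150, 90], [140, 150],
                       [100, 180], [60, 150], [40, 110], [45, 70]], dtype=np.int32)
dst_points = dst_matrix.copy()

src_img = np.full((200, 200, 3), 120, dtype=np.uint8)  # frame to paste into
dst_img = np.full((200, 200, 3), 180, dtype=np.uint8)  # face image to blend in

swapped = merge_img(src_img, dst_img, dst_matrix, dst_points, mat_multiple=0.95)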
Example 3: process
# Required module: import cv2 [as alias]
# Or: from cv2 import NORMAL_CLONE [as alias]
import cv2
import numpy as np

def process(old_face, new_face, raw_mask):
    # Pad by half the face size so the clone center can never fall outside the frame.
    height, width, _ = old_face.shape
    height = height // 2
    width = width // 2

    # Bounding box and center of the non-zero mask region (in padded coordinates).
    y_indices, x_indices, _ = np.nonzero(raw_mask)
    y_crop = slice(np.min(y_indices), np.max(y_indices))
    x_crop = slice(np.min(x_indices), np.max(x_indices))
    y_center = int(np.rint((np.max(y_indices) + np.min(y_indices)) / 2 + height))
    x_center = int(np.rint((np.max(x_indices) + np.min(x_indices)) / 2 + width))

    # Convert the cropped face and mask from float [0, 1] to 8-bit.
    insertion = np.rint(new_face[y_crop, x_crop] * 255.0).astype("uint8")
    insertion_mask = np.rint(raw_mask[y_crop, x_crop] * 255.0).astype("uint8")
    insertion_mask[insertion_mask != 0] = 255
    prior = np.rint(np.pad(old_face * 255.0,
                           ((height, height), (width, width), (0, 0)),
                           'constant')).astype("uint8")

    blended = cv2.seamlessClone(insertion,  # pylint: disable=no-member
                                prior,
                                insertion_mask,
                                (x_center, y_center),
                                cv2.NORMAL_CLONE)  # pylint: disable=no-member

    # Remove the padding and return to float [0, 1].
    blended = blended[height:-height, width:-width]
    return blended.astype("float32") / 255.0
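The function above expects float images in [0, 1] and a single-channel float mask of the same height and width; the toy arrays below are assumptions used only to show the expected call shape.

import numpy as np

# Toy 64x64 inputs in [0, 1] with a square mask region in the middle.
old_face = np.random.rand(64, 64, 3).astype("float32")
new_face = np.random.rand(64, 64, 3).astype("float32")
raw_mask = np.zeros((64, 64, 1), dtype="float32")
raw_mask[16:48, 16:48] = 1.0

blended = process(old_face, new_face, raw_mask)
print(blended.shape, blended.dtype)  # (64, 64, 3) float32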
Example 4: poisson_blend
# Required module: import cv2 [as alias]
# Or: from cv2 import NORMAL_CLONE [as alias]
import cv2
import numpy as np
import torch
import torchvision.transforms as transforms

def poisson_blend(input, output, mask):
    """
    * inputs:
        - input (torch.Tensor, required)
                Input tensor of Completion Network, whose shape = (N, 3, H, W).
        - output (torch.Tensor, required)
                Output tensor of Completion Network, whose shape = (N, 3, H, W).
        - mask (torch.Tensor, required)
                Input mask tensor of Completion Network, whose shape = (N, 1, H, W).
    * returns:
                Output image tensor of shape (N, 3, H, W) inpainted with the Poisson image editing method.
    """
    input = input.clone().cpu()
    output = output.clone().cpu()
    mask = mask.clone().cpu()
    mask = torch.cat((mask, mask, mask), dim=1)  # convert to 3-channel format
    num_samples = input.shape[0]
    ret = []
    for i in range(num_samples):
        # Convert each sample to uint8 BGR arrays for OpenCV.
        dstimg = transforms.functional.to_pil_image(input[i])
        dstimg = np.array(dstimg)[:, :, [2, 1, 0]]
        srcimg = transforms.functional.to_pil_image(output[i])
        srcimg = np.array(srcimg)[:, :, [2, 1, 0]]
        msk = transforms.functional.to_pil_image(mask[i])
        msk = np.array(msk)[:, :, [2, 1, 0]]
        # compute the mask's center
        xs, ys = [], []
        for j in range(msk.shape[0]):
            for k in range(msk.shape[1]):
                if msk[j, k, 0] == 255:
                    ys.append(j)
                    xs.append(k)
        xmin, xmax = min(xs), max(xs)
        ymin, ymax = min(ys), max(ys)
        center = ((xmax + xmin) // 2, (ymax + ymin) // 2)
        # Roughly fill the hole first, then blend the network output into it.
        dstimg = cv2.inpaint(dstimg, msk[:, :, 0], 1, cv2.INPAINT_TELEA)
        out = cv2.seamlessClone(srcimg, dstimg, msk, center, cv2.NORMAL_CLONE)
        out = out[:, :, [2, 1, 0]]  # back to RGB
        out = transforms.functional.to_tensor(out)
        out = torch.unsqueeze(out, dim=0)
        ret.append(out)
    ret = torch.cat(ret, dim=0)
    return ret
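A hedged usage sketch for poisson_blend: the batch size, resolution, and the rectangular "hole" below are assumptions, not values from the original project.

import torch

# Two fake network tensors (3x128x128, values in [0, 1]) and a binary mask
# marking a square hole in the middle of each sample.
input = torch.rand(2, 3, 128, 128)
output = torch.rand(2, 3, 128, 128)
mask = torch.zeros(2, 1, 128, 128)
mask[:, :, 32:96, 32:96] = 1.0

blended = poisson_blend(input, output, mask)
print(blended.shape)  # torch.Size([2, 3, 128, 128])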
Example 5: draw_text_seamless
# Required module: import cv2 [as alias]
# Or: from cv2 import NORMAL_CLONE [as alias]
import cv2
import numpy as np
from PIL import Image, ImageDraw

def draw_text_seamless(self, font, bg, word, word_color, word_height, word_width, offset):
    # Small margin around the text for better seamlessClone results.
    seamless_offset = 6

    # Draw the text on a white image, then clone it onto the background.
    if self.is_bgr():
        white_bg = np.ones((word_height + seamless_offset, word_width + seamless_offset, 3)) * 255
    else:
        white_bg = np.ones((word_height + seamless_offset, word_width + seamless_offset)) * 255
    text_img = Image.fromarray(np.uint8(white_bg))
    draw = ImageDraw.Draw(text_img)
    # draw.text((0 + seamless_offset // 2, 0 - offset[1] + seamless_offset // 2), word,
    #           fill=word_color, font=font)
    self.draw_text_wrapper(draw, word,
                           0 + seamless_offset // 2,
                           0 - offset[1] + seamless_offset // 2,
                           font, word_color)

    # Use the whole text_img as the mask.
    text_img = np.array(text_img).astype(np.uint8)
    text_mask = 255 * np.ones(text_img.shape, text_img.dtype)

    # This is where the CENTER of the text image will be placed.
    center = (bg.shape[1] // 2, bg.shape[0] // 2)

    # OpenCV's seamlessClone requires BGR images.
    if not self.is_bgr():
        text_img_bgr = np.ones((text_img.shape[0], text_img.shape[1], 3), np.uint8)
        bg_bgr = np.ones((bg.shape[0], bg.shape[1], 3), np.uint8)
        cv2.cvtColor(text_img, cv2.COLOR_GRAY2BGR, text_img_bgr)
        cv2.cvtColor(bg, cv2.COLOR_GRAY2BGR, bg_bgr)
    else:
        text_img_bgr = text_img
        bg_bgr = bg

    # Randomly pick one of the three seamlessClone modes.
    flag = np.random.choice([
        cv2.NORMAL_CLONE,
        cv2.MIXED_CLONE,
        cv2.MONOCHROME_TRANSFER
    ])
    mixed_clone = cv2.seamlessClone(text_img_bgr, bg_bgr, text_mask, center, flag)

    if not self.is_bgr():
        return cv2.cvtColor(mixed_clone, cv2.COLOR_BGR2GRAY)
    else:
        return mixed_clone
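The three flags drawn at random in Example 5 are the three blending modes cv2.seamlessClone supports. The short sketch below, with made-up image sizes and text, just produces one output per mode for side-by-side comparison.

import cv2
import numpy as np

# Hypothetical text patch, background, and full-coverage mask for this sketch.
patch = np.full((60, 120, 3), 240, dtype=np.uint8)
cv2.putText(patch, "text", (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0), 2)
background = np.full((200, 300, 3), 128, dtype=np.uint8)
mask = 255 * np.ones(patch.shape[:2], dtype=np.uint8)
center = (background.shape[1] // 2, background.shape[0] // 2)

# One blended result per clone mode.
results = {}
for name, flag in [("normal", cv2.NORMAL_CLONE),
                   ("mixed", cv2.MIXED_CLONE),
                   ("monochrome", cv2.MONOCHROME_TRANSFER)]:
    results[name] = cv2.seamlessClone(patch, background, mask, center, flag)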