This article compiles typical usage examples of the cv2.seamlessClone method in Python. If you are wondering exactly what cv2.seamlessClone does, how to use it, or want to see it in real code, the curated examples below may help. You can also explore usage examples of other methods in the cv2 module.
The following presents 7 code examples of cv2.seamlessClone, sorted by popularity by default.
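Before the examples, here is a minimal sketch of the basic call (the image files and sizes are placeholders): cv2.seamlessClone(src, dst, mask, center, flags) takes an 8-bit source image, an 8-bit destination image, an 8-bit mask the same size as the source, the (x, y) point in the destination where the center of the masked region will land, and a cloning flag such as cv2.NORMAL_CLONE or cv2.MIXED_CLONE.

import cv2
import numpy as np

# Minimal usage sketch; 'patch.jpg' and 'background.jpg' are placeholders.
src = cv2.imread('patch.jpg')        # 8-bit BGR patch to paste (smaller than the background)
dst = cv2.imread('background.jpg')   # 8-bit BGR destination image

# White where the patch should be blended, black elsewhere; same size as src.
mask = np.zeros(src.shape[:2], dtype=np.uint8)
mask[10:-10, 10:-10] = 255

# (x, y) location in dst where the CENTER of the masked region will be placed.
center = (dst.shape[1] // 2, dst.shape[0] // 2)

blended = cv2.seamlessClone(src, dst, mask, center, cv2.NORMAL_CLONE)
cv2.imwrite('blended.jpg', blended)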
Example 1: SeamlessClone_trimap
# Required module: import cv2 [as alias]
# Or: from cv2 import seamlessClone [as alias]
import cv2
import numpy as np

def SeamlessClone_trimap(srcIm, dstIm, imMask, offX, offY):
    dstIm = dstIm.copy()
    bimsk = imMask > 0

    # Paste the masked source pixels into the destination at the given offset.
    new_msk = np.zeros(dstIm.shape[:2], dtype='uint8')
    new_msk[offY:offY + imMask.shape[0], offX:offX + imMask.shape[1]] = imMask
    dstIm[new_msk > 0] = srcIm[imMask > 0]

    # Build a thin boundary mask (dilation minus erosion) and blend only that ring.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    bimsk = bimsk.astype('uint8')
    bdmsk = cv2.dilate(bimsk, kernel) - cv2.erode(bimsk, kernel)
    mask255 = bdmsk > 0
    mask255 = (mask255 * 255).astype('uint8')

    offCenter = (int(offX + imMask.shape[1] / 2), int(offY + imMask.shape[0] / 2))

    # When an object has very few pixels, bdmsk can be all zeros, which would make seamlessClone crash.
    if np.any(bdmsk > 0):
        outputIm = cv2.seamlessClone(srcIm, dstIm, mask255, offCenter, cv2.MIXED_CLONE)
    else:
        outputIm = dstIm
    return outputIm, new_msk
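As a hedged illustration of how this helper might be invoked, the arrays below are synthetic and made up for the sketch: srcIm must share the height and width of imMask, and dstIm must be large enough to contain the mask at the given offset.

import cv2
import numpy as np

# Hypothetical call sketch: a 100x100 source patch pasted into a 640x480 destination.
srcIm = np.full((100, 100, 3), 200, dtype=np.uint8)   # same H/W as imMask
dstIm = np.zeros((480, 640, 3), dtype=np.uint8)
imMask = np.zeros((100, 100), dtype=np.uint8)
cv2.circle(imMask, (50, 50), 40, 255, -1)             # object region of the trimap

outputIm, new_msk = SeamlessClone_trimap(srcIm, dstIm, imMask, offX=300, offY=200)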
Example 2: merge_img
# Required module: import cv2 [as alias]
# Or: from cv2 import seamlessClone [as alias]
import cv2
import numpy as np
# `core` is a project-specific module providing OVERLAY_POINTS and FACE_END.

def merge_img(src_img, dst_img, dst_matrix, dst_points, blur_detail_x=None, blur_detail_y=None, mat_multiple=None):
    # White convex-hull mask over the face landmark groups.
    face_mask = np.zeros(src_img.shape, dtype=src_img.dtype)
    for group in core.OVERLAY_POINTS:
        cv2.fillConvexPoly(face_mask, cv2.convexHull(dst_matrix[group]), (255, 255, 255))

    # Clone anchor: the center of the face bounding box.
    r = cv2.boundingRect(np.float32([dst_points[:core.FACE_END]]))
    center = (r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))

    if mat_multiple:  # optionally scale the mask around its center
        mat = cv2.getRotationMatrix2D(center, 0, mat_multiple)
        face_mask = cv2.warpAffine(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))
    if blur_detail_x and blur_detail_y:  # optionally soften the mask edge
        face_mask = cv2.blur(face_mask, (blur_detail_x, blur_detail_y), center)

    return cv2.seamlessClone(np.uint8(dst_img), src_img, face_mask, center, cv2.NORMAL_CLONE)
Example 3: merge_img
# Required module: import cv2 [as alias]
# Or: from cv2 import seamlessClone [as alias]
import cv2
import numpy as np
# Variant of the previous example: the blur kernel is passed as a single k_size tuple.

def merge_img(src_img, dst_img, dst_matrix, dst_points, k_size=None, mat_multiple=None):
    face_mask = np.zeros(src_img.shape, dtype=src_img.dtype)
    for group in core.OVERLAY_POINTS:
        cv2.fillConvexPoly(face_mask, cv2.convexHull(dst_matrix[group]), (255, 255, 255))

    r = cv2.boundingRect(np.float32([dst_points[:core.FACE_END]]))
    center = (r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))

    if mat_multiple:
        mat = cv2.getRotationMatrix2D(center, 0, mat_multiple)
        face_mask = cv2.warpAffine(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))
    if k_size:
        face_mask = cv2.blur(face_mask, k_size, center)

    return cv2.seamlessClone(np.uint8(dst_img), src_img, face_mask, center, cv2.NORMAL_CLONE)
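Both merge_img variants above depend on core.OVERLAY_POINTS and core.FACE_END from their surrounding project, so they are not runnable in isolation. The sketch below reproduces the same pattern with made-up landmark points: build a white convex-hull mask, take the bounding-box center as the clone anchor, soften the mask, and call cv2.seamlessClone.

import cv2
import numpy as np

# Self-contained sketch of the same pattern; all images and points are made up.
src_img = np.full((240, 240, 3), 80, dtype=np.uint8)    # image receiving the clone
dst_img = np.full((240, 240, 3), 160, dtype=np.uint8)   # image supplying the cloned region
points = np.array([[60, 60], [180, 70], [170, 180], [70, 170]], dtype=np.int32)

face_mask = np.zeros(src_img.shape, dtype=src_img.dtype)
cv2.fillConvexPoly(face_mask, cv2.convexHull(points), (255, 255, 255))

r = cv2.boundingRect(np.float32([points]))
center = (r[0] + r[2] // 2, r[1] + r[3] // 2)

face_mask = cv2.blur(face_mask, (15, 15))               # soften the mask edge
merged = cv2.seamlessClone(dst_img, src_img, face_mask, center, cv2.NORMAL_CLONE)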
Example 4: process
# Required module: import cv2 [as alias]
# Or: from cv2 import seamlessClone [as alias]
import cv2
import numpy as np

def process(old_face, new_face, raw_mask):
    height, width, _ = old_face.shape
    height = height // 2
    width = width // 2

    # Crop the new face and mask to the non-zero region of the mask.
    y_indices, x_indices, _ = np.nonzero(raw_mask)
    y_crop = slice(np.min(y_indices), np.max(y_indices))
    x_crop = slice(np.min(x_indices), np.max(x_indices))
    y_center = int(np.rint((np.max(y_indices) + np.min(y_indices)) / 2 + height))
    x_center = int(np.rint((np.max(x_indices) + np.min(x_indices)) / 2 + width))

    insertion = np.rint(new_face[y_crop, x_crop] * 255.0).astype("uint8")
    insertion_mask = np.rint(raw_mask[y_crop, x_crop] * 255.0).astype("uint8")
    insertion_mask[insertion_mask != 0] = 255

    # Pad the destination so the cloned region cannot touch the image border.
    prior = np.rint(np.pad(old_face * 255.0,
                           ((height, height), (width, width), (0, 0)),
                           'constant')).astype("uint8")

    blended = cv2.seamlessClone(insertion,  # pylint: disable=no-member
                                prior,
                                insertion_mask,
                                (x_center, y_center),
                                cv2.NORMAL_CLONE)  # pylint: disable=no-member
    blended = blended[height:-height, width:-width]
    return blended.astype("float32") / 255.0
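A hedged smoke-test sketch for the function above, with synthetic inputs: old_face and new_face are assumed to be float images in [0, 1] of the same shape, and raw_mask a float mask of that shape whose non-zero region marks the face.

import numpy as np

# Synthetic inputs for a quick smoke test of process().
old_face = np.random.rand(128, 128, 3).astype("float32")
new_face = np.random.rand(128, 128, 3).astype("float32")
raw_mask = np.zeros((128, 128, 1), dtype="float32")
raw_mask[32:96, 32:96] = 1.0                      # square face region

blended = process(old_face, new_face, raw_mask)   # float32 image in [0, 1], same shape as old_face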
Example 5: main
# Required module: import cv2 [as alias]
# Or: from cv2 import seamlessClone [as alias]
import json
from pathlib import Path

import cv2
import numpy
from tqdm import tqdm
# autoencoder_A, autoencoder_B and convert_one_image come from the surrounding project.

def main(args):
    input_dir = Path(args.input_dir)
    assert input_dir.is_dir()

    alignments = input_dir / args.alignments
    with alignments.open() as f:
        alignments = json.load(f)

    output_dir = input_dir / args.output_dir
    output_dir.mkdir(parents=True, exist_ok=True)

    args.direction = 'AtoB'  # note: the direction is forced to 'AtoB' here, overriding the command-line option
    if args.direction == 'AtoB': autoencoder, otherautoencoder = autoencoder_B, autoencoder_A
    if args.direction == 'BtoA': autoencoder, otherautoencoder = autoencoder_A, autoencoder_B

    if args.blurSize % 2 == 0:  # force an odd blur kernel size
        args.blurSize += 1

    if args.erosionKernelSize > 0:
        erosion_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                   (args.erosionKernelSize, args.erosionKernelSize))
    else:
        erosion_kernel = None

    for e in alignments:
        if len(e) < 4:
            raise LookupError('This script expects new format json files with face points included.')

    for image_file, face_file, mat, facepoints in tqdm(alignments[args.startframe::args.frameSkip]):
        image = cv2.imread(str(input_dir / image_file))
        face = cv2.imread(str(input_dir / face_file))
        mat = numpy.array(mat).reshape(2, 3)
        if image is None: continue
        if face is None: continue

        new_image = convert_one_image(autoencoder, otherautoencoder, image, mat, facepoints,
                                      erosion_kernel, args.blurSize, args.seamlessClone,
                                      args.maskType, args.doublePass)
        output_file = output_dir / Path(image_file).name
        cv2.imwrite(str(output_file), new_image)
Example 6: poisson_blend
# Required module: import cv2 [as alias]
# Or: from cv2 import seamlessClone [as alias]
import cv2
import numpy as np
import torch
from torchvision import transforms

def poisson_blend(input, output, mask):
    """
    * inputs:
        - input (torch.Tensor, required)
              Input tensor of the Completion Network, whose shape = (N, 3, H, W).
        - output (torch.Tensor, required)
              Output tensor of the Completion Network, whose shape = (N, 3, H, W).
        - mask (torch.Tensor, required)
              Input mask tensor of the Completion Network, whose shape = (N, 1, H, W).
    * returns:
              Output image tensor of shape (N, 3, H, W) inpainted with the Poisson image editing method.
    """
    input = input.clone().cpu()
    output = output.clone().cpu()
    mask = mask.clone().cpu()
    mask = torch.cat((mask, mask, mask), dim=1)  # convert to 3-channel format
    num_samples = input.shape[0]
    ret = []
    for i in range(num_samples):
        # Convert tensors to uint8 BGR arrays for OpenCV.
        dstimg = transforms.functional.to_pil_image(input[i])
        dstimg = np.array(dstimg)[:, :, [2, 1, 0]]
        srcimg = transforms.functional.to_pil_image(output[i])
        srcimg = np.array(srcimg)[:, :, [2, 1, 0]]
        msk = transforms.functional.to_pil_image(mask[i])
        msk = np.array(msk)[:, :, [2, 1, 0]]
        # compute the mask's center
        xs, ys = [], []
        for j in range(msk.shape[0]):
            for k in range(msk.shape[1]):
                if msk[j, k, 0] == 255:
                    ys.append(j)
                    xs.append(k)
        xmin, xmax = min(xs), max(xs)
        ymin, ymax = min(ys), max(ys)
        center = ((xmax + xmin) // 2, (ymax + ymin) // 2)
        # Roughly fill the hole first, then blend the network output over it.
        dstimg = cv2.inpaint(dstimg, msk[:, :, 0], 1, cv2.INPAINT_TELEA)
        out = cv2.seamlessClone(srcimg, dstimg, msk, center, cv2.NORMAL_CLONE)
        out = out[:, :, [2, 1, 0]]  # BGR -> RGB
        out = transforms.functional.to_tensor(out)
        out = torch.unsqueeze(out, dim=0)
        ret.append(out)
    ret = torch.cat(ret, dim=0)
    return ret
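A hedged smoke-test sketch, assuming torch and torchvision are installed and that the mask holds exact 0/1 values (the center search looks for pixels equal to 255 after conversion); all tensors below are synthetic.

import torch

# Synthetic (N, 3, H, W) tensors for a quick test of poisson_blend().
N, H, W = 2, 128, 128
cn_input = torch.rand(N, 3, H, W)
cn_output = torch.rand(N, 3, H, W)
mask = torch.zeros(N, 1, H, W)
mask[:, :, 40:90, 40:90] = 1.0      # hole region to be blended

blended = poisson_blend(cn_input, cn_output, mask)   # (N, 3, H, W) tensor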
Example 7: draw_text_seamless
# Required module: import cv2 [as alias]
# Or: from cv2 import seamlessClone [as alias]
# Method of a text-rendering class; relies on numpy as np, cv2, and PIL's Image and ImageDraw.
def draw_text_seamless(self, font, bg, word, word_color, word_height, word_width, offset):
    # A small padding around the text gives seamlessClone a cleaner border to work with.
    seamless_offset = 6

    # Draw the text on a white image, then clone it onto the background.
    if self.is_bgr():
        white_bg = np.ones((word_height + seamless_offset, word_width + seamless_offset, 3)) * 255
    else:
        white_bg = np.ones((word_height + seamless_offset, word_width + seamless_offset)) * 255
    text_img = Image.fromarray(np.uint8(white_bg))
    draw = ImageDraw.Draw(text_img)
    # draw.text((0 + seamless_offset // 2, 0 - offset[1] + seamless_offset // 2), word,
    #           fill=word_color, font=font)
    self.draw_text_wrapper(draw, word,
                           0 + seamless_offset // 2,
                           0 - offset[1] + seamless_offset // 2,
                           font, word_color)

    # Use the whole text image as the clone mask.
    text_img = np.array(text_img).astype(np.uint8)
    text_mask = 255 * np.ones(text_img.shape, text_img.dtype)

    # The CENTER of the text image will be placed here in the background.
    center = (bg.shape[1] // 2, bg.shape[0] // 2)

    # OpenCV's seamlessClone requires BGR images.
    if not self.is_bgr():
        text_img_bgr = np.ones((text_img.shape[0], text_img.shape[1], 3), np.uint8)
        bg_bgr = np.ones((bg.shape[0], bg.shape[1], 3), np.uint8)
        cv2.cvtColor(text_img, cv2.COLOR_GRAY2BGR, text_img_bgr)
        cv2.cvtColor(bg, cv2.COLOR_GRAY2BGR, bg_bgr)
    else:
        text_img_bgr = text_img
        bg_bgr = bg

    flag = np.random.choice([
        cv2.NORMAL_CLONE,
        cv2.MIXED_CLONE,
        cv2.MONOCHROME_TRANSFER
    ])
    mixed_clone = cv2.seamlessClone(text_img_bgr, bg_bgr, text_mask, center, flag)

    if not self.is_bgr():
        return cv2.cvtColor(mixed_clone, cv2.COLOR_BGR2GRAY)
    else:
        return mixed_clone
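The method above is tied to its class (self.is_bgr, self.draw_text_wrapper). As a rough standalone counterpart, the same idea can be sketched with plain PIL and OpenCV; the background, text, and font below are made up for the sketch.

import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont

# Made-up background and text for a standalone sketch of the same idea.
bg = np.full((200, 400, 3), 150, dtype=np.uint8)

text_img = Image.new('RGB', (200, 60), (255, 255, 255))
ImageDraw.Draw(text_img).text((10, 10), 'seamless text', fill=(0, 0, 0), font=ImageFont.load_default())
text_img = np.array(text_img)[:, :, ::-1].copy()   # RGB -> BGR for OpenCV

# Leave a small zero border in the mask so the cloned region does not touch the patch edge.
text_mask = np.zeros(text_img.shape[:2], dtype=np.uint8)
text_mask[3:-3, 3:-3] = 255

center = (bg.shape[1] // 2, bg.shape[0] // 2)
out = cv2.seamlessClone(text_img, bg, text_mask, center, cv2.MONOCHROME_TRANSFER)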