This article collects typical usage examples of the cv2.invertAffineTransform method in Python. If you are unsure what cv2.invertAffineTransform does, or how and where to use it, the curated code examples below may help. You can also explore further usage examples of the cv2 module itself.
Eleven code examples of cv2.invertAffineTransform are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
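Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what cv2.invertAffineTransform computes: given a 2x3 affine matrix M, it returns the 2x3 matrix of the inverse mapping, so composing the two in homogeneous coordinates yields the identity.

import cv2
import numpy as np

# Build a 2x3 affine matrix: rotate 30 degrees around (50, 50) at scale 1.2.
M = cv2.getRotationMatrix2D((50, 50), 30, 1.2)
Mi = cv2.invertAffineTransform(M)

# Composing M with its inverse (as 3x3 homogeneous matrices) gives the identity.
M3 = np.vstack([M, [0, 0, 1]])
Mi3 = np.vstack([Mi, [0, 0, 1]])
print(np.allclose(M3 @ Mi3, np.eye(3)))  # True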
Example 1: affine_skew
# Required module: import cv2 [as alias]
# Or: from cv2 import invertAffineTransform [as alias]
def affine_skew(self, tilt, phi, img, mask=None):
    # Simulate an affine viewpoint change: rotate by phi degrees, then
    # compress along x by the tilt factor (ASIFT-style view simulation).
    h, w = img.shape[:2]
    if mask is None:
        mask = np.zeros((h, w), np.uint8)
        mask[:] = 255
    A = np.float32([[1, 0, 0], [0, 1, 0]])
    if phi != 0.0:
        phi = np.deg2rad(phi)
        s, c = np.sin(phi), np.cos(phi)
        A = np.float32([[c, -s], [s, c]])
        # Rotate the image corners and shift so the result fits the new bounding box.
        corners = [[0, 0], [w, 0], [w, h], [0, h]]
        tcorners = np.int32(np.dot(corners, A.T))
        x, y, w, h = cv2.boundingRect(tcorners.reshape(1, -1, 2))
        A = np.hstack([A, [[-x], [-y]]])
        img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
    if tilt != 1.0:
        # Anti-alias along x before subsampling by the tilt factor.
        s = 0.8 * np.sqrt(tilt * tilt - 1)
        img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
        img = cv2.resize(img, (0, 0), fx=1.0 / tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)
        A[0] /= tilt
    if phi != 0.0 or tilt != 1.0:
        h, w = img.shape[:2]
        mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
    # Ai maps coordinates in the skewed image back to the original image.
    Ai = cv2.invertAffineTransform(A)
    return img, mask, Ai
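The inverse matrix Ai returned above is typically used to map feature coordinates found on the skewed image back into the original frame; a minimal sketch (the point values are placeholders):

# Sketch: `Ai` is the 2x3 inverse matrix returned by affine_skew above.
# cv2.transform applies it to an (N, 1, 2) array of points.
kp_skew = np.float32([[10, 20], [100, 40]]).reshape(-1, 1, 2)
kp_orig = cv2.transform(kp_skew, Ai)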
Example 2: rotate_image_with_invrmat
# Required module: import cv2 [as alias]
# Or: from cv2 import invertAffineTransform [as alias]
def rotate_image_with_invrmat(cvmat, rotateAngle):
    assert cvmat.dtype == np.uint8, "only np.uint8 images are supported in rotate_image_with_invrmat"
    height, width, channel = cvmat.shape
    center = (width // 2, height // 2)
    rotateMatrix = cv2.getRotationMatrix2D(center, rotateAngle, 1.0)

    # compute the bounding size of the rotated image
    cos, sin = np.abs(rotateMatrix[0, 0]), np.abs(rotateMatrix[0, 1])
    newW = int((height * sin) + (width * cos))
    newH = int((height * cos) + (width * sin))

    # shift so the rotated content is centred in the new canvas
    rotateMatrix[0, 2] += (newW / 2) - center[0]  # x
    rotateMatrix[1, 2] += (newH / 2) - center[1]  # y

    # rotate image; note that warpAffine takes dsize as (width, height)
    outMat = cv2.warpAffine(cvmat, rotateMatrix, (newW, newH), borderValue=(128, 128, 128))

    # generate the inverse rotation matrix
    invRotateMatrix = cv2.invertAffineTransform(rotateMatrix)
    return (outMat, invRotateMatrix, (width, height))
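A hedged usage sketch: any point located in the rotated image can be mapped back to original-image coordinates with the returned inverse matrix (`frame` and the point are placeholders):

# Sketch: map a point found in the rotated image back to the original image.
rotated, inv_m, (orig_w, orig_h) = rotate_image_with_invrmat(frame, 37.0)
pt = np.float32([[[200, 150]]])            # (1, 1, 2) point in the rotated image
pt_orig = cv2.transform(pt, inv_m)[0, 0]   # same point in original coordinates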
Example 3: transformPointsInverse
# Required module: import cv2 [as alias]
# Or: from cv2 import invertAffineTransform [as alias]
def transformPointsInverse(T, width, height):
    # transformPointsForward is a helper defined elsewhere in the same project
    T = cv2.invertAffineTransform(T)
    return transformPointsForward(T, width, height)
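For context, transformPointsForward is not included in this snippet; a plausible implementation (an assumption for illustration, not the project's actual code) would push the four image corners through the matrix:

# Hypothetical helper, for illustration only: apply T to the image corners.
def transformPointsForward(T, width, height):
    corners = np.float32([[0, 0], [width, 0], [width, height], [0, height]])
    return cv2.transform(corners.reshape(-1, 1, 2), T).reshape(-1, 2)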
Example 4: affine_skew
# Required module: import cv2 [as alias]
# Or: from cv2 import invertAffineTransform [as alias]
def affine_skew(tilt, phi, img, mask=None):
    '''
    affine_skew(tilt, phi, img, mask=None) -> skew_img, skew_mask, Ai

    Ai is the affine transform matrix from skew_img back to img.
    '''
    h, w = img.shape[:2]
    if mask is None:
        mask = np.zeros((h, w), np.uint8)
        mask[:] = 255
    A = np.float32([[1, 0, 0], [0, 1, 0]])
    if phi != 0.0:
        phi = np.deg2rad(phi)
        s, c = np.sin(phi), np.cos(phi)
        A = np.float32([[c, -s], [s, c]])
        corners = [[0, 0], [w, 0], [w, h], [0, h]]
        tcorners = np.int32(np.dot(corners, A.T))
        x, y, w, h = cv2.boundingRect(tcorners.reshape(1, -1, 2))
        A = np.hstack([A, [[-x], [-y]]])
        img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
    if tilt != 1.0:
        s = 0.8 * np.sqrt(tilt * tilt - 1)
        img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
        img = cv2.resize(img, (0, 0), fx=1.0 / tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)
        A[0] /= tilt
    if phi != 0.0 or tilt != 1.0:
        h, w = img.shape[:2]
        mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
    Ai = cv2.invertAffineTransform(A)
    return img, mask, Ai
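For context, OpenCV's asift.py sample drives a function like this over a grid of simulated viewpoints; a sketch of that parameter sweep (recalled from the sample, simplified):

# Sketch of the (tilt, phi) grid used by ASIFT-style detectors.
params = [(1.0, 0.0)]                      # identity view
for t in 2 ** (0.5 * np.arange(1, 6)):     # tilts: sqrt(2), 2, 2*sqrt(2), ...
    for phi in np.arange(0, 180, 72.0 / t):
        params.append((t, phi))
# Each (t, phi) pair is passed to affine_skew; features found on the skewed
# image are mapped back to the original through the returned Ai.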
Example 5: Alignment_2
# Required module: import cv2 [as alias]
# Or: from cv2 import invertAffineTransform [as alias]
def Alignment_2(img, std_landmark, landmark):
    def Transformation(std_landmark, landmark):
        # Least-squares similarity transform (Procrustes analysis) between the
        # two landmark sets: centre, normalise scale, solve rotation via SVD.
        std_landmark = np.matrix(std_landmark).astype(np.float64)
        landmark = np.matrix(landmark).astype(np.float64)

        c1 = np.mean(std_landmark, axis=0)
        c2 = np.mean(landmark, axis=0)
        std_landmark -= c1
        landmark -= c2

        s1 = np.std(std_landmark)
        s2 = np.std(landmark)
        std_landmark /= s1
        landmark /= s2

        U, S, Vt = np.linalg.svd(std_landmark.T * landmark)
        R = (U * Vt).T

        return np.vstack([np.hstack(((s2 / s1) * R, c2.T - (s2 / s1) * R * c1.T)),
                          np.matrix([0., 0., 1.])])

    Trans_Matrix = Transformation(std_landmark, landmark)  # shape: 3 x 3
    Trans_Matrix = Trans_Matrix[:2]
    Trans_Matrix = cv2.invertAffineTransform(Trans_Matrix)
    new_img = cv2.warpAffine(img, Trans_Matrix, (img.shape[1], img.shape[0]))

    # Apply the same affine transform to each landmark point.
    Trans_Matrix = np.array(Trans_Matrix)
    new_landmark = []
    for i in range(landmark.shape[0]):
        pts = []
        pts.append(Trans_Matrix[0, 0] * landmark[i, 0] + Trans_Matrix[0, 1] * landmark[i, 1] + Trans_Matrix[0, 2])
        pts.append(Trans_Matrix[1, 0] * landmark[i, 0] + Trans_Matrix[1, 1] * landmark[i, 1] + Trans_Matrix[1, 2])
        new_landmark.append(pts)
    new_landmark = np.array(new_landmark)

    return new_img, new_landmark
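A hedged usage sketch (the image and all coordinate values are illustrative placeholders, not from the source project): aligning a face crop to a standard 5-point landmark template.

# Sketch: align a detected face to a reference 5-point template.
std_landmark = np.array([[38.3, 51.7], [73.5, 51.5], [56.0, 71.7],
                         [41.5, 92.4], [70.7, 92.2]])   # reference template
landmark = np.array([[120., 140.], [180., 138.], [150., 175.],
                     [128., 205.], [175., 203.]])       # detected points
aligned_img, aligned_pts = Alignment_2(face_img, std_landmark, landmark)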
#---------------------------------#
#   Image pre-processing:
#   Gaussian normalization
#---------------------------------#
Example 6: extract_box
# Required module: import cv2 [as alias]
# Or: from cv2 import invertAffineTransform [as alias]
def extract_box(img, box, padding_factor=0.2):
    '''
    We can search for whatever we want in the rotated, bordered image.
    Any point found can be translated back to the original image by:
    1. adding the origin of the bordered area,
    2. rotating the point using the inverse rotation matrix.
    '''
    if box.angle != 0:
        b_w = max(img.shape) * 2
        b_h = b_w
        dx_center = b_w // 2 - int(box.center[0])
        dy_center = b_h // 2 - int(box.center[1])
        new_img = np.zeros((b_h, b_w, 3), dtype=img.dtype)
        new_img[dy_center:(dy_center + img.shape[0]), dx_center:(dx_center + img.shape[1]), :] = img
        box_in_big_image = box.points + np.c_[np.ones((4, 1)) * dx_center, np.ones((4, 1)) * dy_center]
        rot_mat = cv2.getRotationMatrix2D((b_w / 2, b_h / 2), box.angle, scale=1.0)
        inv_rot_mat = cv2.invertAffineTransform(rot_mat)
        rot_image = cv2.warpAffine(new_img, rot_mat, (new_img.shape[1], new_img.shape[0]), flags=cv2.INTER_CUBIC)
        box_UL_in_rotated = (rot_mat * np.matrix([box_in_big_image[0, 0], box_in_big_image[0, 1], 1]).transpose()).transpose().tolist()[0]
        box_coords_in_rotated = np.matrix(np.c_[box_in_big_image, np.ones((4, 1))]) * rot_mat.T
        # keep only the upper-left corner plus the box extents
        box_coords_in_rotated = box_coords_in_rotated[0, :].tolist()[0] + [box.dx, box.dy]
    else:
        rot_mat = cv2.getRotationMatrix2D(box.center, box.angle, scale=1.0)
        inv_rot_mat = cv2.invertAffineTransform(rot_mat)
        rot_image = img.copy()  # for efficiency: no rotation needed
        box_UL_in_rotated = (rot_mat * np.matrix([box.points[0, 0], box.points[0, 1], 1]).transpose()).transpose().tolist()[0]
        box_coords_in_rotated = box_UL_in_rotated + [box.dx, box.dy]
    img_with_border, Dx, Dy = extract_rect(rot_image, box_coords_in_rotated, padding_factor)
    box_coords_in_bordered = [Dx, Dy] + [box.dx, box.dy]
    border_UL_in_rotated = [box_UL_in_rotated[0] - Dx, box_UL_in_rotated[1] - Dy]
    return img_with_border, box_coords_in_bordered, border_UL_in_rotated, inv_rot_mat
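The returned inverse matrix is applied the same way as in the earlier examples; a minimal sketch of step 2 from the docstring (the point value is a placeholder, and the bordered-crop bookkeeping of step 1 is assumed done):

# Sketch: undo the rotation for a point expressed in rotated-image coordinates.
pt_in_rotated = np.float32([[[340.0, 210.0]]])
pt_in_original = cv2.transform(pt_in_rotated, inv_rot_mat)[0, 0]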
Example 7: original_roi
# Required module: import cv2 [as alias]
# Or: from cv2 import invertAffineTransform [as alias]
def original_roi(self):
    """ :class:`numpy.ndarray`: The original region of interest of the mask in the
    source frame. """
    points = np.array([[0, 0],
                       [0, self.stored_size - 1],
                       [self.stored_size - 1, self.stored_size - 1],
                       [self.stored_size - 1, 0]], np.int32).reshape((-1, 1, 2))
    matrix = cv2.invertAffineTransform(self._affine_matrix)
    roi = cv2.transform(points, matrix).reshape((4, 2))
    logger.trace("Returning: %s", roi)
    return roi
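A hedged usage sketch: the recovered quadrilateral can be drawn onto the source frame for inspection (`mask_obj` and `frame` are placeholders for the object this method belongs to and its frame):

# Sketch: visualise the recovered ROI on the source frame.
roi = mask_obj.original_roi()
cv2.polylines(frame, [roi.astype(np.int32)], isClosed=True,
              color=(0, 255, 0), thickness=2)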
Example 8: get_original_roi
# Required module: import cv2 [as alias]
# Or: from cv2 import invertAffineTransform [as alias]
def get_original_roi(self, mat, size, padding=0):
    """ Return the square aligned box location on the original image """
    logger.trace("matrix: %s, size: %s, padding: %s", mat, size, padding)
    matrix = self.transform_matrix(mat, size, padding)
    points = np.array([[0, 0], [0, size - 1], [size - 1, size - 1], [size - 1, 0]], np.int32)
    points = points.reshape((-1, 1, 2))
    matrix = cv2.invertAffineTransform(matrix)
    logger.trace("Returning: (points: %s, matrix: %s)", points, matrix)
    return cv2.transform(points, matrix)
Example 9: _rotate_face
# Required module: import cv2 [as alias]
# Or: from cv2 import invertAffineTransform [as alias]
def _rotate_face(face, rotation_matrix):
    """ Rotates the detection bounding box around the given rotation matrix.

    Parameters
    ----------
    face: :class:`DetectedFace`
        A :class:`DetectedFace` containing the `x`, `w`, `y`, `h` detection bounding box
        points.
    rotation_matrix: numpy.ndarray
        The rotation matrix to rotate the given object by.

    Returns
    -------
    :class:`DetectedFace`
        The same class with the detection bounding box points rotated by the given matrix.
    """
    logger.trace("Rotating face: (face: %s, rotation_matrix: %s)", face, rotation_matrix)
    bounding_box = [[face.left, face.top],
                    [face.right, face.top],
                    [face.right, face.bottom],
                    [face.left, face.bottom]]
    rotation_matrix = cv2.invertAffineTransform(rotation_matrix)
    points = np.array(bounding_box, "int32")
    points = np.expand_dims(points, axis=0)
    transformed = cv2.transform(points, rotation_matrix).astype("int32")
    rotated = transformed.squeeze()

    # Bounding box should follow x, y planes, so get min/max for non-90 degree rotations
    pt_x = min([pnt[0] for pnt in rotated])
    pt_y = min([pnt[1] for pnt in rotated])
    pt_x1 = max([pnt[0] for pnt in rotated])
    pt_y1 = max([pnt[1] for pnt in rotated])
    width = pt_x1 - pt_x
    height = pt_y1 - pt_y

    face.x = int(pt_x)
    face.y = int(pt_y)
    face.w = int(width)
    face.h = int(height)
    return face
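A hedged sketch of exercising this function with a stand-in for the project's DetectedFace class (the namespace, coordinates, and matrix below are illustrative, and a module-level logger is assumed to be configured as in the project):

# Sketch: exercise _rotate_face with a stand-in object.
from types import SimpleNamespace

face = SimpleNamespace(x=100, y=80, w=60, h=60)
face.left, face.top = face.x, face.y
face.right, face.bottom = face.x + face.w, face.y + face.h

rot = cv2.getRotationMatrix2D((320, 240), 90, 1.0)  # matrix the frame was rotated by
face = _rotate_face(face, rot)
print(face.x, face.y, face.w, face.h)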
Example 10: transform_points
# Required module: import cv2 [as alias]
# Or: from cv2 import invertAffineTransform [as alias]
def transform_points(points, mat, invert=False):
    if invert:
        mat = cv2.invertAffineTransform(mat)
    points = np.expand_dims(points, axis=1)  # (N, 2) -> (N, 1, 2) for cv2.transform
    points = cv2.transform(points, mat)
    points = np.squeeze(points)
    return points
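A minimal round trip (illustrative values): forward-transform landmarks with a matrix, then recover the originals with invert=True.

# Sketch: round-trip landmarks through a transform and its inverse.
mat = cv2.getRotationMatrix2D((128, 128), 15, 0.9)
pts = np.float32([[100, 120], [150, 118], [126, 160]])

warped = transform_points(pts, mat)               # forward
restored = transform_points(warped, mat, True)    # inverse
print(np.allclose(restored, pts, atol=1e-4))      # True, up to float error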
Example 11: face_swap
# Required module: import cv2 [as alias]
# Or: from cv2 import invertAffineTransform [as alias]
def face_swap(orig_image, down_scale):
    # extract faces from the original image
    facelist = extract_faces(orig_image, 256)
    result_image = orig_image

    # iterate through all detected faces
    for (face, resized_image) in facelist:
        range_ = numpy.linspace(128 - 80, 128 + 80, 5)
        mapx = numpy.broadcast_to(range_, (5, 5))
        mapy = mapx.T

        # warp the image like in training
        mapx = mapx + numpy.random.normal(size=(5, 5), scale=5)
        mapy = mapy + numpy.random.normal(size=(5, 5), scale=5)
        src_points = numpy.stack([mapx.ravel(), mapy.ravel()], axis=-1)
        dst_points = numpy.mgrid[0:65:16, 0:65:16].T.reshape(-1, 2)
        mat = umeyama(src_points, dst_points, True)[0:2]
        warped_resized_image = cv2.warpAffine(resized_image, mat, (64, 64)) / 255.0
        test_images = numpy.empty((1,) + warped_resized_image.shape)
        test_images[0] = warped_resized_image

        # predict the swapped face using encoder A
        figure = autoencoder_A.predict(test_images)
        new_face = numpy.clip(numpy.squeeze(figure[0]) * 255.0, 0, 255).astype('uint8')

        # paste the predicted face back into the extracted face crop
        mat_inv = umeyama(dst_points, src_points, True)[0:2]
        dest_face = blend_warp(new_face, resized_image, mat_inv)

        # create an inverse affine transform matrix to insert the extracted face again
        mat = get_align_mat(face)
        mat = mat * (256 - 2 * 48)
        mat[:, 2] += 48
        mat_inv = cv2.invertAffineTransform(mat)

        # insert the new face into the original image
        result_image = blend_warp(dest_face, result_image, mat_inv)

    # return the resulting image after downscaling
    return cv2.resize(result_image,
                      (result_image.shape[1] // down_scale, result_image.shape[0] // down_scale))
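The helpers extract_faces, umeyama, get_align_mat and blend_warp come from the source project and are not shown here. For illustration only, one plausible (hypothetical, not the project's actual code) way a helper like blend_warp could paste a warped face over a destination image:

# Hypothetical sketch of a blend_warp-style helper, for illustration only.
def blend_warp(src, dst, mat):
    out = dst.copy()
    # BORDER_TRANSPARENT leaves destination pixels untouched outside the warped area.
    cv2.warpAffine(src, mat, (dst.shape[1], dst.shape[0]), out,
                   flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_TRANSPARENT)
    return out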