This article collects typical usage examples of the Python method cv2.getAffineTransform. If you are unsure what cv2.getAffineTransform does or how to call it, the curated code samples below may help. You can also explore other usage examples from the cv2 module.
The following shows 15 code examples of cv2.getAffineTransform, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
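Before the collected examples, here is a minimal self-contained sketch of the API itself (the point values are illustrative assumptions, not taken from any example below): cv2.getAffineTransform takes three source points and three destination points, returns the 2x3 affine matrix that maps one triangle onto the other, and that matrix is then applied with cv2.warpAffine.

import cv2
import numpy as np

img = np.zeros((200, 300, 3), dtype=np.uint8)        # placeholder 300x200 image
src = np.float32([[0, 0], [299, 0], [0, 199]])        # three corners of the source
dst = np.float32([[10, 20], [280, 5], [25, 190]])     # where those corners should land
M = cv2.getAffineTransform(src, dst)                  # 2x3 affine matrix
warped = cv2.warpAffine(img, M, (300, 200))           # dsize is (width, height)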
Example 1: cv_preprocess_image
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def cv_preprocess_image(img, output_height, output_width, is_training):
    assert output_height == output_width
    # Randomly shift the hue channel in HSV space.
    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    img[:, :, 0] = np.uint8((np.int32(img[:, :, 0]) + (180 + random.randrange(-9, 10))) % 180)
    img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
    rows, cols, ch = img.shape
    output_size = output_width

    def r():
        # Random jitter of up to +/-5% of the output size.
        return (random.random() - 0.5) * 0.1 * output_size

    pts1 = np.float32([[0, 0], [cols, rows], [0, rows]])
    pts2 = np.float32([[r(), r()], [output_size + r(), output_size + r()], [r(), output_size + r()]])
    M = cv2.getAffineTransform(pts1, pts2)
    noize = np.random.normal(0, random.random() * (0.05 * 255), size=img.shape)
    img = np.array(img, dtype=np.float32) + noize
    img = cv2.warpAffine(img, M, (output_size, output_size), flags=cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_REFLECT_101)
    return img
Example 2: crop_face
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def crop_face(img, face: Window, crop_size=200):
    x1 = face.x
    y1 = face.y
    x2 = face.width + face.x - 1
    y2 = face.width + face.y - 1
    centerX = (x1 + x2) // 2
    centerY = (y1 + y2) // 2
    lst = (x1, y1), (x1, y2), (x2, y2), (x2, y1)
    pointlist = [rotate_point(x, y, centerX, centerY, face.angle) for x, y in lst]
    srcTriangle = np.array([
        pointlist[0],
        pointlist[1],
        pointlist[2],
    ], dtype=np.float32)
    dstTriangle = np.array([
        (0, 0),
        (0, crop_size - 1),
        (crop_size - 1, crop_size - 1),
    ], dtype=np.float32)
    rotMat = cv2.getAffineTransform(srcTriangle, dstTriangle)
    ret = cv2.warpAffine(img, rotMat, (crop_size, crop_size))
    return ret, pointlist
Example 3: affine
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def affine(self, in_mat, slope):
    height = in_mat.shape[0]
    width = in_mat.shape[1]
    xiff = abs(slope) * height
    if slope > 0:
        plTri = np.float32([[0, 0], [width - xiff - 1, 0], [xiff, height - 1]])
        dstTri = np.float32([[xiff / 2, 0], [width - 1 - xiff / 2, 0], [xiff / 2, height - 1]])
    else:
        plTri = np.float32([[xiff, 0], [width - 1, 0], [0, height - 1]])
        dstTri = np.float32([[xiff / 2, 0], [width - 1 - xiff / 2, 0], [xiff / 2, height - 1]])
    warp_mat = cv2.getAffineTransform(plTri, dstTri)
    # Pass the interpolation mode via the flags keyword (positionally it would be
    # interpreted as the optional dst argument).
    if in_mat.shape[0] > 36 or in_mat.shape[1] > 136:
        affine_mat = cv2.warpAffine(in_mat, warp_mat, (int(height), int(width)), flags=cv2.INTER_AREA)
    else:
        affine_mat = cv2.warpAffine(in_mat, warp_mat, (int(height), int(width)), flags=cv2.INTER_CUBIC)
    return affine_mat
Example 4: create_affine_transform_augmentation
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def create_affine_transform_augmentation(img, random_limits=(0.8, 1.1)):
    '''
    Creates an augmentation by computing an affine transform from three
    points in the image to three randomly perturbed points.
    '''
    y, x = img.shape[:2]
    fx = float(x)
    fy = float(y)
    src_point = np.float32([[fx / 2, fy / 3],
                            [2 * fx / 3, 2 * fy / 3],
                            [fx / 3, 2 * fy / 3]])
    random_shift = (np.random.rand(3, 2) - 0.5) * 2 * (random_limits[1] - random_limits[0]) / 2 + np.mean(random_limits)
    dst_point = src_point * random_shift.astype(np.float32)
    transform = cv2.getAffineTransform(src_point, dst_point)
    borderValue = 0
    if img.ndim == 3:
        borderValue = np.median(np.reshape(img, (img.shape[0] * img.shape[1], -1)), axis=0)
    else:
        borderValue = np.median(img)
    warped_img = cv2.warpAffine(img, transform, dsize=(x, y), borderValue=borderValue)
    return warped_img
Example 5: Affine_aug
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def Affine_aug(src, strength, label=None):
    image = src
    pts_base = np.float32([[10, 100], [200, 50], [100, 250]])
    pts1 = np.random.rand(3, 2) * random.uniform(-strength, strength) + pts_base
    pts1 = pts1.astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts_base)
    trans_img = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]),
                               borderMode=cv2.BORDER_CONSTANT,
                               borderValue=cfg.DATA.PIXEL_MEAN)
    label_rotated = None
    if label is not None:
        label = label.T
        full_label = np.row_stack((label, np.ones(shape=(1, label.shape[1]))))
        label_rotated = np.dot(M, full_label)
        # label_rotated = label_rotated.astype(np.int32)
        label_rotated = label_rotated.T
    return trans_img, label_rotated
Example 6: distort_affine_cv2
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def distort_affine_cv2(image, alpha_affine=10, random_state=None):
    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape
    shape_size = shape[:2]
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([
        center_square + square_size,
        [center_square[0] + square_size, center_square[1] - square_size],
        center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    distorted_image = cv2.warpAffine(
        image, M, shape_size[::-1], borderMode=cv2.BORDER_REPLICATE)  # or cv2.BORDER_REFLECT_101
    return distorted_image
Example 7: align_face_to_template
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def align_face_to_template(img, facial_landmarks, output_dim, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP):
    """
    Aligns an image by warping it so that the landmarks on the
    image (src) fit the landmarks on the template (dst).
    Args:
        img: src image to be aligned
        facial_landmarks: list of 68 landmarks (obtained from dlib)
        output_dim: image output dimension
    """
    np_landmarks = np.float32(facial_landmarks)
    np_landmarks_idx = np.array(landmarkIndices)
    H = cv2.getAffineTransform(np_landmarks[np_landmarks_idx],
                               output_dim * SCALED_LANDMARKS[np_landmarks_idx])
    warped = cv2.warpAffine(img, H, (output_dim, output_dim))
    return warped
Example 8: _rois_from_img
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def _rois_from_img(self, img):
    sorted_src_pts = self._find_target_coordinates(img)
    dst_points = np.array([(0, -1),
                           (0, 0),
                           (-1, 0)], dtype=np.float32)
    wrap_mat = cv2.getAffineTransform(dst_points, sorted_src_pts)
    rectangles = self._make_grid(self._n_cols, self._n_rows,
                                 self._top_margin, self._bottom_margin,
                                 self._left_margin, self._right_margin,
                                 self._horizontal_fill, self._vertical_fill)
    shift = np.dot(wrap_mat, [1, 1, 0]) - sorted_src_pts[1]  # point 1 is the ref, at 0,0
    rois = []
    for i, r in enumerate(rectangles):
        r = np.append(r, np.zeros((4, 1)), axis=1)
        mapped_rectangle = np.dot(wrap_mat, r.T).T
        mapped_rectangle -= shift
        ct = mapped_rectangle.reshape((1, 4, 2)).astype(np.int32)
        cv2.drawContours(img, [ct], -1, (255, 0, 0), 1, LINE_AA)
        rois.append(ROI(ct, idx=i + 1))
    # cv2.imshow("dbg", img)
    # cv2.waitKey(0)
    return rois
Example 9: warpTriangle
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def warpTriangle(img1, img2, t1, t2):
    def applyAffineTransform(src, srcTri, dstTri, size):
        warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))
        dst = cv2.warpAffine(src, warpMat, (size[0], size[1]), None,
                             flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
        return dst

    # Bounding rectangles of each triangle.
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))
    # Triangle coordinates relative to their bounding rectangles.
    t1Rect = []
    t2Rect = []
    t2RectInt = []
    for i in range(0, 3):
        t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
        t2RectInt.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
    # Mask of the destination triangle.
    mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
    cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1, 1, 1))
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    size = (r2[2], r2[3])
    img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)
    img2Rect = img2Rect * mask
    # Blend the warped patch into img2 inside the triangle mask.
    img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = \
        img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] * ((1.0, 1.0, 1.0) - mask)
    img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = \
        img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2Rect
Example 10: similarityTransform
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def similarityTransform(inPoints, outPoints):
    s60 = math.sin(60 * math.pi / 180)
    c60 = math.cos(60 * math.pi / 180)
    inPts = np.copy(inPoints).tolist()
    outPts = np.copy(outPoints).tolist()
    # Synthesise a third point pair (the apex of an equilateral triangle) so that
    # cv2.getAffineTransform, which needs three correspondences, can be used.
    xin = c60 * (inPts[0][0] - inPts[1][0]) - s60 * (inPts[0][1] - inPts[1][1]) + inPts[1][0]
    yin = s60 * (inPts[0][0] - inPts[1][0]) + c60 * (inPts[0][1] - inPts[1][1]) + inPts[1][1]
    inPts.append([int(xin), int(yin)])
    xout = c60 * (outPts[0][0] - outPts[1][0]) - s60 * (outPts[0][1] - outPts[1][1]) + outPts[1][0]
    yout = s60 * (outPts[0][0] - outPts[1][0]) + c60 * (outPts[0][1] - outPts[1][1]) + outPts[1][1]
    outPts.append([int(xout), int(yout)])
    return cv2.getAffineTransform(np.array(inPts, dtype=np.float32), np.array(outPts, dtype=np.float32))
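A minimal usage sketch for the helper above, with illustrative (assumed) point values: only two point pairs (for instance, eye centres) are supplied, and the function adds the third pair itself before calling cv2.getAffineTransform.

import math
import cv2
import numpy as np

face = np.zeros((600, 600, 3), dtype=np.uint8)                   # placeholder image
eyes_src = [[180, 220], [420, 240]]                               # detected eye centres
eyes_dst = [[0.3 * 600, 0.35 * 600], [0.7 * 600, 0.35 * 600]]     # canonical eye positions
M = similarityTransform(eyes_src, eyes_dst)                       # 2x3 similarity matrix
aligned = cv2.warpAffine(face, M, (600, 600))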
Example 11: aff_trans
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def aff_trans(img, cols, rows, img_path_mod):
    # Three affine transformations.
    # Transformation 1
    pts1 = np.float32([[10, 10], [10, 70], [70, 10]])
    pts2 = np.float32([[1, 1], [20, 80], [80, 20]])
    M1 = cv2.getAffineTransform(pts1, pts2)
    dst1 = cv2.warpAffine(img, M1, (cols, rows))
    cv2.imwrite(img_path_mod + "_at1.jpg", dst1)
    # Transformation 2
    pts3 = np.float32([[1, 1], [20, 80], [80, 20]])
    pts4 = np.float32([[10, 10], [20, 70], [70, 20]])
    M2 = cv2.getAffineTransform(pts3, pts4)
    dst2 = cv2.warpAffine(img, M2, (cols, rows))
    cv2.imwrite(img_path_mod + "_at2.jpg", dst2)
    # Transformation 3
    pts5 = np.float32([[20, 20], [10, 80], [80, 10]])
    pts6 = np.float32([[1, 1], [30, 70], [70, 30]])
    M3 = cv2.getAffineTransform(pts5, pts6)
    dst3 = cv2.warpAffine(img, M3, (cols, rows))
    cv2.imwrite(img_path_mod + "_at3.jpg", dst3)
Example 12: get_map_to_predict
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def get_map_to_predict(src_locs, src_x_axiss, src_y_axiss, map, map_size,
                       interpolation=cv2.INTER_LINEAR):
    fss = []
    valids = []
    center = (map_size - 1.0) / 2.0
    dst_theta = np.pi / 2.0
    dst_loc = np.array([center, center])
    dst_x_axis = np.array([np.cos(dst_theta), np.sin(dst_theta)])
    dst_y_axis = np.array([np.cos(dst_theta + np.pi / 2), np.sin(dst_theta + np.pi / 2)])

    def compute_points(center, x_axis, y_axis):
        points = np.zeros((3, 2), dtype=np.float32)
        points[0, :] = center
        points[1, :] = center + x_axis
        points[2, :] = center + y_axis
        return points

    dst_points = compute_points(dst_loc, dst_x_axis, dst_y_axis)
    for i in range(src_locs.shape[0]):
        src_loc = src_locs[i, :]
        src_x_axis = src_x_axiss[i, :]
        src_y_axis = src_y_axiss[i, :]
        src_points = compute_points(src_loc, src_x_axis, src_y_axis)
        M = cv2.getAffineTransform(src_points, dst_points)
        fs = cv2.warpAffine(map, M, (map_size, map_size), None, flags=interpolation,
                            borderValue=np.nan)
        valid = np.invert(np.isnan(fs))
        valids.append(valid)
        fss.append(fs)
    return fss, valids
Example 13: get_affine_transform
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def get_affine_transform(
    center, scale, rot, output_size,
    shift=np.array([0, 0], dtype=np.float32), inv=0
):
    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
        print(scale)
        scale = np.array([scale, scale])
    scale_tmp = scale * 200.0
    src_w = scale_tmp[0]
    dst_w = output_size[0]
    dst_h = output_size[1]
    rot_rad = np.pi * rot / 180
    src_dir = get_dir([0, src_w * -0.5], rot_rad)
    dst_dir = np.array([0, dst_w * -0.5], np.float32)
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale_tmp * shift
    src[1, :] = center + src_dir + scale_tmp * shift
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
    if inv:
        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    return trans
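Example 13 relies on two helpers, get_dir and get_3rd_point, that are not shown in the excerpt. A plausible minimal version of them (an assumption modelled on common pose-estimation codebases, not taken from this page) and an illustrative call might look like the sketch below; the center, scale, and output_size values are made up for demonstration.

import cv2
import numpy as np

def get_dir(src_point, rot_rad):
    # Rotate a 2D point by rot_rad radians around the origin.
    sn, cs = np.sin(rot_rad), np.cos(rot_rad)
    return [src_point[0] * cs - src_point[1] * sn,
            src_point[0] * sn + src_point[1] * cs]

def get_3rd_point(a, b):
    # Third point completing a right angle with the segment from b to a.
    direct = a - b
    return b + np.array([-direct[1], direct[0]], dtype=np.float32)

img = np.zeros((480, 640, 3), dtype=np.uint8)                  # placeholder frame
center = np.array([320.0, 240.0], dtype=np.float32)            # box centre in the image
scale = 1.2                                                     # box size / 200.0 convention
trans = get_affine_transform(center, scale, 0, [192, 256])      # 2x3 matrix
patch = cv2.warpAffine(img, trans, (192, 256), flags=cv2.INTER_LINEAR)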
Example 14: affine_transform
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def affine_transform(src, src_tri, dst_tri, size):
    warp_mat = cv2.getAffineTransform(np.float32(src_tri), np.float32(dst_tri))
    dst = cv2.warpAffine(src, warp_mat, (size[0], size[1]),
                         None,
                         flags=cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_REFLECT_101)
    return dst
Example 15: applyAffineTransform
# Required import: import cv2 [as alias]
# Or: from cv2 import getAffineTransform [as alias]
def applyAffineTransform(src, srcTri, dstTri, size):
    # Given a pair of triangles, find the affine transform.
    warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))
    # Apply the affine transform just found to the src image.
    dst = cv2.warpAffine(src, warpMat, (size[0], size[1]), None,
                         flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
    return dst
# Warps and alpha blends triangular regions from img1 and img2 to img