

Python cv2.warpAffine Method Code Examples

This article collects and summarizes typical code examples of the cv2.warpAffine method in Python. If you are unsure what exactly cv2.warpAffine does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the cv2 module that this method belongs to.


The sections below present 15 code examples of the cv2.warpAffine method, sorted by popularity by default.
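Before the project-level examples, here is a minimal, self-contained sketch of a typical cv2.warpAffine call; the file names are placeholders. It builds a 2x3 translation matrix and applies it; note that the dsize argument is given as (width, height):

import cv2
import numpy as np

img = cv2.imread("input.jpg")               # placeholder input path
h, w = img.shape[:2]
M = np.float32([[1, 0, 50], [0, 1, 20]])    # 2x3 affine matrix: shift 50 px right, 20 px down
shifted = cv2.warpAffine(img, M, (w, h))    # dsize is (width, height)
cv2.imwrite("shifted.jpg", shifted)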

Example 1: cv2_letterbox_resize

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def cv2_letterbox_resize(img, expected_size):
    ih, iw = img.shape[0:2]
    ew, eh = expected_size
    scale = min(eh / ih, ew / iw)
    nh = int(ih * scale)
    nw = int(iw * scale)
    smat = np.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]], np.float32)
    top = (eh - nh) // 2
    bottom = eh - nh - top
    left = (ew - nw) // 2
    right = ew - nw - left
    tmat = np.array([[1, 0, left], [0, 1, top], [0, 0, 1]], np.float32)
    amat = np.dot(tmat, smat)
    amat_ = amat[:2, :]
    dst = cv2.warpAffine(img, amat_, expected_size)
    if dst.ndim == 2:
        dst = np.expand_dims(dst, axis=-1)
    return dst, amat 
Developer: akkaze, Project: tf2-yolo3, Lines: 20, Source: utils.py
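A hypothetical call of the helper above: letterbox-resize a frame to 416x416 and keep the returned 3x3 affine matrix so detections can later be mapped back to the original image (the path and target size are assumptions):

img = cv2.imread("frame.jpg")                              # placeholder input path
letterboxed, amat = cv2_letterbox_resize(img, (416, 416))
print(letterboxed.shape, amat.shape)                       # e.g. (416, 416, 3) and (3, 3)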

Example 2: cv_rotate

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def cv_rotate(image, landmarks, heatmap, rot, scale, resolution=256):
    img_mat = cv2.getRotationMatrix2D((resolution//2, resolution//2), rot, scale)
    ones = np.ones(shape=(landmarks.shape[0], 1))
    stacked_landmarks = np.hstack([landmarks, ones])
    new_landmarks = img_mat.dot(stacked_landmarks.T).T
    if np.max(new_landmarks) > 255 or np.min(new_landmarks) < 0:
        return image, landmarks, heatmap
    else:
        new_image = cv2.warpAffine(image, img_mat, (resolution, resolution))
        new_heatmap = heatmap  # keep the incoming value (possibly None) so the return below never hits an unbound name
        if heatmap is not None:
            new_heatmap = np.zeros((heatmap.shape[0], 64, 64))
            for i in range(heatmap.shape[0]):
                if new_landmarks[i][0] > 0:
                    new_heatmap[i] = draw_gaussian(new_heatmap[i],
                                                   new_landmarks[i]/4.0+1, 1)
        return new_image, new_landmarks, new_heatmap 
Developer: protossw512, Project: AdaptiveWingLoss, Lines: 18, Source: utils.py
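A hypothetical call of cv_rotate on a 256x256 face crop with 68 landmarks, rotating by 10 degrees at scale 1.0 and skipping the heatmap branch by passing None (the path and landmark values are placeholders):

face = cv2.imread("face_256.png")            # placeholder 256x256 crop
landmarks = np.random.rand(68, 2) * 255      # placeholder landmark coordinates
img_r, lms_r, hm_r = cv_rotate(face, landmarks, None, 10, 1.0)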

Example 3: get_transform

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def get_transform(self, img):
        center = img.shape[1::-1] * self._rand_range(
            self.center_range[0], self.center_range[1], (2,))
        deg = self._rand_range(-self.max_deg, self.max_deg)
        if self.step_deg:
            deg = deg // self.step_deg * self.step_deg
        """
        The correct center is shape*0.5-0.5. This can be verified by:

        SHAPE = 7
        arr = np.random.rand(SHAPE, SHAPE)
        orig = arr
        c = SHAPE * 0.5 - 0.5
        c = (c, c)
        for k in range(4):
            mat = cv2.getRotationMatrix2D(c, 90, 1)
            arr = cv2.warpAffine(arr, mat, arr.shape)
        assert np.all(arr == orig)
        """
        mat = cv2.getRotationMatrix2D(tuple(center - 0.5), deg, 1)
        return WarpAffineTransform(
            mat, img.shape[1::-1], interp=self.interp,
            borderMode=self.border, borderValue=self.border_value) 
Developer: tensorpack, Project: dataflow, Lines: 25, Source: geometry.py

Example 4: merge_img

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def merge_img(src_img, dst_img, dst_matrix, dst_points, blur_detail_x=None, blur_detail_y=None, mat_multiple=None):
    face_mask = np.zeros(src_img.shape, dtype=src_img.dtype)

    for group in core.OVERLAY_POINTS:
        cv2.fillConvexPoly(face_mask, cv2.convexHull(dst_matrix[group]), (255, 255, 255))

    r = cv2.boundingRect(np.float32([dst_points[:core.FACE_END]]))

    center = (r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))

    if mat_multiple:
        mat = cv2.getRotationMatrix2D(center, 0, mat_multiple)
        face_mask = cv2.warpAffine(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))

    if blur_detail_x and blur_detail_y:
        face_mask = cv2.blur(face_mask, (blur_detail_x, blur_detail_y), center)

    return cv2.seamlessClone(np.uint8(dst_img), src_img, face_mask, center, cv2.NORMAL_CLONE) 
Developer: gyp03, Project: yry, Lines: 20, Source: morpher.py

Example 5: center_extent

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def center_extent(image, size):
	# Grab the extent width and height
	(w, h) = size

	# When the width is greater than the height
	if image.shape[1] > image.shape[0]:
		image = imutils.resize(image, width=w)
	# When the height is greater than the width
	else:
		image = imutils.resize(image, height=h)

	# Save memory for the extent of the image and grab it
	extent = np.zeros((h, w), dtype="uint8")
	offset_x = (w - image.shape[1]) // 2
	offset_y = (h - image.shape[0]) // 2
	extent[offset_y:offset_y + image.shape[0], offset_x:offset_x + image.shape[1]] = image

	# Compute the center of mass of the image and then move the center of mass to the center of the image
	(c_y, c_x) = np.round(mahotas.center_of_mass(extent)).astype("int32")
	(d_x, d_y) = ((size[0] // 2) - c_x, (size[1] // 2) - c_y)
	matrix = np.float32([[1, 0, d_x], [0, 1, d_y]])
	extent = cv2.warpAffine(extent, matrix, size)

	# Return the extent of the image
	return extent 
Developer: hsSam, Project: PracticalPythonAndOpenCV_CaseStudies, Lines: 27, Source: dataset.py
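A hypothetical usage sketch for center_extent: center a binarized, single-channel digit crop on a 28x28 canvas by its center of mass (the path and canvas size are assumptions):

digit = cv2.imread("digit.png", cv2.IMREAD_GRAYSCALE)                         # placeholder path
digit = cv2.threshold(digit, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]  # binarize
centered = center_extent(digit, (28, 28))                                     # size is (width, height)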

Example 6: cv_preprocess_image

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def cv_preprocess_image(img, output_height, output_width, is_training):
        assert output_height == output_width
        img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        img[:, :, 0] = np.uint8((np.int32(img[:, :, 0]) + (180 + random.randrange(-9, 10))) % 180)
        img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
        rows, cols, ch = img.shape
        output_size = output_width

        def r():
            return (random.random() - 0.5) * 0.1 * output_size
        pts1 = np.float32([[0, 0], [cols, rows], [0, rows]])
        pts2 = np.float32([[r(), r()], [output_size + r(), output_size + r()], [r(), output_size + r()]])
        M = cv2.getAffineTransform(pts1, pts2)
        noize = np.random.normal(0, random.random() * (0.05 * 255), size=img.shape)
        img = np.array(img, dtype=np.float32) + noize
        img = cv2.warpAffine(img, M, (output_size, output_size), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
        return img 
Developer: yuantailing, Project: ctw-baseline, Lines: 19, Source: chineselib.py
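A hypothetical call of the augmentation above. The function expects an RGB image, so the BGR array returned by cv2.imread is converted first; the path and output size are assumptions:

bgr = cv2.imread("char.png")                                   # placeholder path
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
patch = cv_preprocess_image(rgb, 64, 64, is_training=True)     # float32 array of shape (64, 64, 3)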

Example 7: __rotate_image_size_corrected

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def __rotate_image_size_corrected(image, angle):
    # Calculate max size for the rotated template and image offset
    image_size_height, image_size_width = image.shape
    image_center_x = image_size_width // 2
    image_center_y = image_size_height // 2

    # Create rotation matrix
    rotation_matrix = cv2.getRotationMatrix2D((image_center_x, image_center_y), -angle, 1)

    # Apply offset
    new_image_size = int(math.ceil(cv2.norm((image_size_height, image_size_width), normType=cv2.NORM_L2)))
    rotation_matrix[0, 2] += (new_image_size - image_size_width) / 2
    rotation_matrix[1, 2] += (new_image_size - image_size_height) / 2

    # Apply rotation to the template
    image_rotated = cv2.warpAffine(image, rotation_matrix, (new_image_size, new_image_size))
    return image_rotated 
Developer: microsoft, Project: AI-Robot-Challenge-Lab, Lines: 19, Source: cv_detection_right_hand.py
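A hypothetical call of the rotation helper above. It expects a single-channel image (the shape is unpacked into exactly two values) and returns the rotated template on an enlarged square canvas so no corners are clipped:

template = cv2.imread("template.png", cv2.IMREAD_GRAYSCALE)    # placeholder path
rotated = __rotate_image_size_corrected(template, 30)          # rotate by 30 degrees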

Example 8: image_loader

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def image_loader(self, path, points):
        if os.path.exists(path):
            img = cv2.imread(path)
            three_points = np.zeros((3, 2))
            three_points[0] = np.array(points[:2])  # the location of the left eye
            three_points[1] = np.array(points[2:4]) # the location of the right eye
            three_points[2] = np.array([(points[6] + points[8]) / 2, (points[7] + points[9]) / 2]) # the location of the center of the mouth
            three_points = three_points.astype(np.float32)
            M = transformation_from_points(three_points, self.scale)
            align_img = cv2.warpAffine(img, M, self.ori_scale, borderValue=[127, 127, 127])
            l = int(round(self.ori_scale[0] / 2 - self.crop_width / 2 + self.random_x))
            r = int(round(self.ori_scale[0] / 2 + self.crop_width / 2 + self.random_x))
            t = int(round(self.ori_scale[1] / 2 - self.crop_height / 2 + self.crop_center_y_offset + self.random_y))
            d = int(round(self.ori_scale[1] / 2 + self.crop_height / 2 + self.crop_center_y_offset + self.random_y))
            align_img2 = align_img[t:d, l:r, :]
            align_img2 = cv2.resize(align_img2, self.output_scale)
            return align_img2
        else:
            raise FileNotFoundError(path)
Developer: Hangz-nju-cuhk, Project: Talking-Face-Generation-DAVS, Lines: 21, Source: face_align.py

Example 9: pre_process

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def pre_process(image, cfg=None, scale=1, meta=None):
    height, width = image.shape[0:2]
    new_height = int(height * scale)
    new_width  = int(width * scale)
    mean = np.array(cfg.DATASET.MEAN, dtype=np.float32).reshape(1, 1, 3)
    std = np.array(cfg.DATASET.STD, dtype=np.float32).reshape(1, 1, 3)

    inp_height, inp_width = cfg.MODEL.INPUT_H, cfg.MODEL.INPUT_W
    c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
    s = max(height, width) * 1.0


    trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
    resized_image = cv2.resize(image, (new_width, new_height))
    inp_image = cv2.warpAffine(
      resized_image, trans_input, (inp_width, inp_height),
      flags=cv2.INTER_LINEAR)
    inp_image = ((inp_image / 255. - mean) / std).astype(np.float32)

    images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
    images = torch.from_numpy(images)
    meta = {'c': c, 's': s, 
            'out_height': inp_height // cfg.MODEL.DOWN_RATIO, 
            'out_width': inp_width // cfg.MODEL.DOWN_RATIO}
    return images, meta 
Developer: tensorboy, Project: centerpose, Lines: 27, Source: convert2onnx.py

Example 10: get_map_to_predict

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def get_map_to_predict(src_locs, src_x_axiss, src_y_axiss, map, map_size,
                       interpolation=cv2.INTER_LINEAR):
  fss = []
  valids = []

  center = (map_size-1.0)/2.0
  dst_theta = np.pi/2.0
  dst_loc = np.array([center, center])
  dst_x_axis = np.array([np.cos(dst_theta), np.sin(dst_theta)])
  dst_y_axis = np.array([np.cos(dst_theta+np.pi/2), np.sin(dst_theta+np.pi/2)])

  def compute_points(center, x_axis, y_axis):
    points = np.zeros((3,2),dtype=np.float32)
    points[0,:] = center
    points[1,:] = center + x_axis
    points[2,:] = center + y_axis
    return points

  dst_points = compute_points(dst_loc, dst_x_axis, dst_y_axis)
  for i in range(src_locs.shape[0]):
    src_loc = src_locs[i,:]
    src_x_axis = src_x_axiss[i,:]
    src_y_axis = src_y_axiss[i,:]
    src_points = compute_points(src_loc, src_x_axis, src_y_axis)
    M = cv2.getAffineTransform(src_points, dst_points)

    fs = cv2.warpAffine(map, M, (map_size, map_size), None, flags=interpolation,
                        borderValue=np.NaN)
    valid = np.invert(np.isnan(fs))
    valids.append(valid)
    fss.append(fs)
  return fss, valids 
Developer: ringringyi, Project: DOTA_models, Lines: 34, Source: map_utils.py

Example 11: random_rotate

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def random_rotate(self, img):
        """Random rotations by 0, 90, 180, 360 degrees"""
        theta = np.random.choice([0, 90, 180, 360])
        if theta == 0:
            return img
        h, w, _ = img.shape
        mat = cv2.getRotationMatrix2D((w / 2, h / 2), theta, 1)
        return cv2.warpAffine(img, mat, (w, h)) 
Developer: preritj, Project: progressive_growing_of_GANs, Lines: 10, Source: utils.py

Example 12: apply_image

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def apply_image(self, img):
        ret = cv2.warpAffine(img, self.mat, self.dsize,
                             flags=self.interp,
                             borderMode=self.borderMode,
                             borderValue=self.borderValue)
        if img.ndim == 3 and ret.ndim == 2:
            ret = ret[:, :, np.newaxis]
        return ret 
Developer: tensorpack, Project: dataflow, Lines: 10, Source: transform.py

Example 13: deskew

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def deskew(img):
	m = cv2.moments(img)
	if abs(m['mu02']) < 1e-2:
		return img.copy()
	skew = m['mu11']/m['mu02']
	M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
	img = cv2.warpAffine(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
	return img
# From the OpenCV samples; used for SVM training
Developer: wzh191920, Project: License-Plate-Recognition, Lines: 11, Source: predict.py
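A hypothetical usage sketch for deskew. The helper reads a module-level constant SZ (the square cell size; 20 in the OpenCV digits sample this code is adapted from), and the path below is a placeholder:

SZ = 20                                                     # assumed cell size, as in OpenCV's digits sample
cell = cv2.imread("digit_cell.png", cv2.IMREAD_GRAYSCALE)   # placeholder path
cell = cv2.resize(cell, (SZ, SZ))
deskewed = deskew(cell)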

Example 14: norm_crop

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def norm_crop(img, landmark, image_size=112, mode='arcface'):
  M, pose_index = estimate_norm(landmark, image_size, mode)
  warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=0.0)
  return warped 
Developer: deepinsight, Project: insightface, Lines: 6, Source: face_align.py

Example 15: transform

# Required module: import cv2 [as alias]
# Or: from cv2 import warpAffine [as alias]
def transform(data, center, output_size, scale, rotation):
    scale_ratio = float(output_size)/scale
    rot = float(rotation)*np.pi/180.0
    #translation = (output_size/2-center[0]*scale_ratio, output_size/2-center[1]*scale_ratio)
    t1 = stf.SimilarityTransform(scale=scale_ratio)
    cx = center[0]*scale_ratio
    cy = center[1]*scale_ratio
    t2 = stf.SimilarityTransform(translation=(-1*cx, -1*cy))
    t3 = stf.SimilarityTransform(rotation=rot)
    t4 = stf.SimilarityTransform(translation=(output_size/2, output_size/2))
    t = t1+t2+t3+t4
    trans = t.params[0:2]
    #print('M', scale, rotation, trans)
    cropped = cv2.warpAffine(data, trans, (output_size, output_size), borderValue=0.0)
    return cropped, trans 
Developer: deepinsight, Project: insightface, Lines: 17, Source: img_helper.py
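A hypothetical call of transform, assuming the module imports skimage.transform as stf (the snippet above uses stf.SimilarityTransform). It crops a 128x128 patch centered on a given point, scaled down from a 256-pixel reference box and rotated by 15 degrees; the path and center point are placeholders:

import skimage.transform as stf    # assumed alias used by the function above

img = cv2.imread("face.jpg")                             # placeholder path
center = np.array([256.0, 256.0], dtype=np.float32)      # placeholder crop center
cropped, trans = transform(img, center, 128, 256.0, 15)
print(cropped.shape, trans.shape)                        # (128, 128, 3) and (2, 3)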


Note: The cv2.warpAffine examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.