Python cv2.getPerspectiveTransform Method Code Examples

This article collects typical usage examples of the Python method cv2.getPerspectiveTransform. If you are unsure what cv2.getPerspectiveTransform does, how to call it, or what it looks like in practice, the curated code examples below should help. You can also explore further usage examples from the cv2 module.


The 15 code examples below demonstrate cv2.getPerspectiveTransform, sorted by popularity by default.
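Before the examples, a minimal self-contained sketch of the basic call pattern may help: cv2.getPerspectiveTransform takes four source points and four destination points, each as a 4x2 float32 array, and returns the 3x3 homography matrix that cv2.warpPerspective then applies to the image. The file names and corner coordinates below are made up for illustration.

import cv2
import numpy as np

# Four corners in the source image and the corners they should map to.
# Both arrays must be 4x2 float32, or getPerspectiveTransform raises an error.
src = np.float32([[50, 50], [450, 60], [460, 440], [40, 430]])
dst = np.float32([[0, 0], [400, 0], [400, 400], [0, 400]])

M = cv2.getPerspectiveTransform(src, dst)  # 3x3 homography matrix

img = cv2.imread('input.jpg')                     # hypothetical input file
warped = cv2.warpPerspective(img, M, (400, 400))  # dsize is (width, height)
cv2.imwrite('warped.jpg', warped)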

Example 1: crop_and_warp

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def crop_and_warp(img, crop_rect):
	"""Crops and warps a rectangular section from an image into a square of similar size."""

	# Rectangle described by top left, top right, bottom right and bottom left points
	top_left, top_right, bottom_right, bottom_left = crop_rect[0], crop_rect[1], crop_rect[2], crop_rect[3]

	# Explicitly set the data type to float32 or `getPerspectiveTransform` will throw an error
	src = np.array([top_left, top_right, bottom_right, bottom_left], dtype='float32')

	# Get the longest side in the rectangle
	side = max([
		distance_between(bottom_right, top_right),
		distance_between(top_left, bottom_left),
		distance_between(bottom_right, bottom_left),
		distance_between(top_left, top_right)
	])

	# Describe a square with side of the calculated length, this is the new perspective we want to warp to
	dst = np.array([[0, 0], [side - 1, 0], [side - 1, side - 1], [0, side - 1]], dtype='float32')

	# Gets the transformation matrix for skewing the image to fit a square by comparing the 4 before and after points
	m = cv2.getPerspectiveTransform(src, dst)

	# Performs the transformation on the original image
	return cv2.warpPerspective(img, m, (int(side), int(side))) 
Author: aakashjhawar, Project: SolveSudoku, Lines: 27, Source: SudokuExtractor.py

Example 2: align

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def align(image, points):
    """
    :param image: source image
    :param points: four corner points (top left, bottom left, bottom right, top right)
    :return: aligned image
    """
    # alignment
    origin_point = np.require(np.array(points).reshape((4, 2)), dtype=np.single)
    height = int(max(np.linalg.norm(origin_point[0] - origin_point[1]), np.linalg.norm(origin_point[2] - origin_point[3])))
    width = int(max(np.linalg.norm(origin_point[0] - origin_point[3]), np.linalg.norm(origin_point[1] - origin_point[2])))

    target_point = np.float32([[0, 0], [0, height], [width, height], [width, 0]])
    map_matrix = cv2.getPerspectiveTransform(origin_point, target_point)
    cols = width + 1
    rows = height + 1
    color = cv2.warpPerspective(image, map_matrix, (cols, rows))
    return color 
Author: SunskyF, Project: EasyPR-python, Lines: 19, Source: align.py

Example 3: Perspective_aug

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def Perspective_aug(src, strength, label=None):
    image = src
    pts_base = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
    pts1 = np.random.rand(4, 2) * random.uniform(-strength, strength) + pts_base
    pts1 = pts1.astype(np.float32)
    M = cv2.getPerspectiveTransform(pts1, pts_base)
    trans_img = cv2.warpPerspective(image, M, (src.shape[1], src.shape[0]))

    label_rotated = None
    if label is not None:
        label = label.T
        full_label = np.row_stack((label, np.ones(shape=(1, label.shape[1]))))
        label_rotated = np.dot(M, full_label)
        # Divide by the homogeneous coordinate; without this the projected
        # points are only correct for affine transforms, not perspective ones.
        label_rotated = label_rotated[:2] / label_rotated[2]
        label_rotated = label_rotated.astype(np.int32)
        label_rotated = label_rotated.T
    return trans_img, label_rotated 
Author: 610265158, Project: face_landmark, Lines: 19, Source: augmentation.py

Example 4: pivot_stitch

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def pivot_stitch(img, wd):
    # Stitch the area in between
    D = stitch(img[:, 1280 - wd:1280], img[:, 1280:1280 + wd], sigma=15.0)

    # Warp backwards: project two reference points through the stitching
    # homography H, normalising by the homogeneous coordinate
    pt1 = np.dot(D['H'], [wd, 400, 1])
    pt3 = np.dot(D['H'], [wd, 800, 1])
    pt1 = pt1 / pt1[2]
    pt3 = pt3 / pt3[2]
    src = np.zeros((4, 2), np.float32)
    src[0] = [0, 0]
    src[1] = pt1[:2]
    src[2] = [0, 1280]
    src[3] = pt3[:2]
    dst = np.array(src)
    dst[1] = [2 * wd - 1, 400]
    dst[3] = [2 * wd - 1, 800]

    result = np.copy(img)
    M = cv2.getPerspectiveTransform(src, dst)
    result[:, 1280 - wd:1280 + wd] = cv2.warpPerspective(D['res'], M, (2 * wd, 1280))
    return result 
Author: cynricfu, Project: dual-fisheye-video-stitching, Lines: 27, Source: demo.py

Example 5: do_rotation_transform

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def do_rotation_transform(image, mask, angle=0):
    height, width = image.shape[:2]
    cc = np.cos(angle / 180 * np.pi)
    ss = np.sin(angle / 180 * np.pi)
    rotate_matrix = np.array([[cc, -ss], [ss, cc]])

    box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ], np.float32)
    box1 = box0 - np.array([width / 2, height / 2])
    box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2, height / 2])

    box0 = box0.astype(np.float32)
    box1 = box1.astype(np.float32)
    mat = cv2.getPerspectiveTransform(box0, box1)

    image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_REFLECT_101,
                                borderValue=(0, 0, 0,))
    mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_NEAREST,
                               borderMode=cv2.BORDER_REFLECT_101,
                               borderValue=(0, 0, 0,))
    # mask = (mask > 0.5).astype(np.float32)
    return image, mask 
Author: tugstugi, Project: pytorch-saltnet, Lines: 24, Source: unet_transforms.py

Example 6: do_horizontal_shear

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def do_horizontal_shear(image, mask, scale=0):
    height, width = image.shape[:2]
    dx = int(scale * width)

    box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ], np.float32)
    box1 = np.array([[+dx, 0], [width + dx, 0], [width - dx, height], [-dx, height], ], np.float32)

    box0 = box0.astype(np.float32)
    box1 = box1.astype(np.float32)
    mat = cv2.getPerspectiveTransform(box0, box1)

    image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_REFLECT_101, borderValue=(0, 0, 0,))
    mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_NEAREST,
                               borderMode=cv2.BORDER_REFLECT_101, borderValue=(0, 0, 0,))
    # mask = (mask > 0.5).astype(np.float32)
    return image, mask 
Author: tugstugi, Project: pytorch-saltnet, Lines: 19, Source: unet_transforms.py

Example 7: per_trans

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def per_trans(img,cols,rows,img_path_mod):
    # 3 perspective transformations
    # transformation 1
    pts7 = np.float32([[2,3],[93,4],[5,90],[92,91]])
    pts8 = np.float32([[0,0],[96,0],[0,96],[96,96]])
    M8 = cv2.getPerspectiveTransform(pts7,pts8)
    dst8 = cv2.warpPerspective(img,M8,(96,96))
    cv2.imwrite(img_path_mod + '_pt1.jpg',dst8)
    #transformation 2
    pts9 = np.float32([[6,7],[89,8],[9,87],[85,88]])
    pts10 = np.float32([[0,0],[96,0],[0,96],[96,96]])
    M9 = cv2.getPerspectiveTransform(pts9,pts10)
    dst9 = cv2.warpPerspective(img,M9,(96,96))
    cv2.imwrite(img_path_mod + '_pt2.jpg',dst9)
    #transformation 3
    pts11 = np.float32([[10,11],[93,12],[13,82],[83,84]])
    pts12 = np.float32([[0,0],[96,0],[0,96],[96,96]])
    M10 = cv2.getPerspectiveTransform(pts11,pts12)
    dst10 = cv2.warpPerspective(img,M10,(96,96))
    cv2.imwrite(img_path_mod + '_pt3.jpg',dst10) 
Author: muxspace, Project: facial_expressions, Lines: 22, Source: cBuckley.py

Example 8: perspective_transform

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def perspective_transform(img, points):
    """Applies a 4-point perspective transformation in `img` so that `points`
    are the new corners."""
    source = np.array(
        points,
        dtype="float32")

    dest = np.array([
        [TRANSF_SIZE, TRANSF_SIZE],
        [0, TRANSF_SIZE],
        [0, 0],
        [TRANSF_SIZE, 0]],
        dtype="float32")

    transf = cv2.getPerspectiveTransform(source, dest)
    warped = cv2.warpPerspective(img, transf, (TRANSF_SIZE, TRANSF_SIZE))
    return warped 
Author: rbaron, Project: omr, Lines: 19, Source: omr.py

Example 9: shift_scale_rotate

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def shift_scale_rotate(img, angle, scale, dx, dy, borderMode=cv2.BORDER_CONSTANT):
    height, width = img.shape[:2]

    cc = math.cos(angle / 180 * math.pi) * scale
    ss = math.sin(angle / 180 * math.pi) * scale
    rotate_matrix = np.array([[cc, -ss], [ss, cc]])

    box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
    box1 = box0 - np.array([width / 2, height / 2])
    box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx * width, height / 2 + dy * height])

    box0 = box0.astype(np.float32)
    box1 = box1.astype(np.float32)
    mat = cv2.getPerspectiveTransform(box0, box1)
    img = cv2.warpPerspective(img, mat, (width, height),
                              flags=cv2.INTER_NEAREST,
                              borderMode=borderMode)

    return img 
Author: SpaceNetChallenge, Project: SpaceNet_Off_Nadir_Solutions, Lines: 21, Source: dense_transform.py

Example 10: shift_scale_rotate

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def shift_scale_rotate(img, angle, scale, dx, dy):
    height, width = img.shape[:2]

    cc = math.cos(angle/180*math.pi) * scale
    ss = math.sin(angle/180*math.pi) * scale
    rotate_matrix = np.array([[cc, -ss], [ss, cc]])

    box0 = np.array([[0, 0], [width, 0],  [width, height], [0, height], ])
    box1 = box0 - np.array([width/2, height/2])
    box1 = np.dot(box1, rotate_matrix.T) + np.array([width/2+dx*width, height/2+dy*height])

    box0 = box0.astype(np.float32)
    box1 = box1.astype(np.float32)
    mat = cv2.getPerspectiveTransform(box0, box1)
    img = cv2.warpPerspective(img, mat, (width, height),
                              flags=cv2.INTER_LINEAR,
                              borderMode=cv2.BORDER_REFLECT_101)

    return img 
Author: selimsef, Project: dsb2018_topcoders, Lines: 21, Source: functional.py

Example 11: crop_image

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def crop_image(src, points, dst_height=None):
    """
    Crop a quadrilateral region from a heat map given its four corner points.
    :param src: 8-bit single-channel image (map).
    :param points: [[x1, y1], [x2, y2], [x3, y3], [x4, y4]].
    :param dst_height: optional target length for the shorter side; width and height are scaled to match.
    :return: dst_heat_map: cropped 8-bit single-channel image (map) of the heat map.
             src_points: [[x1, y1], [x2, y2], [x3, y3], [x4, y4]] in the source image.
             crop_points: [[x1, y1], [x2, y2], [x3, y3], [x4, y4]] in the cropped image.
    """
    src_image = src.copy()
    src_points = np.float32(points)
    width = round((cal_distance(points[0], points[1]) + cal_distance(points[2], points[3])) / 2)
    height = round((cal_distance(points[1], points[2]) + cal_distance(points[3], points[0])) / 2)
    if dst_height is not None:
        ratio = dst_height / min(width, height)
        width = int(width * ratio)
        height = int(height * ratio)
    crop_points = np.float32([[0, 0], [width, 0], [width, height], [0, height]])
    perspective_mat = cv2.getPerspectiveTransform(src=src_points, dst=crop_points)
    dst_heat_map = cv2.warpPerspective(src_image, perspective_mat, (width, height),
                                       borderValue=0, borderMode=cv2.BORDER_CONSTANT)
    return dst_heat_map, src_points, crop_points 
Author: RubanSeven, Project: CRAFT_keras, Lines: 24, Source: fake_util.py

Example 12: un_warping

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def un_warping(box, src_points, crop_points):
    """
    Unwarp the character bounding boxes.
    :param box: The character bounding box.
    :param src_points: Points before crop.
    :param crop_points: Points after crop.
    :return: The character bounding boxes after unwarp.
    """
    perspective_mat = cv2.getPerspectiveTransform(src=crop_points, dst=src_points)
    new_box = list()
    for x, y in box:
        new_x = int((perspective_mat[0][0] * x + perspective_mat[0][1] * y + perspective_mat[0][2]) /
                    (perspective_mat[2][0] * x + perspective_mat[2][1] * y + perspective_mat[2][2]))
        new_y = int((perspective_mat[1][0] * x + perspective_mat[1][1] * y + perspective_mat[1][2]) /
                    (perspective_mat[2][0] * x + perspective_mat[2][1] * y + perspective_mat[2][2]))
        new_box.append([new_x, new_y])
    return new_box 
Author: RubanSeven, Project: CRAFT_keras, Lines: 19, Source: fake_util.py
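A note on the arithmetic in un_warping above: the loop applies the 3x3 homography to each point and performs the homogeneous division by hand. OpenCV can do the same with cv2.perspectiveTransform, which expects an N x 1 x 2 float array; a rough equivalent of the loop (reusing the variable names above) would be:

pts = np.float32(box).reshape(-1, 1, 2)  # N x 1 x 2, as perspectiveTransform expects
new_box = cv2.perspectiveTransform(pts, perspective_mat).reshape(-1, 2).astype(int).tolist()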

Example 13: perspective_transform

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def perspective_transform(src, dst_shape, dst_points):
        """
        Perspective Transform
        :param src: Image to transform.
        :param dst_shape: Tuple of 2 integers (rows and columns).
        :param dst_points: [[x1, y1], [x2, y2], [x3, y3], [x4, y4]].
        :return: Image after perspective transform.
        """
        img = src.copy()
        h, w = img.shape[:2]
        src_points = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
        dst_points = np.float32(dst_points)
        perspective_mat = cv2.getPerspectiveTransform(src=src_points, dst=dst_points)
        dst = cv2.warpPerspective(img, perspective_mat, (dst_shape[1], dst_shape[0]),
                                  borderValue=0, borderMode=cv2.BORDER_CONSTANT)
        return dst 
Author: RubanSeven, Project: CRAFT_keras, Lines: 18, Source: gaussian.py

Example 14: drawFloorCrop

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def drawFloorCrop(event, x, y, flags, params):
    global perspectiveMatrix, name, RENEW_TETRAGON
    imgCroppingPolygon = np.zeros_like(params['imgFloorCorners'])
    if event == cv2.EVENT_RBUTTONUP:
        cv2.destroyWindow(f'Floor Corners for {name}')
    if len(params['croppingPolygons'][name]) > 4 and event == cv2.EVENT_LBUTTONUP:
        RENEW_TETRAGON = True
        h = params['imgFloorCorners'].shape[0]
        # delete 5th extra vertex of the floor cropping tetragon
        params['croppingPolygons'][name] = np.delete(params['croppingPolygons'][name], -1, 0)
        params['croppingPolygons'][name] = params['croppingPolygons'][name] - [h,0]
        
        # Sort cropping tetragon vertices counter-clockwise starting with top left
        params['croppingPolygons'][name] = counterclockwiseSort(params['croppingPolygons'][name])
        # Get the matrix of perspective transformation
        params['croppingPolygons'][name] = np.reshape(params['croppingPolygons'][name], (4,2))
        tetragonVertices = np.float32(params['croppingPolygons'][name])
        tetragonVerticesUpd = np.float32([[0,0], [0,h], [h,h], [h,0]])
        perspectiveMatrix[name] = cv2.getPerspectiveTransform(tetragonVertices, tetragonVerticesUpd)
    if event == cv2.EVENT_LBUTTONDOWN:
        if len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON:
            params['croppingPolygons'][name] = np.array([[0,0]])
            RENEW_TETRAGON = False
        if len(params['croppingPolygons'][name]) == 1:
            params['croppingPolygons'][name][0] = [x,y]
        params['croppingPolygons'][name] = np.append(params['croppingPolygons'][name], [[x,y]], axis=0)
    if event == cv2.EVENT_MOUSEMOVE and not (len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON):
        params['croppingPolygons'][name][-1] = [x,y]
        if len(params['croppingPolygons'][name]) > 1:
            cv2.fillPoly(
                imgCroppingPolygon,
                [np.reshape(
                    params['croppingPolygons'][name],
                    (len(params['croppingPolygons'][name]),2)
                )],
                BGR_COLOR['green'], cv2.LINE_AA)
            imgCroppingPolygon = cv2.addWeighted(params['imgFloorCorners'], 1.0, imgCroppingPolygon, 0.5, 0.)
            cv2.imshow(f'Floor Corners for {name}', imgCroppingPolygon) 
Author: colinlaney, Project: animal-tracking, Lines: 40, Source: track.py

Example 15: normalize_contrs

# Required module: import cv2 [as alias]
# Or: from cv2 import getPerspectiveTransform [as alias]
def normalize_contrs(img,cntr_pts):
  ratio = img.shape[0] / 300.0
  norm_pts = np.zeros((4,2), dtype="float32")

  s = cntr_pts.sum(axis=1)
  norm_pts[0] = cntr_pts[np.argmin(s)]
  norm_pts[2] = cntr_pts[np.argmax(s)]

  d = np.diff(cntr_pts,axis=1)
  norm_pts[1] = cntr_pts[np.argmin(d)]
  norm_pts[3] = cntr_pts[np.argmax(d)]

  norm_pts *= ratio

  (top_left, top_right, bottom_right, bottom_left) = norm_pts

  width1 = np.sqrt(((bottom_right[0] - bottom_left[0]) ** 2) + ((bottom_right[1] - bottom_left[1]) ** 2))
  width2 = np.sqrt(((top_right[0] - top_left[0]) ** 2) + ((top_right[1] - top_left[1]) ** 2))
  height1 = np.sqrt(((top_right[0] - bottom_right[0]) ** 2) + ((top_right[1] - bottom_right[1]) ** 2))
  height2 = np.sqrt(((top_left[0] - bottom_left[0]) ** 2) + ((top_left[1] - bottom_left[1]) ** 2))

  max_width = max(int(width1), int(width2))
  max_height = max(int(height1), int(height2))

  dst = np.array([[0,0], [max_width -1, 0],[max_width -1, max_height -1],[0, max_height-1]], dtype="float32")
  persp_matrix = cv2.getPerspectiveTransform(norm_pts,dst)
  return cv2.warpPerspective(img,persp_matrix,(max_width,max_height)) 
Author: arturaugusto, Project: display_ocr, Lines: 29, Source: digital_display_ocr.py


Note: The cv2.getPerspectiveTransform examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not repost without permission.