

Python cv2.warpPerspective Method Code Examples

This article collects typical usage examples of the Python method cv2.warpPerspective. If you are wondering what exactly cv2.warpPerspective does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples from the cv2 module.


Below are 15 code examples of the cv2.warpPerspective method, sorted by popularity by default.
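
Before the examples, here is a minimal, self-contained sketch of the call pattern most of them share: build a 3x3 perspective matrix with cv2.getPerspectiveTransform and apply it with cv2.warpPerspective. The corner coordinates, file names, and output size are illustrative assumptions, not values from any project cited below.

import cv2
import numpy as np

# Load a test image; the path is a placeholder.
img = cv2.imread("input.jpg")

# Four source corners and where they should land in the output.
# Both arrays must be float32 with shape (4, 2).
src = np.float32([[56, 65], [368, 52], [28, 387], [389, 398]])
dst = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])

# 3x3 perspective (homography) matrix mapping src -> dst.
M = cv2.getPerspectiveTransform(src, dst)

# dsize is (width, height); pixels with no source mapping default to black.
warped = cv2.warpPerspective(img, M, (300, 300), flags=cv2.INTER_LINEAR)
cv2.imwrite("warped.jpg", warped)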

Example 1: draw_lane_fit

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def draw_lane_fit(undist, warped, Minv, left_fitx, right_fitx, ploty):
	# Drawing
	# Create an image to draw the lines on
	warp_zero = np.zeros_like(warped).astype(np.uint8)
	color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

	# Recast the x and y points into usable format for cv2.fillPoly()
	pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
	pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
	pts = np.hstack((pts_left, pts_right))

	# Draw the lane onto the warped blank image
	cv2.fillPoly(color_warp, np.int_([pts]), (0,255,0))

	# Warp the blank back to the original image space using the inverse perspective matrix (Minv)
	newwarp = cv2.warpPerspective(color_warp, Minv, (undist.shape[1], undist.shape[0]))
	# Combine the result with the original image
	result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)

	return result 
Developer: ChengZhongShen, Project: Advanced_Lane_Lines, Lines: 22, Source: image_process.py
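
Examples 1 through 3 assume a forward matrix M and its inverse Minv that were computed beforehand (and, in Examples 2 and 3, loaded from trans_pickle.p). A minimal sketch of how such a pair is typically produced for lane detection follows; the road-trapezoid coordinates are illustrative assumptions for a 1280x720 image, not values from the cited project.

import pickle
import numpy as np
import cv2

# Source trapezoid on the road and destination rectangle (assumed values).
src = np.float32([[580, 460], [700, 460], [1040, 680], [260, 680]])
dst = np.float32([[260, 0], [1040, 0], [1040, 720], [260, 720]])

M = cv2.getPerspectiveTransform(src, dst)     # camera view -> bird's-eye view
Minv = cv2.getPerspectiveTransform(dst, src)  # bird's-eye view -> camera view

with open("trans_pickle.p", "wb") as f:
    pickle.dump({"M": M, "Minv": Minv}, f)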

Example 2: wrap_images

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def wrap_images(src, dst):
	"""
	apply the perspective warp to every image in a folder
	"""
	# load M, Minv
	img_size = (1280, 720)
	pickle_file = open("../helper/trans_pickle.p", "rb")
	trans_pickle = pickle.load(pickle_file)
	M = trans_pickle["M"]
	Minv = trans_pickle["Minv"]
	# loop the file folder
	image_files = glob.glob(src+"*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		image_wraped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = dst+file_name
		print(out_image)
		# mpimg.imread returns RGB; convert to BGR before writing with cv2.imwrite
		image_wraped = cv2.cvtColor(image_wraped, cv2.COLOR_RGB2BGR)
		cv2.imwrite(out_image, image_wraped) 
Developer: ChengZhongShen, Project: Advanced_Lane_Lines, Lines: 25, Source: helpers.py

Example 3: test

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def test():
	pickle_file = open("trans_pickle.p", "rb")
	trans_pickle = pickle.load(pickle_file)
	M = trans_pickle["M"]  
	Minv = trans_pickle["Minv"]

	img_size = (1280, 720)

	image_files = glob.glob("../output_images/undistort/*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = "../output_images/perspect_trans/"+file_name
		print(out_image)
		# convert to opencv BGR format
		warped = cv2.cvtColor(warped, cv2.COLOR_RGB2BGR)
		cv2.imwrite(out_image, warped) 
Developer: ChengZhongShen, Project: Advanced_Lane_Lines, Lines: 22, Source: view_perspective.py

Example 4: _perspective_transform_augment_images

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def _perspective_transform_augment_images(self, images, random_state, parents, hooks):
    result = images
    if not self.keep_size:
        result = list(result)

    matrices, max_heights, max_widths = self._create_matrices(
        [image.shape for image in images],
        random_state
    )

    for i, (M, max_height, max_width) in enumerate(zip(matrices, max_heights, max_widths)):
        warped = cv2.warpPerspective(images[i], M, (max_width, max_height))
        if warped.ndim == 2 and images[i].ndim == 3:
            warped = np.expand_dims(warped, 2)
        if self.keep_size:
            h, w = images[i].shape[0:2]
            warped = ia.imresize_single_image(warped, (h, w))

        result[i] = warped

    return result 
Developer: neptune-ai, Project: open-solution-salt-identification, Lines: 23, Source: augmentation.py

Example 5: get_img

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def get_img(self):
        while True:
            img_name = self.image_files[self.index]
            label_name = img_name.replace('.jpg', '.png')
            img = cv2.imread(img_name)
            if img is None:
                print("load img failed:", img_name)
                self.next_img()
            else:
                break
        if self.birdeye:
            warped_img = cv2.warpPerspective(img, self.M, (4000, 4000),flags=cv2.INTER_CUBIC)
            img   = cv2.resize(warped_img, (self.cols, self.rows), interpolation=cv2.INTER_CUBIC)
        else:
            img   = cv2.resize(img, (self.cols, self.rows), interpolation=cv2.INTER_CUBIC)
        img   = img.transpose((2,0,1))
        return img, label_name 
Developer: qixuxiang, Project: Baidu_Lane_Segmentation, Lines: 19, Source: reader.py

Example 6: crop_and_warp

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def crop_and_warp(img, crop_rect):
	"""Crops and warps a rectangular section from an image into a square of similar size."""

	# Rectangle described by top left, top right, bottom right and bottom left points
	top_left, top_right, bottom_right, bottom_left = crop_rect[0], crop_rect[1], crop_rect[2], crop_rect[3]

	# Explicitly set the data type to float32 or `getPerspectiveTransform` will throw an error
	src = np.array([top_left, top_right, bottom_right, bottom_left], dtype='float32')

	# Get the longest side in the rectangle
	side = max([
		distance_between(bottom_right, top_right),
		distance_between(top_left, bottom_left),
		distance_between(bottom_right, bottom_left),
		distance_between(top_left, top_right)
	])

	# Describe a square with side of the calculated length, this is the new perspective we want to warp to
	dst = np.array([[0, 0], [side - 1, 0], [side - 1, side - 1], [0, side - 1]], dtype='float32')

	# Gets the transformation matrix for skewing the image to fit a square by comparing the 4 before and after points
	m = cv2.getPerspectiveTransform(src, dst)

	# Performs the transformation on the original image
	return cv2.warpPerspective(img, m, (int(side), int(side))) 
Developer: aakashjhawar, Project: SolveSudoku, Lines: 27, Source: SudokuExtractor.py
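
The example above relies on a distance_between helper that is not shown on this page. A minimal implementation consistent with how it is used might be:

import numpy as np

def distance_between(p1, p2):
    # Euclidean distance between two 2D points
    return np.linalg.norm(np.array(p1) - np.array(p2))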

Example 7: align

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def align(image, points):
    """
    :param image: input image to align
    :param points: four corner points of the region, reshaped to (4, 2)
    :return: aligned image
    """
    # alignment
    origin_point = np.require(np.array(points).reshape((4, 2)), dtype=np.single)
    height = int(max(np.linalg.norm(origin_point[0] - origin_point[1]), np.linalg.norm(origin_point[2] - origin_point[3])))
    width = int(max(np.linalg.norm(origin_point[0] - origin_point[3]), np.linalg.norm(origin_point[1] - origin_point[2])))

    target_point = np.float32([[0, 0], [0, height], [width, height], [width, 0]])
    map_matrix = cv2.getPerspectiveTransform(origin_point, target_point)
    cols = width + 1
    rows = height + 1
    color = cv2.warpPerspective(image, map_matrix, (cols, rows))
    return color 
Developer: SunskyF, Project: EasyPR-python, Lines: 19, Source: align.py

Example 8: _solve

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def _solve(img1, img2):
    h, w, d = img1.shape

    # step 1: Find homography of 2 images
    homo = homography(img2, img1)

    # step 2: warp image2 to image1 frame
    img2_w = cv.warpPerspective(img2, homo, (w, h))

    # step 3: resolve highlights by picking the best pixels out of two images
    im1 = _resolve_spec(img1, img2_w)

    # step 4: repeat the same process for Image2 using warped Image1
    im_w = cv.warpPerspective(im1, np.linalg.inv(homo), (w, h))
    im2 = _resolve_spec(img2, im_w)

    return im1, im2 
Developer: gmichaeljaison, Project: specularity-removal, Lines: 19, Source: main.py
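
The homography() helper used above is not shown on this page. Below is a sketch of a typical feature-based estimate using ORB matching and RANSAC; the detector choice and parameters are assumptions, not necessarily the cited project's exact method.

import cv2 as cv
import numpy as np

def homography(img_src, img_dst):
    # ORB expects 8-bit grayscale input.
    gray1 = cv.cvtColor(img_src, cv.COLOR_BGR2GRAY)
    gray2 = cv.cvtColor(img_dst, cv.COLOR_BGR2GRAY)

    # Detect keypoints and compute binary descriptors in both images.
    orb = cv.ORB_create(5000)
    kp1, des1 = orb.detectAndCompute(gray1, None)
    kp2, des2 = orb.detectAndCompute(gray2, None)

    # Match descriptors and keep the strongest matches.
    matcher = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
    matches = sorted(matcher.match(des1, des2), key=lambda m: m.distance)[:200]

    src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

    # Robustly estimate the 3x3 homography mapping img_src onto img_dst.
    H, _ = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
    return H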

Example 9: Perspective_aug

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def Perspective_aug(src,strength,label=None):
    image = src
    pts_base = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
    pts1=np.random.rand(4, 2)*random.uniform(-strength,strength)+pts_base
    pts1=pts1.astype(np.float32)
    #pts1 =np.float32([[56, 65], [368, 52], [28, 387], [389, 398]])
    M = cv2.getPerspectiveTransform(pts1, pts_base)
    trans_img = cv2.warpPerspective(image, M, (src.shape[1], src.shape[0]))

    label_rotated = None
    if label is not None:
        label = label.T
        full_label = np.vstack((label, np.ones(shape=(1, label.shape[1]))))
        label_rotated = np.dot(M, full_label)
        # normalize homogeneous coordinates before casting to int
        label_rotated = label_rotated[:2] / label_rotated[2]
        label_rotated = label_rotated.astype(np.int32)
        label_rotated = label_rotated.T
    return trans_img, label_rotated
Developer: 610265158, Project: face_landmark, Lines: 19, Source: augmentation.py

Example 10: pivot_stitch

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def pivot_stitch(img, wd):
    # Stitch the area in between
    D = stitch(img[:, 1280 - wd:1280], img[:, 1280:1280 + wd], sigma=15.0)

    # Warp backwards
    pt1 = np.dot(D['H'], [wd, 400, 1])
    pt3 = np.dot(D['H'], [wd, 800, 1])
    pt1 = pt1 / pt1[2]
    pt3 = pt3 / pt3[2]
    src = np.zeros((4, 2), np.float32)
    src[0] = [0, 0]
    src[1] = pt1[:2]
    src[2] = [0, 1280]
    src[3] = pt3[:2]
    dst = np.array(src)
    dst[1] = [2 * wd - 1, 400]
    dst[3] = [2 * wd - 1, 800]

    result = np.copy(img)
    M = cv2.getPerspectiveTransform(src, dst)
    # warp the re-stitched strip back into the pivot region
    # (dsize is (width, height) = (2 * wd, 1280))
    result[:, 1280 - wd:1280 + wd] = cv2.warpPerspective(D['res'], M, (2 * wd, 1280))
    return result 
Developer: cynricfu, Project: dual-fisheye-video-stitching, Lines: 27, Source: .demo.py

Example 11: _normalize_image

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def _normalize_image(self, image: np.ndarray,
                         eye_or_face: FaceParts) -> None:
        camera_matrix_inv = np.linalg.inv(self.camera.camera_matrix)
        normalized_camera_matrix = self.normalized_camera.camera_matrix

        scale = self._get_scale_matrix(eye_or_face.distance)
        conversion_matrix = scale @ eye_or_face.normalizing_rot.as_matrix()

        projection_matrix = normalized_camera_matrix @ conversion_matrix @ camera_matrix_inv

        normalized_image = cv2.warpPerspective(
            image, projection_matrix,
            (self.normalized_camera.width, self.normalized_camera.height))

        if eye_or_face.name in {FacePartsName.REYE, FacePartsName.LEYE}:
            normalized_image = cv2.cvtColor(normalized_image,
                                            cv2.COLOR_BGR2GRAY)
            normalized_image = cv2.equalizeHist(normalized_image)
        eye_or_face.normalized_image = normalized_image 
Developer: hysts, Project: pytorch_mpiigaze, Lines: 21, Source: head_pose_normalizer.py

Example 12: warpImage

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def warpImage(src, theta, phi, gamma, scale, fovy):
    halfFovy = fovy * 0.5
    d = math.hypot(src.shape[1], src.shape[0])
    sideLength = scale * d / math.cos(deg2Rad(halfFovy))
    sideLength = np.int32(sideLength)

    M = warpMatrix(src.shape[1], src.shape[0], theta, phi, gamma, scale, fovy)
    dst = cv2.warpPerspective(src, M, (sideLength, sideLength))
    mid_x = mid_y = dst.shape[0] // 2
    target_x = target_y = src.shape[0] // 2
    offset = (target_x % 2)

    if len(dst.shape) == 3:
        dst = dst[mid_y - target_y:mid_y + target_y + offset,
              mid_x - target_x:mid_x + target_x + offset,
              :]
    else:
        dst = dst[mid_y - target_y:mid_y + target_y + offset,
              mid_x - target_x:mid_x + target_x + offset]

    return dst 
Developer: timctho, Project: convolutional-pose-machines-tensorflow, Lines: 23, Source: cpm_utils.py

Example 13: do_rotation_transform

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def do_rotation_transform(image, mask, angle=0):
    height, width = image.shape[:2]
    cc = np.cos(angle / 180 * np.pi)
    ss = np.sin(angle / 180 * np.pi)
    rotate_matrix = np.array([[cc, -ss], [ss, cc]])

    box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ], np.float32)
    box1 = box0 - np.array([width / 2, height / 2])
    box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2, height / 2])

    box0 = box0.astype(np.float32)
    box1 = box1.astype(np.float32)
    mat = cv2.getPerspectiveTransform(box0, box1)

    image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_REFLECT_101,
                                borderValue=(0, 0, 0,))
    mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_NEAREST,
                               borderMode=cv2.BORDER_REFLECT_101,
                               borderValue=(0, 0, 0,))
    # mask = (mask > 0.5).astype(np.float32)
    return image, mask 
Developer: tugstugi, Project: pytorch-saltnet, Lines: 24, Source: unet_transforms.py

Example 14: do_horizontal_shear

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def do_horizontal_shear(image, mask, scale=0):
    height, width = image.shape[:2]
    dx = int(scale * width)

    box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ], np.float32)
    box1 = np.array([[+dx, 0], [width + dx, 0], [width - dx, height], [-dx, height], ], np.float32)

    box0 = box0.astype(np.float32)
    box1 = box1.astype(np.float32)
    mat = cv2.getPerspectiveTransform(box0, box1)

    image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_REFLECT_101, borderValue=(0, 0, 0,))
    mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_NEAREST,
                               borderMode=cv2.BORDER_REFLECT_101, borderValue=(0, 0, 0,))
    # mask = (mask > 0.5).astype(np.float32)
    return image, mask 
Developer: tugstugi, Project: pytorch-saltnet, Lines: 19, Source: unet_transforms.py
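
A hypothetical usage of the two augmentations above, applied to a synthetic image/mask pair (the sizes and parameter ranges are illustrative assumptions):

import numpy as np

image = (np.random.rand(128, 128) * 255).astype(np.float32)
mask = np.zeros((128, 128), np.float32)
mask[32:96, 32:96] = 1.0

image, mask = do_rotation_transform(image, mask, angle=np.random.uniform(-15, 15))
image, mask = do_horizontal_shear(image, mask, scale=np.random.uniform(-0.1, 0.1))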

Example 15: copyTextureTest

# Required module: import cv2 [as alias]
# Or: from cv2 import warpPerspective [as alias]
def copyTextureTest(options):
    testdir = 'texture_test/'
    for index in range(1):
        planes = np.load(testdir + '/planes_' + str(index) + '.npy')
        image = cv2.imread(testdir + '/image_' + str(index) + '.png')
        segmentations = np.load(testdir + '/segmentations_' + str(index) + '.npy')
        segmentation = np.argmax(segmentations, axis=2)
        plane_depths = calcPlaneDepths(planes, WIDTH, HEIGHT)
        
        textureImage = cv2.imread('../textures/texture_0.jpg')
        textureImage = cv2.resize(textureImage, (WIDTH, HEIGHT), interpolation=cv2.INTER_LINEAR)
        floorPlaneIndex = findFloorPlane(planes, segmentation)
        if floorPlaneIndex == -1:
            continue
        mask = segmentation == floorPlaneIndex
        uv = findCornerPoints(planes[floorPlaneIndex], plane_depths[:, :, floorPlaneIndex], mask)
        source_uv = np.array([[0, 0], [0, HEIGHT], [WIDTH, 0], [WIDTH, HEIGHT]])

        h, status = cv2.findHomography(source_uv, uv)
        textureImageWarped = cv2.warpPerspective(textureImage, h, (WIDTH, HEIGHT))
        image[mask] = textureImageWarped[mask]
        cv2.imwrite(testdir + '/' + str(index) + '_texture.png', textureImageWarped)
        cv2.imwrite(testdir + '/' + str(index) + '_result.png', image)
        continue
    return 
Developer: art-programmer, Project: PlaneNet, Lines: 27, Source: CopyTexture.py


Note: The cv2.warpPerspective method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.