

Python cv2.WARP_INVERSE_MAP attribute code examples

This article collects typical usage examples of the cv2.WARP_INVERSE_MAP attribute in Python. If you are wondering what cv2.WARP_INVERSE_MAP does, how to use it, or what real code using it looks like, the selected examples below should help. You can also explore further usage examples from the cv2 module that this attribute belongs to.


The sections below present 15 code examples of the cv2.WARP_INVERSE_MAP attribute, sorted by popularity by default.
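Before the individual examples, here is a minimal sketch of what the flag does (the image and matrix are illustrative and not taken from any of the projects below): passing cv2.WARP_INVERSE_MAP in the flags of cv2.warpAffine tells OpenCV to interpret the given matrix as the destination-to-source mapping, so the same matrix can be reused to undo a warp without calling cv2.invertAffineTransform.

import cv2
import numpy as np

# Illustrative 100x100 gradient image and a simple shear matrix
img = np.tile(np.arange(100, dtype=np.uint8), (100, 1))
M = np.float32([[1, 0.2, 0], [0, 1, 0]])

# Forward warp: M maps source pixels to destination pixels
forward = cv2.warpAffine(img, M, (100, 100), flags=cv2.INTER_LINEAR)

# Inverse warp: M is read as destination -> source, i.e. the shear is undone
restored = cv2.warpAffine(forward, M, (100, 100),
                          flags=cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP)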

Example 1: deskew

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def deskew(image, image_shape, negated=False):
    """
    This method deskews an image using moments
    :param image: a numpy nd array input image
    :param image_shape: a tuple denoting the image's shape
    :param negated: a boolean flag telling whether the input image is a negated one

    :returns: a numpy nd array deskewed image
    """
    
    # negate the image
    if not negated:
        image = 255-image

    # calculate the moments of the image
    m = cv2.moments(image)
    if abs(m['mu02']) < 1e-2:
        return image.copy()

    # calculating the skew
    skew = m['mu11']/m['mu02']
    M = numpy.float32([[1, skew, -0.5*image_shape[0]*skew], [0,1,0]])
    img = cv2.warpAffine(image, M, image_shape, flags=cv2.WARP_INVERSE_MAP|cv2.INTER_LINEAR)
    
    return img 
Developer: vsvinayak, Project: mnist-helper, Lines of code: 27, Source file: mnist_helpers.py
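A minimal usage sketch for the deskew function above (the file name and the 28x28 shape are assumptions for illustration, not taken from the project):

import cv2
import numpy

digit = cv2.imread("digit.png", cv2.IMREAD_GRAYSCALE)   # hypothetical 28x28 grayscale digit
straightened = deskew(digit, (28, 28), negated=True)    # negated=True skips the 255-image inversion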

Example 2: _getMaskOutput

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def _getMaskOutput(self, netOutput):
        netOutput = netOutput.transpose(0, 2, 3, 1)        
        MaskOutput = [[] for _ in range(self.bz)]
        
        idx = 0
        for i, (img, kpts) in enumerate(zip(self.batchimgs, self.batchkpts)):
            height, width = img.shape[0:2]
            for j in range(len(kpts)):
                predmap = netOutput[idx]
                H_e2e = self.maskAlignMatrixs[i][j]
                
                pred_e2e = cv2.warpAffine(predmap, H_e2e[0:2], (width, height), 
                                          borderMode=cv2.BORDER_CONSTANT,
                                          flags=cv2.WARP_INVERSE_MAP+cv2.INTER_LINEAR) 
                               
                pred_e2e = pred_e2e[:, :, 1]
                pred_e2e[pred_e2e>0.5] = 1
                pred_e2e[pred_e2e<=0.5] = 0
                mask = pred_e2e.astype(np.uint8) 
                MaskOutput[i].append(mask)                
                
                idx += 1
        return MaskOutput 
Developer: liruilong940607, Project: Pose2Seg, Lines of code: 25, Source file: build_model.py

Example 3: get_hog

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def get_hog() : 
    winSize = (20,20)
    blockSize = (10,10)
    blockStride = (5,5)
    cellSize = (10,10)
    nbins = 9
    derivAperture = 1
    winSigma = -1.
    histogramNormType = 0
    L2HysThreshold = 0.2
    gammaCorrection = 1
    nlevels = 64
    signedGradient = True

    hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,derivAperture,winSigma,histogramNormType,L2HysThreshold,gammaCorrection,nlevels, signedGradient)

    return hog

# This assignment was unreachable where it originally sat after the return
# statement, so it is kept at module scope here instead.
affine_flags = cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR
Developer: hoanglehaithanh, Project: Traffic-Sign-Detection, Lines of code: 20, Source file: classification.py
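A minimal usage sketch for get_hog above (the file name is hypothetical; the 20x20 size matches winSize in the descriptor):

hog = get_hog()
patch = cv2.imread("sign.png", cv2.IMREAD_GRAYSCALE)    # hypothetical image file
patch = cv2.resize(patch, (20, 20))                     # must match winSize
features = hog.compute(patch)                           # HOG feature vector, e.g. for an SVM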

Example 4: __call__

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def __call__(self, sample):
        fg, alpha = sample['fg'], sample['alpha']
        rows, cols, ch = fg.shape
        if np.maximum(rows, cols) < 1024:
            params = self.get_params((0, 0), self.translate, self.scale, self.shear, self.flip, fg.size)
        else:
            params = self.get_params(self.degrees, self.translate, self.scale, self.shear, self.flip, fg.size)

        center = (cols * 0.5 + 0.5, rows * 0.5 + 0.5)
        M = self._get_inverse_affine_matrix(center, *params)
        M = np.array(M).reshape((2, 3))

        fg = cv2.warpAffine(fg, M, (cols, rows),
                            flags=maybe_random_interp(cv2.INTER_NEAREST) + cv2.WARP_INVERSE_MAP)
        alpha = cv2.warpAffine(alpha, M, (cols, rows),
                               flags=maybe_random_interp(cv2.INTER_NEAREST) + cv2.WARP_INVERSE_MAP)

        sample['fg'], sample['alpha'] = fg, alpha

        return sample 
Developer: Yaoyi-Li, Project: GCA-Matting, Lines of code: 22, Source file: data_generator.py

Example 5: get_full_frame_mask

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def get_full_frame_mask(self, width, height):
        """ Return the stored mask in a full size frame of the given dimensions

        Parameters
        ----------
        width: int
            The width of the original frame that the mask was extracted from
        height: int
            The height of the original frame that the mask was extracted from

        Returns
        -------
        numpy.ndarray: The mask affined to the original full frame of the given dimensions
        """
        frame = np.zeros((width, height, 1), dtype="uint8")
        mask = cv2.warpAffine(self.mask,
                              self._affine_matrix,
                              (width, height),
                              frame,
                              flags=cv2.WARP_INVERSE_MAP | self._interpolator,
                              borderMode=cv2.BORDER_CONSTANT)
        logger.trace("mask shape: %s, mask dtype: %s, mask min: %s, mask max: %s",
                     mask.shape, mask.dtype, mask.min(), mask.max())
        return mask 
Developer: deepfakes, Project: faceswap, Lines of code: 26, Source file: faces_detect.py

Example 6: _align_rois

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def _align_rois(self, face_images, face_landmarks):
        assert len(face_images) == len(face_landmarks), \
            "Input lengths differ, got %s and %s" % \
            (len(face_images), len(face_landmarks))

        for image, image_landmarks in zip(face_images, face_landmarks):
            assert len(image.shape) == 4, "Face image is expected"
            image = image[0]

            scale = np.array((image.shape[-1], image.shape[-2]))
            desired_landmarks = np.array(self.REFERENCE_LANDMARKS, dtype=np.float64) * scale
            landmarks = image_landmarks.get_array() * scale

            transform = FaceIdentifier.get_transform(desired_landmarks, landmarks)
            img = image.transpose((1, 2, 0))
            cv2.warpAffine(img, transform, tuple(scale), img,
                           flags=cv2.WARP_INVERSE_MAP)
            image[:] = img.transpose((2, 0, 1)) 
Developer: opencv, Project: open_model_zoo, Lines of code: 20, Source file: face_identifier.py

Example 7: deskew

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def deskew(img):
	m = cv2.moments(img)
	if abs(m['mu02']) < 1e-2:
		return img.copy()
	skew = m['mu11']/m['mu02']
	M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
	img = cv2.warpAffine(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
	return img
# From an OpenCV sample; used for SVM training
Developer: wzh191920, Project: License-Plate-Recognition, Lines of code: 11, Source file: predict.py

Example 8: transformation_points

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def transformation_points(src_img, src_points, dst_img, dst_points):
    # The landmark sets are expected as numpy matrices (np.matrix), so the '*'
    # operations below are matrix products (Procrustes-style alignment); with
    # plain ndarrays '*' would be element-wise.
    src_points = src_points.astype(np.float64)
    dst_points = dst_points.astype(np.float64)

    c1 = np.mean(src_points, axis=0)
    c2 = np.mean(dst_points, axis=0)

    src_points -= c1
    dst_points -= c2

    s1 = np.std(src_points)
    s2 = np.std(dst_points)

    src_points /= s1
    dst_points /= s2

    u, s, vt = np.linalg.svd(src_points.T * dst_points)
    r = (u * vt).T

    m = np.vstack([np.hstack(((s2 / s1) * r, c2.T - (s2 / s1) * r * c1.T)), np.matrix([0., 0., 1.])])

    output = cv2.warpAffine(dst_img, m[:2],
                            (src_img.shape[1], src_img.shape[0]),
                            borderMode=cv2.BORDER_TRANSPARENT,
                            flags=cv2.WARP_INVERSE_MAP)

    return output 
Developer: gyp03, Project: yry, Lines of code: 29, Source file: morpher.py

Example 9: tran_matrix

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def tran_matrix(src_img, src_points, dst_img, dst_points):
    # cv2.findHomography returns (H, mask); only the top two rows of the 3x3
    # homography H are used, i.e. an affine approximation of the mapping.
    h = cv2.findHomography(dst_points, src_points)
    output = cv2.warpAffine(dst_img, h[0][:2], (src_img.shape[1], src_img.shape[0]),
                            borderMode=cv2.BORDER_TRANSPARENT,
                            flags=cv2.WARP_INVERSE_MAP)

    return output 
Developer: gyp03, Project: yry, Lines of code: 9, Source file: morpher.py

Example 10: de_skew

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def de_skew(image, width):
	# Grab the width and height of the image and compute moments for the image
	(h, w) = image.shape[:2]
	moments = cv2.moments(image)
	
	# De-skew the image by applying an affine transformation
	skew = moments["mu11"] / moments["mu02"]
	matrix = np.float32([[1, skew, -0.5 * w * skew], [0, 1, 0]])
	image = cv2.warpAffine(image, matrix, (w, h), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)

	# Resize the image to have a constant width
	image = imutils.resize(image, width=width)
	
	# Return the de-skewed image
	return image 
Developer: hsSam, Project: PracticalPythonAndOpenCV_CaseStudies, Lines of code: 17, Source file: dataset.py

Example 11: deskew

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def deskew(img):
    m = cv2.moments(img)
    if abs(m['mu02']) < 1e-2:
        return img.copy()
    skew = m['mu11']/m['mu02']
    M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
    img = cv2.warpAffine(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
    return img 
Developer: makelove, Project: OpenCV-Python-Tutorial, Lines of code: 10, Source file: digits.py

Example 12: transform

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def transform(image):
  CAL_VAL = np.loadtxt("calibrated_value.txt")
  imheight = np.size(image, 0)
  imwidth = np.size(image, 1)
  M = getTransform(imwidth, imheight, CAL_VAL[2], CAL_VAL[3], CAL_VAL[4], CAL_VAL[5], CAL_VAL[6], CAL_VAL[7], CAL_VAL[8])
  # Combine the flags with a bitwise OR and pass them via 'flags'; the original
  # 'cv2.INTER_CUBIC or cv2.WARP_INVERSE_MAP' evaluated to INTER_CUBIC alone and
  # was passed in the 'dst' position of warpPerspective.
  transformed = cv2.warpPerspective(image, M, (imwidth, imheight),
                                    flags=cv2.INTER_CUBIC | cv2.WARP_INVERSE_MAP)
  return transformed
Developer: muchlisinadi, Project: ALPR-Indonesia, Lines of code: 9, Source file: imutils.py

Example 13: detransform

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def detransform(image):
  CAL_VAL = np.loadtxt("calibrated_value.txt")
  imheight = np.size(image, 0)
  imwidth = np.size(image, 1)
  M = getTransform(imwidth, imheight, (0-CAL_VAL[2]), (0-CAL_VAL[3]), (0-CAL_VAL[4]), (0-CAL_VAL[5]), (0-CAL_VAL[6]), (1-CAL_VAL[7]), (1-CAL_VAL[8]))
  #M = getTransform(imwidth, imheight, 0.0, 0.0, 0.0, 0, 0, 1.0, 1.0)
  # As in transform() above, combine the flags with '|' and pass them as 'flags'.
  detransformed = cv2.warpPerspective(image, M, (imwidth, imheight),
                                      flags=cv2.INTER_CUBIC | cv2.WARP_INVERSE_MAP)
  return detransformed
Developer: muchlisinadi, Project: ALPR-Indonesia, Lines of code: 10, Source file: imutils.py

Example 14: warp_im

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(
        im,
        M[:2], (dshape[1], dshape[0]),
        dst=output_im,
        borderMode=cv2.BORDER_TRANSPARENT,
        flags=cv2.WARP_INVERSE_MAP)
    return output_im 
Developer: vipstone, Project: faceai, Lines of code: 11, Source file: faceswap.py

Example 15: deskew

# Required module: import cv2 [as alias]
# Or: from cv2 import WARP_INVERSE_MAP [as alias]
def deskew(img):
    m = cv2.moments(img)
    if abs(m['mu02']) < 1e-2:
        return img.copy()
    skew = m['mu11']/m['mu02']
    M = np.float32([[1, skew, -0.5*SIZE*skew], [0, 1, 0]])
    img = cv2.warpAffine(img, M, (SIZE, SIZE), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
    return img 
Developer: hoanglehaithanh, Project: Traffic-Sign-Detection, Lines of code: 10, Source file: classification.py


Note: The cv2.WARP_INVERSE_MAP examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.