

Python cv2.filter2D Method Code Examples

This article compiles typical usage examples of the cv2.filter2D method in Python. If you are wondering how to use cv2.filter2D, how to call it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples from the cv2 module.


The following presents 15 code examples of the cv2.filter2D method, sorted by popularity by default. Most examples also assume that numpy has been imported as np (and, where used, scipy.ndimage and the standard random module).
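Before the collected examples, here is a minimal, self-contained sketch of the basic call pattern (the file name and kernel below are illustrative assumptions, not taken from any project in this article). cv2.filter2D(src, ddepth, kernel) correlates src with kernel; passing ddepth=-1 keeps the output depth the same as the input.

import cv2
import numpy as np

# Illustrative sketch: smooth an image with a normalized 5x5 box (mean) kernel.
img = cv2.imread('input.png')               # hypothetical input file
kernel = np.ones((5, 5), np.float32) / 25   # weights sum to 1
smoothed = cv2.filter2D(img, -1, kernel)    # ddepth=-1: same depth as input
cv2.imwrite('smoothed.png', smoothed)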

Example 1: ssim

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def ssim(img1, img2):
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean() 
Author: cszn, Project: KAIR, Lines of code: 23, Source: utils_image.py
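For reference, the snippet above implements the standard SSIM index over an 11x11 Gaussian window (sigma = 1.5), with C1 = (0.01*255)^2 and C2 = (0.03*255)^2; in LaTeX notation:

SSIM(x, y) = \frac{(2\mu_x\mu_y + C_1)(2\sigma_{xy} + C_2)}{(\mu_x^2 + \mu_y^2 + C_1)(\sigma_x^2 + \sigma_y^2 + C_2)}

where the local means, variances, and covariance are the Gaussian-weighted statistics computed by the cv2.filter2D calls, and the [5:-5, 5:-5] slicing keeps only the "valid" region of the 11x11 window.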

Example 2: filter2D

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def filter2D(input_arr, filter):
    """
    2D filtering (i.e. convolution but without mirroring the filter).  Mostly a convenience wrapper
    around OpenCV.

    Parameters
    ----------
    input_arr : numpy array, HxW size
    filter : numpy array, H1xW1 size
    
    Returns
    -------
    result : numpy array, HxW size

    """
    return cv2.filter2D(input_arr, 
                        -1, 
                        filter,
                        borderType=cv2.BORDER_CONSTANT) 
Author: bbabenko, Project: simple_convnet, Lines of code: 21, Source: helpers.py
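One clarification of the docstring above: cv2.filter2D computes correlation, so the kernel is applied without mirroring. A minimal sketch, using an arbitrary asymmetric kernel as an illustrative assumption, of how a true convolution could be obtained with the same call:

import cv2
import numpy as np

# Deliberately asymmetric kernel so that correlation and convolution differ.
kernel = np.array([[0., 1., 0.],
                   [0., 0., 0.],
                   [0., 0., 0.]], dtype=np.float32)
img = np.random.rand(8, 8).astype(np.float32)

correlated = cv2.filter2D(img, -1, kernel, borderType=cv2.BORDER_CONSTANT)
# True convolution: flip the kernel across both axes before filtering.
convolved = cv2.filter2D(img, -1, cv2.flip(kernel, -1), borderType=cv2.BORDER_CONSTANT)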

Example 3: batch_filter3D

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def batch_filter3D(input_arr, filters):
    """
    3D filtering (i.e. convolution but without mirroring the filter).

    Parameters
    ----------
    input_arr : numpy array, NxHxWxC size where N is the number of images to be filtered
    filters : numpy array, H1xW1xCxF size, where F is the number of filters
    
    Returns
    -------
    result : numpy array, NxHxWxF size
    
    """
    assert input_arr.shape[3] == filters.shape[2]
    num_input = input_arr.shape[0]
    output = np.zeros(input_arr.shape[:3] + (filters.shape[-1],))
    for n in range(num_input):
        input1 = input_arr[n]
        for f in range(filters.shape[-1]):
            for c in range(filters.shape[-2]):
                output[n,:,:,f] += filter2D(input1[...,c].copy(), filters[...,c,f].copy())
    return output 
Author: bbabenko, Project: simple_convnet, Lines of code: 25, Source: helpers.py

Example 4: ssim

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def ssim(img1, img2):

    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean() 
Author: Paper99, Project: SRFBN_CVPR19, Lines of code: 24, Source: util.py

Example 5: push_heuristic

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def push_heuristic(self, depth_heightmap):

        num_rotations = 16

        for rotate_idx in range(num_rotations):
            rotated_heightmap = ndimage.rotate(depth_heightmap, rotate_idx*(360.0/num_rotations), reshape=False, order=0)
            valid_areas = np.zeros(rotated_heightmap.shape)
            valid_areas[ndimage.interpolation.shift(rotated_heightmap, [0,-25], order=0) - rotated_heightmap > 0.02] = 1
            # valid_areas = np.multiply(valid_areas, rotated_heightmap)
            blur_kernel = np.ones((25,25),np.float32)/9
            valid_areas = cv2.filter2D(valid_areas, -1, blur_kernel)
            tmp_push_predictions = ndimage.rotate(valid_areas, -rotate_idx*(360.0/num_rotations), reshape=False, order=0)
            tmp_push_predictions.shape = (1, rotated_heightmap.shape[0], rotated_heightmap.shape[1])

            if rotate_idx == 0:
                push_predictions = tmp_push_predictions
            else:
                push_predictions = np.concatenate((push_predictions, tmp_push_predictions), axis=0)

        best_pix_ind = np.unravel_index(np.argmax(push_predictions), push_predictions.shape)
        return best_pix_ind 
Author: andyzeng, Project: visual-pushing-grasping, Lines of code: 23, Source: trainer.py

Example 6: grasp_heuristic

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def grasp_heuristic(self, depth_heightmap):

        num_rotations = 16

        for rotate_idx in range(num_rotations):
            rotated_heightmap = ndimage.rotate(depth_heightmap, rotate_idx*(360.0/num_rotations), reshape=False, order=0)
            valid_areas = np.zeros(rotated_heightmap.shape)
            valid_areas[np.logical_and(rotated_heightmap - ndimage.interpolation.shift(rotated_heightmap, [0,-25], order=0) > 0.02, rotated_heightmap - ndimage.interpolation.shift(rotated_heightmap, [0,25], order=0) > 0.02)] = 1
            # valid_areas = np.multiply(valid_areas, rotated_heightmap)
            blur_kernel = np.ones((25,25),np.float32)/9
            valid_areas = cv2.filter2D(valid_areas, -1, blur_kernel)
            tmp_grasp_predictions = ndimage.rotate(valid_areas, -rotate_idx*(360.0/num_rotations), reshape=False, order=0)
            tmp_grasp_predictions.shape = (1, rotated_heightmap.shape[0], rotated_heightmap.shape[1])

            if rotate_idx == 0:
                grasp_predictions = tmp_grasp_predictions
            else:
                grasp_predictions = np.concatenate((grasp_predictions, tmp_grasp_predictions), axis=0)

        best_pix_ind = np.unravel_index(np.argmax(grasp_predictions), grasp_predictions.shape)
        return best_pix_ind 
Author: andyzeng, Project: visual-pushing-grasping, Lines of code: 23, Source: trainer.py

Example 7: preprocess_filt_hps

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def preprocess_filt_hps(self):
        '''
        Do the preprocessing using (orig - low-pass filter) = high-pass filter (3.9/5.3 min).
        '''
        import cv2
        import numpy as np
        

        if self.zeroMask is not None:
            self.zeroMask = (self.I1 == 0)

        kernel = -np.ones((self.WallisFilterWidth,self.WallisFilterWidth), dtype=np.float32)

        kernel[int((self.WallisFilterWidth-1)/2),int((self.WallisFilterWidth-1)/2)] = kernel.size - 1

        kernel = kernel / kernel.size

#        pdb.set_trace()

        self.I1 = cv2.filter2D(self.I1,-1,kernel,borderType=cv2.BORDER_CONSTANT)

        self.I2 = cv2.filter2D(self.I2,-1,kernel,borderType=cv2.BORDER_CONSTANT) 
Author: leiyangleon, Project: autoRIFT, Lines of code: 24, Source: autoRIFT.py
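The kernel constructed above is a centered spike minus a box average: every entry equals -1/N except the center, which equals (N-1)/N, where N = WallisFilterWidth**2, so the filter output is pixel minus local mean, i.e. a high-pass response. A minimal sketch checking that equivalence, assuming an illustrative window width of 5:

import cv2
import numpy as np

w = 5                                         # illustrative WallisFilterWidth
kernel = -np.ones((w, w), dtype=np.float32)
kernel[w // 2, w // 2] = kernel.size - 1
kernel = kernel / kernel.size                 # center (N-1)/N, elsewhere -1/N

img = np.random.rand(64, 64).astype(np.float32)
box = np.ones((w, w), dtype=np.float32) / (w * w)
highpass = cv2.filter2D(img, -1, kernel, borderType=cv2.BORDER_CONSTANT)
lowpass = cv2.filter2D(img, -1, box, borderType=cv2.BORDER_CONSTANT)
# By linearity, high-pass = original - box mean (same border handling on both sides).
assert np.allclose(highpass, img - lowpass, atol=1e-5)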

Example 8: preprocess_filt_sob

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def preprocess_filt_sob(self):
        '''
        Do the preprocessing using a Sobel filter (4.5/5.8 min).
        '''
        import cv2
        import numpy as np
        
        
        
        if self.zeroMask is not None:
            self.zeroMask = (self.I1 == 0)
        
        sobelx = cv2.getDerivKernels(1,0,self.WallisFilterWidth)
        
        kernelx = np.outer(sobelx[0],sobelx[1])
        
        sobely = cv2.getDerivKernels(0,1,self.WallisFilterWidth)
        
        kernely = np.outer(sobely[0],sobely[1])
        
        kernel = kernelx + kernely
        
        self.I1 = cv2.filter2D(self.I1,-1,kernel,borderType=cv2.BORDER_CONSTANT)
        
        self.I2 = cv2.filter2D(self.I2,-1,kernel,borderType=cv2.BORDER_CONSTANT) 
Author: leiyangleon, Project: autoRIFT, Lines of code: 27, Source: autoRIFT.py

Example 9: __call__

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def __call__(self, img):
        if random.random() < self.prob:
            alpha = self.limit * random.uniform(0, 1)
            kernel = np.ones((3, 3), np.float32)/9 * 0.2

            colored = img[..., :3]
            colored = alpha * cv2.filter2D(colored, -1, kernel) + (1-alpha) * colored
            maxval = np.max(img[..., :3])
            dtype = img.dtype
            img[..., :3] = clip(colored, dtype, maxval)

        return img


# https://github.com/pytorch/vision/pull/27/commits/659c854c6971ecc5b94dca3f4459ef2b7e42fb70
# color augmentation

# brightness, contrast, saturation-------------
# from mxnet code, see: https://github.com/dmlc/mxnet/blob/master/python/mxnet/image.py 
Author: asanakoy, Project: kaggle_carvana_segmentation, Lines of code: 21, Source: transforms.py

Example 10: _ssim

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def _ssim(img1, img2):
        C1 = (0.01 * 255) ** 2
        C2 = (0.03 * 255) ** 2

        img1 = img1.astype(np.float64)
        img2 = img2.astype(np.float64)
        kernel = cv2.getGaussianKernel(11, 1.5)
        window = np.outer(kernel, kernel.transpose())

        mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
        mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
        mu1_sq = mu1 ** 2
        mu2_sq = mu2 ** 2
        mu1_mu2 = mu1 * mu2
        sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq
        sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq
        sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

        ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
            (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
        )
        return ssim_map.mean() 
Author: bonlime, Project: pytorch-tools, Lines of code: 24, Source: psnr.py

Example 11: motion_blur

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def motion_blur(self, data, random_state):
        images = data['image'].copy()
        labels = data['label'].copy()

        # generating the kernel
        kernel_motion_blur = np.zeros((self.size, self.size))
        if random.random() > 0.5: # horizontal kernel
            kernel_motion_blur[int((self.size-1)/2), :] = np.ones(self.size)
        else: # vertical kernel
            kernel_motion_blur[:, int((self.size-1)/2)] = np.ones(self.size)
        kernel_motion_blur = kernel_motion_blur / self.size

        k = min(self.sections, images.shape[0])
        selected_idx = np.random.choice(images.shape[0], k, replace=True)

        for idx in selected_idx:
            # applying the kernel to the input image
            images[idx] = cv2.filter2D(images[idx], -1, kernel_motion_blur)
    
        return images, labels 
Author: zudi-lin, Project: pytorch_connectomics, Lines of code: 22, Source: motion_blur.py

Example 12: backprojection

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def backprojection(target, roihist):
    '''Image preprocessing'''
    hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsvt],[0,1],roihist,[0,180,0,256],1)
    # Now convolute with circular disc
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
    cv2.filter2D(dst,-1,disc,dst)
    # threshold and binary AND
    ret,binary = cv2.threshold(dst,80,255,0)
    # create kernel
    kernel = np.ones((5,5), np.uint8)
    iter_time = 1
    # morphological closing
    binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel,iterations=iter_time)

    thresh = cv2.merge((binary,binary,binary))
    target_filter = cv2.bitwise_and(target,thresh)
    
    return binary, target_filter 
Author: 1zlab, Project: 1ZLAB_PyEspCar, Lines of code: 21, Source: cvutils.py
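For context, the roihist argument is a 2D hue/saturation histogram of a reference region, i.e. the region whose color distribution should be located in the target. A minimal sketch of how it could be prepared, with hypothetical file names not taken from the project:

import cv2

roi = cv2.imread('roi.png')                    # hypothetical reference patch
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
# 2D H-S histogram: 180 hue bins over [0, 180), 256 saturation bins over [0, 256)
roihist = cv2.calcHist([hsv_roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)

target = cv2.imread('target.png')              # hypothetical target image
binary, target_filter = backprojection(target, roihist)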

Example 13: get_mag_avg

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def get_mag_avg(img):

    img = np.sqrt(img)

    kernels = get_kernels()

    mag = np.zeros(img.shape, dtype='float32')

    for kernel_filter in kernels:

        gx = cv2.filter2D(np.float32(img), cv2.CV_32F, kernel_filter[1], borderType=cv2.BORDER_REFLECT)
        gy = cv2.filter2D(np.float32(img), cv2.CV_32F, kernel_filter[0], borderType=cv2.BORDER_REFLECT)

        mag += cv2.magnitude(gx, gy)

    mag /= len(kernels)

    return np.uint8(mag) 
Author: jgrss, Project: spfeas, Lines of code: 20, Source: spfunctions.py

Example 14: blur_image

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def blur_image(self, image):
        def rand_kernel():
            size = np.random.randn(1)
            size = int(np.round(size)) * 2 + 1
            if size < 0: return None
            if random.random() < 0.5: return None
            size = min(size, 45)
            kernel = np.zeros((size, size))
            c = int(size/2)
            wx = random.random()
            kernel[:, c] += 1. / size * wx
            kernel[c, :] += 1. / size * (1-wx)
            return kernel

        kernel = rand_kernel()

        if kernel is not None:
            image = cv2.filter2D(image, -1, kernel)
        return image 
Author: foolwood, Project: SiamMask, Lines of code: 21, Source: siam_rpn_dataset.py

Example 15: linear_motion_blur

# Required import: import cv2 [as alias]
# Or: from cv2 import filter2D [as alias]
def linear_motion_blur(self, img, angle, length):
        """:param angle: in degree"""
        rad = np.deg2rad(angle)
        dx = np.cos(rad)
        dy = np.sin(rad)
        a = int(max(list(map(abs, (dx, dy)))) * length * 2)
        if a <= 0:
            return img
        kern = np.zeros((a, a))
        cx, cy = a // 2, a // 2
        dx, dy = list(map(int, (dx * length + cx, dy * length + cy)))
        cv2.line(kern, (cx, cy), (dx, dy), 1.0)
        s = kern.sum()
        if s == 0:
            kern[cx, cy] = 1.0
        else:
            kern /= s
        return cv2.filter2D(img, -1, kern) 
Author: ethnhe, Project: PVN3D, Lines of code: 20, Source: ycb_dataset.py


Note: The cv2.filter2D method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.