

Python cv2.inpaint Method Code Examples

This article collects typical usage examples of the cv2.inpaint method in Python. If you are wondering how cv2.inpaint is used in practice, how to call it, or what real examples look like, the selected code examples here should help. You can also explore further usage examples from the cv2 module that the method belongs to.


Below are 15 code examples of the cv2.inpaint method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
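
Before working through the examples, here is a minimal, self-contained sketch of the basic cv2.inpaint call. It is only an illustrative sketch: the file names (damaged.png and the two output files) are placeholders rather than files from any project below, and the mask is built by treating pure-black pixels as the damaged region.

import cv2
import numpy as np

img = cv2.imread("damaged.png")  # 8-bit, 3-channel source image (placeholder path)

# The mask must be an 8-bit, single-channel image; non-zero pixels mark the
# region to be repaired. Here, pure-black pixels are treated as damaged.
mask = np.all(img == 0, axis=2).astype(np.uint8)

# Third argument: inpaint radius, i.e. the neighbourhood considered around each
# damaged pixel. The flag selects the algorithm: INPAINT_TELEA (Telea's
# fast-marching method) or INPAINT_NS (Navier-Stokes based).
restored_telea = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)
restored_ns = cv2.inpaint(img, mask, 3, cv2.INPAINT_NS)

cv2.imwrite("restored_telea.png", restored_telea)
cv2.imwrite("restored_ns.png", restored_ns)

Both flags take the same arguments; the examples below use both, and several of them also show the common trick of padding the image border and normalizing depth values before inpainting.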

Example 1: inpaint

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def inpaint(img, threshold=1):
  h, w = img.shape[:2]

  if len(img.shape) == 3:  # RGB
    mask = np.all(img == 0, axis=2).astype(np.uint8)
    img = cv2.inpaint(img, mask, inpaintRadius=3, flags=cv2.INPAINT_TELEA)

  else:  # depth
    mask = np.where(img > threshold)
    xx, yy = np.meshgrid(np.arange(w), np.arange(h))
    xym = np.vstack((np.ravel(xx[mask]), np.ravel(yy[mask]))).T
    img = np.ravel(img[mask])
    interp = interpolate.NearestNDInterpolator(xym, img)
    img = interp(np.ravel(xx), np.ravel(yy)).reshape(xx.shape)

  return img 
Developer: google, Project: graph_distillation, Lines of code: 18, Source file: imgproc.py

Example 2: get_inpaint_func_tv

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def get_inpaint_func_tv():
    def inpaint_func(image, mask):
        """Total variation inpainting"""
        inpainted = np.zeros_like(image)
        for c in range(image.shape[2]):
            image_c = image[:, :, c]
            mask_c = mask[:, :, c]
            if np.min(mask_c) > 0:
                # if mask is all ones, no need to inpaint
                inpainted[:, :, c] = image_c
            else:
                h, w = image_c.shape
                inpainted_c_var = cvxpy.Variable(h, w)
                obj = cvxpy.Minimize(cvxpy.tv(inpainted_c_var))
                constraints = [cvxpy.mul_elemwise(mask_c, inpainted_c_var) == cvxpy.mul_elemwise(mask_c, image_c)]
                prob = cvxpy.Problem(obj, constraints)
                # prob.solve(solver=cvxpy.SCS, max_iters=100, eps=1e-2)  # scs solver
                prob.solve()  # default solver
                inpainted[:, :, c] = inpainted_c_var.value
        return inpainted
    return inpaint_func 
Developer: AshishBora, Project: ambient-gan, Lines of code: 23, Source file: measure_utils.py

Example 3: process

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def process(eval_img, device='cpu'):
    (img, origin, unpadder), file_name = eval_img
    with torch.no_grad():
        out = model(img.to(device))

    prob = F.sigmoid(out)
    mask = prob > 0.5
    mask = torch.nn.MaxPool2d(kernel_size=(3, 3), padding=(1, 1), stride=1)(mask.float()).byte()
    mask = unpadder(mask)
    mask = mask.float().cpu()

    save_image(mask, file_name + ' _mask.jpg')
    origin_np = np.array(to_pil_image(origin[0]))
    mask_np = to_pil_image(mask[0]).convert("L")
    mask_np = np.array(mask_np, dtype='uint8')
    mask_np = draw_bounding_box(origin_np, mask_np, 500)
    mask_ = Image.fromarray(mask_np)
    mask_.save(file_name + "_contour.jpg")
    # ret, mask_np = cv2.threshold(mask_np, 127, 255, 0)
    # dst = cv2.inpaint(origin_np, mask_np, 1, cv2.INPAINT_NS)
    # out = Image.fromarray(dst)
    # out.save(file_name + ' _box.jpg') 
Developer: yu45020, Project: Text_Segmentation_Image_Inpainting, Lines of code: 24, Source file: demo_segmentation.py

Example 4: inpaint

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def inpaint(self, missing_value=0):
        """
        Inpaint missing values in depth image.
        :param missing_value: Value to fill in the depth image.
        """
        # cv2 inpainting doesn't handle the border properly
        # https://stackoverflow.com/questions/25974033/inpainting-depth-map-still-a-black-image-border
        self.img = cv2.copyMakeBorder(self.img, 1, 1, 1, 1, cv2.BORDER_DEFAULT)
        mask = (self.img == missing_value).astype(np.uint8)

        # Scale to keep as float, but has to be in bounds -1:1 to keep opencv happy.
        scale = np.abs(self.img).max()
        self.img = self.img.astype(np.float32) / scale  # Has to be float32, 64 not supported.
        self.img = cv2.inpaint(self.img, mask, 1, cv2.INPAINT_NS)

        # Back to original size and value range.
        self.img = self.img[1:-1, 1:-1]
        self.img = self.img * scale 
Developer: dougsm, Project: ggcnn, Lines of code: 20, Source file: image.py

Example 5: remove_watermark_raw

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def remove_watermark_raw(self, img, watermark_template_gray_img, watermark_template_mask_img):
        """
        去除图片中的水印
        :param img: 待去除水印图片位图
        :param watermark_template_gray_img: 水印模板的灰度图片位图,用于确定水印位置
        :param watermark_template_mask_img: 水印模板的掩码图片位图,用于修复原始图片
        :return: 去除水印后的图片位图
        """
        # 寻找水印位置
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        x1, y1, x2, y2 = self.find_watermark_from_gray(img_gray, watermark_template_gray_img)

        # Build a mask marking the watermark region in the original image
        mask = np.zeros(img.shape, np.uint8)
        # watermark_template_mask_img = cv2.cvtColor(watermark_template_gray_img, cv2.COLOR_GRAY2BGR)
        # mask[y1:y1 + self.watermark_template_h, x1:x1 + self.watermark_template_w] = watermark_template_mask_img
        mask[y1:y2, x1:x2] = watermark_template_mask_img
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

        # Repair the masked region using the TELEA inpainting algorithm
        dst = cv2.inpaint(img, mask, 5, cv2.INPAINT_TELEA)
        # cv2.imwrite('dst.jpg', dst)

        return dst 
Developer: SixQuant, Project: nowatermark, Lines of code: 26, Source file: WatermarkRemover.py

Example 6: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def __init__(self, max_objects=1, image_fill_value=0, mask_fill_value=0, always_apply=False, p=0.5):
        """
        Args:
            max_objects: Maximum number of labels that can be zeroed out. Can be tuple, in this case it's [min, max]
            image_fill_value: Fill value to use when filling image.
                Can be 'inpaint' to apply inpainting (works only for 3-channel images)
            mask_fill_value: Fill value to use when filling mask.

        Targets:
            image, mask

        Image types:
            uint8, float32
        """
        super(MaskDropout, self).__init__(always_apply, p)
        self.max_objects = to_tuple(max_objects, 1)
        self.image_fill_value = image_fill_value
        self.mask_fill_value = mask_fill_value 
Developer: albumentations-team, Project: albumentations, Lines of code: 20, Source file: transforms.py

Example 7: process_depth_image

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def process_depth_image(depth, crop_size, out_size=300, return_mask=False, crop_y_offset=0):
    imh, imw = depth.shape

    with TimeIt('1'):
        # Crop.
        depth_crop = depth[(imh - crop_size) // 2 - crop_y_offset:(imh - crop_size) // 2 + crop_size - crop_y_offset,
                           (imw - crop_size) // 2:(imw - crop_size) // 2 + crop_size]
    # depth_nan_mask = np.isnan(depth_crop).astype(np.uint8)

    # Inpaint
    # OpenCV inpainting does weird things at the border.
    with TimeIt('2'):
        depth_crop = cv2.copyMakeBorder(depth_crop, 1, 1, 1, 1, cv2.BORDER_DEFAULT)
        depth_nan_mask = np.isnan(depth_crop).astype(np.uint8)

    with TimeIt('3'):
        depth_crop[depth_nan_mask==1] = 0

    with TimeIt('4'):
        # Scale to keep as float, but has to be in bounds -1:1 to keep opencv happy.
        depth_scale = np.abs(depth_crop).max()
        depth_crop = depth_crop.astype(np.float32) / depth_scale  # Has to be float32, 64 not supported.

        with TimeIt('Inpainting'):
            depth_crop = cv2.inpaint(depth_crop, depth_nan_mask, 1, cv2.INPAINT_NS)

        # Back to original size and value range.
        depth_crop = depth_crop[1:-1, 1:-1]
        depth_crop = depth_crop * depth_scale

    with TimeIt('5'):
        # Resize
        depth_crop = cv2.resize(depth_crop, (out_size, out_size), interpolation=cv2.INTER_AREA)

    if return_mask:
        with TimeIt('6'):
            depth_nan_mask = depth_nan_mask[1:-1, 1:-1]
            depth_nan_mask = cv2.resize(depth_nan_mask, (out_size, out_size), cv2.INTER_NEAREST)
        return depth_crop, depth_nan_mask
    else:
        return depth_crop 
Developer: dougsm, Project: mvp_grasp, Lines of code: 43, Source file: ggcnn_torch.py

Example 8: get_inpaint_func_opencv

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def get_inpaint_func_opencv(hparams, inpaint_type):
    x_min = hparams.x_min
    x_max = hparams.x_max
    def inpaint_func(image, mask):
        mask = np.prod(mask, axis=2, keepdims=True)
        unknown = (1-mask).astype(np.uint8)
        image = 255 * (image - x_min) / (x_max - x_min)
        image = image.astype(np.uint8)
        inpainted = cv2.inpaint(image, unknown, 3, inpaint_type)
        inpainted = inpainted.astype(np.float32)
        inpainted = inpainted / 255.0 * (x_max - x_min) + x_min
        inpainted = np.reshape(inpainted, image.shape)
        return inpainted
    return inpaint_func 
Developer: AshishBora, Project: ambient-gan, Lines of code: 16, Source file: measure_utils.py

Example 9: main

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def main():
    image = cv2.imread("../data/Damaged Image.tiff", 1)
    mask_image = cv2.imread("../data/Mask.tiff", 0)

    telea_image = cv2.inpaint(image, mask_image, 5, cv2.INPAINT_TELEA)
    ns_image = cv2.inpaint(image, mask_image, 5, cv2.INPAINT_NS)

    cv2.imshow("Orignal Image", image)
    cv2.imshow("Mask Image", mask_image)

    cv2.imshow("TELEA Restored Image", telea_image)
    cv2.imshow("NS Restored Image", ns_image)

    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Developer: amarlearning, Project: Finger-Detection-and-Tracking, Lines of code: 17, Source file: ImageRestoration.py

Example 10: GetDepthImageObservation

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def GetDepthImageObservation(self):
		# ros image to cv2 image
		try:
			cv_img = self.bridge.imgmsg_to_cv2(self.depth_image, "32FC1")
		except Exception as e:
			raise e
		# try:
		# 	cv_rgb_img = self.bridge.imgmsg_to_cv2(self.rgb_image, "bgr8")
		# except Exception as e:
		# 	raise e
		cv_img = np.array(cv_img, dtype=np.float32)

		cv_img[np.isnan(cv_img)] = 0.
		# cv_img/=(10./255.)
		cv_img/=(10000./255.)
		# print 'max:', np.amax(cv_img), 'min:', np.amin(cv_img)
		# cv_img[cv_img > 5.] = -1.
		# cv_img[cv_img < 0.4] = 0.

		# inpainting
		mask = copy.deepcopy(cv_img)
		mask[mask == 0.] = 1.
		mask[mask != 1.] = 0.
		# print 'mask sum:', np.sum(mask)
		mask = np.uint8(mask)
		cv_img = cv2.inpaint(np.uint8(cv_img), mask, 3, cv2.INPAINT_TELEA)

		cv_img = np.array(cv_img, dtype=np.float32)
		# cv_img*=(10./255.)
		cv_img*=(10./255.)
		# resize
		dim = (self.depth_image_size[0], self.depth_image_size[1])
		cv_img = cv2.resize(cv_img, dim, interpolation = cv2.INTER_AREA)

		# cv2 image to ros image and publish
		try:
			resized_img = self.bridge.cv2_to_imgmsg(cv_img, "passthrough")
		except Exception as e:
			raise e
		self.resized_depth_img.publish(resized_img)
		return(cv_img/5.) 
Developer: xie9187, Project: Monocular-Obstacle-Avoidance, Lines of code: 43, Source file: RealWorld.py

Example 11: inpaint

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def inpaint(self, win_size=3, rescale_factor=1.0):
        """ Fills in the zero pixels in the image.

        Parameters
        ----------
        win_size : int
            size of window to use for inpainting
        rescale_factor : float
            amount to rescale the image for inpainting, smaller numbers increase speed

        Returns
        -------
        :obj:`ColorImage`
            color image with zero pixels filled in
        """
        # get original shape
        orig_shape = (self.height, self.width)
        
        # resize the image
        resized_data = self.resize(rescale_factor, interp='nearest').data

        # inpaint smaller image
        mask = 1 * (np.sum(resized_data, axis=2) == 0)
        inpainted_data = cv2.inpaint(resized_data, mask.astype(np.uint8),
                                     win_size, cv2.INPAINT_TELEA)
        inpainted_im = ColorImage(inpainted_data, frame=self.frame)

        # fill in zero pixels with inpainted and resized image
        filled_data = inpainted_im.resize(
            orig_shape, interp='bilinear').data
        new_data = self.data
        new_data[self.data == 0] = filled_data[self.data == 0]
        return ColorImage(new_data, frame=self.frame) 
Developer: BerkeleyAutomation, Project: perception, Lines of code: 35, Source file: image.py

Example 12: apply

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def apply(self, img, dropout_mask=None, **params):
        if dropout_mask is None:
            return img

        if self.image_fill_value == "inpaint":
            dropout_mask = dropout_mask.astype(np.uint8)
            _, _, w, h = cv2.boundingRect(dropout_mask)
            radius = min(3, max(w, h) // 2)
            img = cv2.inpaint(img, dropout_mask, radius, cv2.INPAINT_NS)
        else:
            img = img.copy()
            img[dropout_mask] = self.image_fill_value

        return img 
Developer: albumentations-team, Project: albumentations, Lines of code: 16, Source file: transforms.py

Example 13: inpaint

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def inpaint(mask, masked_image):
    l = []
    for i in range(mask.size(0)):
        permuted_image = permute_image(masked_image[i], mul255=True)
        m = mask[i].squeeze().byte().numpy()
        inpainted_numpy = cv2.inpaint(permuted_image, m, 3, cv2.INPAINT_TELEA) #cv2.INPAINT_NS
        l.append(transforms.ToTensor()(inpainted_numpy).unsqueeze(0))
    inpainted_tensor = torch.cat(l, 0)

    return inpainted_tensor 
Developer: kondiz, Project: casme, Lines of code: 12, Source file: utils.py

Example 14: get_normal

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def get_normal(depth_refine,fx=-1,fy=-1,cx=-1,cy=-1,bbox=np.array([0]),refine=True):
    '''
    fast normal computation
    '''
    res_y = depth_refine.shape[0]
    res_x = depth_refine.shape[1]
    centerX=cx
    centerY=cy
    constant_x = 1/fx
    constant_y = 1/fy

    if(refine):
        depth_refine = np.nan_to_num(depth_refine)
        mask = np.zeros_like(depth_refine).astype(np.uint8)
        mask[depth_refine==0]=1
        depth_refine = depth_refine.astype(np.float32)
        depth_refine = cv2.inpaint(depth_refine,mask,2,cv2.INPAINT_NS)
        depth_refine = depth_refine.astype(np.float64)
        depth_refine = ndimage.gaussian_filter(depth_refine,2)

    uv_table = np.zeros((res_y,res_x,2),dtype=np.int16)
    column = np.arange(0,res_y)
    uv_table[:,:,1] = np.arange(0,res_x) - centerX #x-c_x (u)
    uv_table[:,:,0] = column[:,np.newaxis] - centerY #y-c_y (v)

    if(bbox.shape[0]==4):
        uv_table = uv_table[bbox[0]:bbox[2],bbox[1]:bbox[3]]
        v_x = np.zeros((bbox[2]-bbox[0],bbox[3]-bbox[1],3))
        v_y = np.zeros((bbox[2]-bbox[0],bbox[3]-bbox[1],3))
        normals = np.zeros((bbox[2]-bbox[0],bbox[3]-bbox[1],3))
        depth_refine=depth_refine[bbox[0]:bbox[2],bbox[1]:bbox[3]]
    else:
        v_x = np.zeros((res_y,res_x,3))
        v_y = np.zeros((res_y,res_x,3))
        normals = np.zeros((res_y,res_x,3))
    
    uv_table_sign= np.copy(uv_table)
    uv_table=np.abs(np.copy(uv_table))

    
    dig=np.gradient(depth_refine,2,edge_order=2)
    v_y[:,:,0]=uv_table_sign[:,:,1]*constant_x*dig[0]
    v_y[:,:,1]=depth_refine*constant_y+(uv_table_sign[:,:,0]*constant_y)*dig[0]
    v_y[:,:,2]=dig[0]

    v_x[:,:,0]=depth_refine*constant_x+uv_table_sign[:,:,1]*constant_x*dig[1]
    v_x[:,:,1]=uv_table_sign[:,:,0]*constant_y*dig[1]
    v_x[:,:,2]=dig[1]

    cross = np.cross(v_x.reshape(-1,3),v_y.reshape(-1,3))
    norm = np.expand_dims(np.linalg.norm(cross,axis=1),axis=1)
    norm[norm==0]=1
    cross = cross/norm
    if(bbox.shape[0]==4):
        cross =cross.reshape((bbox[2]-bbox[0],bbox[3]-bbox[1],3))
    else:
        cross =cross.reshape(res_y,res_x,3)
    cross= np.nan_to_num(cross)
    return cross 
Developer: kirumang, Project: Pix2Pose, Lines of code: 61, Source file: common_util.py

Example 15: poisson_blend

# Required module: import cv2 [as alias]
# Or: from cv2 import inpaint [as alias]
def poisson_blend(input, output, mask):
    """
    * inputs:
        - input (torch.Tensor, required)
                Input tensor of Completion Network, whose shape = (N, 3, H, W).
        - output (torch.Tensor, required)
                Output tensor of Completion Network, whose shape = (N, 3, H, W).
        - mask (torch.Tensor, required)
                Input mask tensor of Completion Network, whose shape = (N, 1, H, W).
    * returns:
                Output image tensor of shape (N, 3, H, W) inpainted with poisson image editing method.
    """
    input = input.clone().cpu()
    output = output.clone().cpu()
    mask = mask.clone().cpu()
    mask = torch.cat((mask, mask, mask), dim=1) # convert to 3-channel format
    num_samples = input.shape[0]
    ret = []
    for i in range(num_samples):
        dstimg = transforms.functional.to_pil_image(input[i])
        dstimg = np.array(dstimg)[:, :, [2, 1, 0]]
        srcimg = transforms.functional.to_pil_image(output[i])
        srcimg = np.array(srcimg)[:, :, [2, 1, 0]]
        msk = transforms.functional.to_pil_image(mask[i])
        msk = np.array(msk)[:, :, [2, 1, 0]]
        # compute mask's center
        xs, ys = [], []
        for j in range(msk.shape[0]):
            for k in range(msk.shape[1]):
                if msk[j, k, 0] == 255:
                    ys.append(j)
                    xs.append(k)
        xmin, xmax = min(xs), max(xs)
        ymin, ymax = min(ys), max(ys)
        center = ((xmax + xmin) // 2, (ymax + ymin) // 2)
        dstimg = cv2.inpaint(dstimg, msk[:, :, 0], 1, cv2.INPAINT_TELEA)
        out = cv2.seamlessClone(srcimg, dstimg, msk, center, cv2.NORMAL_CLONE)
        out = out[:, :, [2, 1, 0]]
        out = transforms.functional.to_tensor(out)
        out = torch.unsqueeze(out, dim=0)
        ret.append(out)
    ret = torch.cat(ret, dim=0)
    return ret 
Developer: otenim, Project: GLCIC-PyTorch, Lines of code: 45, Source file: utils.py


Note: The cv2.inpaint method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Before redistributing or using the code, please consult the license of the corresponding project; do not repost without permission.