

Python cv2.INPAINT_TELEA Attribute Code Examples

This article collects and summarizes typical usage examples of the cv2.INPAINT_TELEA attribute in Python. If you are wondering exactly what cv2.INPAINT_TELEA does, how to use it, or what real-world examples of it look like, the curated attribute code examples below may help. You can also explore further usage examples of the cv2 module to which this attribute belongs.


The following presents 9 code examples of the cv2.INPAINT_TELEA attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
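
Before the project-specific examples, here is a minimal self-contained sketch of the basic call: cv2.INPAINT_TELEA selects Telea's fast-marching inpainting algorithm in cv2.inpaint. The file names below are placeholders, not files from any of the projects listed.

import cv2

# Load the damaged image and a mask whose non-zero pixels mark the damaged area.
img = cv2.imread('damaged.jpg')                      # 8-bit BGR image
mask = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)  # single-channel 8-bit mask

# inpaintRadius is the neighborhood radius considered around each damaged pixel.
restored = cv2.inpaint(img, mask, inpaintRadius=3, flags=cv2.INPAINT_TELEA)
cv2.imwrite('restored.jpg', restored)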

Example 1: inpaint

# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_TELEA [as alias]
def inpaint(img, threshold=1):
  h, w = img.shape[:2]

  if len(img.shape) == 3:  # RGB
    mask = np.all(img == 0, axis=2).astype(np.uint8)
    img = cv2.inpaint(img, mask, inpaintRadius=3, flags=cv2.INPAINT_TELEA)

  else:  # depth
    mask = np.where(img > threshold)
    xx, yy = np.meshgrid(np.arange(w), np.arange(h))
    xym = np.vstack((np.ravel(xx[mask]), np.ravel(yy[mask]))).T
    img = np.ravel(img[mask])
    interp = interpolate.NearestNDInterpolator(xym, img)
    img = interp(np.ravel(xx), np.ravel(yy)).reshape(xx.shape)

  return img 
Developer ID: google, Project: graph_distillation, Lines of code: 18, Source file: imgproc.py

Example 2: unmeasure_np

# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_TELEA [as alias]
def unmeasure_np(self, hparams, x_measured_val, theta_val):
        if hparams.unmeasure_type == 'medfilt':
            unmeasure_func = lambda image, mask: signal.medfilt(image)
        elif hparams.unmeasure_type == 'inpaint-telea':
            inpaint_type = cv2.INPAINT_TELEA
            unmeasure_func = measure_utils.get_inpaint_func_opencv(hparams, inpaint_type)
        elif hparams.unmeasure_type == 'inpaint-ns':
            inpaint_type = cv2.INPAINT_NS
            unmeasure_func = measure_utils.get_inpaint_func_opencv(hparams, inpaint_type)
        elif hparams.unmeasure_type == 'inpaint-tv':
            unmeasure_func = measure_utils.get_inpaint_func_tv()
        elif hparams.unmeasure_type == 'blur':
            unmeasure_func = measure_utils.get_blur_func()
        else:
            raise NotImplementedError

        x_unmeasured_val = np.zeros_like(x_measured_val)
        for i in range(x_measured_val.shape[0]):
            x_unmeasured_val[i] = unmeasure_func(x_measured_val[i], theta_val[i])

        return x_unmeasured_val 
Developer ID: AshishBora, Project: ambient-gan, Lines of code: 23, Source file: measure.py

Example 3: remove_watermark_raw

# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_TELEA [as alias]
def remove_watermark_raw(self, img, watermark_template_gray_img, watermark_template_mask_img):
        """
        Remove the watermark from an image.
        :param img: bitmap of the image to remove the watermark from
        :param watermark_template_gray_img: grayscale bitmap of the watermark template, used to locate the watermark
        :param watermark_template_mask_img: mask bitmap of the watermark template, used to repair the original image
        :return: bitmap of the image with the watermark removed
        """
        # Locate the watermark
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        x1, y1, x2, y2 = self.find_watermark_from_gray(img_gray, watermark_template_gray_img)

        # Build a mask over the watermark region of the original image
        mask = np.zeros(img.shape, np.uint8)
        # watermark_template_mask_img = cv2.cvtColor(watermark_template_gray_img, cv2.COLOR_GRAY2BGR)
        # mask[y1:y1 + self.watermark_template_h, x1:x1 + self.watermark_template_w] = watermark_template_mask_img
        mask[y1:y2, x1:x2] = watermark_template_mask_img
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

        # Repair the masked region using the TELEA algorithm
        dst = cv2.inpaint(img, mask, 5, cv2.INPAINT_TELEA)
        # cv2.imwrite('dst.jpg', dst)

        return dst 
Developer ID: SixQuant, Project: nowatermark, Lines of code: 26, Source file: WatermarkRemover.py
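
The docstring above describes a two-step approach: locate the watermark from a template, then repair the region with TELEA inpainting. The sketch below illustrates the same idea in a standalone form; it is not the WatermarkRemover API, and the file names and the matching method (cv2.matchTemplate) are illustrative assumptions.

import cv2
import numpy as np

img = cv2.imread('photo_with_watermark.jpg')                       # assumed input file
tpl = cv2.imread('watermark_template.png', cv2.IMREAD_GRAYSCALE)   # assumed template file

# Locate the watermark by template matching on the grayscale image.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
res = cv2.matchTemplate(gray, tpl, cv2.TM_CCOEFF_NORMED)
_, _, _, max_loc = cv2.minMaxLoc(res)
x1, y1 = max_loc
h, w = tpl.shape

# Mark the watermark region in a single-channel mask and repair it.
mask = np.zeros(gray.shape, np.uint8)
mask[y1:y1 + h, x1:x1 + w] = 255
clean = cv2.inpaint(img, mask, 5, cv2.INPAINT_TELEA)
cv2.imwrite('photo_clean.jpg', clean)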

Example 4: main

# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_TELEA [as alias]
def main():
    image = cv2.imread("../data/Damaged Image.tiff", 1)
    mask_image = cv2.imread("../data/Mask.tiff", 0)

    telea_image = cv2.inpaint(image, mask_image, 5, cv2.INPAINT_TELEA)
    ns_image = cv2.inpaint(image, mask_image, 5, cv2.INPAINT_NS)

    cv2.imshow("Orignal Image", image)
    cv2.imshow("Mask Image", mask_image)

    cv2.imshow("TELEA Restored Image", telea_image)
    cv2.imshow("NS Restored Image", ns_image)

    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Developer ID: amarlearning, Project: Finger-Detection-and-Tracking, Lines of code: 17, Source file: ImageRestoration.py

Example 5: GetDepthImageObservation

# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_TELEA [as alias]
def GetDepthImageObservation(self):
		# ros image to cv2 image
		try:
			cv_img = self.bridge.imgmsg_to_cv2(self.depth_image, "32FC1")
		except Exception as e:
			raise e
		# try:
		# 	cv_rgb_img = self.bridge.imgmsg_to_cv2(self.rgb_image, "bgr8")
		# except Exception as e:
		# 	raise e
		cv_img = np.array(cv_img, dtype=np.float32)

		cv_img[np.isnan(cv_img)] = 0.
		# cv_img/=(10./255.)
		cv_img/=(10000./255.)
		# print 'max:', np.amax(cv_img), 'min:', np.amin(cv_img)
		# cv_img[cv_img > 5.] = -1.
		# cv_img[cv_img < 0.4] = 0.

		# inpainting
		mask = copy.deepcopy(cv_img)
		mask[mask == 0.] = 1.
		mask[mask != 1.] = 0.
		# print 'mask sum:', np.sum(mask)
		mask = np.uint8(mask)
		cv_img = cv2.inpaint(np.uint8(cv_img), mask, 3, cv2.INPAINT_TELEA)

		cv_img = np.array(cv_img, dtype=np.float32)
		# cv_img*=(10./255.)
		cv_img*=(10./255.)
		# resize
		dim = (self.depth_image_size[0], self.depth_image_size[1])
		cv_img = cv2.resize(cv_img, dim, interpolation = cv2.INTER_AREA)

		# cv2 image to ros image and publish
		try:
			resized_img = self.bridge.cv2_to_imgmsg(cv_img, "passthrough")
		except Exception as e:
			raise e
		self.resized_depth_img.publish(resized_img)
		return(cv_img/5.) 
Developer ID: xie9187, Project: Monocular-Obstacle-Avoidance, Lines of code: 43, Source file: RealWorld.py

Example 6: inpaint

# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_TELEA [as alias]
def inpaint(self, win_size=3, rescale_factor=1.0):
        """ Fills in the zero pixels in the image.

        Parameters
        ----------
        win_size : int
            size of window to use for inpainting
        rescale_factor : float
            amount to rescale the image for inpainting, smaller numbers increase speed

        Returns
        -------
        :obj:`ColorImage`
            color image with zero pixels filled in
        """
        # get original shape
        orig_shape = (self.height, self.width)
        
        # resize the image
        resized_data = self.resize(rescale_factor, interp='nearest').data

        # inpaint smaller image
        mask = 1 * (np.sum(resized_data, axis=2) == 0)
        inpainted_data = cv2.inpaint(resized_data, mask.astype(np.uint8),
                                     win_size, cv2.INPAINT_TELEA)
        inpainted_im = ColorImage(inpainted_data, frame=self.frame)

        # fill in zero pixels with inpainted and resized image
        filled_data = inpainted_im.resize(
            orig_shape, interp='bilinear').data
        new_data = self.data
        new_data[self.data == 0] = filled_data[self.data == 0]
        return ColorImage(new_data, frame=self.frame) 
Developer ID: BerkeleyAutomation, Project: perception, Lines of code: 35, Source file: image.py
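
A hypothetical usage sketch of the inpaint method above. It assumes a ColorImage can be constructed from an HxWx3 uint8 numpy array plus a frame name, as the method body itself does; the import path and frame name are assumptions, so check the perception package for the actual constructor.

import numpy as np
from perception import ColorImage   # assumed import path

data = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
data[100:140, 200:260, :] = 0                         # punch a zero-valued hole
im = ColorImage(data, frame='camera')                 # 'camera' is an arbitrary frame name
filled = im.inpaint(win_size=3, rescale_factor=0.5)   # inpaint on a half-resolution copy
print(filled.data.shape)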

Example 7: inpaint

# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_TELEA [as alias]
def inpaint(mask, masked_image):
    l = []
    for i in range(mask.size(0)):
        permuted_image = permute_image(masked_image[i], mul255=True)
        m = mask[i].squeeze().byte().numpy()
        inpainted_numpy = cv2.inpaint(permuted_image, m, 3, cv2.INPAINT_TELEA) #cv2.INPAINT_NS
        l.append(transforms.ToTensor()(inpainted_numpy).unsqueeze(0))
    inpainted_tensor = torch.cat(l, 0)

    return inpainted_tensor 
Developer ID: kondiz, Project: casme, Lines of code: 12, Source file: utils.py

Example 8: poisson_blend

# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_TELEA [as alias]
def poisson_blend(input, output, mask):
    """
    * inputs:
        - input (torch.Tensor, required)
                Input tensor of Completion Network, whose shape = (N, 3, H, W).
        - output (torch.Tensor, required)
                Output tensor of Completion Network, whose shape = (N, 3, H, W).
        - mask (torch.Tensor, required)
                Input mask tensor of Completion Network, whose shape = (N, 1, H, W).
    * returns:
                Output image tensor of shape (N, 3, H, W) inpainted with poisson image editing method.
    """
    input = input.clone().cpu()
    output = output.clone().cpu()
    mask = mask.clone().cpu()
    mask = torch.cat((mask, mask, mask), dim=1) # convert to 3-channel format
    num_samples = input.shape[0]
    ret = []
    for i in range(num_samples):
        dstimg = transforms.functional.to_pil_image(input[i])
        dstimg = np.array(dstimg)[:, :, [2, 1, 0]]
        srcimg = transforms.functional.to_pil_image(output[i])
        srcimg = np.array(srcimg)[:, :, [2, 1, 0]]
        msk = transforms.functional.to_pil_image(mask[i])
        msk = np.array(msk)[:, :, [2, 1, 0]]
        # compute mask's center
        xs, ys = [], []
        for j in range(msk.shape[0]):
            for k in range(msk.shape[1]):
                if msk[j, k, 0] == 255:
                    ys.append(j)
                    xs.append(k)
        xmin, xmax = min(xs), max(xs)
        ymin, ymax = min(ys), max(ys)
        center = ((xmax + xmin) // 2, (ymax + ymin) // 2)
        dstimg = cv2.inpaint(dstimg, msk[:, :, 0], 1, cv2.INPAINT_TELEA)
        out = cv2.seamlessClone(srcimg, dstimg, msk, center, cv2.NORMAL_CLONE)
        out = out[:, :, [2, 1, 0]]
        out = transforms.functional.to_tensor(out)
        out = torch.unsqueeze(out, dim=0)
        ret.append(out)
    ret = torch.cat(ret, dim=0)
    return ret 
Developer ID: otenim, Project: GLCIC-PyTorch, Lines of code: 45, Source file: utils.py
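
A hypothetical usage sketch for poisson_blend above, with dummy tensors standing in for the Completion Network's input and output (it assumes poisson_blend has been imported from the project's utils module). Shapes follow the docstring; the mask marks a square hole in the middle of the image.

import torch

N, H, W = 1, 128, 128
inp = torch.rand(N, 3, H, W)        # stands in for the network input
out = torch.rand(N, 3, H, W)        # stands in for the completed output
mask = torch.zeros(N, 1, H, W)
mask[:, :, 48:80, 48:80] = 1.0      # 1 inside the hole, 0 elsewhere

blended = poisson_blend(inp, out, mask)
print(blended.shape)                # torch.Size([1, 3, 128, 128])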

Example 9: GetDepthImageObservation

# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_TELEA [as alias]
def GetDepthImageObservation(self):
		# ros image to cv2 image

		try:
			cv_img = self.bridge.imgmsg_to_cv2(self.depth_image, "32FC1")
		except Exception as e:
			raise e
		try:
			cv_rgb_img = self.bridge.imgmsg_to_cv2(self.rgb_image, "bgr8")
		except Exception as e:
			raise e
		cv_img = np.array(cv_img, dtype=np.float32)
		# resize
		dim = (self.depth_image_size[0], self.depth_image_size[1])
		cv_img = cv2.resize(cv_img, dim, interpolation = cv2.INTER_AREA)

		cv_img[np.isnan(cv_img)] = 0.
		cv_img[cv_img < 0.4] = 0.
		cv_img/=(10./255.)

		# cv_img/=(10000./255.)
		# print 'max:', np.amax(cv_img), 'min:', np.amin(cv_img)
		# cv_img[cv_img > 5.] = -1.

		# # inpainting
		# mask = copy.deepcopy(cv_img)
		# mask[mask == 0.] = 1.
		# mask[mask != 1.] = 0.
		# mask = np.uint8(mask)
		# cv_img = cv2.inpaint(np.uint8(cv_img), mask, 3, cv2.INPAINT_TELEA)

		# # guassian noise
		# gauss = np.random.normal(0., 0.5, dim)
		# gauss = gauss.reshape(dim[1], dim[0])
		# cv_img = np.array(cv_img, dtype=np.float32)
		# cv_img = cv_img + gauss
		# cv_img[cv_img<0.00001] = 0.

		# # smoothing
		# kernel = np.ones((4,4),np.float32)/16
		# cv_img = cv2.filter2D(cv_img,-1,kernel)


		cv_img = np.array(cv_img, dtype=np.float32)
		cv_img*=(10./255.)

		# cv2 image to ros image and publish
		try:
			resized_img = self.bridge.cv2_to_imgmsg(cv_img, "passthrough")
		except Exception as e:
			raise e
		self.resized_depth_img.publish(resized_img)
		return(cv_img/5.) 
Developer ID: xie9187, Project: Monocular-Obstacle-Avoidance, Lines of code: 55, Source file: GazeboWorld.py


Note: The cv2.INPAINT_TELEA attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding projects' licenses. Do not reproduce without permission.