This article collects typical usage examples of the cv2.INPAINT_NS attribute in Python. If you have been wondering what cv2.INPAINT_NS does, how to use it, or simply want to see it in context, the hand-picked code samples below should help. You can also explore further usage examples of the cv2 module to which this attribute belongs.
The following shows 9 code examples of cv2.INPAINT_NS, sorted by popularity by default.
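Before the examples, here is a minimal, self-contained sketch of how the flag is passed to cv2.inpaint; the image and mask below are synthetic placeholders rather than data from any of the projects that follow.

import cv2
import numpy as np

# Synthetic 8-bit image with a square "defect" and a matching mask.
img = np.full((128, 128, 3), 200, dtype=np.uint8)
img[40:60, 40:60] = 255
mask = np.zeros((128, 128), dtype=np.uint8)
mask[40:60, 40:60] = 255  # non-zero pixels mark the region to be filled

# The third argument is the inpaint radius in pixels; the flag selects the algorithm.
restored_ns = cv2.inpaint(img, mask, 3, cv2.INPAINT_NS)
restored_telea = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)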
Example 1: process
# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_NS [as alias]
def process(eval_img, device='cpu'):
    (img, origin, unpadder), file_name = eval_img
    with torch.no_grad():
        out = model(img.to(device))
        prob = F.sigmoid(out)
        mask = prob > 0.5
        mask = torch.nn.MaxPool2d(kernel_size=(3, 3), padding=(1, 1), stride=1)(mask.float()).byte()
        mask = unpadder(mask)
        mask = mask.float().cpu()

        save_image(mask, file_name + ' _mask.jpg')

        origin_np = np.array(to_pil_image(origin[0]))
        mask_np = to_pil_image(mask[0]).convert("L")
        mask_np = np.array(mask_np, dtype='uint8')
        mask_np = draw_bounding_box(origin_np, mask_np, 500)
        mask_ = Image.fromarray(mask_np)
        mask_.save(file_name + "_contour.jpg")

        # ret, mask_np = cv2.threshold(mask_np, 127, 255, 0)
        # dst = cv2.inpaint(origin_np, mask_np, 1, cv2.INPAINT_NS)
        # out = Image.fromarray(dst)
        # out.save(file_name + ' _box.jpg')
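The commented-out tail of Example 1 shows how the detected regions could also be removed instead of merely outlined. A hedged sketch of that step, assuming mask_np is still a single-channel 8-bit mask and origin_np an 8-bit colour image at that point:

# Binarize the mask, then fill the masked regions with Navier-Stokes inpainting.
_, mask_bin = cv2.threshold(mask_np, 127, 255, cv2.THRESH_BINARY)
dst = cv2.inpaint(origin_np, mask_bin, 1, cv2.INPAINT_NS)
Image.fromarray(dst).save(file_name + ' _box.jpg')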
Example 2: inpaint
# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_NS [as alias]
def inpaint(self, missing_value=0):
    """
    Inpaint missing values in depth image.
    :param missing_value: Value to fill in the depth image.
    """
    # cv2 inpainting doesn't handle the border properly
    # https://stackoverflow.com/questions/25974033/inpainting-depth-map-still-a-black-image-border
    self.img = cv2.copyMakeBorder(self.img, 1, 1, 1, 1, cv2.BORDER_DEFAULT)
    mask = (self.img == missing_value).astype(np.uint8)

    # Scale to keep as float, but has to be in bounds -1:1 to keep opencv happy.
    scale = np.abs(self.img).max()
    self.img = self.img.astype(np.float32) / scale  # Has to be float32, 64 not supported.
    self.img = cv2.inpaint(self.img, mask, 1, cv2.INPAINT_NS)

    # Back to original size and value range.
    self.img = self.img[1:-1, 1:-1]
    self.img = self.img * scale
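For reference, a standalone sketch of the same recipe on a plain NumPy depth array (the class holding self.img is not shown in this listing); it assumes missing pixels are stored as 0:

import cv2
import numpy as np

def inpaint_depth(depth, missing_value=0):
    # Pad by one pixel so the border is inpainted sensibly, then crop it off again.
    depth = cv2.copyMakeBorder(depth, 1, 1, 1, 1, cv2.BORDER_DEFAULT)
    mask = (depth == missing_value).astype(np.uint8)
    # cv2.inpaint wants float32 roughly within [-1, 1], so normalise and undo afterwards.
    scale = np.abs(depth).max()
    depth = depth.astype(np.float32) / scale
    depth = cv2.inpaint(depth, mask, 1, cv2.INPAINT_NS)
    return depth[1:-1, 1:-1] * scale

depth = np.random.uniform(0.4, 1.2, (64, 64)).astype(np.float32)
depth[20:30, 20:30] = 0.0  # simulated sensor dropout
filled = inpaint_depth(depth)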
Example 3: unmeasure_np
# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_NS [as alias]
def unmeasure_np(self, hparams, x_measured_val, theta_val):
    if hparams.unmeasure_type == 'medfilt':
        unmeasure_func = lambda image, mask: signal.medfilt(image)
    elif hparams.unmeasure_type == 'inpaint-telea':
        inpaint_type = cv2.INPAINT_TELEA
        unmeasure_func = measure_utils.get_inpaint_func_opencv(hparams, inpaint_type)
    elif hparams.unmeasure_type == 'inpaint-ns':
        inpaint_type = cv2.INPAINT_NS
        unmeasure_func = measure_utils.get_inpaint_func_opencv(hparams, inpaint_type)
    elif hparams.unmeasure_type == 'inpaint-tv':
        unmeasure_func = measure_utils.get_inpaint_func_tv()
    elif hparams.unmeasure_type == 'blur':
        unmeasure_func = measure_utils.get_blur_func()
    else:
        raise NotImplementedError

    x_unmeasured_val = np.zeros_like(x_measured_val)
    for i in range(x_measured_val.shape[0]):
        x_unmeasured_val[i] = unmeasure_func(x_measured_val[i], theta_val[i])

    return x_unmeasured_val
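measure_utils.get_inpaint_func_opencv is not reproduced in this listing. Purely as an illustration, a hypothetical version might look like the sketch below; the value range of image and the convention that mask is 1 for observed pixels (with shape (H, W, C)) are assumptions, not the project's actual behaviour.

import cv2
import numpy as np

def get_inpaint_func_opencv(hparams, inpaint_type):
    # hparams is accepted only to mirror the call site above; it is unused here.
    def inpaint_func(image, mask):
        image_u8 = np.clip(image * 255.0, 0, 255).astype(np.uint8)
        missing = (mask[..., 0] == 0).astype(np.uint8)  # holes to fill
        filled = cv2.inpaint(image_u8, missing, 3, inpaint_type)
        return filled.astype(np.float32) / 255.0
    return inpaint_func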
Example 4: process_depth_image
# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_NS [as alias]
def process_depth_image(depth, crop_size, out_size=300, return_mask=False, crop_y_offset=0):
    imh, imw = depth.shape

    with TimeIt('1'):
        # Crop.
        depth_crop = depth[(imh - crop_size) // 2 - crop_y_offset:(imh - crop_size) // 2 + crop_size - crop_y_offset,
                           (imw - crop_size) // 2:(imw - crop_size) // 2 + crop_size]

    # depth_nan_mask = np.isnan(depth_crop).astype(np.uint8)

    # Inpaint
    # OpenCV inpainting does weird things at the border.
    with TimeIt('2'):
        depth_crop = cv2.copyMakeBorder(depth_crop, 1, 1, 1, 1, cv2.BORDER_DEFAULT)
        depth_nan_mask = np.isnan(depth_crop).astype(np.uint8)

    with TimeIt('3'):
        depth_crop[depth_nan_mask == 1] = 0

    with TimeIt('4'):
        # Scale to keep as float, but has to be in bounds -1:1 to keep opencv happy.
        depth_scale = np.abs(depth_crop).max()
        depth_crop = depth_crop.astype(np.float32) / depth_scale  # Has to be float32, 64 not supported.

    with TimeIt('Inpainting'):
        depth_crop = cv2.inpaint(depth_crop, depth_nan_mask, 1, cv2.INPAINT_NS)

    # Back to original size and value range.
    depth_crop = depth_crop[1:-1, 1:-1]
    depth_crop = depth_crop * depth_scale

    with TimeIt('5'):
        # Resize
        depth_crop = cv2.resize(depth_crop, (out_size, out_size), interpolation=cv2.INTER_AREA)

    if return_mask:
        with TimeIt('6'):
            depth_nan_mask = depth_nan_mask[1:-1, 1:-1]
            depth_nan_mask = cv2.resize(depth_nan_mask, (out_size, out_size), interpolation=cv2.INTER_NEAREST)
        return depth_crop, depth_nan_mask
    else:
        return depth_crop
Example 5: main
# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_NS [as alias]
def main():
    image = cv2.imread("../data/Damaged Image.tiff", 1)
    mask_image = cv2.imread("../data/Mask.tiff", 0)

    telea_image = cv2.inpaint(image, mask_image, 5, cv2.INPAINT_TELEA)
    ns_image = cv2.inpaint(image, mask_image, 5, cv2.INPAINT_NS)

    cv2.imshow("Original Image", image)
    cv2.imshow("Mask Image", mask_image)
    cv2.imshow("TELEA Restored Image", telea_image)
    cv2.imshow("NS Restored Image", ns_image)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
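Example 5 assumes a ready-made mask file. When only the damaged image is available, one common (and here entirely assumed) approach is to derive the mask by thresholding the defect colour, e.g. for bright white scratches:

import cv2

image = cv2.imread("../data/Damaged Image.tiff", 1)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Pixels close to pure white are treated as damage; 240 is an arbitrary cut-off.
_, mask = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY)
restored = cv2.inpaint(image, mask, 5, cv2.INPAINT_NS)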
Example 6: apply
# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_NS [as alias]
def apply(self, img, dropout_mask=None, **params):
    if dropout_mask is None:
        return img

    if self.image_fill_value == "inpaint":
        dropout_mask = dropout_mask.astype(np.uint8)
        _, _, w, h = cv2.boundingRect(dropout_mask)
        radius = min(3, max(w, h) // 2)
        img = cv2.inpaint(img, dropout_mask, radius, cv2.INPAINT_NS)
    else:
        img = img.copy()
        img[dropout_mask] = self.image_fill_value

    return img
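The radius heuristic above can be exercised on its own. A small sketch with a synthetic image and dropout mask (the transform class wrapping apply is not part of this listing):

import cv2
import numpy as np

img = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
dropout_mask = np.zeros((100, 100), dtype=np.uint8)
dropout_mask[30:40, 50:70] = 1  # region to drop and repair

x, y, w, h = cv2.boundingRect(dropout_mask)
radius = min(3, max(w, h) // 2)  # same heuristic as Example 6
repaired = cv2.inpaint(img, dropout_mask, radius, cv2.INPAINT_NS)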
Example 7: inpaint
# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_NS [as alias]
def inpaint(mask, masked_image):
    l = []
    for i in range(mask.size(0)):
        permuted_image = permute_image(masked_image[i], mul255=True)
        m = mask[i].squeeze().byte().numpy()
        inpainted_numpy = cv2.inpaint(permuted_image, m, 3, cv2.INPAINT_TELEA)  # cv2.INPAINT_NS
        l.append(transforms.ToTensor()(inpainted_numpy).unsqueeze(0))

    inpainted_tensor = torch.cat(l, 0)
    return inpainted_tensor
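permute_image is a helper from the same project and is not reproduced here. A hypothetical equivalent, assuming masked_image[i] is a CHW float tensor in [0, 1], could look like this:

import numpy as np

def permute_image(tensor, mul255=False):
    # Convert a CHW float tensor to the HWC layout cv2.inpaint expects.
    arr = tensor.detach().cpu().numpy().transpose(1, 2, 0)
    if mul255:
        arr = (arr * 255.0).clip(0, 255).astype(np.uint8)
    return arr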
Example 8: get_normal
# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_NS [as alias]
def get_normal(depth_refine, fx=-1, fy=-1, cx=-1, cy=-1, bbox=np.array([0]), refine=True):
    '''
    fast normal computation
    '''
    res_y = depth_refine.shape[0]
    res_x = depth_refine.shape[1]
    centerX = cx
    centerY = cy
    constant_x = 1 / fx
    constant_y = 1 / fy

    if refine:
        depth_refine = np.nan_to_num(depth_refine)
        mask = np.zeros_like(depth_refine).astype(np.uint8)
        mask[depth_refine == 0] = 1
        depth_refine = depth_refine.astype(np.float32)
        depth_refine = cv2.inpaint(depth_refine, mask, 2, cv2.INPAINT_NS)
        depth_refine = depth_refine.astype(np.float64)
        depth_refine = ndimage.gaussian_filter(depth_refine, 2)

    uv_table = np.zeros((res_y, res_x, 2), dtype=np.int16)
    column = np.arange(0, res_y)
    uv_table[:, :, 1] = np.arange(0, res_x) - centerX  # x-c_x (u)
    uv_table[:, :, 0] = column[:, np.newaxis] - centerY  # y-c_y (v)

    if bbox.shape[0] == 4:
        uv_table = uv_table[bbox[0]:bbox[2], bbox[1]:bbox[3]]
        v_x = np.zeros((bbox[2] - bbox[0], bbox[3] - bbox[1], 3))
        v_y = np.zeros((bbox[2] - bbox[0], bbox[3] - bbox[1], 3))
        normals = np.zeros((bbox[2] - bbox[0], bbox[3] - bbox[1], 3))
        depth_refine = depth_refine[bbox[0]:bbox[2], bbox[1]:bbox[3]]
    else:
        v_x = np.zeros((res_y, res_x, 3))
        v_y = np.zeros((res_y, res_x, 3))
        normals = np.zeros((res_y, res_x, 3))

    uv_table_sign = np.copy(uv_table)
    uv_table = np.abs(np.copy(uv_table))

    dig = np.gradient(depth_refine, 2, edge_order=2)
    v_y[:, :, 0] = uv_table_sign[:, :, 1] * constant_x * dig[0]
    v_y[:, :, 1] = depth_refine * constant_y + (uv_table_sign[:, :, 0] * constant_y) * dig[0]
    v_y[:, :, 2] = dig[0]
    v_x[:, :, 0] = depth_refine * constant_x + uv_table_sign[:, :, 1] * constant_x * dig[1]
    v_x[:, :, 1] = uv_table_sign[:, :, 0] * constant_y * dig[1]
    v_x[:, :, 2] = dig[1]

    cross = np.cross(v_x.reshape(-1, 3), v_y.reshape(-1, 3))
    norm = np.expand_dims(np.linalg.norm(cross, axis=1), axis=1)
    norm[norm == 0] = 1
    cross = cross / norm
    if bbox.shape[0] == 4:
        cross = cross.reshape((bbox[2] - bbox[0], bbox[3] - bbox[1], 3))
    else:
        cross = cross.reshape(res_y, res_x, 3)
    cross = np.nan_to_num(cross)
    return cross
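A hedged usage sketch for get_normal, with made-up pinhole intrinsics and a synthetic depth map (all values are illustrative only):

import numpy as np

depth = np.random.uniform(0.5, 2.0, (480, 640)).astype(np.float32)
depth[100:120, 200:240] = 0.0  # simulated holes, filled by the refine branch

# fx, fy, cx, cy are assumed camera intrinsics in pixels.
normals = get_normal(depth, fx=572.4, fy=573.6, cx=325.3, cy=242.0, refine=True)
print(normals.shape)  # (480, 640, 3): one normal vector per pixel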
Example 9: process_depth_image
# Required import: import cv2 [as alias]
# Or: from cv2 import INPAINT_NS [as alias]
def process_depth_image(depth, crop_size, out_size=300, return_mask=False, crop_y_offset=0):
    imh, imw = depth.shape

    with TimeIt('Process Depth Image'):
        with TimeIt('Crop'):
            # Crop.
            depth_crop = depth[(imh - crop_size) // 2 - crop_y_offset:(imh - crop_size) // 2 + crop_size - crop_y_offset,
                               (imw - crop_size) // 2:(imw - crop_size) // 2 + crop_size]

        # Inpaint
        # OpenCV inpainting does weird things at the border.
        with TimeIt('Inpainting_Processing'):
            depth_crop = cv2.copyMakeBorder(depth_crop, 1, 1, 1, 1, cv2.BORDER_DEFAULT)
            depth_nan_mask = np.isnan(depth_crop).astype(np.uint8)

            kernel = np.ones((3, 3), np.uint8)
            depth_nan_mask = cv2.dilate(depth_nan_mask, kernel, iterations=1)

            depth_crop[depth_nan_mask == 1] = 0

            # Scale to keep as float, but has to be in bounds -1:1 to keep opencv happy.
            depth_scale = np.abs(depth_crop).max()
            depth_crop = depth_crop.astype(np.float32) / depth_scale  # Has to be float32, 64 not supported.

        with TimeIt('Inpainting'):
            depth_crop = cv2.inpaint(depth_crop, depth_nan_mask, 1, cv2.INPAINT_NS)

        # Back to original size and value range.
        depth_crop = depth_crop[1:-1, 1:-1]
        depth_crop = depth_crop * depth_scale

        with TimeIt('Resizing'):
            # Resize
            depth_crop = cv2.resize(depth_crop, (out_size, out_size), interpolation=cv2.INTER_AREA)

        if return_mask:
            with TimeIt('Return Mask'):
                depth_nan_mask = depth_nan_mask[1:-1, 1:-1]
                depth_nan_mask = cv2.resize(depth_nan_mask, (out_size, out_size), interpolation=cv2.INTER_NEAREST)
            return depth_crop, depth_nan_mask
        else:
            return depth_crop
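A usage sketch that works for either version of process_depth_image, assuming a TimeIt context manager is available in the same module and that the depth frame stores invalid pixels as NaN:

import numpy as np

depth = np.random.uniform(0.4, 1.5, (480, 640)).astype(np.float32)
depth[200:220, 300:330] = np.nan  # simulated sensor dropout

crop, nan_mask = process_depth_image(depth, crop_size=400, out_size=300, return_mask=True)
print(crop.shape, nan_mask.shape)  # (300, 300) each, with the NaN region filled in crop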