This article collects typical usage examples of the cv2.floodFill method in Python. If you are unsure what cv2.floodFill does, how to call it, or what it looks like in practice, the curated method examples below may help. You can also explore further usage examples from the cv2 module to which this method belongs.
Shown below are 14 code examples of the cv2.floodFill method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: fillhole
# Required import: import cv2 [as alias]
# Or: from cv2 import floodFill [as alias]
def fillhole(input_image):
    '''
    Fill the holes in a grayscale binary image using the flood-fill method.
    Note: only holes enclosed by connected regions will be filled.
    :param input_image: binary input image (cv2 is imported as cv, numpy as np)
    :return: the hole-filled image
    '''
    im_flood_fill = input_image.copy()
    h, w = input_image.shape[:2]
    # floodFill requires a mask that is 2 pixels larger than the image
    mask = np.zeros((h + 2, w + 2), np.uint8)
    im_flood_fill = im_flood_fill.astype("uint8")
    # flood the background, starting from the top-left corner
    cv.floodFill(im_flood_fill, mask, (0, 0), 255)
    # invert: now only the enclosed holes are white
    im_flood_fill_inv = cv.bitwise_not(im_flood_fill)
    # OR the holes back onto the original image
    img_out = input_image | im_flood_fill_inv
    return img_out
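A minimal usage sketch of the fillhole helper above; the file names and threshold value are placeholders chosen for illustration, not taken from the original project.

import cv2 as cv
import numpy as np

gray = cv.imread("shapes.png", cv.IMREAD_GRAYSCALE)  # placeholder input image
_, binary = cv.threshold(gray, 127, 255, cv.THRESH_BINARY)
filled = fillhole(binary)
cv.imwrite("shapes_filled.png", filled)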
Example 2: flood_fill_single
# Required import: import cv2 [as alias]
# Or: from cv2 import floodFill [as alias]
def flood_fill_single(im, seed_point):
    """Perform a single flood-fill operation.
    # Arguments
        im: the input image. It should consist of a white background, black lines and black fills;
            the white areas are unfilled, the black areas are filled.
        seed_point: seed point for the flood fill, a tuple (integer, integer).
    # Returns
        The image after filling.
    """
    pass1 = np.full(im.shape, 255, np.uint8)
    im_inv = cv2.bitwise_not(im)
    # the inverted lines act as barriers in the flood-fill mask
    mask1 = cv2.copyMakeBorder(im_inv, 1, 1, 1, 1, cv2.BORDER_CONSTANT, 0)
    _, pass1, _, _ = cv2.floodFill(pass1, mask1, seed_point, 0, 0, 0, 4)
    return pass1
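A minimal usage sketch of flood_fill_single on a synthetic line drawing; the canvas size, rectangle and seed point are arbitrary values chosen for illustration.

import cv2
import numpy as np

im = np.full((100, 100), 255, np.uint8)       # white canvas
cv2.rectangle(im, (20, 20), (80, 80), 0, 2)   # black rectangle outline
fill = flood_fill_single(im, (50, 50))        # the region inside the rectangle is filled with 0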
Example 3: remove_remains
# Required import: import cv2 [as alias]
# Or: from cv2 import floodFill [as alias]
def remove_remains(img, interest_point):
    """
    Remove residual regions that are not connected to interest_point.
    :param img: input binary image
    :param interest_point: point inside the region we want to keep
    :return: image containing only the region connected to interest_point
    """
    img = img.astype(np.uint8)
    h, w = img.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)
    img_inv = img.copy()
    # zero out the connected component that contains interest_point
    cv2.floodFill(img_inv, mask, tuple(interest_point), 0)
    # subtracting leaves only that component in img
    img -= img_inv
    return img
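A minimal usage sketch of remove_remains with two synthetic blobs; only the blob containing the given point survives. The sizes and coordinates are arbitrary illustration values.

import cv2
import numpy as np

img = np.zeros((100, 100), np.uint8)
cv2.circle(img, (30, 30), 10, 255, -1)   # blob we want to keep
cv2.circle(img, (70, 70), 10, 255, -1)   # unconnected blob to be removed
kept = remove_remains(img, (30, 30))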
Example 4: SegmentArm
# Required import: import cv2 [as alias]
# Or: from cv2 import floodFill [as alias]
def SegmentArm(self, frame):
    """ Segments the arm region based on depth """
    # find the central (roughly 21x21 pixel) region of the image frame
    centerHalf = 10  # half-width of the centre patch
    center = frame[self.imgHeight // 2 - centerHalf:self.imgHeight // 2 + centerHalf,
                   self.imgWidth // 2 - centerHalf:self.imgWidth // 2 + centerHalf]
    # find the median depth value of the centre region
    center = np.reshape(center, np.prod(center.shape))
    medVal = np.median(center)
    # keep only pixels whose depth is within absDepthDev of that median
    absDepthDev = 14
    frame = np.where(abs(frame - medVal) <= absDepthDev, 128, 0).astype(np.uint8)
    # morphological closing to remove small holes
    kernel = np.ones((3, 3), np.uint8)
    frame = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, kernel)
    # connected component: force the centre patch into the region,
    # then flood fill from the image centre
    smallKernel = 3
    frame[self.imgHeight // 2 - smallKernel:self.imgHeight // 2 + smallKernel,
          self.imgWidth // 2 - smallKernel:self.imgWidth // 2 + smallKernel] = 128
    mask = np.zeros((self.imgHeight + 2, self.imgWidth + 2), np.uint8)
    flood = frame.copy()
    cv2.floodFill(flood, mask, (self.imgWidth // 2, self.imgHeight // 2), 255, flags=4 | (255 << 8))
    ret, flooded = cv2.threshold(flood, 129, 255, cv2.THRESH_BINARY)
    return flooded
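A standalone sketch of the segmentation idea above, using a synthetic "depth" frame instead of a camera stream; the class attributes imgHeight/imgWidth are replaced by local h/w, and all values are made up for illustration.

import cv2
import numpy as np

h, w = 240, 320
frame = np.zeros((h, w), np.uint8)
frame[60:180, 100:220] = 128   # pretend this blob is the arm (already thresholded to 128)
frame[10:30, 10:30] = 128      # an unconnected blob at the same depth

mask = np.zeros((h + 2, w + 2), np.uint8)
flood = frame.copy()
# fill the component that contains the image centre with 255
cv2.floodFill(flood, mask, (w // 2, h // 2), 255, flags=4 | (255 << 8))
# keep only the flooded component; the unconnected blob stays at 128 and is discarded
_, segmented = cv2.threshold(flood, 129, 255, cv2.THRESH_BINARY)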
Example 5: update
# Required import: import cv2 [as alias]
# Or: from cv2 import floodFill [as alias]
def update(dummy=None):
    if seed_pt is None:
        cv2.imshow('floodfill', img)
        return
    flooded = img.copy()
    mask[:] = 0
    lo = cv2.getTrackbarPos('lo', 'floodfill')
    hi = cv2.getTrackbarPos('hi', 'floodfill')
    flags = connectivity
    if fixed_range:
        flags |= cv2.FLOODFILL_FIXED_RANGE
    cv2.floodFill(flooded, mask, seed_pt, (255, 255, 255), (lo,)*3, (hi,)*3, flags)
    cv2.circle(flooded, seed_pt, 2, (0, 0, 255), -1)
    cv2.imshow('floodfill', flooded)
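This callback comes from an interactive flood-fill demo and relies on module-level state (img, mask, seed_pt, connectivity, fixed_range). Below is a minimal sketch of the surrounding setup it assumes; the file name and the onmouse handler are placeholders, not the original script.

import cv2
import numpy as np

img = cv2.imread("input.png")            # placeholder file name
h, w = img.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
seed_pt = None
fixed_range = True
connectivity = 4

def onmouse(event, x, y, flags, param):
    global seed_pt
    if event == cv2.EVENT_LBUTTONDOWN:
        seed_pt = (x, y)
        update()

cv2.namedWindow('floodfill')
cv2.createTrackbar('lo', 'floodfill', 20, 255, update)
cv2.createTrackbar('hi', 'floodfill', 20, 255, update)
cv2.setMouseCallback('floodfill', onmouse)
update()
cv2.waitKey(0)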
Example 6: find_all_results
# Required import: import cv2 [as alias]
# Or: from cv2 import floodFill [as alias]
def find_all_results(self):
    """Find multiple target regions based on template matching."""
    # Step 1: validate the input images
    check_source_larger_than_search(self.im_source, self.im_search)
    # Step 2: compute the template-matching result matrix res
    res = self._get_template_result_matrix()
    # Step 3: extract the matches one by one
    result = []
    h, w = self.im_search.shape[:2]
    while True:
        # take the best value remaining in the result matrix
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        # compute the confidence:
        confidence = self._get_confidence_from_matrix(max_loc, max_val, w, h)
        if confidence < self.threshold or len(result) > self.MAX_RESULT_COUNT:
            break
        # compute the match location: target centre + target rectangle:
        middle_point, rectangle = self._get_target_rectangle(max_loc, w, h)
        one_good_match = generate_result(middle_point, rectangle, confidence)
        result.append(one_good_match)
        # mask out the best match just extracted, then keep searching in the next iteration:
        # cv2.floodFill(res, None, max_loc, (-1000,), max(max_val, 0), flags=cv2.FLOODFILL_FIXED_RANGE)
        cv2.rectangle(res, (int(max_loc[0] - w / 2), int(max_loc[1] - h / 2)), (int(max_loc[0] + w / 2), int(max_loc[1] + h / 2)), (0, 0, 0), -1)
    return result if result else None
Example 7: fill_hole
# Required import: import cv2 [as alias]
# Or: from cv2 import floodFill [as alias]
def fill_hole(input_mask):
    h, w = input_mask.shape
    # pad the mask by 1 pixel so the border is guaranteed to be background
    canvas = np.zeros((h + 2, w + 2), np.uint8)
    canvas[1:h + 1, 1:w + 1] = input_mask.copy()
    # floodFill needs a mask 2 pixels larger than the (padded) canvas
    mask = np.zeros((h + 4, w + 4), np.uint8)
    cv2.floodFill(canvas, mask, (0, 0), 1)
    canvas = canvas[1:h + 1, 1:w + 1].astype(bool)  # True wherever the background flood reached
    # pixels not reached by the flood fill are holes; OR them back into the input mask
    return (~canvas | input_mask.astype(np.uint8))
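A minimal usage sketch of fill_hole on a 0/1 ring mask; the canvas size and radius are arbitrary illustration values.

import cv2
import numpy as np

ring = np.zeros((64, 64), np.uint8)
cv2.circle(ring, (32, 32), 20, 1, 3)   # 0/1 mask of a ring outline
solid = fill_hole(ring)                # 0/1 mask of the filled disc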
Example 8: trapped_ball_fill_single
# Required import: import cv2 [as alias]
# Or: from cv2 import floodFill [as alias]
def trapped_ball_fill_single(image, seed_point, radius):
    """Perform a single trapped-ball fill operation.
    # Arguments
        image: the input image. It should consist of a white background, black lines and black fills;
            the white areas are unfilled, the black areas are filled.
        seed_point: seed point for the trapped-ball fill, a tuple (integer, integer).
        radius: radius of the ball-shaped structuring element.
    # Returns
        The image after filling.
    """
    ball = get_ball_structuring_element(radius)
    pass1 = np.full(image.shape, 255, np.uint8)
    pass2 = np.full(image.shape, 255, np.uint8)
    im_inv = cv2.bitwise_not(image)
    # Flood fill the image.
    mask1 = cv2.copyMakeBorder(im_inv, 1, 1, 1, 1, cv2.BORDER_CONSTANT, 0)
    _, pass1, _, _ = cv2.floodFill(pass1, mask1, seed_point, 0, 0, 0, 4)
    # Perform dilation on the image: fill areas connected only through small gaps become disconnected.
    pass1 = cv2.morphologyEx(pass1, cv2.MORPH_DILATE, ball, anchor=(-1, -1), iterations=1)
    mask2 = cv2.copyMakeBorder(pass1, 1, 1, 1, 1, cv2.BORDER_CONSTANT, 0)
    # Flood fill with the seed point again to select a single fill area.
    _, pass2, _, rect = cv2.floodFill(pass2, mask2, seed_point, 0, 0, 0, 4)
    # Perform erosion on the fill result to obtain a leak-proof fill.
    pass2 = cv2.morphologyEx(pass2, cv2.MORPH_ERODE, ball, anchor=(-1, -1), iterations=1)
    return pass2
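The helper get_ball_structuring_element is not shown in this example. A plausible sketch of it, assuming the "ball" is simply a circular (elliptical) structuring element of the given radius; this is an assumption, not the original definition.

import cv2

def get_ball_structuring_element(radius):
    # assumed: elliptical kernel with diameter 2 * radius + 1
    return cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * radius + 1, 2 * radius + 1))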
Example 9: compute_dismap
# Required import: import cv2 [as alias]
# Or: from cv2 import floodFill [as alias]
# Also requires: import numpy as np; from copy import deepcopy
def compute_dismap(dismap, bbox):
    x_min, y_min, x_max, y_max = bbox[:]
    # draw bounding box
    cv2.line(dismap, (x_min, y_min), (x_max, y_min), color=1, thickness=1)
    cv2.line(dismap, (x_min, y_min), (x_min, y_max), color=1, thickness=1)
    cv2.line(dismap, (x_max, y_max), (x_max, y_min), color=1, thickness=1)
    cv2.line(dismap, (x_max, y_max), (x_min, y_max), color=1, thickness=1)
    tmp = (dismap > 0).astype(np.uint8)  # mark boundary
    tmp_ = deepcopy(tmp)
    fill_mask = np.ones((tmp.shape[0] + 2, tmp.shape[1] + 2)).astype(np.uint8)
    fill_mask[1:-1, 1:-1] = tmp_
    cv2.floodFill(tmp_, fill_mask, (int((x_min + x_max) / 2), int((y_min + y_max) / 2)), 5)  # fill pixels inside bounding box
    tmp_ = tmp_.astype(np.int8)
    tmp_[tmp_ == 5] = -1  # pixels inside bounding box
    tmp_[tmp_ == 0] = 1   # pixels on and outside bounding box
    tmp = (tmp == 0).astype(np.uint8)
    dismap = cv2.distanceTransform(tmp, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)  # compute distance inside and outside bounding box
    dismap = tmp_ * dismap + 128
    dismap[dismap > 255] = 255
    dismap[dismap < 0] = 0
    dismap = dismap.astype(np.uint8)
    return dismap
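A minimal usage sketch of compute_dismap; the canvas size and bounding box are arbitrary illustration values.

import numpy as np

dismap = np.zeros((128, 128), np.float32)
bbox = (20, 30, 100, 90)               # x_min, y_min, x_max, y_max
dismap = compute_dismap(dismap, bbox)  # uint8 distance map centred on 128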
Example 10: get_mask
# Required import: import cv2 [as alias]
# Or: from cv2 import floodFill [as alias]
# Also requires: import numpy as np; import matplotlib.pyplot as plt; a get_seed helper
def get_mask(ucm, viz=False):
    ucm = ucm.copy()
    h, w = ucm.shape[:2]
    mask = np.zeros((h - 2, w - 2), 'float32')
    i = 0
    sx, sy = np.where(mask == 0)
    seed = get_seed(sx, sy, ucm)
    areas = []
    labels = []
    while seed is not None and i < 1000:
        # the UCM itself acts as the flood-fill mask, so region boundaries block the fill
        cv2.floodFill(mask, ucm, seed, i + 1)
        # calculate the area (no. of pixels):
        areas.append(np.sum(mask == i + 1))
        labels.append(i + 1)
        # get the location of the next seed:
        sx, sy = np.where(mask == 0)
        seed = get_seed(sx, sy, ucm)
        i += 1
    print(" > terminated in %d steps" % i)
    if viz:
        plt.imshow(mask)
        plt.show()
    return mask, np.array(areas), np.array(labels)
Example 11: _get_traffic_light_channel_from_top_down_rgb
# Required import: import cv2 [as alias]
# Or: from cv2 import floodFill [as alias]
def _get_traffic_light_channel_from_top_down_rgb(
        self, img, tl_bbox_colors=[[200, 0, 0], [13, 0, 196], [5, 200, 0]]):
    """
    Returns a mask of the traffic light extent bounding boxes seen from a
    top-down view. The bounding boxes in the mask are colored differently
    depending on the state of each traffic light.
    *Note: Not sure why the colors do not match up with the original colors
    the boxes are drawn with. The default colors are estimates learned by
    examining the output of the top down camera.
    Args:
        img: Top-down RGB frame with traffic light extent bboxes drawn.
        tl_bbox_colors: The colors of the traffic light extent bboxes.
    """
    if tl_bbox_colors is None:
        tl_bbox_colors = TL_STATE_TO_PIXEL_COLOR.values()
    h, w = img.shape[:2]
    tl_mask = np.zeros((h + 2, w + 2), np.uint8)
    # Grayscale values for different traffic light states
    vals = [33, 66, 99]
    for i, bbox_color in enumerate(tl_bbox_colors):
        tl_mask_for_bbox_color = np.zeros((h + 2, w + 2), np.uint8)
        # Using a tolerance of 20 to locate correct boxes.
        mask = np.all(abs(img - bbox_color) < 20, axis=2).astype(np.uint8)
        # Flood fill from (0, 0) corner.
        cv2.floodFill(mask, tl_mask_for_bbox_color, (0, 0), 1)
        # Invert image so mask highlights lights.
        tl_mask_for_bbox_color = 1 - tl_mask_for_bbox_color
        tl_mask += tl_mask_for_bbox_color * vals[i]
    # Remove extra rows and cols added for floodFill.
    return tl_mask[1:-1, 1:-1]
Example 12: _mouse_callback
# Required import: import cv2 [as alias]
# Or: from cv2 import floodFill [as alias]
def _mouse_callback(self, event, x, y, flags, *userdata):
    if event != cv.EVENT_LBUTTONDOWN:
        return
    modifier = flags & (ALT_KEY + SHIFT_KEY)
    self._flood_mask[:] = 0
    cv.floodFill(
        self.img,
        self._flood_mask,
        (x, y),
        0,
        self.tolerance,
        self.tolerance,
        self._flood_fill_flags,
    )
    # drop the 1-pixel border that floodFill requires around its mask
    flood_mask = self._flood_mask[1:-1, 1:-1].copy()
    # ALT+SHIFT: intersect, SHIFT: add, ALT: subtract, no modifier: replace
    if modifier == (ALT_KEY + SHIFT_KEY):
        self.mask = cv.bitwise_and(self.mask, flood_mask)
    elif modifier == SHIFT_KEY:
        self.mask = cv.bitwise_or(self.mask, flood_mask)
    elif modifier == ALT_KEY:
        self.mask = cv.bitwise_and(self.mask, cv.bitwise_not(flood_mask))
    else:
        self.mask = flood_mask
    self._update()
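This callback depends on class state and two key-modifier constants defined elsewhere in its module. A plausible sketch of those pieces, assuming OpenCV's mouse-event flag constants and a mask-only, fixed-range flood-fill configuration; these exact values are assumptions, not taken from the original code.

import cv2 as cv

# Assumed modifier constants (OpenCV mouse-event flag values).
ALT_KEY = cv.EVENT_FLAG_ALTKEY
SHIFT_KEY = cv.EVENT_FLAG_SHIFTKEY

# Assumed flood-fill flags: 4-connectivity, compare against the seed pixel,
# write 255 into the mask only and leave the image untouched.
_flood_fill_flags = 4 | cv.FLOODFILL_FIXED_RANGE | cv.FLOODFILL_MASK_ONLY | (255 << 8)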
Example 13: find_largest_feature
# Required import: import cv2 [as alias]
# Or: from cv2 import floodFill [as alias]
def find_largest_feature(inp_img, scan_tl=None, scan_br=None):
    """
    Uses the fact that the `floodFill` function returns a bounding box of the area it filled to find the
    biggest connected pixel structure in the image. Fills this structure in white, reducing the rest to black.
    """
    img = inp_img.copy()  # Copy the image, leaving the original untouched
    height, width = img.shape[:2]
    max_area = 0
    seed_point = (None, None)
    if scan_tl is None:
        scan_tl = [0, 0]
    if scan_br is None:
        scan_br = [width, height]
    # Loop through the image
    for x in range(scan_tl[0], scan_br[0]):
        for y in range(scan_tl[1], scan_br[1]):
            # Only operate on light or white squares
            if img.item(y, x) == 255 and x < width and y < height:  # Note that .item() appears to take input as y, x
                area = cv2.floodFill(img, None, (x, y), 64)
                if area[0] > max_area:  # Gets the maximum bound area, which should be the grid
                    max_area = area[0]
                    seed_point = (x, y)
    # Colour everything grey (compensates for features outside of our middle scanning range)
    for x in range(width):
        for y in range(height):
            if img.item(y, x) == 255 and x < width and y < height:
                cv2.floodFill(img, None, (x, y), 64)
    mask = np.zeros((height + 2, width + 2), np.uint8)  # Mask that is 2 pixels bigger than the image
    # Highlight the main feature
    if all([p is not None for p in seed_point]):
        cv2.floodFill(img, mask, seed_point, 255)
    top, bottom, left, right = height, 0, width, 0
    for x in range(width):
        for y in range(height):
            if img.item(y, x) == 64:  # Hide anything that isn't the main feature
                cv2.floodFill(img, mask, (x, y), 0)
            # Find the bounding parameters
            if img.item(y, x) == 255:
                top = y if y < top else top
                bottom = y if y > bottom else bottom
                left = x if x < left else left
                right = x if x > right else right
    bbox = [[left, top], [right, bottom]]
    return img, np.array(bbox, dtype='float32'), seed_point
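A minimal usage sketch of find_largest_feature on a thresholded puzzle image, where the grid outline is expected to be the largest white structure; the file name and threshold parameters are placeholders.

import cv2
import numpy as np

gray = cv2.imread("sudoku.png", cv2.IMREAD_GRAYSCALE)   # placeholder file name
binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY_INV, 11, 2)
feature, bbox, seed = find_largest_feature(binary)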
Example 14: find_all_template
# Required import: import cv2 [as alias]
# Or: from cv2 import floodFill [as alias]
def find_all_template(im_source, im_search, threshold=0.8, rgb=False, max_count=10):
    """Return all template-matching results for the given images and parameters."""
    # Step 1: validate the input images
    check_source_larger_than_search(im_source, im_search)
    # Step 2: compute the template-matching result matrix res
    res = _get_template_result_matrix(im_source, im_search)
    # Step 3: extract the matches one by one
    result = []
    h, w = im_search.shape[:2]
    while True:
        # take the best value remaining in the result matrix
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        # compute the confidence:
        confidence = _get_confidence_from_matrix(im_source, im_search, max_loc, max_val, w, h, rgb)
        if confidence < threshold or len(result) > max_count:
            break
        # compute the match location: target centre + target rectangle:
        middle_point, rectangle = _get_target_rectangle(max_loc, w, h)
        one_good_match = generate_result(middle_point, rectangle, confidence)
        result.append(one_good_match)
        # mask out the best match just extracted, then keep searching in the next iteration:
        # cv2.floodFill(res, None, max_loc, (-1000,), max(max_val, 0), flags=cv2.FLOODFILL_FIXED_RANGE)
        cv2.rectangle(res, (int(max_loc[0] - w / 2), int(max_loc[1] - h / 2)), (int(max_loc[0] + w / 2), int(max_loc[1] + h / 2)), (0, 0, 0), -1)
    return result if result else None
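A minimal usage sketch of find_all_template; the file names are placeholders, and each returned match is whatever generate_result produces (centre point, rectangle and confidence), as used above.

import cv2

im_source = cv2.imread("screen.png")   # placeholder file names
im_search = cv2.imread("button.png")
matches = find_all_template(im_source, im_search, threshold=0.8, rgb=True)
if matches:
    for match in matches:
        print(match)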