This article collects typical usage examples of the cv2.MORPH_ELLIPSE attribute in Python. If you are wondering what cv2.MORPH_ELLIPSE does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the cv2 module.
The following presents 6 code examples of the cv2.MORPH_ELLIPSE attribute, sorted by popularity by default.
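Before diving into the examples, here is a minimal sketch (not taken from any of the examples below) of what the flag itself does: cv2.MORPH_ELLIPSE is one of the shape flags accepted by cv2.getStructuringElement, and it produces an elliptical structuring element.
import cv2

# Build a 5x5 elliptical structuring element and inspect it.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
print(kernel)
# The printed array holds 1s arranged in an ellipse and 0s elsewhere,
# in contrast to cv2.MORPH_RECT (all 1s) and cv2.MORPH_CROSS (a plus shape).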
Example 1: _morphological_process
# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_ELLIPSE [as alias]
# Also required: import numpy as np
def _morphological_process(image, kernel_size=5):
    """
    Morphological processing to fill holes in a binary segmentation result.
    :param image: single-channel binary segmentation result
    :param kernel_size: size of the elliptical structuring element
    :return: the closed (hole-filled) image
    """
    if len(image.shape) == 3:
        raise ValueError('Binary segmentation result image should be a single channel image')
    if image.dtype != np.uint8:
        image = np.array(image, np.uint8)
    kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size, kernel_size))
    # The close operation fills holes
    closing = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel, iterations=1)
    return closing
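A hedged usage sketch (not part of the original example): the function expects a single-channel binary mask, for instance the output of cv2.threshold; the file name below is hypothetical.
import cv2
import numpy as np

# Hypothetical input: a grayscale segmentation result loaded from disk.
mask = cv2.imread('segmentation_mask.png', cv2.IMREAD_GRAYSCALE)
_, binary = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)

# Close small holes in the binary mask with a 5x5 elliptical kernel.
filled = _morphological_process(binary, kernel_size=5)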
Example 2: _morphological_process
# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_ELLIPSE [as alias]
# Also required: import numpy as np
def _morphological_process(image, kernel_size=5):
    """
    Morphological closing to fill holes; color input is converted to grayscale first.
    :param image: input image (grayscale or BGR)
    :param kernel_size: size of the elliptical structuring element
    :return: the closed (hole-filled) image
    """
    if image.dtype != np.uint8:
        image = np.array(image, np.uint8)
    if len(image.shape) == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size, kernel_size))
    # The close operation fills holes
    closing = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel, iterations=1)
    return closing
Example 3: border
# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_ELLIPSE [as alias]
def border(self, alpha, size, kernel_type='RECT'):
    kdict = {'RECT': cv2.MORPH_RECT, 'ELLIPSE': cv2.MORPH_ELLIPSE,
             'CROSS': cv2.MORPH_CROSS}
    kernel = cv2.getStructuringElement(kdict[kernel_type], (size, size))
    border = cv2.dilate(alpha, kernel, iterations=1)  # - alpha
    return border
Example 4: border
# Required import: import cv2 [as alias] (this example uses the alias cv)
# Or: from cv2 import MORPH_ELLIPSE [as alias]
def border(self, alpha, size, kernel_type='RECT'):
    """
    alpha : alpha layer of the text
    size : size of the kernel
    kernel_type : one of ['RECT', 'ELLIPSE', 'CROSS']
    @return : alpha layer of the border (color to be added externally).
    """
    kdict = {'RECT': cv.MORPH_RECT, 'ELLIPSE': cv.MORPH_ELLIPSE,
             'CROSS': cv.MORPH_CROSS}
    kernel = cv.getStructuringElement(kdict[kernel_type], (size, size))
    border = cv.dilate(alpha, kernel, iterations=1)  # - alpha
    return border
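A hedged standalone sketch of the same idea (the alpha mask below is synthetic, not from the original example): dilating the alpha layer with an elliptical kernel grows the glyphs outward, and subtracting the original alpha, as the commented-out "- alpha" hints, would leave only the border ring.
import cv2
import numpy as np

# Synthetic alpha layer: a white rectangle standing in for rendered text.
alpha = np.zeros((100, 300), dtype=np.uint8)
cv2.rectangle(alpha, (40, 30), (260, 70), 255, thickness=-1)

# Grow the mask with a 5x5 elliptical kernel, as border() does internally.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
dilated = cv2.dilate(alpha, kernel, iterations=1)

# Subtracting the original alpha keeps only the outline ring.
border_ring = cv2.subtract(dilated, alpha)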
Example 5: build_kernel
# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_ELLIPSE [as alias]
def build_kernel(kernel_type, kernel_size):
    """Creates the specified kernel: MORPH_ELLIPSE, MORPH_CROSS or MORPH_RECT"""
    if kernel_type == cv2.MORPH_ELLIPSE:
        # We build an elliptical kernel
        return cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernel_size)
    elif kernel_type == cv2.MORPH_CROSS:
        # We build a cross-shaped kernel
        return cv2.getStructuringElement(cv2.MORPH_CROSS, kernel_size)
    else:  # cv2.MORPH_RECT
        # We build a rectangular kernel
        return cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
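A hedged usage sketch (the kernel shape and size below are arbitrary choices): since build_kernel passes kernel_size straight to cv2.getStructuringElement, it must already be a (width, height) tuple.
import cv2

# Build an elliptical 7x7 kernel.
kernel = build_kernel(cv2.MORPH_ELLIPSE, (7, 7))

# The kernel can then feed any morphological operation, e.g. erosion:
# eroded = cv2.erode(image, kernel, iterations=1)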
Example 6: filter_lane_points
# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_ELLIPSE [as alias]
# Also required: import numpy as np
def filter_lane_points(self,
                       img,
                       filter_type='bilateral',
                       ksize_r=25,
                       C_r=8,
                       ksize_b=35,
                       C_b=5,
                       mask_noise=False,
                       ksize_noise=65,
                       C_noise=10,
                       noise_thresh=135):
    '''
    Filter an image to isolate lane lines and return a binary version.
    All image color space conversion, thresholding, filtering and morphing
    happens inside this method. It takes an RGB color image as input and
    returns a binary filtered version.
    '''
    # Define structuring elements for cv2 functions
    strel_lab_b = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(55, 55))
    strel_rgb_r = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(29, 29))
    strel_open = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(5, 5))
    # Extract RGB R-channel and LAB B-channel
    rgb_r_channel = img[:, :, 0]
    lab_b_channel = (cv2.cvtColor(img, cv2.COLOR_RGB2LAB))[:, :, 2]
    # Apply tophat morphology
    rgb_r_tophat = cv2.morphologyEx(rgb_r_channel, cv2.MORPH_TOPHAT, strel_rgb_r, iterations=1)
    lab_b_tophat = cv2.morphologyEx(lab_b_channel, cv2.MORPH_TOPHAT, strel_lab_b, iterations=1)
    if filter_type == 'bilateral':
        # Apply bilateral adaptive color thresholding
        rgb_r_thresh = bilateral_adaptive_threshold(rgb_r_tophat, ksize=ksize_r, C=C_r)
        lab_b_thresh = bilateral_adaptive_threshold(lab_b_tophat, ksize=ksize_b, C=C_b)
    elif filter_type == 'neighborhood':
        rgb_r_thresh = cv2.adaptiveThreshold(rgb_r_channel, 255, adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C, thresholdType=cv2.THRESH_BINARY, blockSize=ksize_r, C=-C_r)
        lab_b_thresh = cv2.adaptiveThreshold(lab_b_channel, 255, adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C, thresholdType=cv2.THRESH_BINARY, blockSize=ksize_b, C=-C_b)
    else:
        raise ValueError("Unexpected filter mode. Expected modes are 'bilateral' or 'neighborhood'.")
    if mask_noise:  # Merge both color channels and the noise mask
        # Create a mask to filter out noise such as trees and other greenery based on the LAB B-channel
        noise_mask_part1 = cv2.inRange(lab_b_channel, noise_thresh, 255)  # This catches the noise, but unfortunately also the yellow line, therefore...
        noise_mask_part2 = bilateral_adaptive_threshold(lab_b_channel, ksize=ksize_noise, C=C_noise)  # ...this brings the yellow line back...
        noise_bool = np.logical_or(np.logical_not(noise_mask_part1), noise_mask_part2)  # ...once we combine the two.
        noise_mask = np.zeros_like(rgb_r_channel, dtype=np.uint8)
        noise_mask[noise_bool] = 255
        merged_bool = np.logical_and(np.logical_or(rgb_r_thresh, lab_b_thresh), noise_mask)
        merged = np.zeros_like(rgb_r_channel, dtype=np.uint8)
        merged[merged_bool] = 255
    else:  # Only merge the two color channels
        merged_bool = np.logical_or(rgb_r_thresh, lab_b_thresh)
        merged = np.zeros_like(rgb_r_channel, dtype=np.uint8)
        merged[merged_bool] = 255
    # Apply open morphology
    opened = cv2.morphologyEx(merged, cv2.MORPH_OPEN, strel_open, iterations=1)
    return opened
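A hedged usage sketch: the method above belongs to a lane-detection class and relies on an external bilateral_adaptive_threshold helper that is not shown here, so the detector object and image path below are hypothetical.
import cv2

img_bgr = cv2.imread('road_frame.jpg')              # hypothetical input frame
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)  # the method expects RGB input

# `detector` stands in for an instance of the (unshown) lane-detection class.
binary = detector.filter_lane_points(img_rgb, filter_type='bilateral', mask_noise=True)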