This article collects typical usage examples of the cv2.getStructuringElement method in Python. If you have been wondering what cv2.getStructuringElement does, how to call it, or what real-world uses look like, the curated code samples below may help. You can also explore further usage examples from the cv2 module, to which this method belongs.
Fifteen code examples of cv2.getStructuringElement are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
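Before the examples, a quick illustration of the method itself: cv2.getStructuringElement(shape, ksize) returns a small uint8 kernel that is then passed to cv2.erode, cv2.dilate or cv2.morphologyEx. The following minimal sketch (not taken from the examples below) simply prints the three built-in kernel shapes.

import cv2

# The three built-in structuring-element shapes
rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))        # 5x5 block of ones
ellipse_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))  # filled ellipse
cross_kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))      # cross through the center

print(rect_kernel)
print(ellipse_kernel)
print(cross_kernel)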
Example 1: find_components
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def find_components(im, max_components=16):
    """Dilate the image until there are just a few connected components.
    Returns contours for these components."""
    # dilate() and find_likely_rectangles() are helpers defined elsewhere in the source project
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
    dilation = dilate(im, kernel, 6)

    count = 21
    n = 0
    sigma = 0.000
    while count > max_components:
        n += 1
        sigma += 0.005
        result = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if len(result) == 3:
            _, contours, hierarchy = result
        elif len(result) == 2:
            contours, hierarchy = result
        possible = find_likely_rectangles(contours, sigma)
        count = len(possible)

    return (dilation, possible, n)
Example 2: spline_transform_multi
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def spline_transform_multi(img, mask):
    bimask = mask > 0
    M, N = np.where(bimask)
    w = np.ptp(N) + 1
    h = np.ptp(M) + 1

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    bound = cv2.dilate(bimask.astype('uint8'), kernel) - bimask
    y, x = np.where(bound > 0)

    if x.size > 4:
        # thin_plate_transform is a helper defined elsewhere in the source project
        newxy = thin_plate_transform(x, y, w, h, mask.shape[:2], num_points=5)
        new_img = cv2.remap(img, newxy, None, cv2.INTER_LINEAR)
        new_msk = cv2.remap(mask, newxy, None, cv2.INTER_NEAREST)
    else:
        # too few boundary points to fit the spline; return the inputs unchanged
        new_img = img
        new_msk = mask

    return new_img, new_msk
Example 3: SeamlessClone_trimap
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def SeamlessClone_trimap(srcIm, dstIm, imMask, offX, offY):
    dstIm = dstIm.copy()
    bimsk = imMask > 0

    new_msk = np.zeros(dstIm.shape[:2], dtype='uint8')
    new_msk[offY:offY + imMask.shape[0], offX:offX + imMask.shape[1]] = imMask
    dstIm[new_msk > 0] = srcIm[imMask > 0]

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    bimsk = bimsk.astype('uint8')
    bdmsk = cv2.dilate(bimsk, kernel) - cv2.erode(bimsk, kernel)
    mask255 = bdmsk > 0
    mask255 = (mask255 * 255).astype('uint8')

    offCenter = (int(offX + imMask.shape[1] / 2), int(offY + imMask.shape[0] / 2))

    # When an object has very few pixels, bdmsk can be all zeros, which would make
    # cv2.seamlessClone crash (segmentation fault), so fall back to the pasted image.
    if np.any(bdmsk > 0):
        outputIm = cv2.seamlessClone(srcIm, dstIm, mask255, offCenter, cv2.MIXED_CLONE)
    else:
        outputIm = dstIm

    return outputIm, new_msk
Example 4: _morphological_process
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def _morphological_process(image, kernel_size=5):
    """
    Morphological processing to fill holes in a binary segmentation result.
    :param image: single-channel binary image
    :param kernel_size: size of the elliptical structuring element
    :return: image after the closing operation
    """
    if len(image.shape) == 3:
        raise ValueError('Binary segmentation result image should be a single channel image')

    if image.dtype != np.uint8:
        image = np.array(image, np.uint8)

    kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size, kernel_size))

    # the closing operation fills holes
    closing = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel, iterations=1)

    return closing
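A minimal usage sketch for the function above, assuming the input is a single-channel binary mask; the file names are only an illustration, not part of the original example.

import cv2

mask = cv2.imread('binary_mask.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input file
closed = _morphological_process(mask, kernel_size=5)        # fill small holes with a closing
cv2.imwrite('binary_mask_closed.png', closed)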
Example 5: predict0
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def predict0():
    Vnet3d = Vnet3dModule(256, 256, 64, inference=True, model_path="model\\Vnet3dModule.pd")
    for filenumber in range(30):
        batch_xs = np.zeros(shape=(64, 256, 256))
        for index in range(64):
            imgs = cv2.imread(
                "D:\Data\PROMISE2012\Vnet3d_data\\test\image\\" + str(filenumber) + "\\" + str(index) + ".bmp", 0)
            batch_xs[index, :, :] = imgs[128:384, 128:384]

        predictvalue = Vnet3d.prediction(batch_xs)

        for index in range(64):
            result = np.zeros(shape=(512, 512), dtype=np.uint8)
            result[128:384, 128:384] = predictvalue[index]
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
            result = cv2.morphologyEx(result, cv2.MORPH_CLOSE, kernel)
            cv2.imwrite(
                "D:\Data\PROMISE2012\Vnet3d_data\\test\image\\" + str(filenumber) + "\\" + str(index) + "mask.bmp",
                result)
Author: junqiangchen | Project: LiTS---Liver-Tumor-Segmentation-Challenge | Lines: 21 | Source file: vnet3d_train_predict.py
Example 6: update
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def update(dummy=None):
    sz = cv2.getTrackbarPos('op/size', 'morphology')
    iters = cv2.getTrackbarPos('iters', 'morphology')
    opers = cur_mode.split('/')
    if len(opers) > 1:
        sz = sz - 10
        op = opers[sz > 0]
        sz = abs(sz)
    else:
        op = opers[0]
    sz = sz*2+1

    str_name = 'MORPH_' + cur_str_mode.upper()
    oper_name = 'MORPH_' + op.upper()
    st = cv2.getStructuringElement(getattr(cv2, str_name), (sz, sz))
    res = cv2.morphologyEx(img, getattr(cv2, oper_name), st, iterations=iters)

    draw_str(res, (10, 20), 'mode: ' + cur_mode)
    draw_str(res, (10, 40), 'operation: ' + oper_name)
    draw_str(res, (10, 60), 'structure: ' + str_name)
    draw_str(res, (10, 80), 'ksize: %d iters: %d' % (sz, iters))
    cv2.imshow('morphology', res)
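This function is a trackbar callback and is not runnable on its own. Below is a hedged sketch of the setup it appears to expect: the trackbar and window names come from the getTrackbarPos/imshow calls above, while the globals img, cur_mode and cur_str_mode and the draw_str helper are assumed to be defined elsewhere in the same script (as in OpenCV's morphology sample); the initial trackbar values are illustrative.

import cv2

# assumed to exist elsewhere: img, cur_mode, cur_str_mode, draw_str, update
cv2.namedWindow('morphology')
cv2.createTrackbar('op/size', 'morphology', 12, 20, update)  # combined operation/size slider
cv2.createTrackbar('iters', 'morphology', 1, 10, update)     # number of iterations
update()
cv2.waitKey(0)
cv2.destroyAllWindows()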
Example 7: skeletonize
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def skeletonize(image_in):
    '''Takes a grayscale image as input and outputs a binary skeleton image'''
    size = np.size(image_in)
    skel = np.zeros(image_in.shape, np.uint8)

    ret, image_edit = cv2.threshold(image_in, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    done = False

    while not done:
        eroded = cv2.erode(image_edit, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(image_edit, temp)
        skel = cv2.bitwise_or(skel, temp)
        image_edit = eroded.copy()

        zeros = size - cv2.countNonZero(image_edit)
        if zeros == size:
            done = True

    return skel
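A minimal usage sketch for skeletonize, assuming a grayscale input image; the file names are placeholders.

import cv2

gray = cv2.imread('shape.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input file
skeleton = skeletonize(gray)
cv2.imwrite('shape_skeleton.png', skeleton)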
Example 8: blend_non_transparent
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def blend_non_transparent(sprite, background_img):
    gray_overlay = cv2.cvtColor(background_img, cv2.COLOR_BGR2GRAY)
    overlay_mask = cv2.threshold(gray_overlay, 1, 255, cv2.THRESH_BINARY)[1]

    overlay_mask = cv2.erode(overlay_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    overlay_mask = cv2.blur(overlay_mask, (3, 3))

    background_mask = 255 - overlay_mask

    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)

    sprite_part = (sprite * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (background_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))

    return np.uint8(cv2.addWeighted(sprite_part, 255.0, overlay_part, 255.0, 0.0))
Example 9: maximizeContrast
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def maximizeContrast(imgGrayscale):
    height, width = imgGrayscale.shape

    imgTopHat = np.zeros((height, width, 1), np.uint8)
    imgBlackHat = np.zeros((height, width, 1), np.uint8)

    structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

    imgTopHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_TOPHAT, structuringElement)
    imgBlackHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_BLACKHAT, structuringElement)

    imgGrayscalePlusTopHat = cv2.add(imgGrayscale, imgTopHat)
    imgGrayscalePlusTopHatMinusBlackHat = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)

    return imgGrayscalePlusTopHatMinusBlackHat
# end function
Example 10: generate_edge
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def generate_edge(label, edge_width=10, area_thrs=200):
    label_list = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33]
    edge = np.zeros_like(label, dtype=np.uint8)

    for i in np.unique(label):
        # skip ids that have no instance annotation
        if i < 1000 or (i // 1000) not in label_list:
            continue
        # filter out small objects
        mask = (label == i).astype(np.uint8)
        if mask.sum() < area_thrs:
            continue
        rmin, rmax, cmin, cmax = _get_bbox(mask)
        mask_edge = _generate_edge(mask[rmin:rmax+1, cmin:cmax+1])
        edge[rmin:rmax+1, cmin:cmax+1][mask_edge > 0] = 255

    # dilation on the edge map
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (edge_width, edge_width))
    edge = cv2.dilate(edge, kernel)

    return edge
Example 11: sobelOperT
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def sobelOperT(self, img, blursize, morphW, morphH):
    '''
    No different with sobelOper ?
    '''
    blur = cv2.GaussianBlur(img, (blursize, blursize), 0, 0, cv2.BORDER_DEFAULT)

    if len(blur.shape) == 3:
        gray = cv2.cvtColor(blur, cv2.COLOR_RGB2GRAY)
    else:
        gray = blur

    x = cv2.Sobel(gray, cv2.CV_16S, 1, 0, 3)
    absX = cv2.convertScaleAbs(x)
    grad = cv2.addWeighted(absX, 1, 0, 0, 0)

    _, threshold = cv2.threshold(grad, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

    element = cv2.getStructuringElement(cv2.MORPH_RECT, (morphW, morphH))
    threshold = cv2.morphologyEx(threshold, cv2.MORPH_CLOSE, element)

    return threshold
Example 12: colorSearch
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def colorSearch(self, src, color, out_rect):
    """
    :param src: input BGR image
    :param color: color to match (colorMatch is a helper defined elsewhere in the source project)
    :param out_rect: output list of minAreaRect results
    :return: binary image after color matching and closing
    """
    color_morph_width = 10
    color_morph_height = 2

    match_gray = colorMatch(src, color, False)

    _, src_threshold = cv2.threshold(match_gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

    element = cv2.getStructuringElement(cv2.MORPH_RECT, (color_morph_width, color_morph_height))
    src_threshold = cv2.morphologyEx(src_threshold, cv2.MORPH_CLOSE, element)

    out = src_threshold.copy()
    # OpenCV 3.x returns (image, contours, hierarchy) from findContours
    _, contours, _ = cv2.findContours(src_threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    for cnt in contours:
        mr = cv2.minAreaRect(cnt)
        if self.verifySizes(mr):
            out_rect.append(mr)

    return out
Example 13: findPiccircle
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def findPiccircle(frame, color):
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    color_dict = color_list.getColorList()
    mask = cv2.inRange(hsv, color_dict[color][0], color_dict[color][1])
    dilated = cv2.dilate(mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=2)

    ## Adjust minRadius and maxRadius to constrain the size of the detected circles and reject other interference
    circles = cv2.HoughCircles(dilated, cv2.HOUGH_GRADIENT, 1, 1000, param1=15, param2=10, minRadius=15, maxRadius=50)

    center = None
    if circles is not None:
        x, y, radius = circles[0][0]
        center = (int(x), int(y))  # cv2.circle expects integer coordinates

        cv2.circle(frame, center, int(radius), (0, 255, 0), 2)
        cv2.circle(frame, center, 2, (0, 255, 0), -1, 8, 0)
        print('Circle center: {}, {}'.format(x, y))

    cv2.imshow('result', frame)

    if center is not None:
        return center
Example 14: blend_non_transparent
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def blend_non_transparent(face_img, overlay_img):
    # Let's find a mask covering all the non-black (foreground) pixels
    # NB: We need to do this on grayscale version of the image
    gray_overlay = cv2.cvtColor(overlay_img, cv2.COLOR_BGR2GRAY)
    overlay_mask = cv2.threshold(gray_overlay, 1, 255, cv2.THRESH_BINARY)[1]

    # Let's shrink and blur it a little to make the transitions smoother...
    overlay_mask = cv2.erode(overlay_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    overlay_mask = cv2.blur(overlay_mask, (3, 3))

    # And the inverse mask, that covers all the black (background) pixels
    background_mask = 255 - overlay_mask

    # Turn the masks into three channel, so we can use them as weights
    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)

    # Create a masked out face image, and masked out overlay
    # We convert the images to floating point in range 0.0 - 1.0
    face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))

    # And finally just add them together, and rescale it back to an 8bit integer image
    return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0))
Example 15: backprojection
# Required import: import cv2 [as alias]
# Or: from cv2 import getStructuringElement [as alias]
def backprojection(target, roihist):
    '''Image preprocessing'''
    hsvt = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsvt], [0, 1], roihist, [0, 180, 0, 256], 1)

    # Now convolve with a circular disc
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    cv2.filter2D(dst, -1, disc, dst)

    # threshold and binary AND
    ret, binary = cv2.threshold(dst, 80, 255, 0)

    # create the kernel
    kernel = np.ones((5, 5), np.uint8)
    iter_time = 1

    # closing operation
    binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel, iterations=iter_time)

    thresh = cv2.merge((binary, binary, binary))
    target_filter = cv2.bitwise_and(target, thresh)

    return binary, target_filter
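The roihist argument above is a 2D hue/saturation histogram of a reference patch. Below is a minimal sketch of how it might be prepared, following the standard OpenCV back-projection recipe; the file names are placeholders and not part of the original example.

import cv2

roi = cv2.imread('roi.png')                      # hypothetical reference patch of the object
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
roihist = cv2.calcHist([hsv_roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)

target = cv2.imread('target.png')                # hypothetical image to search in
binary, target_filter = backprojection(target, roihist)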