

Python cv2.MORPH_CLOSE Attribute Code Examples

This article collects typical usage examples of the cv2.MORPH_CLOSE attribute in Python. If you are wondering what cv2.MORPH_CLOSE is for, or how to use it in your own code, the curated examples below may help. You can also explore further usage examples from the cv2 module, where this attribute is defined.


The following shows 15 code examples of the cv2.MORPH_CLOSE attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
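
For orientation: morphological closing (cv2.MORPH_CLOSE) is a dilation followed by an erosion, typically used to fill small holes and bridge narrow gaps in a binary mask. Below is a minimal, self-contained sketch of the pattern shared by the examples in this article; the file name mask.png is only a placeholder.

import cv2

# Load (or otherwise obtain) a single-channel binary mask; 'mask.png' is a placeholder path.
mask = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)
_, mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)

# Elliptical structuring element; a larger kernel closes larger holes.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

# Closing = dilation followed by erosion.
closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=1)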

Example 1: _morphological_process

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def _morphological_process(image, kernel_size=5):
    """
    Morphological processing to fill holes in a binary segmentation result.
    :param image: single-channel binary segmentation result
    :param kernel_size: size of the elliptical structuring element
    :return: the closed (hole-filled) image
    """
    if len(image.shape) == 3:
        raise ValueError('Binary segmentation result image should be a single channel image')

    # `is not` compares object identity, so dtypes must be compared with !=
    if image.dtype != np.uint8:
        image = np.array(image, np.uint8)

    kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size, kernel_size))

    # closing fills small holes in the foreground
    closing = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel, iterations=1)

    return closing
Author: MaybeShewill-CV, Project: lanenet-lane-detection, Lines: 21, Source: lanenet_postprocess.py

Example 2: predict0

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def predict0():
    Vnet3d = Vnet3dModule(256, 256, 64, inference=True, model_path="model\\Vnet3dModule.pd")
    for filenumber in range(30):
        batch_xs = np.zeros(shape=(64, 256, 256))
        for index in range(64):
            # backslashes doubled so the Windows paths are not mangled by escape sequences
            imgs = cv2.imread(
                "D:\\Data\\PROMISE2012\\Vnet3d_data\\test\\image\\" + str(filenumber) + "\\" + str(index) + ".bmp", 0)
            batch_xs[index, :, :] = imgs[128:384, 128:384]

        predictvalue = Vnet3d.prediction(batch_xs)

        for index in range(64):
            result = np.zeros(shape=(512, 512), dtype=np.uint8)
            result[128:384, 128:384] = predictvalue[index]
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
            result = cv2.morphologyEx(result, cv2.MORPH_CLOSE, kernel)
            cv2.imwrite(
                "D:\\Data\\PROMISE2012\\Vnet3d_data\\test\\image\\" + str(filenumber) + "\\" + str(index) + "mask.bmp",
                result)
Author: junqiangchen, Project: LiTS---Liver-Tumor-Segmentation-Challenge, Lines: 21, Source: vnet3d_train_predict.py

Example 3: fill_break_line

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def fill_break_line(cw_mask):
	"""Fill breaks in horizontal and vertical lines of the mask by applying
	MORPH_CLOSE with hand-crafted structuring elements."""
	broken_line_h = np.array([[0,0,0,0,0],
							[0,0,0,0,0],
							[1,0,0,0,1],
							[0,0,0,0,0],
							[0,0,0,0,0]], dtype=np.uint8)
	broken_line_h2 = np.array([[0,0,0,0,0],
							[0,0,0,0,0],
							[1,1,0,1,1],
							[0,0,0,0,0],
							[0,0,0,0,0]], dtype=np.uint8)
	broken_line_v = np.transpose(broken_line_h)
	broken_line_v2 = np.transpose(broken_line_h2)
	cw_mask = cv2.morphologyEx(cw_mask, cv2.MORPH_CLOSE, broken_line_h)
	cw_mask = cv2.morphologyEx(cw_mask, cv2.MORPH_CLOSE, broken_line_v)
	cw_mask = cv2.morphologyEx(cw_mask, cv2.MORPH_CLOSE, broken_line_h2)
	cw_mask = cv2.morphologyEx(cw_mask, cv2.MORPH_CLOSE, broken_line_v2)

	return cw_mask
Author: zlzeng, Project: DeepFloorplan, Lines: 21, Source: util.py

Example 4: sobelOperT

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def sobelOperT(self, img, blursize, morphW, morphH):
        '''
            No different from sobelOper?
        '''
        blur = cv2.GaussianBlur(img, (blursize, blursize), 0, 0, cv2.BORDER_DEFAULT)

        if len(blur.shape) == 3:
            gray = cv2.cvtColor(blur, cv2.COLOR_RGB2GRAY)
        else:
            gray = blur

        # kernel size must be passed as a keyword; the fifth positional argument is dst
        x = cv2.Sobel(gray, cv2.CV_16S, 1, 0, ksize=3)
        absX = cv2.convertScaleAbs(x)
        # src2 must be an array in the Python bindings; with beta=0 the result equals absX
        grad = cv2.addWeighted(absX, 1, absX, 0, 0)

        _, threshold = cv2.threshold(grad, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

        element = cv2.getStructuringElement(cv2.MORPH_RECT, (morphW, morphH))
        threshold = cv2.morphologyEx(threshold, cv2.MORPH_CLOSE, element)

        return threshold
Author: SunskyF, Project: EasyPR-python, Lines: 23, Source: plate_locate.py

Example 5: colorSearch

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def colorSearch(self, src, color, out_rect):
        """

        :param src:
        :param color:
        :param out_rect: minAreaRect
        :return: binary
        """
        color_morph_width = 10
        color_morph_height = 2

        match_gray = colorMatch(src, color, False)

        _, src_threshold = cv2.threshold(match_gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

        element = cv2.getStructuringElement(cv2.MORPH_RECT, (color_morph_width, color_morph_height))
        src_threshold = cv2.morphologyEx(src_threshold, cv2.MORPH_CLOSE, element)

        out = src_threshold.copy()

        # OpenCV 3.x API: findContours returns (image, contours, hierarchy)
        _, contours, _ = cv2.findContours(src_threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        for cnt in contours:
            mr = cv2.minAreaRect(cnt)
            if self.verifySizes(mr):
                out_rect.append(mr)

        return out 
Author: SunskyF, Project: EasyPR-python, Lines: 30, Source: plate_locate.py

Example 6: verticalEdgeDetection

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def verticalEdgeDetection(image):
    image_sobel = cv2.Sobel(image.copy(),cv2.CV_8U,1,0)
    # image = auto_canny(image_sobel)

    # img_sobel, CV_8U, 1, 0, 3, 1, 0, BORDER_DEFAULT
    # canny_image  = auto_canny(image)
    flag,thres = cv2.threshold(image_sobel,0,255,cv2.THRESH_OTSU|cv2.THRESH_BINARY)
    print(flag)
    flag,thres = cv2.threshold(image_sobel,int(flag*0.7),255,cv2.THRESH_BINARY)
    # thres = simpleThres(image_sobel)
    kernel = np.ones(shape=(3, 15), dtype=np.uint8)
    thres = cv2.morphologyEx(thres, cv2.MORPH_CLOSE, kernel)
    return thres


# determine rough left and right boundaries
Author: fanghon, Project: lpr, Lines: 18, Source: pipline.py

Example 7: backprojection

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def backprojection(target, roihist):
    '''Image preprocessing'''
    hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsvt],[0,1],roihist,[0,180,0,256],1)
    # Now convolute with circular disc
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
    cv2.filter2D(dst,-1,disc,dst)
    # threshold and binary AND
    ret,binary = cv2.threshold(dst,80,255,0)
    # create the kernel
    kernel = np.ones((5,5), np.uint8)
    iter_time = 1
    # closing operation
    binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel,iterations=iter_time)

    thresh = cv2.merge((binary,binary,binary))
    target_filter = cv2.bitwise_and(target,thresh)
    
    return binary, target_filter 
Author: 1zlab, Project: 1ZLAB_PyEspCar, Lines: 21, Source: cvutils.py

Example 8: _morphological_process

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def _morphological_process(image, kernel_size=5):
        """
        Morphological processing to fill holes in a binary segmentation result.
        :param image: segmentation result; converted to single-channel uint8 if needed
        :param kernel_size: size of the elliptical structuring element
        :return: the closed (hole-filled) image
        """
        # `is not` compares object identity, so dtypes must be compared with !=
        if image.dtype != np.uint8:
            image = np.array(image, np.uint8)
        if len(image.shape) == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size, kernel_size))

        # closing fills small holes in the foreground
        closing = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel, iterations=1)

        return closing
Author: stesha2016, Project: lanenet-enet-hnet, Lines: 20, Source: lanenet_postprocess.py

Example 9: get_target_centers

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def get_target_centers(img):

    # Hide buff line
    # img[0:70, 0:500] = (0, 0, 0)

    # Hide your name in first camera position (default)
    img[210:230, 350:440] = (0, 0, 0)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # cv2.imwrite('1_gray_img.png', gray)

    # Find only white text
    ret, threshold1 = cv2.threshold(gray, 252, 255, cv2.THRESH_BINARY)
    # cv2.imwrite('2_threshold1_img.png', threshold1)

    # Morphological transformation
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50, 5))
    closed = cv2.morphologyEx(threshold1, cv2.MORPH_CLOSE, kernel)
    # cv2.imwrite('3_morphologyEx_img.png', closed)
    closed = cv2.erode(closed, kernel, iterations=1)
    # cv2.imwrite('4_erode_img.png', closed)
    closed = cv2.dilate(closed, kernel, iterations=1)
    # cv2.imwrite('5_dilate_img.png', closed)

    # OpenCV 3.x API: findContours returns (image, contours, hierarchy)
    (_, centers, hierarchy) = cv2.findContours(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return centers 
Author: maaaxim, Project: bot, Lines: 27, Source: functions.py

Example 10: getBlobContours

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def getBlobContours(ROI_image, 
                    thresh, 
                    strel_size=(5, 5), 
                    is_light_background=True, 
                    analysis_type="WORM", 
                    thresh_block_size=15):

    
    ROI_image = _remove_corner_blobs(ROI_image)
    ROI_mask, thresh = _get_blob_mask(ROI_image, thresh, thresh_block_size, is_light_background, analysis_type)
    
    # clean it using morphological closing - make this optional by setting strel_size to 0
    if np.all(strel_size):
        strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, strel_size)
        ROI_mask = cv2.morphologyEx(ROI_mask, cv2.MORPH_CLOSE, strel)

    # get worms, assuming each contour in the ROI is a worm

    ROI_worms, hierarchy = cv2.findContours(ROI_mask, 
                                               cv2.RETR_EXTERNAL, 
                                               cv2.CHAIN_APPROX_NONE)[-2:]


    return ROI_worms, hierarchy 
Author: ver228, Project: tierpsy-tracker, Lines: 26, Source: getBlobTrajectories.py

Example 11: getBlobsSimple

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def getBlobsSimple(in_data, blob_params):
    frame_number, image = in_data
    min_area, worm_bw_thresh_factor, strel_size = blob_params
    
    
    img_m = cv2.medianBlur(image, 3)
    
    valid_pix = img_m[img_m>0]
    if len(valid_pix) == 0:
        return []
    
    th = _thresh_bw(valid_pix)*worm_bw_thresh_factor
    
    _, bw = cv2.threshold(img_m, th,255,cv2.THRESH_BINARY)
    if np.all(strel_size):
        strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, strel_size)
        bw = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, strel)

    cnts, hierarchy = cv2.findContours(bw, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
    
    
    blobs_data = _cnt_to_props(cnts, frame_number, th, min_area)
    return blobs_data 
Author: ver228, Project: tierpsy-tracker, Lines: 25, Source: getBlobTrajectories.py

Example 12: colorTarget

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def colorTarget(color_range=((0, 0, 0), (255, 255, 255))):

    image = cam.newImage()
    if filter == 'RGB':
        frame_to_thresh = image.copy()
    else:
        frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)                            # convert image to HSV colour space (TODO: rename to image_hsv)

    thresh = cv2.inRange(frame_to_thresh, color_range[0], color_range[1])

    # morphological cleanup (not a blur): opening removes small speckles, closing fills small holes
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)                                 # opening: remove small speckles
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)                                  # closing: fill small holes

    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]    # contours of contiguous "1" (foreground) regions
    if len(cnts) > 0:                                                                       # begin processing if any contour was found
        c = max(cnts, key=cv2.contourArea)                                                  # return the largest target area
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        return np.array([round(x, 1), round(y, 1), round(radius, 1)])
    else:
        return np.array([None, None, 0]) 
Author: MXET, Project: SCUTTLE, Lines: 24, Source: L2_track_target.py

Example 13: detectLevel

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def detectLevel(self, raidpic, hash, raidNo, radius):
        foundlvl = None
        lvl = None
        
        log.info('[Crop: ' + str(raidNo) + ' (' + str(self.uniqueHash) +') ] ' + 'Scanning Level')
        height, width, channel = raidpic.shape
        raidlevel = raidpic[int(round(radius*2*0.03)+(2*radius)+(radius*2*0.43)):int(round(radius*2*0.03)+(2*radius)+(radius*2*0.68)), 0:width]
        raidlevel = cv2.resize(raidlevel, (0,0), fx=0.5, fy=0.5) 

        imgray = cv2.cvtColor(raidlevel, cv2.COLOR_BGR2GRAY)
        imgray = cv2.GaussianBlur(imgray, (9, 9), 2)
        #kernel = np.ones((5,5),np.uint8)
        #imgray = cv2.morphologyEx(imgray, cv2.MORPH_CLOSE, kernel)
        ret, thresh = cv2.threshold(imgray, 220, 255,0)
        # OpenCV 3.x API: findContours returns (image, contours, hierarchy)
        (_, contours, _) = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        
        lvl = len(contours)-1
        
        if lvl >=1 and lvl <=5:
            
            log.info('[Crop: ' + str(raidNo) + ' (' + str(self.uniqueHash) +') ] ' + 'detectLevel: found level %s' % str(lvl))
            return lvl
            
        log.info('[Crop: ' + str(raidNo) + ' (' + str(self.uniqueHash) +') ] ' + 'detectLevel: could not find level')
        return None 
Author: Grennith, Project: Map-A-Droid, Lines: 27, Source: segscanner.py

Example 14: compute_missing_cells_mask

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def compute_missing_cells_mask(self, close_ksize=5):
        """
        Compute a binary img-scale mask,
        """
        # Create white binary img
        icellmask = np.full((self.imgshape[0], self.imgshape[1]), 255, np.uint8)

        # Mask everything except table, as defined by corner nodes (not the larger super-node!)
        cv2.fillConvexPoly(icellmask, self.table_corners, 0)
        # Now draw all cell hulls without text, but don't downsize them
        self.draw_all_cell_hulls(icellmask, None, xscale=1.1, yscale=1.1)

        # Morphology ops with large kernel to remove small intercell speckles
        # NOTE: CLOSE => remove black holes
        icellmask = cv2.morphologyEx(icellmask, cv2.MORPH_CLOSE,
                                     np.ones((close_ksize, close_ksize), np.uint8))
        return icellmask 
Author: ulikoehler, Project: OTR, Lines: 19, Source: TableRecognition.py

Example 15: motionDetected

# Required import: import cv2 [as alias]
# Or: from cv2 import MORPH_CLOSE [as alias]
def motionDetected(self, new_frame):
        frame = self.preprocessInputFrame(new_frame)

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, (21, 21), 0)

        if self.prevFrame is None:
            self.prevFrame = gray
            return False

        frameDiff = cv.absdiff(gray, self.prevFrame)

        # kernel = np.ones((5, 5), np.uint8)

        # opening/closing are computed with a default 3x3 kernel (None) but their results are unused below
        opening = cv.morphologyEx(frameDiff, cv.MORPH_OPEN, None)  # noqa
        closing = cv.morphologyEx(frameDiff, cv.MORPH_CLOSE, None)  # noqa

        ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)

        height = np.size(th1, 0)
        width = np.size(th1, 1)

        nb = cv.countNonZero(th1)

        avg = (nb * 100) / (height * width)  # percentage of changed (non-zero) pixels in the frame

        self.prevFrame = gray

        # cv.DrawContours(currentframe, self.currentcontours, (0, 0, 255), (0, 255, 0), 1, 2, cv.CV_FILLED)
        # cv.imshow("frame", current_frame)

        ret = avg > self.threshold   # motion is detected when the changed-pixel percentage exceeds the threshold

        if ret:
            self.updateMotionDetectionDts()

        return ret 
Author: JFF-Bohdan, Project: pynvr, Lines: 39, Source: motion_detection.py


Note: The cv2.MORPH_CLOSE attribute examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Please refer to the corresponding project's license before distributing or using the code; do not repost without permission.