Python cv2.medianBlur Method Code Examples

This article collects typical usage examples of the Python cv2.medianBlur method. If you are struggling with questions such as: What exactly does cv2.medianBlur do? How is cv2.medianBlur used? Where can I find examples of cv2.medianBlur in practice? Then the curated code examples below may help. You can also explore further usage examples from the cv2 module it belongs to.


The following presents 15 code examples of the cv2.medianBlur method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
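
Before working through the examples, here is a minimal, self-contained sketch of the basic call; the file names are placeholders. Note that ksize must be an odd integer greater than 1: kernels of size 3 or 5 accept 8-bit, 16-bit unsigned, or 32-bit float images, while larger kernels require an 8-bit image.

import cv2

# Read a placeholder image and apply a 5x5 median filter.
# cv2.medianBlur(src, ksize) replaces each pixel with the median of its
# ksize x ksize neighborhood, which is effective against salt-and-pepper noise.
img = cv2.imread('input.png')
blurred = cv2.medianBlur(img, 5)
cv2.imwrite('output.png', blurred)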

Example 1: main

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def main():
	capture = cv2.VideoCapture(0)
	_, image = capture.read()
	previous = image.copy()
	
	
	while (cv2.waitKey(1) < 0):
		_, image = capture.read()
		# Difference between the current and previous frame highlights motion.
		diff = cv2.absdiff(image, previous)
		#image = cv2.flip(image, 3)
		#image = cv2.norm(image)
		# Zero out small differences, then binarize whatever remains.
		_, diff = cv2.threshold(diff, 32, 0, cv2.THRESH_TOZERO)
		_, diff = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY)
		
		# Median blur removes isolated noise pixels from the motion mask.
		diff = cv2.medianBlur(diff, 5)
		
		cv2.imshow('video', diff)
		previous = image.copy()
		
	capture.release()
	cv2.destroyAllWindows() 
Author: petern3, Project: crop_row_detection, Lines: 23, Source: camera_test.py

Example 2: param_filter

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def param_filter(self, frame):
        # Apply pre-blur according to trackbar value.
        if self.pre_blur_val == 1:
            frame = cv2.GaussianBlur(frame, (5, 5), 0)
        elif self.pre_blur_val == 2:
            frame = cv2.medianBlur(frame, 5)

        # Apply a thresholding method according to trackbar value.
        if self.thresh_flag:
            _, frame = cv2.threshold(frame, 127, 255, cv2.THRESH_BINARY)
        else:
            _, frame = cv2.threshold(frame, 127, 255, cv2.THRESH_OTSU)

        # Apply post-blur according to trackbar value.
        if self.post_blur_val:
            frame = cv2.medianBlur(frame, 5)

        return frame


    # Apply filters to the frame according to contour parameters.
Author: jpnaterer, Project: smashscan, Lines: 23, Source: thresholding.py

Example 3: generate

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def generate(path):
    global cur_rgb_image
    if cur_rgb_image is not None:
        print('process......')
        el_img, er_img, angle, re_angle, os_l, os_r = get_input_from_image()
        el, er = get_output_from_sess(el_img, er_img, angle, re_angle)

        new_image = np.copy(cur_rgb_image)
        new_image = helper.replace(new_image, el, os_l)
        rgb_new_image = helper.replace(new_image, er, os_r)
        # bgr_new_image = cv2.cvtColor(rgb_new_image, cv2.COLOR_RGB2BGR)
        # cv2.imshow('deepwarp', bgr_new_image)

        # if chk_btn.get() == True:
        #     rgb_new_image = cv2.medianBlur(rgb_new_image, 3)

        global label_img
        img_wapper = ImageTk.PhotoImage(Image.fromarray(rgb_new_image))
        label_img.configure(image=img_wapper)
        label_img.image = img_wapper
        return rgb_new_image
    else:
        print('no image......')
        return None 
Author: BlueWinters, Project: DeepWarp, Lines: 26, Source: gui.py

Example 4: _median_pool_cv2

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def _median_pool_cv2(arr, block_size, pad_mode, pad_cval):
    from imgaug.augmenters.size import pad_to_multiples_of

    ndim_in = arr.ndim

    shape = arr.shape
    if shape[0] % block_size != 0 or shape[1] % block_size != 0:
        arr = pad_to_multiples_of(
            arr,
            height_multiple=block_size,
            width_multiple=block_size,
            mode=pad_mode,
            cval=pad_cval
        )

    arr = cv2.medianBlur(arr, block_size)

    if arr.ndim < ndim_in:
        arr = arr[:, :, np.newaxis]

    start_height = (block_size - 1) // 2
    start_width = (block_size - 1) // 2
    return arr[start_height::block_size, start_width::block_size] 
Author: aleju, Project: imgaug, Lines: 25, Source: imgaug.py

Example 5: extracttext

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def extracttext(imgpath, preprocess):
    if imgpath.startswith('http://') or imgpath.startswith('https://') or imgpath.startswith('ftp://'):
        image = url_to_image(imgpath)
    else:
        image = cv2.imread(imgpath)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    if preprocess == "thresh":
        gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    elif preprocess == "blur":
        gray = cv2.medianBlur(gray, 3)

    filename = "{}.png".format(os.getpid())
    cv2.imwrite(filename, gray)
    text = pytesseract.image_to_string(Image.open(filename))

    os.remove(filename)
    return {"text": text} 
Author: tech-quantum, Project: sia-cog, Lines: 20, Source: cvmgr.py
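
A brief usage sketch for the example above, for illustration only: it assumes the Tesseract OCR binary and the pytesseract package are installed, and 'receipt.png' is a placeholder path to a local image.

# Hypothetical call: OCR a local image after median-blur preprocessing.
result = extracttext('receipt.png', preprocess='blur')
print(result['text'])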

Example 6: getPaperFromImage

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def getPaperFromImage(img):
    gImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    bImg = cv2.medianBlur(src = gImg, ksize = 51)

    threshold, _ = cv2.threshold(src = bImg, thresh = 0, maxval = 255, type = cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    cannyImg = cv2.Canny(image = bImg, threshold1 = 0.5 * threshold, threshold2 = threshold)

    _, contours, _ = cv2.findContours(image = cannyImg.copy(), mode = cv2.RETR_TREE, method = cv2.CHAIN_APPROX_SIMPLE)

    maxRect = Rect(0, 0, 0, 0)
    for contour in contours:
        x, y, w, h = cv2.boundingRect(points = contour)
        currentArea = w * h
        if currentArea > maxRect.getArea():
            maxRect.set(x, y, w, h)

    return img[maxRect.y : maxRect.y + maxRect.h, maxRect.x : maxRect.x + maxRect.w] 
Author: vzat, Project: signature_extractor, Lines: 19, Source: getPaper.py

Example 7: getMonMask

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def getMonMask(self, img):
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        # define range of blue color in HSV
        lower_blue = np.array([94, 130, 70])
        upper_blue = np.array([114, 160, 110])

        # Threshold the HSV image to get only shadow colors
        mask = cv2.inRange(hsv, lower_blue, upper_blue)
        kernel = np.ones((2, 2), np.uint8)
        mask = cv2.dilate(mask, kernel, iterations=1)
        final_mask = 255 - cv2.medianBlur(mask, 3) # invert mask

        return final_mask

    # Detect gym from raid sighting image 
Author: mzsmakr, Project: PGSS, Lines: 18, Source: raidnearby.py

Example 8: _get_blob_mask

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def _get_blob_mask(ROI_image, thresh, thresh_block_size, is_light_background, analysis_type):
    # get binary image, 
    if is_light_background:
        ## apply a median filter to reduce rough edges / sharpen the boundary between the worm and the background
        ROI_image_th = cv2.medianBlur(ROI_image, 3)
        ROI_mask = ROI_image_th < thresh
    else:
        if analysis_type == "PHARYNX":
            # for fluorescent pharynx labeled images, refine the threshold with a local otsu (http://scikit-image.org/docs/dev/auto_examples/plot_local_otsu.html)
            # this compensates for local variations in brightness in high density regions, when many worms are close to each other
            ROI_rank_otsu = skf.rank.otsu(ROI_image, skm.disk(thresh_block_size))
            ROI_mask = (ROI_image>ROI_rank_otsu)
            # as a local threshold introduces artifacts at the edge of the mask, also use a global threshold to cut these out
            ROI_mask &= (ROI_image>=thresh)
        else:
            # this case applies for example to worms where the whole body is fluorescently labeled
            ROI_image_th = cv2.medianBlur(ROI_image, 3)
            ROI_mask = ROI_image_th >= thresh
        
    ROI_mask &= (ROI_image != 0)
    ROI_mask = ROI_mask.astype(np.uint8)

    return ROI_mask, thresh # returning thresh here seems redundant, as it isn't actually changed 
Author: ver228, Project: tierpsy-tracker, Lines: 25, Source: getBlobTrajectories.py

Example 9: getBlobsSimple

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def getBlobsSimple(in_data, blob_params):
    frame_number, image = in_data
    min_area, worm_bw_thresh_factor, strel_size = blob_params
    
    
    img_m = cv2.medianBlur(image, 3)
    
    valid_pix = img_m[img_m>0]
    if len(valid_pix) == 0:
        return []
    
    th = _thresh_bw(valid_pix)*worm_bw_thresh_factor
    
    _, bw = cv2.threshold(img_m, th,255,cv2.THRESH_BINARY)
    if np.all(strel_size):
        strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, strel_size)
        bw = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, strel)

    cnts, hierarchy = cv2.findContours(bw, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
    
    
    blobs_data = _cnt_to_props(cnts, frame_number, th, min_area)
    return blobs_data 
Author: ver228, Project: tierpsy-tracker, Lines: 25, Source: getBlobTrajectories.py

Example 10: get_dark_mask

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def get_dark_mask(full_data):
    # get darker objects that are unlikely to be worms
    if full_data.shape[0] < 2:
        # nothing to do here, returning an empty mask
        return np.zeros((full_data.shape[1], full_data.shape[2]), np.uint8)
    
    # this mask shouldn't contain many worms
    img_h = cv2.medianBlur(np.max(full_data, axis=0), 5)
    #this mask is likely to contain a lot of worms
    img_l = cv2.medianBlur(np.min(full_data, axis=0), 5)
    
    #this is the difference (the tagged pixels should be mostly worms)
    img_del = img_h-img_l
    th_d = threshold_otsu(img_del)
    
    #this is the maximum of the minimum pixels of the worms...
    th = np.max(img_l[img_del>th_d])
    #this is what a darkish mask should look like
    dark_mask = cv2.dilate((img_h<th).astype(np.uint8), disk(11))
    
    return dark_mask 
Author: ver228, Project: tierpsy-tracker, Lines: 23, Source: getFoodContourMorph.py

Example 11: denoise

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def denoise(varr, method, **kwargs):
    if method == 'gaussian':
        func = cv2.GaussianBlur
    elif method == 'anisotropic':
        func = anisotropic_diffusion
    elif method == 'median':
        func = cv2.medianBlur
    elif method == 'bilateral':
        func = cv2.bilateralFilter
    else:
        raise NotImplementedError(
            "denoise method {} not understood".format(method))
    res = xr.apply_ufunc(
        func,
        varr,
        input_core_dims=[['height', 'width']],
        output_core_dims=[['height', 'width']],
        vectorize=True,
        dask='parallelized',
        output_dtypes=[varr.dtype],
        kwargs=kwargs)
    return res.rename(varr.name + "_denoised") 
Author: DeniseCaiLab, Project: minian, Lines: 24, Source: preprocessing.py

Example 12: get_name

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def get_name(img):
        #    cv2.imshow("method3", img)
        #    cv2.waitKey()
        print('name')
        _, _, red = cv2.split(img) # split automatically converts a UMat back to a Mat
        red = cv2.UMat(red)
        red = hist_equal(red)
        red = cv2.adaptiveThreshold(red, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 151, 50)
        #    red = cv2.medianBlur(red, 3)
        red = img_resize(red, 150)
        img = img_resize(img, 150)
        # showimg(red)
        # cv2.imwrite('name.png', red)
        #    img2 = Image.open('address.png')
        # img = Image.fromarray(cv2.UMat.get(red).astype('uint8'))
        #return get_result_vary_length(red, 'chi_sim', img, '-psm 7')
        return get_result_vary_length(red, 'chi_sim', img, '--psm 7')
        # return punc_filter(pytesseract.image_to_string(img, lang='chi_sim', config='-psm 13').replace(" ","")) 
Author: Raymondhhh90, Project: idcardocr, Lines: 20, Source: idcardocr.py

Example 13: cartoonise

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def cartoonise(self, img_rgb, num_down, num_bilateral, medianBlur, D, sigmaColor, sigmaSpace):
        # Downsample using a Gaussian pyramid
        img_color = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
        for _ in range(num_down):
            img_color = cv2.pyrDown(img_color)
        # Repeatedly apply a small bilateral filter instead of one large filter
        for _ in range(num_bilateral):
            img_color = cv2.bilateralFilter(img_color, d=D, sigmaColor=sigmaColor, sigmaSpace=sigmaSpace)
        # Upsample the image back to the original size
        for _ in range(num_down):
            img_color = cv2.pyrUp(img_color)
        if not self.Save_Edge:
            img_cartoon = img_color
        else:
            img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
            img_blur = cv2.medianBlur(img_gray, medianBlur)
            img_edge = cv2.adaptiveThreshold(img_blur, 255,
                                             cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                             cv2.THRESH_BINARY,
                                             blockSize=self.Adaptive_Threshold_Block_Size,
                                             C=self.C)
            img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
            img_edge = cv2.resize(img_edge, img_color.shape[:2][::-1])
            img_cartoon = cv2.bitwise_and(img_color, img_edge)
        return cv2.cvtColor(img_cartoon, cv2.COLOR_RGB2BGR) 
Author: MashiMaroLjc, Project: rabbitVE, Lines: 27, Source: Cartoonlization.py

Example 14: sketch_image

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def sketch_image(img):
    """Sketches the image applying a laplacian operator to detect the edges"""

    # Convert to gray scale
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Apply median filter
    img_gray = cv2.medianBlur(img_gray, 5)

    # Detect edges using cv2.Laplacian()
    edges = cv2.Laplacian(img_gray, cv2.CV_8U, ksize=5)

    # Threshold the edges image:
    ret, thresholded = cv2.threshold(edges, 70, 255, cv2.THRESH_BINARY_INV)

    return thresholded 
Author: PacktPublishing, Project: Mastering-OpenCV-4-with-Python, Lines: 18, Source: cartoonizing.py
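
A short usage sketch for sketch_image, assuming 'photo.jpg' is a placeholder path to a local image:

# Hypothetical usage: load an image, sketch it, and save the result.
img = cv2.imread('photo.jpg')
sketch = sketch_image(img)
cv2.imwrite('photo_sketch.png', sketch)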

Example 15: __call__

# Required import: import cv2 [as alias]
# Or: from cv2 import medianBlur [as alias]
def __call__(self, image, label):


        #aug blur
        if random.random() > set_ratio:
            select = random.random()
            if select < 0.3:
                kernalsize = random.choice([3, 5])
                image = cv2.GaussianBlur(image, (kernalsize, kernalsize), 0)
            elif select < 0.6:
                kernalsize = random.choice([3, 5])
                image = cv2.medianBlur(image, kernalsize)
            else:
                kernalsize = random.choice([3, 5])
                image = cv2.blur(image, (kernalsize, kernalsize))

        # aug noise
        if random.random() > set_ratio:
            mu = 0
            sigma = random.random() * 10.0
            image = np.array(image, dtype=np.float32)
            image += np.random.normal(mu, sigma, image.shape)
            image[image > 255] = 255
            image[image < 0] = 0

        # aug_color
        if random.random() > set_ratio:

            random_factor = np.random.randint(4, 17) / 10.
            color_image = ImageEnhance.Color(image).enhance(random_factor)
            random_factor = np.random.randint(4, 17) / 10.
            brightness_image = ImageEnhance.Brightness(color_image).enhance(random_factor)
            random_factor = np.random.randint(6, 15) / 10.
            contrast_image = ImageEnhance.Contrast(brightness_image).enhance(random_factor)
            random_factor = np.random.randint(8, 13) / 10.
            image = ImageEnhance.Sharpness(contrast_image).enhance(random_factor)

        return np.array(image), label 
Author: clovaai, Project: ext_portrait_segmentation, Lines: 40, Source: CVTransforms.py


Note: The cv2.medianBlur method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not republish without permission.