

Python cv2.fastNlMeansDenoising Method Code Examples

This article collects typical usage examples of the cv2.fastNlMeansDenoising method in Python. If you are unsure how to call cv2.fastNlMeansDenoising, or what it can be used for, the selected code examples below may help. You can also browse further usage examples for the cv2 module, to which this method belongs.


The following presents 8 code examples of the cv2.fastNlMeansDenoising method, sorted by popularity by default.
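Before the examples, here is a minimal standalone sketch of a typical call, provided only for orientation. The file name noisy.png and the parameter values (h=10, templateWindowSize=7, searchWindowSize=21) are illustrative assumptions, not values taken from the examples below.

# Minimal usage sketch (assumed file name and parameter values)
import cv2

img = cv2.imread('noisy.png', cv2.IMREAD_GRAYSCALE)  # single-channel 8-bit input
# h controls the filter strength; both window sizes are in pixels and should be odd
denoised = cv2.fastNlMeansDenoising(img, h=10, templateWindowSize=7, searchWindowSize=21)
cv2.imwrite('denoised.png', denoised)

For colour input OpenCV provides the companion method cv2.fastNlMeansDenoisingColored; the examples below operate on single-channel (grayscale) data.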

Example 1: prepare

# Required module: import cv2 [as alias]
# Or: from cv2 import fastNlMeansDenoising [as alias]
def prepare(input):
    # preprocess the input image (assumes numpy is imported as np and a crop() helper is defined elsewhere in the project)
    clean = cv2.fastNlMeansDenoising(input)
    ret, thresh = cv2.threshold(clean, 127, 1, cv2.THRESH_BINARY_INV)
    img = crop(thresh)

    # 40x10 image as a flattened array
    flatten_img = cv2.resize(img, (40, 10), interpolation=cv2.INTER_AREA).flatten()

    # resize to 400x100
    resized = cv2.resize(img, (400, 100), interpolation=cv2.INTER_AREA)
    columns = np.sum(resized, axis=0)  # sum of each column
    lines = np.sum(resized, axis=1)  # sum of each row

    h, w = img.shape
    aspect = w / h

    return [*flatten_img, *columns, *lines, aspect]
Author: gnbaron, Project: signature-recognition, Lines of code: 20, Source file: preprocessor.py

Example 2: non_local_means_bw_py

# Required module: import cv2 [as alias]
# Or: from cv2 import fastNlMeansDenoising [as alias]
def non_local_means_bw_py(imgs, search_window, block_size, photo_render):
    import cv2
    # opencv_wrapper (defined elsewhere in this project) applies cv2.fastNlMeansDenoising to each image
    ret_imgs = opencv_wrapper(imgs, cv2.fastNlMeansDenoising, [None, photo_render, block_size, search_window])
    return ret_imgs
Author: mzweilin, Project: EvadeML-Zoo, Lines of code: 6, Source file: squeeze.py

Example 3: cleaning_probs

# Required module: import cv2 [as alias]
# Or: from cv2 import fastNlMeansDenoising [as alias]
def cleaning_probs(self, probs: np.ndarray, sigma: float) -> np.ndarray:
    # Smooth
    if sigma > 0.:
        return cv2.GaussianBlur(probs, (int(3 * sigma) * 2 + 1, int(3 * sigma) * 2 + 1), sigma)
    elif sigma == 0.:
        return cv2.fastNlMeansDenoising((probs * 255).astype(np.uint8), h=20) / 255
    else:  # Negative sigma, do not do anything
        return probs
Author: qurator-spk, Project: sbb_textline_detection, Lines of code: 10, Source file: main.py

Example 4: cleaning_probs

# Required module: import cv2 [as alias]
# Or: from cv2 import fastNlMeansDenoising [as alias]
def cleaning_probs(probs: np.ndarray, sigma: float) -> np.ndarray:
    # Smooth
    if sigma > 0.:
        return cv2.GaussianBlur(probs, (int(3*sigma)*2+1, int(3*sigma)*2+1), sigma)
    elif sigma == 0.:
        return cv2.fastNlMeansDenoising((probs*255).astype(np.uint8), h=20)/255
    else:  # Negative sigma, do not do anything
        return probs 
Author: dhlab-epfl, Project: dhSegment, Lines of code: 10, Source file: binarization.py

Example 5: proximal

# Required module: import cv2 [as alias]
# Or: from cv2 import fastNlMeansDenoising [as alias]
def proximal(self):
    func = self

    class NLMProximal(Operator):
        def __init__(self, stepsize):
            super(NLMProximal, self).__init__(
                func.domain, func.domain, linear=False)
            self.stepsize = stepsize

        def _call(self, x):
            h = func.h * self.stepsize

            if func.impl == 'skimage':
                from skimage.restoration import denoise_nl_means
                x_arr = x.asarray()
                return denoise_nl_means(
                    x_arr,
                    patch_size=func.patch_size,
                    patch_distance=func.patch_distance,
                    h=h,
                    multichannel=False)
            elif func.impl == 'opencv':
                import cv2
                x_arr = x.asarray()
                # Rescale to the 8-bit range, since OpenCV expects uint8 input
                xmin, xmax = np.min(x_arr), np.max(x_arr)
                x_arr = (x_arr - xmin) * 255.0 / (xmax - xmin)
                x_arr = x_arr.astype('uint8')

                h_scaled = h * 255.0 / (xmax - xmin)
                res = cv2.fastNlMeansDenoising(
                    x_arr,
                    templateWindowSize=func.patch_size,
                    searchWindowSize=2 * func.patch_distance + 1,
                    h=h_scaled)

                # Map the result back to the original intensity range
                return res * (xmax - xmin) / 255.0 + xmin
    return NLMProximal
Author: odlgroup, Project: odl, Lines of code: 39, Source file: nonlocalmeans_functionals.py

Example 6: recognize_card

# Required module: import cv2 [as alias]
# Or: from cv2 import fastNlMeansDenoising [as alias]
def recognize_card(idcard):
    result = []
    # TODO: 
    # process_image(original_image, cropped_image)
    # idcard = cv2.imread(cropped_, cv2.COLOR_BGR2GRAY)

    # In some cases resized image gives worse results
    # idcard = resize(idcard, width=720)

    gray = cv2.cvtColor(idcard, cv2.COLOR_BGR2GRAY)
    denoised = cv2.fastNlMeansDenoising(gray, None, 3, 7, 21)
    
    contours, hierarchy = recognize_text(gray)
    mask = np.zeros(gray.shape, np.uint8)

    for index, contour in enumerate(contours):
        [x, y, w, h] = cv2.boundingRect(contour)
        if h < 16 or w < 16:
            continue

        mskRoi = mask[y:y+h, x:x+w]
        cv2.drawContours(mask, [contour], 0, 255, -1) #CV_FILLED
        nz = cv2.countNonZero(mskRoi)
        ratio = float(nz) / float(h * w)

        # empirically chosen range for text-like regions
        if 0.55 < ratio < 0.9:
            roi = denoised[y:y+h, x:x+w] 
            text = pytesseract.image_to_string(Image.fromarray(roi), lang="kir+eng", config="-psm 7")
            if text:                
                item = {'x': x, 'y': y, 'w': w, 'h': h, 'text': text}
                result.append(item)
                cv2.rectangle(idcard, (x, y), (x + w, y + h), (255, 0, 255), 2)
    # need to restore settings
    hash_object = hashlib.sha256(idcard)
    hex_dig = hash_object.hexdigest()
    cv2.imwrite("/webapp/web/static/"+hex_dig+".jpeg", idcard)
    return "static/"+hex_dig+".jpeg", result 
Author: maddevsio, Project: idmatch, Lines of code: 40, Source file: idcardocr.py

Example 7: get_result_fix_length

# Required module: import cv2 [as alias]
# Or: from cv2 import fastNlMeansDenoising [as alias]
def get_result_fix_length(red, fix_length, langset, custom_config=''):
    red_org = red
    cv2.fastNlMeansDenoising(red, red, 4, 7, 35)
    rec, red = cv2.threshold(red, 127, 255, cv2.THRESH_BINARY_INV)
    image, contours, hierarchy = cv2.findContours(red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x returns three values here
    # print(len(contours))
    # Drawing the contours once helps reduce noise
    cv2.drawContours(red, contours, -1, (0, 255, 0), 1)
    color_img = cv2.cvtColor(red, cv2.COLOR_GRAY2BGR)
    # for x, y, w, h in contours:
    #     imgrect = cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # showimg(imgrect)

    h_threshold = 54
    numset_contours = []
    calcu_cnt = 1
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        if h > h_threshold:
            numset_contours.append((x, y, w, h))
    while len(numset_contours) != fix_length:
        if calcu_cnt > 50:
            print(u'Too many iterations! Current height threshold:', h_threshold)
            break
        calcu_cnt += 1
        # Adjust the height threshold based on the previous pass (raise it if too many
        # boxes were found, lower it if too few), then re-collect the bounding boxes.
        if len(numset_contours) > fix_length:
            h_threshold += 1
        else:
            h_threshold -= 1
        numset_contours = []
        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            if h > h_threshold:
                numset_contours.append((x, y, w, h))
    result_string = ''
    numset_contours.sort(key=lambda num: num[0])
    for x, y, w, h in numset_contours:
        result_string += pytesseract.image_to_string(cv2.UMat.get(red_org)[y-10:y + h + 10, x-10:x + w + 10], lang=langset, config=custom_config)
    # print(new_r)
    # cv2.imwrite('fixlengthred.png', cv2.UMat.get(red_org)[y-10:y + h +10 , x-10:x + w + 10])
    print(result_string)
    return result_string 
Author: Raymondhhh90, Project: idcardocr, Lines of code: 52, Source file: idcardocr.py

Example 8: get_result_vary_length

# Required module: import cv2 [as alias]
# Or: from cv2 import fastNlMeansDenoising [as alias]
def get_result_vary_length(red, langset, org_img, custom_config=''):
    red_org = red
    # cv2.fastNlMeansDenoising(red, red, 4, 7, 35)
    rec, red = cv2.threshold(red, 127, 255, cv2.THRESH_BINARY_INV)
    image, contours, hierarchy = cv2.findContours(red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x returns three values here
    # print(len(contours))
    # Drawing the contours once helps reduce noise
    cv2.drawContours(red, contours, -1, (255, 255, 255), 1)
    color_img = cv2.cvtColor(red, cv2.COLOR_GRAY2BGR)
    numset_contours = []
    height_list = []
    width_list = []
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        height_list.append(h)
        # print(h,w)
        width_list.append(w)
    height_list.remove(max(height_list))
    width_list.remove(max(width_list))
    height_threshold = 0.70*max(height_list)
    width_threshold = 1.4 * max(width_list)
    # print('height_threshold:'+str(height_threshold)+'width_threshold:'+str(width_threshold))
    big_rect = []
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        if h > height_threshold and w < width_threshold:
            # print(h,w)
            numset_contours.append((x, y, w, h))
            big_rect.append((x, y))
            big_rect.append((x + w, y + h))
    big_rect_nparray = np.array(big_rect, ndmin=3)
    x, y, w, h = cv2.boundingRect(big_rect_nparray)
    # imgrect = cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # showimg(imgrect)
    # showimg(cv2.UMat.get(org_img)[y:y + h, x:x + w])

    result_string = ''
    result_string += pytesseract.image_to_string(cv2.UMat.get(red_org)[y-10:y + h + 10, x-10:x + w + 10], lang=langset,
                                                 config=custom_config)
    print(result_string)
    # cv2.imwrite('varylength.png', cv2.UMat.get(org_img)[y:y + h, x:x + w])
    # cv2.imwrite('varylengthred.png', cv2.UMat.get(red_org)[y:y + h, x:x + w])
    # numset_contours.sort(key=lambda num: num[0])
    # for x, y, w, h in numset_contours:
    #     result_string += pytesseract.image_to_string(cv2.UMat.get(color_img)[y:y + h, x:x + w], lang=langset, config=custom_config)
    return punc_filter(result_string) 
Author: Raymondhhh90, Project: idcardocr, Lines of code: 48, Source file: idcardocr.py


Note: The cv2.fastNlMeansDenoising examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, who retain copyright of the source code; please follow the corresponding project's license when distributing or using the code, and do not reproduce this article without permission.