

Python cv2.threshold Method Code Examples

This article compiles typical code examples for the Python method cv2.threshold. If you are wondering how exactly cv2.threshold is used, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the cv2 module that the method belongs to.


Fifteen code examples of cv2.threshold are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
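Before the individual examples, here is a minimal sketch of the basic call pattern, assuming an arbitrary grayscale input (sample.jpg is only a placeholder path): cv2.threshold takes a single-channel image, a threshold value, a maximum value and a flag, and returns the threshold actually used together with the binarized image.

# Minimal sketch of basic cv2.threshold usage; sample.jpg is a placeholder path
import cv2

gray = cv2.imread('sample.jpg', cv2.IMREAD_GRAYSCALE)

# fixed threshold: pixels above 127 become 255, everything else becomes 0
ret, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)

# Otsu's method: pass 0 as the threshold and add THRESH_OTSU;
# the automatically chosen threshold comes back in ret_otsu
ret_otsu, binary_otsu = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)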

Example 1: canny

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def canny(filepathname, left=70, right=140):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v, cv2.COLOR_BGR2GRAY)
    s = cv2.Canny(s, left, right)
    cv2.imshow('nier',s)
    return s

    # Enclose detections in minimal bounding rectangles; after Canny the edges are white lines, so a threshold range of 127-255 is sufficient.
    # ret, binary = cv2.threshold(s,127,255,cv2.THRESH_BINARY) 
    # contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x,y,w,h = cv2.boundingRect(c)
    #     if w>5 and h>10: # only draw boxes above a minimum size
    #         cv2.rectangle(v,(x,y),(x+w,y+h),(155,155,0),1)
    # # cv2.drawContours(s,contours,-1,(0,0,255),3) # draw all contours
    # cv2.imshow('nier2',v)

    # cv2.waitKey()
    # cv2.destroyAllWindows() 
Developer: cilame, Project: vrequest, Lines: 21, Source: pycv2.py

Example 2: laplacian

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def laplacian(filepathname):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v, cv2.COLOR_BGR2GRAY)
    s = cv2.Laplacian(s, cv2.CV_16S, ksize=3)
    s = cv2.convertScaleAbs(s)
    cv2.imshow('nier',s)
    return s

    # ret, binary = cv2.threshold(s,40,255,cv2.THRESH_BINARY)
    # contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x,y,w,h = cv2.boundingRect(c)
    #     if w>5 and h>10:
    #         cv2.rectangle(v,(x,y),(x+w,y+h),(155,155,0),1)
    # cv2.imshow('nier2',v)

    # cv2.waitKey()
    # cv2.destroyAllWindows() 
Developer: cilame, Project: vrequest, Lines: 20, Source: pycv2.py

Example 3: segment

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def segment(image, threshold=25):
    global bg
    # find the absolute difference between background and current frame
    diff = cv2.absdiff(bg.astype("uint8"), image)

    # threshold the diff image so that we get the foreground
    thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]

    # get the contours in the thresholded image
    (_, cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # return None, if no contours detected
    if len(cnts) == 0:
        return
    else:
        # based on contour area, get the maximum contour which is the hand
        segmented = max(cnts, key=cv2.contourArea)
        return (thresholded, segmented)

#-----------------
# MAIN FUNCTION
#----------------- 
Developer: Gogul09, Project: gesture-recognition, Lines: 24, Source: segment.py
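A hedged usage sketch (not part of the original project): segment relies on the module-level bg holding a float running average of background frames, and the three-value unpacking of cv2.findContours above follows the OpenCV 3.x API. The warm-up loop, camera index and blur kernel below are illustrative assumptions.

# Hedged usage sketch for segment(); camera index, frame count and blur size are assumptions
import cv2

bg = None

def run_avg(image, weight=0.5):
    # accumulate a running-average background model into the global bg
    global bg
    if bg is None:
        bg = image.copy().astype("float")
    else:
        cv2.accumulateWeighted(image, bg, weight)

camera = cv2.VideoCapture(0)
for _ in range(30):                                   # warm up the background model
    _, frame = camera.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    run_avg(cv2.GaussianBlur(gray, (7, 7), 0))

_, frame = camera.read()
gray = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (7, 7), 0)
result = segment(gray)
if result is not None:
    thresholded, segmented = result
camera.release()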

Example 4: main

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def main():
	capture = cv2.VideoCapture(0)
	_, image = capture.read()
	previous = image.copy()
	
	
	while (cv2.waitKey(1) < 0):
		_, image = capture.read()
		diff = cv2.absdiff(image, previous)
		#image = cv2.flip(image, 3)
		#image = cv2.norm(image)
		_, diff = cv2.threshold(diff, 32, 0, cv2.THRESH_TOZERO)
		_, diff = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY)
		
		diff = cv2.medianBlur(diff, 5)
		
		cv2.imshow('video', diff)
		previous = image.copy()
		
	capture.release()
	cv2.destroyAllWindows() 
Developer: petern3, Project: crop_row_detection, Lines: 23, Source: camera_test.py

Example 5: prediction

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def prediction(self, image):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.GaussianBlur(image, (21, 21), 0)
        if self.avg is None:
            self.avg = image.copy().astype(float)
        cv2.accumulateWeighted(image, self.avg, 0.5)
        frameDelta = cv2.absdiff(image, cv2.convertScaleAbs(self.avg))
        thresh = cv2.threshold(
                frameDelta, DELTA_THRESH, 255,
                cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(
                thresh.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        self.avg = image.copy().astype(float)
        return cnts 
Developer: cristianpb, Project: object-detection, Lines: 19, Source: motion.py

Example 6: movement

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def movement(mat_1,mat_2):
    mat_1_gray     = cv2.cvtColor(mat_1.copy(),cv2.COLOR_BGR2GRAY)
    mat_1_gray     = cv2.blur(mat_1_gray,(blur1,blur1))
    _,mat_1_gray   = cv2.threshold(mat_1_gray,100,255,0)
    mat_2_gray     = cv2.cvtColor(mat_2.copy(),cv2.COLOR_BGR2GRAY)
    mat_2_gray     = cv2.blur(mat_2_gray,(blur1,blur1))
    _,mat_2_gray   = cv2.threshold(mat_2_gray,100,255,0)
    mat_2_gray     = cv2.bitwise_xor(mat_1_gray,mat_2_gray)
    mat_2_gray     = cv2.blur(mat_2_gray,(blur2,blur2))
    _,mat_2_gray   = cv2.threshold(mat_2_gray,70,255,0)
    mat_2_gray     = cv2.erode(mat_2_gray,np.ones((erodeval,erodeval)))
    mat_2_gray     = cv2.dilate(mat_2_gray,np.ones((4,4)))
    _, contours,__ = cv2.findContours(mat_2_gray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:            # movement detected between the two frames
        return True
    return False                     # no movement


#Pedestrian Recognition Thread 
Developer: PiSimo, Project: PiCamNN, Lines: 20, Source: picam.py
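A hedged usage sketch (not taken from picam.py): movement depends on the module-level names blur1, blur2 and erodeval, which the excerpt above does not define, and its three-value cv2.findContours unpacking assumes OpenCV 3.x. The values below are illustrative guesses.

# Hedged usage sketch for movement(); blur1, blur2 and erodeval are illustrative guesses
import cv2
import numpy as np

blur1, blur2, erodeval = 5, 5, 7

capture = cv2.VideoCapture(0)
_, previous_frame = capture.read()
_, current_frame = capture.read()
if movement(previous_frame, current_frame):
    print("movement detected between the two frames")
capture.release()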

Example 7: find_squares

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares 
Developer: makelove, Project: OpenCV-Python-Tutorial, Lines: 22, Source: squares.py
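A hedged companion sketch: find_squares above is written for Python 2 (xrange) and OpenCV 3.x (three-value cv2.findContours), and it calls a helper angle_cos that the excerpt does not show. Below is one plausible angle_cos, defined as the cosine of the corner angle at the middle vertex so that the max_cos < 0.1 test keeps only near-right angles; it is an assumption, not necessarily the project's original helper.

# Hedged companion sketch; angle_cos is an assumed helper, squares.jpg a placeholder path
import cv2
import numpy as np

def angle_cos(p0, p1, p2):
    # cosine of the corner angle at p1 formed by the vectors towards p0 and p2
    d1, d2 = (p0 - p1).astype('float'), (p2 - p1).astype('float')
    return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2)))

img = cv2.imread('squares.jpg')
squares = find_squares(img)                 # use range() instead of xrange() on Python 3
cv2.drawContours(img, squares, -1, (0, 255, 0), 3)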

Example 8: get_proto_objects_map

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def get_proto_objects_map(self, use_otsu=True):
        """Returns the proto-objects map of an RGB image

            This method generates a proto-objects map of an RGB image.
            Proto-objects are saliency hot spots, generated by thresholding
            the saliency map.

            :param use_otsu: flag whether to use Otsu thresholding (True) or
                             a hardcoded threshold value (False)
            :returns: proto-objects map
        """
        saliency = self.get_saliency_map()

        if use_otsu:
            _, img_objects = cv2.threshold(np.uint8(saliency*255), 0, 255,
                                           cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        else:
            thresh = np.mean(saliency)*255*3
            _, img_objects = cv2.threshold(np.uint8(saliency*255), thresh, 255,
                                           cv2.THRESH_BINARY)
        return img_objects 
Developer: PacktPublishing, Project: OpenCV-Computer-Vision-Projects-with-Python, Lines: 23, Source: saliency.py

Example 9: prepare

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def prepare(input):
    # preprocessing the image input
    clean = cv2.fastNlMeansDenoising(input)
    ret, tresh = cv2.threshold(clean, 127, 1, cv2.THRESH_BINARY_INV)
    img = crop(tresh)

    # 40x10 image as a flatten array
    flatten_img = cv2.resize(img, (40, 10), interpolation=cv2.INTER_AREA).flatten()

    # resize to 400x100
    resized = cv2.resize(img, (400, 100), interpolation=cv2.INTER_AREA)
    columns = np.sum(resized, axis=0)  # sum of all columns
    lines = np.sum(resized, axis=1)  # sum of all lines

    h, w = img.shape
    aspect = w / h

    return [*flatten_img, *columns, *lines, aspect] 
Developer: gnbaron, Project: signature-recognition, Lines: 20, Source: preprocessor.py

Example 10: find_waves

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def find_waves(threshold, histogram):
	up_point = -1  # rising point
	is_peak = False
	if histogram[0] > threshold:
		up_point = 0
		is_peak = True
	wave_peaks = []
	for i,x in enumerate(histogram):
		if is_peak and x < threshold:
			if i - up_point > 2:
				is_peak = False
				wave_peaks.append((up_point, i))
		elif not is_peak and x >= threshold:
			is_peak = True
			up_point = i
	if is_peak and up_point != -1 and i - up_point > 4:
		wave_peaks.append((up_point, i))
	return wave_peaks

# Split the image at the detected wave peaks to obtain an image of each character
Developer: wzh191920, Project: License-Plate-Recognition, Lines: 22, Source: predict.py
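A hedged usage sketch (the Otsu binarization and mean-based threshold below are assumptions, not necessarily what predict.py uses): build a column-projection histogram from a binarized plate image and split it at the peaks that find_waves returns.

# Hedged usage sketch for find_waves(); plate.jpg is a placeholder, the mean threshold a guess
import cv2
import numpy as np

plate_gray = cv2.imread('plate.jpg', cv2.IMREAD_GRAYSCALE)
_, plate_bin = cv2.threshold(plate_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

histogram = np.sum(plate_bin // 255, axis=0)          # count of white pixels per column
wave_peaks = find_waves(np.mean(histogram), histogram)
characters = [plate_bin[:, start:end] for start, end in wave_peaks]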

Example 11: process

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def process(eval_img, device='cpu'):
    (img, origin, unpadder), file_name = eval_img
    with torch.no_grad():
        out = model(img.to(device))

    prob = F.sigmoid(out)
    mask = prob > 0.5
    mask = torch.nn.MaxPool2d(kernel_size=(3, 3), padding=(1, 1), stride=1)(mask.float()).byte()
    mask = unpadder(mask)
    mask = mask.float().cpu()

    save_image(mask, file_name + ' _mask.jpg')
    origin_np = np.array(to_pil_image(origin[0]))
    mask_np = to_pil_image(mask[0]).convert("L")
    mask_np = np.array(mask_np, dtype='uint8')
    mask_np = draw_bounding_box(origin_np, mask_np, 500)
    mask_ = Image.fromarray(mask_np)
    mask_.save(file_name + "_contour.jpg")
    # ret, mask_np = cv2.threshold(mask_np, 127, 255, 0)
    # dst = cv2.inpaint(origin_np, mask_np, 1, cv2.INPAINT_NS)
    # out = Image.fromarray(dst)
    # out.save(file_name + ' _box.jpg') 
Developer: yu45020, Project: Text_Segmentation_Image_Inpainting, Lines: 24, Source: demo_segmentation.py

Example 12: crop_row_detect

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def crop_row_detect(image_in):
    
    save_image('0_image_in', image_in)
    
    ### Grayscale Transform ###
    image_edit = grayscale_transform(image_in)
    save_image('1_image_gray', image_edit)
        
    ### Binarization ###
    _, image_edit = cv2.threshold(image_edit, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    save_image('2_image_bin', image_edit)
    
    ### Stripping ###
    crop_points = strip_process(image_edit)
    save_image('8_crop_points', crop_points)
    
    ### Hough Transform ###
    crop_lines = crop_point_hough(crop_points)
    save_image('9_image_hough', cv2.addWeighted(image_in, 1, crop_lines, 1, 0.0))
    
    return crop_lines 
Developer: petern3, Project: crop_row_detection, Lines: 23, Source: line_detect_1.py

Example 13: skeletonize

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def skeletonize(image_in):
    '''Inputs and grayscale image and outputs a binary skeleton image'''
    size = np.size(image_in)
    skel = np.zeros(image_in.shape, np.uint8)

    ret, image_edit = cv2.threshold(image_in, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
    done = False

    while not done:
        eroded = cv2.erode(image_edit, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(image_edit, temp)
        skel = cv2.bitwise_or(skel, temp)
        image_edit = eroded.copy()

        zeros = size - cv2.countNonZero(image_edit)
        if zeros == size:
            done = True

    return skel 
Developer: petern3, Project: crop_row_detection, Lines: 23, Source: line_detect_2.py

Example 14: blend_non_transparent

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def blend_non_transparent(sprite, background_img):
    gray_overlay = cv2.cvtColor(background_img, cv2.COLOR_BGR2GRAY)
    overlay_mask = cv2.threshold(gray_overlay, 1, 255, cv2.THRESH_BINARY)[1]

    overlay_mask = cv2.erode(overlay_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    overlay_mask = cv2.blur(overlay_mask, (3, 3))

    background_mask = 255 - overlay_mask

    overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
    background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)

    sprite_part = (sprite * (1 / 255.0)) * (background_mask * (1 / 255.0))
    overlay_part = (background_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))

    return np.uint8(cv2.addWeighted(sprite_part, 255.0, overlay_part, 255.0, 0.0)) 
Developer: guille0, Project: hazymaze, Lines: 18, Source: helpers.py

Example 15: sobelOperT

# Required module: import cv2 [as alias]
# Alternatively: from cv2 import threshold [as alias]
def sobelOperT(self, img, blursize, morphW, morphH):
        '''
            No different from sobelOper?
        '''
        blur = cv2.GaussianBlur(img, (blursize, blursize), 0, 0, cv2.BORDER_DEFAULT)

        if len(blur.shape) == 3:
            gray = cv2.cvtColor(blur, cv2.COLOR_RGB2GRAY)
        else:
            gray = blur

        x = cv2.Sobel(gray, cv2.CV_16S, 1, 0, 3)
        absX = cv2.convertScaleAbs(x)
        grad = cv2.addWeighted(absX, 1, 0, 0, 0)

        _, threshold = cv2.threshold(grad, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

        element = cv2.getStructuringElement(cv2.MORPH_RECT, (morphW, morphH))
        threshold = cv2.morphologyEx(threshold, cv2.MORPH_CLOSE, element)

        return threshold 
Developer: SunskyF, Project: EasyPR-python, Lines: 23, Source: plate_locate.py


Note: The cv2.threshold examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. Please refer to the license of the corresponding project before distributing or using the code; do not republish without permission.