當前位置: 首頁>>代碼示例>>Python>>正文


Python cv2.CHAIN_APPROX_TC89_KCOS屬性代碼示例

本文整理匯總了Python中cv2.CHAIN_APPROX_TC89_KCOS屬性的典型用法代碼示例。如果您正苦於以下問題:Python cv2.CHAIN_APPROX_TC89_KCOS屬性的具體用法?Python cv2.CHAIN_APPROX_TC89_KCOS怎麽用?Python cv2.CHAIN_APPROX_TC89_KCOS使用的例子?那麽, 這裏精選的屬性代碼示例或許可以為您提供幫助。您也可以進一步了解該屬性所在cv2的用法示例。


在下文中一共展示了cv2.CHAIN_APPROX_TC89_KCOS屬性的9個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: findTargets

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CHAIN_APPROX_TC89_KCOS [as 別名]
def findTargets(frame, mask):
    """Locate the retroreflective vision tape in ``mask`` and draw it on ``frame``.

    :param frame: original BGR video frame (height x width x channels)
    :param mask: binary mask isolating the tape color
    :return: annotated copy of ``frame`` (unannotated copy if no contours found)
    """
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3 but
    # (contours, hierarchy) in OpenCV 2/4 -- slicing from the end works on all.
    contours, _ = cv2.findContours(
        mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)[-2:]
    # Gets the shape of the video frame
    screenHeight, screenWidth, _ = frame.shape
    # Center of the image; -.5 because pixel centers lie on half coordinates
    centerX = (screenWidth / 2) - .5
    centerY = (screenHeight / 2) - .5
    # Work on a copy so the source frame stays untouched
    image = frame.copy()
    # Processes the contours, takes in (contours, output_image, centerOfImage)
    if contours:
        image = findTape(contours, image, centerX, centerY)
    else:
        # pushes that it doesn't see a vision target to network tables
        networkTable.putBoolean("tapeDetected", False)

    # Returns the contours overlayed on the original video
    return image

# Finds the balls from the masked image and displays them on original stream + network tables 
開發者ID:team3997,項目名稱:ChickenVision,代碼行數:24,代碼來源:ChickenVision.py

示例2: findCargo

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CHAIN_APPROX_TC89_KCOS [as 別名]
def findCargo(frame, mask):
    """Locate the cargo ball in ``mask`` and draw it on ``frame``.

    :param frame: original BGR video frame (height x width x channels)
    :param mask: binary mask isolating the cargo color
    :return: annotated copy of ``frame`` (unannotated copy if no contours found)
    """
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3 but
    # (contours, hierarchy) in OpenCV 2/4 -- slicing from the end works on all.
    contours, _ = cv2.findContours(
        mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)[-2:]
    # Gets the shape of the video frame
    screenHeight, screenWidth, _ = frame.shape
    # Center of the image; -.5 because pixel centers lie on half coordinates
    centerX = (screenWidth / 2) - .5
    centerY = (screenHeight / 2) - .5
    # Work on a copy so the source frame stays untouched
    image = frame.copy()
    # Processes the contours, takes in (contours, output_image, centerOfImage)
    if contours:
        image = findBall(contours, image, centerX, centerY)
    else:
        # pushes that it doesn't see cargo to network tables
        networkTable.putBoolean("cargoDetected", False)
    # Returns the contours overlayed on the original video
    return image


# Draws Contours and finds center and yaw of orange ball
# centerX is center x coordinate of image
# centerY is center y coordinate of image 
開發者ID:team3997,項目名稱:ChickenVision,代碼行數:26,代碼來源:ChickenVision.py

示例3: contours_hierarchy

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CHAIN_APPROX_TC89_KCOS [as 別名]
def contours_hierarchy(mask):
    """Return ``(contours, hierarchy)`` for the pixels labelled 1 in ``mask``.

    Contour tracing is done with cv2 because it is much faster than shapely.

    :param mask: integer label mask; only pixels equal to 1 are traced
    :return: tuple of (contours, hierarchy) as produced by cv2.findContours
    """
    binary = ((mask == 1) * 255).astype(np.uint8)
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3 but
    # (contours, hierarchy) in OpenCV 2/4 -- slicing from the end works on all.
    contours, hierarchy = cv2.findContours(
        binary,
        cv2.RETR_CCOMP,
        cv2.CHAIN_APPROX_TC89_KCOS)[-2:]
    return contours, hierarchy
開發者ID:SpaceNetChallenge,項目名稱:SpaceNet_Off_Nadir_Solutions,代碼行數:9,代碼來源:util.py

示例4: binarized_whatlike_filtered_image

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CHAIN_APPROX_TC89_KCOS [as 別名]
def binarized_whatlike_filtered_image(self, img):
        """
        Do normalization and thresholding on the result of the weighted
        hat-like filter image to extract line candidates.
        :param img: input image; only channel 0 is used
        :return: (result, norm_thresh_img) where result is a list of
                 (top_roi, fv_roi) Roidb pairs as defined in imdb.py
        :raises ValueError: if img is None
        """
        if img is None:
            raise ValueError('Image data is invalid')
        # intensity normalizing the image and thresholding the image:
        # responses above 650 become foreground (255), the rest 0
        image = img[:, :, 0]
        inds = np.where(image[:, :] > 650)
        norm_thresh_img = np.zeros(image.shape).astype(np.uint8)
        norm_thresh_img[inds] = 255

        # find connected components; cv2.findContours returns 3 values in
        # OpenCV 3 and 2 in OpenCV 2/4 -- slice from the end so both work
        contours, hierarchy = cv2.findContours(image=norm_thresh_img, mode=cv2.RETR_CCOMP,
                                               method=cv2.CHAIN_APPROX_TC89_KCOS)[-2:]
        response_points = self.__find_response_points_in_contours(contours=contours, image=norm_thresh_img)

        # find the rotated rect of each contour; if it passes the validity
        # check, save the ROI and try to map it from top view to front view
        result = []
        valid_contours = 0
        for index, contour in enumerate(contours):
            rotrect = cv2.minAreaRect(contour)
            if self.__is_rrect_valid(rotrect):
                # the contour is valid and can be saved
                roi_contours = contour
                # flatten the (N, 1, 2) contour point array to (N, 2)
                roi_contours = np.reshape(roi_contours, newshape=(roi_contours.shape[0], roi_contours.shape[2]))
                roi_index = valid_contours
                valid_contours += 1
                top_roi_db = imdb.Roidb(roi_index=roi_index, roi_contours=roi_contours,
                                        roi_response_points=response_points[index])
                fv_roi_db, roi_is_valid = self.__map_roi_to_front_view(roidb=top_roi_db)
                if roi_is_valid:
                    result.append((top_roi_db, fv_roi_db))
        return result, norm_thresh_img
開發者ID:MaybeShewill-CV,項目名稱:DVCNN_Lane_Detection,代碼行數:39,代碼來源:binarized_filter_result.py

示例5: __extract_line_from_filtered_image

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CHAIN_APPROX_TC89_KCOS [as 別名]
def __extract_line_from_filtered_image(img):
        """
        Do normalization and thresholding on the result of the weighted
        hat-like filter image to extract line candidates.
        :param img: input image; only channel 0 is used
        :return: dict with keys 'rotate_rect_list' and 'bounding_rect_list'
        """
        # threshold channel 0: responses above 300 become foreground (255)
        image = img[:, :, 0]
        inds = np.where(image[:, :] > 300)
        norm_thresh_image = np.zeros(image.shape).astype(np.uint8)
        norm_thresh_image[inds] = 255

        # find connected components; cv2.findContours returns 3 values in
        # OpenCV 3 and 2 in OpenCV 2/4 -- slice from the end so both work
        contours, hierarchy = cv2.findContours(image=norm_thresh_image, mode=cv2.RETR_CCOMP,
                                               method=cv2.CHAIN_APPROX_TC89_KCOS)[-2:]

        # find the rotated rect of each contour; if it passes the validity
        # check, keep both the rotated and the axis-aligned bounding rect
        rotate_rect_list = []
        bounding_rect_list = []
        for contour in contours:
            rotrect = cv2.minAreaRect(contour)
            if RoiExtractor.__is_rrect_valid(rotrect):
                rotate_rect_list.append(rotrect)
                bounding_rect_list.append(cv2.boundingRect(contour))
        result = {
            'rotate_rect_list': rotate_rect_list,
            'bounding_rect_list': bounding_rect_list
        }
        return result
開發者ID:MaybeShewill-CV,項目名稱:DVCNN_Lane_Detection,代碼行數:33,代碼來源:extract_candidate.py

示例6: __init__

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CHAIN_APPROX_TC89_KCOS [as 別名]
def __init__(self, img, **kwargs):
        """Trace the contour tree of ``img`` and remember its shape.

        Extra keyword arguments are forwarded to ``_find_contours``.
        """
        found, tree = _find_contours(
            img, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS, **kwargs)
        self.contours = found
        self.hierarchy = tree
        self.imgshape = img.shape
開發者ID:ulikoehler,項目名稱:OTR,代碼行數:7,代碼來源:TableRecognition.py

示例7: compute_missing_cell_contours

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CHAIN_APPROX_TC89_KCOS [as 別名]
def compute_missing_cell_contours(self, missing_cells_mask):
        """Return the contours traced from the missing-cells mask."""
        found = _find_contours(
            missing_cells_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
        return found[0]
開發者ID:ulikoehler,項目名稱:OTR,代碼行數:5,代碼來源:TableRecognition.py

示例8: extractPiece

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CHAIN_APPROX_TC89_KCOS [as 別名]
def extractPiece(tile, margin=0.05):
    """Segment a candidate chess piece from a single board tile.

    Builds a list of intermediate BGR images documenting each processing
    stage; the final image shows the detected convex hulls drawn on the
    binarized tile.

    :param tile: BGR image of one board square
    :param margin: fraction of the tile border to ignore (currently unused;
                   kept for interface compatibility)
    :return: list of BGR debug images, last one with hulls drawn
    """
    imgs = [tile]

    im_gray = cv2.cvtColor(tile, cv2.COLOR_BGR2GRAY)
    imgs.append(cv2.cvtColor(im_gray, cv2.COLOR_GRAY2BGR))

    # Median blur suppresses salt-and-pepper noise before binarization.
    im_gray = cv2.medianBlur(im_gray, 3)
    imgs.append(cv2.cvtColor(im_gray, cv2.COLOR_GRAY2BGR))

    # Binarize around the mean brightness of the tile (in-place on im_gray;
    # im_bw aliases im_gray deliberately).
    bright = np.mean(im_gray)
    im_bw = im_gray
    im_bw[np.where(im_gray < bright)] = 0
    im_bw[np.where(im_gray >= bright)] = 255
    imgs.append(cv2.cvtColor(im_bw, cv2.COLOR_GRAY2BGR))

    # Normalize polarity so the foreground is always the bright region,
    # regardless of whether the tile is a light or a dark square.
    if np.mean(im_bw) < 128:
        im_bw = 255 - im_bw

    imgs.append(cv2.cvtColor(im_bw, cv2.COLOR_GRAY2BGR))

    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3 but
    # (contours, hierarchy) in OpenCV 2/4 -- slicing from the end works on all.
    contours, hierarchy = cv2.findContours(
        im_bw.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)[-2:]

    hulls = [cv2.convexHull(c) for c in contours]
    ids = ignoreContours(im_bw, hulls, max_area_percentage=0.75, min_area_percentage=0.2)

    im_bw = cv2.cvtColor(im_bw, cv2.COLOR_GRAY2BGR)
    tmp = im_bw.copy()
    for i in ids:
        # Drop the redundant middle axis of the (N, 1, 2) hull point array.
        c = np.squeeze(hulls[i], 1)
        drawContour(tmp, c, randomColor(), thickness=1)

    imgs.append(tmp)

    return imgs
開發者ID:nebbles,項目名稱:DE3-ROB1-CHESS,代碼行數:53,代碼來源:main.py

示例9: extractBoards

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CHAIN_APPROX_TC89_KCOS [as 別名]
def extractBoards(img, w, h):
    """Extracts all boards from an image. This function applies perspective correction.

    :param img: source image
    :param w: output width
    :param h: output height
    :returns: a list of the extracted board images
    """
    im_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Otsu chooses the threshold automatically; 128 is only the seed value.
    (thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3 but
    # (contours, hierarchy) in OpenCV 2/4 -- slicing from the end works on all.
    contours, hierarchy = cv2.findContours(
        im_bw, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)[-2:]

    # Filter out contours that are unlikely to be a board outline.
    contour_ids = ignoreContours(im_bw, contours, hierarchy)
    boards = []
    for i in contour_ids:
        # Drop the redundant middle axis of the (N, 1, 2) contour array.
        c = np.squeeze(contours[i], 1)

        perspective = getPerspective(img, c)
        if perspective is not None:
            # Warp the candidate region to a w x h rectified board image.
            b = extractPerspective(img, perspective, w, h)
            boards.append(b)

    return boards
開發者ID:nebbles,項目名稱:DE3-ROB1-CHESS,代碼行數:60,代碼來源:extractBoard.py


注:本文中的cv2.CHAIN_APPROX_TC89_KCOS屬性示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。