

Python cv2.boundingRect Method Code Examples

This article compiles typical usage examples of the cv2.boundingRect method in Python. If you are struggling with questions such as: what exactly does cv2.boundingRect do? How is it called? What do real-world uses look like? Then the curated code examples below may help. You can also explore further usage examples from the cv2 module, where this method lives.


Fifteen code examples of cv2.boundingRect are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
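Before the collected examples, here is a minimal usage sketch (assuming OpenCV 4.x; 'example.png' is a placeholder path, not from any project below): cv2.boundingRect takes a contour or point set and returns the upright, axis-aligned bounding box as an (x, y, w, h) tuple. It is most often combined with cv2.findContours and cv2.rectangle, as many of the examples below do.

import cv2

# Hypothetical input image; substitute your own file path.
img = cv2.imread('example.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)

# OpenCV 4.x returns (contours, hierarchy); OpenCV 3.x returns (image, contours, hierarchy).
contours, hierarchy = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

for cnt in contours:
    # boundingRect returns the top-left corner plus width and height of the upright box.
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imshow('bounding boxes', img)
cv2.waitKey(0)
cv2.destroyAllWindows()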

Example 1: canny

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def canny(filepathname, left=70, right=140):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v, cv2.COLOR_BGR2GRAY)
    s = cv2.Canny(s, left, right)
    cv2.imshow('nier',s)
    return s

    # Draw the minimal upright bounding rectangles. After Canny the edges are all white lines, so a threshold range of 127-255 is enough.
    # ret, binary = cv2.threshold(s,127,255,cv2.THRESH_BINARY) 
    # contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x,y,w,h = cv2.boundingRect(c)
    #     if w>5 and h>10: # only draw boxes that satisfy the size constraint
    #         cv2.rectangle(v,(x,y),(x+w,y+h),(155,155,0),1)
    # # cv2.drawContours(s,contours,-1,(0,0,255),3) # draw all contours
    # cv2.imshow('nier2',v)

    # cv2.waitKey()
    # cv2.destroyAllWindows() 
Developer ID: cilame, Project: vrequest, Lines of code: 21, Source file: pycv2.py

Example 2: _find_size_candidates

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def _find_size_candidates(self, image):
        binary_image = self._filter_image(image)

        _, contours, _ = cv2.findContours(binary_image,
                                          cv2.RETR_LIST,
                                          cv2.CHAIN_APPROX_SIMPLE)

        size_candidates = []
        for contour in contours:
            bounding_rect = cv2.boundingRect(contour)
            contour_area = cv2.contourArea(contour)
            if self._is_valid_contour(contour_area, bounding_rect):
                candidate = (bounding_rect[2] + bounding_rect[3]) / 2
                size_candidates.append(candidate)

        return size_candidates 
Developer ID: nemanja-m, Project: gaps, Lines of code: 18, Source file: size_detector.py

Example 3: laplacian

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def laplacian(filepathname):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v, cv2.COLOR_BGR2GRAY)
    s = cv2.Laplacian(s, cv2.CV_16S, ksize=3)
    s = cv2.convertScaleAbs(s)
    cv2.imshow('nier',s)
    return s

    # ret, binary = cv2.threshold(s,40,255,cv2.THRESH_BINARY)
    # contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x,y,w,h = cv2.boundingRect(c)
    #     if w>5 and h>10:
    #         cv2.rectangle(v,(x,y),(x+w,y+h),(155,155,0),1)
    # cv2.imshow('nier2',v)

    # cv2.waitKey()
    # cv2.destroyAllWindows() 
Developer ID: cilame, Project: vrequest, Lines of code: 20, Source file: pycv2.py

Example 4: __get_annotation__

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def __get_annotation__(self, mask, image=None):

        _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        segmentation = []
        for contour in contours:
            # Valid polygons have >= 6 coordinates (3 points)
            if contour.size >= 6:
                segmentation.append(contour.flatten().tolist())
        RLEs = cocomask.frPyObjects(segmentation, mask.shape[0], mask.shape[1])
        RLE = cocomask.merge(RLEs)
        # RLE = cocomask.encode(np.asfortranarray(mask))
        area = cocomask.area(RLE)
        [x, y, w, h] = cv2.boundingRect(mask)

        if image is not None:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.drawContours(image, contours, -1, (0,255,0), 1)
            cv2.rectangle(image,(x,y),(x+w,y+h), (255,0,0), 2)
            cv2.imshow("", image)
            cv2.waitKey(1)

        return segmentation, [x, y, w, h], area 
Developer ID: hazirbas, Project: coco-json-converter, Lines of code: 25, Source file: generate_coco_json.py

Example 5: _append_boxes_from_saliency

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def _append_boxes_from_saliency(self, proto_objects_map, box_all):
        """Adds to the list all bounding boxes found with the saliency map

            A saliency map is used to find objects worth tracking in each
            frame. This information is combined with a mean-shift tracker
            to find objects of relevance that move, and to discard everything
            else.

            :param proto_objects_map: proto-objects map of the current frame
            :param box_all: append bounding boxes from saliency to this list
            :returns: new list of all collected bounding boxes
        """
        # find all bounding boxes in new saliency map
        box_sal = []
        cnt_sal, _ = cv2.findContours(proto_objects_map, 1, 2)
        for cnt in cnt_sal:
            # discard small contours
            if cv2.contourArea(cnt) < self.min_cnt_area:
                continue

            # otherwise add to list of boxes found from saliency map
            box = cv2.boundingRect(cnt)
            box_all.append(box)

        return box_all 
Developer ID: PacktPublishing, Project: OpenCV-Computer-Vision-Projects-with-Python, Lines of code: 27, Source file: tracking.py

Example 6: filter_prediction

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def filter_prediction(self, output, image):
        if len(output) < 2:
            return pd.DataFrame()
        else:
            df = pd.DataFrame(output)
            df = df.assign(
                    area=lambda x: df[0].apply(lambda x: cv2.contourArea(x)),
                    bounding=lambda x: df[0].apply(lambda x: cv2.boundingRect(x))
                    )
            df = df[df['area'] > MIN_AREA]
            df_filtered = pd.DataFrame(
                    df['bounding'].values.tolist(), columns=['x1', 'y1', 'w', 'h'])
            df_filtered = df_filtered.assign(
                    x1=lambda x: x['x1'].clip(0),
                    y1=lambda x: x['y1'].clip(0),
                    x2=lambda x: (x['x1'] + x['w']),
                    y2=lambda x: (x['y1'] + x['h']),
                    label=lambda x: x.index.astype(str),
                    class_name=lambda x: x.index.astype(str),
                    )
            return df_filtered 
Developer ID: cristianpb, Project: object-detection, Lines of code: 23, Source file: motion.py

Example 7: merge_img

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def merge_img(src_img, dst_img, dst_matrix, dst_points, blur_detail_x=None, blur_detail_y=None, mat_multiple=None):
    face_mask = np.zeros(src_img.shape, dtype=src_img.dtype)

    for group in core.OVERLAY_POINTS:
        cv2.fillConvexPoly(face_mask, cv2.convexHull(dst_matrix[group]), (255, 255, 255))

    r = cv2.boundingRect(np.float32([dst_points[:core.FACE_END]]))

    center = (r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))

    if mat_multiple:
        mat = cv2.getRotationMatrix2D(center, 0, mat_multiple)
        face_mask = cv2.warpAffine(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))

    if blur_detail_x and blur_detail_y:
        face_mask = cv2.blur(face_mask, (blur_detail_x, blur_detail_y), center)

    return cv2.seamlessClone(np.uint8(dst_img), src_img, face_mask, center, cv2.NORMAL_CLONE) 
Developer ID: gyp03, Project: yry, Lines of code: 20, Source file: morpher.py

Example 8: mark_hand_center

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def mark_hand_center(frame_in,cont):    
    max_d=0
    pt=(0,0)
    x,y,w,h = cv2.boundingRect(cont)
    for ind_y in xrange(int(y+0.3*h),int(y+0.8*h)): # around 0.3 to 0.8 region of height (faster calculation with OK results)
        for ind_x in xrange(int(x+0.3*w),int(x+0.6*w)): #around 0.3 to 0.6 region of width (Faster calculation with ok results)
            dist= cv2.pointPolygonTest(cont,(ind_x,ind_y),True)
            if(dist>max_d):
                max_d=dist
                pt=(ind_x,ind_y)
    if(max_d>radius_thresh*frame_in.shape[1]):
        thresh_score=True
        cv2.circle(frame_in,pt,int(max_d),(255,0,0),2)
    else:
        thresh_score=False
    return frame_in,pt,max_d,thresh_score

# 6. Find and display gesture 
Developer ID: mahaveerverma, Project: hand-gesture-recognition-opencv, Lines of code: 20, Source file: HandRecognition.py

Example 9: getFeatures

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def getFeatures(img,bbox,use_shi=False):
    n_object = np.shape(bbox)[0]
    N = 0
    temp = np.empty((n_object,),dtype=np.ndarray)   # temporary storage of x,y coordinates
    for i in range(n_object):
        (xmin, ymin, boxw, boxh) = cv2.boundingRect(bbox[i,:,:].astype(int))
        roi = img[ymin:ymin+boxh,xmin:xmin+boxw]
        # cv2.imshow('roi',roi)
        if use_shi:
            corner_response = corner_shi_tomasi(roi)
        else:
            corner_response = corner_harris(roi)
        coordinates = peak_local_max(corner_response,num_peaks=20,exclude_border=2)
        coordinates[:,1] += xmin
        coordinates[:,0] += ymin
        temp[i] = coordinates
        if coordinates.shape[0] > N:
            N = coordinates.shape[0]
    x = np.full((N,n_object),-1)
    y = np.full((N,n_object),-1)
    for i in range(n_object):
        n_feature = temp[i].shape[0]
        x[0:n_feature,i] = temp[i][:,1]
        y[0:n_feature,i] = temp[i][:,0]
    return x,y 
Developer ID: jguoaj, Project: multi-object-tracking, Lines of code: 27, Source file: getFeatures.py

Example 10: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def __init__(self, _contour):
        self.contour = _contour

        self.boundingRect = cv2.boundingRect(self.contour)

        [intX, intY, intWidth, intHeight] = self.boundingRect

        self.intBoundingRectX = intX
        self.intBoundingRectY = intY
        self.intBoundingRectWidth = intWidth
        self.intBoundingRectHeight = intHeight

        self.intBoundingRectArea = self.intBoundingRectWidth * self.intBoundingRectHeight

        self.intCenterX = (self.intBoundingRectX + self.intBoundingRectX + self.intBoundingRectWidth) / 2
        self.intCenterY = (self.intBoundingRectY + self.intBoundingRectY + self.intBoundingRectHeight) / 2

        self.fltDiagonalSize = math.sqrt((self.intBoundingRectWidth ** 2) + (self.intBoundingRectHeight ** 2))

        self.fltAspectRatio = float(self.intBoundingRectWidth) / float(self.intBoundingRectHeight)
    # end constructor

# end class 
Developer ID: muchlisinadi, Project: ALPR-Indonesia, Lines of code: 25, Source file: PossibleChar.py

Example 11: calcSafeRect

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def calcSafeRect(self, roi, src):
        '''
            return ([x, y, w, h], ok) -- the rect clamped to the image bounds, plus a flag that is False when the clamped rect is degenerate
        '''
        box = cv2.boxPoints(roi)
        x, y, w, h = cv2.boundingRect(box)

        src_h, src_w, _ = src.shape

        tl_x = x if x > 0 else 0
        tl_y = y if y > 0 else 0
        br_x = x + w - 1 if x + w - 1 < src_w else src_w - 1
        br_y = y + h - 1 if y + h - 1 < src_h else src_h - 1

        roi_w = br_x - tl_x
        roi_h = br_y - tl_y
        if roi_w <= 0 or roi_h <= 0:
            return [tl_x, tl_y, roi_w, roi_h], False

        return [tl_x, tl_y, roi_w, roi_h], True 
Developer ID: SunskyF, Project: EasyPR-python, Lines of code: 22, Source file: plate_locate.py

Example 12: sobel

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def sobel(filepathname):
    v = cv2.imread(filepathname)
    s = cv2.cvtColor(v,cv2.COLOR_BGR2GRAY)
    x, y = cv2.Sobel(s,cv2.CV_16S,1,0), cv2.Sobel(s,cv2.CV_16S,0,1)
    s = cv2.convertScaleAbs(cv2.subtract(x,y))
    s = cv2.blur(s,(9,9))
    cv2.imshow('nier',s)
    return s

    # ret, binary = cv2.threshold(s,40,255,cv2.THRESH_BINARY)
    # contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # for c in contours:
    #     x,y,w,h = cv2.boundingRect(c)
    #     if w>5 and h>10:
    #         cv2.rectangle(v,(x,y),(x+w,y+h),(155,155,0),1)
    # cv2.imshow('nier2',v)

    # cv2.waitKey()
    # cv2.destroyAllWindows() 
Developer ID: cilame, Project: vrequest, Lines of code: 21, Source file: pycv2.py

Example 13: tightboundingbox

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def tightboundingbox(self, image):
        ret, thresh = cv2.threshold(np.array(image, dtype=np.uint8), 0, 255, 0)
        im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        bb = []
        for c in contours:
            x, y, w, h = cv2.boundingRect(c)
            # pad the box by 1 px on each side (shift x, y by -1; grow w, h by 2) to encapsulate the entire figure
            w += 2
            h += 2
            x -= 1
            y -= 1
            x = np.max([0, x])
            y = np.max([0, y])
            bb.append([y, x, w, h])
        bb = self.nms(bb)
        return bb 
Developer ID: Hippogriff, Project: CSGNet, Lines of code: 18, Source file: Grouping.py

Example 14: affine_skew

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def affine_skew(self, tilt, phi, img, mask=None):
        h, w = img.shape[:2]
        if mask is None:
            mask = np.zeros((h, w), np.uint8)
            mask[:] = 255
        A = np.float32([[1, 0, 0], [0, 1, 0]])
        if phi != 0.0:
            phi = np.deg2rad(phi)
            s, c = np.sin(phi), np.cos(phi)
            A = np.float32([[c, -s], [s, c]])
            corners = [[0, 0], [w, 0], [w, h], [0, h]]
            tcorners = np.int32(np.dot(corners, A.T))
            x, y, w, h = cv2.boundingRect(tcorners.reshape(1, -1, 2))
            A = np.hstack([A, [[-x], [-y]]])
            img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
        if tilt != 1.0:
            s = 0.8*np.sqrt(tilt * tilt - 1)
            img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
            img = cv2.resize(img, (0, 0), fx=1.0 / tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)
            A[0] /= tilt
        if phi != 0.0 or tilt != 1.0:
            h, w = img.shape[:2]
            mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
        Ai = cv2.invertAffineTransform(A)
        return img, mask, Ai 
Developer ID: AVGInnovationLabs, Project: DoNotSnap, Lines of code: 27, Source file: AffineInvariantFeatures.py

Example 15: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import boundingRect [as alias]
def __init__(self, cntr):
        self.contour = cntr

        self.boundingRect = cv2.boundingRect(self.contour)

        [x, y, w, h] = self.boundingRect

        self.boundingRectX = x
        self.boundingRectY = y
        self.boundingRectWidth = w
        self.boundingRectHeight = h

        self.boundingRectArea = self.boundingRectWidth * self.boundingRectHeight

        self.centerX = (self.boundingRectX + self.boundingRectX + self.boundingRectWidth) / 2
        self.centerY = (self.boundingRectY + self.boundingRectY + self.boundingRectHeight) / 2

        self.diagonalSize = math.sqrt((self.boundingRectWidth ** 2) + (self.boundingRectHeight ** 2))

        self.aspectRatio = float(self.boundingRectWidth) / float(self.boundingRectHeight) 
Developer ID: Link009, Project: LPEX, Lines of code: 22, Source file: Functions.py


Note: The cv2.boundingRect method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.