

Python cv2.meanShift Method Code Examples

This article collects typical usage examples of the cv2.meanShift method in Python. If you are wondering what cv2.meanShift does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples from the cv2 module.


Three code examples of cv2.meanShift are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
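Before the examples, a minimal sketch of the typical cv2.meanShift workflow may help. The pattern below (the video file name and ROI coordinates are placeholders, not taken from any of the projects quoted here) builds a hue histogram of the target region, back-projects it onto every new frame, and lets meanShift slide the window toward the densest part of the resulting probability map. All three examples that follow are variations of this pattern.

import cv2
import numpy as np

cap = cv2.VideoCapture('video.mp4')          # placeholder file name
ret, frame = cap.read()

# Initial tracking window (x, y, w, h) around the target -- placeholder values.
x, y, w, h = 300, 200, 100, 80
track_window = (x, y, w, h)

# Hue histogram of the ROI, normalized to [0, 255] for back-projection.
roi = frame[y:y+h, x:x+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# Stop after 10 iterations or once the window moves by less than 1 pixel.
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Probability map of the target colour, then shift the window onto it.
    dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
    ret, track_window = cv2.meanShift(dst, track_window, term_crit)
    x, y, w, h = track_window
    cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
    cv2.imshow('meanShift', frame)
    if cv2.waitKey(30) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()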

Example 1: _append_boxes_from_meanshift

# Required import: import cv2 [as alias]
# Or: from cv2 import meanShift [as alias]
def _append_boxes_from_meanshift(self, frame, box_all):
        """Adds to the list all bounding boxes found with mean-shift tracking

            Mean-shift tracking is used to track objects from frame to frame.
            This information is combined with a saliency map to discard
            false-positives and focus only on relevant objects that move.

            :param frame: current BGR image frame (OpenCV's default channel order)
            :param box_all: list to which bounding boxes from tracking are appended
            :returns: new list of all collected bounding boxes
        """
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        for i in range(len(self.object_roi)):
            roi_hist = copy.deepcopy(self.object_roi[i])
            box_old = copy.deepcopy(self.object_box[i])

            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
            ret, box_new = cv2.meanShift(dst, tuple(box_old), self.term_crit)
            self.object_box[i] = copy.deepcopy(box_new)

            # discard boxes that don't move
            (xo, yo, wo, ho) = box_old
            (xn, yn, wn, hn) = box_new

            co = [xo + wo/2, yo + ho/2]
            cn = [xn + wn/2, yn + hn/2]
            if (co[0]-cn[0])**2 + (co[1]-cn[1])**2 >= self.min_shift2:
                box_all.append(box_new)

        return box_all 
Developer: PacktPublishing, Project: OpenCV-Computer-Vision-Projects-with-Python, Lines: 33, Source: tracking.py
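Example 1 is a method of a tracking class and relies on self.object_roi (per-object ROI histograms) and self.object_box (their last known windows) being filled elsewhere in the project. A minimal sketch of how such an ROI histogram is usually prepared is shown below; the helper name and threshold values are illustrative assumptions, not the project's actual code.

import cv2
import numpy as np

def build_roi_hist(frame, box):
    """Hypothetical helper: normalized hue histogram for an (x, y, w, h) box."""
    x, y, w, h = box
    hsv_roi = cv2.cvtColor(frame[y:y+h, x:x+w], cv2.COLOR_BGR2HSV)
    # Ignore very dark / desaturated pixels so they don't dominate the histogram.
    mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    return roi_hist

# Typical usage inside the tracker (illustrative):
# self.object_roi.append(build_roi_hist(frame, box))
# self.object_box.append(box)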

Example 2: test_features

# Required import: import cv2 [as alias]
# Or: from cv2 import meanShift [as alias]
import cv2
import matplotlib.pyplot as plt


def test_features():
    from atx.drivers.android_minicap import AndroidDeviceMinicap
    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    # r, h, c, w = 200, 100, 200, 100
    # track_window = (c, r, w, h)
    # oldimg = cv2.imread('base1.png')
    # roi = oldimg[r:r+h, c:c+w]
    # hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # mask = cv2.inRange(hsv_roi, 0, 255)
    # roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180])
    # cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    # term_cirt = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,  10, 1)


    while True:
        try:
            # shape[:2] is (rows, cols), so these names are effectively swapped.
            w, h = d._screen.shape[:2]
            img = cv2.resize(d._screen, (h // 2, w // 2))
            cv2.imshow('preview', img)

            # Plot the first-channel intensity histogram of the current frame.
            hist = cv2.calcHist([img], [0], None, [256], [0, 256])
            plt.plot(hist)
            plt.show()
            # if img.shape == oldimg.shape:
            #     # hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            #     # ret, track_window = cv2.meanShift(hsv, track_window, term_cirt)
            #     # x, y, w, h = track_window
            #     cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            #     cv2.imshow('preview', img)
            # # cv2.imshow('preview', img)
            cv2.waitKey(1)
        except KeyboardInterrupt:
            break

    cv2.destroyWindow('preview') 
Developer: NetEaseGame, Project: ATX, Lines: 39, Source: test_monkey.py
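The tracking branch in Example 2 is commented out, and as written it would pass the full HSV image straight to cv2.meanShift. meanShift expects a single-channel back-projection (probability) image, so if that branch were re-enabled it would normally look roughly like the sketch below, reusing roi_hist and term_cirt from the commented-out setup at the top of the function:

            if img.shape == oldimg.shape:
                hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                # Back-project the ROI histogram first; meanShift needs a
                # probability map, not the raw HSV frame.
                dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
                ret, track_window = cv2.meanShift(dst, track_window, term_cirt)
                x, y, w, h = track_window
                cv2.rectangle(img, (x, y), (x + w, y + h), 255, 2)
                cv2.imshow('preview', img)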

Example 3: run_main

# Required import: import cv2 [as alias]
# Or: from cv2 import meanShift [as alias]
import cv2
import numpy as np


def run_main():
    cap = cv2.VideoCapture('crash-480.mp4')
    #cap.set(3,320)
    #cap.set(4,240)

    # Read the first frame of the video
    ret, frame = cap.read()

    # Set the ROI (Region of Interest). Actually, this is a
    # rectangle of the building that we're tracking
    c,r,w,h = 427,240,50,50
    track_window = (c,r,w,h)

    # Create mask and normalized histogram
    roi = frame[r:r+h, c:c+w]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 30.,32.)), np.array((180.,255.,255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
    
    while True:
        t = cv2.getTickCount()
        ret, frame = cap.read()
        if not ret:
            break

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)

        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        x,y,w,h = track_window
        cv2.rectangle(frame, (x,y), (x+w,y+h), 255, 2)
        cv2.putText(frame, 'Tracked', (x-25, y-10), cv2.FONT_HERSHEY_SIMPLEX,
            .5, (255, 255, 255), 1, cv2.LINE_AA)
        
        t = cv2.getTickCount() - t
        # Convert elapsed ticks to milliseconds.
        print("detection time = %gms" % (t / cv2.getTickFrequency() * 1000.))
        cv2.imshow('Tracking', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows() 
Developer: alduxvm, Project: rpi-opencv, Lines: 46, Source: object-tracking.py
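A note on the termination criteria in Example 3: (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1) lets the mean-shift iteration run for at most 80 iterations per frame and stops earlier once the window center moves by less than 1 pixel. The integer that cv2.meanShift returns alongside the updated window is the number of iterations it actually performed.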


Note: The cv2.meanShift examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code, and do not reproduce this article without permission.