This article collects typical usage examples of the Python method cv2.meanShift. If you have been wondering what cv2.meanShift does, how to call it, or what working code looks like, the curated examples below should help. You can also explore further usage of the cv2 module that the method belongs to.
Three code examples of cv2.meanShift are shown below, sorted by popularity by default.
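Before the examples, here is a minimal, self-contained sketch of the usual cv2.meanShift workflow: build a hue histogram of an initial region of interest, back-project it onto each new frame, and let mean-shift move the search window. The video file name, ROI coordinates, and HSV mask thresholds below are placeholders chosen for illustration, not values taken from the examples that follow.
import cv2
import numpy as np

cap = cv2.VideoCapture('video.mp4')   # placeholder file name
ret, frame = cap.read()

# initial search window (x, y, w, h) -- placeholder coordinates
x, y, w, h = 300, 200, 100, 50
track_window = (x, y, w, h)

# hue histogram of the initial ROI, masked to reasonably saturated pixels
roi = frame[y:y+h, x:x+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# stop after 10 iterations or when the window moves by less than 1 pixel
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
    # meanShift returns the number of iterations and the updated window
    niter, track_window = cv2.meanShift(dst, track_window, term_crit)
    x, y, w, h = track_window
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('mean-shift', frame)
    if cv2.waitKey(30) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()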
Example 1: _append_boxes_from_meanshift
# Required module: import cv2 [as alias]
# Or: from cv2 import meanShift [as alias]
# Also required by this example: import copy
def _append_boxes_from_meanshift(self, frame, box_all):
    """Append to the list all bounding boxes found with mean-shift tracking.

    Mean-shift tracking is used to track objects from frame to frame.
    This information is combined with a saliency map to discard
    false positives and focus only on relevant objects that move.

    :param frame: current RGB image frame
    :param box_all: list to which bounding boxes from tracking are appended
    :returns: new list of all collected bounding boxes
    """
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    for i in range(len(self.object_roi)):
        roi_hist = copy.deepcopy(self.object_roi[i])
        box_old = copy.deepcopy(self.object_box[i])

        # back-project the stored hue histogram and shift the old window
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        ret, box_new = cv2.meanShift(dst, tuple(box_old), self.term_crit)
        self.object_box[i] = copy.deepcopy(box_new)

        # discard boxes whose center has not moved enough
        (xo, yo, wo, ho) = box_old
        (xn, yn, wn, hn) = box_new
        co = [xo + wo / 2, yo + ho / 2]
        cn = [xn + wn / 2, yn + hn / 2]
        if (co[0] - cn[0]) ** 2 + (co[1] - cn[1]) ** 2 >= self.min_shift2:
            box_all.append(box_new)
    return box_all
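Example 1 is a method of a larger tracker class, so the attributes it relies on (self.object_roi, self.object_box, self.term_crit, self.min_shift2) are initialized elsewhere. The following is a hypothetical sketch of what that setup might look like; the class name, the _add_object helper, the termination criteria, and the threshold values are all illustrative assumptions, not code from the original project.
import cv2
import numpy as np

class MultiObjectTracker:  # hypothetical class name for illustration
    def __init__(self):
        self.object_roi = []   # one normalized hue histogram per tracked object
        self.object_box = []   # one (x, y, w, h) box per tracked object
        # stop mean-shift after 100 iterations or a shift smaller than 1 pixel
        self.term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 1)
        # minimum squared center displacement for a box to count as "moving"
        self.min_shift2 = 5 ** 2

    def _add_object(self, frame, box):
        """Register a new object to track, given its bounding box in the current frame."""
        x, y, w, h = box
        hsv_roi = cv2.cvtColor(frame[y:y+h, x:x+w], cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
        roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
        cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
        self.object_roi.append(roi_hist)
        self.object_box.append(box)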
Example 2: test_features
# Required module: import cv2 [as alias]
# Or: from cv2 import meanShift [as alias]
# Also required by this example: import matplotlib.pyplot as plt
def test_features():
    from atx.drivers.android_minicap import AndroidDeviceMinicap

    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    # r, h, c, w = 200, 100, 200, 100
    # track_window = (c, r, w, h)
    # oldimg = cv2.imread('base1.png')
    # roi = oldimg[r:r+h, c:c+w]
    # hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # mask = cv2.inRange(hsv_roi, 0, 255)
    # roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    # cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    # term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    while True:
        try:
            # shape is (rows, cols), i.e. (height, width)
            h, w = d._screen.shape[:2]
            img = cv2.resize(d._screen, (w // 2, h // 2))
            cv2.imshow('preview', img)

            # plot the 256-bin intensity histogram of the downscaled frame
            hist = cv2.calcHist([img], [0], None, [256], [0, 256])
            plt.plot(hist)
            plt.show()

            # if img.shape == oldimg.shape:
            #     hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            #     ret, track_window = cv2.meanShift(hsv, track_window, term_crit)
            #     x, y, w, h = track_window
            #     cv2.rectangle(img, (x, y), (x + w, y + h), 255, 2)
            #     cv2.imshow('preview', img)
            cv2.waitKey(1)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview')
Example 3: run_main
# Required module: import cv2 [as alias]
# Or: from cv2 import meanShift [as alias]
# Also required by this example: import numpy as np
def run_main():
    cap = cv2.VideoCapture('crash-480.mp4')
    # cap.set(3, 320)
    # cap.set(4, 240)

    # Read the first frame of the video
    ret, frame = cap.read()

    # Set the ROI (Region Of Interest): a rectangle around the building
    # that we want to track
    c, r, w, h = 427, 240, 50, 50
    track_window = (c, r, w, h)

    # Create a mask and a normalized hue histogram of the ROI
    roi = frame[r:r+h, c:c+w]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 30., 32.)), np.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)

    while True:
        t = cv2.getTickCount()
        ret, frame = cap.read()
        if not ret:  # stop when the video ends
            break

        # back-project the ROI histogram and shift the tracking window
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        x, y, w, h = track_window
        cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
        cv2.putText(frame, 'Tracked', (x - 25, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    .5, (255, 255, 255), 1, cv2.LINE_AA)

        t = cv2.getTickCount() - t
        print("detection time = %gms" % (t * 1000. / cv2.getTickFrequency()))

        cv2.imshow('Tracking', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()