This page collects typical usage examples of the Python method PostProcessing.get_roi_from_images. If you have been wondering what PostProcessing.get_roi_from_images does, how to call it, or where to find concrete usage, the curated code examples below should help. You can also explore further examples of the containing class, PostProcessing.
Three code examples of PostProcessing.get_roi_from_images are listed below, sorted by popularity by default. Upvoting the examples you find useful helps the site surface better Python code samples.
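None of the listings include the body of get_roi_from_images itself; they only ever call it with a rectangle (x, y, w, h) and a frame. As a rough orientation, a minimal stand-in under that assumption would simply crop the frame with NumPy slicing. The function below is a hypothetical sketch for illustration, not the actual PostProcessing implementation, and the image path is a placeholder:

import cv2

def get_roi_from_images(rect, frame):
    # Hypothetical stand-in: crop the region of interest out of the frame.
    # Assumes rect = (x, y, w, h) in pixel coordinates and frame is a NumPy
    # image array (rows indexed by y, columns by x).
    x, y, w, h = [int(v) for v in rect]
    return frame[y:y + h, x:x + w]

frame = cv2.imread('frame.png')                      # placeholder image path
roi = get_roi_from_images((10, 20, 64, 48), frame)   # 64x48 window at (10, 20)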
Example 1: run
# Required module: import PostProcessing  [as alias]
# Or: from PostProcessing import get_roi_from_images  [as alias]
# Also requires: import cv2
def run(self, rect, cur_frame, next_frame):
    x, y, w, h = rect
    cur_roi = PostProcessing.get_roi_from_images(rect, cur_frame)
    center_of_window = (x + (w // 2), y + (h // 2))
    # compute the centroid of the current frame's ROI from its image moments
    cur_moment = cv2.moments(cur_roi)
    cx = x + int(cur_moment['m10'] / cur_moment['m00'])
    cy = y + int(cur_moment['m01'] / cur_moment['m00'])
    cur_frame_centroid = (cx, cy)
    # compute the centroid of the next frame using the current window
    cur_roi_next = PostProcessing.get_roi_from_images(rect, next_frame)
    cur_moment_next = cv2.moments(cur_roi_next)
    next_cx = x + int(cur_moment_next['m10'] / cur_moment_next['m00'])
    next_cy = y + int(cur_moment_next['m01'] / cur_moment_next['m00'])
    next_frame_centroid = (next_cx, next_cy)
    # shift the window center by the displacement between the two centroids
    x0, y0 = cur_frame_centroid
    x1, y1 = next_frame_centroid
    xwin, ywin = center_of_window
    new_center_of_window = (xwin + (x1 - x0), ywin + (y1 - y0))
    new_rect = (new_center_of_window[0] - (w // 2), new_center_of_window[1] - (h // 2), w, h)
    print(new_rect)
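One caveat with the moment-based centroids used throughout these examples: cv2.moments returns m00 == 0 when the ROI contains no non-zero pixels (for example, an all-black patch of a binary mask), and the division then raises ZeroDivisionError. A small illustrative helper, not part of the original code, that falls back to the geometric window center in that case:

import cv2

def safe_centroid(roi, x, y, w, h):
    # Centroid of a single-channel ROI from its image moments; fall back to
    # the window center when the ROI carries no mass (m00 == 0).
    m = cv2.moments(roi)
    if m['m00'] == 0:
        return x + w // 2, y + h // 2
    return x + int(m['m10'] / m['m00']), y + int(m['m01'] / m['m00'])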
Example 2: add_object
# Required module: import PostProcessing  [as alias]
# Or: from PostProcessing import get_roi_from_images  [as alias]
# Also requires: import cv2
def add_object(self, obj, frame):
    # set up the ROI for the new object and convert it to HSV
    roi = PostProcessing.get_roi_from_images(obj, frame)
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    n_in_frame = 0
    n_not_moving = 0
    self.list_of_objects.append((obj, hsv_roi, n_in_frame, n_not_moving))
Example 3: apply_meanshift
# Required module: import PostProcessing  [as alias]
# Or: from PostProcessing import get_roi_from_images  [as alias]
# Also requires: import cv2, math
def apply_meanshift(self, obj, cur_frame, next_frame):
    new_obj = None
    if next_frame is not None and cur_frame is not None:
        cur_frame_gray = cv2.cvtColor(cur_frame, cv2.COLOR_BGR2GRAY)
        next_frame_gray = cv2.cvtColor(next_frame, cv2.COLOR_BGR2GRAY)
        x, y, w, h = obj
        if w > 0 and h > 0:
            cur_roi = PostProcessing.get_roi_from_images(obj, cur_frame_gray)
            # center_of_window = (x + (w / 2), y + (h / 2))
            # compute the centroid of the current frame's ROI from its moments
            cur_moment = cv2.moments(cur_roi)
            cx0 = x + int(cur_moment['m10'] / cur_moment['m00'])
            cy0 = y + int(cur_moment['m01'] / cur_moment['m00'])
            num_of_iteration = 0
            delta = -1
            prev_obj = obj
            # iterate until the shift drops to one pixel or less, or 15 iterations pass
            while (num_of_iteration < 15) and (delta > 1 or delta == -1):
                x1, y1, w1, h1 = prev_obj
                next_frame_roi = PostProcessing.get_roi_from_images(prev_obj, next_frame_gray)
                next_h, next_w = next_frame_roi.shape
                if next_w > 0 and next_h > 0:
                    # centroid of the window in the next frame
                    next_frame_moment = cv2.moments(next_frame_roi)
                    cx1 = x1 + int(next_frame_moment['m10'] / next_frame_moment['m00'])
                    cy1 = y1 + int(next_frame_moment['m01'] / next_frame_moment['m00'])
                    # shift the window by the displacement between successive centroids
                    deltacx = cx1 - cx0
                    deltacy = cy1 - cy0
                    new_obj = x1 + deltacx, y1 + deltacy, w1, h1
                    # initialization for the next iteration
                    cx0, cy0 = cx1, cy1
                    prev_obj = new_obj
                    delta = math.sqrt(deltacx ** 2 + deltacy ** 2)
                num_of_iteration += 1
    return new_obj
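For context, the sketch below shows one way apply_meanshift could be driven across consecutive frames of a video. The capture path, the initial window, and the Tracker class name are illustrative assumptions, not taken from the project shown above:

import cv2

cap = cv2.VideoCapture('video.avi')   # placeholder video path
ok, cur_frame = cap.read()
obj = (100, 100, 50, 50)              # arbitrary initial (x, y, w, h) window
tracker = Tracker()                   # hypothetical class exposing apply_meanshift

while ok:
    ok, next_frame = cap.read()
    if not ok:
        break
    new_obj = tracker.apply_meanshift(obj, cur_frame, next_frame)
    if new_obj is not None:
        obj = new_obj
    cur_frame = next_frame

cap.release()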