This article collects typical usage examples of the Python method cv2.groupRectangles. If you are unsure what cv2.groupRectangles does, how to call it, or how it is used in practice, the curated code samples below may help. You can also explore further usage examples from the cv2 module.
The following presents 5 code examples of cv2.groupRectangles, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
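Before the examples, here is a minimal standalone sketch of the call itself: cv2.groupRectangles takes a list of [x, y, w, h] rectangles, clusters similar ones, and returns the merged rectangles together with per-cluster counts. The rectangle values below are illustrative.

import cv2

rects = [[32, 40, 100, 120],
         [30, 42, 102, 118],   # near-duplicate of the first box
         [30, 42, 102, 118]]
grouped, weights = cv2.groupRectangles(rects, groupThreshold=1, eps=0.2)
# grouped: one averaged box; weights: how many inputs fell into the cluster
print(grouped, weights)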
Example 1: detect
# Required module: import cv2 [as alias]
# Or: from cv2 import groupRectangles [as alias]
# This snippet also needs: import numpy as np
def detect(img_file, detector_xml_path, dest_img_file):
    img = cv2.imread(img_file)
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detector = cv2.CascadeClassifier(detector_xml_path)
    min_size = (min(50, gray_img.shape[0] // 10), min(50, gray_img.shape[1] // 10))
    hits = detector.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=4,
                                     flags=0, minSize=min_size)
    #cv2.groupRectangles(hits, 2)
    print(hits)
    hits_img = np.copy(img)
    for (x, y, w, h) in hits:
        cv2.rectangle(hits_img, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.imwrite(dest_img_file, hits_img)
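The groupRectangles call above is left commented out. If merging is wanted, one way to complete it is sketched below; this is not part of the original snippet. Because cv2.groupRectangles rejects any cluster with groupThreshold or fewer members, the detections are commonly duplicated first so that isolated hits still survive the grouping.

    # hedged completion of the commented-out call above:
    # duplicate each hit so single detections form a cluster of two
    rect_list = [list(r) for r in hits] * 2
    merged, weights = cv2.groupRectangles(rect_list, groupThreshold=1, eps=0.2)
    print(merged)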
Example 2: get_text_with_location
# Required module: import cv2 [as alias]
# Or: from cv2 import groupRectangles [as alias]
# extract_image_from_location is a project-specific OCR helper defined elsewhere
def get_text_with_location(boxed_image, contours, img):
    image_text_dict = {}
    for contour in contours:
        # get the rectangle bounding the contour
        [x, y, w, h] = cv2.boundingRect(contour)
        # cv2.groupRectangles could merge overlapping boxes here
        # skip boxes that are too wide; the minimum-size filter is optional
        # if w < 20 or h < 20:
        #     continue
        if w > 300:
            continue
        # draw the bounding box (with a small margin) on the original image
        cv2.rectangle(
            boxed_image, (x, y), (x + w + 10, y + h + 10),
            thickness=2,
            color=(0, 123, 123))
        # read the text inside the box and store it keyed by location
        box_read = extract_image_from_location(img, x, y, w, h)
        box_read = box_read.strip()
        image_text_dict[(x, y)] = box_read
    return image_text_dict
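For context, the contours argument is typically produced with cv2.findContours. A minimal sketch follows; the input path and threshold choices are illustrative, and it assumes OpenCV 4's two-value return (OpenCV 3 returned three values).

import cv2

img = cv2.imread('page.png')   # hypothetical input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# invert + Otsu so dark text on a light page becomes white blobs
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
texts = get_text_with_location(img.copy(), contours, img)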
Example 3: filterRois
# Required module: import cv2 [as alias]
# Or: from cv2 import groupRectangles [as alias]
def filterRois(rects, img_w, img_h, roi_min_area, roi_max_area, roi_min_side, roi_max_side, roi_max_aspect_ratio):
    filteredRects = []
    filteredRectsSet = set()
    for rect in rects:
        if tuple(rect) in filteredRectsSet:  # exclude rectangles with identical coordinates
            continue
        x, y, x2, y2 = rect
        w = x2 - x
        h = y2 - y
        assert w >= 0 and h >= 0
        # apply the size, area, and aspect-ratio filters
        if h == 0 or w == 0 or \
           x2 > img_w or y2 > img_h or \
           w < roi_min_side or h < roi_min_side or \
           w > roi_max_side or h > roi_max_side or \
           w * h < roi_min_area or w * h > roi_max_area or \
           w / h > roi_max_aspect_ratio or h / w > roi_max_aspect_ratio:
            continue
        filteredRects.append(rect)
        filteredRectsSet.add(tuple(rect))
    # rectangles with similar coordinates could be combined using
    # non-maximum suppression or cv2.groupRectangles, e.g.:
    # groupedRectangles, weights = cv2.groupRectangles(np.asanyarray(rectsInput, np.float).tolist(), 1, 0.3)
    # groupedRectangles = nms_python(np.asarray(rectsInput, np.float), 0.5)
    # (note: np.float has been removed from recent NumPy; use plain float)
    assert len(filteredRects) > 0
    return filteredRects
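The nms_python referenced in the commented-out line is not defined in this snippet. Below is a minimal IoU-based greedy non-maximum suppression sketch in the same spirit; the implementation is illustrative, not the project's own, and since these boxes carry no confidence scores it simply prefers larger boxes first.

import numpy as np

def nms_python(boxes, iou_thresh):
    """Greedy NMS over boxes given as [x1, y1, x2, y2] rows."""
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (x2 - x1) * (y2 - y1)
    order = np.argsort(areas)[::-1]   # largest boxes first (no scores here)
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # intersection of the current box with the remaining ones
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0, xx2 - xx1) * np.maximum(0, yy2 - yy1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= iou_thresh]  # drop heavily overlapping boxes
    return boxes[keep]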
Example 4: detectvideo
# Required module: import cv2 [as alias]
# Or: from cv2 import groupRectangles [as alias]
# This snippet also needs: import os, import imageio, import numpy as np
def detectvideo(vid_file, detector_xml_path, dest_img_dir):
    if not os.path.exists(dest_img_dir):
        os.makedirs(dest_img_dir)
    detector = cv2.CascadeClassifier(detector_xml_path)
    vid = imageio.get_reader(vid_file, 'ffmpeg')
    # If size and source_size are not equal, then the device was probably
    # rotated (like a mobile phone) and we should compensate for the rotation.
    # Frames will have 'source_size' dimensions but we need 'size'.
    metadata = vid.get_meta_data()
    rotate = False
    if metadata['source_size'] != metadata['size']:
        print('Rotating')
        rotate = True
    for i, img in enumerate(vid):
        if rotate:
            #img = np.transpose(img, axes=(1, 0, 2)).copy()
            img = np.rot90(img).copy()
        print('Frame ', i, img.shape)
        # note: imageio yields RGB frames while cv2 assumes BGR; grayscale
        # conversion still works, but colors in the saved frames are swapped
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        min_size = (min(20, gray_img.shape[0] // 10), min(20, gray_img.shape[1] // 10))
        hits = detector.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=3,
                                         flags=0, minSize=min_size)
        #cv2.groupRectangles(hits, 2)
        print(len(hits), ' hits')
        hits_img = np.copy(img)
        if len(hits) > 0:
            for (x, y, w, h) in hits:
                cv2.rectangle(hits_img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        cv2.imwrite(os.path.join(dest_img_dir, 'frame-%d.png' % i), hits_img)
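A hypothetical invocation; the video path is a placeholder, while cv2.data.haarcascades is the directory where pip-installed OpenCV ships its bundled cascade files.

import cv2

cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
detectvideo('clip.mp4', cascade_path, 'detected_frames')  # 'clip.mp4' is illustrative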
Example 5: advance_frame
# Required module: import cv2 [as alias]
# Or: from cv2 import groupRectangles [as alias]
# This snippet also needs: import copy
def advance_frame(self, frame, proto_objects_map):
    """Advances the algorithm by a single frame.

    This method tracks all objects via the following steps:
     - adds all bounding boxes from the saliency map as potential
       targets
     - finds the bounding boxes from the previous frame in the current
       frame via mean-shift tracking
     - combines the two lists by removing duplicates

    Certain targets are discarded:
     - targets that are too small
     - targets that don't move

    :param frame: new input RGB frame
    :param proto_objects_map: corresponding proto-objects map of the
        frame
    :returns: frame annotated with bounding boxes around all objects
        that are being tracked
    """
    self.tracker = copy.deepcopy(frame)
    # build a list of all bounding boxes
    box_all = []
    # append all bounding boxes found in the current proto-objects map
    box_all = self._append_boxes_from_saliency(proto_objects_map, box_all)
    # find all bounding boxes extrapolated from the last frame
    # via mean-shift tracking
    box_all = self._append_boxes_from_meanshift(frame, box_all)
    # only keep boxes that are both salient and tracked by mean-shift
    if len(self.object_roi) == 0:
        group_thresh = 0  # no previous frame: keep all boxes from saliency
    else:
        group_thresh = 1  # a box must appear in both lists to survive
    box_grouped, _ = cv2.groupRectangles(box_all, group_thresh, 0.1)
    # update mean-shift bookkeeping for the remaining boxes
    self._update_mean_shift_bookkeeping(frame, box_grouped)
    # draw the remaining boxes
    for (x, y, w, h) in box_grouped:
        cv2.rectangle(self.tracker, (x, y), (x + w, y + h),
                      (0, 255, 0), 2)
    return self.tracker
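The group_thresh logic works because cv2.groupRectangles only keeps clusters with more than groupThreshold members: with groupThreshold=1, a box must be produced by both the saliency step and the mean-shift step to survive. A small self-contained demonstration of that behavior, with made-up coordinates:

import cv2

boxes = [[10, 10, 50, 50],    # found only by saliency -> dropped
         [200, 80, 60, 40],   # found by saliency...
         [202, 82, 58, 41]]   # ...and (approximately) by mean-shift -> kept
kept, weights = cv2.groupRectangles(boxes, groupThreshold=1, eps=0.1)
print(kept)   # only the averaged cluster around (200, 80) survives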