

Python cv2.rectangle Method Code Examples

This article collects and summarizes typical usage examples of the cv2.rectangle method in Python. If you are wondering how exactly cv2.rectangle is used, what its arguments look like in practice, or where to find working examples, the curated code samples below should help. You can also explore further usage examples from the cv2 module that contains this method.


The following shows 15 code examples of the cv2.rectangle method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
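Before the examples, here is a minimal sketch of the basic call on a blank NumPy canvas (sizes and colors are arbitrary, chosen only for illustration). It shows the common signature cv2.rectangle(img, pt1, pt2, color, thickness), where a thickness of cv2.FILLED (or -1) produces a solid box instead of an outline.

import cv2
import numpy as np

# Blank 400x600 BGR canvas.
canvas = np.zeros((400, 600, 3), dtype=np.uint8)
# Outlined green box from top-left (50, 50) to bottom-right (350, 250), 2 px border.
cv2.rectangle(canvas, (50, 50), (350, 250), (0, 255, 0), 2)
# Filled blue box; cv2.FILLED fills the rectangle instead of drawing a border.
cv2.rectangle(canvas, (400, 50), (560, 150), (255, 0, 0), cv2.FILLED)
cv2.imshow("rectangle demo", canvas)
cv2.waitKey(0)
cv2.destroyAllWindows()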

Example 1: drawBoundingBox

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def drawBoundingBox(self,imgcv,result):
        for box in result:
            # print(box)
            x1,y1,x2,y2 = (box['topleft']['x'],box['topleft']['y'],box['bottomright']['x'],box['bottomright']['y'])
            conf = box['confidence']
            # print(conf)
            label = box['label']
            if conf < self.predictThresh:
                continue
            # print(x1,y1,x2,y2,conf,label)
            cv2.rectangle(imgcv,(x1,y1),(x2,y2),(0,255,0),6)
            labelSize=cv2.getTextSize(label,cv2.FONT_HERSHEY_COMPLEX,0.5,2)
            # print('labelSize>>',labelSize)
            _x1 = x1
            _y1 = y1#+int(labelSize[0][1]/2)
            _x2 = _x1+labelSize[0][0]
            _y2 = y1-int(labelSize[0][1])
            cv2.rectangle(imgcv,(_x1,_y1),(_x2,_y2),(0,255,0),cv2.FILLED)
            cv2.putText(imgcv,label,(x1,y1),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,0,0),1)
        return imgcv 
Author: AmeyaWagh | Project: Traffic_sign_detection_YOLO | Lines: 22 | Source: objectDetectorYOLO.py

Example 2: __get_annotation__

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def __get_annotation__(self, mask, image=None):

        _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
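        # Note: the three-value unpacking above assumes the OpenCV 3.x API; on OpenCV 2.x/4.x,
        # cv2.findContours returns only (contours, hierarchy), so drop the leading underscore there.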

        segmentation = []
        for contour in contours:
            # Valid polygons have >= 6 coordinates (3 points)
            if contour.size >= 6:
                segmentation.append(contour.flatten().tolist())
        RLEs = cocomask.frPyObjects(segmentation, mask.shape[0], mask.shape[1])
        RLE = cocomask.merge(RLEs)
        # RLE = cocomask.encode(np.asfortranarray(mask))
        area = cocomask.area(RLE)
        [x, y, w, h] = cv2.boundingRect(mask)

        if image is not None:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.drawContours(image, contours, -1, (0,255,0), 1)
            cv2.rectangle(image,(x,y),(x+w,y+h), (255,0,0), 2)
            cv2.imshow("", image)
            cv2.waitKey(1)

        return segmentation, [x, y, w, h], area 
Author: hazirbas | Project: coco-json-converter | Lines: 25 | Source: generate_coco_json.py

Example 3: draw_boxes_frame

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def draw_boxes_frame(frame, frame_size, boxes_dicts, class_names, input_size):
  """Draws detected boxes in a video frame"""
  boxes_dict = boxes_dicts[0]
  resize_factor = (frame_size[0] / input_size[1], frame_size[1] / input_size[0])
  for cls in range(len(class_names)):
    boxes = boxes_dict[cls]
    color = (0, 0, 255)
    if np.size(boxes) != 0:
      for box in boxes:
        xy = box[:4]
        xy = [int(xy[i] * resize_factor[i % 2]) for i in range(4)]
        cv2.rectangle(frame, (xy[0], xy[1]), (xy[2], xy[3]), color[::-1], 2)
        (text_width, text_height), baseline = cv2.getTextSize(class_names[cls],
                                                              cv2.FONT_HERSHEY_SIMPLEX,
                                                              0.75, 1)
        cv2.rectangle(frame,
                      (xy[0], xy[1]),
                      (xy[0] + text_width, xy[1] - text_height - baseline),
                      color[::-1],
                      thickness=cv2.FILLED)
        cv2.putText(frame, class_names[cls], (xy[0], xy[1] - baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 1) 
Author: kcosta42 | Project: Tensorflow-YOLOv3 | Lines: 23 | Source: utils.py

Example 4: show

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3]**2 * w
        hh = obj[4]**2 * h
        cv2.rectangle(im,
            (int(centerx - ww/2), int(centery - hh/2)),
            (int(centerx + ww/2), int(centery + hh/2)),
            (0,0,255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows() 
Author: AmeyaWagh | Project: Traffic_sign_detection_YOLO | Lines: 19 | Source: misc.py

Example 5: vis_det_and_mask

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def vis_det_and_mask(im, class_name, dets, masks, thresh=0.8):
    """Visual debugging of detections."""
    num_dets = np.minimum(10, dets.shape[0])
    colors_mask = random_colors(num_dets)
    colors_bbox = np.round(np.random.rand(num_dets, 3) * 255)
    # sort rois according to the coordinates, draw upper bbox first
    draw_mask = np.zeros(im.shape[:2], dtype=np.uint8)

    for i in range(1):
        bbox = tuple(int(np.round(x)) for x in dets[i, :4])
        mask = masks[i, :, :]
        full_mask = unmold_mask(mask, bbox, im.shape)

        score = dets[i, -1]
        if score > thresh:
            word_width = len(class_name)
            cv2.rectangle(im, bbox[0:2], bbox[2:4], colors_bbox[i], 2)
            cv2.rectangle(im, bbox[0:2], (bbox[0] + 18 + word_width*8, bbox[1]+15), colors_bbox[i], thickness=cv2.FILLED)
            apply_mask(im, full_mask, draw_mask, colors_mask[i], 0.5)
            draw_mask += full_mask
            cv2.putText(im, '%s' % (class_name), (bbox[0]+5, bbox[1] + 12), cv2.FONT_HERSHEY_PLAIN,
                        1.0, (255,255,255), thickness=1)
    return im 
Author: guoruoqian | Project: cascade-rcnn_Pytorch | Lines: 25 | Source: net_utils.py

Example 6: ProcessFrame

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def ProcessFrame(self, frame):
        # segment arm region
        segment = self.SegmentArm(frame)

        # make a copy of the segmented image to draw on
        draw = cv2.cvtColor(segment, cv2.COLOR_GRAY2RGB)

        # draw some helpers for correctly placing hand
        cv2.circle(draw, (self.imgWidth//2, self.imgHeight//2), 3, [255,102,0], 2)
        cv2.rectangle(draw, (self.imgWidth//3, self.imgHeight//3), (self.imgWidth*2//3, self.imgHeight*2//3), [255,102,0], 2)

        # find the hull of the segmented area, and based on that find the
        # convexity defects
        [contours,defects] = self.FindHullDefects(segment)

        # detect the number of fingers depending on the contours and convexity defects
        # draw defects that belong to fingers green, others red
        [nofingers,draw] = self.DetectNumberFingers(contours, defects, draw)

        # print number of fingers on image
        cv2.putText(draw, str(nofingers), (30,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255))
        return draw 
Author: PacktPublishing | Project: OpenCV-Computer-Vision-Projects-with-Python | Lines: 24 | Source: chapter2.py

Example 7: draw_outputs

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def draw_outputs(img, outputs, class_names=None):
    boxes, objectness, classes = outputs
    #boxes, objectness, classes = boxes[0], objectness[0], classes[0]
    wh = np.flip(img.shape[0:2])
    if img.ndim == 2 or img.shape[2] == 1:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    min_wh = np.amin(wh)
    if min_wh <= 100:
        font_size = 0.5
    else:
        font_size = 1
    for i in range(classes.shape[0]):
        x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
        x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
        img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 1)
        img = cv2.putText(img, '{}'.format(int(classes[i])), x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_size,
                          (0, 0, 255), 1)
    return img 
Author: akkaze | Project: tf2-yolo3 | Lines: 20 | Source: utils.py

Example 8: draw_labels

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def draw_labels(x, y, class_names=None):
    img = x.numpy()
    if img.ndim == 2 or img.shape[2] == 1:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    boxes, classes = tf.split(y, (4, 1), axis=-1)
    classes = classes[..., 0]
    wh = np.flip(img.shape[0:2])
    min_wh = np.amin(wh)
    if min_wh <= 100:
        font_size = 0.5
    else:
        font_size = 1
    for i in range(len(boxes)):
        x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
        x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
        img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 1)
        if class_names:
            img = cv2.putText(img, class_names[int(classes[i])], x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_size,
                              (0, 0, 255), 1)
        else:
            img = cv2.putText(img, str(int(classes[i])), x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
    return img 
Author: akkaze | Project: tf2-yolo3 | Lines: 24 | Source: utils.py

Example 9: detect

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)
    
    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))

    net.blobs['data'].data[...] = img
    out = net.forward() 
    box, conf, cls = postprocess(origimg, out)

    for i in range(len(box)):
        p1 = (box[i][0], box[i][1])
        p2 = (box[i][2], box[i][3])
        cv2.rectangle(origimg, p1, p2, (0,255,0))
        p3 = (max(p1[0], 15), max(p1[1], 15))
        title = "%s:%.2f" % (COCO_CLASSES[int(cls[i])], conf[i])
        cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)

    k = cv2.waitKey(0) & 0xff
    # Exit if ESC pressed
    if k == 27: return False
    return True 
Author: PINTO0309 | Project: MobileNetv2-SSDLite | Lines: 26 | Source: demo_caffe.py

Example 10: detect

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)
    
    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))

    net.blobs['data'].data[...] = img
    out = net.forward() 
    box, conf, cls = postprocess(origimg, out)

    for i in range(len(box)):
        p1 = (box[i][0], box[i][1])
        p2 = (box[i][2], box[i][3])
        cv2.rectangle(origimg, p1, p2, (0,255,0))
        p3 = (max(p1[0], 15), max(p1[1], 15))
        title = "%s:%.2f" % (CLASSES[int(cls[i])], conf[i])
        cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)

    k = cv2.waitKey(0) & 0xff
    # Exit if ESC pressed
    if k == 27: return False
    return True 
Author: PINTO0309 | Project: MobileNetv2-SSDLite | Lines: 26 | Source: demo_caffe_voc.py

Example 11: overlay_boxes

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def overlay_boxes(self, image, predictions):
        """
        Adds the predicted boxes on top of the image

        Arguments:
            image (np.ndarray): an image as returned by OpenCV
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `labels`.
        """
        labels = predictions.get_field("labels")
        boxes = predictions.bbox

        colors = self.compute_colors_for_labels(labels).tolist()

        for box, color in zip(boxes, colors):
            box = box.to(torch.int64)
            top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
            image = cv2.rectangle(
                image, tuple(top_left), tuple(bottom_right), tuple(color), 1
            )

        return image 
Author: Res2Net | Project: Res2Net-maskrcnn | Lines: 24 | Source: predictor.py

Example 12: show

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def show(self, index):
        img, target = self.__getitem__(index)
        for obj in target:
            obj = obj.astype(int)
            cv2.rectangle(img, (obj[0], obj[1]), (obj[2], obj[3]), (255,0,0), 3)
        cv2.imwrite('./image.jpg', img)




## test
# if __name__ == '__main__':
#     ds = VOCDetection('../../../../../dataset/VOCdevkit/', [('2012', 'train')],
#             None, AnnotationTransform())
#     print(len(ds))
#     img, target = ds[0]
#     print(target)
#     ds.show(1) 
Author: ShuangXieIrene | Project: ssds.pytorch | Lines: 20 | Source: voc.py

Example 13: vis_class

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def vis_class(img, pos, class_str, bg_color):
    """Visualizes the class."""
    font_color = cfg.VIS.SHOW_CLASS.COLOR
    font_scale = cfg.VIS.SHOW_CLASS.FONT_SCALE

    x0, y0 = int(pos[0]), int(pos[1])
    # Compute text size.
    txt = class_str
    font = cv2.FONT_HERSHEY_SIMPLEX
    ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)
    # Place text background.
    back_tl = x0, y0 - int(1.3 * txt_h)
    back_br = x0 + txt_w, y0
    cv2.rectangle(img, back_tl, back_br, bg_color, -1)
    # Show text.
    txt_tl = x0, y0 - int(0.3 * txt_h)
    cv2.putText(img, txt, txt_tl, font, font_scale, font_color, lineType=cv2.LINE_AA)

    return img 
Author: soeaver | Project: Parsing-R-CNN | Lines: 21 | Source: vis.py

Example 14: intersection_over_union

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def intersection_over_union(box_a, box_b):
    # determine the (x, y)-coordinates of the intersection rectangle
    intersect = intersection(box_a, box_b)

    # compute the area of intersection rectangle
    inter_area = max(0, intersect[2] - intersect[0] + 1) * max(0, intersect[3] - intersect[1] + 1)

    if inter_area == 0:
        return 0.0
    
    # compute the area of both the prediction and ground-truth
    # rectangles
    box_a_area = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
    box_b_area = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)

    # compute the intersection over union by taking the intersection
    # area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
    iou = inter_area / float(box_a_area + box_b_area - inter_area)

    # return the intersection over union value
    return iou 
Author: blakeblackshear | Project: frigate | Lines: 24 | Source: util.py
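The function above relies on an intersection() helper from the same project that is not reproduced here; a minimal sketch of what it is assumed to do, returning the (x1, y1, x2, y2) corners of the overlap region of two boxes, could look like this:

def intersection(box_a, box_b):
    # Assumed behavior: boxes are (x1, y1, x2, y2); return the corners of the overlap rectangle.
    # The caller clamps negative widths/heights to zero, so no explicit overlap check is needed here.
    return (max(box_a[0], box_b[0]), max(box_a[1], box_b[1]),
            min(box_a[2], box_b[2]), min(box_a[3], box_b[3]))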

Example 15: main

# Required module: import cv2 [as alias]
# Or: from cv2 import rectangle [as alias]
def main():
	#IMG PATHS
	imagePath = "test3.jpg"
	cascPath = "cascades/haarcascade_pedestrian.xml"

	pplCascade = cv2.CascadeClassifier(cascPath)
	image = cv2.imread(imagePath)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	
	gray = normalize_grayimage(gray)
	 
	pedestrians = pplCascade.detectMultiScale(
		gray,
		scaleFactor=1.2,
		minNeighbors=10,
		minSize=(32,96),
		flags=cv2.CASCADE_SCALE_IMAGE
	)

	print "Found {0} ppl!".format(len(pedestrians))

	#Draw a rectangle around the detected objects
	for (x, y, w, h) in pedestrians:
		cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)

	cv2.imwrite("saida.jpg", image)
	cv2.imshow("Ppl found", image)
	cv2.waitKey(0)
	
	return 0 
Author: felipecorrea | Project: pedestrian-haar-based-detector | Lines: 32 | Source: detect.py


Note: The cv2.rectangle method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please follow the corresponding project's license when distributing or using the code, and do not republish without permission.