

Python cv2.LINE_AA Attribute Code Examples

This article collects typical usage examples of the cv2.LINE_AA attribute in Python. If you are wondering what exactly cv2.LINE_AA is for, how to use it, or what real code that uses it looks like, the hand-picked examples below may help. You can also explore further usage examples from the cv2 module that this attribute belongs to.


A total of 15 code examples of the cv2.LINE_AA attribute are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
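Before the project examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) showing what cv2.LINE_AA does: it selects anti-aliased rendering for OpenCV drawing primitives such as cv2.line and cv2.putText, which smooths jagged edges compared to the default cv2.LINE_8. The canvas and output file name are purely illustrative.

import cv2
import numpy as np

# Blank demo canvas; the output file name below is illustrative only
canvas = np.zeros((200, 400, 3), dtype=np.uint8)

# LINE_AA draws with anti-aliasing; the default LINE_8 uses 8-connected, jagged edges
cv2.line(canvas, (10, 150), (390, 50), (0, 255, 0), 2, lineType=cv2.LINE_AA)
cv2.putText(canvas, "anti-aliased", (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
            0.8, (255, 255, 255), 1, cv2.LINE_AA)

cv2.imwrite("line_aa_demo.png", canvas)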

Example 1: vis_parsing

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def vis_parsing(img, parsing, colormap, show_segms=True):
    """Visualizes a single binary parsing."""
    img = img.astype(np.float32)
    idx = np.nonzero(parsing)

    parsing_alpha = cfg.VIS.SHOW_PARSS.PARSING_ALPHA
    colormap = colormap_utils.dict2array(colormap)
    parsing_color = colormap[parsing.astype(np.int64)]  # np.int was removed in NumPy 1.24; use an explicit integer dtype

    border_color = cfg.VIS.SHOW_PARSS.BORDER_COLOR
    border_thick = cfg.VIS.SHOW_PARSS.BORDER_THICK

    img[idx[0], idx[1], :] *= 1.0 - parsing_alpha
    # img[idx[0], idx[1], :] += alpha * parsing_color
    img += parsing_alpha * parsing_color

    if cfg.VIS.SHOW_PARSS.SHOW_BORDER and not show_segms:
        # 3-value unpacking follows the OpenCV 3.x API (see the compatibility note after Example 2)
        _, contours, _ = cv2.findContours(parsing.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(img, contours, -1, border_color, border_thick, cv2.LINE_AA)

    return img.astype(np.uint8) 
Developer: soeaver, Project: Parsing-R-CNN, Lines: 23, Source: vis.py

Example 2: vis_mask

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def vis_mask(img, mask, bbox_color, show_parss=False):
    """Visualizes a single binary mask."""
    img = img.astype(np.float32)
    idx = np.nonzero(mask)

    border_color = cfg.VIS.SHOW_SEGMS.BORDER_COLOR
    border_thick = cfg.VIS.SHOW_SEGMS.BORDER_THICK

    mask_color = bbox_color if cfg.VIS.SHOW_SEGMS.MASK_COLOR_FOLLOW_BOX else _WHITE
    mask_color = np.asarray(mask_color)
    mask_alpha = cfg.VIS.SHOW_SEGMS.MASK_ALPHA

    # 3-value unpacking follows the OpenCV 3.x API (see the compatibility note after this example)
    _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
    if cfg.VIS.SHOW_SEGMS.SHOW_BORDER:
        cv2.drawContours(img, contours, -1, border_color, border_thick, cv2.LINE_AA)

    if cfg.VIS.SHOW_SEGMS.SHOW_MASK and not show_parss:
        img[idx[0], idx[1], :] *= 1.0 - mask_alpha
        img[idx[0], idx[1], :] += mask_alpha * mask_color

    return img.astype(np.uint8) 
Developer: soeaver, Project: Parsing-R-CNN, Lines: 23, Source: vis.py
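Examples 1 and 2 unpack three return values from cv2.findContours, which matches the OpenCV 3.x API; in OpenCV 4.x the function returns only (contours, hierarchy). A small version-agnostic sketch (not part of the original projects) that works with either release:

import cv2

def find_contours_compat(binary_img):
    # cv2.findContours returns 3 values in OpenCV 3.x and 2 values in 4.x;
    # taking the last two items keeps the call compatible with both.
    results = cv2.findContours(binary_img.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
    contours, hierarchy = results[-2], results[-1]
    return contours, hierarchy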

Example 3: drawSplinesOnImage

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def drawSplinesOnImage(splines, color, im, pc, image_scale):
    for s in splines:
        # Get the shape of this spline and draw it on the image
        nds = []
        for wp in s["waypoints"]:
            nds.append(pc.tgcToCV2(wp["waypoint"]["x"], wp["waypoint"]["y"], image_scale))

        # Don't try to draw malformed splines
        if len(nds) == 0:
            continue

        # Uses points and not image pixels, so flip the x and y
        nds = np.array(nds)
        nds[:,[0, 1]] = nds[:,[1, 0]]
        nds = np.int32([nds]) # Bug with fillPoly, needs explicit cast to 32bit

        thickness = int(s["width"])
        if thickness < image_scale:
            thickness = int(image_scale)

        if s["isFilled"]:
            cv2.fillPoly(im, nds, color, lineType=cv2.LINE_AA)
        else:
            cv2.polylines(im, nds, s["isClosed"], color, thickness, lineType=cv2.LINE_AA) 
Developer: chadrockey, Project: TGC-Designer-Tools, Lines: 26, Source: tgc_visualizer.py

Example 4: drawWayOnImage

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def drawWayOnImage(way, color, im, pc, image_scale, thickness=-1, x_offset=0.0, y_offset=0.0):
    # Get the shape of this way and draw it as a poly
    nds = []
    for node in way.get_nodes(resolve_missing=True): # Allow automatically resolving missing nodes, but this is VERY slow with the API requests, try to request them above instead
        nds.append(pc.latlonToCV2(node.lat, node.lon, image_scale, x_offset, y_offset))
    # Uses points and not image pixels, so flip the x and y
    nds = np.array(nds)
    nds[:,[0, 1]] = nds[:,[1, 0]]
    nds = np.int32([nds]) # Bug with fillPoly, needs explicit cast to 32bit
    cv2.fillPoly(im, nds, color) 

    # Add option to draw shape again, but with thick line
    # Use this to automatically expand some shapes, for example water
    # For better masking
    if thickness > 0:
        # Need to draw again since fillPoly has no line thickness options that I've found
        cv2.polylines(im, nds, True, color, thickness, lineType=cv2.LINE_AA) 
Developer: chadrockey, Project: TGC-Designer-Tools, Lines: 19, Source: OSMTGC.py

Example 5: vis_class

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def vis_class(img, pos, class_str, bg_color):
    """Visualizes the class."""
    font_color = cfg.VIS.SHOW_CLASS.COLOR
    font_scale = cfg.VIS.SHOW_CLASS.FONT_SCALE

    x0, y0 = int(pos[0]), int(pos[1])
    # Compute text size.
    txt = class_str
    font = cv2.FONT_HERSHEY_SIMPLEX
    ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)
    # Place text background.
    back_tl = x0, y0 - int(1.3 * txt_h)
    back_br = x0 + txt_w, y0
    cv2.rectangle(img, back_tl, back_br, bg_color, -1)
    # Show text.
    txt_tl = x0, y0 - int(0.3 * txt_h)
    cv2.putText(img, txt, txt_tl, font, font_scale, font_color, lineType=cv2.LINE_AA)

    return img 
Developer: soeaver, Project: Parsing-R-CNN, Lines: 21, Source: vis.py

Example 6: show_who_in_image

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def show_who_in_image(self, path, get_face: bool = True, show: bool = True, turn_rgb: bool = True):
		min_im, image, all_frames = self.detect_which(path, get_face)

		for (confidance, who), frame in zip(min_im, all_frames):
			color = self.colors[who]
			x1, x2, y1, y2 = frame
			cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
			cv2.putText(image, f"{who}", (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3, cv2.LINE_AA) # -{round(float(confidance), 2)}

		if turn_rgb:
			image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

		if show:
			cv2.imshow("a", image)
			cv2.waitKey(0)

		return image 
Developer: aangfanboy, Project: TripletLossFace, Lines: 19, Source: face_recognition_tester.py

Example 7: show_who_in_image

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def show_who_in_image(self, path, get_face: bool = True, show: bool = True, turn_rgb: bool = True):
		min_im, image, all_frames = self.index_image(path, get_face)

		for (confidance, who), frame in zip(min_im, all_frames):
			try:
				color = self.colors[str(who)]
				x1, x2, y1, y2 = frame
				cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
				cv2.putText(image, f"id: {str(who)}- conf:{abs(round(float(confidance), 2))}", (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3, cv2.LINE_AA) # -{round(float(confidance), 2)}
			except KeyError:
				continue

		if turn_rgb:
			image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

		if show:
			cv2.imshow("a", image)
			cv2.waitKey(1)

		return image, min_im, all_frames 
Developer: aangfanboy, Project: TripletLossFace, Lines: 22, Source: main_engine.py

Example 8: add_coco_bbox

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def add_coco_bbox(self, bbox, cat, conf=1, show_txt=True, img_id='default'): 
        bbox = np.array(bbox, dtype=np.int32)
        # cat = (int(cat) + 1) % 80
        cat = int(cat)
        # print('cat', cat, self.names[cat])
        c = self.colors[cat][0][0].tolist()
        if self.theme == 'white':
            c = (255 - np.array(c)).tolist()
        txt = '{}{:.1f}'.format(self.names[cat], conf)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]
        cv2.rectangle(
          self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), c, 2)
        if show_txt:
            cv2.rectangle(self.imgs[img_id],
                        (bbox[0], bbox[1] - cat_size[1] - 2),
                        (bbox[0] + cat_size[0], bbox[1] - 2), c, -1)
            cv2.putText(self.imgs[img_id], txt, (bbox[0], bbox[1] - 2), 
                      font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA) 
Developer: tensorboy, Project: centerpose, Lines: 21, Source: debugger.py

Example 9: label_images

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def label_images(images, labels):
    font = cv.FONT_HERSHEY_SIMPLEX
    new_imgs = []
    for i, img in enumerate(images):
        new_img = ((img.copy() + 1.) * 127.5).astype(np.uint8)
        if new_img.shape[-1] == 3:
            new_img = new_img[..., ::-1]
            new_img = cv.resize(new_img, (100, 100), interpolation=cv.INTER_LINEAR)
            new_img = cv.putText(new_img, str(labels[i]), (10, 30), font, 1, (255, 255, 255), 2, cv.LINE_AA)
            new_img = cv.copyMakeBorder(new_img, top=2, bottom=2, left=2, right=2, borderType=cv.BORDER_CONSTANT,
                                        value=(255, 255, 255))
        else:
            new_img = np.squeeze(new_img)
            new_img = cv.resize(new_img, (100, 100), interpolation=cv.INTER_LINEAR)
            new_img = cv.putText(new_img, str(labels[i]), (10, 30), font, 1, (255), 2, cv.LINE_AA)
            new_img = new_img[..., None]

        new_img = (new_img / 127.5 - 1.0).astype(np.float32)
        new_imgs.append(new_img[..., ::-1])
    return np.stack(new_imgs, axis=0) 
Developer: ermongroup, Project: generative_adversary, Lines: 22, Source: utils.py

Example 10: draw_box_label

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def draw_box_label(id,img, bbox_cv2, box_color=(0, 255, 255), show_label=True):
    '''
    Helper function for drawing the bounding boxes and the labels
    bbox_cv2 = [top, left, bottom, right] (note the unpacking order below)
    '''
    #box_color= (0, 255, 255)
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_size = 0.7
    font_color = (0, 0, 0)
    left, top, right, bottom = bbox_cv2[1], bbox_cv2[0], bbox_cv2[3], bbox_cv2[2]
    
    # Draw the bounding box
    cv2.rectangle(img, (left, top), (right, bottom), box_color, 4)
    
    if show_label:
        # Draw a filled box on top of the bounding box (as the background for the labels)
        cv2.rectangle(img, (left-2, top-45), (right+2, top), box_color, -1, 1)
        
        # Output the labels that show the track id and the y coordinate of the bounding box center.
        text_x= 'id='+str(id)
        cv2.putText(img,text_x,(left,top-25), font, font_size, font_color, 1, cv2.LINE_AA)
        text_y= 'y='+str((top+bottom)/2)
        cv2.putText(img,text_y,(left,top-5), font, font_size, font_color, 1, cv2.LINE_AA)
    
    return img 
Developer: ambakick, Project: Person-Detection-and-Tracking, Lines: 27, Source: helpers.py

Example 11: vis_class

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def vis_class(img, pos, class_str, font_scale=0.35):
    """Visualizes the class."""
    img = img.astype(np.uint8)
    x0, y0 = int(pos[0]), int(pos[1])
    # Compute text size.
    txt = class_str
    font = cv2.FONT_HERSHEY_SIMPLEX
    ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)
    # Place text background.
    back_tl = x0, y0 - int(1.3 * txt_h)
    back_br = x0 + txt_w, y0
    cv2.rectangle(img, back_tl, back_br, _GREEN, -1)
    # Show text.
    txt_tl = x0, y0 - int(0.3 * txt_h)
    cv2.putText(img, txt, txt_tl, font, font_scale, _GRAY, lineType=cv2.LINE_AA)
    return img 
Developer: yihui-he, Project: KL-Loss, Lines: 18, Source: vis.py

Example 12: drawFloorCrop

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def drawFloorCrop(event, x, y, flags, params):
    global perspectiveMatrix, name, RENEW_TETRAGON
    imgCroppingPolygon = np.zeros_like(params['imgFloorCorners'])
    if event == cv2.EVENT_RBUTTONUP:
        cv2.destroyWindow(f'Floor Corners for {name}')
    if len(params['croppingPolygons'][name]) > 4 and event == cv2.EVENT_LBUTTONUP:
        RENEW_TETRAGON = True
        h = params['imgFloorCorners'].shape[0]
        # delete 5th extra vertex of the floor cropping tetragon
        params['croppingPolygons'][name] = np.delete(params['croppingPolygons'][name], -1, 0)
        params['croppingPolygons'][name] = params['croppingPolygons'][name] - [h,0]
        
        # Sort cropping tetragon vertices counter-clockwise starting with top left
        params['croppingPolygons'][name] = counterclockwiseSort(params['croppingPolygons'][name])
        # Get the matrix of perspective transformation
        params['croppingPolygons'][name] = np.reshape(params['croppingPolygons'][name], (4,2))
        tetragonVertices = np.float32(params['croppingPolygons'][name])
        tetragonVerticesUpd = np.float32([[0,0], [0,h], [h,h], [h,0]])
        perspectiveMatrix[name] = cv2.getPerspectiveTransform(tetragonVertices, tetragonVerticesUpd)
    if event == cv2.EVENT_LBUTTONDOWN:
        if len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON:
            params['croppingPolygons'][name] = np.array([[0,0]])
            RENEW_TETRAGON = False
        if len(params['croppingPolygons'][name]) == 1:
            params['croppingPolygons'][name][0] = [x,y]
        params['croppingPolygons'][name] = np.append(params['croppingPolygons'][name], [[x,y]], axis=0)
    if event == cv2.EVENT_MOUSEMOVE and not (len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON):
        params['croppingPolygons'][name][-1] = [x,y]
        if len(params['croppingPolygons'][name]) > 1:
            cv2.fillPoly(
                imgCroppingPolygon,
                [np.reshape(
                    params['croppingPolygons'][name],
                    (len(params['croppingPolygons'][name]),2)
                )],
                BGR_COLOR['green'], cv2.LINE_AA)
            imgCroppingPolygon = cv2.addWeighted(params['imgFloorCorners'], 1.0, imgCroppingPolygon, 0.5, 0.)
            cv2.imshow(f'Floor Corners for {name}', imgCroppingPolygon) 
Developer: colinlaney, Project: animal-tracking, Lines: 40, Source: track.py

Example 13: plot_one_box

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image img
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) 
Developer: zbyuan, Project: pruning_yolov3, Lines: 14, Source: utils.py
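As a quick illustration of how this helper might be called (the image path, box coordinates, and label here are made up for the sketch and are not from the original project):

import cv2

# Hypothetical input image and xyxy box, purely for illustration
img = cv2.imread("example.jpg")
if img is not None:
    plot_one_box([50, 60, 220, 300], img, color=(0, 0, 255), label="person 0.92")
    cv2.imwrite("example_boxed.jpg", img)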

Example 14: get_last

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def get_last(self):
        ''' Gets latest from camera and model '''
        p = self.conn.pipeline()
        p.xrevrange(self.camera, count=1)  # Latest frame
        p.xrevrange(self.boxes, count=1)   # Latest boxes
        cmsg, bmsg = p.execute()
        if cmsg:
            last_id = cmsg[0][0].decode('utf-8')
            label = f'{self.camera}:{last_id}'
            data = io.BytesIO(cmsg[0][1][self.field])
            img = Image.open(data)
            if bmsg:
                # Note: np.fromstring(text, sep=',') is deprecated in recent NumPy releases
                boxes = np.fromstring(bmsg[0][1]['boxes'.encode('utf-8')][1:-1], sep=',')
                label += ' people: {}'.format(bmsg[0][1]['people'.encode('utf-8')].decode('utf-8'))
                for box in range(int(bmsg[0][1]['people'.encode('utf-8')])):  # Draw boxes
                    x1 = boxes[box*4]
                    y1 = boxes[box*4+1]
                    x2 = boxes[box*4+2]
                    y2 = boxes[box*4+3]
                    draw = ImageDraw.Draw(img)
                    draw.rectangle(((x1, y1), (x2, y2)), width=5, outline='red')
            arr = np.array(img)
            arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
            cv2.putText(arr, label, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 1, cv2.LINE_AA)
            ret, img = cv2.imencode('.jpg', arr)
            return img.tobytes()
        else:
            # TODO: put a "we're experiencing technical difficulties" image
            pass 
Developer: RedisGears, Project: EdgeRealtimeVideoAnalytics, Lines: 31, Source: server.py

Example 15: draw

# Required module import: import cv2 [as alias]
# Alternatively: from cv2 import LINE_AA [as alias]
def draw():
    filenames = [os.path.splitext(f)[0] for f in glob.glob("for_task3/*.txt")]
    txt_files = [s + ".txt" for s in filenames]
    for txt in txt_files:
        image = cv2.imread('test_original/'+ txt.split('/')[1].split('.')[0]+'.jpg', cv2.IMREAD_COLOR)
        with open(txt, 'r') as txt_file:
            for line in csv.reader(txt_file):
                box = [int(string, 10) for string in line[0:8]]
                if len(line) < 9:
                    print(txt)
                cv2.rectangle(image, (box[0], box[1]), (box[4], box[5]), (0,255,0), 2)
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(image, line[8].upper(), (box[0],box[1]), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
        cv2.imwrite('task2_result_draw/'+ txt.split('/')[1].split('.')[0]+'.jpg', image) 
Developer: zzzDavid, Project: ICDAR-2019-SROIE, Lines: 16, Source: main.py


Note: The cv2.LINE_AA attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's license before distributing or using it, and do not reproduce this article without permission.