當前位置: 首頁>>代碼示例>>Python>>正文


Python cv2.FONT_HERSHEY_TRIPLEX屬性代碼示例

本文整理匯總了Python中cv2.FONT_HERSHEY_TRIPLEX屬性的典型用法代碼示例。如果您正苦於以下問題:Python cv2.FONT_HERSHEY_TRIPLEX屬性的具體用法?Python cv2.FONT_HERSHEY_TRIPLEX怎麽用?Python cv2.FONT_HERSHEY_TRIPLEX使用的例子?那麽, 這裏精選的屬性代碼示例或許可以為您提供幫助。您也可以進一步了解該屬性所在cv2的用法示例。


在下文中一共展示了cv2.FONT_HERSHEY_TRIPLEX屬性的11個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: vis_detections

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import FONT_HERSHEY_TRIPLEX [as 別名]
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw up to 10 detection boxes with their scores onto an image.

    Args:
        im: BGR image (numpy array); drawn on in place.
        class_name: Class label of the detections (currently not rendered;
            kept for interface compatibility with callers).
        dets: (N, 5) array of detections ``[x1, y1, x2, y2, score]``.
        thresh: Minimum score a detection must exceed to be drawn.

    Returns:
        The annotated image.
    """
    for i in range(np.minimum(10, dets.shape[0])):
        bbox = tuple(int(np.round(x)) for x in dets[i, :4])
        score = dets[i, -1]
        # Bug fix: the original compared against a hard-coded 0.8 and
        # silently ignored the `thresh` parameter.
        if score > thresh:
            cv2.rectangle(im, bbox[0:2], bbox[2:4], (0, 110, 255), 5)

            text = '%.3f' % (score)

            (text_width, text_height) = cv2.getTextSize(
                text, cv2.FONT_HERSHEY_TRIPLEX, fontScale=1.2, thickness=2)[0]

            # Filled background rectangle so the score text stays readable.
            cv2.rectangle(im, (bbox[0], bbox[1]),
                          (bbox[0] + text_width, bbox[1] + text_height),
                          (0, 255, 251), -1)

            cv2.putText(im, text, (bbox[0], bbox[1] + text_height),
                        cv2.FONT_HERSHEY_TRIPLEX, 1.2, (0, 0, 0), thickness=2)
    return im
開發者ID:timy90022,項目名稱:One-Shot-Object-Detection,代碼行數:18,代碼來源:net_utils.py

示例2: draw

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import FONT_HERSHEY_TRIPLEX [as 別名]
def draw():
    """Draw the annotated bounding boxes onto every image listed in jpglist.txt.

    For each image name in ``box_path + 'jpglist.txt'``, reads the matching
    annotation file (``<stem>.txt`` under ``image_path``; each line holds 8
    comma-separated quoted coordinates plus a transcription), draws the
    axis-aligned box from corners 1 and 3, and writes the result to
    ``box_path``.

    Relies on module-level ``box_path`` and ``image_path``.
    """
    # `with` blocks fix the original's leaked file handles (neither file
    # was ever closed).
    with open(box_path + 'jpglist.txt') as f:
        for line_num, line in enumerate(f, start=1):
            print('Image:', line_num)
            name = line.strip('\n')
            img = cv2.imread(image_path + name)

            # Bug fix: the original used name.strip('.jpg'), which strips any
            # of the CHARACTERS '.', 'j', 'p', 'g' from both ends of the name
            # (e.g. 'pic.jpg' -> 'ic'), not the extension.
            stem = name[:-len('.jpg')] if name.endswith('.jpg') else name

            # read each coordinate and draw the box
            with open(image_path + stem + '.txt') as f_txt:
                for line_txt in f_txt:
                    coor = line_txt.split(',')
                    x1 = int(coor[0].strip('\''))
                    y1 = int(coor[1].strip('\''))
                    x3 = int(coor[4].strip('\''))
                    y3 = int(coor[5].strip('\''))
                    # (x1, y1) and (x3, y3) are opposite box corners.
                    cv2.rectangle(img, (x1, y1), (x3, y3), (255, 0, 0), 1)
            # Removed dead locals from the original (`img_size`, `text`,
            # `text_show`) that were only used by commented-out code.
            cv2.imwrite(box_path + name, img)
開發者ID:zzzDavid,項目名稱:ICDAR-2019-SROIE,代碼行數:37,代碼來源:boxing.py

示例3: show_failed_capture_frame

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import FONT_HERSHEY_TRIPLEX [as 別名]
def show_failed_capture_frame(self):
    """Show a 2-second black info frame telling the user the chessboard capture failed."""
    out_w = int(self.width * self.output_scale_factor)
    out_h = int(self.height * self.output_scale_factor)
    info_frame = np.zeros((out_h, out_w, 3), np.uint8)
    print("py: Capture failed, unable to find chessboard! Fix position and press spacebar again")

    # (origin, message) pairs, stacked around the vertical center.
    messages = [
        ((50, int(out_h / 2 - 40)), "Capture failed, unable to find chessboard!"),
        ((60, int(out_h / 2 + 40)), "Fix position and press spacebar again"),
    ]
    for origin, message in messages:
        cv2.putText(info_frame, message, origin, cv2.FONT_HERSHEY_TRIPLEX, 0.7, (0, 255, 0))

    cv2.imshow("left + right", info_frame)
    cv2.waitKey(2000)
開發者ID:luxonis,項目名稱:depthai,代碼行數:17,代碼來源:calibrate.py

示例4: put_text_on_image

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import FONT_HERSHEY_TRIPLEX [as 別名]
def put_text_on_image(image, lines):
  """Render `lines` right-aligned near the bottom of `image`, last line lowest."""
  font = cv2.FONT_HERSHEY_TRIPLEX
  for idx, text in enumerate(reversed(lines)):
    text_w, text_h = cv2.getTextSize(text, font, 0.4, 1)[0]
    origin = (image.shape[1] - text_w,
              image.shape[0] - 2 * idx * text_h - 10)
    cv2.putText(image, text, origin, font, 0.4, [0, 0, 0])
開發者ID:yuanming-hu,項目名稱:fc4,代碼行數:9,代碼來源:summary_utils.py

示例5: put_text

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import FONT_HERSHEY_TRIPLEX [as 別名]
def put_text(img, text, position, scale_factor=1):
    """Draw white `text` on `img`, centered at the fractional (row, col) `position`."""
    font = cv2.FONT_HERSHEY_TRIPLEX
    (text_w, text_h), _baseline = cv2.getTextSize(text, font, scale_factor,
                                                  thickness=1)
    img_h, img_w, _ = img.shape
    # Convert the relative (row, col) anchor into a pixel origin that
    # centers the text box on that point.
    anchor = (int(img_w * position[1] - text_w * 0.5),
              int(img_h * position[0] - text_h * 0.5))
    cv2.putText(img, text, anchor, font, scale_factor, (255, 255, 255))
開發者ID:open-mmlab,項目名稱:mmskeleton,代碼行數:13,代碼來源:visualization.py

示例6: put_text

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import FONT_HERSHEY_TRIPLEX [as 別名]
def put_text(img, text, position, scale_factor=1):
    """Overlay white `text` centered at relative coordinates `position` (row, col)."""
    size = cv2.getTextSize(text, cv2.FONT_HERSHEY_TRIPLEX, scale_factor,
                           thickness=1)
    text_w, text_h = size[0]
    rows, cols, _channels = img.shape
    org = (int(cols * position[1] - text_w * 0.5),
           int(rows * position[0] - text_h * 0.5))
    cv2.putText(img, text, org, cv2.FONT_HERSHEY_TRIPLEX, scale_factor,
                (255, 255, 255))
開發者ID:1zgh,項目名稱:st-gcn,代碼行數:10,代碼來源:visualization.py

示例7: add_text_to_image

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import FONT_HERSHEY_TRIPLEX [as 別名]
def add_text_to_image(image,
                      text='',
                      position=None,
                      font=cv2.FONT_HERSHEY_TRIPLEX,
                      font_size=1.0,
                      line_type=cv2.LINE_8,
                      line_width=1,
                      color=(255, 255, 255)):
  """Draw `text` on `image` and return the image.

  NOTE: The input image is assumed to use `RGB` channel order.

  Args:
    image: Image to draw on; returned unchanged when it is None.
    text: Text to render; an empty string is a no-op. (default: '')
    position: Bottom-left corner of the text. Passed straight through to
      cv2.putText. (default: None)
    font: OpenCV font face. (default: cv2.FONT_HERSHEY_TRIPLEX)
    font_size: Font scale. (default: 1.0)
    line_type: OpenCV line type. (default: cv2.LINE_8)
    line_width: Stroke thickness. (default: 1)
    color: Text color in `RGB` order. (default: (255, 255, 255))

  Returns:
    The image with the text drawn on it (or the input unchanged when there
    is nothing to draw).
  """
  # Guard clause: nothing to draw.
  if image is None or not text:
    return image

  # Positional call order: img, text, org, fontFace, fontScale, color,
  # thickness, lineType, bottomLeftOrigin.
  cv2.putText(image, text, position, font, font_size, color,
              line_width, line_type, False)
  return image
開發者ID:genforce,項目名稱:higan,代碼行數:43,代碼來源:visualizer.py

示例8: draw_boxes_v2

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import FONT_HERSHEY_TRIPLEX [as 別名]
def draw_boxes_v2(image, boxes, labels, listPrediction):
    """Draw each detection box with a gender + age label underneath it.

    Args:
        image: BGR image; boxes and labels are drawn in place.
        boxes: Objects with normalized center/size fields ``x, y, w, h``.
        labels: Unused; kept for interface compatibility with callers.
        listPrediction: Per-box predictions; index [3] is a 2-element
            female/male score pair, index [1] the age prediction.

    Returns:
        The annotated image.
    """
    for i, box in enumerate(boxes):
        # Convert normalized center/size to pixel corner coordinates.
        xmin = int((box.x - box.w / 2) * image.shape[1])
        xmax = int((box.x + box.w / 2) * image.shape[1])
        ymin = int((box.y - box.h / 2) * image.shape[0])
        ymax = int((box.y + box.h / 2) * image.shape[0])

        cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 3)

        scores = array(listPrediction[i][3])
        gender = "Female" if scores[0] > scores[1] else "Male"

        cv2.putText(image, gender + ' ' + str(listPrediction[i][1]),
                    (xmin, ymax + 25),
                    fontFace=cv2.FONT_HERSHEY_TRIPLEX,
                    fontScale=1e-3 * 3 * image.shape[0],
                    color=(0, 255, 0))
        # NOTE(review): the original called cv2.cvtColor(image, COLOR_BGR2RGB)
        # here and discarded the result (a no-op); if a colorspace conversion
        # is actually intended, assign the return value. Also removed an
        # unused string expression, an unused local `a`, the manual index
        # counter, and dead `del` statements.
    return image
開發者ID:OValery16,項目名稱:gender-age-classification,代碼行數:35,代碼來源:utils.py

示例9: draw

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import FONT_HERSHEY_TRIPLEX [as 別名]
def draw():
    """Collect box scale and aspect-ratio statistics for every listed image.

    For each image name in ``box_path + 'jpglist.txt'``, reads the matching
    annotation file and accumulates, per box:

      * rect_scale: ``sqrt(box_area / image_area)``
      * rect_ratio: ``box_width / box_height``

    Relies on module-level ``box_path`` and ``image_path``.

    Returns:
        (rect_scale_pack, rect_ratio_pack): two parallel lists of floats,
        one entry per annotation line across all images.
    """
    rect_scale_pack = []
    rect_ratio_pack = []

    # `with` blocks fix the original's leaked file handles (neither file
    # was ever closed).
    with open(box_path + 'jpglist.txt') as f:
        for line_num, line in enumerate(f, start=1):
            print('Image:', line_num)
            name = line.strip('\n')
            img = cv2.imread(image_path + name)
            img_area = img.shape[0] * img.shape[1]

            # Bug fix: the original used name.strip('.jpg'), which strips any
            # of the CHARACTERS '.', 'j', 'p', 'g' from both ends of the name,
            # not the extension.
            stem = name[:-len('.jpg')] if name.endswith('.jpg') else name

            with open(image_path + stem + '.txt') as f_txt:
                for line_txt in f_txt:
                    coor = line_txt.split(',')
                    x1 = int(coor[0].strip('\''))
                    y1 = int(coor[1].strip('\''))
                    x3 = int(coor[4].strip('\''))
                    y3 = int(coor[5].strip('\''))

                    rect_size = (x3 - x1) * (y3 - y1)
                    rect_scale_pack.append(np.sqrt(rect_size / img_area))
                    rect_ratio_pack.append((x3 - x1) / (y3 - y1))

    return rect_scale_pack, rect_ratio_pack
開發者ID:zzzDavid,項目名稱:ICDAR-2019-SROIE,代碼行數:48,代碼來源:dataset_statistic.py

示例10: main

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import FONT_HERSHEY_TRIPLEX [as 別名]
def main(g_id):
    """Capture hand-gesture training images from the webcam for gesture `g_id`.

    Segments skin-colored regions inside a fixed ROI via an HSV range mask,
    and when a large enough contour is found while capturing is active, crops
    it, pads it square, resizes it to (image_x, image_y), and saves it under
    gestures/<g_id>/. Capturing toggles with the 'c' key; stops after 1200
    saved images. Relies on module-level `create_folder`, `image_x`, `image_y`.
    """
    total_pics = 1200
    cap = cv2.VideoCapture(0)
    # Fixed capture ROI: top-left (x, y) and size (w, h) inside the frame.
    x, y, w, h = 300, 50, 350, 350

    create_folder("gestures/" + str(g_id))
    pic_no = 0
    flag_start_capturing = False
    # Frames elapsed since capturing was toggled on; saving starts after 50.
    frames = 0

    while True:
        ret, frame = cap.read()
        # Mirror the frame so the preview behaves like a mirror.
        frame = cv2.flip(frame, 1)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # HSV range mask for skin tones; bounds are hand-tuned constants.
        mask2 = cv2.inRange(hsv, np.array([2, 50, 60]), np.array([25, 150, 255]))
        res = cv2.bitwise_and(frame, frame, mask=mask2)
        gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
        median = cv2.GaussianBlur(gray, (5, 5), 0)

        # Dilate then morphological-close to fill holes in the skin mask.
        kernel_square = np.ones((5, 5), np.uint8)
        dilation = cv2.dilate(median, kernel_square, iterations=2)
        opening=cv2.morphologyEx(dilation,cv2.MORPH_CLOSE,kernel_square)

        ret, thresh = cv2.threshold(opening, 30, 255, cv2.THRESH_BINARY)
        # Restrict contour search to the ROI.
        thresh = thresh[y:y + h, x:x + w]
        # NOTE(review): indexing [1] assumes the OpenCV 3.x findContours
        # return signature; OpenCV 2.x/4.x return contours at index [0].
        contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]

        if len(contours) > 0:
            contour = max(contours, key=cv2.contourArea)
            # Only save once the contour is large and capturing has warmed up.
            if cv2.contourArea(contour) > 10000 and frames > 50:
                x1, y1, w1, h1 = cv2.boundingRect(contour)
                pic_no += 1
                save_img = thresh[y1:y1 + h1, x1:x1 + w1]
                # Pad the shorter dimension so the crop becomes square
                # before resizing.
                if w1 > h1:
                    save_img = cv2.copyMakeBorder(save_img, int((w1 - h1) / 2), int((w1 - h1) / 2), 0, 0,
                                                  cv2.BORDER_CONSTANT, (0, 0, 0))
                elif h1 > w1:
                    save_img = cv2.copyMakeBorder(save_img, 0, 0, int((h1 - w1) / 2), int((h1 - w1) / 2),
                                                  cv2.BORDER_CONSTANT, (0, 0, 0))
                save_img = cv2.resize(save_img, (image_x, image_y))
                cv2.putText(frame, "Capturing...", (30, 60), cv2.FONT_HERSHEY_TRIPLEX, 2, (127, 255, 255))
                cv2.imwrite("gestures/" + str(g_id) + "/" + str(pic_no) + ".jpg", save_img)

        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, str(pic_no), (30, 400), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (127, 127, 255))
        cv2.imshow("Capturing gesture", frame)
        cv2.imshow("thresh", thresh)
        keypress = cv2.waitKey(1)
        # 'c' toggles capturing; turning it off resets the warm-up counter.
        if keypress == ord('c'):
            if flag_start_capturing == False:
                flag_start_capturing = True
            else:
                flag_start_capturing = False
                frames = 0
        if flag_start_capturing == True:
            frames += 1
        if pic_no == total_pics:
            break 
開發者ID:akshaybahadur21,項目名稱:Emojinator,代碼行數:61,代碼來源:CreateGest.py

示例11: store_images

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import FONT_HERSHEY_TRIPLEX [as 別名]
def store_images(g_id):
	"""Capture hand-gesture training images from the webcam for gesture `g_id`.

	Segments the hand via histogram back-projection (using the stored hand
	histogram from `get_hand_hist`) plus Otsu thresholding, and when a large
	enough contour is found while capturing is active, crops it, pads it
	square, resizes to (image_x, image_y), randomly mirrors ~half the crops,
	and saves under gestures/<g_id>/. Capturing toggles with the 'c' key;
	stops after 1200 saved images. Relies on module-level `get_hand_hist`,
	`create_folder`, `image_x`, `image_y`.
	"""
	total_pics = 1200
	hist = get_hand_hist()
	# Prefer camera index 1; fall back to 0 when it cannot be read.
	cam = cv2.VideoCapture(1)
	if cam.read()[0]==False:
		cam = cv2.VideoCapture(0)
	# Fixed capture ROI: top-left (x, y) and size (w, h) inside the frame.
	x, y, w, h = 300, 100, 300, 300

	create_folder("gestures/"+str(g_id))
	pic_no = 0
	flag_start_capturing = False
	# Frames elapsed since capturing was toggled on; saving starts after 50.
	frames = 0
	
	while True:
		img = cam.read()[1]
		# Mirror the frame so the preview behaves like a mirror.
		img = cv2.flip(img, 1)
		imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
		# Back-project the stored skin histogram to get a hand likelihood map.
		dst = cv2.calcBackProject([imgHSV], [0, 1], hist, [0, 180, 0, 256], 1)
		disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(10,10))
		cv2.filter2D(dst,-1,disc,dst)
		blur = cv2.GaussianBlur(dst, (11,11), 0)
		blur = cv2.medianBlur(blur, 15)
		# Otsu picks the binarization threshold automatically.
		thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
		thresh = cv2.merge((thresh,thresh,thresh))
		thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
		# Restrict contour search to the ROI.
		thresh = thresh[y:y+h, x:x+w]
		# NOTE(review): indexing [1] assumes the OpenCV 3.x findContours
		# return signature; OpenCV 2.x/4.x return contours at index [0].
		contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]

		if len(contours) > 0:
			contour = max(contours, key = cv2.contourArea)
			# Only save once the contour is large and capturing has warmed up.
			if cv2.contourArea(contour) > 10000 and frames > 50:
				x1, y1, w1, h1 = cv2.boundingRect(contour)
				pic_no += 1
				save_img = thresh[y1:y1+h1, x1:x1+w1]
				# Pad the shorter dimension so the crop is square before resizing.
				if w1 > h1:
					save_img = cv2.copyMakeBorder(save_img, int((w1-h1)/2) , int((w1-h1)/2) , 0, 0, cv2.BORDER_CONSTANT, (0, 0, 0))
				elif h1 > w1:
					save_img = cv2.copyMakeBorder(save_img, 0, 0, int((h1-w1)/2) , int((h1-w1)/2) , cv2.BORDER_CONSTANT, (0, 0, 0))
				save_img = cv2.resize(save_img, (image_x, image_y))
				# Random horizontal flip (~50%) for data augmentation.
				rand = random.randint(0, 10)
				if rand % 2 == 0:
					save_img = cv2.flip(save_img, 1)
				cv2.putText(img, "Capturing...", (30, 60), cv2.FONT_HERSHEY_TRIPLEX, 2, (127, 255, 255))
				cv2.imwrite("gestures/"+str(g_id)+"/"+str(pic_no)+".jpg", save_img)

		cv2.rectangle(img, (x,y), (x+w, y+h), (0,255,0), 2)
		cv2.putText(img, str(pic_no), (30, 400), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (127, 127, 255))
		cv2.imshow("Capturing gesture", img)
		cv2.imshow("thresh", thresh)
		keypress = cv2.waitKey(1)
		# 'c' toggles capturing; turning it off resets the warm-up counter.
		if keypress == ord('c'):
			if flag_start_capturing == False:
				flag_start_capturing = True
			else:
				flag_start_capturing = False
				frames = 0
		if flag_start_capturing == True:
			frames += 1
		if pic_no == total_pics:
			break 
開發者ID:harshbg,項目名稱:Sign-Language-Interpreter-using-Deep-Learning,代碼行數:62,代碼來源:create_gestures.py


注:本文中的cv2.FONT_HERSHEY_TRIPLEX屬性示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。