This article collects typical usage examples of the cv2.WINDOW_GUI_NORMAL attribute in Python. If you are wondering what cv2.WINDOW_GUI_NORMAL does or how to use it in practice, the curated examples below may help. You can also explore further usage examples of the cv2 module, which provides this attribute.
The following presents 4 code examples of the cv2.WINDOW_GUI_NORMAL attribute, sorted by popularity by default.
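Before the examples, here is a minimal sketch (not taken from the examples below) of the usual pattern: create a named window with cv2.WINDOW_GUI_NORMAL, optionally combined with cv2.WINDOW_KEEPRATIO so the window stays resizable, before calling cv2.imshow. The window name and the synthetic blank image are placeholders so the snippet runs on its own.

import cv2
import numpy as np

# Placeholder image so the sketch runs without any file on disk.
image = np.zeros((240, 320, 3), dtype=np.uint8)

# WINDOW_GUI_NORMAL requests the plain GUI (no status bar / toolbar);
# WINDOW_KEEPRATIO lets the window be resized while keeping the aspect ratio.
cv2.namedWindow("demo", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
cv2.resizeWindow("demo", 640, 480)
cv2.imshow("demo", image)
cv2.waitKey(1000)
cv2.destroyAllWindows()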
Example 1: cv2_show_image
# Required module: import cv2 [as alias]
# Or: from cv2 import WINDOW_GUI_NORMAL [as alias]
def cv2_show_image(window_name, image,
                   size_wh=None, location_xy=None):
    """Helper function for specifying window size and location when
    displaying images with cv2.

    Args:
        window_name: str window name
        image: ndarray image to display
        size_wh: window size (w, h)
        location_xy: window location (x, y)
    """
    if size_wh is not None:
        cv2.namedWindow(window_name,
                        cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
        cv2.resizeWindow(window_name, *size_wh)
    else:
        cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    if location_xy is not None:
        cv2.moveWindow(window_name, *location_xy)

    cv2.imshow(window_name, image)
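A hypothetical usage sketch for cv2_show_image (not part of the original example): it assumes a BGR ndarray is already available and opens a resizable 640x480 window at the top-left corner of the screen.

# Hypothetical usage of cv2_show_image; the blank image is a placeholder.
import cv2
import numpy as np

image = np.zeros((480, 640, 3), dtype=np.uint8)
cv2_show_image("detections", image, size_wh=(640, 480), location_xy=(0, 0))
cv2.waitKey(0)
cv2.destroyAllWindows()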
Example 2: cv2_imshow
# Required module: import cv2 [as alias]
# Or: from cv2 import WINDOW_GUI_NORMAL [as alias]
def cv2_imshow(window_name, image,
               size_wh=None, row_col=None, location_xy=None):
    """Helper function for specifying window size and location when
    displaying images with cv2.

    Args:
        window_name (string): Window title
        image: image to display
        size_wh: resize window
            Recommended sizes for 1920x1080 screen:
                2 col: (930, 280)
                3 col: (620, 187)
                4 col: (465, 140)
        row_col: Row and column to show images like subplots
        location_xy: location of window
    """
    if size_wh is not None:
        cv2.namedWindow(window_name, cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
        cv2.resizeWindow(window_name, *size_wh)
    else:
        cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE | cv2.WINDOW_GUI_NORMAL)

    if row_col is not None:
        start_x_offset = 60
        start_y_offset = 25
        y_offset = 28

        subplot_row = row_col[0]
        subplot_col = row_col[1]
        location_xy = (start_x_offset + subplot_col * size_wh[0],
                       start_y_offset + subplot_row * size_wh[1] + subplot_row * y_offset)

    if location_xy is not None:
        cv2.moveWindow(window_name, *location_xy)

    cv2.imshow(window_name, image)
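The row_col branch places each window like a subplot cell, computing its screen position from size_wh plus fixed pixel offsets. A hypothetical usage sketch (not from the original source), assuming three placeholder images and the 3-column size recommended in the docstring:

# Hypothetical tiling of three windows on one row with cv2_imshow.
import cv2
import numpy as np

images = [np.zeros((187, 620, 3), dtype=np.uint8) for _ in range(3)]
for col, img in enumerate(images):
    cv2_imshow("view_{}".format(col), img, size_wh=(620, 187), row_col=(0, col))
cv2.waitKey(0)
cv2.destroyAllWindows()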
Example 3: main
# Required module: import cv2 [as alias]
# Or: from cv2 import WINDOW_GUI_NORMAL [as alias]
def main():
    args = parse_args()
    update_config(cfg, args)

    # cudnn related settings
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Load the human detector model
    from lib.detector.yolo.human_detector import load_model as yolo_model
    human_model = yolo_model()

    from lib.detector.yolo.human_detector import human_bbox_get as yolo_det
    bboxs, scores = yolo_det(args.img_input, human_model, confidence=0.5)  # bboxes (N, 4) [x0, y0, x1, y1]

    # bboxes are pixel coordinates
    inputs, origin_img, center, scale = PreProcess(args.img_input, bboxs, scores, cfg)

    # load the pose model
    model = model_load(cfg)

    with torch.no_grad():
        # compute output heatmap
        # inputs = inputs[:, [2, 1, 0]]
        # inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB)
        output = model(inputs)

        # compute keypoint coordinates
        preds, maxvals = get_final_preds(
            cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))

        image = plot_keypoint(origin_img, preds, maxvals, 0.3)
        cv2.imwrite(args.img_output, image)

        if args.display:
            cv2.namedWindow("enhanced", cv2.WINDOW_GUI_NORMAL)
            cv2.resizeWindow("enhanced", 960, 480)
            cv2.imshow("enhanced", image)
            cv2.waitKey(5000)
Example 4: main
# Required module: import cv2 [as alias]
# Or: from cv2 import WINDOW_GUI_NORMAL [as alias]
def main():
    args = parse_args()
    update_config(cfg, args)

    # cudnn related settings
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Load the human detector model
    from lib.detector.mmdetection.high_api import load_model
    human_model = load_model()

    from lib.detector.mmdetection.high_api import human_boxes_get as mmd_detector
    bboxs, scores = mmd_detector(human_model, args.img_input)  # bboxes (N, 4) [x0, y0, x1, y1]

    # bboxes are pixel coordinates
    inputs, origin_img, center, scale = PreProcess(args.img_input, bboxs, scores, cfg)

    # load the HRNet model
    model = model_load(cfg)

    with torch.no_grad():
        # compute output heatmap
        # inputs = inputs[:, [2, 1, 0]]
        # inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB)
        output = model(inputs)

        # compute keypoint coordinates
        preds, maxvals = get_final_preds(
            cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))

        image = plot_keypoint(origin_img, preds, maxvals, 0.3)
        cv2.imwrite(args.img_output, image)

        if args.display:
            cv2.namedWindow("enhanced", cv2.WINDOW_GUI_NORMAL)
            cv2.resizeWindow("enhanced", 960, 480)
            cv2.imshow("enhanced", image)
            cv2.waitKey(5000)