This article collects typical usage examples of the cv2.WND_PROP_FULLSCREEN attribute in Python. If you have been wondering what cv2.WND_PROP_FULLSCREEN does, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples of the cv2 module that this attribute belongs to.
The following shows 8 code examples of the cv2.WND_PROP_FULLSCREEN attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
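Before the examples, a minimal sketch of what the attribute is for: cv2.WND_PROP_FULLSCREEN names the window property that cv2.setWindowProperty switches between cv2.WINDOW_NORMAL and cv2.WINDOW_FULLSCREEN. The window name "demo" and the image path below are placeholders, not code from any of the projects listed here.

import cv2

# Create a resizable named window first.
cv2.namedWindow("demo", cv2.WINDOW_NORMAL)
# Switch it to full screen by setting the WND_PROP_FULLSCREEN property.
cv2.setWindowProperty("demo", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

img = cv2.imread("example.jpg")  # placeholder path
if img is not None:
    cv2.imshow("demo", img)
    cv2.waitKey(0)
cv2.destroyAllWindows()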
Example 1: main
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def main():
    '''
    Arguments to be set:
        showCam : determines whether to show the camera preview screen.
    '''
    print("Enter main() function")

    if args.testImage is not None:
        img = cv2.imread(args.testImage)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, FACE_SHAPE)
        # `result` (the model prediction for `img`) comes from code omitted in this excerpt.
        print(class_label[result[0]])
        sys.exit(0)

    showCam = 1
    capture = getCameraStreaming()

    if showCam:
        cv2.startWindowThread()
        cv2.namedWindow(windowsName, cv2.WND_PROP_FULLSCREEN)
        # Pass cv2.WINDOW_FULLSCREEN as the property value to actually switch to full screen.
        cv2.setWindowProperty(windowsName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

    showScreenAndDectect(capture)
Developer: a514514772 | Project: Real-Time-Facial-Expression-Recognition-with-DeepLearning | Lines of code: 26 | Source: webcam_detection.py
Example 2: set_display
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def set_display(window_name, full_scrn):
    """Set the display window to either full screen or normal."""
    if full_scrn:
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
    else:
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_NORMAL)
Example 3: set_full_screen
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def set_full_screen(full_scrn):
    """Set display window to full screen or not."""
    prop = cv2.WINDOW_FULLSCREEN if full_scrn else cv2.WINDOW_NORMAL
    cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, prop)
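Helpers like the two above are usually wired to a keyboard toggle inside the display loop. The following sketch shows one way to do that; the window name, camera index, and the 'f'/Esc key bindings are assumptions for illustration, not code from the projects above.

import cv2

WINDOW_NAME = 'preview'  # assumed window name

def set_full_screen(full_scrn):
    # Same pattern as Example 3: pick the value, set the property.
    prop = cv2.WINDOW_FULLSCREEN if full_scrn else cv2.WINDOW_NORMAL
    cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, prop)

cap = cv2.VideoCapture(0)  # assumed camera source
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
full_scrn = False
while True:
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow(WINDOW_NAME, frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('f'):      # 'f' toggles full screen
        full_scrn = not full_scrn
        set_full_screen(full_scrn)
    elif key == 27:          # Esc exits
        break
cap.release()
cv2.destroyAllWindows()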
Example 4: main
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def main():
    '''
    Arguments to be set:
        showCam : determines whether to show the camera preview screen.
    '''
    print("Enter main() function")

    capture = getCameraStreaming()

    cv2.startWindowThread()
    cv2.namedWindow(windowsName, cv2.WND_PROP_FULLSCREEN)
    # Pass cv2.WINDOW_FULLSCREEN as the property value to actually switch to full screen.
    cv2.setWindowProperty(windowsName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

    while True:
        recContent = speechRecognition()
        if recContent is not None:
            emotion = showScreenAndDectect(capture)
            if emotion == "Angry":
                emoji = " >:O"
            elif emotion == "Fear":
                emoji = " :-S"
            elif emotion == "Happy":
                emoji = " :-D"
            elif emotion == "Sad":
                emoji = " :'("
            elif emotion == "Surprise":
                emoji = " :-O"
            else:
                emoji = " "
            print("Output result: " + recContent + emoji)
Developer: a514514772 | Project: Real-Time-Facial-Expression-Recognition-with-DeepLearning | Lines of code: 32 | Source: gen_sentence_with_emoticons.py
Example 5: previewImage
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def previewImage(self, text, img):
    # show full screen
    cv2.namedWindow(text, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(text, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.imshow(text, img)
    cv2.waitKey(2000)
    cv2.destroyAllWindows()
Example 6: previewImage
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def previewImage(self, text, img):
    if self.PREVIEW_IMAGES:
        # show full screen
        cv2.namedWindow(text, cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty(text, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        cv2.imshow(text, img)
        if self.PREVIEW_AUTOCLOSE:
            cv2.waitKey(2000)
            cv2.destroyAllWindows()
        else:
            cv2.waitKey(0)
            cv2.destroyAllWindows()
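Examples 5 and 6 are instance methods, so they need an object that carries the preview flags. A hypothetical caller might look like the following; the Previewer class, its flag values, and the image path are illustrative assumptions only, not part of the original projects.

import cv2

class Previewer:
    # Illustrative flags mirroring the attributes referenced in Example 6.
    PREVIEW_IMAGES = True
    PREVIEW_AUTOCLOSE = True

    def previewImage(self, text, img):
        if self.PREVIEW_IMAGES:
            # show full screen
            cv2.namedWindow(text, cv2.WND_PROP_FULLSCREEN)
            cv2.setWindowProperty(text, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
            cv2.imshow(text, img)
            # Auto-close after 2 seconds, or wait for a key press.
            cv2.waitKey(2000 if self.PREVIEW_AUTOCLOSE else 0)
            cv2.destroyAllWindows()

img = cv2.imread("sample.jpg")  # placeholder path
if img is not None:
    Previewer().previewImage("Preview", img)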
Example 7: maximize_win
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def maximize_win(winname):
    # Setting the WND_PROP_FULLSCREEN property to WINDOW_FULLSCREEN maximizes the window.
    cv2.setWindowProperty(winname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
Example 8: main
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def main():
    # load config
    cfg.merge_from_file(args.config)
    cfg.CUDA = torch.cuda.is_available() and cfg.CUDA
    device = torch.device('cuda' if cfg.CUDA else 'cpu')

    # create model
    model = ModelBuilder()

    # load model
    model.load_state_dict(torch.load(args.snapshot,
                          map_location=lambda storage, loc: storage.cpu()))
    model.eval().to(device)

    # build tracker
    tracker = build_tracker(model)

    first_frame = True
    if args.video_name:
        video_name = args.video_name.split('/')[-1].split('.')[0]
    else:
        video_name = 'webcam'
    cv2.namedWindow(video_name, cv2.WND_PROP_FULLSCREEN)
    for frame in get_frames(args.video_name):
        if first_frame:
            try:
                init_rect = cv2.selectROI(video_name, frame, False, False)
            except:
                exit()
            tracker.init(frame, init_rect)
            first_frame = False
        else:
            outputs = tracker.track(frame)
            if 'polygon' in outputs:
                polygon = np.array(outputs['polygon']).astype(np.int32)
                cv2.polylines(frame, [polygon.reshape((-1, 1, 2))],
                              True, (0, 255, 0), 3)
                mask = ((outputs['mask'] > cfg.TRACK.MASK_THERSHOLD) * 255)
                mask = mask.astype(np.uint8)
                mask = np.stack([mask, mask * 255, mask]).transpose(1, 2, 0)
                frame = cv2.addWeighted(frame, 0.77, mask, 0.23, -1)
            else:
                bbox = list(map(int, outputs['bbox']))
                cv2.rectangle(frame, (bbox[0], bbox[1]),
                              (bbox[0] + bbox[2], bbox[1] + bbox[3]),
                              (0, 255, 0), 3)
            cv2.imshow(video_name, frame)
            cv2.waitKey(40)