This article collects typical usage examples of the cv2.WND_PROP_FULLSCREEN property in Python. If you are wondering how cv2.WND_PROP_FULLSCREEN is used in practice, or looking for concrete examples of it, the curated samples below may help. You can also explore further usage examples from the cv2 module, which this property belongs to.
A total of 8 code examples of the cv2.WND_PROP_FULLSCREEN property are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
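Before the collected examples, here is a minimal, self-contained sketch of the basic pattern (the window title "preview" and the image path "image.jpg" are placeholders used only in this sketch): create the window with cv2.namedWindow, then set its WND_PROP_FULLSCREEN property to cv2.WINDOW_FULLSCREEN with cv2.setWindowProperty.

import cv2

# Placeholder image path for this sketch.
img = cv2.imread("image.jpg")

# Create the window first, then switch its fullscreen property on.
cv2.namedWindow("preview", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("preview", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.imshow("preview", img)
cv2.waitKey(0)
cv2.destroyAllWindows()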
Example 1: main
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def main():
    '''
    Arguments to be set:
        showCam : determines whether to show the camera preview screen.
    '''
    print("Enter main() function")
    if args.testImage is not None:
        img = cv2.imread(args.testImage)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, FACE_SHAPE)
        # `result` comes from the classification step, which is omitted in this excerpt.
        print(class_label[result[0]])
        sys.exit(0)
    showCam = 1
    capture = getCameraStreaming()
    if showCam:
        cv2.startWindowThread()
        cv2.namedWindow(windowsName, cv2.WND_PROP_FULLSCREEN)
        # cv2.WINDOW_FULLSCREEN is the property value that actually enables fullscreen.
        cv2.setWindowProperty(windowsName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    showScreenAndDectect(capture)
Developer: a514514772, Project: Real-Time-Facial-Expression-Recognition-with-DeepLearning, Lines of code: 26, Source file: webcam_detection.py
Example 2: set_display
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def set_display(window_name, full_scrn):
    """Set display window to either full screen or normal."""
    if full_scrn:
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
    else:
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_NORMAL)
Example 3: set_full_screen
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def set_full_screen(full_scrn):
    """Set display window to full screen or not."""
    prop = cv2.WINDOW_FULLSCREEN if full_scrn else cv2.WINDOW_NORMAL
    cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, prop)
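Both set_display and set_full_screen are meant to be driven from an event loop. Below is a minimal sketch of toggling fullscreen from a key press; the window name "demo", the default webcam source, and the 'f'/'q' hotkeys are assumptions made only for this sketch.

import cv2

WINDOW_NAME = "demo"  # assumed window name for this sketch

def set_full_screen(full_scrn):
    """Set display window to full screen or not."""
    prop = cv2.WINDOW_FULLSCREEN if full_scrn else cv2.WINDOW_NORMAL
    cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, prop)

cap = cv2.VideoCapture(0)  # default webcam, assumed to be available
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
full_scrn = False
while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow(WINDOW_NAME, frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):      # quit
        break
    elif key == ord('f'):    # toggle fullscreen
        full_scrn = not full_scrn
        set_full_screen(full_scrn)
cap.release()
cv2.destroyAllWindows()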
Example 4: main
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def main():
    '''
    Arguments to be set:
        showCam : determines whether to show the camera preview screen.
    '''
    print("Enter main() function")
    capture = getCameraStreaming()
    cv2.startWindowThread()
    cv2.namedWindow(windowsName, cv2.WND_PROP_FULLSCREEN)
    # cv2.WINDOW_FULLSCREEN is the property value that actually enables fullscreen.
    cv2.setWindowProperty(windowsName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    while True:
        recContent = speechRecognition()
        if recContent is not None:
            emotion = showScreenAndDectect(capture)
            if emotion == "Angry":
                emoji = " >:O"
            elif emotion == "Fear":
                emoji = " :-S"
            elif emotion == "Happy":
                emoji = " :-D"
            elif emotion == "Sad":
                emoji = " :'("
            elif emotion == "Surprise":
                emoji = " :-O"
            else:
                emoji = " "
            print("Output result: " + recContent + emoji)
Developer: a514514772, Project: Real-Time-Facial-Expression-Recognition-with-DeepLearning, Lines of code: 32, Source file: gen_sentence_with_emoticons.py
Example 5: previewImage
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def previewImage(self, text, img):
    # Show the image full screen.
    cv2.namedWindow(text, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(text, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.imshow(text, img)
    cv2.waitKey(2000)
    cv2.destroyAllWindows()
Example 6: previewImage
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def previewImage(self, text, img):
    if self.PREVIEW_IMAGES:
        # Show the image full screen.
        cv2.namedWindow(text, cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty(text, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        cv2.imshow(text, img)
        if self.PREVIEW_AUTOCLOSE:
            cv2.waitKey(2000)
            cv2.destroyAllWindows()
        else:
            cv2.waitKey(0)
            cv2.destroyAllWindows()
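The variant above gates the preview behind two instance flags. A minimal sketch of how those flags might live on the owning object follows; the class name Previewer and the default flag values are assumptions, since the real class is not part of this excerpt.

import cv2

class Previewer:
    # Placeholder class; the real owning class is not shown in this excerpt.
    PREVIEW_IMAGES = True      # assumed default: previews enabled
    PREVIEW_AUTOCLOSE = False  # assumed default: wait for a key press instead of auto-closing

    def previewImage(self, text, img):
        if self.PREVIEW_IMAGES:
            cv2.namedWindow(text, cv2.WND_PROP_FULLSCREEN)
            cv2.setWindowProperty(text, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
            cv2.imshow(text, img)
            cv2.waitKey(2000 if self.PREVIEW_AUTOCLOSE else 0)
            cv2.destroyAllWindows()

img = cv2.imread("sample.jpg")  # placeholder path
if img is not None:
    Previewer().previewImage("preview", img)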
Example 7: maximize_win
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def maximize_win(winname):
    # cv2.WINDOW_FULLSCREEN (value 1) switches the window to fullscreen mode.
    cv2.setWindowProperty(winname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
Example 8: main
# Required import: import cv2 [as alias]
# Or: from cv2 import WND_PROP_FULLSCREEN [as alias]
def main():
    # load config
    cfg.merge_from_file(args.config)
    cfg.CUDA = torch.cuda.is_available() and cfg.CUDA
    device = torch.device('cuda' if cfg.CUDA else 'cpu')
    # create model
    model = ModelBuilder()
    # load model
    model.load_state_dict(torch.load(args.snapshot,
                          map_location=lambda storage, loc: storage.cpu()))
    model.eval().to(device)
    # build tracker
    tracker = build_tracker(model)

    first_frame = True
    if args.video_name:
        video_name = args.video_name.split('/')[-1].split('.')[0]
    else:
        video_name = 'webcam'
    cv2.namedWindow(video_name, cv2.WND_PROP_FULLSCREEN)
    for frame in get_frames(args.video_name):
        if first_frame:
            try:
                init_rect = cv2.selectROI(video_name, frame, False, False)
            except:
                exit()
            tracker.init(frame, init_rect)
            first_frame = False
        else:
            outputs = tracker.track(frame)
            if 'polygon' in outputs:
                polygon = np.array(outputs['polygon']).astype(np.int32)
                cv2.polylines(frame, [polygon.reshape((-1, 1, 2))],
                              True, (0, 255, 0), 3)
                mask = ((outputs['mask'] > cfg.TRACK.MASK_THERSHOLD) * 255)
                mask = mask.astype(np.uint8)
                mask = np.stack([mask, mask*255, mask]).transpose(1, 2, 0)
                frame = cv2.addWeighted(frame, 0.77, mask, 0.23, -1)
            else:
                bbox = list(map(int, outputs['bbox']))
                cv2.rectangle(frame, (bbox[0], bbox[1]),
                              (bbox[0]+bbox[2], bbox[1]+bbox[3]),
                              (0, 255, 0), 3)
            cv2.imshow(video_name, frame)
            cv2.waitKey(40)
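The tracking demo above depends on a get_frames helper that is not included in this excerpt. A rough, assumed sketch of what such a frame generator could look like, falling back to the default webcam when no video name is given and using only standard OpenCV calls:

import cv2

def get_frames(video_name):
    # Assumed sketch; the real helper used by the example is not shown here.
    cap = cv2.VideoCapture(video_name if video_name else 0)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        yield frame
    cap.release()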