This article collects typical usage examples of the cv2.WINDOW_AUTOSIZE attribute in Python. If you are wondering what exactly cv2.WINDOW_AUTOSIZE does, how to use it, or where to find examples of it in use, the curated code examples below may help. You can also explore further usage examples of cv2, the module this attribute belongs to.
The following shows 15 code examples of the cv2.WINDOW_AUTOSIZE attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
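Before the examples, here is a minimal, self-contained sketch of how cv2.WINDOW_AUTOSIZE is typically passed to cv2.namedWindow (the file name 'example.jpg' is only a placeholder). With this flag the window adopts the size of the displayed image and cannot be resized by the user.

import cv2

img = cv2.imread('example.jpg')  # placeholder path; use any test image
cv2.namedWindow('demo', cv2.WINDOW_AUTOSIZE)  # window is sized to the image; not user-resizable
cv2.imshow('demo', img)
cv2.waitKey(0)
cv2.destroyAllWindows()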
Example 1: run
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
# Besides cv2, this snippet uses the standard-library modules struct, zlib and pickle,
# plus a socket created elsewhere in the class (self.sock).
def run(self):
    print("VIDEO server starts...")
    self.sock.bind(self.ADDR)
    self.sock.listen(1)
    conn, addr = self.sock.accept()
    print("remote VIDEO client successfully connected...")
    data = "".encode("utf-8")
    payload_size = struct.calcsize("L")
    cv2.namedWindow('Remote', cv2.WINDOW_AUTOSIZE)
    while True:
        # Read until the fixed-size length prefix is complete
        while len(data) < payload_size:
            data += conn.recv(81920)
        packed_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("L", packed_size)[0]
        # Read until the whole compressed frame has arrived
        while len(data) < msg_size:
            data += conn.recv(81920)
        zframe_data = data[:msg_size]
        data = data[msg_size:]
        frame_data = zlib.decompress(zframe_data)
        frame = pickle.loads(frame_data)
        cv2.imshow('Remote', frame)
        if cv2.waitKey(1) & 0xFF == 27:  # ESC quits
            break
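For context, a hedged sketch of a matching sender, inferred from the receive loop above, is shown below (the host, port and camera index are placeholders, not part of the original project):

import pickle
import socket
import struct
import zlib

import cv2

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', 8880))  # placeholder address
cap = cv2.VideoCapture(0)          # placeholder camera index
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    payload = zlib.compress(pickle.dumps(frame))
    # Length prefix packed with the same "L" format the server unpacks
    sock.sendall(struct.pack("L", len(payload)) + payload)

Note that struct's native "L" size can differ between platforms; a fixed-size format such as "=L" on both ends would be more robust for cross-machine streaming.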
Example 2: preview
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def preview(self):
    """ Blocking function. Opens OpenCV window to display stream. """
    self.connect()
    win_name = 'RTSP'
    cv2.namedWindow(win_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(win_name, 20, 20)
    while True:
        cv2.imshow(win_name, self.get_frame())
        # if self._latest is not None:
        #     cv2.imshow(win_name, self._latest)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    cv2.waitKey()
    cv2.destroyAllWindows()
    cv2.waitKey()
Example 3: parse_grid
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def parse_grid(path):
    original = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    processed = pre_process_image(original)
    # cv2.namedWindow('processed', cv2.WINDOW_AUTOSIZE)
    # processed_img = cv2.resize(processed, (500, 500))  # Resize image
    # cv2.imshow('processed', processed_img)
    corners = find_corners_of_largest_polygon(processed)
    cropped = crop_and_warp(original, corners)
    # cv2.namedWindow('cropped', cv2.WINDOW_AUTOSIZE)
    # cropped_img = cv2.resize(cropped, (500, 500))  # Resize image
    # cv2.imshow('cropped', cropped_img)
    squares = infer_grid(cropped)
    # print(squares)
    digits = get_digits(cropped, squares, 28)
    # print(digits)
    final_image = show_digits(digits)
    return final_image
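A hypothetical call to the function above might look like this (the image path is a placeholder; pre_process_image, find_corners_of_largest_polygon, crop_and_warp, infer_grid, get_digits and show_digits all come from the original sudoku-parsing project):

import cv2

grid = parse_grid('sudoku_puzzle.jpg')  # placeholder path
cv2.namedWindow('grid', cv2.WINDOW_AUTOSIZE)
cv2.imshow('grid', grid)
cv2.waitKey(0)
cv2.destroyAllWindows()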
Example 4: cv2_show_image
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def cv2_show_image(window_name, image,
                   size_wh=None, location_xy=None):
    """Helper function for specifying window size and location when
    displaying images with cv2.

    Args:
        window_name: str window name
        image: ndarray image to display
        size_wh: window size (w, h)
        location_xy: window location (x, y)
    """
    if size_wh is not None:
        cv2.namedWindow(window_name,
                        cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
        cv2.resizeWindow(window_name, *size_wh)
    else:
        cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    if location_xy is not None:
        cv2.moveWindow(window_name, *location_xy)

    cv2.imshow(window_name, image)
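A hypothetical usage of this helper (the image path and window names are placeholders):

import cv2

image = cv2.imread('example.png')  # placeholder path
# Explicit size: a resizable window that keeps the aspect ratio
cv2_show_image('resizable', image, size_wh=(640, 480), location_xy=(100, 100))
# No size given: falls back to cv2.WINDOW_AUTOSIZE
cv2_show_image('autosize', image, location_xy=(800, 100))
cv2.waitKey(0)
cv2.destroyAllWindows()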
Example 5: __init__
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def __init__(self, video_out=None, draw_frames=True, video_out_fourcc="DIVX", video_out_fps=2):
    """
    A template class to annotate and save the processed frames. It can also save the annotated frames in a video
    file and/or display them in a new window. The :meth:`~ethoscope.drawers.drawers.BaseDrawer._annotate_frame`
    abstract method defines how frames are annotated.

    :param video_out: The path to the output file (.avi)
    :type video_out: str
    :param draw_frames: Whether frames should be displayed on the screen (a new window will be created).
    :type draw_frames: bool
    :param video_out_fourcc: When setting ``video_out``, this defines the codec used to save the output video (see `fourcc <http://www.fourcc.org/codecs.php>`_)
    :type video_out_fourcc: str
    :param video_out_fps: When setting ``video_out``, this defines the output fps. Typically, the same as the input fps.
    :type video_out_fps: float
    """
    self._video_out = video_out
    self._draw_frames = draw_frames
    self._video_writer = None
    self._window_name = "ethoscope_" + str(os.getpid())
    self._video_out_fourcc = video_out_fourcc
    self._video_out_fps = video_out_fps
    if draw_frames:
        cv2.namedWindow(self._window_name, cv2.WINDOW_AUTOSIZE)
    self._last_drawn_frame = None
Example 6: __init__
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def __init__(self, rom_name, vis, frameskip=1, windowname='preview'):
    self.ale = ALEInterface()
    self.max_frames_per_episode = self.ale.getInt("max_num_frames_per_episode")
    self.ale.setInt("random_seed", 123)
    self.ale.setInt("frame_skip", frameskip)
    romfile = str(ROM_PATH) + str(rom_name)
    if not os.path.exists(romfile):
        print('No ROM file found at "' + romfile + '".\nAdjust ROM_PATH or double-check that the file exists.')
    self.ale.loadROM(romfile)
    self.legal_actions = self.ale.getMinimalActionSet()
    self.action_map = dict()
    self.windowname = windowname
    for i in range(len(self.legal_actions)):
        self.action_map[self.legal_actions[i]] = i
    # print(self.legal_actions)
    self.screen_width, self.screen_height = self.ale.getScreenDims()
    print("width/height: " + str(self.screen_width) + "/" + str(self.screen_height))
    self.vis = vis
    if vis:
        cv2.startWindowThread()
        cv2.namedWindow(self.windowname, flags=cv2.WINDOW_AUTOSIZE)  # fixed-size window; the user cannot resize it
Example 7: show_camera
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def show_camera():
    # To flip the image, modify the flip_method parameter (0 and 2 are the most common)
    print(gstreamer_pipeline(flip_method=0))
    cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
    if cap.isOpened():
        window_handle = cv2.namedWindow('CSI Camera', cv2.WINDOW_AUTOSIZE)
        # Window
        while cv2.getWindowProperty('CSI Camera', 0) >= 0:
            ret_val, img = cap.read()
            cv2.imshow('CSI Camera', img)
            # This also acts as a ~30 ms frame pacer and GUI event pump
            keyCode = cv2.waitKey(30) & 0xff
            # Stop the program on the ESC key
            if keyCode == 27:
                break
        cap.release()
        cv2.destroyAllWindows()
    else:
        print('Unable to open camera')
Example 8: main
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def main(argv):
    if len(sys.argv) < 3:
        print('Not enough parameters')
        print('Usage:\nmatch_template_demo.py <image_name> <template_name> [<mask_name>]')
        return -1

    ## [load_image]
    global img
    global templ
    img = cv2.imread(sys.argv[1], cv2.IMREAD_COLOR)
    templ = cv2.imread(sys.argv[2], cv2.IMREAD_COLOR)

    if len(sys.argv) > 3:
        global use_mask
        use_mask = True
        global mask
        mask = cv2.imread(sys.argv[3], cv2.IMREAD_COLOR)

    if (img is None) or (templ is None) or (use_mask and (mask is None)):
        print('Can\'t read one of the images')
        return -1
    ## [load_image]

    ## [create_windows]
    cv2.namedWindow(image_window, cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow(result_window, cv2.WINDOW_AUTOSIZE)
    ## [create_windows]

    ## [create_trackbar]
    trackbar_label = 'Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED'
    cv2.createTrackbar(trackbar_label, image_window, match_method, max_Trackbar, MatchingMethod)
    ## [create_trackbar]

    MatchingMethod(match_method)

    ## [wait_key]
    cv2.waitKey(0)
    return 0
    ## [wait_key]
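Because the MatchingMethod callback and the globals (image_window, result_window, match_method, max_Trackbar, use_mask) are defined elsewhere in the original tutorial, here is a minimal, self-contained sketch of the underlying template-matching call with a fixed method (file names are placeholders):

import cv2

img = cv2.imread('scene.png', cv2.IMREAD_COLOR)    # placeholder
templ = cv2.imread('patch.png', cv2.IMREAD_COLOR)  # placeholder
result = cv2.matchTemplate(img, templ, cv2.TM_CCOEFF_NORMED)
_, max_val, _, max_loc = cv2.minMaxLoc(result)
h, w = templ.shape[:2]
cv2.rectangle(img, max_loc, (max_loc[0] + w, max_loc[1] + h), (0, 0, 255), 2)
cv2.namedWindow('match', cv2.WINDOW_AUTOSIZE)
cv2.imshow('match', img)
cv2.waitKey(0)
cv2.destroyAllWindows()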
Example 9: add
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def add(self, name, image, weight=1):
    '''
    The higher the weight, the larger the image is displayed.
    :return:
    '''
    cv2.namedWindow(name, flags=cv2.WINDOW_AUTOSIZE)
    window = Window(name, image, weight)
    self.windows[name] = window
    # self.windows[name] = image
Example 10: show
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def show(image):
    import cv2
    cv2.namedWindow("view", cv2.WINDOW_AUTOSIZE)
    cv2.imshow("view", image)
    cv2.waitKey(0)
    cv2.destroyWindow("view")
Example 11: __init__
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def __init__(self, window_name=_DEFAULT_WINDOW_NAME, screen_size=_DEFAULT_SCREEN_SIZE_ID,
             display_individual_classification=_DEFAULT_DISPLAY_INDIVIDUAL_CLASSIFICATION,
             display_graph_ensemble=_DEFAULT_DISPLAY_GRAPH_ENSEMBLE):
    """
    Initialize GUI of the FER demo.

    :param window_name: (string) The name of the window
    :param screen_size: ((int, int)) Tuple of int values for width and height, respectively.
    """

    # Screen components
    self._fer = None
    self._input_image = None
    self._background = None
    self._plot_arousal = []
    self._plot_valence = []

    # Screen
    self._window_name = window_name
    self._screen_size = screen_size - 1
    self._width, self._height = FERDemo._SCREEN_SIZE[self._screen_size]
    self._display_individual_classification = display_individual_classification
    self._display_graph_ensemble = display_graph_ensemble

    # Container parameters
    self._container_width, self._container_height = (int(self._width // 2), int(self._height))
    self._container_center_position = np.array([self._container_width // 2, self._container_height // 2], dtype=int)  # np.int was removed in recent NumPy releases
    self._input_container = None
    self._output_container = None
    self._input_container_initial_position = np.array([0, 0], dtype=int)
    self._output_container_initial_position = np.array([0, self._width // 2], dtype=int)

    # Output blocks
    self._output_block_height = (self._container_height // FERDemo._BLOCK_NUM_BLOCKS)
    self._output_block_height_ensemble = self._container_height
    self._output_block_width = self._container_width

    # Screen initialization
    self._draw_background()
    self._screen = self._get_container(0, 0, self._height, self._width)
    self._blank_screen()

    cv2.namedWindow(self._window_name, cv2.WINDOW_AUTOSIZE)
Author: siqueira-hc | Project: Efficient-Facial-Feature-Learning-with-Wide-Ensemble-based-Convolutional-Neural-Networks | Lines of code: 43 | Source file: fer_demo.py
Example 12: drawingDemo
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def drawingDemo():
    img = emptyImage()
    # Draw a line with thickness 2
    drawLine(img, (10, 10), (200, 200), (0, 0, 255), 2)
    # A thickness of -1 fills the shape
    drawCircle(img, (300, 100), 80, (0, 255, 0), -1)
    # Draw both the filled interior and the outline
    drawRectangle(img, (10, 210), (210, 350), (100, 100, 0), -1)
    drawRectangle(img, (10, 210), (210, 350), (255, 0, 0), 3)
    # Draw an ellipse
    drawElipse(img, (450, 100), (30, 80), 0, 0, 360, (0, 100, 100), -1)
    # Draw a polygon
    pts = np.array([[(250, 240), (270, 280), (350, 320), (500, 300), (450, 230), (350, 210)]], dtype=np.int32)
    drawPolylines(img, pts, True, (255, 100, 100), 5)
    # Draw text
    drawText(img, 'OpenCV', (20, 450), font_types[0], 4, (200, 200, 200), 2)

    cv2.namedWindow('DrawingDemo', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('DrawingDemo', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
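The emptyImage/drawLine/drawCircle/... wrappers and font_types above are project-specific helpers. Under that assumption, a self-contained sketch of the same demo using the cv2 primitives directly could look like this:

import cv2
import numpy as np

img = np.zeros((512, 512, 3), dtype=np.uint8)      # blank black canvas
cv2.line(img, (10, 10), (200, 200), (0, 0, 255), 2)
cv2.circle(img, (300, 100), 80, (0, 255, 0), -1)   # thickness -1 fills the shape
cv2.rectangle(img, (10, 210), (210, 350), (100, 100, 0), -1)
cv2.rectangle(img, (10, 210), (210, 350), (255, 0, 0), 3)
cv2.ellipse(img, (450, 100), (30, 80), 0, 0, 360, (0, 100, 100), -1)
pts = np.array([(250, 240), (270, 280), (350, 320), (500, 300), (450, 230), (350, 210)], dtype=np.int32)
cv2.polylines(img, [pts], True, (255, 100, 100), 5)
cv2.putText(img, 'OpenCV', (20, 450), cv2.FONT_HERSHEY_SIMPLEX, 4, (200, 200, 200), 2)
cv2.namedWindow('DrawingDemo', cv2.WINDOW_AUTOSIZE)
cv2.imshow('DrawingDemo', img)
cv2.waitKey(0)
cv2.destroyAllWindows()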
Example 13: __init__
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def __init__(self, debug=False):
    self.debug = debug
    if self.debug:
        cv2.namedWindow("Stream", cv2.WINDOW_AUTOSIZE)
    self.tile_map = self.make_tilemap('firered_tiles.png')
    self.tile_text = self.make_tile_text('firered_tiles.txt')
    self.ocr_engine = video.OCREngine(self.tile_map, self.tile_text)
Example 14: __init__
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def __init__(self, src, window_name=None, org=None):
    self.src = src
    self.window_name = window_name if window_name else src
    cv2.startWindowThread()
    cv2.namedWindow(self.window_name, cv2.WINDOW_AUTOSIZE)
    if org:
        # Set the window position
        x, y = org
        cv2.moveWindow(self.window_name, x, y)
    super().__init__()
Example 15: start
# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def start(self):
    print("Starting Live object detection, may take a few minutes to initialize...")
    self.camera.startStreaming()
    self.detector.initializeSession()

    if not self.camera.isOpened():
        print("Camera has failed to open")
        exit(-1)
    elif self.debug:
        cv2.namedWindow("Jetson Live Detection", cv2.WINDOW_AUTOSIZE)

    while True:
        curr_time = time.time()

        img = self.camera.getFrame()
        scores, boxes, classes, num_detections = self.detector.detect(img)

        if self.debug:
            self._visualizeDetections(img, scores, boxes, classes, num_detections)
            print("Debug: Running at: " + str(1.0 / (time.time() - curr_time)) + " Hz.")

        if cv2.waitKey(1) == ord('q'):
            break

        # throttle to rate
        capture_duration = time.time() - curr_time
        sleep_time = self.rate - capture_duration
        if sleep_time > 0:
            time.sleep(sleep_time)

    cv2.destroyAllWindows()
    self.camera.__del__()
    self.detector.__del__()
    print("Exiting...")
    return
Author: SteveMacenski | Project: jetson_nano_detection_and_tracking | Lines of code: 37 | Source file: jetson_live_object_detection.py