

Python cv2.WINDOW_AUTOSIZE Attribute Code Examples

This article collects typical usage examples of the cv2.WINDOW_AUTOSIZE attribute in Python. If you are wondering what cv2.WINDOW_AUTOSIZE is for, how to use it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the cv2 module.


The sections below show 15 code examples of the cv2.WINDOW_AUTOSIZE attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
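
Before the individual examples, here is a minimal, self-contained sketch of the basic pattern (the file name 'example.jpg' is only a placeholder): passing cv2.WINDOW_AUTOSIZE to cv2.namedWindow makes the window adopt the size of the displayed image, and the user cannot resize it manually; cv2.WINDOW_NORMAL is the resizable alternative.

import cv2

# Placeholder image path; replace with any readable image file.
img = cv2.imread('example.jpg')
if img is None:
    raise FileNotFoundError("example.jpg could not be read")

# WINDOW_AUTOSIZE: the window takes the image's size and cannot be resized by the user.
cv2.namedWindow('autosize', cv2.WINDOW_AUTOSIZE)
cv2.imshow('autosize', img)

# WINDOW_NORMAL: the window can be resized manually or via cv2.resizeWindow().
cv2.namedWindow('resizable', cv2.WINDOW_NORMAL)
cv2.resizeWindow('resizable', 640, 480)
cv2.imshow('resizable', img)

cv2.waitKey(0)
cv2.destroyAllWindows()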

Example 1: run

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def run(self):
        print("VEDIO server starts...")
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print("remote VEDIO client success connected...")
        data = "".encode("utf-8")
        payload_size = struct.calcsize("L")
        cv2.namedWindow('Remote', cv2.WINDOW_AUTOSIZE)
        while True:
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            while len(data) < msg_size:
                data += conn.recv(81920)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            cv2.imshow('Remote', frame)
            if cv2.waitKey(1) & 0xFF == 27:
                break 
Developer ID: 11ze, Project: The-chat-room, Lines of code: 26, Source file: vachat.py

Example 2: preview

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def preview(self):
        """ Blocking function. Opens OpenCV window to display stream. """
        self.connect()
        win_name = 'RTSP'
        cv2.namedWindow(win_name, cv2.WINDOW_AUTOSIZE)
        cv2.moveWindow(win_name, 20, 20)

        while True:
            cv2.imshow(win_name, self.get_frame())
            # if self._latest is not None:
            #    cv2.imshow(win_name,self._latest)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
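        # The waitKey() calls around destroyAllWindows() let HighGUI process pending events so the window closes cleanly on all platforms.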
        cv2.waitKey(1)
        cv2.destroyAllWindows()
        cv2.waitKey(1)
Developer ID: Benehiko, Project: ReolinkCameraAPI, Lines of code: 18, Source file: RtspClient.py

Example 3: parse_grid

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def parse_grid(path):
    original = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    processed = pre_process_image(original)
    
#    cv2.namedWindow('processed',cv2.WINDOW_AUTOSIZE)
#    processed_img = cv2.resize(processed, (500, 500))          # Resize image
#    cv2.imshow('processed', processed_img)
    
    corners = find_corners_of_largest_polygon(processed)
    cropped = crop_and_warp(original, corners)
    
#    cv2.namedWindow('cropped',cv2.WINDOW_AUTOSIZE)
#    cropped_img = cv2.resize(cropped, (500, 500))              # Resize image
#    cv2.imshow('cropped', cropped_img)
    
    squares = infer_grid(cropped)
#    print(squares)
    digits = get_digits(cropped, squares, 28)
#    print(digits)
    final_image = show_digits(digits)
    return final_image 
Developer ID: aakashjhawar, Project: SolveSudoku, Lines of code: 23, Source file: SudokuExtractor.py

Example 4: cv2_show_image

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def cv2_show_image(window_name, image,
                   size_wh=None, location_xy=None):
    """Helper function for specifying window size and location when
    displaying images with cv2.

    Args:
        window_name: str window name
        image: ndarray image to display
        size_wh: window size (w, h)
        location_xy: window location (x, y)
    """

    if size_wh is not None:
        cv2.namedWindow(window_name,
                        cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_NORMAL)
        cv2.resizeWindow(window_name, *size_wh)
    else:
        cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    if location_xy is not None:
        cv2.moveWindow(window_name, *location_xy)

    cv2.imshow(window_name, image) 
Developer ID: kujason, Project: ip_basic, Lines of code: 25, Source file: vis_utils.py
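
A brief usage sketch of the helper above (the image path 'depth_map.png' and the window titles are placeholders, not part of the original project):

import cv2

image = cv2.imread('depth_map.png')  # placeholder path

# Resizable window fixed at 640x480, placed near the top-left corner of the screen
cv2_show_image('Depth Map', image, size_wh=(640, 480), location_xy=(20, 20))

# Default behaviour: a WINDOW_AUTOSIZE window sized to the image itself
cv2_show_image('Depth Map (autosize)', image)

cv2.waitKey(0)
cv2.destroyAllWindows()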

Example 5: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def __init__(self, video_out=None, draw_frames=True, video_out_fourcc="DIVX", video_out_fps=2):
        """
        A template class to annotate and save the processed frames. It can also save the annotated frames in a video
        file and/or display them in a new window. The :meth:`~ethoscope.drawers.drawers.BaseDrawer._annotate_frame`
        abstract method defines how frames are annotated.

        :param video_out: The path to the output file (.avi)
        :type video_out: str
        :param draw_frames: Whether frames should be displayed on the screen (a new window will be created).
        :type draw_frames: bool
        :param video_out_fourcc: When setting ``video_out``, this defines the codec used to save the output video (see `fourcc <http://www.fourcc.org/codecs.php>`_)
        :type video_out_fourcc: str
        :param video_out_fps: When setting ``video_out``, this defines the output fps. typically, the same as the input fps.
        :type video_out_fps: float
        """
        self._video_out = video_out
        self._draw_frames = draw_frames
        self._video_writer = None
        self._window_name = "ethoscope_" + str(os.getpid())
        self._video_out_fourcc = video_out_fourcc
        self._video_out_fps = video_out_fps
        if draw_frames:
            cv2.namedWindow(self._window_name, cv2.WINDOW_AUTOSIZE)
        self._last_drawn_frame = None 
Developer ID: gilestrolab, Project: ethoscope, Lines of code: 26, Source file: drawers.py

Example 6: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def __init__(self, rom_name, vis,frameskip=1,windowname='preview'):
    self.ale = ALEInterface()
    self.max_frames_per_episode = self.ale.getInt("max_num_frames_per_episode")
    self.ale.setInt("random_seed",123)
    self.ale.setInt("frame_skip",frameskip)
    romfile = str(ROM_PATH)+str(rom_name)
    if not os.path.exists(romfile):
      print('No ROM file found at "'+romfile+'".\nAdjust ROM_PATH or double-check the file exists.')
    self.ale.loadROM(romfile)
    self.legal_actions = self.ale.getMinimalActionSet()
    self.action_map = dict()
    self.windowname = windowname
    for i in range(len(self.legal_actions)):
      self.action_map[self.legal_actions[i]] = i

    # print(self.legal_actions)
    self.screen_width,self.screen_height = self.ale.getScreenDims()
    print("width/height: " +str(self.screen_width) + "/" + str(self.screen_height))
    self.vis = vis
    if vis:
      cv2.startWindowThread()
      cv2.namedWindow(self.windowname, flags=cv2.WINDOW_AUTOSIZE)  # window auto-sizes to the frame; not manually resizable
Developer ID: rdadolf, Project: fathom, Lines of code: 24, Source file: emulator.py

Example 7: show_camera

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def show_camera():
    # To flip the image, modify the flip_method parameter (0 and 2 are the most common)
    print(gstreamer_pipeline(flip_method=0))
    cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
    if cap.isOpened():
        window_handle = cv2.namedWindow('CSI Camera', cv2.WINDOW_AUTOSIZE)
        # Display frames until the window is closed or ESC is pressed
        while cv2.getWindowProperty('CSI Camera', 0) >= 0:
            ret_val, img = cap.read()
            cv2.imshow('CSI Camera', img)
            # waitKey also acts as the per-frame display delay
            keyCode = cv2.waitKey(30) & 0xff
            # Stop the program on the ESC key
            if keyCode == 27:
                break
        cap.release()
        cv2.destroyAllWindows()
    else:
        print('Unable to open camera') 
Developer ID: joakimeriksson, Project: ai-smarthome, Lines of code: 21, Source file: simple-camera.py

Example 8: main

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def main(argv):

   if (len(sys.argv) < 3):
      print('Not enough parameters')
      print('Usage:\nmatch_template_demo.py <image_name> <template_name> [<mask_name>]')
      return -1

   ## [load_image]
   global img
   global templ
   img = cv2.imread(sys.argv[1], cv2.IMREAD_COLOR)
   templ = cv2.imread(sys.argv[2], cv2.IMREAD_COLOR)

   if (len(sys.argv) > 3):
      global use_mask
      use_mask = True
      global mask
      mask = cv2.imread( sys.argv[3], cv2.IMREAD_COLOR )

   if ((img is None) or (templ is None) or (use_mask and (mask is None))):
      print('Can\'t read one of the images')
      return -1
   ## [load_image]

   ## [create_windows]
   cv2.namedWindow( image_window, cv2.WINDOW_AUTOSIZE )
   cv2.namedWindow( result_window, cv2.WINDOW_AUTOSIZE )
   ## [create_windows]

   ## [create_trackbar]
   trackbar_label = 'Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED'
   cv2.createTrackbar( trackbar_label, image_window, match_method, max_Trackbar, MatchingMethod )
   ## [create_trackbar]

   MatchingMethod(match_method)

   ## [wait_key]
   cv2.waitKey(0)
   return 0
   ## [wait_key] 
Developer ID: makelove, Project: OpenCV-Python-Tutorial, Lines of code: 42, Source file: match_template.py

Example 9: add

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def add(self, name, image, weight=1):
        '''
        Weight: the higher the value, the larger the image is displayed.
        :return:
        '''
        cv2.namedWindow(name, flags=cv2.WINDOW_AUTOSIZE)
        window = Window(name, image, weight)
        self.windows[name] = window
        # self.windows[name] = image 
Developer ID: makelove, Project: OpenCV-Python-Tutorial, Lines of code: 11, Source file: opencv_windows_management.py

Example 10: show

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def show(image):
        import cv2
        cv2.namedWindow("view", cv2.WINDOW_AUTOSIZE)
        cv2.imshow("view", image)
        cv2.waitKey(0)
        cv2.destroyWindow("view") 
Developer ID: tonybeltramelli, Project: pix2code, Lines of code: 8, Source file: Utils.py

Example 11: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def __init__(self, window_name=_DEFAULT_WINDOW_NAME, screen_size=_DEFAULT_SCREEN_SIZE_ID, display_individual_classification=_DEFAULT_DISPLAY_INDIVIDUAL_CLASSIFICATION,
                 display_graph_ensemble=_DEFAULT_DISPLAY_GRAPH_ENSEMBLE):
        """
        Initialize GUI of the FER demo.
        :param window_name: (string) The name of the window
        :param screen_size: (int) Index selecting a preset (width, height) pair from FERDemo._SCREEN_SIZE.
        """

        # Screen components
        self._fer = None
        self._input_image = None
        self._background = None
        self._plot_arousal = []
        self._plot_valence = []

        # Screen
        self._window_name = window_name
        self._screen_size = screen_size - 1
        self._width, self._height = FERDemo._SCREEN_SIZE[self._screen_size]
        self._display_individual_classification = display_individual_classification
        self._display_graph_ensemble = display_graph_ensemble

        # Container parameters
        self._container_width, self._container_height = (int(self._width // 2), int(self._height))
        self._container_center_position = np.array([self._container_width // 2, self._container_height // 2], dtype=int)
        self._input_container = None
        self._output_container = None
        self._input_container_initial_position = np.array([0, 0], dtype=int)
        self._output_container_initial_position = np.array([0, self._width // 2], dtype=int)

        # Output blocks
        self._output_block_height = (self._container_height // FERDemo._BLOCK_NUM_BLOCKS)
        self._output_block_height_ensemble = self._container_height
        self._output_block_width = self._container_width

        # Screen initialization
        self._draw_background()
        self._screen = self._get_container(0, 0, self._height, self._width)
        self._blank_screen()

        cv2.namedWindow(self._window_name, cv2.WINDOW_AUTOSIZE) 
Developer ID: siqueira-hc, Project: Efficient-Facial-Feature-Learning-with-Wide-Ensemble-based-Convolutional-Neural-Networks, Lines of code: 43, Source file: fer_demo.py

Example 12: drawingDemo

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def drawingDemo():
    img = emptyImage()

    # Draw a straight line with thickness 2
    drawLine(img, (10, 10), (200, 200), (0, 0, 255), 2)

    # A thickness of -1 draws a filled shape
    drawCircle(img, (300, 100), 80, (0, 255, 0), -1)

    # Draw both the filled interior and the outline
    drawRectangle(img, (10, 210), (210, 350), (100, 100, 0), -1)
    drawRectangle(img, (10, 210), (210, 350), (255, 0, 0), 3)

    # Draw an ellipse
    drawElipse(img, (450, 100), (30, 80), 0, 0, 360, (0, 100, 100), -1)

    # Draw a polygon
    pts = np.array([[(250, 240), (270, 280), (350, 320), (500, 300), (450, 230), (350, 210)]], dtype=np.int32)
    drawPolylines(img, pts, True, (255, 100, 100), 5)

    # Draw text
    drawText(img, 'OpenCV', (20, 450), font_types[0], 4, (200, 200, 200), 2)

    cv2.namedWindow('DrawingDemo', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('DrawingDemo', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Developer ID: tody411, Project: PyIntroduction, Lines of code: 29, Source file: drawing.py

Example 13: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def __init__(self, debug=False):
        self.debug = debug
        if self.debug:
            cv2.namedWindow("Stream", cv2.WINDOW_AUTOSIZE)
        self.tile_map = self.make_tilemap('firered_tiles.png')
        self.tile_text = self.make_tile_text('firered_tiles.txt')
        self.ocr_engine = video.OCREngine(self.tile_map, self.tile_text) 
Developer ID: rmmh, Project: pokr, Lines of code: 9, Source file: ocr.py

Example 14: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def __init__(self, src, window_name=None, org=None):
        self.src = src
        self.window_name = window_name if window_name else src

        cv2.startWindowThread()
        cv2.namedWindow(self.window_name, cv2.WINDOW_AUTOSIZE)
        if org:
            # Set the window position
            x, y = org
            cv2.moveWindow(self.window_name, x, y)

        super().__init__() 
Developer ID: jagin, Project: detectron2-pipeline, Lines of code: 14, Source file: display_video.py

Example 15: start

# Required import: import cv2 [as alias]
# Or: from cv2 import WINDOW_AUTOSIZE [as alias]
def start(self):
        print ("Starting Live object detection, may take a few minutes to initialize...")
        self.camera.startStreaming()
        self.detector.initializeSession()

        if not self.camera.isOpened():
            print ("Camera has failed to open")
            exit(-1)
        elif self.debug:
            cv2.namedWindow("Jetson Live Detection", cv2.WINDOW_AUTOSIZE)
    
        while True:
            curr_time = time.time()

            img = self.camera.getFrame()
            scores, boxes, classes, num_detections = self.detector.detect(img)

            if self.debug:
                self._visualizeDetections(img, scores, boxes, classes, num_detections)
                print ("Debug: Running at: " + str(1.0/(time.time() - curr_time)) + " Hz.")

            if cv2.waitKey(1) == ord('q'):
                break

            # throttle to rate
            capture_duration = time.time() - curr_time
            sleep_time = self.rate - capture_duration
            if sleep_time > 0:
                time.sleep(sleep_time)
        
        cv2.destroyAllWindows()
        self.camera.__del__()
        self.detector.__del__()
        print ("Exiting...")
        return 
Developer ID: SteveMacenski, Project: jetson_nano_detection_and_tracking, Lines of code: 37, Source file: jetson_live_object_detection.py


Note: The cv2.WINDOW_AUTOSIZE attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.