當前位置: 首頁>>代碼示例>>Python>>正文


Python cv2.CAP_PROP_BUFFERSIZE屬性代碼示例

本文整理匯總了Python中cv2.CAP_PROP_BUFFERSIZE屬性的典型用法代碼示例。如果您正苦於以下問題:Python cv2.CAP_PROP_BUFFERSIZE屬性的具體用法?Python cv2.CAP_PROP_BUFFERSIZE怎麽用?Python cv2.CAP_PROP_BUFFERSIZE使用的例子?那麽, 這裏精選的屬性代碼示例或許可以為您提供幫助。您也可以進一步了解該屬性所在cv2的用法示例。


在下文中一共展示了cv2.CAP_PROP_BUFFERSIZE屬性的7個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: __init__

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_BUFFERSIZE [as 別名]
def __init__(self, pipe=0, img_size=416, half=False):
        """Open a webcam or network stream for inference.

        Args:
            pipe: camera index or stream URL; the string '0' is coerced
                to the local default camera (index 0).
            img_size: target inference image size.
            half: whether frames are consumed as fp16 (half precision).
        """
        self.img_size = img_size
        self.half = half  # fp16 inference flag

        # '0' from the command line means the default local camera.
        if pipe == '0':
            pipe = 0

        # Other source examples that work here:
        #   pipe = 'rtsp://192.168.1.64/1'                             # IP camera
        #   pipe = 'rtsp://username:password@192.168.1.64/1'           # IP camera with login
        #   pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
        #   pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'    # IP golf camera
        # GStreamer pipelines (see the OpenCV Q&A links below for setup help):
        #   https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
        #   https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
        #   https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package
        #   pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink'
        #   pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"

        self.pipe = pipe
        # Open the capture and keep its internal buffer small to reduce latency.
        self.cap = cv2.VideoCapture(pipe)
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)
開發者ID:zbyuan,項目名稱:pruning_yolov3,代碼行數:23,代碼來源:datasets.py

示例2: __setup_stream_settings

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_BUFFERSIZE [as 別名]
def __setup_stream_settings(self):
        """Configure codec, buffer size, resolution and frame rate on the stream."""
        stream = self.__video_stream
        # Compression format: MJPG ('M','J','P','G' fourcc).
        stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
        # Buffer size from ROS params.
        # NOTE: CAP_PROP_BUFFERSIZE is not available in OpenCV 3.4.
        stream.set(cv2.CAP_PROP_BUFFERSIZE, rospy.get_param("~buffer_size"))
        # Image size from ROS params.
        width, height = rospy.get_param("~frame_size")
        stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        # Frame rate.
        stream.set(cv2.CAP_PROP_FPS, self.__frame_rate)
開發者ID:NiryoRobotics,項目名稱:niryo_one_ros,代碼行數:14,代碼來源:camera_publisher_and_services.py

示例3: __init__

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_BUFFERSIZE [as 別名]
def __init__(self, phone_ip):
        """Connect to the IP-camera stream served by the phone at *phone_ip*."""
        ip_camera_url = 'http://admin:admin@{}:8081/'.format(phone_ip)
        self.cap = cv2.VideoCapture(ip_camera_url)
        # Keep the capture buffer small so reads stay close to real time.
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, self.CAP_BUFFER_SIZE)

        # Discard the first few frames so subsequent reads are fresh.
        for _ in range(self.INIT_JUMP_FRAME_NUM):
            ret, img = self.cap.read()
開發者ID:1zlab,項目名稱:1ZLAB_PyEspCar,代碼行數:10,代碼來源:wifi_camera.py

示例4: __init__

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_BUFFERSIZE [as 別名]
def __init__(self, src=0):
        """Open *src* and start a daemon thread that continuously pulls frames."""
        self.capture = cv2.VideoCapture(src)
        # A 2-frame buffer keeps latency low on live streams.
        self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 2)

        # FPS holds the per-frame period for a 30 FPS target
        # (FPS = 1/X where X is the desired frame rate);
        # FPS_MS is the same period in milliseconds.
        self.FPS = 1 / 30
        self.FPS_MS = int(self.FPS * 1000)

        # Frame retrieval runs in the background; daemon so it dies with the process.
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()
開發者ID:ronitsinha,項目名稱:speed-detector,代碼行數:15,代碼來源:threadedcam.py

示例5: _frame_generator

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_BUFFERSIZE [as 別名]
def _frame_generator(input_source, out_frame, frame_shape, finish_flag):
        """Produces live frames from the input stream.

        Reads frames from *input_source* and copies each into the shared
        array *out_frame* (viewed as uint8 with shape *frame_shape*),
        pacing reads to the source frame rate. Sets *finish_flag* and
        releases the capture when the stream ends.
        """

        cap = cv2.VideoCapture(input_source)
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

        # CAP_PROP_FPS can legitimately return 0 for some live streams;
        # fall back to 30 FPS instead of raising ZeroDivisionError.
        source_fps = cap.get(cv2.CAP_PROP_FPS)
        trg_time_step = 1.0 / float(source_fps) if source_fps > 0 else 1.0 / 30.0

        try:
            while True:
                start_time = time.perf_counter()

                _, frame = cap.read()
                if frame is None:
                    break

                # Copy under the lock so readers never see a half-written frame.
                with out_frame.get_lock():
                    buffer = np.frombuffer(out_frame.get_obj(), dtype=np.uint8)
                    np.copyto(buffer.reshape(frame_shape), frame)

                # Sleep off whatever time remains in this frame's slot.
                elapsed_time = time.perf_counter() - start_time
                rest_time = trg_time_step - elapsed_time
                if rest_time > 0.0:
                    time.sleep(rest_time)
        finally:
            # Signal completion and free the capture even if the loop raised.
            finish_flag.value = True
            cap.release()
開發者ID:opencv,項目名稱:open_model_zoo,代碼行數:30,代碼來源:video_stream.py

示例6: main

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_BUFFERSIZE [as 別名]
def main():
	"""Run the vehicle-counting loop over the configured road stream."""
	# I think KNN works better than MOG2, specifically with trucks/large vehicles
	# TODO: Block out snowbank where shadows are strongly reflected!
	bg_subtractor = cv2.createBackgroundSubtractorKNN(detectShadows=True)
	car_counter = None  # created lazily once the first frame's size is known

	load_cropped()

	cap = cv2.VideoCapture(road['stream_url'])
	cap.set(cv2.CAP_PROP_BUFFERSIZE, 2)

	cv2.namedWindow('Source Image')
	cv2.setMouseCallback('Source Image', click_and_crop)

	# Query FPS once (not every iteration) and guard against streams that
	# report 0 FPS, which would make the pacing sleep below divide by zero.
	source_fps = cap.get(cv2.CAP_PROP_FPS)
	frame_delay = (1.0 / source_fps) if source_fps > 0 else 0.0

	frame_number = -1

	while True:
		frame_number += 1
		ret, frame = cap.read()

		if not ret:
			print('Frame capture failed, stopping...')
			break

		if car_counter is None:
			car_counter = VehicleCounter(frame.shape[:2], road, cap.get(cv2.CAP_PROP_FPS), samples=10)

		processed = process_frame(frame_number, frame, bg_subtractor, car_counter)

		cv2.imshow('Source Image', frame)
		cv2.imshow('Processed Image', processed)

		key = cv2.waitKey(WAIT_TIME)

		if key == ord('s'):
			# save rects!
			save_cropped()
		elif key == ord('q') or key == 27:
			break

		# Keep video's speed stable
		# I think that this causes the abrupt jumps in the video
		if frame_delay > 0.0:
			time.sleep(frame_delay)

	print('Closing video capture...')
	cap.release()
	cv2.destroyAllWindows()
	print('Done.')
開發者ID:ronitsinha,項目名稱:speed-detector,代碼行數:54,代碼來源:main.py

示例7: _play

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_BUFFERSIZE [as 別名]
def _play(visualizer_queue, cur_source_id, source_paths, max_image_size, trg_time_step):
        """Produces live frame from the active video source.

        Loops forever: reads frames from the source selected by the shared
        value *cur_source_id* (an index into *source_paths*, a list of
        (name, path) pairs), loops each video, downscales frames whose
        longest side exceeds *max_image_size*, overlays the source name,
        and pushes frames into *visualizer_queue* paced to *trg_time_step*
        seconds per frame.
        """

        cap = None
        last_source_id = cur_source_id.value

        while True:
            start_time = time.perf_counter()

            # Another process changed the active source: drop the old capture
            # so a new one is opened below.
            if cur_source_id.value != last_source_id:
                last_source_id = cur_source_id.value
                cap.release()
                cap = None

            source_name, source_path = source_paths[cur_source_id.value]

            if cap is None:
                cap = cv2.VideoCapture(source_path)
                cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

            _, frame = cap.read()
            if frame is None:
                # End of file: rewind and loop the video from the start.
                cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                _, frame = cap.read()
                assert frame is not None

            # Downscale so the longest side is at most max_image_size,
            # preserving aspect ratio. trg_frame_size is [height, width].
            trg_frame_size = list(frame.shape[:2])
            if np.max(trg_frame_size) > max_image_size:
                if trg_frame_size[0] == np.max(trg_frame_size):
                    trg_frame_size[1] = int(float(max_image_size) / float(trg_frame_size[0]) * float(trg_frame_size[1]))
                    trg_frame_size[0] = max_image_size
                else:
                    trg_frame_size[0] = int(float(max_image_size) * float(trg_frame_size[0]) / float(trg_frame_size[1]))
                    trg_frame_size[1] = max_image_size

            # cv2.resize takes (width, height), hence the index swap.
            frame = cv2.resize(frame, (trg_frame_size[1], trg_frame_size[0]))
            cv2.putText(frame, 'GT Gesture: {}'.format(source_name), (10, frame.shape[0] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

            # Copy before queueing: the local `frame` buffer is reused next loop.
            visualizer_queue.put(np.copy(frame), True)

            # Sleep off the remainder of this frame's time slot.
            end_time = time.perf_counter()
            elapsed_time = end_time - start_time
            rest_time = trg_time_step - elapsed_time
            if rest_time > 0.0:
                time.sleep(rest_time)
開發者ID:opencv,項目名稱:open_model_zoo,代碼行數:48,代碼來源:video_library.py


注:本文中的cv2.CAP_PROP_BUFFERSIZE屬性示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。