

Python cv2.CAP_PROP_BUFFERSIZE Attribute Code Examples

This article collects typical usage examples of the cv2.CAP_PROP_BUFFERSIZE attribute in Python. If you are wondering what cv2.CAP_PROP_BUFFERSIZE is for or how to use it, the selected code examples below may help. You can also explore further usage examples from the cv2 module.


Below are 7 code examples of the cv2.CAP_PROP_BUFFERSIZE attribute, sorted by popularity by default.
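Before looking at the individual examples, it may help to see the basic pattern they all share: open a cv2.VideoCapture and then request a smaller internal frame buffer so that read() returns frames closer to real time on live streams. The following minimal sketch is not taken from any of the projects below; it only illustrates the call, and the camera index 0 is a placeholder. Note that not every capture backend honors CAP_PROP_BUFFERSIZE.

import cv2

# Minimal sketch (assumption: a local camera is available at index 0).
cap = cv2.VideoCapture(0)  # could also be an RTSP/HTTP URL for an IP camera

# Ask the backend to keep at most 1 queued frame to reduce latency.
# set() generally returns False when the property is unsupported, though
# some backends may still silently ignore the requested value.
if not cap.set(cv2.CAP_PROP_BUFFERSIZE, 1):
    print('CAP_PROP_BUFFERSIZE is not supported by this capture backend')

ret, frame = cap.read()
if ret:
    print('Captured frame with shape:', frame.shape)

cap.release()

A small buffer trades smoothness for freshness: with a large buffer, read() can return stale frames that accumulated while your code was busy, which is why several of the examples below set the value to 1 or 2.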

Example 1: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_BUFFERSIZE [as alias]
def __init__(self, pipe=0, img_size=416, half=False):
        self.img_size = img_size
        self.half = half  # half precision fp16 images

        if pipe == '0':
            pipe = 0  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
        # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink'  # GStreamer

        # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help
        # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"  # GStreamer

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size 
Developer: zbyuan, Project: pruning_yolov3, Lines of code: 23, Source: datasets.py

Example 2: __setup_stream_settings

# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_BUFFERSIZE [as alias]
def __setup_stream_settings(self):
        # Set compression format
        self.__video_stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
        # Set Buffer size
        # -- Not available in opencv 3.4 -- #
        self.__video_stream.set(cv2.CAP_PROP_BUFFERSIZE, rospy.get_param("~buffer_size"))
        # Set image size
        w, h = rospy.get_param("~frame_size")
        self.__video_stream.set(cv2.CAP_PROP_FRAME_WIDTH, w)
        self.__video_stream.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
        # Set frame rate
        self.__video_stream.set(cv2.CAP_PROP_FPS, self.__frame_rate) 
Developer: NiryoRobotics, Project: niryo_one_ros, Lines of code: 14, Source: camera_publisher_and_services.py

Example 3: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_BUFFERSIZE [as alias]
def __init__(self, phone_ip):
        ip_camera_url = 'http://admin:admin@{}:8081/'.format(phone_ip)
        self.cap = cv2.VideoCapture(ip_camera_url)
        # Set the capture buffer size
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, self.CAP_BUFFER_SIZE)

        for i in range(self.INIT_JUMP_FRAME_NUM):
            ret, img = self.cap.read() 
Developer: 1zlab, Project: 1ZLAB_PyEspCar, Lines of code: 10, Source: wifi_camera.py

Example 4: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_BUFFERSIZE [as alias]
def __init__(self, src=0):
        self.capture = cv2.VideoCapture(src)
        self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 2)

        # FPS = 1/X
        # X = desired FPS
        self.FPS = 1/30
        self.FPS_MS = int(self.FPS * 1000)

        # Start frame retrieval thread
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start() 
Developer: ronitsinha, Project: speed-detector, Lines of code: 15, Source: threadedcam.py

Example 5: _frame_generator

# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_BUFFERSIZE [as alias]
def _frame_generator(input_source, out_frame, frame_shape, finish_flag):
        """Produces live frames from the input stream"""

        cap = cv2.VideoCapture(input_source)
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

        source_fps = cap.get(cv2.CAP_PROP_FPS)
        trg_time_step = 1.0 / float(source_fps)

        while True:
            start_time = time.perf_counter()

            _, frame = cap.read()
            if frame is None:
                break

            with out_frame.get_lock():
                buffer = np.frombuffer(out_frame.get_obj(), dtype=np.uint8)
                np.copyto(buffer.reshape(frame_shape), frame)

            end_time = time.perf_counter()
            elapsed_time = end_time - start_time
            rest_time = trg_time_step - elapsed_time
            if rest_time > 0.0:
                time.sleep(rest_time)

        finish_flag.value = True
        cap.release() 
Developer: opencv, Project: open_model_zoo, Lines of code: 30, Source: video_stream.py

Example 6: main

# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_BUFFERSIZE [as alias]
def main ():
	# I think KNN works better than MOG2, specifically with trucks/large vehicles
	# TODO: Block out snowbank where shadows are strongly reflected!
	bg_subtractor = cv2.createBackgroundSubtractorKNN(detectShadows=True)
	car_counter = None

	load_cropped()

	cap = cv2.VideoCapture(road['stream_url'])
	cap.set(cv2.CAP_PROP_BUFFERSIZE, 2)

	cv2.namedWindow('Source Image')
	cv2.setMouseCallback('Source Image', click_and_crop)

	frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
	frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

	frame_number = -1

	while True:
		frame_number += 1
		ret, frame = cap.read()

		if not ret:
			print('Frame capture failed, stopping...')
			break

		if car_counter is None:
			car_counter = VehicleCounter(frame.shape[:2], road, cap.get(cv2.CAP_PROP_FPS), samples=10)

		processed = process_frame(frame_number, frame, bg_subtractor, car_counter)

		cv2.imshow('Source Image', frame)
		cv2.imshow('Processed Image', processed)

		key = cv2.waitKey(WAIT_TIME)

		if key == ord('s'):
			# save rects!
			save_cropped()
		elif key == ord('q') or key == 27:
			break

		# Keep video's speed stable
		# I think that this causes the abrupt jumps in the video
		time.sleep( 1.0 / cap.get(cv2.CAP_PROP_FPS) )


	print('Closing video capture...')
	cap.release()
	cv2.destroyAllWindows()
	print('Done.') 
Developer: ronitsinha, Project: speed-detector, Lines of code: 54, Source: main.py

Example 7: _play

# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_BUFFERSIZE [as alias]
def _play(visualizer_queue, cur_source_id, source_paths, max_image_size, trg_time_step):
        """Produces live frame from the active video source"""

        cap = None
        last_source_id = cur_source_id.value

        while True:
            start_time = time.perf_counter()

            if cur_source_id.value != last_source_id:
                last_source_id = cur_source_id.value
                cap.release()
                cap = None

            source_name, source_path = source_paths[cur_source_id.value]

            if cap is None:
                cap = cv2.VideoCapture(source_path)
                cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

            _, frame = cap.read()
            if frame is None:
                cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                _, frame = cap.read()
                assert frame is not None

            trg_frame_size = list(frame.shape[:2])
            if np.max(trg_frame_size) > max_image_size:
                if trg_frame_size[0] == np.max(trg_frame_size):
                    trg_frame_size[1] = int(float(max_image_size) / float(trg_frame_size[0]) * float(trg_frame_size[1]))
                    trg_frame_size[0] = max_image_size
                else:
                    trg_frame_size[0] = int(float(max_image_size) * float(trg_frame_size[0]) / float(trg_frame_size[1]))
                    trg_frame_size[1] = max_image_size

            frame = cv2.resize(frame, (trg_frame_size[1], trg_frame_size[0]))
            cv2.putText(frame, 'GT Gesture: {}'.format(source_name), (10, frame.shape[0] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

            visualizer_queue.put(np.copy(frame), True)

            end_time = time.perf_counter()
            elapsed_time = end_time - start_time
            rest_time = trg_time_step - elapsed_time
            if rest_time > 0.0:
                time.sleep(rest_time) 
Developer: opencv, Project: open_model_zoo, Lines of code: 48, Source: video_library.py


Note: The cv2.CAP_PROP_BUFFERSIZE attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.