本文整理匯總了Python中imutils.video.VideoStream方法的典型用法代碼示例。如果您正苦於以下問題:Python video.VideoStream方法的具體用法?Python video.VideoStream怎麽用?Python video.VideoStream使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類imutils.video
的用法示例。
在下文中一共展示了video.VideoStream方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: start_capture
# 需要導入模塊: from imutils import video [as 別名]
# 或者: from imutils.video import VideoStream [as 別名]
def start_capture(self, height=None, width=None, usingPiCamera=IS_RASPBERRY_PI, ):
    """Start the camera video stream and keep it on the instance.

    Parameters
    ----------
    height, width : int, optional
        Capture resolution override; BOTH must be truthy to replace the
        instance defaults (self.height, self.width).
    usingPiCamera : bool
        If True, use the Raspberry Pi camera backend instead of a webcam.

    After this returns, call ``self.current_frame.read()`` to get frames.
    """
    import imutils
    from imutils.video import VideoStream

    # NOTE(review): VideoStream conventionally takes resolution as
    # (width, height); this builds (height, width) — confirm the intended
    # meaning of self.height/self.width before reordering.
    resolution = (self.height, self.width)
    if height and width:  # both overrides required (was a nested if/if)
        resolution = (height, width)
    print("Camera Resolution:", resolution)

    self.current_frame = VideoStream(usePiCamera=usingPiCamera,
                                     resolution=resolution,
                                     framerate=30).start()
    time.sleep(2)  # let the camera sensor warm up before first read

    if not usingPiCamera:
        # Webcams ignore the resolution kwarg, so resize one frame to the
        # requested size (result was discarded in the original too; the
        # unused local has been dropped).
        imutils.resize(self.current_frame.read(), width=resolution[0], height=resolution[1])
    # Stream started, call current_frame.read() to get current frame
示例2: start_capture
# 需要導入模塊: from imutils import video [as 別名]
# 或者: from imutils.video import VideoStream [as 別名]
def start_capture(self, height=None, width=None, usingPiCamera=IS_RASPBERRY_PI, ):
    """Start the camera video stream and keep it on the instance.

    Parameters
    ----------
    height, width : int, optional
        Capture resolution override; BOTH must be truthy to replace the
        instance defaults (self.height, self.width).
    usingPiCamera : bool
        If True, use the Raspberry Pi camera backend instead of a webcam.

    After this returns, call ``self.current_frame.read()`` to get frames.
    """
    import imutils
    from imutils.video import VideoStream

    # NOTE(review): VideoStream conventionally takes resolution as
    # (width, height); this builds (height, width) — confirm the intended
    # meaning of self.height/self.width before reordering.
    resolution = (self.height, self.width)
    if height and width:  # both overrides required (was a nested if/if)
        resolution = (height, width)

    self.current_frame = VideoStream(usePiCamera=usingPiCamera,
                                     resolution=resolution,
                                     framerate=32).start()
    time.sleep(2)  # let the camera sensor warm up before first read

    if not usingPiCamera:
        # Webcams ignore the resolution kwarg, so resize one frame to the
        # requested width (result was discarded in the original too; the
        # unused local has been dropped).
        imutils.resize(self.current_frame.read(), width=resolution[0])
    # Stream started, call current_frame.read() to get current frame
示例3: main
# 需要導入模塊: from imutils import video [as 別名]
# 或者: from imutils.video import VideoStream [as 別名]
def main():
    """Publish webcam frames to an MQTT topic at roughly FPS frames per second.

    Connects to the broker, opens the camera, then loops forever reading
    frames, converting them to RGB PIL images, and publishing the encoded
    bytes. Cleanup of the camera and the MQTT network loop is guaranteed
    via try/finally (the original leaked both on Ctrl-C or any error).
    """
    client = get_mqtt_client()
    client.connect(MQTT_BROKER, port=MQTT_PORT)
    time.sleep(4)  # Wait for connection setup to complete
    client.loop_start()

    # Open camera
    camera = VideoStream(src=VIDEO_SOURCE, framerate=FPS).start()
    time.sleep(2)  # Webcam light should come on if using one
    try:
        while True:
            frame = camera.read()
            np_array_RGB = opencv2matplotlib(frame)  # Convert to RGB
            image = Image.fromarray(np_array_RGB)  # PIL image
            byte_array = pil_image_to_byte_array(image)
            client.publish(MQTT_TOPIC_CAMERA, byte_array, qos=MQTT_QOS)
            now = get_now_string()
            print(f"published frame on topic: {MQTT_TOPIC_CAMERA} at {now}")
            time.sleep(1 / FPS)  # crude pacing toward the target frame rate
    finally:
        # Release the camera and stop the MQTT background thread on exit.
        camera.stop()
        client.loop_stop()
示例4: get_frame
# 需要導入模塊: from imutils import video [as 別名]
# 或者: from imutils.video import VideoStream [as 別名]
def get_frame(vid_stream, stream):
    """Return the next frame, resized to width 600, or None at end of input.

    ``stream`` truthy means ``vid_stream`` is a cv2.VideoCapture whose
    read() yields a (grabbed, frame) pair; falsy means an imutils
    VideoStream whose read() yields the frame directly.
    """
    grabbed = vid_stream.read()
    if stream:
        # VideoCapture returns (grabbed_flag, frame); keep only the frame.
        grabbed = grabbed[1]
    if grabbed is None:
        # End of the video file (or no frame available).
        return None
    return imutils.resize(grabbed, width=600)
示例5: main
# 需要導入模塊: from imutils import video [as 別名]
# 或者: from imutils.video import VideoStream [as 別名]
def main():
    """Handles input from file or stream, tests the tracker class.

    With ``--video PATH`` frames come from cv2.VideoCapture; otherwise from
    the default webcam via imutils VideoStream. Tracks a green ball frame
    by frame until the input is exhausted, then releases the source and
    closes all windows (now guaranteed via try/finally).
    """
    arg_parse = argparse.ArgumentParser()
    arg_parse.add_argument("-v", "--video",
                           help="path to the (optional) video file")
    args = vars(arg_parse.parse_args())

    # Lower and upper boundaries of the "green" ball in HSV color space.
    # NB: the hue range in OpenCV is 0-180, normally it is 0-360.
    green_lower = (50, 50, 50)
    green_upper = (70, 255, 255)
    # red_lower = (0, 50, 50)
    # red_upper = (20, 255, 255)
    # blue_lower = (110, 50, 50)
    # upper_blue = (130, 255, 255)

    # Falsy -> webcam; a path -> video file. Computed once instead of
    # re-querying args at every branch as the original did.
    stream = args.get("video", False)
    if not stream:
        vid_stream = VideoStream(src=0).start()
    else:
        vid_stream = cv2.VideoCapture(args["video"])

    time.sleep(2.0)  # allow the camera or video file to warm up
    try:
        frame = get_frame(vid_stream, stream)
        if frame is None:
            # No frames at all (camera failure or empty file): bail out
            # instead of crashing on frame.shape below.
            return
        height, width = frame.shape[0], frame.shape[1]
        greentracker = Tracker(height, width, green_lower, green_upper)
        # Keep looping until no more frames.
        while frame is not None:
            greentracker.track(frame)
            frame = greentracker.draw_arrows(frame)
            show(frame)
            frame = get_frame(vid_stream, stream)
    finally:
        # Release the source even if tracking raises mid-loop.
        if not stream:
            vid_stream.stop()      # camera video stream
        else:
            vid_stream.release()   # video file capture
        cv2.destroyAllWindows()