

Python video.VideoStream Class Code Examples

This article collects typical usage examples of the Python class imutils.video.VideoStream. If you are wondering what the VideoStream class does, how to use it, or what real code that uses it looks like, the curated class examples below should help.


The following presents 15 code examples of the VideoStream class, sorted by popularity by default.
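Before diving into the examples, here is a minimal sketch of the typical VideoStream lifecycle (construct, start, read frames in a loop, stop). The webcam source src=0 and the q-to-quit loop are assumptions for illustration, not taken from any one example below:

import time

import cv2
from imutils.video import VideoStream

# src=0 selects the default webcam; on a Raspberry Pi, many of the
# examples below pass usePiCamera=True instead
vs = VideoStream(src=0).start()
time.sleep(2.0)  # give the camera sensor time to warm up

while True:
    frame = vs.read()  # returns only the frame, with no "grabbed" flag
    if frame is None:
        continue
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cv2.destroyAllWindows()
vs.stop()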

Example 1: Video

class Video():
    def __init__(self):
        self.vs = VideoStream(usePiCamera=1 > 0).start()
        time.sleep(2.0)
        self.currentFrame = np.array([])
        self.raw_img = np.array([])


    def captureRawFrame(self):
        """
        capture frame and reverse RBG BGR and return opencv image
        """
        rawFrame = self.vs.read()
        rawFrame = imutils.resize(rawFrame, width=640)
        self.raw_img = rawFrame
        #return rawFrame

    def convertFrame(self):
        """
        converts frame to format suitable for QtGui
        """
        try:
            self.currentFrame = cv2.cvtColor(self.raw_img, cv2.COLOR_BGR2RGB)
            height, width = self.currentFrame.shape[:2]
            img = QtGui.QImage(self.currentFrame,
                               width,
                               height,
                               QtGui.QImage.Format_RGB888)
            img = QtGui.QPixmap.fromImage(img)
            #self.previousFrame = self.currentFrame
            img = img.scaledToHeight(480)
            img = img.scaledToWidth(360)
            return img
        except Exception:
            # conversion can fail on an empty frame; callers treat None as "no image"
            return None
Developer: cyrilli, Project: pose-estimation_python-opencv, Lines: 35, Source: Video.py

Example 2: __init__

    def __init__(self):
        # initialize the video stream and allow the camera
        # sensor to warmup
        self.vs = VideoStream(usePiCamera=1 > 0).start()
        time.sleep(2.0)
        self.currentFrame = np.array([])
        self.raw_img = np.array([])

        self.writer = None
        (h, w) = (None, None)
Developer: cyrilli, Project: pose-estimation_python-opencv, Lines: 10, Source: recordVideo.py

Example 3: main

def main():
	global frame, key
	# initialize the camera and grab a reference to the raw camera capture
	wdth = int(math.floor(360))
	hgth = int(math.floor(800))
	camera = VideoStream(usePiCamera=True,resolution=(wdth,hgth)).start()
	time.sleep(2.0)
	fourcc = cv2.VideoWriter_fourcc(*'MJPG')
	writer = None
	(h,w) = (None, None)
	# setup the mouse callback
	cv2.startWindowThread()
	cv2.namedWindow("Detection")
	cv2.setMouseCallback("Detection",mouseOn)
	# keep looping over the frames
	#for frame2 in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
	while True:
		frame = camera.read()
		frame = cv2.transpose(frame)
		frame = cv2.flip(frame, 1)
		timestamp = datetime.datetime.now()
		ts = timestamp.strftime("%d/%m/%Y %H:%M:%S")
		cv2.putText(frame,ts,(10,frame.shape[0]-10),cv2.FONT_HERSHEY_SIMPLEX,0.35,(0,255,0),1)
		if writer is None:
			(h,w) = frame.shape[:2]
			writer = cv2.VideoWriter("/media/usb/test_" + timestamp.strftime("%d_%m_%Y_%H%M") + ".avi", fourcc,5,(w,h), True)
		writer.write(frame)
		cv2.imshow("Detection", frame);
		#cv2.setMouseCallback("Detection",mouseOn)
		#key = cv2.waitKey(10) & 0xFF
		# if the 'q' key is pressed, stop the loop
		if key == ord("q"): #cv2.EVENT_LBUTTONDOWN: #ord("q"):
	#		cv2.destroyAllWindows()
	#		camera.stop()
			break
	# cleanup the camera and close any open windows
	cv2.destroyAllWindows()
	camera.stop()
Developer: kel85uk, Project: RaspberryPi_Projects, Lines: 38, Source: track.py
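The loop above exits only when the global key becomes ord("q"), and cv2.waitKey is commented out, so the update must come from the mouseOn callback registered with cv2.setMouseCallback. That callback is defined elsewhere in track.py; a hypothetical minimal version (an assumption, not the author's code) could look like:

# hypothetical sketch: treat a left-button click like pressing 'q'
def mouseOn(event, x, y, flags, param):
    global key
    if event == cv2.EVENT_LBUTTONDOWN:
        key = ord("q")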

Example 4: recordVideo

class recordVideo():
    def __init__(self):
        # initialize the video stream and allow the camera
        # sensor to warmup
        self.vs = VideoStream(usePiCamera=1 > 0).start()
        time.sleep(2.0)
        self.currentFrame = np.array([])
        self.raw_img = np.array([])
        
        self.writer = None
        (h, w) = (None, None)
        
    def captureRawFrame(self):
        """
        capture frame and reverse RBG BGR and return opencv image, and also record the video
        """
        rawFrame = self.vs.read()
        rawFrame = imutils.resize(rawFrame, width=640)
        self.raw_img = rawFrame
        #return rawFrame

    def initRecord(self):
        if self.writer is None:
            # store the image dimensions, initialize the video writer,
            # and construct the zeros array
            #(h, w) = self.raw_img.shape[:2]
            # VideoWriter_fourcc replaces the OpenCV 2.x cv2.cv.FOURCC API
            self.writer = cv2.VideoWriter('./demoVideo/' + str(int(time.time())) + '.avi',
                                          cv2.VideoWriter_fourcc(*"XVID"), 15,
                                          (640, 480), True)
    def record(self):
        # write the output frame to file
        self.writer.write(self.raw_img)

    def convertFrame(self):
        """
        converts frame to format suitable for QtGui
        """
        try:
            self.currentFrame = cv2.cvtColor(self.raw_img, cv2.COLOR_BGR2RGB)
            height, width = self.currentFrame.shape[:2]
            img = QtGui.QImage(self.currentFrame,
                               width,
                               height,
                               QtGui.QImage.Format_RGB888)
            img = QtGui.QPixmap.fromImage(img)
            #self.previousFrame = self.currentFrame
            img = img.scaledToHeight(480)
            img = img.scaledToWidth(360)
            return img
        except Exception:
            # conversion can fail on an empty frame; callers treat None as "no image"
            return None
Developer: cyrilli, Project: pose-estimation_python-opencv, Lines: 50, Source: recordVideo.py

Example 5: VideoStream

import sys
import time
import cv2
from imutils.video import VideoStream
import imutils

# define the lower and upper boundaries of red in HSV; red wraps
# around the hue axis, so two ranges are needed
redLower1 = (0, 100, 100)
redUpper1 = (10, 255, 255)
redLower2 = (160, 100, 100)
redUpper2 = (179, 255, 255)

# initialize the list of tracked points, the frame counter,
# and the coordinate deltas
(dX, dY) = (0, 0)

video_stream = VideoStream(usePiCamera=False, resolution=(640,480), framerate=32).start()
time.sleep(2)

# keep looping
while True:
	# grab the current frame
	# image = video_stream.read()
	frame = video_stream.read()

	# resize the frame, blur it, and convert it to the HSV
	# color space
	# frame = imutils.resize(image, width=400)
	blurred = cv2.GaussianBlur(frame, (11, 11), 0)
	hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

	# construct a mask for the color "red", then perform
Developer: coffeecold, Project: VideoTracker, Lines: 31, Source: object_tracker.py
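Red straddles the hue wrap-around in HSV, which is why the excerpt defines two ranges. The masking step itself falls outside the excerpt; a minimal sketch of how the two ranges are typically combined (the range values are copied from above, the function itself is an assumption):

import cv2
import numpy as np

# the two red HSV ranges from the excerpt
RED_RANGES = [((0, 100, 100), (10, 255, 255)),
              ((160, 100, 100), (179, 255, 255))]

def red_mask(hsv):
    # build one inRange mask per range, then OR them together
    mask = np.zeros(hsv.shape[:2], dtype=np.uint8)
    for lower, upper in RED_RANGES:
        mask |= cv2.inRange(hsv, np.array(lower), np.array(upper))
    # erode and dilate to remove small noise blobs
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    return mask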

Example 6: main

def main():
    # build the command-line arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--prototxt", required=False, default="/home/pi/Kenobi/recognition/MobileNetSSD_deploy.prototxt.txt",
        help="path to Caffe 'deploy' prototxt file")
    ap.add_argument("-m", "--model", required=False, default="/home/pi/Kenobi/recognition/MobileNetSSD_deploy.caffemodel",
        help="path to Caffe pre-trained model")
    ap.add_argument("-c", "--confidence", type=float, default=0.6,
        help="minimum probability to filter weak detections")
    args = vars(ap.parse_args())

    # initialize the list of object classes MobileNet SSD was trained on
    # (the labels are kept in French: they double as the names of the .ogg sound files)
    # and assign a random color to the detection box of each class
    CLASSES = ["arriere-plan", "avion", "velo", "oiseau", "bateau",
        "bouteille", "autobus", "voiture", "chat", "chaise", "vache", "table",
        "chien", "cheval", "moto", "personne", "plante", "mouton",
        "sofa", "train", "moniteur"]
    COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

    pygame.mixer.init()

    # load the files from the storage directory
    print("...loading the model...")
    net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

    # initialize the Pi camera, wait 2s for it to focus,
    # and initialize the FPS counter
    print("...starting the Picamera...")
    vs = VideoStream(usePiCamera=True, resolution=(1600, 1200)).start()
    time.sleep(2.0)
    #fps = FPS().start()

    # main loop over the video stream
    while True:
        # grab the frame from the stream and resize it
        # so that at most 800 pixels are displayed
        frame = vs.read()
        frame = imutils.resize(frame, width=800)

        # grab the frame dimensions and convert the frame to a blob
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843, (300, 300), 127.5)

        # run the blob through the network to get the detections and predictions
        net.setInput(blob)
        detections = net.forward()

        # detection loop
        list_objects = []
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (probability) of the detected object
            confidence = detections[0, 0, i, 2]

            # discard weak detections below the minimum probability
            if confidence > args["confidence"]:
                # extract the index of the detected class
                # and compute the bounding-box coordinates
                idx = int(detections[0, 0, i, 1])
                #box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                #(startX, startY, endX, endY) = box.astype("int")

                # draw a box around the detected object
                # and overlay the prediction label
                #label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                #cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
                #y = startY - 15 if startY - 15 > 15 else startY + 15
                #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                
                # save the detected image
                #cv2.imwrite("detection.png", frame)
                obj = CLASSES[idx]
                if obj not in list_objects:
                    list_objects.append(CLASSES[idx])
        
        # display the video stream in a window
        #cv2.imshow("Frame", frame)
        #key = cv2.waitKey(1) & 0xFF  # needed for the frame window to refresh

        # Pronounce the objects seen
        print(list_objects)
        for anobject in list_objects:
            path_to_sound = "/home/pi/Kenobi/recognition/vocabulary/" + anobject + ".ogg"
            if os.path.isfile(path_to_sound):
                pygame.mixer.music.load(path_to_sound)
                pygame.mixer.music.play()
                # Play until end of music file
                while pygame.mixer.music.get_busy() == True:
                    pygame.time.Clock().tick(10)

        # the q key interrupts the main loop
        #if key == ord("q"):
        #   break

        # update the FPS counter
        #fps.update()

    # stop the counter and print the information to the console
    #fps.stop()
    #print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    #print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
#......... remainder of this example omitted .........
Developer: SebGeek, Project: Kenobi, Lines: 101, Source: detect_objects.py

Example 7: __init__

    def __init__(self):
        self.vs = VideoStream(usePiCamera=1 > 0).start()
        time.sleep(2.0)
        self.currentFrame = np.array([])
        self.raw_img = np.array([])
Developer: cyrilli, Project: pose-estimation_python-opencv, Lines: 5, Source: Video.py

Example 8: deque

# define the lower and upper boundaries of the "green"
# ball in the HSV color space
greenLower = (24, 116, 137)
greenUpper = (36, 255, 255)

# initialize the list of tracked points, the frame counter,
# and the coordinate deltas
pts = deque(maxlen=args["buffer"])
counter = 0
(dX, dY) = (0, 0)
direction = ""

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
	vs = VideoStream(usePiCamera=1).start()
	time.sleep(2.0)	

# otherwise, grab a reference to the video file
else:
	vs = cv2.VideoCapture(args["video"])

# keep looping
while True:
	# grab the current frame
	if args.get("video"):
		(grabbed, frame) = vs.read()
	else:
		frame = vs.read()

	# if we are viewing a video and we did not grab a frame,
Developer: PaulSolheim, Project: Robotkurs, Lines: 31, Source: tennis_ball.py

Example 9: Matcher

import time
import cv2
import imutils
from imutils.video import VideoStream


from matcher import Matcher

matcher = Matcher([("fau-logo", "./templates/fau-logo.png"),
                   ("first-logo", "./templates/first-logo.jpg"),
                   ("nextera-logo", "./templates/nextera-energy-logo.jpg"),
                   ("techgarage-logo", "./templates/techgarage-logo.png")
                   ], min_keypoints_pct_match=8)

cam = VideoStream(usePiCamera=False).start()

cnt = 0
while True:
    img = cam.read()
    cv2.imshow("Pic", img)
    print(matcher.match(img))
    key = cv2.waitKey(10)
    if key == ord('q'):
        break

cam.stop()
cv2.destroyAllWindows()
Developer: Rchalla769, Project: Drones, Lines: 27, Source: test_matcher_rpi.py

Example 10: VideoStream

import numpy as np, cv2, datetime, time
from imutils.video import VideoStream
import imutils
import argparse

#Argument parser to select picamera or USB webcamera
ap=argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=int, default=-1,
	help="whether or not the Raspberry Pi camera should be used")
ap.add_argument("-v", "--video", help="path to video file")
ap.add_argument("-b", "--buffer", type=int, default=64, help="max buffer size")
args=vars(ap.parse_args())

camera = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2)   #camera start time

while True:
    # read the frame
    frame = camera.read()
    # resize to a 400 pixel width
    frame = imutils.resize(frame, width=400)
    # display the frame
    cv2.imshow('OrigFrame', frame)

    # press 'q' to quit
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

#close the imshow window
cv2.destroyAllWindows()
Developer: srajend2, Project: ECE590--Final_Project, Lines: 31, Source: Camera_Display.py

Example 11: vars

import warnings
import json
import cv2
from tempimage import TempImage

# Parse arguments from JSON config file
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True, help="path to the JSON configuration file")
args = vars(ap.parse_args())

warnings.filterwarnings("ignore")
conf = json.load(open(args["conf"]))

# initialize the video stream and allow the cammera sensor to warmup
#vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
vs = VideoStream(usePiCamera=True, framerate=conf["fps"], resolution=tuple(conf["resolution"])).start()
time.sleep(conf["camera_warmup_time"])
avg = None
lastUploaded = datetime.datetime.now()
motionCounter = 0

# loop over the frames from the video stream
while True:
    motionDetected = False
    # grab the frame from the threaded video stream; resize it,
    # convert it to grayscale, and blur it
    frame = vs.read()
    analysisFrame = imutils.resize(frame, width=conf["opencv_image_width"])
    gray = cv2.cvtColor(analysisFrame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
Developer: gitgc, Project: pi_motion_camera, Lines: 30, Source: surveillance.py
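The excerpt stops right after the grayscale blur. For context, the usual continuation of this kind of running-average motion detector (a generic sketch with placeholder thresholds, not the actual code from surveillance.py) looks like:

# generic sketch, assuming `gray` and `avg` from the excerpt above;
# the 5/5000 thresholds are placeholders, not values from conf
if avg is None:
    avg = gray.copy().astype("float")

# accumulate a running average of the background and diff against it
cv2.accumulateWeighted(gray, avg, 0.5)
frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

# threshold the delta, dilate to fill holes, and find contours
thresh = cv2.threshold(frameDelta, 5, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

# any sufficiently large contour counts as motion
motionDetected = any(cv2.contourArea(c) > 5000 for c in cnts)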

Example 12: print

else:
    if args['rangefilter'].upper() == "RGB":
        print("Error! RGB is currently unsupported, sorry!")
    else:
        color_range = args['color']
        colorLower = color_range[0:3]
        colorUpper = color_range[3:6]

video_extensions = ("3g2", "3gp", "asf", "asx", "avi", "flv", "m4v", "mov", "mp4", "mpg", "rm", "swf", "vob", "wmv")
if source.endswith(video_extensions):
    video = True
else:
    video = False

# created a threaded video stream
vs = VideoStream(src=args["source"]).start()

while True:
    # grab the frame from the threaded video stream; VideoStream.read()
    # returns just the frame, without a "grabbed" flag
    frame = vs.read()
    grabbed = frame is not None
    if video and not grabbed:
        break

    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # construct a mask for the set color, then perform
Developer: Kriskras99, Project: fly-py, Lines: 31, Source: main.py

Example 13: viewCameraPi

    def viewCameraPi(self):

        #text = 'This is a message from app to inform that app start running now!'
        #statusSMS = outboundSMSviaTwilio(account=self.account, token=self.token, destPhone=self.destPhone1,
        #                              twilioNumber=self.twilioNumber, message_body=text)
        #statusSMS = outboundSMSviaTwilio(account=self.account, token=self.token, destPhone=self.destPhone2,
        #                              twilioNumber=self.twilioNumber, message_body=text)
        print("START SCRIPT AND MAJOR WARNING!")
        statusSMS = 'delivered'
        if((statusSMS != 'failed') and (statusSMS != 'undelivered')):
            #camera = PiCamera()
            #camera.resolution = ( 640, 480)
            #camera.framerate = 32

            #rawCapture = PiRGBArray(camera, size=( 640, 480))
            #self.sumMSE = self.sumSSIM = self.avgMSE = self.avgSSIM = 0
            self.tempHour = datetime.datetime.now().hour
            self.tempMinute = datetime.datetime.now().minute
            self.warmup = 0

            vs = VideoStream(usePiCamera=1,resolution=(640,480)).start()
            time.sleep(1.2)

            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = None
            (h, w) = (None, None)
            zeros = None

            print ("view camera")

            while True:
                self.warmup+=1
                if(self.warmup >=5):
                    frame = vs.read()
                    if writer is None:
                        # store the image dimensions, initialize the video writer,
                        # and construct the zeros array
                        (h, w) = frame.shape[:2]
                        writer = cv2.VideoWriter('exampleTH3.avi', fourcc, 20,
                            (w, h), True)

                    writer.write(frame)

                    self.curImage = frame
                    self.getDefImagePerHours()
                    # write xml
                    print("processing frame #{}".format(self.warmup - 4))
                    self.writeXML()

                    # stop the program after 5 minutes:
                    if(datetime.datetime.now().minute - self.tempMinute >5):
                        self.final()
                        break


            # for frame in camera.capture_continuous( rawCapture, format("bgr"), use_video_port = True):
            #
            #     self.curImage = frame.array
            #     frame = vs.read()
            #     self.warmup +=1
            #     if(self.warmup >=5):
            #         self.getDefImagePerHours()
            #         # self.getDefImagePerTenMinutes()
            #
            #         #write xml
            #         print("process frame thu {}".format(self.warmup-4))
            #         self.writeXML()
            #
            #         #warning
            #         # self.warning()
            #
            #         # stop the program after 5 minutes:
            #         if(datetime.datetime.now().minute - self.tempMinute >5):
            #             self.final()
            #             break
            #     # if it is 16:00, the script shuts itself down
            #     # tempBreak = datetime.datetime.now().hour
            #     # if(( tempBreak == 0) or (tempBreak == 6) or (tempBreak == 18) or (tempBreak == 12)):
            #     #     if(datetime.datetime.now().minute == 0):
            #     #         if((datetime.datetime.now().second >= 0) and (datetime.datetime.now().second <=3)):
            #     #             self.final()
            #     #             self.__init__(tempBreak)
            #
            #     # show frame
            #     # cv2.imshow("image", self.curImage)
            #     # key = cv2.waitKey(1) & 0xFF
            #
            #     #renew
            #     rawCapture.truncate(0)
            #
            #     #press 'q' to stop, press any key to continue
            #     # if(key == ord("q")):
            #     #     break
            #call function final
            vs.stop()
            writer.release()
            self.final()


        else :
#.........这里部分代码省略.........
Developer: ChrisDan9, Project: Raspi-Cam, Lines: 101, Source: main.py

Example 14: VideoStream

FRAME_WIDTH = 640
FRAME_HEIGHT = 480

# adjust according to the cascade used:
# lbpcascade_frontalface: 1.1
# haarcascade_frontalface_alt2: 1.3
SCALE_FACTOR = 1.1
MIN_NEIGHBORS = 5
#MIN_SIZE = 30
MIN_SIZE = 80

cascPath = sys.argv[1]
faceCascade = cv2.CascadeClassifier(cascPath)

if ENABLE_VIDEO_STREAM:
    video_capture = VideoStream(usePiCamera=False).start()

else:
    video_capture = cv2.VideoCapture(0)
    video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, FRAME_WIDTH)
    video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT)

time.sleep(1)
t = ticket()

def faceDetect(gray):
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=SCALE_FACTOR,
        minNeighbors=MIN_NEIGHBORS,
        minSize=(MIN_SIZE, MIN_SIZE),
Developer: wwwins, Project: OpenCV-Samples, Lines: 31, Source: webcam-face-detect.py

Example 15: vars

ap.add_argument("-c", "--confidence", type = float, default = 0.4, help = "minimum probability to filter weak detections")
ap.add_argument("-s", "--skip-frames", type = int, default = 30, help = "# of skip frames between detections")
args = vars(ap.parse_args())

# initialize the list of class labels
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
            "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]

# load our serialized model from the disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# if a video path was not supplied, grab a reference to the webcam
if not args.get("input", False):
    print("[INFO] Starting video stream...")
    vs = VideoStream(src = 0).start()
    time.sleep(2.0)

# otherwise, grab a reference to the video file
else:
    print("[INFO] opening the video file...")
    vs = cv2.VideoCapture(args["input"])


# initialize the video writer
writer = None

# initialize the frame dimensions
W = None
H = None
Developer: sh-raza, Project: opencv-implementations, Lines: 30, Source: people_counter.py


Note: the imutils.video.VideoStream class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code belongs to the original authors, and any use or redistribution should follow the corresponding project's License. Please do not republish without permission.