

Python VideoStream.read Method Code Examples

This article collects typical usage examples of the Python method imutils.video.VideoStream.read. If you are wondering what VideoStream.read does, or how to use it in practice, the curated code examples below should help. You can also explore further usage examples of imutils.video.VideoStream, the class this method belongs to.


Below are 15 code examples of VideoStream.read, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
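
Before the curated examples, here is a minimal sketch of the basic read-loop pattern they all share (an illustrative sketch, assuming a USB webcam at src=0; pass usePiCamera=True instead for the Raspberry Pi camera module). Note that unlike cv2.VideoCapture.read, imutils' VideoStream.read returns only the frame, with no grabbed flag:

# Minimal VideoStream.read loop (a sketch, assuming a webcam at src=0)
import time

import cv2
import imutils
from imutils.video import VideoStream

vs = VideoStream(src=0).start()   # spawns a background frame-reading thread
time.sleep(2.0)                   # give the camera sensor time to warm up

while True:
    frame = vs.read()             # latest frame only; no (grabbed, frame) tuple
    frame = imutils.resize(frame, width=400)
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cv2.destroyAllWindows()
vs.stop()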

Example 1: Video

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
# Imports assumed from the QtGui/np/imutils usage below (a PyQt4-era project)
import time

import cv2
import imutils
import numpy as np
from imutils.video import VideoStream
from PyQt4 import QtGui


class Video():
    def __init__(self):
        # usePiCamera=True selects the Raspberry Pi camera module
        self.vs = VideoStream(usePiCamera=True).start()
        time.sleep(2.0)  # let the camera sensor warm up
        self.currentFrame = np.array([])
        self.raw_img = np.array([])


    def captureRawFrame(self):
        """
        capture a frame, resize it, and store it as an OpenCV (BGR) image
        """
        rawFrame = self.vs.read()
        rawFrame = imutils.resize(rawFrame, width=640)
        self.raw_img = rawFrame

    def convertFrame(self):
        """
        converts frame to format suitable for QtGui
        """
        try:
            self.currentFrame = cv2.cvtColor(self.raw_img, cv2.COLOR_BGR2RGB)
            height, width = self.currentFrame.shape[:2]
            img = QtGui.QImage(self.currentFrame,
                               width,
                               height,
                               QtGui.QImage.Format_RGB888)
            img = QtGui.QPixmap.fromImage(img)
            #self.previousFrame = self.currentFrame
            img = img.scaledToHeight(480)
            img = img.scaledToWidth(360)
            return img
        except Exception:
            return None
Author: cyrilli, Project: pose-estimation_python-opencv, Lines: 37, Source: Video.py
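
The class above only produces QPixmap objects; a hypothetical caller (not part of the original Video.py) could poll it from a Qt timer, assuming PyQt4 to match the QtGui usage:

# Hypothetical caller sketch, assuming PyQt4 (to match QtGui.QImage above)
import sys
from PyQt4 import QtCore, QtGui

app = QtGui.QApplication(sys.argv)
label = QtGui.QLabel()
label.show()

video = Video()

def tick():
    video.captureRawFrame()        # grab and store the newest frame
    pixmap = video.convertFrame()  # BGR -> RGB -> QPixmap
    if pixmap is not None:
        label.setPixmap(pixmap)

timer = QtCore.QTimer()
timer.timeout.connect(tick)
timer.start(50)  # poll roughly 20 times per second

sys.exit(app.exec_())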

Example 2: recordVideo

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
class recordVideo():
    def __init__(self):
        # initialize the video stream and allow the camera
        # sensor to warm up
        self.vs = VideoStream(usePiCamera=True).start()
        time.sleep(2.0)
        self.currentFrame = np.array([])
        self.raw_img = np.array([])

        self.writer = None
        (h, w) = (None, None)
        
    def captureRawFrame(self):
        """
        capture a frame, resize it, and store it as an OpenCV (BGR) image;
        record() writes the stored frame to the video file
        """
        rawFrame = self.vs.read()
        rawFrame = imutils.resize(rawFrame, width=640)
        self.raw_img = rawFrame

    def initRecord(self):
        if self.writer is None:
            # store the image dimensions and initialize the video writer
            # (cv2.cv.FOURCC is the OpenCV 2.x spelling; see the note below)
            self.writer = cv2.VideoWriter('./demoVideo/' + str(int(time.time())) + '.avi',
                                          cv2.cv.FOURCC(*"XVID"), 15, (640, 480), True)
    def record(self):
        # write the output frame to file
        self.writer.write(self.raw_img)

    def convertFrame(self):
        """
        converts frame to format suitable for QtGui
        """
        try:
            self.currentFrame = cv2.cvtColor(self.raw_img, cv2.COLOR_BGR2RGB)
            height, width = self.currentFrame.shape[:2]
            img = QtGui.QImage(self.currentFrame,
                               width,
                               height,
                               QtGui.QImage.Format_RGB888)
            img = QtGui.QPixmap.fromImage(img)
            #self.previousFrame = self.currentFrame
            img = img.scaledToHeight(480)
            img = img.scaledToWidth(360)
            return img
        except Exception:
            return None
Author: cyrilli, Project: pose-estimation_python-opencv, Lines: 52, Source: recordVideo.py
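
One portability note on the example above: cv2.cv.FOURCC only exists in OpenCV 2.x; the cv2.cv submodule was removed in OpenCV 3. A small version-tolerant sketch:

# Version-tolerant FOURCC lookup (cv2.cv was removed in OpenCV 3+)
import cv2

try:
    fourcc = cv2.VideoWriter_fourcc(*"XVID")  # OpenCV 3 and later
except AttributeError:
    fourcc = cv2.cv.FOURCC(*"XVID")           # OpenCV 2.x fallback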

Example 3: main

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
def main():
	global frame, key
	# initialize the camera and grab a reference to the raw camera capture
	wdth = int(math.floor(360))
	hgth = int(math.floor(800))
	camera = VideoStream(usePiCamera=True,resolution=(wdth,hgth)).start()
	time.sleep(2.0)
	fourcc = cv2.VideoWriter_fourcc(*'MJPG')
	writer = None
	(h,w) = (None, None)
	# setup the mouse callback
	cv2.startWindowThread()
	cv2.namedWindow("Detection")
	cv2.setMouseCallback("Detection",mouseOn)
	# keep looping over the frames
	#for frame2 in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
	while True:
		frame = camera.read()
		frame = cv2.transpose(frame)
		frame = cv2.flip(frame, 1)
		timestamp = datetime.datetime.now()
		ts = timestamp.strftime("%d/%m/%Y %H:%M:%S")
		cv2.putText(frame,ts,(10,frame.shape[0]-10),cv2.FONT_HERSHEY_SIMPLEX,0.35,(0,255,0),1)
		if writer is None:
			(h,w) = frame.shape[:2]
			writer = cv2.VideoWriter("/media/usb/test_" + timestamp.strftime("%d_%m_%Y_%H%M") + ".avi", fourcc,5,(w,h), True)
		writer.write(frame)
		cv2.imshow("Detection", frame);
		#cv2.setMouseCallback("Detection",mouseOn)
		#key = cv2.waitKey(10) & 0xFF
		# if the 'q' key is pressed, stop the loop
		if key == ord("q"): #cv2.EVENT_LBUTTONDOWN: #ord("q"):
	#		cv2.destroyAllWindows()
	#		camera.stop()
			break
	# cleanup the camera and close any open windows
	cv2.destroyAllWindows()
	camera.stop()
Author: kel85uk, Project: RaspberryPi_Projects, Lines: 40, Source: track.py
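
The loop above exits when the global key is set by the mouseOn callback, which the track.py excerpt does not show. A hypothetical shape for it, matching the signature cv2.setMouseCallback expects:

# Hypothetical mouseOn callback (the original is not shown in the excerpt).
# OpenCV invokes it as callback(event, x, y, flags, param).
key = 0   # module-level default so main()'s loop can test it before any click

def mouseOn(event, x, y, flags, param):
    global key
    if event == cv2.EVENT_LBUTTONDOWN:
        key = ord("q")   # signal the loop in main() to stop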

Example 4:

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
# otherwise, grab a reference to the video file
else:
	video_file = cv2.VideoCapture(args["video"])

# keep looping
while True:
	# grab the current frame
	if from_vfile:
		(grabbed, frame) = video_file.read()

		# if we are viewing a video and we did not grab a frame,
		# then we have reached the end of the video
		if not grabbed:
			break
	else:
		frame = camera.read()


	# resize the frame, blur it, and convert it to the HSV
	# color space
	frame = imutils.resize(frame, width=600)

	# record video
	if record_video:
		if video_writer is None: # create interface
			(h, w) = frame.shape[:2]
			fourcc = cv2.VideoWriter_fourcc(*'MJPG')
			# Note: the extension of the filename must be avi
			video_writer = cv2.VideoWriter(args["record_video"], fourcc, 20, (w,h), True)
			#zeros = np.zeros((h,w), dtype="uint8")
Author: myshepherd, Project: tritoneye, Lines: 32, Source: ball_tracking.py
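
This fragment depends on setup that was cut off above it: args, from_vfile, camera, record_video, and video_writer. A hypothetical preamble consistent with how the fragment uses those names (not the project's original code):

# Hypothetical preamble inferred from the fragment above
import argparse
import time

import cv2
import imutils
from imutils.video import VideoStream

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to an optional input video file")
ap.add_argument("-r", "--record_video", help="path to an output .avi file")
args = vars(ap.parse_args())

from_vfile = args.get("video") is not None
record_video = args.get("record_video") is not None
video_writer = None

# if a video path was not supplied, read from the camera
if not from_vfile:
    camera = VideoStream(src=0).start()
    time.sleep(2.0)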

Example 5: len

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
        minSize=(MIN_SIZE, MIN_SIZE),
        flags=cv2.CASCADE_SCALE_IMAGE
    )

    if len(faces) > 0:
        print("Found {0} faces!".format(len(faces)))

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    return frame

while True:
    # Capture frame-by-frame
    _, frame = video_capture.read()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    if ENABLE_FACE_DETECT:
        resultFrame = faceDetect(gray)
        # Display the resulting frame
        cv2.imshow('Video', resultFrame)

    if ENABLE_FPS:
        print("fps:",t.fps())

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
Author: wwwins, Project: OpenCV-Samples, Lines: 33, Source: webcam-face-detect.py
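
The excerpt presumes a Haar cascade, a capture device, and a few flags defined earlier. A plausible setup with hypothetical values (the cascade path below assumes the cv2.data helper shipped with modern opencv-python wheels; the excerpt's fps helper t is omitted, so ENABLE_FPS stays False):

# Hypothetical setup for the excerpt above (names inferred from it)
import cv2

ENABLE_FACE_DETECT = True
ENABLE_FPS = False
MIN_SIZE = 30   # minimum face size in pixels, passed to detectMultiScale

faceCascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
video_capture = cv2.VideoCapture(0)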

Example 6: print

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
print("[INFO] starting cameras...")
leftStream = VideoStream(src=1).start()
#rightStream = VideoStream(usePiCamera=True).start()
rightStream = VideoStream(src=0).start()
time.sleep(2.0)

# initialize the image stitcher, motion detector, and total
# number of frames read
stitcher = Stitcher()
motion = BasicMotionDetector(minArea=500)
total = 0

# loop over frames from the video streams
while True:
	# grab the frames from their respective video streams
	left = leftStream.read()
	right = rightStream.read()

	# resize the frames
	left = imutils.resize(left, width=400)
	right = imutils.resize(right, width=400)

	# stitch the frames together to form the panorama
	# IMPORTANT: you might have to change this line of code
	# depending on how your cameras are oriented; frames
	# should be supplied in left-to-right order
	result = stitcher.stitch([left, right])

	# no homography could be computed
	if result is None:
		print("[INFO] homography could not be computed")
Author: gautamits, Project: panorama_stitching, Lines: 33, Source: realtime_stitching.py
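
The excerpt is cut off inside the failure branch; a hedged sketch of how such a loop typically continues (only standard cv2 calls are assumed here, not the project's own code):

	# hedged sketch of the loop's tail, repeating the failing branch for context
	if result is None:
		print("[INFO] homography could not be computed")
		break

	# otherwise show the stitched panorama and watch for the quit key
	total += 1
	cv2.imshow("Result", result)
	if cv2.waitKey(1) & 0xFF == ord("q"):
		break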

Example 7: main

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
def main():
    # build the command-line arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--prototxt", required=False, default="/home/pi/Kenobi/recognition/MobileNetSSD_deploy.prototxt.txt",
        help="path to Caffe 'deploy' prototxt file")
    ap.add_argument("-m", "--model", required=False, default="/home/pi/Kenobi/recognition/MobileNetSSD_deploy.caffemodel",
        help="path to Caffe pre-trained model")
    ap.add_argument("-c", "--confidence", type=float, default=0.6,
        help="minimum probability to filter weak detections")
    args = vars(ap.parse_args())

    # initialize the list of objects MobileNet SSD was trained to detect and
    # assign a random detection-box color to each class; the labels are kept
    # in French because they double as the .ogg sound filenames used below
    CLASSES = ["arriere-plan", "avion", "velo", "oiseau", "bateau",
        "bouteille", "autobus", "voiture", "chat", "chaise", "vache", "table",
        "chien", "cheval", "moto", "personne", "plante", "mouton",
        "sofa", "train", "moniteur"]
    COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

    pygame.mixer.init()

    # load the model files from the storage directory
    print(" ...loading the model...")
    net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

    # initialize the Pi camera, wait 2 s for it to adjust,
    # and initialize the FPS counter
    print("...starting the Picamera...")
    vs = VideoStream(usePiCamera=True, resolution=(1600, 1200)).start()
    time.sleep(2.0)
    #fps = FPS().start()

    # main loop over the video stream
    while True:
        # grab a frame from the video stream and resize it
        # so that at most 800 pixels are displayed
        frame = vs.read()
        frame = imutils.resize(frame, width=800)

        # grab the frame dimensions and transform the frame into a blob
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843, (300, 300), 127.5)

        # run the network to compute the detections and predictions
        net.setInput(blob)
        detections = net.forward()

        # loop over the detections
        list_objects = []
        for i in np.arange(0, detections.shape[2]):
            # compute the probability of the detected object from the prediction
            confidence = detections[0, 0, i, 2]
            
            # discard weak detections below the minimum probability
            if confidence > args["confidence"]:
                # extract the index of the detected object's class and
                # compute the coordinates of the detection window
                idx = int(detections[0, 0, i, 1])
                #box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                #(startX, startY, endX, endY) = box.astype("int")

                # draw the contour around the detected object and
                # insert the prediction label for it
                #label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                #cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
                #y = startY - 15 if startY - 15 > 15 else startY + 15
                #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                
                # save the image of the detection
                #cv2.imwrite("detection.png", frame)
                obj = CLASSES[idx]
                if obj not in list_objects:
                    list_objects.append(CLASSES[idx])
        
        # display the video stream in a window
        #cv2.imshow("Frame", frame)
        #key = cv2.waitKey(1) & 0xFF  # line needed for the frame to actually be displayed

        # Pronounce the objects seen
        print(list_objects)
        for anobject in list_objects:
            path_to_sound = "/home/pi/Kenobi/recognition/vocabulary/" + anobject + ".ogg"
            if os.path.isfile(path_to_sound):
                pygame.mixer.music.load(path_to_sound)
                pygame.mixer.music.play()
                # Play until end of music file
                while pygame.mixer.music.get_busy() == True:
                    pygame.time.Clock().tick(10)

        # the 'q' key interrupts the main loop
        #if key == ord("q"):
        #   break

        # update the FPS counter
        #fps.update()

    # stop the counter and print the timing info to the console
    #fps.stop()
    #print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    #print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
#......... part of this code omitted .........
Author: SebGeek, Project: Kenobi, Lines: 103, Source: detect_objects.py

Example 8: VideoStream

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
redUpper1 = (10, 255, 255)
redLower2 = (160, 100, 100)
redUpper2 = (179, 255, 255)

# initialize the list of tracked points, the frame counter,
# and the coordinate deltas
(dX, dY) = (0, 0)

video_stream = VideoStream(usePiCamera=False, resolution=(640,480), framerate=32).start()
time.sleep(2)

# keep looping
while True:
	# grab the current frame
	# image = video_stream.read()
	frame = video_stream.read()

	# resize the frame, blur it, and convert it to the HSV
	# color space
	# frame = imutils.resize(image, width=400)
	blurred = cv2.GaussianBlur(frame, (11, 11), 0)
	hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

	# construct a mask for the color red (two hue ranges, because red
	# wraps around the ends of the 0-179 HSV hue scale), then perform
	# a series of dilations and erosions to remove any small
	# blobs left in the mask
	mask1 = cv2.inRange(hsv, redLower1, redUpper1)
	mask2 = cv2.inRange(hsv, redLower2, redUpper2)
	mask = mask1 + mask2
	mask = cv2.erode(mask, None, iterations=2)
	mask = cv2.dilate(mask, None, iterations=2)
Author: coffeecold, Project: VideoTracker, Lines: 33, Source: object_tracker.py
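
The excerpt stops after the morphology; a hedged sketch of a typical continuation that finds the largest red blob and its centroid (imutils.grab_contours smooths over the differing findContours return shapes across OpenCV versions):

	# hedged continuation: locate the largest red blob and mark its centroid
	cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
		cv2.CHAIN_APPROX_SIMPLE)
	cnts = imutils.grab_contours(cnts)
	if len(cnts) > 0:
		c = max(cnts, key=cv2.contourArea)
		M = cv2.moments(c)
		if M["m00"] > 0:
			center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
			cv2.circle(frame, center, 5, (0, 0, 255), -1)
	cv2.imshow("Frame", frame)
	if cv2.waitKey(1) & 0xFF == ord("q"):
		break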

Example 9: Matcher

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
import time
import cv2
import imutils
from imutils.video import VideoStream


from matcher import Matcher

matcher = Matcher([("fau-logo", "./templates/fau-logo.png"),
                   ("first-logo", "./templates/first-logo.jpg"),
                   ("nextera-logo", "./templates/nextera-energy-logo.jpg"),
                   ("techgarage-logo", "./templates/techgarage-logo.png")
                   ], min_keypoints_pct_match=8)

cam = VideoStream(usePiCamera=False).start()

cnt = 0
while True:
    img = cam.read()
    cv2.imshow("Pic", img)
    print(matcher.match(img))   # Python 3 print; the original used Python 2 syntax
    key = cv2.waitKey(10)
    if key == ord('q'):
        break

cam.stop()
cv2.destroyAllWindows()
Author: Rchalla769, Project: Drones, Lines: 29, Source: test_matcher_rpi.py

Example 10: print

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
fourcc = cv2.VideoWriter_fourcc(*args["codec"])
out = None
(h, w) = (None, None)
zeros = None
A = None
wait_time = None
record = False
WindowName = args["windowname"]

n=0
print("Initialize streaming")
while True:
    t0 = time.time()

    # grab the frame from the video stream and resize it to have a max width
    gray = vs.read()
    # frame = imutils.resize(frame, args["width"])
    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow(WindowName, gray)

    if record:
        output = np.zeros((h, w),dtype="uint8")
        output[0:h, 0:w] = gray
        #output = np.zeros((h, w, 3),dtype="uint8")
        #output[0:h, 0:w] = frame #grayscale movie
        out.write(output)
        cv2.imshow(WindowName + "_RECORDING", output)
        n+=1

        if (n % 100) == 0:
            print("The number of frames collected: {0} frames".format(n))
Author: ackmanlab, Project: piCamera, Lines: 33, Source: stream_to_avi.py
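
The part of stream_to_avi.py that flips record and creates out is cut off. A hypothetical key-handling tail consistent with the names above (args["output"] is assumed here, not taken from the original):

    # hypothetical key-handling tail: toggle recording with 'r', quit with 'q'
    key = cv2.waitKey(1) & 0xFF
    if key == ord("r") and not record:
        (h, w) = gray.shape[:2]
        # isColor=False because single-channel frames are written above
        out = cv2.VideoWriter(args["output"], fourcc, 30, (w, h), False)
        record = True
    elif key == ord("q"):
        break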

Example 11: DualCamera

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
class DualCamera():
    def __init__(self):
        self.root = Tk()
        self.root.wm_title("Dual Cam")

        self.saving = False
        self.frames = []
        self.overlay = False

        self.image_left = ImageTk.PhotoImage(image=Image.fromarray(np.uint8(np.zeros((256, 256)))))
        self.image_panel_left = Label(self.root, image = self.image_left)
        self.image_panel_left.grid(row = 0, column = 0, columnspan=2)

        self.image_right = ImageTk.PhotoImage(image=Image.fromarray(np.uint8(256 * np.random.rand(256, 256))))
        self.image_panel_right = Label(self.root, image = self.image_right)
        self.image_panel_right.grid(row = 0, column = 2, columnspan=2)

        self.save_button = Button(width = 10, height = 2, text = 'Save', command=self.save)
        self.save_button.grid(row = 1, column = 0)

        self.calibrate_button = Button(width = 10, height = 2, text = 'Calibrate', command=self.calibrate)
        self.calibrate_button.grid(row = 1, column = 1)

        self.close_button = Button(width = 10, height = 2, text = 'Close', command=self.quit)
        self.close_button.grid(row = 1, column = 3)

        self.overlay_button = Button(width=10, height=2, text='Overlay', command=self.toggle_overlay)
        self.overlay_button.grid(row = 1, column = 2)

        self.bias_slider = Scale(self.root, from_=0, to=31, length=400, orient=HORIZONTAL, command=self.bias)
        self.bias_slider.grid(row = 2, column = 1, columnspan=3)
        self.bias_label = Label(self.root, text="Bias current")
        self.bias_label.grid(row=2, column=0)

        self.clock_slider = Scale(self.root, from_=0, to=63, length=400, orient=HORIZONTAL, command=self.clock)
        self.clock_slider.grid(row = 3, column = 1, columnspan=3)
        self.clock_label = Label(self.root, text="Clock speed")
        self.clock_label.grid(row=3, column=0)

        self.cm_slider = Scale(self.root, from_=0, to=31, length=400, orient=HORIZONTAL, command=self.cm)
        self.cm_slider.grid(row = 4, column = 1, columnspan=3)
        self.cm_label = Label(self.root, text="CM current")
        self.cm_label.grid(row=4, column=0)

        # set default positions
        self.cm_slider.set(0x0C)
        self.clock_slider.set(0x15)
        self.bias_slider.set(0x05)


        # initialize visible camera
        self.vs = VideoStream(usePiCamera=True).start()

        # thread for reading from sensor hardware into an image queue
        self.ir_images = Queue.LifoQueue()
        self.ir_commands = Queue.Queue()
        self.ir_calibrate = threading.Event()
        self.ir_stop = threading.Event()
        self.raw_ir_images = Queue.LifoQueue()

        self.capture_thread = threading.Thread(
                        target=ir_capture,
                        name="capture_thread",
                        args=[self.ir_images, self.ir_calibrate, self.ir_stop, self.ir_commands, self.raw_ir_images]
                        )

        self.capture_thread.start()

        self.ticktock()
        self.root.mainloop()

    def ticktock(self):
        # grab an image from the camera
        frame = self.vs.read()
        changed = False

        if frame is not None:
            frame = imutils.resize(frame, height=240)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.last_vis_frame = frame
            image = Image.fromarray(frame)

            if not self.overlay:
                self.image_right = ImageTk.PhotoImage(image=image)
                self.image_panel_right.configure(image=self.image_right)

            changed = True

        if not self.ir_images.empty():
            ir_frame = self.ir_images.get()

            if not self.ir_images.empty():
                with self.ir_images.mutex:
                    self.ir_images.queue = []

            ir_image = imutils.resize(ir_frame, height=240, inter=cv2.INTER_LINEAR)
            ir_image = np.dstack((ir_image, ir_image, ir_image))
            ir_image = cv2.LUT(ir_image, colormap).astype('uint8')
           
            self.last_ir_frame = ir_image
#......... part of this code omitted .........
Author: nens90, Project: thermografree, Lines: 103, Source: dualcam.py

Example 12: viewCameraPi

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
    def viewCameraPi(self):

        #text = 'This is a message from app to inform that app start running now!'
        #statusSMS = outboundSMSviaTwilio(account=self.account, token=self.token, destPhone=self.destPhone1,
        #                              twilioNumber=self.twilioNumber, message_body=text)
        #statusSMS = outboundSMSviaTwilio(account=self.account, token=self.token, destPhone=self.destPhone2,
        #                              twilioNumber=self.twilioNumber, message_body=text)
        print("START SCRIPT AND MAJOR WARNING!")
        statusSMS = 'delivered'
        if((statusSMS != 'failed') and (statusSMS != 'undelivered')):
            #camera = PiCamera()
            #camera.resolution = ( 640, 480)
            #camera.framerate = 32

            #rawCapture = PiRGBArray(camera, size=( 640, 480))
            #self.sumMSE = self.sumSSIM = self.avgMSE = self.avgSSIM = 0
            self.tempHour = datetime.datetime.now().hour
            self.tempMinute = datetime.datetime.now().minute
            self.warmup = 0

            vs = VideoStream(usePiCamera=1, resolution=(640, 480)).start()
            time.sleep(1.2)

            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = None
            (h, w) = (None, None)
            zeros = None

            print ("view camera")

            while True:
                self.warmup += 1
                if(self.warmup >= 5):
                    frame = vs.read()
                    if writer is None:
                        # store the image dimensions, initialize the video writer,
                        # and construct the zeros array
                        (h, w) = frame.shape[:2]
                        writer = cv2.VideoWriter('exampleTH3.avi', fourcc, 20,
                            (w, h), True)

                    writer.write(frame)

                    self.curImage = frame
                    self.getDefImagePerHours()
                    # write xml
                    print("processing frame number {}".format(self.warmup - 4))
                    self.writeXML()

                    # stop the program after 5 minutes:
                    if(datetime.datetime.now().minute - self.tempMinute > 5):
                        self.final()
                        break


            # for frame in camera.capture_continuous( rawCapture, format("bgr"), use_video_port = True):
            #
            #     self.curImage = frame.array
            #     frame = vs.read()
            #     self.warmup +=1
            #     if(self.warmup >=5):
            #         self.getDefImagePerHours()
            #         # self.getDefImagePerTenMinutes()
            #
            #         #write xml
            #         print("process frame thu {}".format(self.warmup-4))
            #         self.writeXML()
            #
            #         #warning
            #         # self.warning()
            #
            #         # stop the program after 5 minutes:
            #         if(datetime.datetime.now().minute - self.tempMinute >5):
            #             self.final()
            #             break
            #     # at 16:00, the script will shut itself down
            #     # tempBreak = datetime.datetime.now().hour
            #     # if(( tempBreak == 0) or (tempBreak == 6) or (tempBreak == 18) or (tempBreak == 12)):
            #     #     if(datetime.datetime.now().minute == 0):
            #     #         if((datetime.datetime.now().second >= 0) and (datetime.datetime.now().second <=3)):
            #     #             self.final()
            #     #             self.__init__(tempBreak)
            #
            #     # show frame
            #     # cv2.imshow("image", self.curImage)
            #     # key = cv2.waitKey(1) & 0xFF
            #
            #     #renew
            #     rawCapture.truncate(0)
            #
            #     #press 'q' to stop, press any key to continue
            #     # if(key == ord("q")):
            #     #     break
            #call function final
            vs.stop()
            writer.release()
            self.final()


        else:
#......... part of this code omitted .........
Author: ChrisDan9, Project: Raspi-Cam, Lines: 103, Source: main.py

Example 13: __init__

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
class TEVideoHandler:
	def __init__(self):
		self.FRAME_WIDTH = conf.DEF_FRAME_WIDTH
		self.FRAME_HEIGHT = conf.DEF_FRAME_HEIGHT

		# devices
		self.video_file = None
		self.camera = None
		self.picamera = None
		
	def set_frame_size(self, w, h):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Frame size need to be set before initialization")

		self.FRAME_WIDTH = w
		self.FRAME_HEIGHT = h

	def initialize_with_file(self, filename):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Already Initialized")

		self.video_file = cv2.VideoCapture(filename)

	def initialize_with_configured_cam(self):
		cam_selector = {
			conf.CameraType.PYCAMERA:			lambda: self.initialize_with_pycamera(),
			conf.CameraType.PYCAMERA_ROBUST:	lambda: self.initialize_with_pycamera2(),
			conf.CameraType.WEBCAM:				lambda: self.initialize_with_webcam(),
		}

		cam_selector[conf.CAMERA_TYPE]()


	def initialize_with_pycamera(self):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Already Initialized")

		self.camera = VideoStream(usePiCamera=True).start()
		time.sleep(2.0)

	# It uses picamera library to disable auto control feature
	def initialize_with_pycamera2(self):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Already Initialized")

		self.picamera = PiCamera()
		self.picamera.resolution = (self.FRAME_WIDTH, self.FRAME_HEIGHT)
		self.picamera.framerate = 30
		self.rawCapture = PiRGBArray(self.picamera, size=(self.FRAME_WIDTH, self.FRAME_HEIGHT))

		time.sleep(0.1)

		self.picamera.shutter_speed = self.picamera.exposure_speed
		self.picamera.exposure_mode = 'off'
		g = self.picamera.awb_gains
		self.picamera.awb_mode = 'off'
		self.picamera.awb_gains = g
		self.stream = self.picamera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True)


	# Tested with an external monitor webcam, but not verified with a MacBook's built-in webcam
	def initialize_with_webcam(self):
		if self.video_file is not None or self.camera is not None or self.picamera is not None:
			raise TEVideoException("Already Initialized")

		self.camera = VideoStream().start()
		time.sleep(2.0)

	# Read a frame
	# Return: frame (pixel array)
	# Note: if no frame could be grabbed (for a video file), raise an exception
	def read(self):
		frame = None
		if self.video_file is not None:
			(grabbed, frame) = self.video_file.read()
			if not grabbed:
				raise TEInvalidFrameException()
		elif self.camera is not None:
			frame = self.camera.read()
		elif self.picamera is not None:
			data = self.stream.next()  # Python 2 iterator API; on Python 3 use next(self.stream)
			frame = data.array
			self.rawCapture.truncate(0)

		# If still null frame,
		if frame is None:
			raise TEInvalidFrameException()

		# resize the frame
		frame = imutils.resize(frame, width=self.FRAME_WIDTH)

		return frame

	def release(self):
		if self.video_file is not None:
			self.video_file.release()
		elif self.camera is not None:
			# Pycamera may not have the release function
			if hasattr(self.camera, 'release'): 
				self.camera.release()
Author: myshepherd, Project: tritoneye, Lines: 102, Source: videoutil.py
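
A short hypothetical caller for this wrapper (not part of the original videoutil.py; conf, TEVideoException, and TEInvalidFrameException come from the same project and are assumed importable):

# Hypothetical caller sketch for TEVideoHandler
import cv2

handler = TEVideoHandler()
handler.initialize_with_configured_cam()   # or handler.initialize_with_file("clip.avi")

try:
    while True:
        frame = handler.read()             # raises TEInvalidFrameException at end of file
        cv2.imshow("frame", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
except TEInvalidFrameException:
    pass                                   # video file exhausted
finally:
    handler.release()
    cv2.destroyAllWindows()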

Example 14: deque

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
green_upper = (80, 255, 150)
pts = deque(maxlen=args["buffer"])

camera = VideoStream(usePiCamera=args["picamera"] > 0).start()
# go to target
theta_total = "1/011/2/020/3/155/4/080"
print(theta_total)   # Python 3 print; the original used Python 2 syntax
print('going to target')
ser.write(theta_total)
#time.sleep(2)
ser.write(theta_total)
#time.sleep(8)

time.sleep(2.0)
while True:
        frame = camera.read()
        frame = imutils.resize(frame, width=400)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, green_lower, green_upper)
        cv2.imshow('OrigFrame', frame)
        #cv2.imshow('Original Mask',mask)
        mask = cv2.erode(mask, None, iterations=3)
        #cv2.imshow('Erode Mask',mask)
        mask = cv2.dilate(mask, None, iterations=3)
        cv2.imshow('Dilate Mask', mask)
        # contours
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        #time.sleep(10)
        center=None
        if len(cnts)>0:
Author: srajend2, Project: RaspberryPi_Files, Lines: 33, Source: FPilot_version_Rect2.py

Example 15: VideoStream

# Required import: from imutils.video import VideoStream [as alias]
# Or alternatively: from imutils.video.VideoStream import read [as alias]
import cv2
import sys
import time
from imutils.video import VideoStream
import imutils

# setting up the PiCamera and its variables
video_stream = VideoStream(usePiCamera=True, resolution=(480,320), framerate=25).start()
time.sleep(2)

x = 0


# capture frames from the camera using the piCamera library
while True:
    print(x)

    # Capture frame-by-frame
    image = video_stream.read()

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    cv2.imshow("Frame", gray)
    # use a forward slash: in "test-images\frame...", "\f" is a form-feed escape
    cv2.imwrite("test-images/frame{}.jpg".format(x), gray)
    key = cv2.waitKey(1) & 0xFF
    x = x + 1

    # break code: `key` already holds the key read above; the original
    # chained comparison called waitKey a second time and never matched
    if key == ord('q'):
        break
Author: coffeecold, Project: VideoTracker, Lines: 32, Source: save-frames.py


Note: the imutils.video.VideoStream.read examples in this article were collected by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.