

Python VideoStream.stop Method Code Examples

This article collects typical usage examples of the Python method imutils.video.VideoStream.stop. If you have been wondering what exactly VideoStream.stop does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of imutils.video.VideoStream, the class this method belongs to.


Eight code examples of VideoStream.stop are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
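Before the individual examples, here is a minimal sketch of the start/read/stop lifecycle that all of the examples below follow. This sketch is illustrative only and is not taken from any of the listed projects; on a Raspberry Pi you would pass usePiCamera=True instead, which assumes the optional picamera dependency is installed.

# Minimal illustrative sketch of the VideoStream lifecycle (not from the projects below)
from imutils.video import VideoStream
import time
import cv2

vs = VideoStream(usePiCamera=False).start()  # start() launches the background capture thread
time.sleep(2.0)  # give the camera sensor time to warm up

while True:
    frame = vs.read()  # read() returns the most recently captured frame (non-blocking)
    if frame is None:
        break
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):  # press 'q' to quit
        break

cv2.destroyAllWindows()
vs.stop()  # stop() signals the capture thread to exit and releases the camera

Note the recurring pattern in every example: stop() is always paired with cv2.destroyAllWindows() during cleanup, plus writer.release() whenever a cv2.VideoWriter is open.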

Example 1: main

# Required import: from imutils.video import VideoStream [as alias]
# Alternatively: from imutils.video.VideoStream import stop [as alias]
def main():
	global frame, key
	# initialize the threaded camera stream (portrait resolution;
	# frames are transposed to landscape inside the loop below)
	wdth = 360
	hgth = 800
	camera = VideoStream(usePiCamera=True,resolution=(wdth,hgth)).start()
	time.sleep(2.0)
	fourcc = cv2.VideoWriter_fourcc(*'MJPG')
	writer = None
	(h,w) = (None, None)
	# setup the mouse callback
	cv2.startWindowThread()
	cv2.namedWindow("Detection")
	cv2.setMouseCallback("Detection",mouseOn)
	# keep looping over the frames
	#for frame2 in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
	while True:
		frame = camera.read()
		frame = cv2.transpose(frame)
		frame = cv2.flip(frame, 1)
		timestamp = datetime.datetime.now()
		ts = timestamp.strftime("%d/%m/%Y %H:%M:%S")
		cv2.putText(frame,ts,(10,frame.shape[0]-10),cv2.FONT_HERSHEY_SIMPLEX,0.35,(0,255,0),1)
		if writer is None:
			(h,w) = frame.shape[:2]
			writer = cv2.VideoWriter("/media/usb/test_" + timestamp.strftime("%d_%m_%Y_%H%M") + ".avi", fourcc,5,(w,h), True)
		writer.write(frame)
		cv2.imshow("Detection", frame);
		#cv2.setMouseCallback("Detection",mouseOn)
		#key = cv2.waitKey(10) & 0xFF
		# if the 'q' key is pressed, stop the loop
		if key == ord("q"): #cv2.EVENT_LBUTTONDOWN: #ord("q"):
	#		cv2.destroyAllWindows()
	#		camera.stop()
			break
	# cleanup the camera and close any open windows
	cv2.destroyAllWindows()
	camera.stop()
Developer: kel85uk | Project: RaspberryPi_Projects | Lines: 40 | Source: track.py

Example 2: ord

# Required import: from imutils.video import VideoStream [as alias]
# Alternatively: from imutils.video.VideoStream import stop [as alias]
    #G = cv2.merge([zeros, G, zeros])
    #B = cv2.merge([B, zeros, zeros])
 
    # construct the final output frame, storing the original frame
    # at the top-left, the red channel in the top-right, the green
    # channel in the bottom-right, and the blue channel in the
    # bottom-left
    output = np.zeros((h, w, 3), dtype="uint8")
    output[0:h, 0:w] = frame
    #output[0:h, w:w * 2] = R
    #output[h:h * 2, w:w * 2] = G
    #output[h:h * 2, 0:w] = B
 
    # write the output frame to file
    writer.write(output)

    # show the frames
    cv2.imshow("Frame", frame)
    cv2.imshow("Output", output)
    key = cv2.waitKey(1) & 0xFF
 
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
 
# do a bit of cleanup
print("[INFO] cleaning up...")
cv2.destroyAllWindows()
vs.stop()
writer.release()
Developer: qharryq | Project: crowd-control-pi | Lines: 32 | Source: save_video.py

Example 3: max

# Required import: from imutils.video import VideoStream [as alias]
# Alternatively: from imutils.video.VideoStream import stop [as alias]
			(minX, maxX) = (min(minX, x), max(maxX, x + w))
			(minY, maxY) = (min(minY, y), max(maxY, y + h))

		# draw the bounding box
		cv2.rectangle(result, (minX, minY), (maxX, maxY),
			(0, 0, 255), 3)

	# increment the total number of frames read and draw the 
	# timestamp on the image
	total += 1
	timestamp = datetime.datetime.now()
	ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
	cv2.putText(result, ts, (10, result.shape[0] - 10),
		cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

	# show the output images
	cv2.imshow("Result", result)
	cv2.imshow("Left Frame", left)
	cv2.imshow("Right Frame", right)
	key = cv2.waitKey(1) & 0xFF

	# if the `q` key was pressed, break from the loop
	if key == ord("q"):
		break

# do a bit of cleanup
print("[INFO] cleaning up...")
cv2.destroyAllWindows()
leftStream.stop()
rightStream.stop()
Developer: gautamits | Project: panorama_stitching | Lines: 32 | Source: realtime_stitching.py

Example 4: main

# Required import: from imutils.video import VideoStream [as alias]
# Alternatively: from imutils.video.VideoStream import stop [as alias]

#......... part of the code omitted here .........
    ap.add_argument("-p", "--prototxt", required=False, default="/home/pi/Kenobi/recognition/MobileNetSSD_deploy.prototxt.txt",
        help="path to Caffe 'deploy' prototxt file")
    ap.add_argument("-m", "--model", required=False, default="/home/pi/Kenobi/recognition/MobileNetSSD_deploy.caffemodel",
        help="path to Caffe pre-trained model")
    ap.add_argument("-c", "--confidence", type=float, default=0.6,
        help="minimum probability to filter weak detections")
    args = vars(ap.parse_args())

    # initialize the list of class labels MobileNet SSD was trained to detect
    # and generate a random bounding-box color for each class (the labels are
    # kept in French because they are used to look up the .ogg sound files below)
    CLASSES = ["arriere-plan", "avion", "velo", "oiseau", "bateau",
        "bouteille", "autobus", "voiture", "chat", "chaise", "vache", "table",
        "chien", "cheval", "moto", "personne", "plante", "mouton",
        "sofa", "train", "moniteur"]
    COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

    pygame.mixer.init()

    # load the serialized model files from disk
    print("...loading the model...")
    net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

    # initialize the Pi camera, wait 2s for it to focus,
    # and initialize the FPS counter
    print("...starting the PiCamera...")
    vs = VideoStream(usePiCamera=True, resolution=(1600, 1200)).start()
    time.sleep(2.0)
    #fps = FPS().start()

    # main loop over the video stream
    while True:
        # grab a frame from the stream and resize it
        # to display at most 800 pixels wide
        frame = vs.read()
        frame = imutils.resize(frame, width=800)

        # grab the frame dimensions and convert the frame to a blob
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843, (300, 300), 127.5)

        # pass the blob through the network to obtain the detections and predictions
        net.setInput(blob)
        detections = net.forward()

        # loop over the detections
        list_objects = []
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (probability) associated with the prediction
            confidence = detections[0, 0, i, 2]
            
            # filter out weak detections below the minimum confidence threshold
            if confidence > args["confidence"]:
                # extract the index of the detected class label
                # and compute the coordinates of the detection box
                idx = int(detections[0, 0, i, 1])
                #box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                #(startX, startY, endX, endY) = box.astype("int")

                # draw a box around the detected object
                # and label it with the prediction
                #label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                #cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
                #y = startY - 15 if startY - 15 > 15 else startY + 15
                #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                
                # save the annotated frame to disk
                #cv2.imwrite("detection.png", frame)
                obj = CLASSES[idx]
                if obj not in list_objects:
                    list_objects.append(CLASSES[idx])
        
        # display the video stream in a window
        #cv2.imshow("Frame", frame)
        #key = cv2.waitKey(1) & 0xFF  # required for the frame window to refresh

        # Pronounce the objects seen
        print(list_objects)
        for anobject in list_objects:
            path_to_sound = "/home/pi/Kenobi/recognition/vocabulary/" + anobject + ".ogg"
            if os.path.isfile(path_to_sound):
                pygame.mixer.music.load(path_to_sound)
                pygame.mixer.music.play()
                # Play until end of music file
                while pygame.mixer.music.get_busy() == True:
                    pygame.time.Clock().tick(10)

        # the q key breaks out of the main loop
        #if key == ord("q"):
        #   break

        # update the FPS counter
        #fps.update()

    # stop the timer and display FPS information in the console
    #fps.stop()
    #print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    #print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    cv2.destroyAllWindows()
    vs.stop()
Developer: SebGeek | Project: Kenobi | Lines: 104 | Source: detect_objects.py

Example 5: DualCamera

# Required import: from imutils.video import VideoStream [as alias]
# Alternatively: from imutils.video.VideoStream import stop [as alias]
class DualCamera():
    def __init__(self):
        self.root = Tk()
        self.root.wm_title("Dual Cam")

        self.saving = False
        self.frames = []
        self.overlay = False

        self.image_left = ImageTk.PhotoImage(image=Image.fromarray(np.uint8(np.zeros((256, 256)))))
        self.image_panel_left = Label(self.root, image = self.image_left)
        self.image_panel_left.grid(row = 0, column = 0, columnspan=2)

        self.image_right = ImageTk.PhotoImage(image=Image.fromarray(np.uint8(256 * np.random.rand(256, 256))))
        self.image_panel_right = Label(self.root, image = self.image_right)
        self.image_panel_right.grid(row = 0, column = 2, columnspan=2)

        self.save_button = Button(width = 10, height = 2, text = 'Save', command=self.save)
        self.save_button.grid(row = 1, column = 0)

        self.calibrate_button = Button(width = 10, height = 2, text = 'Calibrate', command=self.calibrate)
        self.calibrate_button.grid(row = 1, column = 1)

        self.close_button = Button(width = 10, height = 2, text = 'Close', command=self.quit)
        self.close_button.grid(row = 1, column = 3)

        self.overlay_button = Button(width=10, height=2, text='Overlay', command=self.toggle_overlay)
        self.overlay_button.grid(row = 1, column = 2)

        self.bias_slider = Scale(self.root, from_=0, to=31, length=400, orient=HORIZONTAL, command=self.bias)
        self.bias_slider.grid(row = 2, column = 1, columnspan=3)
        self.bias_label = Label(self.root, text="Bias current")
        self.bias_label.grid(row=2, column=0)

        self.clock_slider = Scale(self.root, from_=0, to=63, length=400, orient=HORIZONTAL, command=self.clock)
        self.clock_slider.grid(row = 3, column = 1, columnspan=3)
        self.clock_label = Label(self.root, text="Clock speed")
        self.clock_label.grid(row=3, column=0)

        self.cm_slider = Scale(self.root, from_=0, to=31, length=400, orient=HORIZONTAL, command=self.cm)
        self.cm_slider.grid(row = 4, column = 1, columnspan=3)
        self.cm_label = Label(self.root, text="CM current")
        self.cm_label.grid(row=4, column=0)

        # set default positions
        self.cm_slider.set(0x0C)
        self.clock_slider.set(0x15)
        self.bias_slider.set(0x05)


        # initialize visible camera
        self.vs = VideoStream(usePiCamera=True).start()

        # thread for reading from sensor hardware intro an image queue           
        self.ir_images = Queue.LifoQueue()
        self.ir_commands = Queue.Queue()
        self.ir_calibrate = threading.Event()
        self.ir_stop = threading.Event()
        self.raw_ir_images = Queue.LifoQueue()

        self.capture_thread = threading.Thread(
                        target=ir_capture,
                        name="capture_thread",
                        args=[self.ir_images, self.ir_calibrate, self.ir_stop, self.ir_commands, self.raw_ir_images]
                        )

        self.capture_thread.start()

        self.ticktock()
        self.root.mainloop()

    def ticktock(self):
        # grab an image from the camera
        frame = self.vs.read()
        changed = False

        if frame is not None:
            frame = imutils.resize(frame, height=240)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.last_vis_frame = frame
            image = Image.fromarray(frame)

            if not self.overlay:
                self.image_right = ImageTk.PhotoImage(image=image)
                self.image_panel_right.configure(image=self.image_right)

            changed = True

        if not self.ir_images.empty():
            ir_frame = self.ir_images.get()

            if not self.ir_images.empty():
                with self.ir_images.mutex:
                    self.ir_images.queue = []

            ir_image = imutils.resize(ir_frame, height=240, inter=cv2.INTER_LINEAR)
            ir_image = np.dstack((ir_image, ir_image, ir_image))
            ir_image = cv2.LUT(ir_image, colormap).astype('uint8')
           
            self.last_ir_frame = ir_image
#......... part of the code omitted here .........
Developer: nens90 | Project: thermografree | Lines: 103 | Source: dualcam.py

Example 6: viewCameraPi

# Required import: from imutils.video import VideoStream [as alias]
# Alternatively: from imutils.video.VideoStream import stop [as alias]
    def viewCameraPi(self):

        #text = 'This is a message from app to inform that app start running now!'
        #statusSMS = outboundSMSviaTwilio(account=self.account, token=self.token, destPhone=self.destPhone1,
        #                              twilioNumber=self.twilioNumber, message_body=text)
        #statusSMS = outboundSMSviaTwilio(account=self.account, token=self.token, destPhone=self.destPhone2,
        #                              twilioNumber=self.twilioNumber, message_body=text)
        print("START SCRIPT AND MAJOR WARNING!")
        statusSMS = 'delivered'
        if((statusSMS != 'failed') and (statusSMS != 'undelivered')):
            #camera = PiCamera()
            #camera.resolution = ( 640, 480)
            #camera.framerate = 32

            #rawCapture = PiRGBArray(camera, size=( 640, 480))
            #self.sumMSE = self.sumSSIM = self.avgMSE = self.avgSSIM = 0
            self.tempHour = datetime.datetime.now().hour
            self.tempMinute = datetime.datetime.now().minute
            self.warmup = 0

            vs = VideoStream(usePiCamera=1,resolution=(640,480)).start()
            time.sleep(1.2)

            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = None
            (h, w) = (None, None)
            zeros = None

            print ("view camera")

            while True:
                self.warmup+=1
                if(self.warmup >=5):
                    frame = vs.read()
                    if writer is None:
                        # store the image dimensions, initialize the video writer,
                        # and construct the zeros array
                        (h, w) = frame.shape[:2]
                        writer = cv2.VideoWriter('exampleTH3.avi', fourcc, 20,
                            (w, h), True)

                    writer.write(frame)

                    self.curImage = frame
                    self.getDefImagePerHours()
                    #write xml
                    print("process frame thu {}".format(self.warmup-4))
                    self.writeXML()

                    # stop the program after 5 minutes:
                    if(datetime.datetime.now().minute - self.tempMinute >5):
                        self.final()
                        break


            # for frame in camera.capture_continuous( rawCapture, format("bgr"), use_video_port = True):
            #
            #     self.curImage = frame.array
            #     frame = vs.read()
            #     self.warmup +=1
            #     if(self.warmup >=5):
            #         self.getDefImagePerHours()
            #         # self.getDefImagePerTenMinutes()
            #
            #         #write xml
            #         print("process frame thu {}".format(self.warmup-4))
            #         self.writeXML()
            #
            #         #warning
            #         # self.warning()
            #
            #         # stop the program after 5 minutes:
            #         if(datetime.datetime.now().minute - self.tempMinute >5):
            #             self.final()
            #             break
            #     # if it is 16:00, the script shuts itself down
            #     # tempBreak = datetime.datetime.now().hour
            #     # if(( tempBreak == 0) or (tempBreak == 6) or (tempBreak == 18) or (tempBreak == 12)):
            #     #     if(datetime.datetime.now().minute == 0):
            #     #         if((datetime.datetime.now().second >= 0) and (datetime.datetime.now().second <=3)):
            #     #             self.final()
            #     #             self.__init__(tempBreak)
            #
            #     # show frame
            #     # cv2.imshow("image", self.curImage)
            #     # key = cv2.waitKey(1) & 0xFF
            #
            #     #renew
            #     rawCapture.truncate(0)
            #
            #     #press 'q' to stop, press any key to continue
            #     # if(key == ord("q")):
            #     #     break
            #call function final
            vs.stop()
            writer.release()
            self.final()


        else :
#......... part of the code omitted here .........
Developer: ChrisDan9 | Project: Raspi-Cam | Lines: 103 | Source: main.py

Example 7: len

# Required import: from imutils.video import VideoStream [as alias]
# Alternatively: from imutils.video.VideoStream import stop [as alias]
        # find contours in the mask
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        #time.sleep(10)
        center=None
        if len(cnts)>0:
                cnt=cnts[0]
                rect = cv2.minAreaRect(cnt)
                box = cv2.boxPoints(rect)
                box=np.int0(box)
                x1=(box[3][0]-(box[0][0]))
                y1=(box[3][1]-(box[0][1]))
                print 'x1', x1
                print 'y1', y1
                
                if ((abs(x1)>70)or (abs(y1)>70)):
                        #time.sleep(5)
                        cv2.imshow('OrigFrame',frame)
                        execfile("InvKin_Control.py")
                        print "picking up"
                cv2.drawContours(frame,[box],0,(0,0,255),2)                
        
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
                break


camera.stop()
cv2.destroyAllWindows()
          
Developer: srajend2 | Project: RaspberryPi_Files | Lines: 31 | Source: FPilot_version_Rect2.py

Example 8: Matcher

# Required import: from imutils.video import VideoStream [as alias]
# Alternatively: from imutils.video.VideoStream import stop [as alias]
import time
import cv2
import imutils
from imutils.video import VideoStream


from matcher import Matcher

matcher = Matcher([("fau-logo", "./templates/fau-logo.png"),
                   ("first-logo", "./templates/first-logo.jpg"),
                   ("nextera-logo", "./templates/nextera-energy-logo.jpg"),
                   ("techgarage-logo", "./templates/techgarage-logo.png")
                   ], min_keypoints_pct_match=8)

cam = VideoStream(usePiCamera=False).start()

cnt = 0
while True:
    img = cam.read()
    cv2.imshow("Pic", img)
    print matcher.match(img)
    key = cv2.waitKey(10)
    if key == ord('q'):
        break

cam.stop()
cv2.destroyAllWindows()
Developer: Rchalla769 | Project: Drones | Lines: 29 | Source: test_matcher_rpi.py


Note: The imutils.video.VideoStream.stop method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and redistribution and use are subject to each project's license. Please do not reproduce without permission.