

Python VideoCapture.read Method Code Examples

This article collects typical usage examples of the Python method cv2.VideoCapture.read. If you are wondering what VideoCapture.read does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of cv2.VideoCapture itself.


The following presents 15 code examples of VideoCapture.read, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
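Before the collected examples, here is a minimal sketch of the basic capture-and-read loop (not taken from any of the projects below; the device index 0 and the window name are placeholders):

import cv2

cap = cv2.VideoCapture(0)              # 0 = default camera; a filename or stream URL also works
if not cap.isOpened():
    raise RuntimeError("could not open capture source")

while True:
    ok, frame = cap.read()             # ok is False when no frame could be grabbed
    if not ok:
        break
    cv2.imshow("preview", frame)
    if cv2.waitKey(1) == 27:           # ESC quits
        break

cap.release()
cv2.destroyAllWindows()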

Example 1: start

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
def start():
    '''
    Probe camera indices 0-9 while showing a splash screen, then launch
    the main GUI with the list of cameras that responded.
    '''
    #Load splash screen
    splScr = splash()
    found = []
    #find connected cameras        
    for num in range(10):
        cam = VideoCapture(num)  # constructing VideoCapture already opens the device
        #show progress bar 'movement' while the main program finds cameras
        splScr.update()
        if not cam.read()[0]:
            del(cam)
        else:
            cam.release()
            found.append(num)
        while gtk.events_pending():
            gtk.main_iteration()
    #destroy the splash screen once all cameras have been found
    splScr.destroy()
    print 'connected cameras:', len(found)
    #run main program
    main_gui(found)
    gtk.main()
    return
Developer: xDMxosiris, Project: hekate, Lines: 29, Source: gui_main.py

Example 2: __init__

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
class CaptureSource:
    def __init__(self):
        self._video = None
        self._image = None

    def get_resolution(self):
        if (self._video):
            return (int(self._video.get(CAP_PROP_FRAME_WIDTH)),
                    int(self._video.get(CAP_PROP_FRAME_HEIGHT)))

    def camera(self, num_cams):
        cam = Cameras()
        cam.check_cameras(num_cams)
        self._video = cam.show_and_select_camera()

    def video(self, filename):
        self._video = VideoCapture(filename)

    def image(self, filename):
        self._image = filename

    def get_frame(self):
        if self._video:
            retval, frame = self._video.read()
            return retval, frame
        return True, imread(self._image)
Developer: Virako, Project: Rocamgo-ng, Lines: 28, Source: capture_source.py
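A possible usage sketch for the CaptureSource class above (the file names are placeholders, and it assumes the class plus its cv2/Cameras imports are available in the calling module):

source = CaptureSource()
source.video("game.mp4")          # or: source.camera(num_cams) / source.image("board.png")
width, height = source.get_resolution()
ok, frame = source.get_frame()
if ok:
    print("got a %dx%d frame" % (width, height))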

Example 3: Camera

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
class Camera(object):
    """
    The class responsible for communicating with the actual camera and
    getting images from it.
    
    Attributes:
        cam: An instance of an openCV VideoCapture. 
    """
    
    def __init__(self, device_num):
        """
        Uses a device num in case the system has multiple cameras attached.
        """
        
        self.cam = VideoCapture(device_num) 
        
    def get_image(self):
        """
        Grab a frame from the camera. The cameraCommunicator is the caller,
        and is responsible for lighting and location. The filename of the
        image is returned. 
        
        Raises:
            FatalCameraException: An image was not taken successfully.
        """
        
        #create the systematic filename
        timestamp = datetime.datetime.now()
        filename = utils.get_image_dir() + str(timestamp.date()) + \
                    str(timestamp.hour) + str(timestamp.minute) + \
                    str(timestamp.second) + '.jpg'
        
        #A series of reads to allow the camera to adjust to lighting
        self.cam.read()
        self.cam.read()
        self.cam.read()
        self.cam.read()
        
        #only the last is saved
        success, image = self.cam.read()
        
        if not success:
            raise FatalCameraException()
        else:
            imwrite(filename, image)
            
        return timestamp, filename
Developer: CraigBryan, Project: pellinglab_twitter_microscope, Lines: 49, Source: camera_communicator.py
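The back-to-back read() calls above are a common warm-up trick: the first few frames are discarded so the camera's auto-exposure can settle before a frame is kept. A hypothetical usage sketch (device number 0 is a placeholder; it assumes the Camera class, utils.get_image_dir() and FatalCameraException from the surrounding project):

cam = Camera(0)
try:
    timestamp, filename = cam.get_image()
    print("saved %s at %s" % (filename, timestamp))
except FatalCameraException:
    print("camera failed to deliver a frame")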

Example 4: Window

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
class Window(object):
    def __init__(self, title="Video Stream"):
        ''' Uses OpenCV 2.3.1 method of accessing camera '''
        self.title = title
        self.cap = VideoCapture(0)
        self.prev = self.get_frame()
        self.frame = self.get_frame()
        self.left = None   # captured later with the 'l' key, used by disparity()
        self.right = None  # captured later with the 'r' key
        namedWindow(title, 1)

    def get_frame(self):
        success, frame = self.cap.read()
        return self.to_grayscale(frame) if success else False

    def to_grayscale(self, frame):
        return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    def optical_flow(self):
        # dense Farneback optical flow between the previous and current frames -- takes ~0.05s
        # (OpenCV 3+/4 signature; older 2.x bindings take the optional flow argument last)
        flow = cv2.calcOpticalFlowFarneback(self.prev, self.frame, None,
                                            0.5, 3, 15, 3, 5, 1.2, 0)
        return flow

    def disparity(self):
        if self.left is None or self.right is None:
            print "Capture left and right images using 'l' and 'r' keys before running disparity"
            return None

        hl, wl = self.left.shape
        hr, wr = self.right.shape
        disp_left  = cv2.cv.CreateMat(hl, wl, cv2.cv.CV_16S)
        disp_right = cv2.cv.CreateMat(hr, wr, cv2.cv.CV_16S)
        state = cv2.cv.CreateStereoGCState(16,2)
        # running the graph-cut algorithm
        from cv2.cv import fromarray
        cv2.cv.FindStereoCorrespondenceGC(fromarray(self.left), fromarray(self.right), disp_left, disp_right, state)
        cv2.cv.Save( "left.png", disp_left) # save the map
        cv2.cv.Save( "right.pgm", disp_right) # save the map

    def mainloop(self):
        while True:
            self.prev = self.frame
            self.frame = self.get_frame()
            sift(self.frame) # takes ~0.14s!
            imshow(self.title, self.frame)
            k = waitKey(10)
            if k == -1:
                pass
            elif chr(k) == 'l':
                self.left = self.frame
            elif chr(k) == 'r':
                self.right = self.frame
            elif chr(k) == 'd':
                self.disparity()
            elif k == 27:
                break
Developer: blagarde, Project: robot, Lines: 58, Source: main.py

Example 5: Camera_Capture

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
class Camera_Capture():
    """
    VideoCapture without uvc control using cv2.VideoCapture
    """
    def __init__(self,src_id,size=(640,480),fps=None,timebase=None):
        self.controls = None
        self.cvId = src_id
        self.name = "VideoCapture"
        self.controls = None
        ###add cv videocapture capabilities
        self.capture = VideoCapture(src_id)
        self.set_size(size)

        if timebase is None:
            logger.debug("Capture will run with default system timebase")
            self.timebase = c_double(0)
        elif isinstance(timebase,c_double):
            logger.debug("Capture will run with app wide adjustable timebase")
            self.timebase = timebase
        else:
            logger.error("Invalid timebase variable type. Will use default system timebase")
            self.timebase = c_double(0)


    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp,img)

    def set_size(self,size):
        width,height = size
        self.capture.set(3, width)
        self.capture.set(4, height)

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self,fps):
        self.capture.set(5,fps)

    def get_fps(self):
        return self.capture.get(5)

    def get_now(self):
        return time()

    def create_atb_bar(self,pos):
        size = 0,0
        return size

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
Developer: Azique, Project: pupil, Lines: 57, Source: __init__.py
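The integer arguments 3, 4 and 5 passed to capture.set()/capture.get() above are the numeric values of OpenCV's CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT and CAP_PROP_FPS properties. A small sketch of the same resizing logic using the named constants (OpenCV 3+ naming; device index 0 is a placeholder):

import cv2

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
print(cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(cap.get(cv2.CAP_PROP_FPS))
cap.release()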

Example 6: video_loop

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
def video_loop(aframes_queue,person_queue):
    vc = VideoCapture(0)
    rval, frame = vc.read()
    people = {}
    colors = ((0,0,255),(255,255,0))
    while True:
        rval, frame = vc.read()
        if frame is None:
            c = waitKey(10)
            continue
        aframe = NP.asarray(frame[:,:])
        im = Image.fromarray(frame)
        draw = ImageDraw.Draw(im)
        
        while not person_queue.empty():
            name,rect,name_size = person_queue.get()
            people[name] = {'rect' : rect, 'name_size' : name_size, 
                            'time_found' : time.time()}

        name_counter = 0        
        for name in people.keys():
            if name_counter < 2:
                draw_name(draw, people[name], name, name_counter, colors[name_counter])
            name_counter += 1
            
            if time.time()>people[name]['time_found']+2:
                # stop displaying after 2 seconds
                people.pop(name)
                
        frame2 = NP.array(im)
        imshow('frame',frame2)


        if aframes_queue.empty():
            aframes_queue.put(aframe)
        c = waitKey(1)
        if c == 27: # exit on ESC
            break
    
    vc.release()
    destroyAllWindows()
Developer: oran28, Project: video_face_recognise, Lines: 43, Source: main.py

Example 7: caputure

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
def caputure():
    # open Camera
    cam = VideoCapture(0)
    if not cam.isOpened():
        LOGGER.debug('FAILED to open camera!!!')
        return None

    # capture image: read repeatedly so auto-exposure can settle, keep only the last frame
    for i in range(100):
        status, img = cam.read()
    if not status:
        LOGGER.debug('FAILED to capture image!!!')
        return None

    cam.release()
    return img
Developer: tlipoma, Project: SouthlakeAWOS, Lines: 18, Source: Camera.py

Example 8: grabImageFromUSB

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
def grabImageFromUSB(cameraNumber=0):
    '''Grabs a snapshot from the specified USB camera.

    Returns bool, video frame decoded as a JPEG bytearray.
    '''
    from cv2 import VideoCapture, imencode

    # initialize the camera
    cam = VideoCapture(cameraNumber)
    retVal, rawData = cam.read()
    if not retVal:
        # frame capture failed
        return False, None
    retVal, jpgData = imencode('.jpg', rawData)
    if not retVal:
        # image encoding failed
        return False, None
    return retVal, bytearray(jpgData)
Developer: corerd, Project: PyDomo, Lines: 20, Source: camgrab.py
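A short usage sketch for grabImageFromUSB above, writing the returned JPEG bytearray to disk (the output filename is a placeholder):

ok, jpg_bytes = grabImageFromUSB(0)
if ok:
    with open("snapshot.jpg", "wb") as f:
        f.write(jpg_bytes)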

Example 9: check_cameras_linux

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
    def check_cameras_linux(self, num=MAX_CAMERAS):
        """Comprueba las cámaras disponibles.

        :Param num: máximo número de cámaras a comprobar
        :Keyword num: 99 por defecto, ya que en Linux es lo permitido
        :Param num: int
        :Return: lista de cámaras disponibles
        :Rtype: list of Capture
        """
        n = 0
        while len(self.cameras) < num and n <= MAX_CAMERAS:
            camera = VideoCapture(n)
            retval, frame = camera.read()
            if retval:
                self.cameras.append(camera)
            n += 1
        if num != MAX_CAMERAS and len(self.cameras) != num:
            print("Found %d of %d cameras. " % (len(self.cameras), num))
            exit()
        return len(self.cameras)
Developer: Virako, Project: Rocamgo-ng, Lines: 22, Source: cameras.py

Example 10: Camera_Capture

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
class Camera_Capture():
    """
    VideoCapture without uvc control using cv2.VideoCapture
    """
    def __init__(self,src_id,size=(640,480),fps=None):
        self.controls = None
        self.cvId = src_id
        self.name = "VideoCapture"
        self.controls = None
        ###add cv videocapture capabilities
        self.capture = VideoCapture(src_id)
        self.set_size(size)

    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp,img)

    def set_size(self,size):
        width,height = size
        self.capture.set(3, width)
        self.capture.set(4, height)

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self,fps):
        self.capture.set(5,fps)

    def get_fps(self):
        return self.capture.get(5)

    def create_atb_bar(self,pos):
        size = 0,0
        return size

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
Developer: WillemVlakveld, Project: pupil, Lines: 43, Source: __init__.py

Example 11: Camera_Capture

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
class Camera_Capture(object):
    """docstring for uvcc_camera"""

    def __init__(self, cam, size=(640, 480), fps=30):
        self.src_id = cam.src_id
        self.uId = cam.uId
        self.name = cam.name
        self.controls = Controls(self.uId)

        try:
            self.controls["UVCC_REQ_FOCUS_AUTO"].set_val(0)
        except KeyError:
            pass

        self.capture = VideoCapture(self.src_id)
        self.set_size(size)

    def re_init(self, cam, size=(640, 480), fps=30):
        self.src_id = cam.src_id
        self.uId = cam.uId
        self.name = cam.name
        self.controls = Controls(self.uId)

        try:
            self.controls["UVCC_REQ_FOCUS_AUTO"].set_val(0)
        except KeyError:
            pass

        self.capture = VideoCapture(self.src_id)
        self.set_size(size)

        # recreate the bar with new values
        bar_pos = self.bar._get_position()
        self.bar.destroy()
        self.create_atb_bar(bar_pos)

    def re_init_cam_by_src_id(self, src_id):
        try:
            cam = Camera_List()[src_id]
        except KeyError:
            logger.warning("could not reinit capture, src_id not valid anymore")
            return
        self.re_init(cam, self.get_size())

    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp, img)

    def set_size(self, size):
        width, height = size
        self.capture.set(3, width)
        self.capture.set(4, height)

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self, fps):
        self.capture.set(5, fps)

    def get_fps(self):
        return self.capture.get(5)

    def create_atb_bar(self, pos):
        # add uvc camera controls to a separate ATB bar
        size = (200, 200)

        self.bar = atb.Bar(
            name="Camera_Controls",
            label=self.name,
            help="UVC Camera Controls",
            color=(50, 50, 50),
            alpha=100,
            text="light",
            position=pos,
            refresh=2.0,
            size=size,
        )

        sorted_controls = [c for c in self.controls.itervalues()]
        sorted_controls.sort(key=lambda c: c.order)

        cameras_enum = atb.enum("Capture", dict([(c.name, c.src_id) for c in Camera_List()]))

        self.bar.add_var("Capture", vtype=cameras_enum, getter=lambda: self.src_id, setter=self.re_init_cam_by_src_id)
        for control in sorted_controls:
            name = control.atb_name
            if control.type == "bool":
                self.bar.add_var(name, vtype=atb.TW_TYPE_BOOL8, getter=control.get_val, setter=control.set_val)
            elif control.type == "int":
                self.bar.add_var(name, vtype=atb.TW_TYPE_INT32, getter=control.get_val, setter=control.set_val)
                self.bar.define(definition="min=" + str(control.min), varname=name)
                self.bar.define(definition="max=" + str(control.max), varname=name)
                self.bar.define(definition="step=" + str(control.step), varname=name)
            elif control.type == "menu":
                if control.menu is None:
                    vtype = None
                else:
                    vtype = atb.enum(name, control.menu)
                self.bar.add_var(name, vtype=vtype, getter=control.get_val, setter=control.set_val)
#......... remainder of the code omitted .........
Developer: JoelGoncalvesTUM, Project: pupil, Lines: 103, Source: __init__.py

Example 12: MouseTracker

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
class MouseTracker(object):
    def __init__(self, mouse, n=1, data_dir='.', diff_thresh=80, resample=1, translation_max=50, smoothing_kernel=19, consecutive_skip_threshold=0.08, selection_from=[], point_mode='auto'):
        self.mouse = mouse
        self.n = n
        self.data_dir = data_dir
        
        # Parameters (you may vary)
        self.diff_thresh = diff_thresh
        self.resample = resample
        self.translation_max = translation_max
        self.kernel = smoothing_kernel

        # Parameters (you should not vary)
        self.cth1 = 0
        self.cth2 = 0
        plat = sys.platform
        if 'darwin' in plat:
            self.fourcc = CV_FOURCC('m','p','4','v') 
        elif plat[:3] == 'win':
            self.fourcc = 1
        else:
            self.fourcc = -1
        
        self.fh = FileHandler(self.data_dir, self.mouse, self.n)

        self.framei = 0
        self.load_time()
        self.consecutive_skip_threshold = (self.fs/self.resample) * consecutive_skip_threshold
        self.load_background()
        self.height, self.width = self.background.shape
        self.mov = VideoCapture(self.fh.get_path(self.fh.TRIAL, self.fh.MOV))
        self.mov.read();self.time=self.time[1:]
        #self.get_frame(self.mov,n=40) #MUST ADJUST TIME IF USING THIS
        self.load_pts(mode=point_mode)
        self.make_rooms()

    def end(self):
        self.results = dict(pos=self.pos, time=np.array(self.t)-self.t[0], guess=self.guess, heat=self.heat, contour=self.contour, pct_xadj=self.pct_xadj)
        np.savez(self.fh.make_path('tracking.npz'), **self.results)
        savemat(self.fh.make_path('tracking.mat'), self.results)
        
        self.mov.release()
        destroyAllWindows()
    def man_update(self, d):
        for k,v in d.items():
            setattr(self,k,v)
    def make_rooms(self):
        self.path_x = mpl_path.Path(self.pts[np.array([self.xmli,self.xoli,self.xori,self.xmri])])
        self.path_y = mpl_path.Path(self.pts[np.array([self.ymli,self.yoli,self.yori,self.ymri])])
        self.path_z = mpl_path.Path(self.pts[np.array([self.zmli,self.zoli,self.zori,self.zmri])])

        #experimental: hand in frame on x room
        self.path_x_adj = mpl_path.Path(self.pts[np.array([self.xoli,self.xoli_adj,self.xori_adj,self.xori])])
        self.xadj_mask = np.zeros((self.height,self.width))
        for iy in xrange(self.xadj_mask.shape[0]):
            for ix in xrange(self.xadj_mask.shape[1]):
                self.xadj_mask[iy,ix] = self.path_x_adj.contains_point([ix,iy])
        self.xadj_idxs = np.squeeze(np.argwhere(self.xadj_mask==True))

        self.border_mask = np.zeros((self.height,self.width))
        pthpts = self.pts[np.array([self.yoli_adj,self.yori_adj,self.ymri,self.ycri,self.zmli,self.zoli_adj,self.zori_adj,self.zmri,self.zcri,self.xmli,self.xoli_adj,self.xori_adj,self.xmri,self.xcri,self.ymli])]
        pth = mpl_path.Path(pthpts)
        for iy in xrange(self.border_mask.shape[0]):
            for ix in xrange(self.border_mask.shape[1]):
                self.border_mask[iy,ix] = pth.contains_point([ix,iy])
    def classify_pts(self):
        #stored in (x,y)
        #c: center
        #m: middle
        #o: out
        #x: bottom arm, y: left arm, z: right arm
        #l: left when going down arm, r: right when going down arm
        #pt is: [x/y/z c/m/o l/r]
        X,Y = 0,1
        def nn(pidx,n,ex=[]):
            #idxs of n closest pts to p, excluding all idxs in ex
            p = self.pts[pidx]
            ds = np.array([dist(pp,p) for pp in self.pts])
            idxs =  np.argsort(ds)
            idxs = np.array([i for i in idxs if i not in ex])
            return idxs[:n]
        def sortby(pidxs, dim):
            pts = self.pts[np.array(pidxs)]
            return pidxs[np.argsort(pts[:,dim])]
        dists = np.array([dist(self.pts_c, p) for p in self.pts])
        c3i = self.c3i[np.argsort(self.pts[self.c3i][:,0])]
        m6i = self.m6i
        o6i = self.o6i
        
        #classify them:
        xcri=ycli=c3i[0]
        ycri=zcli=c3i[1]
        zcri=xcli=c3i[2]
        temp = nn(xcri, 2, ex=c3i)
        ymli,xmri = sortby(temp, Y)
        temp = nn(ycri, 2, ex=c3i)
        ymri,zmli = sortby(temp, X)
        temp = nn(zcri, 2, ex=c3i)
        zmri,xmli = sortby(temp, Y)
        cm9 = [xcri,ycri,zcri,xmri,xmli,ymri,ymli,zmri,zmli]
#......... remainder of the code omitted .........
Developer: bensondaled, Project: three-chamber, Lines: 103, Source: ymaze_track.py

Example 13: captureTStamp

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
def captureTStamp(files, duration, cod,  fps=0, verbose=True):
    '''
    Record video from the given source for `duration` minutes into the given
    file, and also save text files with the time stamp of every frame.
    
    files = [url, saveVideoFile, saveDateFile, saveMillisecondFile]
    duration = time in minutes
    cod = codec
    fps = frames per second for the video to be saved
    verbose = print messages to screen
    
    If fps=0 the function tries to read it from the capture; for the FE camera
    it has to be specified explicitly.
    
    Written for OpenCV '2.4.9.1'.
    
    Examples
    --------
    
    from cameraUtils import captureTStamp
    
    # for the FE camera
    duration = 1 # in minutes
    files = ['rtsp://192.168.1.48/live.sdp',
             "/home/alumno/Documentos/sebaPhDdatos/vca_test_video.avi",
             "/home/alumno/Documentos/sebaPhDdatos/vca_test_tsFrame.txt"]
    fpsCam = 12
    cod = 'XVID'
    
    captureTStamp(files, duration, cod, fps=fpsCam)
    
    # %% for the PTZ camera
    duration = 0.2 # in minutes
    files = ["rtsp://192.168.1.49/live.sdp",
             "/home/alumno/Documentos/sebaPhDdatos/ptz_test_video.avi",
             "/home/alumno/Documentos/sebaPhDdatos/ptz_test_tsFrame.txt"]  
    
    fpsCam = 20
    cod = 'XVID'
    
    captureTStamp(files, duration, cod, fpsCam)
    
    '''
    
    fcc = fourcc(cod[0],cod[1],cod[2],cod[3]) # video codec
    
    if verbose:
        print(files)
        print("Duration",duration,"minutes")
        print("fps",fps)
        print("codec",cod)
    
    # Initialization
    tFin = datetime.datetime.now() + datetime.timedelta(minutes=duration)
    
    ts = list()  # capture timestamps
    
    # open capture
    cap = VideoCapture(files[0])
    while not cap.isOpened():
        cap = VideoCapture(files[0])
    
    print("capture opened")
	# configurar writer
    w = int(cap.get(frame_width))
    h = int(cap.get(frame_height))
    if not fps:
        fps = cap.get(prop_fps)
    #para fe especificar los fps pq toma cualquier cosa con la propiedad
    
    out = VideoWriter(files[1], fcc, fps,( w, h), True)
    
    if verbose:
        print("capture open",cap.isOpened())
        print("frame size",w,h)
        print("output opened",out.isOpened())
    
    if not out.isOpened() or not cap.isOpened():
        out.release()
        cap.release()
        # exit function if unable to open cap or out
        return
    
    s0 = getsize(files[1]) # initial filesize before writing frame
    # First capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("first frame captured")
    # Second capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("second frame captured")
#......... remainder of the code omitted .........
Developer: sebalander, Project: sebaPhD, Lines: 103, Source: cameraUtils.py

Example 14: main

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
def main():
    cap = VideoCapture("feng.mp4")
    ret, frame = cap.read()
    imwrite("feng1.jpg", frame)
Developer: yuansmin, Project: somethingusefull, Lines: 6, Source: capture_img.py
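The same idea can be extended to grab an arbitrary frame by seeking with CAP_PROP_POS_FRAMES before reading; a hedged variant sketch (the frame index is arbitrary, and seeking precision depends on the codec):

from cv2 import VideoCapture, imwrite, CAP_PROP_POS_FRAMES

cap = VideoCapture("feng.mp4")
cap.set(CAP_PROP_POS_FRAMES, 100)   # best-effort jump to frame 100
ret, frame = cap.read()
if ret:
    imwrite("feng_frame100.jpg", frame)
cap.release()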

Example 15: imread

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import read [as alias]
from cv2 import VideoCapture, imread, imshow, waitKey, pyrDown
from moustache import _putmoustache_
from itertools import count
# NOTE: _putglass_, used further down, is imported elsewhere in the original project (excerpt truncated)

backgroundTreshold = 35

beach = imread('manzara640.png', -1)

video_capture = VideoCapture(0)

if not video_capture.isOpened():
    exit('The Camera is not opened')

counter = count(1)

for i in range(0, 50):
    ret, background = video_capture.read()
    imshow("Video", background)
    waitKey(10)
    print "!!! Step out of the frame !!!"
    print "Background will be detected in %d seconds" % (7 - i)
ret, background = video_capture.read()
background = pyrDown(background)
#background = pyrDown(background)

while True:
    print "Iteration %d" % counter.next()

    ret, temp = video_capture.read()
    #temp = pyrDown(temp)
    frame2 = _putmoustache_(temp)
    frame3 = _putglass_(frame2)
Developer: kaanoguzhan, Project: ComputerVision, Lines: 33, Source: main.py


Note: The cv2.VideoCapture.read examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors. For redistribution and use, please refer to each project's license. Do not reproduce without permission.