

Python VideoCapture.release Method Code Examples

This article collects typical usage examples of the cv2.VideoCapture.release method in Python. If you are unsure what VideoCapture.release does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing cv2.VideoCapture class.


The following presents 14 code examples of VideoCapture.release, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
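All of the examples below share one lifecycle: construct a VideoCapture, read from it, and call release() when finished. As a minimal sketch (camera index 0 is an assumption; your device may differ):

# Minimal VideoCapture lifecycle sketch; index 0 is an assumption.
from cv2 import VideoCapture

cap = VideoCapture(0)          # open the default camera
try:
    ok, frame = cap.read()     # grab and decode one frame
    if ok:
        print(frame.shape)     # e.g. (480, 640, 3)
finally:
    cap.release()              # always free the device handle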

Example 1: Camera

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import release [as alias]
class Camera(object):

    def get_settings(self):
        if not hasattr(self, '_video_capture'):
            raise Exception("Start video capture before getting settings")
        settings = []
        for prop in global_camera_properties:
            prop_value = self._video_capture.get(prop['value'])
            if prop_value >= 0:
                settings.append({'name': prop['name'], 'value': prop_value})
        return settings

    def set_setting(self, setting, value):
        if not hasattr(self, '_video_capture'):
            raise Exception("Start video capture before setting a setting")
        matches = [prop for prop in global_camera_properties if prop['name'] == setting]
        if len(matches) == 1:
            setting_id = matches[0]['value']
        else:
            raise Exception("Setting {} not available".format(setting))
        self._video_capture.set(setting_id, value)

    def read(self):
        (retVal, image) = self._video_capture.read()
        return image

    def start(self):
        self._video_capture = VideoCapture(0)
        self.shape = self.read().shape

    def stop(self):
        self._video_capture.release()
Developer: Createcafe3d, Project: peachyscanner, Lines: 34, Source: camera.py
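For illustration, a hypothetical usage sketch of the Camera class above, assuming a camera is attached and that global_camera_properties (a property table defined elsewhere in the source project) is in scope:

# Hypothetical usage of the Camera class above; assumes
# global_camera_properties is defined as in the source project.
cam = Camera()
cam.start()                # opens device 0 and records the frame shape
image = cam.read()         # a BGR frame as a numpy array
print(cam.get_settings())  # e.g. [{'name': 'brightness', 'value': 128.0}]
cam.stop()                 # calls VideoCapture.release on the device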

Example 2: start

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import release [as alias]
def start():
    '''
    Detect connected cameras while showing a splash screen,
    then launch the main GUI.
    '''
    #Load splash screen
    splScr = splash()
    found = []
    #find connected cameras        
    for num in range(10):
        cam = VideoCapture(num)
        #show progress bar 'movement' while the main program finds cameras
        splScr.update()
        if not cam.read()[0]:
            del(cam)
        else:
            cam.release()
            found.append(num)
        while gtk.events_pending():
            gtk.main_iteration()
    #destroy splash screen once all cameras are found
    splScr.destroy()
    print 'connected cameras:', len(found)
    #run main program
    main_gui(found)
    gtk.main()
    return
Developer: xDMxosiris, Project: hekate, Lines: 29, Source: gui_main.py

Example 3: caputure

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import release [as alias]
def caputure():
    # open Camera
    cam = VideoCapture(0)
    if not cam.isOpened():
        LOGGER.debug('FAILED to open camera!!!')
        return None

    # capture image: read 100 frames and keep the last (lets the camera settle)
    for i in range(100):
        status, img = cam.read()
    if not status:
        LOGGER.debug('FAILED to capture image!!!')
        cam.release()
        return None

    cam.release()
    return img
Developer: tlipoma, Project: SouthlakeAWOS, Lines: 18, Source: Camera.py

Example 4: load_background

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import release [as alias]
 def load_background(self):
     try:
         bg = np.load(self.fh.make_path('background.npz',mode=self.fh.BL))
         background = bg['computations']
         background_image = bg['image']
     except:
         blmov = VideoCapture(self.fh.get_path(self.fh.BL,self.fh.MOV))
         valid, background, ts = self.get_frame(blmov, n=-1, blur=True)
         blmov.release()
         
         blmov = VideoCapture(self.fh.get_path(self.fh.BL,self.fh.MOV))
         valid, background_image, ts = self.get_frame(blmov, n=-1, blur=False)
         blmov.release()
         
         np.savez(self.fh.make_path('background.npz',mode=self.fh.BL), computations=background, image=background_image)
     self.background, self.background_image = background, background_image
Developer: bensondaled, Project: three-chamber, Lines: 18, Source: ymaze_track.py

Example 5: video_loop

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import release [as alias]
def video_loop(aframes_queue,person_queue):
    vc = VideoCapture(0)
    rval, frame = vc.read()
    people = {}
    colors = ((0,0,255),(255,255,0))
    while True:
        rval, frame = vc.read()
        if frame is None:
            c = waitKey(10)
            continue
        aframe = NP.asarray(frame[:,:])
        im = Image.fromarray(frame)
        draw = ImageDraw.Draw(im)
        
        while not person_queue.empty():
            name,rect,name_size = person_queue.get()
            people[name] = {'rect' : rect, 'name_size' : name_size, 
                            'time_found' : time.time()}

        name_counter = 0        
        for name in list(people.keys()):  # copy keys: entries may be popped below
            if name_counter < 2:
                draw_name(draw, people[name], name, name_counter, colors[name_counter])
            name_counter += 1
            
            if time.time()>people[name]['time_found']+2:
                # stop displaying after 2 seconds
                people.pop(name)
                
        frame2 = NP.array(im)
        imshow('frame',frame2)


        if aframes_queue.empty():
            aframes_queue.put(aframe)
        c = waitKey(1)
        if c == 27: # exit on ESC
            break
    
    vc.release()
    destroyAllWindows()
Developer: oran28, Project: video_face_recognise, Lines: 43, Source: main.py

Example 6: load_background

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import release [as alias]
 def load_background(self):
     try:
         bg = np.load(os.path.join(self.background_dir,'%s_background.npz'%self.background_name))
         background = bg['computations']
         background_image = bg['image']
     except:
         #print "Acquiring background information..."
         #print os.path.join(self.background_dir, self.background_name+'-cam0.avi')
         try:
             blmov = VideoCapture(os.path.join(self.background_dir, self.background_name+'-cam.avi'))
         except:
             blmov = VideoCapture(os.path.join(self.background_dir, self.background_name+'-cam0.avi'))
         valid, background = self.get_frame(blmov, n=-1, blur=True)
         blmov.release()
         
         try:
             blmov = VideoCapture(os.path.join(self.background_dir, self.background_name+'-cam.avi'))
         except:
             blmov = VideoCapture(os.path.join(self.background_dir, self.background_name+'-cam0.avi'))
         valid, background_image = self.get_frame(blmov, n=-1, blur=False)
         blmov.release()
         
         np.savez(os.path.join(self.background_dir,'%s_background'%self.background_name), computations=background, image=background_image)
     return background, background_image
Developer: bensondaled, Project: three-chamber, Lines: 26, Source: chamber_track.py

Example 7: unix_time

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import release [as alias]
                    fontColor = (250,250,250)
                    gender1 = gender1+1

                count = gender1 + gender2

                dt = datetime.datetime.now()
                time_milli_secs = unix_time(dt)

                json = {'timestamp' : time_milli_secs, 'gender' : gender, 'count' : count, 'gender1' : gender1, 'gender2' : gender2}

                insert_into_mongo(json)

                
                x1, y1, w1, h1 = faceRegions[indx]
                rectangle(img0,
                          (x1,y1),
                          (x1+w1,y1+h1),
                          (100,255,0),2)
                putText(img=img0,
                            text='Gender: ' + gender,
                            org=(x1,y1+h1-10),
                            fontFace=CV_FONT_HERSHEY_DUPLEX,
                            fontScale=0.75,
                            color=fontColor)
                
    imshow('appDemo', img0) 
    keyPressed = waitKey(2)
destroyAllWindows()
capture.release()

Developer: rvpradeep, Project: Jackfruit, Lines: 31, Source: gendEstApp.py

Example 8: camera

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import release [as alias]
class camera(object):
    '''
    Object containing camera information
    Call-able, retrieve current frame in camera buffer
    
    User accessible attributes:
        device        system device number
        resolution    camera resolution
        BGRimage      image in BGR format
        HSVimage      image in HSV format
        RGBimage      image in RGB format
        FPS           camera speed in FPS
        
    User accessible methods:
        close         close camera device
    '''
    def __init__(self, cam_num = -1, resolution = (640, 480)):
        '''
        create camera object
            cam_num            device number (integer)
            resolution         image resolution (tuple width x height)
        '''
        self.device = cam_num
        self.resolution = resolution
        self.BGRimage = []
        self.HSVimage = []
        self.RGBimage = []
        self.FPS = [0, 0]
        self.__avr = 0
        #assign and open device
        self.__capture = VideoCapture(cam_num)
        self.__capture.set(CV_CAP_PROP_FRAME_WIDTH,resolution[0])
        self.__capture.set(CV_CAP_PROP_FRAME_HEIGHT,resolution[1])
        self.__flag = False
        t0 = time()
        self.__flag, self.BGRimage = self.__capture.read()
        self.FPS[0] = 1/(time()-t0)
        self.FPS[1] = self.FPS[0]
        self.__avr = self.FPS[0]
        print "camera", self.device, "ready @", self.FPS[0], "fps"
        return
    def __call__(self, frame_delay = 0, fast = False):
        '''
        retrieve current frame in camera buffer
            frame_delay        delay the frame decoding (integer)
            fast               if true don't decode image to RGB format (logic)    
        '''
        #set timer to measure fps
        self.__avr = self.FPS[1]
        t0 = time()
        #try to retrieve current frame
        while not self.__flag:
            if frame_delay > 0:
                for i in xrange(frame_delay + 1):
                    self.__capture.grab()
                self.__flag, self.BGRimage = self.__capture.retrieve()
                del i
            else:
                self.__flag, self.BGRimage = self.__capture.read()
        self.__flag = False
        #decode bgr format to hsv
        self.HSVimage = cvtColor(self.BGRimage, CV_BGR2HSV)
        if fast:
            self.FPS[0] = 1/(time()-t0)
            self.FPS[1] = (self.FPS[0]+self.__avr)/2
            return
        #decode bgr format to rgb
        self.RGBimage = cvtColor(self.BGRimage, CV_BGR2RGB)
        self.FPS[0] = 1/(time()-t0)
        self.FPS[1] = (self.FPS[0]+self.__avr)/2
        return
    def __str__(self):
        '''
        return camera information;
            device number
            device resolution
            instant speed
            average speed
        '''
        tmp = "camera object @ dev "+str(self.device)+", resolution: "+str(self.resolution)
        tmp = tmp +", fps: "+str(self.FPS[0])+", Avr. fps: "+str(self.FPS[1])
        return tmp
    def __del__(self):
        '''
        when the object is deleted, it closes the device
        '''
        self.close()
        return
    def close(self):
        '''
        close device, making it available to use 
        '''
        #if the device is open then close it
        if self.__capture.isOpened():
            self.__capture.release()
            print "camera", self.device, "closed"
        return
Developer: xDMxosiris, Project: hekate, Lines: 100, Source: api_captureOLD.py
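A hedged usage sketch for the camera object above (the module is Python 2 code, as its print statements and xrange show, so a Python 2 environment is assumed; device -1 lets OpenCV pick the first available camera):

# Hypothetical usage of the camera class above (Python 2).
cam = camera(cam_num=-1, resolution=(640, 480))
cam()                 # grab one frame; fills BGRimage, HSVimage and RGBimage
print(cam)            # __str__ reports device, resolution, and fps
cam.close()           # releases the device (also called from __del__)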

Example 9: MouseTracker

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import release [as alias]
class MouseTracker(object):
    def __init__(self, mouse, mode,  data_directory='.', diff_thresh=100, resample=8, translation_max=100, smoothing_kernel=19, consecutive_skip_threshold=2, selection_from=[]):
        self.mouse = mouse
        self.data_dir = data_directory
        self.mode = mode
        self.selection_from = selection_from
        
        # Parameters (you may vary)
        self.diff_thresh = diff_thresh
        self.resample = resample
        self.translation_max = translation_max
        self.kernel = smoothing_kernel
        self.consecutive_skip_threshold = (37./self.resample) * consecutive_skip_threshold

        # Parameters (you should not vary)
        self.duration = 1
        self.cth1 = 0
        self.cth2 = 0
        plat = sys.platform
        if 'darwin' in plat:
            self.fourcc = CV_FOURCC('m','p','4','v') 
        elif plat[:3] == 'win':
            self.fourcc = 1
        else:
            self.fourcc = -1

        fh = FileHandler(self.data_dir, self.mouse)
        self.background_name = fh[mode][BACKGROUND][NAME]
        self.background_dir = fh[mode][BACKGROUND][DIR]
        self.trial_name = fh[mode][TRIAL][NAME]
        self.trial_dir = fh[mode][TRIAL][DIR]

        self.background, self.background_image = self.load_background()
        self.height, self.width = np.shape(self.background)
        
        timefile = os.path.join(self.trial_dir, self.trial_name+'-timestamps.json')
        self.time = json.loads(open(timefile,'r').read())
        vidfile = os.path.join(self.trial_dir, self.trial_name+'-cam.avi')
        if not os.path.exists(vidfile):
            vidfile = os.path.join(self.trial_dir, self.trial_name+'-cam0.avi')
        if not os.path.exists(vidfile):
            raise Exception('Movie %s not found.'%vidfile)
        self.mov = VideoCapture(vidfile)

        self.results = {}
        self.results['centers'] = []
        self.results['centers_all'] = []
        self.results['left'] = 0
        self.results['right'] = 0
        self.results['middle'] = 0
        self.results['left_assumed'] = 0
        self.results['right_assumed'] = 0
        self.results['middle_assumed'] = 0
        self.results['skipped'] = 0
        self.results['heat'] = np.zeros(np.shape(self.background))
        self.results['n_frames'] = 0
        self.results['params'] = [self.diff_thresh, self.kernel, self.translation_max, self.resample]
        self.results['params_key'] = ['diff_thresh','kernel','translation_max','resample']

        self.path_l, self.path_r, self.path_c, self.rooms_mask, self.paths_ignore, self.last_center = self.get_pt_selections()
    def end(self):
        np.savez(os.path.join(self.trial_dir,'%s_tracking'%self.trial_name), **self.results)
        savemat(os.path.join(self.trial_dir,'%s_tracking'%self.trial_name), self.results)
        
        self.mov.release()
        destroyAllWindows()
    def get_pt_selections(self):
        valid,first = self.get_frame(self.mov, blur=False, n=30)
        try: #did they select for this trial
            pts = np.load(os.path.join(self.trial_dir, '%s_selections.npz'%self.trial_name))
            pts_l = pts['pts_l']
            pts_r = pts['pts_r']
            pts_c = pts['pts_c']
            pts_mouse = pts['pts_mouse']
            regions_ignore = pts['regions_ignore']
        except:
            found_rooms = False
            found_ignore = False
            for sf in self.selection_from:
                fh = FileHandler(self.data_dir, sf)
                s_trial_name = fh[self.mode][TRIAL][NAME]
                s_trial_dir = fh[self.mode][TRIAL][DIR]
                
                try:
                    pts = np.load(os.path.join(s_trial_dir, '%s_selections.npz'%s_trial_name))
                    
                    if not found_rooms:
                        pts_l = pts['pts_l']
                        pts_r = pts['pts_r']
                        pts_c = pts['pts_c']
                        
                        plimshow(first, cmap=mpl_cm.Greys_r)
                        title('Good room corners? If so, click image, otherwise, close window.')
                        scatter(pts_l[:,0], pts_l[:,1], c='b', marker='o')
                        scatter(pts_r[:,0], pts_r[:,1], c='r', marker='o')
                        scatter(pts_c[:,0], pts_c[:,1], c='g', marker='o')

                        use_rooms = ginput(1)
                        close()
                        if use_rooms:  # ginput returns a list; nonempty means the image was clicked
#.........rest of the code omitted.........
Developer: bensondaled, Project: three-chamber, Lines: 103, Source: chamber_track.py

Example 10: pyrDown

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import release [as alias]
    ret, temp = video_capture.read()
    #temp = pyrDown(temp)
    frame2 = _putmoustache_(temp)
    frame3 = _putglass_(frame2)

    frame3 = pyrDown(frame3)
    #frame3 = pyrDown(frame3)

    height, width = frame3.shape[:2]
    for i in range(1, height, 4):
        for j in range(1, width, 4):
            if background[i][j][0] - backgroundTreshold <= frame3[i][j][0] <= background[i][j][0] + backgroundTreshold:
                if background[i][j][1] - backgroundTreshold <= frame3[i][j][1] <= background[i][j][1] + backgroundTreshold:
                    if background[i][j][2] - backgroundTreshold <= frame3[i][j][2] <= background[i][j][2] + backgroundTreshold:
                        for x in range(-2, 2):
                            for y in range(-2, 2):
                                frame3[i+x][j+y][0] = beach[i+x][j+y][0]
                                frame3[i+x][j+y][1] = beach[i+x][j+y][1]
                                frame3[i+x][j+y][2] = beach[i+x][j+y][2]

    #frame3 = pyrUp(frame3)
    frame3 = pyrUp(frame3)
    imshow("Video", frame3)
    waitKey(100)

video_capture.release()
destroyAllWindows()


Developer: kaanoguzhan, Project: ComputerVision, Lines: 29, Source: main.py

Example 11: Camera

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import release [as alias]
class Camera(object):
    ''' Communicate with the camera.

    Class governing the communication with the camera.

    Parameters
    -----------
    camera : int
        the index of the camera, best taken from func lookForCameras,
        from eyetracker.camera.capture
    dic : dic{propID  value}
        to check corresponding propIDs check
        opencv documentation under the term VideoCapture. 
        They will be set in the moment of object creation.

    Defines
    --------
    self.camera : index of the camera
    self.cap : capturing object
    self.frame : returns a frame from camera
    self.close : closes cap
    self.reOpen : reopens cap
    '''
    def __init__(self, camera, dic=None):
        self.camera = int(camera)
        self.cap = VideoCapture(self.camera)
        if dic:
            for propID, value in dic.iteritems():
                self.cap.set(propID, value)
        first_frame = self.frame()

    def frame(self):
        ''' Read frame from camera.

        Returns
        --------
        frame : np.array
            frame from camera
        '''
        if self.cap.isOpened():
            return self.cap.read()[1]
        else:
            print 'Cap is not opened.'
            return None

    def set(self, **kwargs):
        ''' Set camera parameters.

        Parameters
        -----------
        kwargs : {propID : value}
        '''
        for propID, value in kwargs.iteritems():
            self.cap.set(propID, value)

    def close(self):
        ''' Closes cap, you can reopen it with self.reOpen.
        '''
        self.cap.release()

    def reOpen(self, cameraIndex):
        ''' Reopens cap.
        '''
        self.cap.open(self.camera)
        first_frame = self.frame()
Developer: karolaug, Project: eyetracker-ng, Lines: 67, Source: camera.py
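A hypothetical usage sketch for this Camera wrapper (which is itself Python 2 code, given iteritems). The dic keys below are assumptions: 3 and 4 are OpenCV's legacy CV_CAP_PROP_FRAME_WIDTH and CV_CAP_PROP_FRAME_HEIGHT property IDs:

# Hypothetical usage of the Camera wrapper above; propIDs 3 and 4
# (frame width/height) are an assumption, check your cv2 constants.
cam = Camera(0, dic={3: 640, 4: 480})
frame = cam.frame()   # np.array frame, or None if the cap is not open
cam.close()           # calls VideoCapture.release
cam.reOpen(0)         # reopens the same device index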

Example 12: sleep

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import release [as alias]
print "At frame: ", video.get(1)
print "Total frames: ", frame_count, "vs. ", video.get(7)
# sleep(10)
width,height = frame_count, int(np.ceil(frame_count/(16.0/9)))
# barcode = Image.new('RGB', (width, height), (255,255,255))
# draw = ImageDraw.Draw(barcode)
# f = open("barcode.jpg", 'w')
f = open("color_codes.txt", 'a')
condition,frame = video.read()
while condition:
    print "Processing frame %d" % count
    # color = findColor(frame)
    if count % 3 == 0:
        color = findColor(frame)
        f.write(str(color) + "\n")
        # draw.line([(count/3,0), (count/3,height)], fill=tuple(color), width=1)
    count += 1
    condition,frame = video.read()
    # if count == 2:
        # break
    print "%0.3f % complete." % (video.get(1)/video.get(7))
    # barcode.save(f)
    # print "Saved."
# plt.clf()
# plt.imshow(barcode)
# plt.show()

# barcode.save(f)
f.close()
video.release()
print "Total time: %0.5f seconds." % (time() - t)
Developer: Jeffery-W, Project: movie-segment, Lines: 33, Source: loadVideo.py

Example 13: captureTStamp

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import release [as alias]
def captureTStamp(files, duration, cod,  fps=0, verbose=True):
    '''
    Records video for a number of minutes (duration) from the address given
    in the file list, saving it to the indicated file, together with files
    holding the timestamps of each frame.
    
    files = [url, saveVideoFile, saveDateFile, saveMillisecondFile]
    duration = time in minutes
    cod = codec
    fps = frames per second for the video to be saved
    verbose = print messages to screen
    
    If fps=0, the rate is read from the capture; for the FE camera it has
    to be specified explicitly.
    
    For opencv '2.4.9.1'
    
    Examples
    --------
    
    from cameraUtils import captureTStamp
    
    # for the FE camera
    duration = 1 # in minutes
    files = ['rtsp://192.168.1.48/live.sdp',
             "/home/alumno/Documentos/sebaPhDdatos/vca_test_video.avi",
             "/home/alumno/Documentos/sebaPhDdatos/vca_test_tsFrame.txt"]
    fpsCam = 12
    cod = 'XVID'
    
    captureTStamp(files, duration, cod, fps=fpsCam)
    
    # %% for the PTZ camera
    duration = 0.2 # in minutes
    files = ["rtsp://192.168.1.49/live.sdp",
             "/home/alumno/Documentos/sebaPhDdatos/ptz_test_video.avi",
             "/home/alumno/Documentos/sebaPhDdatos/ptz_test_tsFrame.txt"]
    
    fpsCam = 20
    cod = 'XVID'
    
    captureTStamp(files, duration, cod, fpsCam)
    
    '''
    
    fcc = fourcc(cod[0],cod[1],cod[2],cod[3]) # video codec FourCC
    
    if verbose:
        print(files)
        print("Duration",duration,"minutes")
        print("fps",fps)
        print("codec",cod)
    
    # Initialization
    tFin = datetime.datetime.now() + datetime.timedelta(minutes=duration)
    
    ts = list()  # capture timestamps
    
    # open the capture
    cap = VideoCapture(files[0])
    while not cap.isOpened():
        cap = VideoCapture(files[0])
    
    print("capture opened")
    # configure the writer
    w = int(cap.get(frame_width))
    h = int(cap.get(frame_height))
    if not fps:
        fps = cap.get(prop_fps)
    #for the FE camera, specify fps explicitly: the property may return anything
    
    out = VideoWriter(files[1], fcc, fps,( w, h), True)
    
    if verbose:
        print("capture open",cap.isOpened())
        print("frame size",w,h)
        print("output opened",out.isOpened())
    
    if not out.isOpened() or not cap.isOpened():
        out.release()
        cap.release()
        # exit function if unable to open cap or out
        return
    
    s0 = getsize(files[1]) # initial filesize before writing frame
    # First capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("first frame captured")
    # Second capture
    ret, frame = cap.read()
    if ret:
        t = datetime.datetime.now()
        ts.append(t)
        out.write(frame)
        if verbose:
            print("second frame captured")
#.........rest of the code omitted.........
Developer: sebalander, Project: sebaPhD, Lines: 103, Source: cameraUtils.py

Example 14: MouseTracker

# Required import: from cv2 import VideoCapture [as alias]
# Or: from cv2.VideoCapture import release [as alias]
class MouseTracker(object):
    def __init__(self, mouse, n=1, data_dir='.', diff_thresh=80, resample=1, translation_max=50, smoothing_kernel=19, consecutive_skip_threshold=0.08, selection_from=[], point_mode='auto'):
        self.mouse = mouse
        self.n = n
        self.data_dir = data_dir
        
        # Parameters (you may vary)
        self.diff_thresh = diff_thresh
        self.resample = resample
        self.translation_max = translation_max
        self.kernel = smoothing_kernel

        # Parameters (you should not vary)
        self.cth1 = 0
        self.cth2 = 0
        plat = sys.platform
        if 'darwin' in plat:
            self.fourcc = CV_FOURCC('m','p','4','v') 
        elif plat[:3] == 'win':
            self.fourcc = 1
        else:
            self.fourcc = -1
        
        self.fh = FileHandler(self.data_dir, self.mouse, self.n)

        self.framei = 0
        self.load_time()
        self.consecutive_skip_threshold = (self.fs/self.resample) * consecutive_skip_threshold
        self.load_background()
        self.height, self.width = self.background.shape
        self.mov = VideoCapture(self.fh.get_path(self.fh.TRIAL, self.fh.MOV))
        self.mov.read()  # discard the first frame
        self.time = self.time[1:]  # drop its timestamp to stay aligned
        #self.get_frame(self.mov,n=40) #MUST ADJUST TIME IF USING THIS
        self.load_pts(mode=point_mode)
        self.make_rooms()

    def end(self):
        self.results = dict(pos=self.pos, time=np.array(self.t)-self.t[0], guess=self.guess, heat=self.heat, contour=self.contour, pct_xadj=self.pct_xadj)
        np.savez(self.fh.make_path('tracking.npz'), **self.results)
        savemat(self.fh.make_path('tracking.mat'), self.results)
        
        self.mov.release()
        destroyAllWindows()
    def man_update(self, d):
        for k,v in d.items():
            setattr(self,k,v)
    def make_rooms(self):
        self.path_x = mpl_path.Path(self.pts[np.array([self.xmli,self.xoli,self.xori,self.xmri])])
        self.path_y = mpl_path.Path(self.pts[np.array([self.ymli,self.yoli,self.yori,self.ymri])])
        self.path_z = mpl_path.Path(self.pts[np.array([self.zmli,self.zoli,self.zori,self.zmri])])

        #experimental: hand in frame on x room
        self.path_x_adj = mpl_path.Path(self.pts[np.array([self.xoli,self.xoli_adj,self.xori_adj,self.xori])])
        self.xadj_mask = np.zeros((self.height,self.width))
        for iy in xrange(self.xadj_mask.shape[0]):
            for ix in xrange(self.xadj_mask.shape[1]):
                self.xadj_mask[iy,ix] = self.path_x_adj.contains_point([ix,iy])
        self.xadj_idxs = np.squeeze(np.argwhere(self.xadj_mask==True))

        self.border_mask = np.zeros((self.height,self.width))
        pthpts = self.pts[np.array([self.yoli_adj,self.yori_adj,self.ymri,self.ycri,self.zmli,self.zoli_adj,self.zori_adj,self.zmri,self.zcri,self.xmli,self.xoli_adj,self.xori_adj,self.xmri,self.xcri,self.ymli])]
        pth = mpl_path.Path(pthpts)
        for iy in xrange(self.border_mask.shape[0]):
            for ix in xrange(self.border_mask.shape[1]):
                self.border_mask[iy,ix] = pth.contains_point([ix,iy])
    def classify_pts(self):
        #stored in (x,y)
        #c: center
        #m: middle
        #o: out
        #x: bottom arm, y: left arm, z: right arm
        #l: left when going down arm, r: right when going down arm
        #pt is: [x/y/z c/m/o l/r]
        X,Y = 0,1
        def nn(pidx,n,ex=[]):
            #idxs of n closest pts to p, excluding all idxs in ex
            p = self.pts[pidx]
            ds = np.array([dist(pp,p) for pp in self.pts])
            idxs =  np.argsort(ds)
            idxs = np.array([i for i in idxs if i not in ex])
            return idxs[:n]
        def sortby(pidxs, dim):
            pts = self.pts[np.array(pidxs)]
            return pidxs[np.argsort(pts[:,dim])]
        dists = np.array([dist(self.pts_c, p) for p in self.pts])
        c3i = self.c3i[np.argsort(self.pts[self.c3i][:,0])]
        m6i = self.m6i
        o6i = self.o6i
        
        #classify them:
        xcri=ycli=c3i[0]
        ycri=zcli=c3i[1]
        zcri=xcli=c3i[2]
        temp = nn(xcri, 2, ex=c3i)
        ymli,xmri = sortby(temp, Y)
        temp = nn(ycri, 2, ex=c3i)
        ymri,zmli = sortby(temp, X)
        temp = nn(zcri, 2, ex=c3i)
        zmri,xmli = sortby(temp, Y)
        cm9 = [xcri,ycri,zcri,xmri,xmli,ymri,ymli,zmri,zmli]
#.........rest of the code omitted.........
Developer: bensondaled, Project: three-chamber, Lines: 103, Source: ymaze_track.py


Note: the cv2.VideoCapture.release examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many programmers, and copyright remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.