

Python CaptureManager.enterFrame Method Code Examples

This article collects and summarizes typical usage examples of the Python method managers.CaptureManager.enterFrame. If you are wondering what CaptureManager.enterFrame does, how to use it, or where to find working examples, the hand-picked code samples below should help. You can also browse further usage examples of its containing class, managers.CaptureManager.


A total of 15 code examples of the CaptureManager.enterFrame method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Python code examples.
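
All of the examples share the same frame lifecycle: call enterFrame() to begin capturing a frame, read the frame property, process the image in place, then call exitFrame() to display, record, and release it. The following minimal sketch illustrates that pattern; it assumes the managers module used by the projects below (providing CaptureManager and WindowManager) is importable and that camera index 0 is available.

# Minimal sketch of the enterFrame()/exitFrame() pattern shared by the examples
# below. Assumes the managers module from these projects (CaptureManager,
# WindowManager) is on the import path and that camera index 0 exists.
import cv2
from managers import CaptureManager, WindowManager

def onKeypress(keycode):
    if keycode == 27:  # escape -> close the window and end the loop
        windowManager.destroyWindow()

windowManager = WindowManager('Preview', onKeypress)
captureManager = CaptureManager(cv2.VideoCapture(0), windowManager, True)

windowManager.createWindow()
while windowManager.isWindowCreated:
    captureManager.enterFrame()    # begin the frame: grab an image from the source
    frame = captureManager.frame   # the captured image (may be None)
    # ... filter or analyze frame in place here ...
    captureManager.exitFrame()     # end the frame: show it, write any video, update FPS
    windowManager.processEvents()  # poll the keyboard and dispatch onKeypress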

Example 1: Cameo

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]
class Cameo(object):
    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0), self._windowManager, True)
        # self._curveFilter = filters.EmbossFilter()

    def run(self):
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame

            filters.strokeEdges(frame, frame)
            # self._curveFilter.apply(frame, frame)

            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress(self, keycode):
        if keycode == 32: #space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9: #tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 27: # escape
            self._windowManager.destroyWindow()
Developer: hooloong, Project: gitpython, Lines of code: 30, Source: cameo.py
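
For reference, a Cameo class like this one is typically launched with a short entry point such as the following (not part of the excerpt above; a sketch assuming the class is defined in the same module, e.g. cameo.py):

# Hypothetical entry point: construct the Cameo app and start its main loop.
if __name__ == "__main__":
    Cameo().run()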

Example 2: CameoDepth

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]
class CameoDepth(Cameo):
    
    def __init__(self):
        self._windowManager = WindowManager('Cameo',
                                             self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._curveFilter = filters.BGRPortraCurveFilter()
    
    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            
            self._faceTracker.update(frame)
            faces = self._faceTracker.faces
            rects.swapRects(frame, frame,
                            [face.faceRect for face in faces])
            
            filters.strokeEdges(frame, frame)
            self._curveFilter.apply(frame, frame)
            
            if self._shouldDrawDebugRects:
                self._faceTracker.drawDebugRects(frame)
            
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
Developer: billcary, Project: OpenCV_with_Python, Lines of code: 33, Source: cameo.py

Example 3: Cameo

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]
class Cameo(object):
    def __init__(self):
        self._windowManager = WindowManager('Cameo',
        self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)
    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()

        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            # TODO: Filter the frame (Chapter 3).
            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def onKeypress (self, keycode):
        """Handle a keypress.
        space -> Take a screenshot.
        tab -> Start/stop recording a screencast.
        escape -> Quit.
        """
        if keycode == 32: # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9: # tab
            if not self._captureManager.isWritingVideo:
                 self._captureManager.startWritingVideo('screencast.avi')
            else:
                 self._captureManager.stopWritingVideo()
        elif keycode == 27: # escape
            self._windowManager.destroyWindow()
Developer: richardqlin, Project: natural_language, Lines of code: 34, Source: camera.py

Example 4: Cameo

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]
class Cameo(object):
    
    def __init__(self):
        self._windowManager = WindowManager('Cameo',
                                             self.onKeypress)
        self._captureManager = CaptureManager(
            cv2.VideoCapture(0), self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._curveFilter = filters.BGRPortraCurveFilter()
    
    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            
            if frame is not None:
                
                self._faceTracker.update(frame)
                faces = self._faceTracker.faces
                rects.swapRects(frame, frame,
                                [face.faceRect for face in faces])
            
                filters.strokeEdges(frame, frame)
                self._curveFilter.apply(frame, frame)
                
                if self._shouldDrawDebugRects:
                    self._faceTracker.drawDebugRects(frame)
            
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
    
    def onKeypress(self, keycode):
        """Handle a keypress.
        
        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        x      -> Start/stop drawing debug rectangles around faces.
        escape -> Quit.
        
        """
        if keycode == 32: # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9: # tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo(
                    'screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 120: # x
            self._shouldDrawDebugRects = \
                not self._shouldDrawDebugRects
        elif keycode == 27: # escape
            self._windowManager.destroyWindow()
Developer: sarvex, Project: pycv, Lines of code: 58, Source: cameo.py

Example 5: CameoDouble

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]
class CameoDouble(Cameo):
    
    def __init__(self):
        Cameo.__init__(self)
        self._hiddenCaptureManager = CaptureManager(
            cv2.VideoCapture(1))
    
    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            self._hiddenCaptureManager.enterFrame()
            frame = self._captureManager.frame
            hiddenFrame = self._hiddenCaptureManager.frame
            
            if frame is not None:
                if hiddenFrame is not None:
                    self._faceTracker.update(hiddenFrame)
                    hiddenFaces = self._faceTracker.faces
                    self._faceTracker.update(frame)
                    faces = self._faceTracker.faces
                
                    i = 0
                    while i < len(faces) and i < len(hiddenFaces):
                        rects.copyRect(
                            hiddenFrame, frame, hiddenFaces[i].faceRect,
                            faces[i].faceRect)
                        i += 1
                
                filters.strokeEdges(frame, frame)
                self._curveFilter.apply(frame, frame)
                
                if hiddenFrame is not None and self._shouldDrawDebugRects:
                    self._faceTracker.drawDebugRects(frame)
            
            self._captureManager.exitFrame()
            self._hiddenCaptureManager.exitFrame()
            self._windowManager.processEvents()
Developer: sarvex, Project: pycv, Lines of code: 41, Source: cameo.py

Example 6: Gui

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]

#......... part of this code is omitted here .........
        logger.debug("closing")
        if self.ebb:
            self.ebb.closeSerial()
        if self._cap.isWritingVideo:
            self._cap.stopWritingVideo()
        
        for h in logger.handlers:
            if type(h) == logging.FileHandler:
                h.close()
        
        try:
            cv2.destroyWindow('gaussian')
        except Exception as e:
            pass
            
    def isColor( self, imgIn ):
        s = np.shape(imgIn)
        if s == 3:
            try:
                ncolor = np.shape(imgIn)[2]
                boolt = int(ncolor) > 2
                return boolt
            except IndexError:
                if self.existingVid:
                    logger.warning('Video has ended')
                    self.existingVid = False
                    self.resetAll()
                else:
                    logger.warning('Issue with video input')
                    self.resetAll()
        else:
            return False
   
    def play( self ):
        
        if self.showImage:
            ## Get image from camera
            self._cap.enterFrame()
            self.currentFrame = self._cap.getFrame()
        
            self.color = self.isColor(self.currentFrame)

            t1 = time.time()

            if self.color:
                try:
                    self.currentFrame = cv2.cvtColor(self.currentFrame, cv2.COLOR_BGR2GRAY)
                except TypeError:
                    logger.exception("No Frame")
                finally:
                    self._cap.exitFrame()
            else:
                self._cap.exitFrame()
            
            if not self.runTracking: #no finder
                if self.showImage: #yes capture
                    self.ui.videoFrame.setPixmap(self._cap.convertFrame(self.currentFrame))
                    self.ui.videoFrame.setScaledContents(True)
            else: # yes finder
                ## Stop if too dark!!!
                if time.time() - self.startTrackTime < 5:
                    self.firstAvgPixIntensity = np.mean(self.currentFrame)
                    logger.warning('first avg int: %d' % self.firstAvgPixIntensity)
               ## Tracking procedure
                if time.time() - self._lastCheck >= self._sampleFreq:
                    gaussian = self._wormFinder.processFrame( self.currentFrame )
                    if gaussian is not None:
                        cv2.imshow( 'gaussian', gaussian )
                        
                    if self.motorsOn: #yes motorized
                        ## Stop motors if too dark
                        self.currentAvgPixIntensity = np.mean(self.currentFrame)
                        logger.warning('current avg int: %d' % self.currentAvgPixIntensity)
                        if self.currentAvgPixIntensity < self.firstAvgPixIntensity - 50:
                            logger.warning('Darkening of picture: motors turned off')
                            if self.motorsOn:
                                self.motorized()
                            if self._cap.isWritingVideo:
                                self.record()
                        else:
                          self._wormFinder.decideMove()
                    self._lastCheck = time.time()
                    self._wormFinder.drawDebugCropped( self.currentFrame)
                    
                    self.ui.videoFrame.setPixmap(self._cap.convertFrame(self.currentFrame))
                    self.ui.videoFrame.setScaledContents(True)

            if self._cap._fpsEstimate:
               self.ui.fps.setText( 'FPS: %0.2f' % ( self._cap._fpsEstimate ))

            if self.startRecTime:
                elapsedSec = time.time() - self.startRecTime
                elapsed = time.strftime("%H.%M.%S", time.gmtime(elapsedSec) ) 
                self.ui.lcdNumber.setNumDigits(8)
                self.ui.lcdNumber.display( elapsed )
            else:
                self.ui.lcdNumber.display("") 
        return
        
    '''
Developer: vsimonis, Project: gui-tracker, Lines of code: 104, Source: main.py

Example 7: Tracker

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]
class Tracker ( object ):
    
    def __init__( self, method, src ):

        ### Sensitivity of tracker params
        self._sampleFreq = 0.1 #in sec
        
        ### Set Camera params
        #self.resolution = (640, 480 )
        self.resolution = (1280, 960)
        source = {
            0:0, 
            1:1, 
            2:'led_move1.avi', 
            3:'screencast.avi', 
            4:'screencast 1.avi',
            5: 'shortNoBox.avi',
            6: 'longNoBox.avi',
            7: 'H299.avi',
            8: 'testRec.avi',
            9: 'longDemo.avi',
            10: 'worm2014_05_05-12-44-53.avi'
            }
        self.color = True
        self.captureSource = source[int(src)]
        
        ### Timing initialization
        self._startTime = time.time()
        self._lastCheck = self._startTime - self._sampleFreq

        ### Display params
        self.mirroredPreview = False


        ### Initialize Objects       

        ##### Windows

        self._rawWindow = WindowManager( 'RawFeed', self.onKeypress )

        ### Capture -- resolution set here
        self._cap = CaptureManager( 
            cv2.VideoCapture(self.captureSource), 
            self._rawWindow, 
            self.mirroredPreview, self.resolution)
        
        actualCols, actualRows = self._cap.getResolution()
        ## from here on out use this resolution 
        
        ### Arguments for finder
        self.finderArgs = {
            'method' : method,
            'gsize' :  45,
            'gsig' : 9,
            'window' : 3,
            'boundBoxRow' : 150,
            'boundBoxCol' : 150,
            'limRow' : 100,
            'limCol' : 100,
            'MAXONEFRAME': 500,
            'REFPING' : 600000,
            'MAXREF': 1000,
            'capCols':actualCols,
            'capRows': actualRows,
            'color' : self.color
            }

        self._wormFinder = WormFinder( **self.finderArgs )     

        ##### Debugging
        self._overlayWindow = WindowManager( 'Overlay', self.onKeypress )
        self.motorsOn = False



    def run( self ):

        # Show windows
        self._rawWindow.createWindow()
        self._overlayWindow.createWindow()

        while self._rawWindow.isWindowCreated:
            self._cap.enterFrame()
            frame = self._cap.frame

            # Probably not useful, removes errors when playing from video
#            if not self._captureManager.gotFrame:
#                self.shutDown()
#                break

            # Display raw frame to rawWindow
            
            t1 = time.time()
            # Get frame
            frame = self._cap.frame

            # Show frame to raw feed
            self._rawWindow.show(frame)

            # If tracking is enabled or motors are on, start tracking
#......... part of this code is omitted here .........
Developer: vsimonis, Project: worm2, Lines of code: 103, Source: tracker.py

Example 8: BenFinder

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]
class BenFinder(object):
    BACKGROUND_VIDEO_FNAME = "background_video.png"
    BACKGROUND_DEPTH_FNAME = "background_depth.png"
 
    def __init__(self):
        self._windowManager = WindowManager('benFinder',
                                             self.onKeypress)
        device = depth.CV_CAP_FREENECT
        #device = 1
        print "device=%d" % device
        self._captureManager = CaptureManager(
            device, self._windowManager, True)
        self._captureManager.channel = depth.CV_CAP_OPENNI_BGR_IMAGE
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._backgroundSubtract = False
        self._autoBackgroundSubtract = False
        self._curveFilter = filters.BGRPortraCurveFilter()
        self.background_video_img = None
        self.background_depth_img = None
        self.autoBackgroundImg = None
        self._ts = TimeSeries()
        self._frameCount = 0
    
    def loadBackgroundImages(self):
        """ Load the background images to be used for background subtraction
        from disk files.
        """
        self.background_video_img = cv2.imread(BenFinder.BACKGROUND_VIDEO_FNAME)
        self.background_depth_img = cv2.imread(BenFinder.BACKGROUND_DEPTH_FNAME,
                                               cv2.CV_LOAD_IMAGE_GRAYSCALE)

    def showBackgroundImage(self):
        """ Display the background image used for subtraction in a separate window
        """
        # Load the images from disk if necessary.
        if (not self.background_depth_img or not self.background_video_img):
            self.loadBackgroundImages()
        # Display the correct image
        if (self._autoBackgroundSubtract):
            cv2.imshow("Auto Background Image", self.autoBackgroundImg)
        else:
            if (self._captureManager.channel == \
                depth.CV_CAP_OPENNI_DEPTH_MAP):
                cv2.imshow("background_depth_img",self.background_depth_img)
            elif (self._captureManager.channel == \
                  depth.CV_CAP_OPENNI_BGR_IMAGE):
                cv2.imshow("background_video_img",self.background_video_img)
            else:
                print "Error - Invalid Channel %d." % \
                    self._captureManager.channel

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()

            frame = self._captureManager.frame
            
            if frame is not None:
                if (self._backgroundSubtract):
                    if (self._autoBackgroundSubtract):
                        if (self._captureManager.channel == \
                            depth.CV_CAP_OPENNI_DEPTH_MAP):
                            if (self.autoBackgroundImg == None):
                                self.autoBackgroundImg = numpy.float32(frame)
                            # First work out the region of interest by 
                            #    subtracting the fixed background image 
                            #    to create a mask.
                            absDiff = cv2.absdiff(frame,self.background_depth_img)
                            benMask,maskArea = filters.getBenMask(absDiff,8)

                            cv2.accumulateWeighted(frame,
                                                   self.autoBackgroundImg,
                                                   0.05)
                            # Convert the background image into the same format
                            # as the main frame.
                            bg = cv2.convertScaleAbs(self.autoBackgroundImg,
                                                     alpha=1.0)
                            # Subtract the background from the frame image
                            cv2.absdiff(frame,bg,frame)
                            # Scale the difference image to make it more sensitive
                            # to changes.
                            cv2.convertScaleAbs(frame,frame,alpha=100)
                            #frame = cv2.bitwise_and(frame,frame,dst=frame,mask=benMask)
                            frame = cv2.multiply(frame,benMask,dst=frame,dtype=-1)
                            bri = filters.getMean(frame,benMask)
                            #print "%4.0f, %3.0f" % (bri[0],self._captureManager.fps)
                            self._ts.addSamp(bri[0])
                            if (self._frameCount < 15):
                                self._frameCount = self._frameCount +1
                            else:
                                self._ts.plotRawData()
                                self._ts.findPeaks()
                                self._frameCount = 0
                        else:
                            print "Auto background subtract only works for depth images!"
                    else:
                        if (self._captureManager.channel == \
#......... part of this code is omitted here .........
Developer: OpenSeizureDetector, Project: OpenSeizureDetector, Lines of code: 103, Source: benFinder_debug.py

Example 9: CaptureManager

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with OpenSeizureDetector.  If not, see <http://www.gnu.org/licenses/>.
##############################################################################
#
from managers import CaptureManager
import depth

device = depth.CV_CAP_FREENECT
#device = 1
print "device=%d" % device
captureManager = CaptureManager(
    device, None, True)
captureManager.channel = depth.CV_CAP_OPENNI_BGR_IMAGE

captureManager.enterFrame()

#frame = captureManager.frame

captureManager.channel = depth.CV_CAP_OPENNI_DEPTH_MAP
captureManager.writeImage("kintest1.png")
captureManager.exitFrame()
captureManager.enterFrame()
captureManager.channel = depth.CV_CAP_OPENNI_BGR_IMAGE
captureManager.writeImage("kintest2.png")
captureManager.exitFrame()

Developer: attawit, Project: OpenSeizureDetector, Lines of code: 31, Source: kintest.py

Example 10: Gui

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]

#......... part of this code is omitted here .........
    def set( self ):
        if self.ui.widthLine.text():
            try:
                float( self.ui.widthLine.text() )
            except ValueError as e:
                QtWidgets.QMessageBox.information( self, "Invalid Width", "Cannot convert that width to a float! Make sure you enter a number (decimals accepted)")
                return
       
        if self.ui.widthLine.text() and self.ui.comboBox.currentText():
            self.resolutionSelect()
            # Instantiate motors    
            try:
                self.ebb = EasyEBB( resolution = self.actualRes, sizeMM = float(self.ui.widthLine.text() ) )
                  

                self.setAble('settings', False)
                self.setAble('motors', True)
                self.setAble('center', True)
            except serial.SerialException as e:
                logger.exception(e)
                QtWidgets.QMessageBox.information( self, "Motors Issue",
                                                "Please connect motors or restart them, having issues connecting now")
        else:
            QtWidgets.QMessageBox.information( self, "Validation",
                                                  "Please enter a measurement for the width and double-check your resolution choice") 

        

    def closeEvent ( self, event):
        logger.debug("closing")
        if self._cap._capture.isOpened():
            self._cap._capture.release()
        #if self._cap:
        #    cv2.destroyWindow("Camera Display")
        #    cv2.destroyAllWindows()
        if self.ebb:
            self.ebb.closeSerial()
    
    def isColor( self, imgIn ):
        s = np.shape(imgIn)
        if  np.size(s) == 3:
            try:
                ncolor = np.shape(imgIn)[2]
                boolt = int(ncolor) > 2
                return boolt
            except IndexError:
                QtWidgets.QMessageBox.information( self, "Camera Issue", "Please Check Camera Source")
                logger.error("Something wrong with camera input")
                self.showImage = False
        else:
            return False
         
        
    def play( self ):
        if self.showImage:
            self._cap.enterFrame()
            self.currentFrame = self._cap.frame
        
            self.color = self.isColor(self.currentFrame)
            t1 = time.time()
            if self.color:
                try:
                    self.currentFrame = cv2.cvtColor(self.currentFrame, cv2.COLOR_BGR2GRAY)
                    if self.currentFrame is not None:
                        self.currentFrame = self.p.draw(self.currentFrame, 'black-1')
                        self.currentFrame = self.c.draw(self.currentFrame, 'black-1')
                   
                        self.ui.videoFrame.setPixmap(self._cap.convertFrame(self.currentFrame))
                        self.ui.videoFrame.setScaledContents(True)
                    else:
                        QtWidgets.QMessageBox.information( self, "Camera Issue", "Please Check Camera Source")
                        logger.error("Something wrong with camera input")
                
                    #cv2.imshow( "Camera Display",self.currentFrame)
                except TypeError:
                    logger.exception("No Frame")
                    QtWidgets.QMessageBox.information( self, "Camera Issue", "Please Check Camera Source")

                finally:
                    self._cap.exitFrame()
            else:
                try:
                    if self.currentFrame is not None:
                        self.currentFrame = self.p.draw(self.currentFrame, 'black-1')
                        self.currentFrame = self.c.draw(self.currentFrame, 'black-1')
                        self.ui.videoFrame.setPixmap(self._cap.convertFrame(self.currentFrame))
                        self.ui.videoFrame.setScaledContents(True)
                    else:
                        QtWidgets.QMessageBox.information( self, "Camera Issue", "Please Check Camera Source")
                        logger.error("Something wrong with camera input")
                        logger.exception("No Frame")
                        self.showImage = False

                        #cv2.imshow( "Camera Display",self.currentFrame)
                except TypeError:
                    logger.exception("No Frame")
                finally:
                    self._cap.exitFrame()

            self._cap.exitFrame()
Developer: vsimonis, Project: gui-calibration, Lines of code: 104, Source: main.py

Example 11: Facedetect

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]
class Facedetect(object):
    
    def __init__(self):
        self._windowManager = WindowManager('Facedetect', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(camera_nr), self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = True
        self._curveFilter = filters.BGRPortraCurveFilter()

    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            
            if frame is not None:
                
                t = cv2.getTickCount()
                self._faceTracker.update(frame)
                faces = self._faceTracker.faces
                t = cv2.getTickCount() - t
                print("time taken for detection = %gms" % (t/(cv2.getTickFrequency())*1000.))
                
                # uncomment this line for swapping faces
                #rects.swapRects(frame, frame, [face.faceRect for face in faces])
                
                #filters.strokeEdges(frame, frame)
                #self._curveFilter.apply(frame, frame)
                
                if self._shouldDrawDebugRects:
                    self._faceTracker.drawDebugRects(frame)
                    self._faceTracker.drawLinesFromCenter(frame)
                
            self._captureManager.exitFrame()
            self._windowManager.processEvents()
    
    def onKeypress(self, keycode):
        """Handle a keypress.
        
        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        x      -> Start/stop drawing debug rectangles around faces.
        escape -> Quit.
        
        """
        if keycode == 32: # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9: # tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo(
                    'screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 120: # x
            self._shouldDrawDebugRects = \
                not self._shouldDrawDebugRects
        elif keycode == 27: # escape
            self._windowManager.destroyWindow()
            # When everything is done, release the capture
            self._captureManager.release()
Developer: StefQM, Project: facedetect, Lines of code: 63, Source: facedetect.py

Example 12: Tracker

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]
class Tracker(object):
    def __init__(self, method, src):
        self.color = True
        self.motorsOn = False

        ### Sensitivity of tracker params
        self._sampleFreq = 0.1  # in sec

        ### Set Camera params
        # self.resolution = (640, 480 )
        self.resolution = (1280, 960)
        source = {
            0: 0,
            1: 1,
            2: "led_move1.avi",
            3: "screencast.avi",
            4: "screencast 1.avi",
            5: "shortNoBox.avi",
            6: "longNoBox.avi",
            7: "H299.avi",
            8: "testRec.avi",
            9: "longDemo.avi",
        }
        self.captureSource = source[int(src)]

        ### Timing initialization
        self._startTime = time.time()
        self._lastCheck = self._startTime - self._sampleFreq

        ### Display params
        self.mirroredPreview = False

        ### Initialize Objects

        ##### Windows

        self._rawWindow = WindowManager("RawFeed", self.onKeypress)

        ### Capture -- resolution set here
        self._cap = CaptureManager(
            cv2.VideoCapture(self.captureSource), self._rawWindow, self.mirroredPreview, self.resolution
        )

        actualCols, actualRows = self._cap.getResolution()
        self.centerPt = utils.Point(actualCols / 2, actualRows / 2)

        ## from here on out use this resolution
        boundCols = 600
        boundRows = 600
        ### Arguments for finder
        # --> Pairs are always COLS, ROWS !!!!!!!
        self.finderArgs = {
            "method": method,
            "gsize": 45,
            "gsig": 9,
            "window": 3,
            "MAXONEFRAME": 500,
            "REFPING": 600000,
            "MAXREF": 1000,
            "captureSize": utils.Rect(actualCols, actualRows, self.centerPt),
            "cropRegion": utils.Rect(100, 100, self.centerPt),
            "decisionBoundary": utils.Rect(boundCols, boundRows, self.centerPt),
            "color": self.color,
            "motorsOn": self.motorsOn,
        }

        self._wormFinder = WormFinder(**self.finderArgs)

        ##### Debugging
        #        self._gaussianWindow = WindowManager('Gaussian', self.onKeypress)
        self._overlayWindow = WindowManager("Overlay", self.onKeypress)

    def run(self):

        # Show windows
        self._rawWindow.createWindow()
        self._overlayWindow.createWindow()
        i = 0
        while self._rawWindow.isWindowCreated:
            self._cap.enterFrame()
            frame = self._cap.frame

            # Probably not useful, removes errors when playing from video
            #            if not self._captureManager.gotFrame:
            #                self.shutDown()
            #                break

            # Display raw frame to rawWindow

            t1 = time.time()
            # Get frame
            frame = self._cap.frame

            # Show frame to raw feed
            self._rawWindow.show(frame)

            # If tracking is enabled or motors are on, start tracking
            if time.time() - self._lastCheck >= self._sampleFreq:
                if self.finderArgs["method"] in ["lazyc", "lazyd", "lazy"]:
                    self.gaussian = self._wormFinder.processFrame(frame)
#......... part of this code is omitted here .........
Developer: vsimonis, Project: worm3, Lines of code: 103, Source: tracker.py

Example 13: Browser

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]
class Browser(object):
    
    def __init__(self,video_source):  
        self._windowManager = WindowManager('Browser', self.onKeypress)
        self._captureManager = CaptureManager(video_source, self._windowManager, True)
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False
        self._curveFilter = filters.BGRPortraCurveFilter()
    
    def run(self):
        """Run the main loop."""
        self._windowManager.createWindow()
        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame
            
            if frame is not None:
                print "got frame" 
                self._faceTracker.update(frame)
                faces = self._faceTracker.faces
                rects.swapRects(frame, frame,
                                [face.faceRect for face in faces])
            
                #filters.strokeEdges(frame, frame)
                #self._curveFilter.apply(frame, frame)
                
                if self._shouldDrawDebugRects:
                    self._faceTracker.drawDebugRects(frame)
            else:
                print "got None frame"
                print "press any key to exit."
                cv2.waitKey(0)
                break
            self._captureManager.exitFrame()
            waitkey_time=1
            if self._captureManager._video_source!=0:   
                waitkey_time=500
            self._windowManager.processEvents(waitkey_time)
    
    def onKeypress(self, keycode):
        """Handle a keypress.
        
        space  -> Take a screenshot.
        tab    -> Start/stop recording a screencast.
        x      -> Start/stop drawing debug rectangles around faces.
        escape -> Quit.
        
        """
        if keycode == 32: # space
            self._captureManager.writeImage('screenshot.png')
        elif keycode == 9: # tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo(
                    '/Users/xcbfreedom/Documents/screencast.avi')
            else:
                self._captureManager.stopWritingVideo()
        elif keycode == 120: # x
            self._shouldDrawDebugRects = \
                not self._shouldDrawDebugRects
        elif keycode == 27: # escape
            self._windowManager.destroyWindow()
Developer: hphp, Project: Kaggle, Lines of code: 63, Source: browse_video.py

Example 14: BenFinder

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]
class BenFinder(object):
    configFname = "config.ini"
    configSection = "benFinder"

    ALARM_STATUS_OK = 0   # All ok, no alarms.
    ALARM_STATUS_WARN = 1 # Warning status
    ALARM_STATUS_FULL = 2 # Full alarm status. 
    ALARM_STATUS_NOT_FOUND = 3 # Benjamin not found in image 
                               # (area below config area_threshold parameter)

    def __init__(self,save=False, inFile = None):
        print "benFinder.__init__()"
        print os.path.realpath(__file__)
        configPath = "%s/%s" % (os.path.dirname(os.path.realpath(__file__)),
                                self.configFname)
        print configPath
        self.cfg = ConfigUtil(configPath,self.configSection)

        self.debug = self.cfg.getConfigBool("debug")
        if (self.debug): print "Debug Mode"

        self._wkdir = self.cfg.getConfigStr("working_directory")
        if (self.debug): print "working_directory=%s\n" % self._wkdir
        self._tmpdir = self.cfg.getConfigStr("tmpdir")
        if (self.debug): print "tmpdir=%s\n" % self._tmpdir


        # Check if we are running from live kinect or a file.
        if (inFile):
            device = depth.CV_CAP_FILE
        else:
            device = depth.CV_CAP_FREENECT

        # Initialise the captureManager
        self._captureManager = CaptureManager(
            device, None, True, inFile=inFile)
        self._captureManager.channel = depth.CV_CAP_OPENNI_DEPTH_MAP

        # If we are runnign from a file, use the first frame as the
        # background image.
        if (inFile):
            self.saveBgImg()

        # If we have asked to save the background image, do that, and exit,
        # otherwise initialise the seizure detector.
        if (save):
            self.saveBgImg()
        else:
            self.loadBgImg()
            self.autoBackgroundImg = None
            self._status = self.ALARM_STATUS_OK
            self._ts = TimeSeries(tslen=self.cfg.getConfigInt("timeseries_length"))
            self._frameCount = 0
            self._outputFrameCount = 0
            self._nPeaks = 0
            self._ts_time = 0
            self._rate = 0
            self._ws = webServer.benWebServer(self)
            self._ws.setBgImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("background_depth")))
            self._ws.setChartImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("chart_fname")))
            self._ws.setRawImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("raw_image_fname")))
            self._ws.setMaskedImg("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("masked_image_fname")))
            self._ws.setDataFname("%s/%s" % (self._tmpdir,
                    self.cfg.getConfigStr("data_fname")))
            self._ws.setAnalysisResults({})
            webServer.setRoutes(self._ws)
            self.run()
    
    def run(self):
        """Run the main loop."""
        while(True):
            self._captureManager.enterFrame()

            frame = self._captureManager.frame
            
            if frame is not None:
                if (self.autoBackgroundImg == None):
                    self.autoBackgroundImg = numpy.float32(frame)
                rawFrame = frame.copy()
                # First work out the region of interest by 
                #    subtracting the fixed background image 
                #    to create a mask.
                #print frame
                #print self._background_depth_img
                absDiff = cv2.absdiff(frame,self._background_depth_img)
                benMask,maskArea = filters.getBenMask(absDiff,8)

                cv2.accumulateWeighted(frame,
                                       self.autoBackgroundImg,0.05)
                # Convert the background image into the same format
                # as the main frame.
                #bg = self.autoBackgroundImg
                bg = cv2.convertScaleAbs(self.autoBackgroundImg,
                                         alpha=1.0)
                # Subtract the background from the frame image
                cv2.absdiff(frame,bg,frame)
#......... part of this code is omitted here .........
Developer: OpenSeizureDetector, Project: OpenSeizureDetector, Lines of code: 103, Source: benFinder.py

Example 15: Cameo

# Required import: from managers import CaptureManager [as alias]
# Or: from managers.CaptureManager import enterFrame [as alias]
class Cameo(object):

    def __init__(self):
        self._windowManager = WindowManager('Cameo', self.onKeypress)
        self._captureManager = CaptureManager(cv2.VideoCapture(0),
                    self._windowManager, True)
        self._curveFilter = filters.BGRProviaCurveFilter()
        self._faceTracker = FaceTracker()
        self._shouldDrawDebugRects = False

    def run(self):
        """ Run the main loop """

        self._windowManager.createWindow()
        print("Window '{}' Created".format(self._windowManager.windowName))
        print("\n{}\n{}\n{}\n{}".format("Controls:",
                "space   --> Take a screenshot",
                "tab     --> Start/stop recording a screencast",
                "escape  --> Quit"))

        while self._windowManager.isWindowCreated:
            self._captureManager.enterFrame()
            frame = self._captureManager.frame

            self._faceTracker.update(frame)
            faces = self._faceTracker.faces
            rects.swapRects(frame, frame, [face.faceRect for face in faces])

            # Add filtering to the frame
            filters.strokeEdges(frame,frame)
            self._curveFilter.apply(frame,frame)

            if self._shouldDrawDebugRects:
                self._faceTracker.drawDebugRects(frame)

            self._captureManager.exitFrame()
            self._windowManager.processEvents()

    def stop(self):
        print("[CAMEO] closing all processes")
        self._captureManager._capture.release()
        self._windowManager.destroyWindow()


    def onKeypress(self, keycode):

        """ Handle a keypress

        space   --> Take a screenshot
        tab     --> Start/stop recording a screencast
        x       --> Toggle drawing debug rectangles around faces
        escape  --> Quit
        """

        if keycode == 32: # Space
            self._captureManager.writeImage('screenshot.png');
            print("Writing image to file....")
        elif keycode == 9: # Tab
            if not self._captureManager.isWritingVideo:
                self._captureManager.startWritingVideo('screencast.avi')
                print("Writing video to file...")
            else:
                self._captureManager.stopWritingVideo()
                print("Stopped writing video")
        elif keycode == 120: # x
            self._shouldDrawDebugRects = not self._shouldDrawDebugRects
            print("Toggled drawing rectangles")
        elif keycode == 27: # escape
            print("Closing Window...")
            self._windowManager.destroyWindow()
Developer: UcheEke, Project: pyOCVExamples, Lines of code: 72, Source: cameo.py


Note: The managers.CaptureManager.enterFrame examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; do not reproduce without permission.