

Python cv2.accumulateWeighted Method Code Examples

This article collects typical usage examples of the cv2.accumulateWeighted method in Python. If you are wondering what cv2.accumulateWeighted does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples for the cv2 module that provides this method.


The following presents 11 code examples of the cv2.accumulateWeighted method, sorted by popularity by default.
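
Before turning to the project snippets, the sketch below illustrates the basic pattern that all of them share: cv2.accumulateWeighted maintains a floating-point running average of incoming frames, and the difference between the current frame and that average acts as a simple motion mask. This is a minimal illustration rather than code from any of the projects listed here; the weight of 0.5 and the camera index 0 are assumptions.

import cv2

def running_average_demo(alpha=0.5):
    # Minimal running-average sketch; alpha and the camera index are assumptions.
    cap = cv2.VideoCapture(0)
    avg = None
    while True:
        grabbed, frame = cap.read()
        if not grabbed:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        if avg is None:
            # accumulateWeighted needs a floating-point accumulator
            avg = gray.astype("float")
            continue
        # updates in place: avg = alpha * gray + (1 - alpha) * avg
        cv2.accumulateWeighted(gray, avg, alpha)
        delta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
        cv2.imshow("frame delta", delta)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()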

Example 1: prediction

# Module to import: import cv2 [as alias]
# Or: from cv2 import accumulateWeighted [as alias]
def prediction(self, image):
        # convert to grayscale and blur to suppress sensor noise
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.GaussianBlur(image, (21, 21), 0)
        # initialize the running-average background on the first frame
        if self.avg is None:
            self.avg = image.copy().astype(float)
        # fold the current frame into the running average, then compute
        # the per-pixel difference against that background
        cv2.accumulateWeighted(image, self.avg, 0.5)
        frameDelta = cv2.absdiff(image, cv2.convertScaleAbs(self.avg))
        # threshold the delta (DELTA_THRESH is defined elsewhere in the
        # source project), dilate to fill holes, and extract contours
        thresh = cv2.threshold(
                frameDelta, DELTA_THRESH, 255,
                cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(
                thresh.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        # reset the background to the current frame for the next call
        self.avg = image.copy().astype(float)
        return cnts
Author: cristianpb, Project: object-detection, Lines: 19, Source: motion.py

Example 2: run_avg

# Module to import: import cv2 [as alias]
# Or: from cv2 import accumulateWeighted [as alias]
def run_avg(image, accumWeight):
    global bg
    # initialize the background
    if bg is None:
        bg = image.copy().astype("float")
        return

    # compute weighted average, accumulate it and update the background
    cv2.accumulateWeighted(image, bg, accumWeight)

#---------------------------------------------
# To segment the region of hand in the image
#--------------------------------------------- 
Author: Gogul09, Project: gesture-recognition, Lines: 15, Source: recognize.py
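
In the gesture-recognition snippets (Examples 2, 3 and 7), run_avg is driven by a capture loop that feeds the first few dozen frames into the running average to calibrate the background before any hand segmentation is attempted. The loop below is a hedged sketch of that calibration phase rather than code from the repository; the frame count of 30, the region-of-interest coordinates, and the threshold of 25 are illustrative assumptions.

import cv2

bg = None

def run_avg(image, accumWeight):
    # same idea as the helper above: build or refresh the background model
    global bg
    if bg is None:
        bg = image.copy().astype("float")
        return
    cv2.accumulateWeighted(image, bg, accumWeight)

def calibrate_and_segment():
    # Illustrative only: frame count, ROI coordinates and threshold are assumptions.
    cap = cv2.VideoCapture(0)
    num_frames = 0
    while True:
        grabbed, frame = cap.read()
        if not grabbed:
            break
        roi = frame[10:300, 300:600]                 # assumed hand region
        gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)
        if num_frames < 30:
            run_avg(gray, 0.5)                       # calibration phase
        else:
            # once calibrated, the hand is the thresholded difference from bg
            diff = cv2.absdiff(cv2.convertScaleAbs(bg), gray)
            thresh = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)[1]
            cv2.imshow("hand mask", thresh)
        num_frames += 1
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()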

Example 3: run_avg

# Module to import: import cv2 [as alias]
# Or: from cv2 import accumulateWeighted [as alias]
def run_avg(image, aWeight):
    global bg
    # initialize the background
    if bg is None:
        bg = image.copy().astype("float")
        return

    # compute weighted average, accumulate it and update the background
    cv2.accumulateWeighted(image, bg, aWeight)

#---------------------------------------------
# To segment the region of hand in the image
#--------------------------------------------- 
Author: Gogul09, Project: gesture-recognition, Lines: 15, Source: segment.py

Example 4: process_image

# Module to import: import cv2 [as alias]
# Or: from cv2 import accumulateWeighted [as alias]
def process_image(self, frame):
        frame = imutils.resize(frame, width=min(500, frame.shape[1]))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        if self.avg is None:
            print('Starting background model...')
            self.avg = gray.copy().astype('float')
            return frame

        cv2.accumulateWeighted(gray, self.avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))
        thresh = cv2.threshold(frameDelta, 5, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)

        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)  # handles the OpenCV 2/3/4 return formats

        for c in cnts:
            if cv2.contourArea(c) < 5000:
                continue

            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        
        return frame 
Author: isaaxug, Project: study-picamera-examples, Lines: 28, Source: motion_detector.py

Example 5: motion_detector

# Module to import: import cv2 [as alias]
# Or: from cv2 import accumulateWeighted [as alias]
def motion_detector(self, img):
        occupied = False
        # resize the frame, convert it to grayscale, and blur it
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (15, 15), 0)
     
        if self.avg is None:
            print("[INFO] starting background model...")
            self.avg = gray.copy().astype("float")
     
        # accumulate the weighted average between the current frame and
        # previous frames, then compute the difference between the current
        # frame and running average
        cv2.accumulateWeighted(gray, self.avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))
        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on thresholded image
        thresh = cv2.threshold(frameDelta, 5, 255,
            cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if len(cnts) == 2 else cnts[1]  # OpenCV 2/4 vs 3 return formats
     
        # loop over the contours
        for c in cnts:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < 5000:
                continue
            occupied = True
     
        return occupied 
Author: smellslikeml, Project: ActionAI, Lines: 34, Source: demo.py

Example 6: processFrame

# Module to import: import cv2 [as alias]
# Or: from cv2 import accumulateWeighted [as alias]
def processFrame( self, frame_in ):
        # version 1 - moving average
        if self._avg is None:
            self._avg = np.float32( frame_in )
        cv2.accumulateWeighted( frame_in, self._avg, self._speed )
        background = cv2.convertScaleAbs( self._avg )
        active_area = cv2.absdiff( frame_in, background )

        # version 2 - MOG: Gaussian Mixture-based Background/Foreground Segmentation Algorithm
        fgmask = self._fgbg.apply( frame_in ,learningRate = 0.01 )
        #active_area = cv2.bitwise_and( frame_in, frame_in, mask = fgmask )

        return fgmask 
Author: jchrisweaver, Project: vidpipe, Lines: 15, Source: BackgroundRemove.py
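
Example 6 mixes two strategies: a running average ("version 1") and an OpenCV background-subtractor object ("version 2", self._fgbg) whose construction is not shown in the snippet. The sketch below shows how such a subtractor is typically created and applied; the MOG2 constructor parameters are assumptions, while the learningRate of 0.01 mirrors the value used above.

import cv2

def mog2_demo():
    # Illustrative sketch: the constructor parameters are assumptions, since the
    # snippet above does not show how self._fgbg is initialized.
    fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=16,
                                              detectShadows=True)
    cap = cv2.VideoCapture(0)
    while True:
        grabbed, frame = cap.read()
        if not grabbed:
            break
        # learningRate mirrors the 0.01 used in Example 6
        fgmask = fgbg.apply(frame, learningRate=0.01)
        cv2.imshow("foreground mask", fgmask)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()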

Example 7: run_avg

# Module to import: import cv2 [as alias]
# Or: from cv2 import accumulateWeighted [as alias]
def run_avg(image, aWeight):
    global bg
    # initialize the background
    if bg is None:
        bg = image.copy().astype("float")
        return

    # compute weighted average, accumulate it and update the background
    cv2.accumulateWeighted(image, bg, aWeight) 
Author: SparshaSaha, Project: Hand-Gesture-Recognition-Using-Background-Elllimination-and-Convolution-Neural-Network, Lines: 11, Source: ContinuousGesturePredictor.py

Example 8: detect_change_contours

# Module to import: import cv2 [as alias]
# Or: from cv2 import accumulateWeighted [as alias]
def detect_change_contours(self, img):
        """
        Detect changed contours in frame
        :param img: current image
        :return: True if it's time to capture
        """
        # convert to gray
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        if self.avg is None:
            self.avg = gray.copy().astype("float")
            return False

        # add to accumulation model and find the change
        cv2.accumulateWeighted(gray, self.avg, 0.5)
        frame_delta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))

        # threshold, dilate and find contours
        thresh = cv2.threshold(frame_delta, self.config["delta_threshold"], 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # find largest contour
        largest_contour = self.get_largest_contour(cnts)

        if largest_contour is None:
            return False

        (x, y, w, h) = cv2.boundingRect(largest_contour)

        # if the bounding box falls outside the configured size limits, return False
        if w > self.maxWidth or w < self.minWidth or h > self.maxHeight or h < self.minHeight:
            return False
        else:
            if self.get_fake_time() - self.lastPhotoTime >= self.config['min_photo_interval_s']:
                return True

        return False 
Author: interactionresearchstudio, Project: NaturewatchCameraServer, Lines: 41, Source: ChangeDetector.py

Example 9: getBackground

# Module to import: import cv2 [as alias]
# Or: from cv2 import accumulateWeighted [as alias]
def getBackground(self):
        """
        **SUMMARY**

        Get Background of the Image. For more info read
        http://opencvpython.blogspot.in/2012/07/background-extraction-using-running.html

        **PARAMETERS**
        No Parameters

        **RETURNS**

        Image - SimpleCV.ImageClass.Image

        **EXAMPLE**

        >>> while (some_condition):
            ... img1 = cam.getImage()
            ... ts = img1.track("camshift", ts1, img, bb)
            ... img = img1
        >>> ts.getBackground().show()
        """
        imgs = self.trackImages(cv2_numpy=True)
        f = imgs[0]
        avg = np.float32(f)
        for img in imgs[1:]:
            f = img
            cv2.accumulateWeighted(f, avg, 0.01)
            res = cv2.convertScaleAbs(avg)
        return Image(res, cv2image=True) 
Author: sightmachine, Project: SimpleCV2, Lines: 32, Source: TrackSet.py

Example 10: watchDog

# Module to import: import cv2 [as alias]
# Or: from cv2 import accumulateWeighted [as alias]
def watchDog(self, imgInput):
        timestamp = datetime.datetime.now()
        gray = cv2.cvtColor(imgInput, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        if self.avg is None:
            print("[INFO] starting background model...")
            self.avg = gray.copy().astype("float")
            return 'background model'

        cv2.accumulateWeighted(gray, self.avg, 0.5)
        self.frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))

        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on thresholded image
        self.thresh = cv2.threshold(self.frameDelta, 5, 255,
            cv2.THRESH_BINARY)[1]
        self.thresh = cv2.dilate(self.thresh, None, iterations=2)
        self.cnts = cv2.findContours(self.thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        self.cnts = imutils.grab_contours(self.cnts)
        # print('x')
        # loop over the contours
        for c in self.cnts:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < 5000:
                continue
     
            # compute the bounding box for the contour, draw it on the frame,
            # and update the text
            (self.mov_x, self.mov_y, self.mov_w, self.mov_h) = cv2.boundingRect(c)
            self.drawing = 1
            
            self.motionCounter += 1
            #print(motionCounter)
            #print(text)
            self.lastMovtionCaptured = timestamp
            led.setColor(255,78,0)
            # switch.switch(1,1)
            # switch.switch(2,1)
            # switch.switch(3,1)

        if (timestamp - self.lastMovtionCaptured).seconds >= 0.5:
            led.setColor(0,78,255)
            self.drawing = 0
            # switch.switch(1,0)
            # switch.switch(2,0)
            # switch.switch(3,0)
        self.pause() 
Author: adeept, Project: Adeept_RaspTank, Lines: 51, Source: camera_opencv.py

Example 11: camshift_face_track

# Module to import: import cv2 [as alias]
# Or: from cv2 import accumulateWeighted [as alias]
def camshift_face_track():
    face_cascade = cv2.CascadeClassifier('Image_Lib/Face_Data/haarcascade_frontalface_default.xml')
    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    ALPHA = 0.5

    camera = cv2.VideoCapture(0)
    face_box = None

    #wait till first face box is available
    print "Waiting to get first face frame..."
    while face_box is None:
        grabbed, frame = camera.read()
        if not grabbed:
            raise EnvironmentError("Camera read failed!")
        image_prev = cv2.pyrDown(frame)
        face_box = utils.detect_face(face_cascade, image_prev)

    print "Face found!"
    prev_frames = image_prev.astype(np.float32)
    while (True):
        _, frame = camera.read()
        image_curr = cv2.pyrDown(frame)
        cv2.accumulateWeighted(image_curr, prev_frames, ALPHA)
        image_curr = cv2.convertScaleAbs(prev_frames)
        if face_box is not None:
            face_box = camshift_track(image_curr, face_box, termination)
            cv2.rectangle(image_curr, (face_box[0], face_box[1]), (face_box[0]+face_box[2], face_box[1] + face_box[3]),
                          (255, 0,0), 2)
            # cv2.rectangle(image_curr, (box[0], box[1]), (box[0]+box[2], box[1] + box[3]),
            #               (0, 0,255), 2)

        else:
            face_box = utils.detect_face(face_cascade, image_curr)

        cv2.imshow("Output", image_curr)
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break

        elif key & 0xFF == ord('r'):
            print "Reseting face detection!"
            face_box = None 
Author: shekkizh, Project: ImageProcessingProjects, Lines: 44, Source: FaceBlurring.py


Note: The cv2.accumulateWeighted examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.