

Python cv2.createBackgroundSubtractorMOG2 Method Code Examples

This article collects typical usage examples of the cv2.createBackgroundSubtractorMOG2 method in Python. If you are struggling with questions such as: What exactly does cv2.createBackgroundSubtractorMOG2 do? How do I call it? What does real-world usage look like? then the curated code examples below may help. You can also explore further usage examples from the cv2 module it belongs to.


Below are 12 code examples of the cv2.createBackgroundSubtractorMOG2 method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.

Example 1: create_background

# Required module: import cv2 [as alias]
# Or: from cv2 import createBackgroundSubtractorMOG2 [as alias]
def create_background(video_frames):
    # type: (np.ndarray) -> np.ndarray
    """
    Create the background of a video via MOGs.

    :param video_frames: ordered RGB frames (i.e., a video).
    :return: the estimated background of the video, in RGB.
    """
    mog = cv2.createBackgroundSubtractorMOG2()
    for frame in video_frames:
        # Feed each frame to the mixture-of-Gaussians model (OpenCV expects BGR).
        img = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        mog.apply(img)

    # Get the estimated background image from the model
    background = mog.getBackgroundImage()

    return cv2.cvtColor(background, cv2.COLOR_BGR2RGB)
Author: aimagelab, Project: novelty-detection, Lines of code: 19, Source: shanghaitech.py
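For context, a minimal usage sketch of the function above; the input path video.avi is a placeholder, and frames are converted to RGB because create_background converts them back to BGR internally:

import cv2

cap = cv2.VideoCapture('video.avi')  # placeholder path, not from the original project
frames = []
while True:
    ret, frame_bgr = cap.read()
    if not ret:
        break
    # The function expects RGB frames and returns an RGB background.
    frames.append(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB))
cap.release()

background_rgb = create_background(frames)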

Example 2: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import createBackgroundSubtractorMOG2 [as alias]
def __init__(self, vid_file):  # vid_file = 'videos/traffic.avi'
        self.cnt_up = 0
        self.cnt_down = 0
        self.zone1 = (100, 200)
        self.zone2 = (450, 100)

        self.cap = cv2.VideoCapture(vid_file)

        # Print the VideoCapture properties to the console if needed:
        # for i in range(19):
        #     print(i, self.cap.get(i))

        self.w = self.cap.get(3)   # CAP_PROP_FRAME_WIDTH
        self.h = self.cap.get(4)   # CAP_PROP_FRAME_HEIGHT
        self.frameArea = self.h * self.w
        self.areaTH = self.frameArea / 200
        print('Area Threshold', self.areaTH)

        # Counting lines (vehicles crossing these are counted as up/down)
        self.line_up = int(2 * (self.h / 5))
        self.line_down = int(3 * (self.h / 5))

        self.up_limit = int(1 * (self.h / 5))
        self.down_limit = int(4 * (self.h / 5))

        self.line_down_color = (255, 0, 0)
        self.line_up_color = (0, 0, 255)
        self.pt1 = [0, self.line_down]
        self.pt2 = [self.w, self.line_down]
        self.pts_L1 = np.array([self.pt1, self.pt2], np.int32)
        self.pts_L1 = self.pts_L1.reshape((-1, 1, 2))
        self.pt3 = [0, self.line_up]
        self.pt4 = [self.w, self.line_up]
        self.pts_L2 = np.array([self.pt3, self.pt4], np.int32)
        self.pts_L2 = self.pts_L2.reshape((-1, 1, 2))

        self.pt5 = [0, self.up_limit]
        self.pt6 = [self.w, self.up_limit]
        self.pts_L3 = np.array([self.pt5, self.pt6], np.int32)
        self.pts_L3 = self.pts_L3.reshape((-1, 1, 2))
        self.pt7 = [0, self.down_limit]
        self.pt8 = [self.w, self.down_limit]
        self.pts_L4 = np.array([self.pt7, self.pt8], np.int32)
        self.pts_L4 = self.pts_L4.reshape((-1, 1, 2))

        # Create the background subtractor
        self.fgbg = cv2.createBackgroundSubtractorMOG2()

        self.kernelOp = np.ones((3, 3), np.uint8)
        self.kernelOp2 = np.ones((5, 5), np.uint8)
        self.kernelCl = np.ones((11, 11), np.uint8)

        # Variables
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        self.vehicles = []
        self.max_p_age = 5
        self.pid = 1 
Author: rahatzamancse, Project: Traffic-Rules-Violation-Detection, Lines of code: 59, Source: violation_detection.py
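A side note on the magic numbers above: cap.get(3) and cap.get(4) read the frame width and height. A short sketch with the named constants, which should behave identically on OpenCV 3 and later:

import cv2

cap = cv2.VideoCapture('videos/traffic.avi')  # path taken from the example's own comment
w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)   # equivalent to cap.get(3)
h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # equivalent to cap.get(4)
print('Frame size:', w, 'x', h)
cap.release()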

Example 3: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import createBackgroundSubtractorMOG2 [as alias]
def __init__(self):
        """Initialize variables used by Detectors class
        Args:
            None
        Return:
            None
        """
        self.fgbg = cv2.createBackgroundSubtractorMOG2() 
Author: srianant, Project: kalman_filter_multi_object_tracking, Lines of code: 10, Source: detectors.py
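The class above only creates the subtractor; a hedged sketch of the typical per-frame loop that would consume it (the webcam index and the display code are assumptions, not part of the Detectors class):

import cv2

fgbg = cv2.createBackgroundSubtractorMOG2()
cap = cv2.VideoCapture(0)  # assumed webcam source
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # Each apply() call updates the background model and returns a foreground mask.
    fgmask = fgbg.apply(frame)
    cv2.imshow('foreground mask', fgmask)
    if cv2.waitKey(1) & 0xFF == 27:  # Esc quits
        break
cap.release()
cv2.destroyAllWindows()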

Example 4: __init__

# Required module: import cv2 [as alias]
# Or: from cv2 import createBackgroundSubtractorMOG2 [as alias]
def __init__(self, min_accuracy, min_blend_area, kernel_fill=20, dist_threshold=15000, history=400):
        self.min_accuracy = max(min_accuracy, 0.7)
        self.min_blend_area = min_blend_area
        self.kernel_clean = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
        self.kernel_fill = np.ones((kernel_fill, kernel_fill), np.uint8)
        self.dist_threshold = dist_threshold
        self.history = history

        # read https://docs.opencv.org/3.3.0/d2/d55/group__bgsegm.html#gae561c9701970d0e6b35ec12bae149814
        try:
            self.fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(history=self.history, nmixtures=5, backgroundRatio=0.7, noiseSigma=0)
        except AttributeError:
            print('It looks like your OpenCV version does not include bgsegm. Switching to createBackgroundSubtractorMOG2')
            self.fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False, history=self.history)

        # Alternative subtractors:
        #self.fgbg = cv2.bgsegm.createBackgroundSubtractorGMG(decisionThreshold=0.98, initializationFrames=10)
        #self.fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False, history=self.history)
        #self.fgbg = cv2.bgsegm.createBackgroundSubtractorGSOC(noiseRemovalThresholdFacBG=0.01, noiseRemovalThresholdFacFG=0.0001)
        #self.fgbg = cv2.bgsegm.createBackgroundSubtractorCNT(minPixelStability=5, useHistory=True, maxPixelStability=5 * 60, isParallel=True)
        #self.fgbg = cv2.createBackgroundSubtractorKNN(detectShadows=False, history=self.history, dist2Threshold=self.dist_threshold)
        #fgbg = cv2.bgsegm.createBackgroundSubtractorLSBP()

        utils.success_print('Background subtraction initialized')
Author: pliablepixels, Project: zmMagik, Lines of code: 30, Source: detect_background.py
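The try/except above works because cv2.bgsegm is only present when the contrib modules are installed (the opencv-contrib-python wheel on PyPI, rather than plain opencv-python). A minimal sketch of the same capability check in isolation, under that assumption:

import cv2

# cv2.bgsegm ships with the contrib build of OpenCV.
if hasattr(cv2, 'bgsegm'):
    fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(history=400)
else:
    # Fall back to MOG2, which lives in the core module.
    fgbg = cv2.createBackgroundSubtractorMOG2(history=400, detectShadows=False)
print(type(fgbg).__name__)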

Example 5: detected_frame

# Required module: import cv2 [as alias]
# Or: from cv2 import createBackgroundSubtractorMOG2 [as alias]
def detected_frame(self, preprocessed_faced_covered_input_frame):
        """Function for removing background from input frame. """
        if self.flag_handler.background_capture_required is True:
            self._bg_model = cv2.createBackgroundSubtractorMOG2(0, self._bg_Sub_Threshold)
            self.flag_handler.background_capture_required = False
        if self._bg_model is not None:
            fgmask = self._bg_model.apply(preprocessed_faced_covered_input_frame, learningRate=self._learning_Rate)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            res = cv2.bitwise_and(preprocessed_faced_covered_input_frame, preprocessed_faced_covered_input_frame,
                                  mask=fgmask)
            # Clip the ROI: keep rows up to cap_region_y_end and columns from cap_region_x_begin onward.
            height, width = preprocessed_faced_covered_input_frame.shape[:2]
            self._input_frame_with_hand = res[0:int(self._cap_region_y_end * height),
                                              int(self._cap_region_x_begin * width):width]
Author: GalBrandwine, Project: HalloPy, Lines of code: 20, Source: controller.py

Example 6: test_contour_extreme_point_tracking

# Required module: import cv2 [as alias]
# Or: from cv2 import createBackgroundSubtractorMOG2 [as alias]
def test_contour_extreme_point_tracking(self):
        """Test for tracking extreme_points without optical flow (e.g until calibrated).  """
        # setup
        test_path = utils.get_full_path('docs/material_for_testing/back_ground_removed_frame.jpg')
        test_image = cv2.imread(test_path)

        # todo: use mockito here to mock preprocessing elements
        flags_handler = FlagsHandler()
        detector = Detector(flags_handler)
        extractor = Extractor(flags_handler)

        # Background model preparations.
        bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)

        cap = cv2.VideoCapture(0)
        while flags_handler.quit_flag is False:
            ret, frame = cap.read()
            frame = cv2.flip(frame, 1)

            # Remove background from input frame.
            fgmask = bg_model.apply(frame, learningRate=0)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            res = cv2.bitwise_and(frame, frame, mask=fgmask)

            # Clip frames ROI.
            back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                                 {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})

            if flags_handler.background_capture_required is True:
                bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
                flags_handler.background_capture_required = False

            detector.input_frame_for_feature_extraction = back_ground_removed_clipped
            extractor.extract = detector

            image = extractor.get_drawn_extreme_contour_points()
            cv2.imshow('test_contour_extreme_point_tracking', image)
            flags_handler.keyboard_input = cv2.waitKey(1) 
Author: GalBrandwine, Project: HalloPy, Lines of code: 41, Source: test_extractor.py
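A note on the pattern shared by this and the following HalloPy tests: createBackgroundSubtractorMOG2(0, 50) passes history=0 and varThreshold=50 positionally, and apply(frame, learningRate=0) keeps the model from updating, so whatever background was captured at calibration time stays fixed until the model is recreated. A standalone sketch of that capture-and-freeze idea (the key bindings are an assumption modeled on the tests):

import cv2
import numpy as np

bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)  # history=0, varThreshold=50
cap = cv2.VideoCapture(0)  # assumed webcam source
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.flip(frame, 1)
    # learningRate=0 freezes the model: only the already-learned background is subtracted.
    fgmask = bg_model.apply(frame, learningRate=0)
    fgmask = cv2.erode(fgmask, np.ones((3, 3), np.uint8), iterations=1)
    cv2.imshow('foreground', cv2.bitwise_and(frame, frame, mask=fgmask))
    key = cv2.waitKey(1) & 0xFF
    if key == ord('b'):
        # Recreate the model to recapture the background, as the tests do on recalibration.
        bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
    elif key == 27:  # Esc
        break
cap.release()
cv2.destroyAllWindows()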

Example 7: test_max_distance_between_top_ext_point_and_palm_center_point

# Required module: import cv2 [as alias]
# Or: from cv2 import createBackgroundSubtractorMOG2 [as alias]
def test_max_distance_between_top_ext_point_and_palm_center_point(self):
        """Test if max distance is found correctly. """
        # setup
        # todo: use mockito here to mock preprocessing elements
        flags_handler = FlagsHandler()
        detector = Detector(flags_handler)
        extractor = Extractor(flags_handler)

        # Background model preparations.
        bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)

        cap = cv2.VideoCapture(0)
        while flags_handler.quit_flag is False:
            ret, frame = cap.read()
            frame = cv2.flip(frame, 1)

            # Remove background from input frame.
            fgmask = bg_model.apply(frame, learningRate=0)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            res = cv2.bitwise_and(frame, frame, mask=fgmask)

            # Clip frames ROI.
            back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                                 {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})

            if flags_handler.background_capture_required is True:
                bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
                flags_handler.background_capture_required = False

            detector.input_frame_for_feature_extraction = back_ground_removed_clipped
            extractor.extract = detector

            # run
            image = extractor.get_drawn_extreme_contour_points()
            top_point = (extractor.ext_top[0],
                         extractor.palm_center_point[1] - extractor.max_distance_from_ext_top_point_to_palm_center)
            cv2.line(image, extractor.palm_center_point, top_point, (255, 255, 255), thickness=2)
            cv2.imshow('test_max_distance_between_top_ext_point_and_palm_center_point', image)
            flags_handler.keyboard_input = cv2.waitKey(1) 
Author: GalBrandwine, Project: HalloPy, Lines of code: 41, Source: test_extractor.py

Example 8: test_palm_angle_calculation

# Required module: import cv2 [as alias]
# Or: from cv2 import createBackgroundSubtractorMOG2 [as alias]
def test_palm_angle_calculation(self):
        """Test if angle is calculated correctly.

        Usage:
            1. Press 'b' to calibrate the background remover.
            2. Insert a hand into the frame so that the middle finger is aligned with the Y axis.
            3. Rotate the hand 15 degrees left (the angle should go above 90).
            4. Rotate the hand 15 degrees right (the angle should go below 90).
            5. Press Esc when done.
        """
        # setup
        # todo: use mockito here to mock preprocessing elements
        flags_handler = FlagsHandler()
        detector = Detector(flags_handler)
        extractor = Extractor(flags_handler)

        # Background model preparations.
        bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
        cap = cv2.VideoCapture(0)

        while flags_handler.quit_flag is False:
            ret, frame = cap.read()
            frame = cv2.flip(frame, 1)

            # Remove background from input frame.
            fgmask = bg_model.apply(frame, learningRate=0)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            res = cv2.bitwise_and(frame, frame, mask=fgmask)

            # Clip frames ROI.
            back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                                 {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})

            if flags_handler.background_capture_required is True:
                bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
                flags_handler.background_capture_required = False

            detector.input_frame_for_feature_extraction = back_ground_removed_clipped
            extractor.extract = detector

            # run
            image = extractor.get_drawn_extreme_contour_points()
            cv2.imshow('test_contour_extreme_point_tracking', image)
            print(extractor.palm_angle_in_degrees)
            flags_handler.keyboard_input = cv2.waitKey(1) 
Author: GalBrandwine, Project: HalloPy, Lines of code: 48, Source: test_extractor.py

Example 9: test_5_second_calibration_time

# Required module: import cv2 [as alias]
# Or: from cv2 import createBackgroundSubtractorMOG2 [as alias]
def test_5_second_calibration_time(self):
        """Test if 5 second calibration time works correctly according to flags_handler.

        Usage:
            1. press 'b': to calibrate back_ground_remover.
            2. insert hand into frame, center palms_center (white dot) with axes crossing.
            3. wait for #calibration_time (default 5 sec).
            4. press esc

        test: after calibration_time, center circle should be green.
        """
        # setup
        # todo: use mockito here to mock preprocessing elements
        flags_handler = FlagsHandler()
        detector = Detector(flags_handler)
        extractor = Extractor(flags_handler)

        # Background model preparations.
        bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
        cap = cv2.VideoCapture(0)

        while flags_handler.quit_flag is False:
            ret, frame = cap.read()
            frame = cv2.flip(frame, 1)

            # Remove background from input frame.
            fgmask = bg_model.apply(frame, learningRate=0)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            res = cv2.bitwise_and(frame, frame, mask=fgmask)

            # Clip frames ROI.
            back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                                 {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})

            if flags_handler.background_capture_required is True:
                bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
                flags_handler.background_capture_required = False

            detector.input_frame_for_feature_extraction = back_ground_removed_clipped
            extractor.extract = detector

            # run
            image = extractor.get_drawn_extreme_contour_points()
            cv2.imshow('test_contour_extreme_point_tracking', image)
            flags_handler.keyboard_input = cv2.waitKey(1) 
Author: GalBrandwine, Project: HalloPy, Lines of code: 48, Source: test_extractor.py

Example 10: test_detector_extract_and_track

# Required module: import cv2 [as alias]
# Or: from cv2 import createBackgroundSubtractorMOG2 [as alias]
def test_detector_extract_and_track(self):
        """Test if Detector uses tracker object correctly.  """

        # setup
        # Input from camera.
        cv2.namedWindow('test_detector_extract_and_track')
        cap = cv2.VideoCapture(0)
        flags_handler = FlagsHandler()
        detector = Detector(flags_handler)
        extractor = Extractor(flags_handler)

        bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
        while flags_handler.quit_flag is False:
            """
            Inside loop, update self._threshold according to flags_handler,
            
            Pressing 'c': in order to toggle control (suppose to change contour's color between green and red)
            Pressing 'l': to raise 'land' flag in flags_handler, in order to be able to break loop (with esc)
            Pressing 'z': will make threshold thinner.
            Pressing 'x': will make threshold thicker.            
            Pressing esc: break loop.
            """
            ret, frame = cap.read()
            frame = cv2.flip(frame, 1)

            # Remove background from input frame.
            fgmask = bg_model.apply(frame, learningRate=0)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            res = cv2.bitwise_and(frame, frame, mask=fgmask)
            # Clip frames ROI.
            roi = {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6}
            back_ground_removed_clipped = ImageTestTool.clip_roi(res, roi)

            if flags_handler.background_capture_required is True:
                bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
                flags_handler.background_capture_required = False

            # Pipe:
            detector.input_frame_for_feature_extraction = back_ground_removed_clipped
            extractor.extract = detector

            cv2.imshow('test_detector_extract_and_track', extractor.get_drawn_extreme_contour_points())
            keyboard_input = cv2.waitKey(1)
            flags_handler.keyboard_input = keyboard_input

        # teardown
        cap.release()
        cv2.destroyAllWindows() 
Author: GalBrandwine, Project: HalloPy, Lines of code: 51, Source: test_detector.py

Example 11: test_track

# Required module: import cv2 [as alias]
# Or: from cv2 import createBackgroundSubtractorMOG2 [as alias]
def test_track(self):
        """Test if tracker object tracks correctly after given set of points to track, and a frame."""

        # setup
        cv2.namedWindow('test_track')
        flags_handler = FlagsHandler()
        tracker = None

        bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
        cap = cv2.VideoCapture(0)
        while flags_handler.quit_flag is False:
            ret, frame = cap.read()
            frame = cv2.flip(frame, 1)

            # Remove background from input frame.
            fgmask = bg_model.apply(frame, learningRate=0)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            res = cv2.bitwise_and(frame, frame, mask=fgmask)
            # Clip frames ROI.
            back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                                 {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})

            if flags_handler.background_capture_required is True:
                bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
                flags_handler.background_capture_required = False

            max_area_contour = ImageTestTool.get_max_area_contour(back_ground_removed_clipped)

            extLeft, extRight, extTop, extBot = ImageTestTool.get_contour_extreme_points(max_area_contour)
            palm_center = ImageTestTool.get_center_of_mass(max_area_contour)

            if tracker is None:
                points = np.array([extTop, palm_center])
            else:
                points = tracker.points_to_track
                tracker.track(points, back_ground_removed_clipped)
                # track() refreshes points_to_track; re-read the updated points.
                points = tracker.points_to_track

            ImageTestTool.draw_tracking_points(back_ground_removed_clipped, points)
            cv2.circle(back_ground_removed_clipped, palm_center, 8, (255, 255, 255), thickness=-1)
            cv2.imshow('test_track', back_ground_removed_clipped)
            keyboard_input = cv2.waitKey(1)
            flags_handler.keyboard_input = keyboard_input
            # run
            if flags_handler.background_capture_required is True:
                tracker = None
            if keyboard_input == ord('t'):
                tracker = Tracker(flags_handler, points, back_ground_removed_clipped)

        # teardown
        cap.release()
        cv2.destroyAllWindows() 
Author: GalBrandwine, Project: HalloPy, Lines of code: 56, Source: test_tracker.py

Example 12: background_subtraction

# Required module: import cv2 [as alias]
# Or: from cv2 import createBackgroundSubtractorMOG2 [as alias]
def background_subtraction(background_image, foreground_image):
    """Creates a binary image from a background subtraction of the foreground using cv2.BackgroundSubtractorMOG().
    The binary image returned is a mask that should contain mostly foreground pixels.
    The background image should be the same background as the foreground image except not containing the object
    of interest.

    Images must be of the same size and type.
    If not, larger image will be taken and downsampled to smaller image size.
    If they are of different types, an error will occur.

    Inputs:
    background_image       = img object, RGB or binary/grayscale/single-channel
    foreground_image       = img object, RGB or binary/grayscale/single-channel

    Returns:
    fgmask                 = background subtracted foreground image (mask)

    :param background_image: numpy.ndarray
    :param foreground_image: numpy.ndarray
    :return fgmask: numpy.ndarray
    """

    params.device += 1
    # Copy the images to make sure the originals are not altered
    bg_img = np.copy(background_image)
    fg_img = np.copy(foreground_image)
    # Check whether the images need to be resized, or an error raised
    if bg_img.shape != fg_img.shape:
        # If the images are not both 3-channel or both single-channel, raise an error.
        if len(bg_img.shape) != len(fg_img.shape):
            fatal_error("Images must both be single-channel/grayscale/binary or RGB")
        # Forcibly resize the larger image to the smaller image's size
        print("WARNING: Images are not of same size.\nResizing")
        if bg_img.shape > fg_img.shape:  # shape tuples compare element-wise, left to right
            width, height = fg_img.shape[1], fg_img.shape[0]
            bg_img = cv2.resize(bg_img, (width, height), interpolation=cv2.INTER_AREA)
        else:
            width, height = bg_img.shape[1], bg_img.shape[0]
            fg_img = cv2.resize(fg_img, (width, height), interpolation=cv2.INTER_AREA)

    bgsub = cv2.createBackgroundSubtractorMOG2()
    # Apply the background image first so the subtractor models it as background.
    _ = bgsub.apply(bg_img)
    # Apply the foreground image; pixels deviating from the modeled background form the returned mask.
    fgmask = bgsub.apply(fg_img)

    # Debug options
    if params.debug == "print":
        print_image(fgmask, os.path.join(params.debug_outdir, str(params.device) + "_background_subtraction.png"))
    elif params.debug == "plot":
        plot_image(fgmask, cmap="gray")

    return fgmask 
Author: danforthcenter, Project: plantcv, Lines of code: 56, Source: background_subtraction.py
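A hypothetical call of the function above; the image paths are placeholders, and pcv refers to the usual PlantCV import alias:

import cv2
from plantcv import plantcv as pcv

bg = cv2.imread('empty_scene.png')       # placeholder: scene without the object of interest
fg = cv2.imread('scene_with_plant.png')  # placeholder: same scene with the object

mask = pcv.background_subtraction(background_image=bg, foreground_image=fg)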


Note: The cv2.createBackgroundSubtractorMOG2 method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright in the source code belongs to the original authors. Please refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.