

Python cv2.CAP_PROP_FRAME_COUNT Attribute Code Examples

This article collects typical usage examples of the cv2.CAP_PROP_FRAME_COUNT attribute in Python. If you are wondering what cv2.CAP_PROP_FRAME_COUNT does, how to use it, or what real code that uses it looks like, the curated examples below may help. They also serve as a broader reference for working with the cv2 module.


The sections below present 15 code examples of the cv2.CAP_PROP_FRAME_COUNT attribute, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps surface better Python code examples.
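Before the examples, here is a minimal sketch of the most common pattern: open a video with cv2.VideoCapture and read its frame count through CAP_PROP_FRAME_COUNT. The file name "example.mp4" is only a placeholder; note that the value comes from container metadata and can be an estimate for some codecs.

import cv2

cap = cv2.VideoCapture("example.mp4")  # placeholder path, replace with your video
if not cap.isOpened():
    raise IOError("Could not open video")

# CAP_PROP_FRAME_COUNT is returned as a float; cast to int before using it as a loop bound.
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = cap.get(cv2.CAP_PROP_FPS)
print("frames:", frame_count, "fps:", fps)

cap.release()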

Example 1: encode

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def encode(self, video_in, data, video_out):
        assert len(data) == self.data_dim

        video_in = cv2.VideoCapture(video_in)
        width = int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT))
        length = int(video_in.get(cv2.CAP_PROP_FRAME_COUNT))

        data = torch.FloatTensor([data]).cuda()
        video_out = cv2.VideoWriter(
            video_out, cv2.VideoWriter_fourcc(*'mp4v'), 20.0, (width, height))

        for i in tqdm(range(length)):
            ok, frame = video_in.read()
            if not ok:
                break
            frame = torch.FloatTensor([frame]) / 127.5 - 1.0      # (L, H, W, 3)
            frame = frame.permute(3, 0, 1, 2).unsqueeze(0).cuda()  # (1, 3, L, H, W)
            wm_frame = self.encoder(frame, data)                       # (1, 3, L, H, W)
            wm_frame = torch.clamp(wm_frame, min=-1.0, max=1.0)
            wm_frame = (
                (wm_frame[0, :, 0, :, :].permute(1, 2, 0) + 1.0) * 127.5
            ).detach().cpu().numpy().astype("uint8")
            video_out.write(wm_frame)

        video_in.release()
        video_out.release()
Developer: DAI-Lab, Project: RivaGAN, Lines: 26, Source: rivagan.py

Example 2: imshow

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def imshow(self,bbox,cls,probs):
        #print("bbox ",bbox)
        cap = cv2.VideoCapture("/media/aiuser/78C2F86DC2F830CC1/ava_v2.2/preproc/train_clips/clips/b5pRYl_djbs/986.mp4")
        # frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        # key_frame_start = int(frame_count * 0.3)
        # key_frame_end = int(frame_count * 0.9)
        while cap.isOpened():
            ret, frame = cap.read()
            if ret == True:
                self.draw_bboxes_and_show(frame, bbox, cls, probs=probs)
                # self.draw_bboxes_and_show(frame,frame_num, real_bboxes, real_lables, key_frame_start, key_frame_end,color=(255,0,255))
                # Display the frame
                cv2.imshow('Frame', frame)
                # Wait for a key press to advance
                cv2.waitKey(0)
                # Press q to quit
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
            else:
                break
Developer: MagicChuyi, Project: SlowFast-Network-pytorch, Lines: 22, Source: evaluator.py

Example 3: get_video_capture_and_frame_count

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def get_video_capture_and_frame_count(path):
  assert os.path.isfile(
    path), "Couldn't find video file:" + path + ". Skipping video."
  cap = None
  if path:
    cap = cv2.VideoCapture(path)

  assert cap is not None, "Couldn't load video capture:" + path + ". Skipping video."

  # compute meta data of video
  if hasattr(cv2, 'cv'):
    frame_count = int(cap.get(cv2.cv.CAP_PROP_FRAME_COUNT))
  else:
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

  return cap, frame_count 
Developer: ferreirafabio, Project: video2tfrecord, Lines: 18, Source: video2tfrecord.py

Example 4: count_frames

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def count_frames(self, check_validity=False):
        offset = 0
        if self.vid_path.endswith('.flv'):
            offset = -1
        unverified_frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + offset
        if check_validity:
            verified_frame_count = 0
            for i in range(unverified_frame_count):
                self.cap.set(cv2.CAP_PROP_POS_FRAMES, i)
                if not self.cap.grab():
                    logging.warning("VideoIter:: >> frame (start from 0) {} corrupted in {}".format(i, self.vid_path))
                    break
                verified_frame_count = i + 1
            self.frame_count = verified_frame_count
        else:
            self.frame_count = unverified_frame_count
        assert self.frame_count > 0, "VideoIter:: Video: `{}' has no frames".format(self.vid_path)
        return self.frame_count 
Developer: facebookresearch, Project: dmc-net, Lines: 20, Source: video_iterator.py

Example 5: search_switch

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def search_switch(self, cap, key, thresh=0.5):
        left = 0
        right = cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1

        cap.set(cv2.CAP_PROP_POS_FRAMES, int((right + left) / 2))

        while right > left + 1:
            _, frame = cap.read()

            image = self.extractor.prepare_frame(frame, self.extractor.image_dict[key][0])

            if not self.extractor.exists(image, self.extractor.image_dict[key][1][0], thresh):
                left = int((right + left) / 2)
            else:
                right = int((right + left) / 2)

            cap.set(cv2.CAP_PROP_POS_FRAMES, int((right + left) / 2))

        cap.set(cv2.CAP_PROP_POS_FRAMES, left)

        return left 
Developer: shahar603, Project: SpaceXtract, Lines: 23, Source: util.py

Example 6: skip_from_launch

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def skip_from_launch(self, cap, key, time, thresh=None):
        """
        Move the capture to T+time (time can be negative) and returns the frame index.
        :param cap: OpenCV capture
        :param time: delta time from launch to skip to
        :return: index of requested frame
        """        
        if thresh is None:
            thresh = self.extractor.image_dict[key][2]

        number_of_frames = int(cap.get(cv2.CAP_PROP_FPS) * time) + self.search_switch(cap, key, thresh)

        number_of_frames = max(number_of_frames, 0)
        number_of_frames = min(number_of_frames, cap.get(cv2.CAP_PROP_FRAME_COUNT))

        cap.set(cv2.CAP_PROP_POS_FRAMES, number_of_frames)

        return number_of_frames 
Developer: shahar603, Project: SpaceXtract, Lines: 20, Source: util.py

Example 7: find_anchor

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def find_anchor(self, cap, start=0, end=1, maxiter=10):
        if not isinstance(self.extractor, RelativeExtract):
            return False

        original_location = cap.get(cv2.CAP_PROP_POS_FRAMES)

        for i in range(maxiter):
            pos = random.uniform(start, end)

            cap.set(cv2.CAP_PROP_POS_FRAMES,  pos*cap.get(cv2.CAP_PROP_FRAME_COUNT))
            _, frame = cap.read()

            if self.extractor.prepare_image_dict(frame):
                return True

        cap.set(cv2.CAP_PROP_POS_FRAMES, original_location)

        return False 
Developer: shahar603, Project: SpaceXtract, Lines: 20, Source: util.py

Example 8: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def __init__(self, capture, tfnet, show_flag=False, save_flag=False):
        self.capture = capture
        self.tfnet = tfnet
        self.save_flag = save_flag
        self.show_flag = show_flag

        # Predetermined parameters that have been tested to work best.
        self.end_fnum = int(self.capture.get(cv2.CAP_PROP_FRAME_COUNT))
        self.max_num_match_frames = 30
        self.min_match_length_s = 30
        self.num_match_frames = 5
        self.step_size = 60
        self.timeline_empty_thresh = 4


    #### STAGE DETECTOR TESTS ##################################################

    # Run the standard stage detector test over the entire video. 
Developer: jpnaterer, Project: smashscan, Lines: 20, Source: stage_detection.py

Example 9: test_insert_image_frame_count

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def test_insert_image_frame_count(self):
        with deepstar_path():
            image_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/image_0001.jpg'  # noqa

            args = ['main.py', 'insert', 'videos', 'image', image_0001]
            opts = {'frame-count': '5'}

            with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
                VideoCommandLineRouteHandler().handle(args, opts)

            # files
            video_path = VideoFile.path(VideoModel().select(1)[2])

            vc = cv2.VideoCapture(video_path)

            try:
                self.assertTrue(vc.isOpened())
                self.assertEqual(vc.get(cv2.CAP_PROP_FRAME_COUNT), 5)
            finally:
                vc.release() 
Developer: zerofox-oss, Project: deepstar, Lines: 22, Source: test_video_command_line_route_handler.py

Example 10: test_create_one_video_file_from_one_image_file

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def test_create_one_video_file_from_one_image_file(self):
        image_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/image_0001.jpg'  # noqa

        with tempdir() as tempdir_:
            video_path = os.path.join(tempdir_, 'video.mp4')

            ret = create_one_video_file_from_one_image_file(image_0001,
                                                            video_path)

            self.assertTrue(ret)

            vc = cv2.VideoCapture(video_path)

            try:
                self.assertTrue(vc.isOpened())
                self.assertEqual(vc.get(cv2.CAP_PROP_FRAME_COUNT), 1)
            finally:
                vc.release() 
Developer: zerofox-oss, Project: deepstar, Lines: 20, Source: test_video.py

Example 11: test_create_one_video_file_from_many_image_files

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def test_create_one_video_file_from_many_image_files(self):
        image_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/image_0001.jpg'  # noqa

        with tempdir() as tempdir_:
            video_path = os.path.join(tempdir_, 'video.mp4')

            def image_paths():
                for _ in range(0, 5):
                    yield image_0001

            ret = create_one_video_file_from_many_image_files(image_paths, video_path)  # noqa

            self.assertTrue(ret)

            vc = cv2.VideoCapture(video_path)

            try:
                self.assertTrue(vc.isOpened())
                self.assertEqual(vc.get(cv2.CAP_PROP_FRAME_COUNT), 5)
            finally:
                vc.release() 
Developer: zerofox-oss, Project: deepstar, Lines: 23, Source: test_video.py

Example 12: getInfo

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def getInfo(sourcePath):
    cap = cv2.VideoCapture(sourcePath)
    info = {
        "framecount": cap.get(cv2.CAP_PROP_FRAME_COUNT),
        "fps": cap.get(cv2.CAP_PROP_FPS),
        "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        "codec": int(cap.get(cv2.CAP_PROP_FOURCC))
    }
    cap.release()
    return info

#
# Extracts one frame for every second of video.
# Effectively compresses a video down into much less data.
#
Developer: tafsiri, Project: filmstrip, Lines: 18, Source: preprocess.py
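The trailing comment of Example 12 describes the next step in preprocess.py (extracting one frame per second of video), whose body is not shown in this excerpt. The following is only a rough sketch of that idea, reusing the getInfo helper above; the extractFrames name and its output layout are illustrative assumptions, not the project's actual implementation.

import os
import cv2

def extractFrames(sourcePath, outDir):
    # Illustrative sketch: save roughly one frame per second of video.
    info = getInfo(sourcePath)                  # helper from Example 12
    step = max(int(round(info["fps"])), 1)      # frames between saved images

    os.makedirs(outDir, exist_ok=True)
    cap = cv2.VideoCapture(sourcePath)
    saved = 0
    for i in range(int(info["framecount"])):
        grabbed, frame = cap.read()
        if not grabbed:
            break
        if i % step == 0:
            cv2.imwrite(os.path.join(outDir, "frame_%06d.png" % saved), frame)
            saved += 1
    cap.release()
    return saved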

Example 13: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def __init__(self, cfg, args):
        # Create a VideoCapture object
        self.cfg = cfg
        self.args = args
        use_cuda = self.args.use_cuda and torch.cuda.is_available()

        if not use_cuda:
            warnings.warn(UserWarning("Running in cpu mode!"))

        self.detector = build_detector(cfg, use_cuda=use_cuda)
        self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
        self.class_names = self.detector.class_names

        self.vdo = cv2.VideoCapture(self.args.input)
        self.status, self.frame = None, None
        self.total_frames = int(cv2.VideoCapture.get(self.vdo, cv2.CAP_PROP_FRAME_COUNT))
        self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))

        self.output_frame = None

        self.thread = ThreadPoolExecutor(max_workers=1)
        self.thread.submit(self.update) 
Developer: ZQPei, Project: deep_sort_pytorch, Lines: 25, Source: rtsp_threaded_tracker.py

Example 14: __enter__

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def __enter__(self):
        self.vdo.open(self.args.VIDEO_PATH)
        self.total_frames = int(cv2.VideoCapture.get(self.vdo, cv2.CAP_PROP_FRAME_COUNT))
        self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))

        video_details = {'frame_width': self.im_width,
                         'frame_height': self.im_height,
                         'frame_rate': self.args.write_fps,
                         'video_name': self.args.VIDEO_PATH}
        codec = cv2.VideoWriter_fourcc(*'XVID')
        self.writer = cv2.VideoWriter(self.output_file, codec, self.args.write_fps,
                                      (self.im_width, self.im_height))
        self.logger.add_video_details(**video_details)

        assert self.vdo.isOpened()
        return self 
Developer: ZQPei, Project: deep_sort_pytorch, Lines: 19, Source: ped_det_server.py

Example 15: VideoToSequence

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def VideoToSequence(path, time):
    video = cv2.VideoCapture(path)
    dir_path = 'frames_tmp'
    os.system("rm -rf %s" % dir_path)
    os.mkdir(dir_path)
    fps = int(video.get(cv2.CAP_PROP_FPS))
    length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    print('making ' + str(length) + ' frame sequence in ' + dir_path)
    i = -1
    while (True):
        (grabbed, frame) = video.read()
        if not grabbed:
            break
        i = i + 1
        index = IndexHelper(i*time, len(str(time*length)))
        cv2.imwrite(dir_path + '/' + index + '.png', frame)
        # print(index)
    return [dir_path, length, fps] 
开发者ID:CM-BF,项目名称:FeatureFlow,代码行数:20,代码来源:sequence_run.py


Note: The cv2.CAP_PROP_FRAME_COUNT examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.