當前位置: 首頁>>代碼示例>>Python>>正文


Python cv2.CAP_PROP_POS_MSEC屬性代碼示例

本文整理匯總了Python中cv2.CAP_PROP_POS_MSEC屬性的典型用法代碼示例。如果您正苦於以下問題:Python cv2.CAP_PROP_POS_MSEC屬性的具體用法?Python cv2.CAP_PROP_POS_MSEC怎麽用?Python cv2.CAP_PROP_POS_MSEC使用的例子?那麽, 這裏精選的屬性代碼示例或許可以為您提供幫助。您也可以進一步了解該屬性所在cv2的用法示例。


在下文中一共展示了cv2.CAP_PROP_POS_MSEC屬性的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: extract_features

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def extract_features(video_input_file_path, feature_output_file_path):
    """Sample one frame per second from a video and cache the frames on disk.

    :param video_input_file_path: path of the video to read.
    :param feature_output_file_path: ``.npy`` cache path; if it already
        exists its contents are returned without re-reading the video.
    :return: ndarray of 40x40 BGR frames, one per second of video.
    """
    if os.path.exists(feature_output_file_path):
        return np.load(feature_output_file_path)
    print('Extracting frames from video: ', video_input_file_path)
    vidcap = cv2.VideoCapture(video_input_file_path)
    features = []
    count = 0
    success = True
    while success:
        # Seek to the next whole second before reading, so we sample at 1 fps.
        vidcap.set(cv2.CAP_PROP_POS_MSEC, (count * 1000))
        success, image = vidcap.read()
        if success:
            img = cv2.resize(image, (40, 40), interpolation=cv2.INTER_AREA)
            # Bug fix: the resized frame was computed but the full-size frame
            # was appended (the 40x40 `img` was discarded). Store the resized
            # frame, matching the vgg16 extractors in this project.
            features.append(img)
            count = count + 1
    unscaled_features = np.array(features)
    print(unscaled_features.shape)
    np.save(feature_output_file_path, unscaled_features)
    return unscaled_features
開發者ID:chen0040,項目名稱:keras-video-classifier,代碼行數:23,代碼來源:frame_extractor.py

示例2: _read_frame_func

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def _read_frame_func(self):
        """Read one frame; in real-time mode, drop frames until the video
        position catches up with the wall clock."""
        ret, frame = self.video_capture.read()
        if not ret:
            raise EOFError()

        if self.frame_skip_rt:
            wall_msec = self.get_tick()
            assert wall_msec >= 0
            # Keep reading while the capture position lags behind real time;
            # the last successfully read frame wins.
            while self.video_capture.get(cv2.CAP_PROP_POS_MSEC) < wall_msec:
                ret, newer_frame = self.video_capture.read()
                if not ret:
                    break
                frame = newer_frame

        return frame
開發者ID:hasegaw,項目名稱:IkaLog,代碼行數:24,代碼來源:opencv_gstreamer.py

示例3: _read_frame_func

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def _read_frame_func(self):
        """Fetch the next frame, skipping stale frames when real-time
        frame skipping is enabled."""
        ok, frame = self.video_capture.read()
        if not ok:
            raise EOFError()

        if self.frame_skip_rt:
            systime_msec = self.get_tick()
            assert systime_msec >= 0
            pos_msec = self.video_capture.get(cv2.CAP_PROP_POS_MSEC)
            # Discard frames until the stream position reaches wall-clock time.
            while pos_msec < systime_msec:
                ok, candidate = self.video_capture.read()
                if not ok:
                    break
                frame = candidate
                pos_msec = self.video_capture.get(cv2.CAP_PROP_POS_MSEC)

        return frame

    # override
開發者ID:hasegaw,項目名稱:IkaLog,代碼行數:26,代碼來源:opencv_file.py

示例4: extract_vgg16_features_live

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def extract_vgg16_features_live(model, video_input_file_path):
    """Sample one frame per second from a video and return VGG16 features.

    :param model: a Keras model whose ``predict`` maps a preprocessed
        (1, 224, 224, 3) batch to a feature tensor.
    :param video_input_file_path: path of the video to read.
    :return: ndarray of flattened per-frame feature vectors.
    """
    print('Extracting frames from video: ', video_input_file_path)
    vidcap = cv2.VideoCapture(video_input_file_path)
    success, image = vidcap.read()
    features = []
    success = True
    count = 0
    while success:
        # Jump to the next second mark so we sample at 1 fps.
        vidcap.set(cv2.CAP_PROP_POS_MSEC, (count * 1000))
        success, image = vidcap.read()
        if success:
            resized = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)
            # `input` renamed to avoid shadowing the builtin.
            batch = np.expand_dims(img_to_array(resized), axis=0)
            batch = preprocess_input(batch)
            features.append(model.predict(batch).ravel())
            count = count + 1
    return np.array(features)
開發者ID:chen0040,項目名稱:keras-video-classifier,代碼行數:23,代碼來源:vgg16_feature_extractor.py

示例5: extract_vgg16_features

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def extract_vgg16_features(model, video_input_file_path, feature_output_file_path):
    """Extract VGG16 features at one frame per second, caching them on disk.

    :param model: a Keras model whose ``predict`` maps a preprocessed
        (1, 224, 224, 3) batch to a feature tensor.
    :param video_input_file_path: path of the video to read.
    :param feature_output_file_path: ``.npy`` cache path; if it exists its
        contents are returned without re-reading the video.
    :return: ndarray of flattened per-frame feature vectors.
    """
    if os.path.exists(feature_output_file_path):
        return np.load(feature_output_file_path)
    count = 0
    print('Extracting frames from video: ', video_input_file_path)
    vidcap = cv2.VideoCapture(video_input_file_path)
    success, image = vidcap.read()
    features = []
    success = True
    while success:
        # Seek to the next whole second before each read (1 fps sampling).
        vidcap.set(cv2.CAP_PROP_POS_MSEC, (count * 1000))
        success, image = vidcap.read()
        if success:
            resized = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)
            batch = preprocess_input(np.expand_dims(img_to_array(resized), axis=0))
            features.append(model.predict(batch).ravel())
            count = count + 1
    unscaled = np.array(features)
    np.save(feature_output_file_path, unscaled)
    return unscaled
開發者ID:chen0040,項目名稱:keras-video-classifier,代碼行數:26,代碼來源:vgg16_feature_extractor.py

示例6: get_video_frames

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def get_video_frames(video_path, frames_path, start_time_ms, stop_time_ms, pace):
    """Write frames of a video segment to JPEG files under *frames_path*.

    :param video_path: input video file.
    :param frames_path: directory where ``frame_NNN.jpg`` files are stored.
    :param start_time_ms: segment start position in milliseconds.
    :param stop_time_ms: segment end in milliseconds; 0 means whole video.
    :param pace: milliseconds between stored frames; 0 picks FPS/10.
    :return: (True, list of written paths) on success, (False, []) on error.
    """
    try:
        capture = cv2.VideoCapture(video_path)
        capture.set(cv2.CAP_PROP_POS_MSEC, start_time_ms)

        fps = capture.get(cv2.CAP_PROP_FPS)

        if stop_time_ms == 0:
            # Whole video: derive the end time from the total frame count.
            total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
            stop_time_ms = int(total_frames / fps) * 1000

        if pace == 0:
            # Default sampling: one image every FPS/10.
            pace = int(1000 / (fps / 10))

        saved_paths = []
        index = 1
        success = True
        while success and capture.get(cv2.CAP_PROP_POS_MSEC) <= stop_time_ms:
            success, frame = capture.read()
            if not success:
                break
            frame_path = '{}/frame_{:03}.jpg'.format(frames_path, index)
            log.debug('Storing: {}'.format(frame_path))
            cv2.imwrite(frame_path, frame)
            saved_paths.append(frame_path)
            index += 1
            # Advance the stream position by `pace` milliseconds.
            capture.set(cv2.CAP_PROP_POS_MSEC, capture.get(cv2.CAP_PROP_POS_MSEC) + pace)

        capture.release()
        cv2.destroyAllWindows()
        return True, saved_paths
    except Exception as e:
        log.error(e)
        return False, []
開發者ID:singnet,項目名稱:dnn-model-services,代碼行數:39,代碼來源:video_tools.py

示例7: get_current_frame_time

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def get_current_frame_time(video_cap: cv2.VideoCapture) -> float:
    """Return the current playback position of *video_cap* in seconds."""
    # same as get_current_frame_id, take good care of them
    # CAP_PROP_POS_MSEC reports milliseconds; convert to seconds.
    msec = video_cap.get(cv2.CAP_PROP_POS_MSEC)
    return msec / 1000
開發者ID:williamfzc,項目名稱:stagesepx,代碼行數:5,代碼來源:toolbox.py

示例8: get_frame

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def get_frame():
    """
    Get a frame from a video file.

    :return: (ndarray, float) (Loaded frame, time in seconds).
    """
    global _CAP, _FPS

    to_return_frame = None
    timestamp = 0.0

    if _CAP is None:
        # Bug fix: previously this branch still fell through to
        # `_CAP.get(...)` in the return statement and raised AttributeError
        # instead of returning the documented tuple.
        print("Error on getting frame. cv2.VideoCapture is not initialized.")
    else:
        try:
            if _CAP.isOpened():
                # Skip frames so the effective sampling rate is _FPS
                # (the source is assumed to run at _MAX_FPS — TODO confirm).
                for i in range(int(_MAX_FPS / _FPS)):
                    _CAP.grab()

                is_valid_frame, to_return_frame = _CAP.retrieve()

                if not is_valid_frame:
                    to_return_frame = None
        except Exception as e:
            print("Error on getting a frame. Please, double-check if the video file is not corrupted.")
            print("Supported file format: MPEG-4 (*.mp4).")
            print("Check whether working versions of ffmpeg or gstreamer is installed.")
            raise e
        # Current stream position, converted from milliseconds to seconds.
        timestamp = _CAP.get(cv2.CAP_PROP_POS_MSEC) / 1000

    return to_return_frame, timestamp
開發者ID:siqueira-hc,項目名稱:Efficient-Facial-Feature-Learning-with-Wide-Ensemble-based-Convolutional-Neural-Networks,代碼行數:32,代碼來源:uimage.py

示例9: _get_current_timestamp_func

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def _get_current_timestamp_func(self):
        """Return the capture position in msec, or the wall-clock tick when
        the capture does not report one."""
        msec = self.video_capture.get(cv2.CAP_PROP_POS_MSEC)
        return self.get_tick() if msec is None else msec

    # override
開發者ID:hasegaw,項目名稱:IkaLog,代碼行數:11,代碼來源:opencv_gstreamer.py

示例10: _get_current_timestamp_func

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def _get_current_timestamp_func(self):
        """Return the video position in msec, falling back to the wall clock
        when there is no capture or it reports a falsy position."""
        if self.video_capture is None:
            return self.get_tick()

        msec = self.video_capture.get(cv2.CAP_PROP_POS_MSEC)
        if msec:
            return msec
        # A position of 0/None is treated as "unknown" — use the tick instead.
        return self.get_tick()

    # override
開發者ID:hasegaw,項目名稱:IkaLog,代碼行數:11,代碼來源:opencv_file.py

示例11: set_pos_msec

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def set_pos_msec(self, pos_msec):
        """Moves the video position to |pos_msec| in msec.

        Silently does nothing when no capture is attached."""
        if not self.video_capture:
            return
        self.video_capture.set(cv2.CAP_PROP_POS_MSEC, pos_msec)

    # override
開發者ID:hasegaw,項目名稱:IkaLog,代碼行數:8,代碼來源:opencv_file.py

示例12: frame_iterator

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def frame_iterator(filename, every_ms=1000, max_num_frames=300):
  """Uses OpenCV to iterate over all frames of filename at a given frequency.

  Args:
    filename: Path to video file (e.g. mp4)
    every_ms: The duration (in milliseconds) to skip between frames.
    max_num_frames: Maximum number of frames to process, taken from the
      beginning of the video.

  Yields:
    RGB frame with shape (image height, image width, channels)
  """
  video_capture = cv2.VideoCapture()
  if not video_capture.open(filename):
    # Bug fix: `print >> sys.stderr, ...` is Python 2 syntax; on Python 3 it
    # evaluates as a useless expression and prints nothing. Use print().
    print('Error: Cannot open video file ' + filename, file=sys.stderr)
    return
  last_ts = -99999  # The timestamp of last retrieved frame.
  num_retrieved = 0

  while num_retrieved < max_num_frames:
    # Skip frames until at least `every_ms` has passed since the last yield.
    while video_capture.get(CAP_PROP_POS_MSEC) < every_ms + last_ts:
      if not video_capture.read()[0]:
        return

    last_ts = video_capture.get(CAP_PROP_POS_MSEC)
    has_frames, frame = video_capture.read()
    if not has_frames:
      break
    yield frame
    num_retrieved += 1
開發者ID:google,項目名稱:youtube-8m,代碼行數:33,代碼來源:extract_tfrecords_main.py

示例13: split_visual

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def split_visual(self,visual_video, fps, fps_ratio, output_folder='visual_frames'):
        '''
        Splits video into frames based on the actual fps, and time between frames of the thermal sequence.
        There is a sync issue where the thermal fps, and visual fps don't have an integer LCM/if LCM is v large. Have to try motion interpolation to fix this
        '''

        output_folder = Path(output_folder)
        output_folder.mkdir(exist_ok=True)
        vid = cv.VideoCapture(visual_video)
        total_frames = vid.get(cv.CAP_PROP_FRAME_COUNT)
        current_frame = 0
        thermal_fps = fps * (1/fps_ratio)
        thermal_time = 1/thermal_fps  # seconds between thermal frames
        logger.info(f'Time between frames for Thermal SEQ: {thermal_time}')
        # Uncomment below lines if you need total time of visual video
        # vid.set(cv.CAP_PROP_POS_AVI_RATIO,1)
        # total_time = vid.get(cv.CAP_PROP_POS_MSEC)
        last_save_time = -1*thermal_time #So that it saves the 0th frame
        idx=0
        while current_frame < total_frames:
            current_frame = vid.get(cv.CAP_PROP_POS_FRAMES)
            try:
                current_time = (1/fps)*current_frame
            except ZeroDivisionError:
                # fps == 0 (unreadable metadata): treat as time 0.
                current_time = 0
            ret,frame = vid.read()
            if not ret:
                # Bug fix: a failed read never advances CAP_PROP_POS_FRAMES,
                # so the original loop could spin forever on a truncated or
                # corrupt stream. Stop instead.
                break
            # Save a frame whenever at least one thermal period (with a 5 ms
            # tolerance) has elapsed since the last saved frame.
            if (current_time - last_save_time)*1000 >= ((thermal_time*1000)-5):
                cv.imwrite(str(output_folder/f"{idx}.jpg"), frame)
                idx+=1
                last_save_time=current_time
        return True
開發者ID:detecttechnologies,項目名稱:Thermal_Image_Analysis,代碼行數:36,代碼來源:CThermal.py

示例14: run

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def run(self):
        """Iterate over the video, running detection on every
        args.frame_interval-th grabbed frame and logging the results."""
        idx_frame = 0
        pbar = tqdm(total=self.total_frames + 1)
        while self.vdo.grab():
            if idx_frame % args.frame_interval == 0:
                _, ori_im = self.vdo.retrieve()
                timestamp = self.vdo.get(cv2.CAP_PROP_POS_MSEC)
                frame_id = int(self.vdo.get(cv2.CAP_PROP_POS_FRAMES))
                self.logger.add_frame(frame_id=frame_id, timestamp=timestamp)
                self.detection(frame=ori_im, frame_id=frame_id)
                self.save_frame(ori_im)
            # Bug fix: the counter must advance for every grabbed frame.
            # Incrementing it only inside the branch froze it at 1, so with
            # frame_interval > 1 only the very first frame was ever processed.
            idx_frame += 1
            pbar.update()
        self.logger.json_output(self.json_output)
開發者ID:ZQPei,項目名稱:deep_sort_pytorch,代碼行數:16,代碼來源:ped_det_server.py

示例15: _time_stamp

# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import CAP_PROP_POS_MSEC [as 別名]
def _time_stamp(self):
        """Elapsed time in seconds: wall clock since start, or the capture's
        stream position, depending on configuration."""
        if self._use_wall_clock:
            return time.time() - self._start_time
        # Stream position is reported in milliseconds.
        return self.capture.get(CAP_PROP_POS_MSEC) / 1e3
開發者ID:gilestrolab,項目名稱:ethoscope,代碼行數:8,代碼來源:cameras.py


注:本文中的cv2.CAP_PROP_POS_MSEC屬性示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。