This article collects typical usage examples of the cv2.CAP_PROP_FRAME_COUNT attribute in Python. If you have been wondering what exactly cv2.CAP_PROP_FRAME_COUNT does, how to use it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the cv2 module that this attribute belongs to.
The following shows 15 code examples of the cv2.CAP_PROP_FRAME_COUNT attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
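Before the examples, here is a minimal, self-contained sketch of the common pattern they all build on (the file name in the usage comment is a placeholder): open a cv2.VideoCapture, read CAP_PROP_FRAME_COUNT together with the other stream properties, and release the capture when done.

import cv2

def probe_frame_count(path):
    """Return (frame_count, fps, width, height) for a video file."""
    cap = cv2.VideoCapture(path)
    if not cap.isOpened():
        raise IOError("Couldn't open video: " + path)
    try:
        # Property getters return floats, so cast the counts and sizes to int.
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        return frame_count, fps, width, height
    finally:
        cap.release()

# Example usage with a placeholder path:
# print(probe_frame_count('input.mp4'))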
Example 1: encode

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def encode(self, video_in, data, video_out):
    assert len(data) == self.data_dim
    video_in = cv2.VideoCapture(video_in)
    width = int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT))
    length = int(video_in.get(cv2.CAP_PROP_FRAME_COUNT))
    data = torch.FloatTensor([data]).cuda()
    video_out = cv2.VideoWriter(
        video_out, cv2.VideoWriter_fourcc(*'mp4v'), 20.0, (width, height))
    for i in tqdm(range(length)):
        ok, frame = video_in.read()
        frame = torch.FloatTensor([frame]) / 127.5 - 1.0        # (L, H, W, 3)
        frame = frame.permute(3, 0, 1, 2).unsqueeze(0).cuda()   # (1, 3, L, H, W)
        wm_frame = self.encoder(frame, data)                    # (1, 3, L, H, W)
        wm_frame = torch.clamp(wm_frame, min=-1.0, max=1.0)
        wm_frame = (
            (wm_frame[0, :, 0, :, :].permute(1, 2, 0) + 1.0) * 127.5
        ).detach().cpu().numpy().astype("uint8")
        video_out.write(wm_frame)
    video_out.release()
Example 2: imshow

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def imshow(self, bbox, cls, probs):
    # print("bbox ", bbox)
    cap = cv2.VideoCapture("/media/aiuser/78C2F86DC2F830CC1/ava_v2.2/preproc/train_clips/clips/b5pRYl_djbs/986.mp4")
    # frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # key_frame_start = int(frame_count * 0.3)
    # key_frame_end = int(frame_count * 0.9)
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            self.draw_bboxes_and_show(frame, bbox, cls, probs=probs)
            # self.draw_bboxes_and_show(frame, frame_num, real_bboxes, real_lables, key_frame_start, key_frame_end, color=(255, 0, 255))
            # Display the frame
            cv2.imshow('Frame', frame)
            # Wait for a key press before advancing
            cv2.waitKey(0)
            # Press q to quit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        else:
            break
Example 3: get_video_capture_and_frame_count

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def get_video_capture_and_frame_count(path):
    assert os.path.isfile(
        path), "Couldn't find video file:" + path + ". Skipping video."
    cap = None
    if path:
        cap = cv2.VideoCapture(path)
    assert cap is not None, "Couldn't load video capture:" + path + ". Skipping video."
    # compute meta data of video
    if hasattr(cv2, 'cv'):
        frame_count = int(cap.get(cv2.cv.CAP_PROP_FRAME_COUNT))
    else:
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    return cap, frame_count
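A note on the hasattr(cv2, 'cv') branch above: OpenCV 2.x builds exposed the capture property constants through the cv2.cv submodule (typically with a CV_ prefix), while OpenCV 3 and later expose them directly on cv2 as CAP_PROP_FRAME_COUNT, so the check keeps the helper usable across both API generations.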
Example 4: count_frames

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def count_frames(self, check_validity=False):
    offset = 0
    if self.vid_path.endswith('.flv'):
        offset = -1
    unverified_frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + offset
    if check_validity:
        verified_frame_count = 0
        for i in range(unverified_frame_count):
            self.cap.set(cv2.CAP_PROP_POS_FRAMES, i)
            if not self.cap.grab():
                logging.warning("VideoIter:: >> frame (start from 0) {} corrupted in {}".format(i, self.vid_path))
                break
            verified_frame_count = i + 1
        self.frame_count = verified_frame_count
    else:
        self.frame_count = unverified_frame_count
    assert self.frame_count > 0, "VideoIter:: Video: `{}' has no frames".format(self.vid_path)
    return self.frame_count
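The check_validity pass is useful because CAP_PROP_FRAME_COUNT is derived from container metadata and can be inaccurate for some files (note the special offset applied to .flv); grabbing each frame is the slow but reliable way to confirm how many frames can actually be decoded.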
Example 5: search_switch

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def search_switch(self, cap, key, thresh=0.5):
    # Binary search over the frame range for the point where the key image
    # switches from absent to present.
    left = 0
    right = cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1
    cap.set(cv2.CAP_PROP_POS_FRAMES, int((right + left) / 2))
    while right > left + 1:
        _, frame = cap.read()
        image = self.extractor.prepare_frame(frame, self.extractor.image_dict[key][0])
        if not self.extractor.exists(image, self.extractor.image_dict[key][1][0], thresh):
            left = int((right + left) / 2)
        else:
            right = int((right + left) / 2)
        cap.set(cv2.CAP_PROP_POS_FRAMES, int((right + left) / 2))
    cap.set(cv2.CAP_PROP_POS_FRAMES, left)
    return left
Example 6: skip_from_launch

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def skip_from_launch(self, cap, key, time, thresh=None):
    """
    Move the capture to T+time (time can be negative) and return the frame index.
    :param cap: OpenCV capture
    :param key: key of the reference image used to locate the launch frame
    :param time: delta time from launch to skip to
    :param thresh: detection threshold (defaults to the one stored for the key)
    :return: index of requested frame
    """
    if thresh is None:
        thresh = self.extractor.image_dict[key][2]
    number_of_frames = int(cap.get(cv2.CAP_PROP_FPS) * time) + self.search_switch(cap, key, thresh)
    number_of_frames = max(number_of_frames, 0)
    number_of_frames = min(number_of_frames, cap.get(cv2.CAP_PROP_FRAME_COUNT))
    cap.set(cv2.CAP_PROP_POS_FRAMES, number_of_frames)
    return number_of_frames
Example 7: find_anchor

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def find_anchor(self, cap, start=0, end=1, maxiter=10):
    if not isinstance(self.extractor, RelativeExtract):
        return False
    original_location = cap.get(cv2.CAP_PROP_POS_FRAMES)
    for i in range(maxiter):
        # Jump to a random position within [start, end] (as a fraction of the
        # video length) and try to build the extractor's image dict there.
        pos = random.uniform(start, end)
        cap.set(cv2.CAP_PROP_POS_FRAMES, pos * cap.get(cv2.CAP_PROP_FRAME_COUNT))
        _, frame = cap.read()
        if self.extractor.prepare_image_dict(frame):
            return True
    cap.set(cv2.CAP_PROP_POS_FRAMES, original_location)
    return False
Example 8: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def __init__(self, capture, tfnet, show_flag=False, save_flag=False):
    self.capture = capture
    self.tfnet = tfnet
    self.save_flag = save_flag
    self.show_flag = show_flag
    # Predetermined parameters that have been tested to work best.
    self.end_fnum = int(self.capture.get(cv2.CAP_PROP_FRAME_COUNT))
    self.max_num_match_frames = 30
    self.min_match_length_s = 30
    self.num_match_frames = 5
    self.step_size = 60
    self.timeline_empty_thresh = 4

#### STAGE DETECTOR TESTS ##################################################
# Run the standard stage detector test over the entire video.
Example 9: test_insert_image_frame_count

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def test_insert_image_frame_count(self):
    with deepstar_path():
        image_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/image_0001.jpg'  # noqa
        args = ['main.py', 'insert', 'videos', 'image', image_0001]
        opts = {'frame-count': '5'}
        with mock.patch.dict(os.environ, {'DEBUG_LEVEL': '0'}):
            VideoCommandLineRouteHandler().handle(args, opts)
        # files
        video_path = VideoFile.path(VideoModel().select(1)[2])
        vc = cv2.VideoCapture(video_path)
        try:
            self.assertTrue(vc.isOpened())
            self.assertEqual(vc.get(cv2.CAP_PROP_FRAME_COUNT), 5)
        finally:
            vc.release()
Example 10: test_create_one_video_file_from_one_image_file

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def test_create_one_video_file_from_one_image_file(self):
    image_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/image_0001.jpg'  # noqa
    with tempdir() as tempdir_:
        video_path = os.path.join(tempdir_, 'video.mp4')
        ret = create_one_video_file_from_one_image_file(image_0001,
                                                        video_path)
        self.assertTrue(ret)
        vc = cv2.VideoCapture(video_path)
        try:
            self.assertTrue(vc.isOpened())
            self.assertEqual(vc.get(cv2.CAP_PROP_FRAME_COUNT), 1)
        finally:
            vc.release()
Example 11: test_create_one_video_file_from_many_image_files

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def test_create_one_video_file_from_many_image_files(self):
    image_0001 = os.path.dirname(os.path.realpath(__file__)) + '/../../support/image_0001.jpg'  # noqa
    with tempdir() as tempdir_:
        video_path = os.path.join(tempdir_, 'video.mp4')

        def image_paths():
            for _ in range(0, 5):
                yield image_0001

        ret = create_one_video_file_from_many_image_files(image_paths, video_path)  # noqa
        self.assertTrue(ret)
        vc = cv2.VideoCapture(video_path)
        try:
            self.assertTrue(vc.isOpened())
            self.assertEqual(vc.get(cv2.CAP_PROP_FRAME_COUNT), 5)
        finally:
            vc.release()
Example 12: getInfo

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def getInfo(sourcePath):
    cap = cv2.VideoCapture(sourcePath)
    info = {
        "framecount": cap.get(cv2.CAP_PROP_FRAME_COUNT),
        "fps": cap.get(cv2.CAP_PROP_FPS),
        "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        "codec": int(cap.get(cv2.CAP_PROP_FOURCC))
    }
    cap.release()
    return info

#
# Extracts one frame for every second of video.
# Effectively compresses a video down into much less data.
#
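The extraction step that the closing comment refers to is not shown in this snippet. The following is a rough sketch of that idea, reusing getInfo from above; the function name, the output directory argument, and the numbered-PNG naming scheme are illustrative assumptions, and it additionally assumes import os.

def extract_one_frame_per_second(sourcePath, outDir):
    # Sample one frame for every second of video, saved as numbered PNGs.
    info = getInfo(sourcePath)
    cap = cv2.VideoCapture(sourcePath)
    step = max(int(round(info["fps"])), 1)  # frames between samples (~1 second apart)
    saved = 0
    for i in range(int(info["framecount"])):
        grabbed, frame = cap.read()
        if not grabbed:
            break
        if i % step == 0:
            cv2.imwrite(os.path.join(outDir, "frame_%06d.png" % saved), frame)
            saved += 1
    cap.release()
    return saved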
Example 13: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def __init__(self, cfg, args):
    # Create a VideoCapture object
    self.cfg = cfg
    self.args = args
    use_cuda = self.args.use_cuda and torch.cuda.is_available()
    if not use_cuda:
        warnings.warn(UserWarning("Running in cpu mode!"))
    self.detector = build_detector(cfg, use_cuda=use_cuda)
    self.deepsort = build_tracker(cfg, use_cuda=use_cuda)
    self.class_names = self.detector.class_names
    self.vdo = cv2.VideoCapture(self.args.input)
    self.status, self.frame = None, None
    self.total_frames = int(cv2.VideoCapture.get(self.vdo, cv2.CAP_PROP_FRAME_COUNT))
    self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
    self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))
    self.output_frame = None
    self.thread = ThreadPoolExecutor(max_workers=1)
    self.thread.submit(self.update)
Example 14: __enter__

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def __enter__(self):
    self.vdo.open(self.args.VIDEO_PATH)
    self.total_frames = int(cv2.VideoCapture.get(self.vdo, cv2.CAP_PROP_FRAME_COUNT))
    self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
    self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))
    video_details = {'frame_width': self.im_width,
                     'frame_height': self.im_height,
                     'frame_rate': self.args.write_fps,
                     'video_name': self.args.VIDEO_PATH}
    codec = cv2.VideoWriter_fourcc(*'XVID')
    self.writer = cv2.VideoWriter(self.output_file, codec, self.args.write_fps,
                                  (self.im_width, self.im_height))
    self.logger.add_video_details(**video_details)
    assert self.vdo.isOpened()
    return self
Example 15: VideoToSequence

# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_COUNT [as alias]
def VideoToSequence(path, time):
    video = cv2.VideoCapture(path)
    dir_path = 'frames_tmp'
    os.system("rm -rf %s" % dir_path)
    os.mkdir(dir_path)
    fps = int(video.get(cv2.CAP_PROP_FPS))
    length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    print('making ' + str(length) + ' frame sequence in ' + dir_path)
    i = -1
    while True:
        (grabbed, frame) = video.read()
        if not grabbed:
            break
        i = i + 1
        index = IndexHelper(i * time, len(str(time * length)))
        cv2.imwrite(dir_path + '/' + index + '.png', frame)
        # print(index)
    return [dir_path, length, fps]