This article collects typical usage examples of the cv2.CAP_PROP_FOURCC attribute in Python. If you are wondering what cv2.CAP_PROP_FOURCC does, how to use it, or want to see it in real code, the curated attribute examples below may help. You can also explore further usage examples from cv2, where this attribute is defined.
The following shows 15 code examples of cv2.CAP_PROP_FOURCC, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
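As a quick orientation before the examples: cap.get(cv2.CAP_PROP_FOURCC) returns the codec's four-character code packed into a single number (as a float). Below is a minimal sketch, not taken from any of the repositories listed here, that reads and decodes the property; the file name "sample.mp4" is a placeholder.

import cv2

cap = cv2.VideoCapture("sample.mp4")  # placeholder path
fourcc_int = int(cap.get(cv2.CAP_PROP_FOURCC))  # the property is returned as a float
# Unpack the 32-bit value into its four ASCII characters, e.g. "avc1" or "MJPG"
fourcc_str = "".join(chr((fourcc_int >> (8 * i)) & 0xFF) for i in range(4))
print(fourcc_int, fourcc_str)
cap.release()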
Example 1: getInfo
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def getInfo(sourcePath):
    cap = cv2.VideoCapture(sourcePath)
    info = {
        "framecount": cap.get(cv2.CAP_PROP_FRAME_COUNT),
        "fps": cap.get(cv2.CAP_PROP_FPS),
        "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        "codec": int(cap.get(cv2.CAP_PROP_FOURCC))
    }
    cap.release()
    return info
#
# Extracts one frame for every second of video.
# Effectively compresses a video down into much less data.
#
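The comment above only describes the sampling idea; the extraction routine itself is not included in this snippet. A minimal sketch of one-frame-per-second sampling follows; the helper name extractFrames and the output naming scheme are illustrative, not from the original repository.

import os
import cv2

def extractFrames(sourcePath, outputDir):
    # Sample roughly one frame per second by seeking ahead fps frames at a time.
    cap = cv2.VideoCapture(sourcePath)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0  # fall back if the container reports 0
    framecount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    step = max(1, int(round(fps)))
    os.makedirs(outputDir, exist_ok=True)
    for second, frame_no in enumerate(range(0, framecount, step)):
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)
        ok, frame = cap.read()
        if not ok:
            break
        cv2.imwrite(os.path.join(outputDir, "frame_%06d.png" % second), frame)
    cap.release()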
Example 2: create_writer
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def create_writer(self, height, width):
    fps = self.cap.get(cv2.CAP_PROP_FPS)
    logging.info('cap fps=%f' % fps)
    path = os.path.expanduser(os.path.expandvars(self.args.output))
    if self.args.fourcc:
        fourcc = cv2.VideoWriter_fourcc(*self.args.fourcc.upper())
    else:
        fourcc = int(self.cap.get(cv2.CAP_PROP_FOURCC))
    os.makedirs(os.path.dirname(path), exist_ok=True)
    return cv2.VideoWriter(path, fourcc, fps, (width, height))
Example 3: detect_video
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def detect_video(yolo, video_path, output_path=""):
    import cv2
    # Imports that the original module is assumed to provide at top level:
    import numpy as np
    from PIL import Image
    from timeit import default_timer as timer
    vid = cv2.VideoCapture(0)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        if not return_value:
            # stop when the stream ends or a frame cannot be read
            break
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example 4: getInfo
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def getInfo(sourcePath):
    cap = cv2.VideoCapture(sourcePath)
    info = {
        "framecount": cap.get(cv2.CAP_PROP_FRAME_COUNT),
        "fps": cap.get(cv2.CAP_PROP_FPS),
        "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        "codec": int(cap.get(cv2.CAP_PROP_FOURCC))
    }
    cap.release()
    return info
Example 5: __setup_stream_settings
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def __setup_stream_settings(self):
    # Set compression format
    self.__video_stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
    # Set buffer size
    # -- Not available in opencv 3.4 -- #
    self.__video_stream.set(cv2.CAP_PROP_BUFFERSIZE, rospy.get_param("~buffer_size"))
    # Set image size
    w, h = rospy.get_param("~frame_size")
    self.__video_stream.set(cv2.CAP_PROP_FRAME_WIDTH, w)
    self.__video_stream.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
    # Set frame rate
    self.__video_stream.set(cv2.CAP_PROP_FPS, self.__frame_rate)
Example 6: source_capture
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def source_capture(source):
    source = int(source) if source.isdigit() else source
    cap = cv2.VideoCapture(source)
    fourcc_cap = cv2.VideoWriter_fourcc(*'MJPG')
    cap.set(cv2.CAP_PROP_FOURCC, fourcc_cap)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, cfg.w)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cfg.h)
    return cap
Example 7: detect_video
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def detect_video(yolo, video_path, output_path=""):
    import cv2
    # Imports that the original module is assumed to provide at top level:
    import numpy as np
    from PIL import Image
    from timeit import default_timer as timer
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        if not return_value:
            # stop when the stream ends or a frame cannot be read
            break
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example 8: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def __init__(self, src=0, fourcc=None, width=None, height=None, fps=None,
             transform=None, queue_size=128, name="WebcamVideoCapture"):
    # Assumes module-level imports: from queue import Queue; from threading import Thread
    self.cap = cv2.VideoCapture(src)
    if not self.cap.isOpened():
        raise IOError(f"Cannot open video {src}")
    # Set capture properties
    if fourcc:
        self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*fourcc))
    if width:
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    if height:
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    if fps:
        self.cap.set(cv2.CAP_PROP_FPS, fps)
    self.transform = transform
    # initialize the queue used to store frames read from the webcam
    self.queue = Queue(maxsize=queue_size)
    # initialize the variable used to indicate if the thread should be stopped
    self.stopped = False
    self.thread = Thread(target=self.update, args=(), name=name)
    self.thread.daemon = True
Example 9: detect_video
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def detect_video(yolo, video_path=0, output_path=""):
    import cv2
    # Imports that the original module is assumed to provide at top level:
    import numpy as np
    from PIL import Image
    from timeit import default_timer as timer
    vid = cv2.VideoCapture(0)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        if not return_value:
            # stop when the stream ends or a frame cannot be read
            break
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example 10: detect_video
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def detect_video(yolo, video_path, output_path=""):
    import cv2
    # Imports that the original module is assumed to provide at top level:
    import numpy as np
    from PIL import Image
    from timeit import default_timer as timer
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        if not return_value:
            # stop when the stream ends or a frame cannot be read
            break
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example 11: videoinfo
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def videoinfo(self):
    # indicate the video info
    fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
    fps = self.stream.get(cv2.CAP_PROP_FPS)
    frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    return (fourcc, fps, frameSize)
Example 12: videoinfo
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def videoinfo(self):
    # indicate the video info
    fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
    fps = self.stream.get(cv2.CAP_PROP_FPS)
    frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    return (fourcc, fps, frameSize)
Example 13: detect_video
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def detect_video(yolo, video_path, output_path=""):
    import cv2
    # Imports that the original module is assumed to provide at top level:
    import numpy as np
    from PIL import Image
    from timeit import default_timer as timer
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        if not return_value:
            # stop when the stream ends or a frame cannot be read
            break
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example 14: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def __init__(self, camera_id=0, fps=60, **kwargs):
    """Create queues and threads to read and preprocess data."""
    self._short_name = 'Webcam'
    self._capture = cv.VideoCapture(camera_id)
    self._capture.set(cv.CAP_PROP_FRAME_WIDTH, 1280)
    self._capture.set(cv.CAP_PROP_FRAME_HEIGHT, 720)
    self._capture.set(cv.CAP_PROP_FOURCC, cv.VideoWriter_fourcc(*'MJPG'))
    self._capture.set(cv.CAP_PROP_FPS, fps)
    # Call parent class constructor
    super().__init__(**kwargs)
Example 15: __init__
# Required module: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FOURCC [as alias]
def __init__(self, sources):
    assert sources
    self.captures = []
    self.transforms = []
    try:
        sources = [int(src) for src in sources]
        mode = 'cam'
    except ValueError:
        mode = 'video'
    if mode == 'cam':
        for id in sources:
            log.info('Connection cam {}'.format(id))
            cap = cv.VideoCapture(id)
            cap.set(cv.CAP_PROP_FRAME_WIDTH, 1280)
            cap.set(cv.CAP_PROP_FRAME_HEIGHT, 720)
            cap.set(cv.CAP_PROP_FPS, 30)
            cap.set(cv.CAP_PROP_FOURCC, cv.VideoWriter_fourcc(*'MJPG'))
            assert cap.isOpened()
            self.captures.append(cap)
    else:
        for video_path in sources:
            log.info('Opening file {}'.format(video_path))
            cap = cv.VideoCapture(video_path)
            assert cap.isOpened()
            self.captures.append(cap)