This article collects typical usage examples of the Python method av.open. If you are unsure what av.open does, how to call it, or want to see real-world examples, the curated code samples below may help. You can also explore the av module itself for further context.
Fifteen code examples of av.open are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
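Before the examples, a minimal sketch of av.open itself may be useful; the file name below is a placeholder:

import av

# Open a container for reading; av.open also accepts file-like objects,
# and mode 'w' opens a container for writing.
container = av.open('input.mp4')  # placeholder path
for frame in container.decode(video=0):
    print(frame.pts, frame.width, frame.height)
container.close()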
Example 1: stat_update
# Required module: import av [as alias]
# Or: from av import open [as alias]
def stat_update(output_dir, process_number, align_length, workerqueue):
    # Log face distances as they are reported by the worker processes.
    face_distance_log = open(str(output_dir / 'face_distance_log.txt'), 'w')
    stat = [0] * process_number
    pbar = tqdm(total=align_length, ascii=True)
    for i in range(process_number):
        while True:
            item = workerqueue.get()
            if item is None:
                break
            processn, idx, face_files, scores = item
            pbar.update(idx - stat[processn])
            stat[processn] = idx
            for (face_file, score) in zip(face_files, scores):
                face_distance_log.write('{}\t{}\n'.format(face_file.stem, score))
    face_distance_log.close()
    pbar.close()
Example 2: main
# Required module: import av [as alias]
# Or: from av import open [as alias]
def main(args):
    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    output_file = output_dir / args.alignments
    # Stage 1: decode frames from the input video.
    queue_prepare_image = queue.Queue(maxsize=1)
    thread_prepare_image = threading.Thread(
        target=prepare_image,
        args=(args.input_dir, args.seekstart, args.durationtime, args.fps,
              queue_prepare_image))
    thread_prepare_image.start()
    # Stage 2: detect faces in the decoded frames.
    queue_find_face = queue.Queue(maxsize=1)
    thread_find_face = threading.Thread(target=find_face,
                                        args=(queue_prepare_image, queue_find_face))
    thread_find_face.start()
    # Stage 3: align faces and collect the results.
    face_alignments = list(align_face(output_dir, queue_find_face))
    with output_file.open('w') as f:
        results = json.dumps(face_alignments, ensure_ascii=False)
        f.write(results)
    print("Saved face alignments to output file:", output_file)
Example 3: toggle_recording
# Required module: import av [as alias]
# Or: from av import open [as alias]
def toggle_recording(self, speed):
    """Handle recording keypress; creates the output stream and file."""
    if speed == 0:
        return
    self.record = not self.record
    if self.record:
        datename = [os.getenv('HOME'), datetime.datetime.now().strftime(self.date_fmt)]
        self.out_name = '{}/Pictures/tello-{}.mp4'.format(*datename)
        print("Outputting video to:", self.out_name)
        self.out_file = av.open(self.out_name, 'w')
        self.start_time = time.time()
        self.out_stream = self.out_file.add_stream(
            'mpeg4', self.vid_stream.rate)
        self.out_stream.pix_fmt = 'yuv420p'
        self.out_stream.width = self.vid_stream.width
        self.out_stream.height = self.vid_stream.height
    if not self.record:
        print("Video saved to", self.out_name)
        self.out_file.close()
        self.out_stream = None
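toggle_recording only creates the output stream; the per-frame write happens elsewhere in the class. A hedged sketch of what that step could look like (write_frame is a hypothetical method, not part of the original example):

def write_frame(self, frame):
    # Hypothetical companion to toggle_recording: re-encode one decoded
    # frame into the open output file while recording is active.
    if not self.record or self.out_stream is None:
        return
    for packet in self.out_stream.encode(frame):
        self.out_file.mux(packet)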
Example 4: __init__
# Required module: import av [as alias]
# Or: from av import open [as alias]
def __init__(self, filename: str, fps=30, progress=None):
    """
    Construct an event handler that outputs to an MP4 file.

    :param filename: The output file to write to.
    :param fps: The frame rate (30 recommended).
    :param progress: An optional callback (sig: `() -> ()`) invoked whenever a frame is muxed.
    """
    self.sink = Mp4Sink()
    self.filename = filename
    self.mp4 = f = av.open(filename, 'w')
    self.stream = f.add_stream('h264', rate=fps)
    self.stream.pix_fmt = 'yuv420p'
    self.progress = progress
    self.scale = False
    self.mouse = (0, 0)
    self.fps = fps
    self.delta = 1000 // fps  # ms per frame
    self.log = logging.getLogger(__name__)
    self.log.info('Begin MP4 export to %s: %d FPS', filename, fps)
    self.timestamp = self.prevTimestamp = None
    super().__init__(self.sink)
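The constructor above only prepares the H264 stream; a hedged sketch of the matching per-frame step, assuming frames arrive as RGB numpy arrays (the method name and array format are assumptions):

def write_rgb_frame(self, array):
    # Hypothetical helper: wrap an RGB ndarray in a VideoFrame and mux it.
    frame = av.VideoFrame.from_ndarray(array, format='rgb24')
    for packet in self.stream.encode(frame):
        self.mp4.mux(packet)
    if self.progress is not None:
        self.progress()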
Example 5: video_worker
# Required module: import av [as alias]
# Or: from av import open [as alias]
def video_worker(self):
    # Get the video stream and open it in PyAV.
    container = av.open(self._drone.get_video_stream())
    # Decode H264.
    rospy.loginfo('starting video pipeline')
    for frame in container.decode(video=0):
        # Convert PyAV frame => PIL image => OpenCV Mat.
        color_mat = cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_RGB2BGR)
        # Convert OpenCV Mat => ROS Image message and publish.
        self._image_pub.publish(self._cv_bridge.cv2_to_imgmsg(color_mat, 'bgr8'))
        # Check for normal shutdown.
        if self._stop_request.isSet():
            return
Example 6: read_keyframes
# Required module: import av [as alias]
# Or: from av import open [as alias]
def read_keyframes(
    video_fpath: str, keyframes: FrameTsList, video_stream_idx: int = 0
) -> FrameList:
    """
    Reads keyframe data from a video file.

    Args:
        video_fpath (str): Video file path
        keyframes (List[int]): List of keyframe timestamps (as counts in
            timebase units to be used in container seek operations)
        video_stream_idx (int): Video stream index (default: 0)
    Returns:
        List[Frame]: list of frames that correspond to the specified timestamps
    """
    with PathManager.open(video_fpath, "rb") as io:
        container = av.open(io)
        stream = container.streams.video[video_stream_idx]
        frames = []
        for pts in keyframes:
            container.seek(pts, any_frame=False, stream=stream)
            frame = next(container.decode(video=0))
            frames.append(frame)
        container.close()
        return frames
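read_keyframes expects timestamps in the stream's time-base units. One hedged way to collect them is to demux packets and keep the pts of those flagged as keyframes (list_keyframe_pts is illustrative, not part of the original module):

def list_keyframe_pts(video_fpath: str, video_stream_idx: int = 0):
    # Sketch: gather keyframe timestamps suitable for read_keyframes.
    container = av.open(video_fpath)
    stream = container.streams.video[video_stream_idx]
    pts_list = [packet.pts for packet in container.demux(stream)
                if packet.is_keyframe and packet.pts is not None]
    container.close()
    return pts_list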
Example 7: get_video_container
# Required module: import av [as alias]
# Or: from av import open [as alias]
def get_video_container(path_to_vid, multi_thread_decode=False, backend="pyav"):
    """
    Given the path to the video, return the pyav video container.

    Args:
        path_to_vid (str): path to the video.
        multi_thread_decode (bool): if True, perform multi-thread decoding.
        backend (str): decoder backend; options include `pyav` and
            `torchvision`, default is `pyav`.
    Returns:
        container (container): video container.
    """
    if backend == "torchvision":
        with open(path_to_vid, "rb") as fp:
            container = fp.read()
        return container
    elif backend == "pyav":
        container = av.open(path_to_vid)
        if multi_thread_decode:
            # Enable multiple threads for decoding.
            container.streams.video[0].thread_type = "AUTO"
        return container
    else:
        raise NotImplementedError("Unknown backend {}".format(backend))
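A hedged usage sketch for the pyav backend, assuming a PyAV version that provides VideoFrame.to_ndarray; the path is a placeholder:

container = get_video_container('/path/to/video.mp4', multi_thread_decode=True)
first_frame = next(container.decode(video=0))
image = first_frame.to_ndarray(format='rgb24')  # H x W x 3 uint8 array
container.close()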
Example 8: write_vid_blk
# Required module: import av [as alias]
# Or: from av import open [as alias]
def write_vid_blk(arr, vpath):
    # Write a (frames, height, width) grayscale array to a uniquely named MP4.
    uid = uuid4()
    vname = "{}.mp4".format(uid)
    fpath = os.path.join(vpath, vname)
    arr = np.clip(arr, 0, 255).astype(np.uint8)
    container = av.open(fpath, mode='w')
    stream = container.add_stream('mpeg4', rate=30)
    stream.width = arr.shape[2]
    stream.height = arr.shape[1]
    stream.pix_fmt = 'yuv420p'
    for fm in arr:
        fm = cv2.cvtColor(fm, cv2.COLOR_GRAY2RGB)
        fmav = av.VideoFrame.from_ndarray(fm, format='rgb24')
        for p in stream.encode(fmav):
            container.mux(p)
    # Flush any frames buffered in the encoder.
    for p in stream.encode():
        container.mux(p)
    container.close()
    return fpath
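A hedged usage sketch for write_vid_blk; it writes one second of random grayscale frames to a temporary directory:

import tempfile
import numpy as np

arr = np.random.randint(0, 256, size=(30, 240, 320))  # frames x height x width
with tempfile.TemporaryDirectory() as vpath:
    fpath = write_vid_blk(arr, vpath)
    print('wrote', fpath)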
Example 9: extractFrontCover
# Required module: import av [as alias]
# Or: from av import open [as alias]
def extractFrontCover(mutagenFile):
    # FLAC/OGG style embedded pictures.
    for pic in getattr(mutagenFile, 'pictures', []):
        if pic.type == mutagen.id3.PictureType.COVER_FRONT:
            image = Image.open(io.BytesIO(pic.data))
            return ImageDataTuple(image, pic.data)
    # APEv2 cover art.
    if isinstance(getattr(mutagenFile, 'Cover Art (Front)', None),
                  mutagen.apev2.APEBinaryValue):
        return loadImageFromAPEBinaryValue(mutagenFile['Cover Art (Front)'])
    # ASF/WMA cover art.
    if ('WM/Picture' in mutagenFile and
            isinstance(mutagenFile['WM/Picture'][0],
                       mutagen.asf._attrs.ASFByteArrayAttribute)):
        return loadImageFromASFByteArrayAttribute(mutagenFile['WM/Picture'][0])
    # MP4 cover art.
    if 'covr' in mutagenFile and isinstance(mutagenFile['covr'], list):
        return loadImageFromData(mutagenFile['covr'][0])
    # ID3 attached picture.
    if 'APIC:' in mutagenFile and isinstance(mutagenFile['APIC:'],
                                             mutagen.id3.APIC):
        return loadImageFromData(mutagenFile['APIC:'].data)
    return extractAnyImageFromList(mutagenFile)
Example 10: calculateAudioTrackSHA256
# Required module: import av [as alias]
# Or: from av import open [as alias]
def calculateAudioTrackSHA256(path, tmpdir='/tmp'):
    # Work on an in-memory copy of the file so the original is untouched.
    # A previous implementation used a temporary file instead:
    # extension = path[path.rfind('.'):]
    # (fn, tmpfilename) = tempfile.mkstemp(suffix=extension, dir=tmpdir)
    filelike = io.BytesIO(open(path, 'rb').read())
    filelike.name = path
    removeAllTags(filelike)
    # shutil.copyfile(path, tmpfilename)
    # removeAllTags(tmpfilename)
    # if os.path.getsize(tmpfilename) >= os.path.getsize(path):
    #     print('Error removing tags from %s (%d >= %d)' %
    #           (path, os.path.getsize(tmpfilename), os.path.getsize(path)))
    print(len(filelike.getvalue()))
    filelike.seek(0)
    # Hash the tag-free audio data.
    return calculateSHA256(filelike)
    # finally:
    #     os.close(fn)
    #     os.unlink(tmpfilename)
Example 11: decodeAudio
# Required module: import av [as alias]
# Or: from av import open [as alias]
def decodeAudio(filething):
    if hasattr(filething, 'seek'):
        filething.seek(0)
        filecontents = filething.read()
        data, properties = bard_audiofile.decode(data=filecontents)
    else:
        data, properties = bard_audiofile.decode(path=filething)
    if config['enable_internal_checks']:
        # Cross-check the decoded samples against pydub, except for files
        # that pydub is known to decode incorrectly.
        FILES_PYDUB_CANT_DECODE_RIGHT = \
            ['/mnt/DD8/media/mp3/id13/k3/software_libre-hq.ogg']
        if hasattr(filething, 'seek'):
            filething.seek(0)
        audio_segment = AudioSegment.from_file(filething)
        if (audio_segment.raw_data != data and
                filething not in FILES_PYDUB_CANT_DECODE_RIGHT):
            with open('/tmp/decoded-song-pydub.raw', 'wb') as f:
                f.write(audio_segment.raw_data)
            with open('/tmp/decoded-song-bard_audiofile.raw', 'wb') as f:
                f.write(data)
            raise Exception('DECODED AUDIO IS DIFFERENT BETWEEN '
                            'BARD_AUDIOFILE AND PYDUB')
        print('bard_audiofile/pydub decode check ' +
              TerminalColors.Ok + 'OK' + TerminalColors.ENDC)
    return data, DecodedAudioPropertiesTupleFromDict(properties)
Example 12: main
# Required module: import av [as alias]
# Or: from av import open [as alias]
def main():
    rospy.init_node('h264_listener')
    rospy.Subscriber("/tello/image_raw/h264", H264Packet, callback)
    # `stream` is a module-level file-like object that `callback` fills
    # with raw H264 packets; av.open demuxes it like a file.
    container = av.open(stream)
    rospy.loginfo('main: opened')
    for frame in container.decode(video=0):
        image = cv2.cvtColor(numpy.array(
            frame.to_image()), cv2.COLOR_RGB2BGR)
        cv2.imshow('Frame', image)
        cv2.waitKey(1)
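Here av.open works because any object with a blocking read method can be demuxed. A minimal sketch of such a buffer, assuming each packet is smaller than the demuxer's requested read size (the class name and structure are assumptions, not the original node's code):

import threading

class H264Buffer(object):
    # Sketch: minimal file-like object av.open can demux from.
    def __init__(self):
        self._chunks = []
        self._cond = threading.Condition()

    def write(self, data):
        # Called from the subscriber callback with raw H264 bytes.
        with self._cond:
            self._chunks.append(bytes(data))
            self._cond.notify()

    def read(self, size=-1):
        # Called by av.open's demuxer; blocks until data arrives and may
        # return fewer bytes than requested, like a socket read.
        with self._cond:
            while not self._chunks:
                self._cond.wait()
            return self._chunks.pop(0)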
Example 13: prepare_image
# Required module: import av [as alias]
# Or: from av import open [as alias]
def prepare_image(input_dir, seekstart, durationtime, fps, workerqueue):
    container = av.open(input_dir)
    stream = container.streams.video[0]
    # container.seek() takes a timestamp in av.time_base (microsecond) units.
    container.seek(seekstart * 1000000)
    frame = next(container.decode(video=0))
    videostart = frame.pts * stream.time_base
    endtime = seekstart + durationtime
    d = min(endtime, stream.duration * stream.time_base) - videostart
    pbar = tqdm(total=(int(d * min(stream.average_rate, fps)) + 1), ascii=True)
    image = frame.to_nd_array(format='rgb24')
    workerqueue.put((frame.pts, image))
    pbar.update(1)
    # Sample frames at most every 1/fps seconds.
    frame_interval = 1 / fps
    timenext = videostart + frame_interval
    for frame in container.decode(video=0):
        timenow = frame.pts * stream.time_base
        if timenow > endtime:
            break
        if timenow >= timenext:
            timenext += frame_interval
            image = frame.to_nd_array(format='rgb24')
            workerqueue.put((frame.pts, image))
            pbar.update(1)
    workerqueue.put(None)
    pbar.close()
Example 14: main
# Required module: import av [as alias]
# Or: from av import open [as alias]
def main():
    # Set up tello streaming.
    drone = tellopy.Tello()
    drone.log.set_level(2)
    drone.connect()
    drone.start_video()
    # Container for processing the packets into frames.
    container = av.open(drone.get_video_stream())
    video_st = container.streams.video[0]
    # Stream and output file for video.
    output = av.open('archive.mp4', 'w')
    ovstream = output.add_stream('mpeg4', video_st.rate)
    ovstream.pix_fmt = 'yuv420p'
    ovstream.width = video_st.width
    ovstream.height = video_st.height
    counter = 0
    save = True
    for packet in container.demux((video_st,)):
        for frame in packet.decode():
            # Convert the frame to a cv2 image and show it.
            image = cv2.cvtColor(numpy.array(
                frame.to_image()), cv2.COLOR_RGB2BGR)
            cv2.imshow('frame', image)
            key = cv2.waitKey(1) & 0xFF
            # Save the initial 300 frames.
            if save:
                new_frame = av.VideoFrame(
                    width=frame.width, height=frame.height, format=frame.format.name)
                for i in range(len(frame.planes)):
                    new_frame.planes[i].update(frame.planes[i])
                encode(new_frame, ovstream, output)
                counter += 1
                print("Frames encoded:", counter)
                if counter > 300:
                    output.close()
                    save = False
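The encode helper called above is not defined in this example; a hedged reconstruction consistent with the call site (the error handling is an assumption):

def encode(frame, ovstream, output):
    # Sketch: encode one frame and mux the resulting packets.
    try:
        packets = ovstream.encode(frame)
    except Exception as err:
        print('encoding failed:', err)
        return False
    for packet in packets:
        output.mux(packet)
    return True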
Example 15: __init__
# Required module: import av [as alias]
# Or: from av import open [as alias]
def __init__(self):
    self.prev_flight_data = None
    self.record = False
    self.tracking = False
    self.keydown = False
    self.date_fmt = '%Y-%m-%d_%H%M%S'
    self.speed = 50
    self.drone = tellopy.Tello()
    self.init_drone()
    self.init_controls()
    # Container for processing the packets into frames.
    self.container = av.open(self.drone.get_video_stream())
    self.vid_stream = self.container.streams.video[0]
    self.out_file = None
    self.out_stream = None
    self.out_name = None
    self.start_time = time.time()
    # HSV range for tracking a green object; alternative colors below.
    green_lower = (30, 50, 50)
    green_upper = (80, 255, 255)
    # red_lower = (0, 50, 50)
    # red_upper = (20, 255, 255)
    # blue_lower = (110, 50, 50)
    # blue_upper = (130, 255, 255)
    self.track_cmd = ""
    self.tracker = Tracker(self.vid_stream.height,
                           self.vid_stream.width,
                           green_lower, green_upper)