This article collects typical usage examples of the Python method moviepy.video.io.VideoFileClip.VideoFileClip.get_frame. If you are unsure what VideoFileClip.get_frame does or how to use it, the curated code examples below may help. You can also explore further usage examples of the containing class, moviepy.video.io.VideoFileClip.VideoFileClip.
Four code examples of VideoFileClip.get_frame are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
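Before the examples, a minimal sketch of the method itself may be useful: get_frame(t) decodes and returns the frame at time t (in seconds) as a NumPy array. The file name below is a placeholder, not taken from the examples.

# Minimal sketch of VideoFileClip.get_frame; "example.mp4" is a placeholder.
from moviepy.video.io.VideoFileClip import VideoFileClip

clip = VideoFileClip("example.mp4")
frame = clip.get_frame(1.5)        # frame at t = 1.5 seconds
print(frame.shape, frame.dtype)    # (height, width, 3), uint8
clip.close()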
Example 1: VideoStim
# Required import: from moviepy.video.io.VideoFileClip import VideoFileClip [as alias]
# Or: from moviepy.video.io.VideoFileClip.VideoFileClip import get_frame [as alias]
class VideoStim(Stim, CollectionStimMixin):

    ''' A video. '''

    def __init__(self, filename, onset=None):
        self.clip = VideoFileClip(filename)
        self.fps = self.clip.fps
        self.width = self.clip.w
        self.height = self.clip.h
        self.n_frames = int(self.fps * self.clip.duration)
        duration = self.clip.duration
        super(VideoStim, self).__init__(filename, onset, duration)

    def __iter__(self):
        """ Frame iteration. """
        for i, f in enumerate(self.clip.iter_frames()):
            yield VideoFrameStim(self, i, data=f)

    @property
    def frames(self):
        return [f for f in self.clip.iter_frames()]

    def get_frame(self, index=None, onset=None):
        if index is not None:
            onset = float(index) / self.fps
        else:
            index = int(onset * self.fps)
        return VideoFrameStim(self, index, data=self.clip.get_frame(onset))
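A short, hypothetical driving example for this class; the point of interest is that index and onset are interchangeable in get_frame. The file name is a placeholder, and VideoStim, VideoFrameStim, and the Stim base classes come from the example's own package.

# Hypothetical usage sketch; "example.mp4" is a placeholder path.
video = VideoStim("example.mp4")
print(video.fps, video.width, video.height, video.n_frames)

# index and onset are interchangeable, since index = int(onset * fps):
frame_by_index = video.get_frame(index=30)
frame_by_onset = video.get_frame(onset=30.0 / video.fps)

# Iteration yields one VideoFrameStim per decoded frame.
for frame_stim in video:
    pass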
Example 2: test_ffmpeg_resizing
# Required import: from moviepy.video.io.VideoFileClip import VideoFileClip [as alias]
# Or: from moviepy.video.io.VideoFileClip.VideoFileClip import get_frame [as alias]
def test_ffmpeg_resizing():
    """Test FFmpeg resizing, including downscaling and upscaling."""
    video_file = 'media/big_buck_bunny_432_433.webm'

    # Exact target: target_resolution is (height, width), matching
    # frame.shape[0:2] of the decoded numpy frames.
    target_resolution = (128, 128)
    video = VideoFileClip(video_file, target_resolution=target_resolution)
    frame = video.get_frame(0)
    assert frame.shape[0:2] == target_resolution

    # Fixed height; the width is derived from the source aspect ratio.
    target_resolution = (128, None)
    video = VideoFileClip(video_file, target_resolution=target_resolution)
    frame = video.get_frame(0)
    assert frame.shape[0] == target_resolution[0]

    # Fixed width; the height is derived from the source aspect ratio.
    target_resolution = (None, 128)
    video = VideoFileClip(video_file, target_resolution=target_resolution)
    frame = video.get_frame(0)
    assert frame.shape[1] == target_resolution[1]

    # Test upscaling.
    target_resolution = (None, 2048)
    video = VideoFileClip(video_file, target_resolution=target_resolution)
    frame = video.get_frame(0)
    assert frame.shape[1] == target_resolution[1]
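The None entries leave one dimension free, and moviepy fills it in from the source aspect ratio. A small sketch of checking that behavior, assuming the same media file is available and allowing a pixel of rounding error:

# Sketch: verify that the free dimension preserves the source aspect ratio.
source = VideoFileClip('media/big_buck_bunny_432_433.webm')
src_h, src_w = source.h, source.w

scaled = VideoFileClip('media/big_buck_bunny_432_433.webm',
                       target_resolution=(None, 128))
h, w = scaled.get_frame(0).shape[0:2]
assert w == 128
assert abs(h - round(128 * src_h / src_w)) <= 1  # within one pixel of rounding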
Example 3: MovieStim3
# Required import: from moviepy.video.io.VideoFileClip import VideoFileClip [as alias]
# Or: from moviepy.video.io.VideoFileClip.VideoFileClip import get_frame [as alias]
#......... some code omitted here .........
        Note that this is relative to the original, not relative to the
        current state.
        """
        self.flipVert = not newVal
        logAttrib(self, log, 'flipVert')

    def getFPS(self):
        """Returns the movie frames per second playback speed."""
        return self._mov.fps

    def getCurrentFrameTime(self):
        """Get the time that the movie file specified the current video
        frame as having.
        """
        return self._nextFrameT - self._frameInterval

    def _updateFrameTexture(self):
        if self._nextFrameT is None:
            # The movie has no current position: reset the clock to zero so
            # the timing logic works. Otherwise the video stream would skip
            # frames until the time since creating the movie object has
            # passed.
            self._videoClock.reset()
            self._nextFrameT = 0

        # Only advance if the next frame is due, i.e. within half a retrace
        # interval of the video clock.
        if self._nextFrameT > self.duration:
            self._onEos()
        elif (self._numpyFrame is not None) and \
                (self._nextFrameT > (self._videoClock.getTime() -
                                     self._retraceInterval / 2.0)):
            return None
        self._numpyFrame = self._mov.get_frame(self._nextFrameT)

        useSubTex = self.useTexSubImage2D
        if self._texID is None:
            self._texID = GL.GLuint()
            GL.glGenTextures(1, ctypes.byref(self._texID))
            useSubTex = False

        # Bind the texture in OpenGL.
        GL.glEnable(GL.GL_TEXTURE_2D)
        # Bind that name to the target.
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        # Make the texture map wrap (this is actually the default anyway).
        GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT)
        # Data from PIL/numpy is packed, but the GL default alignment is 4 bytes.
        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)

        # Interpolation matters if using bits++, because GL_LINEAR sometimes
        # extrapolates to pixel values outside the valid range.
        if self.interpolate:
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1], self._numpyFrame.shape[0],
                                0, GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                                self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1], self._numpyFrame.shape[0],
                                   GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        else:
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1], self._numpyFrame.shape[0],
                                0, GL.GL_BGR, GL.GL_UNSIGNED_BYTE,
                                self._numpyFrame.ctypes)
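Stripped of the OpenGL work, the frame-advance decision at the top of _updateFrameTexture reduces to the following hypothetical distillation; the function is mine, while the parameter names mirror the excerpt's attributes.

# Hypothetical distillation of the decision logic in _updateFrameTexture:
# advance only when the next frame's scheduled time has come within half a
# retrace interval of the video clock.
def should_fetch_new_frame(next_frame_t, clock_time, retrace_interval,
                           duration, have_frame):
    if next_frame_t > duration:
        return False  # past the end: the real code calls self._onEos()
    if have_frame and next_frame_t > clock_time - retrace_interval / 2.0:
        return False  # not due yet: keep showing the current frame
    return True       # due: fetch with self._mov.get_frame(next_frame_t)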
Example 4: Decoder
# Required import: from moviepy.video.io.VideoFileClip import VideoFileClip [as alias]
# Or: from moviepy.video.io.VideoFileClip.VideoFileClip import get_frame [as alias]
#......... some code omitted here .........
        # Start the video clock together with this thread.
        self.clock.start()
        logger.debug("Started rendering loop.")

        # Main rendering loop.
        while self.status in [PLAYING, PAUSED]:
            current_frame_no = self.clock.current_frame

            # Check if the end of the clip has been reached.
            if self.clock.time >= self.duration:
                logger.debug("End of stream reached at {}".format(self.clock.time))
                if self.loop:
                    logger.debug("Looping: restarting stream")
                    # Seek back to the start.
                    self.seek(0)
                else:
                    # End of stream has been reached.
                    self.status = EOS
                    break

            if self.last_frame_no != current_frame_no:
                # A new frame is available. Get it from the stream.
                self.__render_videoframe()
            self.last_frame_no = current_frame_no

            # Sleeping is a good idea to give the other threads some breathing
            # space to do their work.
            time.sleep(0.005)

        # Stop the clock.
        self.clock.stop()
        logger.debug("Rendering stopped.")

    def __render_videoframe(self):
        """ Retrieves a new videoframe from the stream.

        Sets the frame as the __current_video_frame and passes it on to
        __videorenderfunc() if it is set. """
        new_videoframe = self.clip.get_frame(self.clock.time)
        # Pass it to the callback function if one is set.
        if callable(self.__videorenderfunc):
            self.__videorenderfunc(new_videoframe)
        # Store it as the current video frame.
        self.__current_videoframe = new_videoframe

    def __audiorender_thread(self):
        """ Thread that takes care of the audio rendering. Do not call directly,
        but only as the target of a thread. """
        new_audioframe = None
        logger.debug("Started audio rendering thread.")

        while self.status in [PLAYING, PAUSED]:
            # Retrieve an audio chunk.
            if self.status == PLAYING:
                if new_audioframe is None:
                    # Get a new frame from the audio stream; skip to the next
                    # one if the current one gives a problem.
                    try:
                        start = self.audio_times.pop(0)
                        stop = self.audio_times[0]
                    except IndexError:
                        logger.debug("Audio times could not be obtained")
                        time.sleep(0.02)
                        continue

                    # Get the frame times to extract from the audio stream.
                    chunk = (1.0 / self.audioformat['fps']) * np.arange(start, stop)

                    try:
                        # Extract the frames from the audio stream. This does
                        # not always succeed (e.g. with bad streams missing
                        # frames), so make sure it doesn't crash the whole
                        # program.
                        new_audioframe = self.clip.audio.to_soundarray(
                            tt=chunk,
                            buffersize=self.frame_interval * self.clip.audio.fps,
                            quantize=True
                        )
                    except OSError as e:
                        logger.warning("Sound decoding error: {}".format(e))
                        new_audioframe = None

                # Put the audio frame in the buffer/queue for the sound
                # renderer to pick up. If the queue is full, try again after a
                # timeout (this allows checking whether the status is still
                # PLAYING after a pause).
                if new_audioframe is not None:
                    try:
                        self.audioqueue.put(new_audioframe, timeout=.05)
                        new_audioframe = None
                    except Full:
                        pass

            time.sleep(0.005)

        logger.debug("Stopped audio rendering thread.")

    def __repr__(self):
        """ Create a string representation for when print() is called. """
        return "Decoder [file loaded: {0}]".format(self.loaded_file)
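For completeness, a hypothetical wiring sketch for this Decoder: the excerpt shows that each decoded frame is handed to __videorenderfunc, so a consumer mainly supplies a callback. The constructor parameter, loader, and play() call below are assumptions about the class's public surface, not confirmed API, and the file name is a placeholder.

# Hypothetical usage sketch; constructor/loader/play names are assumptions.
def on_frame(frame):
    # frame is the numpy array produced by clip.get_frame(clock.time)
    print("frame:", frame.shape, frame.dtype)

decoder = Decoder(videorenderfunc=on_frame)  # assumed keyword parameter
decoder.load_media("example.mp4")            # assumed loader; placeholder path
decoder.play()                               # assumed to start the render threads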