

Python VideoFileClip.iter_frames Method Code Examples

This article collects typical usage examples of the Python method moviepy.video.io.VideoFileClip.VideoFileClip.iter_frames. If you are wondering what VideoFileClip.iter_frames does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the containing class, moviepy.video.io.VideoFileClip.VideoFileClip.


Three code examples of the VideoFileClip.iter_frames method are shown below, ordered by popularity by default.
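
Before the project-level examples, here is a minimal sketch of the pattern they all share: open a clip and iterate over its frames as numpy arrays. The file name "sample.mp4" is a placeholder, and clip.close() assumes a reasonably recent moviepy release.

from moviepy.video.io.VideoFileClip import VideoFileClip

clip = VideoFileClip("sample.mp4")              # "sample.mp4" is a placeholder path
for i, frame in enumerate(clip.iter_frames()):
    # Each frame is an RGB numpy array of shape (height, width, 3)
    print(i, frame.shape)
clip.close()                                    # release the ffmpeg reader when done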

Example 1: VideoStim

# Required import: from moviepy.video.io.VideoFileClip import VideoFileClip [as alias]
# Or: from moviepy.video.io.VideoFileClip.VideoFileClip import iter_frames [as alias]
class VideoStim(Stim, CollectionStimMixin):

    ''' A video. '''

    def __init__(self, filename, onset=None):

        self.clip = VideoFileClip(filename)
        self.fps = self.clip.fps
        self.width = self.clip.w
        self.height = self.clip.h

        self.n_frames = int(self.fps * self.clip.duration)
        duration = self.clip.duration

        super(VideoStim, self).__init__(filename, onset, duration)

    def __iter__(self):
        """ Frame iteration. """
        for i, f in enumerate(self.clip.iter_frames()):
            yield VideoFrameStim(self, i, data=f)

    @property
    def frames(self):
        return [f for f in self.clip.iter_frames()]

    def get_frame(self, index=None, onset=None):
        if index is not None:
            onset = float(index) / self.fps
        else:
            index = int(onset * self.fps)
        return VideoFrameStim(self, index, data=self.clip.get_frame(onset))
Developer: qmac, Project: featureX, Lines of code: 33, Source file: video.py
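
A hypothetical usage sketch for the class above, assuming VideoStim and its featureX dependencies (Stim, CollectionStimMixin, VideoFrameStim) are importable; "movie.mp4" and the per-frame handling are placeholders.

stim = VideoStim("movie.mp4")                        # "movie.mp4" is a placeholder path
print(stim.fps, stim.width, stim.height, stim.n_frames)
for frame_stim in stim:                              # yields VideoFrameStim objects built from clip.iter_frames()
    pass                                             # replace with your own per-frame handling
mid_frame = stim.get_frame(index=stim.n_frames // 2)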

Example 2: _load_video

# Required import: from moviepy.video.io.VideoFileClip import VideoFileClip [as alias]
# Or: from moviepy.video.io.VideoFileClip.VideoFileClip import iter_frames [as alias]
def _load_video(full_path, size = None, resize_mode = 'resize_and_crop', cut_edges=False, cut_edges_thresh=0):
    """
    Load a video into a list of frame arrays

    :param full_path: Full path to the video
    :param size: A 2-tuple of width-height, indicating the desired size of the output
    :param resize_mode: The mode with which to get the video to the desired size.  Can be:
        'squeeze', 'preserve_aspect', 'crop', 'scale_crop'.  See resize_image in image_ops.py for more info.
    :param cut_edges: True if you want to cut the dark edges from the video
    :param cut_edges_thresh: If cut_edges, this is the threshold at which you'd like to cut them.
    :return: A list of (height, width, 3) numpy arrays, one per frame
    """
    try:
        from moviepy.video.io.VideoFileClip import VideoFileClip
    except ImportError:
        raise ImportError("You need to install moviepy to read videos.  In the virtualenv, run `pip install moviepy`")
    assert os.path.exists(full_path)
    video = VideoFileClip(full_path)
    images = []
    edge_crops = None
    for frame in video.iter_frames():
        if cut_edges:
            if edge_crops is None:
                # Compute the dark-edge crop slices once, from the first frame
                edge_crops = get_dark_edge_slice(frame, cut_edges_thresh=cut_edges_thresh)
            # Crop every frame (including the first) so all frames share the same shape
            frame = frame[edge_crops[0], edge_crops[1]]
        if size is not None:
            width, height = size
            frame = resize_image(frame, width=width, height=height, mode=resize_mode)
        images.append(frame)
    return images
Developer: QUVA-Lab, Project: artemis, Lines of code: 33, Source file: smart_io.py
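
A hypothetical usage sketch: the path is a placeholder, and stacking the returned list with numpy gives the (n_frames, height, width, 3) array mentioned in the docstring.

import numpy as np

frames = _load_video("clip.mp4", size=(160, 120), cut_edges=True)   # "clip.mp4" is a placeholder path
video_array = np.array(frames)                                       # shape: (n_frames, height, width, 3)
print(video_array.shape)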

Example 3: ffwd_video

# Required import: from moviepy.video.io.VideoFileClip import VideoFileClip [as alias]
# Or: from moviepy.video.io.VideoFileClip.VideoFileClip import iter_frames [as alias]
def ffwd_video(path_in, path_out, checkpoint_dir, device_t='/gpu:0', batch_size=4):
    video_clip = VideoFileClip(path_in, audio=False)
    video_writer = ffmpeg_writer.FFMPEG_VideoWriter(path_out, video_clip.size, video_clip.fps, codec="libx264",
                                                    preset="medium", bitrate="2000k",
                                                    audiofile=path_in, threads=None,
                                                    ffmpeg_params=None)

    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), \
            tf.Session(config=soft_config) as sess:
        batch_shape = (batch_size, video_clip.size[1], video_clip.size[0], 3)
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        X = np.zeros(batch_shape, dtype=np.float32)

        def style_and_write(count):
            for i in range(count, batch_size):
                X[i] = X[count - 1]  # Use last frame to fill X
            _preds = sess.run(preds, feed_dict={img_placeholder: X})
            for i in range(0, count):
                video_writer.write_frame(np.clip(_preds[i], 0, 255).astype(np.uint8))

        frame_count = 0  # Number of frames written into the current batch X
        for frame in video_clip.iter_frames():
            X[frame_count] = frame
            frame_count += 1
            if frame_count == batch_size:
                style_and_write(frame_count)
                frame_count = 0

        if frame_count != 0:
            style_and_write(frame_count)

        video_writer.close()
Developer: XingXL, Project: fast-style-transfer, Lines of code: 50, Source file: evaluate.py
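
A hypothetical usage sketch: all paths are placeholders, and checkpoint_dir may be either a checkpoint directory or a single checkpoint file, as handled by the function above.

ffwd_video(path_in="input.mp4", path_out="styled.mp4",
           checkpoint_dir="checkpoints/wave", device_t="/gpu:0", batch_size=4)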


Note: The moviepy.video.io.VideoFileClip.VideoFileClip.iter_frames examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not reproduce this article without permission.