

Python VideoFileClip.get_frame Method Code Examples

This article collects typical usage examples of the Python method moviepy.editor.VideoFileClip.get_frame. If you are unsure what VideoFileClip.get_frame does or how to call it, the hand-picked code examples below should help. You can also explore further examples for the class it belongs to, moviepy.editor.VideoFileClip.


Eight code examples of VideoFileClip.get_frame are shown below, sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
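Before the numbered examples, here is a minimal sketch of the basic call pattern (the file name example.mp4 is a placeholder): get_frame(t) returns the RGB frame at time t seconds as a NumPy array of shape (height, width, 3).

from moviepy.editor import VideoFileClip
from PIL import Image

clip = VideoFileClip("example.mp4", audio=False)  # placeholder input file
frame = clip.get_frame(1.5)                       # RGB ndarray at t = 1.5 seconds
print(frame.shape, frame.dtype)                   # e.g. (720, 1280, 3) uint8
Image.fromarray(frame).save("frame_at_1.5s.png")  # write the frame out as a PNG
clip.close()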

Example 1: MoviePyReader

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Alternatively: from moviepy.editor.VideoFileClip import get_frame [as alias]
class MoviePyReader(FramesSequence):
    class_priority = 4
    @classmethod
    def class_exts(cls):
        return {'mov', 'mp4', 'avi', 'mpeg', 'wmv', 'mkv'}
    def __init__(self, filename):
        if VideoFileClip is None:
            raise ImportError('The MoviePyReader requires moviepy to work.')
        self.clip = VideoFileClip(filename)
        self.filename = filename
        self._fps = self.clip.fps
        self._len = int(self.clip.fps * self.clip.end)

        first_frame = self.clip.get_frame(0)
        self._shape = first_frame.shape
        self._dtype = first_frame.dtype

    def get_frame(self, i):
        return Frame(self.clip.get_frame(i / self._fps), frame_no=i)

    def __len__(self):
        return self._len

    @property
    def frame_shape(self):
        return self._shape

    @property
    def frame_rate(self):
        return self._fps

    @property
    def pixel_type(self):
        return self._dtype  
Developer ID: soft-matter, Project: pims, Lines of code: 36, Source: moviepy_reader.py
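A hypothetical usage sketch for the reader above (pims must be installed; video.mp4 is a placeholder path). Besides VideoFileClip, the class definition also relies on pims' FramesSequence and Frame:

from pims import FramesSequence, Frame  # base classes used by MoviePyReader

reader = MoviePyReader("video.mp4")    # placeholder input file
print(len(reader), reader.frame_rate)  # total frame count and fps
frame = reader.get_frame(10)           # Frame (an ndarray subclass) at frame index 10
print(frame.shape, reader.pixel_type)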

Example 2: average_video

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Alternatively: from moviepy.editor.VideoFileClip import get_frame [as alias]
def average_video(filepath, outpath, start=None, end=None, sample_every=1):
    """Calculate average of video frames"""

    # Load video
    vid = VideoFileClip(filepath, audio=False)
    width = vid.w
    height = vid.h

    if start is None and end is None:
        frame_generator = vid.iter_frames(progress_bar=True, dtype=np.uint8)
    else:
        if start is None:
            start = 0
        if end is None:
            end = vid.duration
        # compute time increment for sampling by frames
        sample_inc = sample_every / vid.fps
        frame_generator = tqdm(vid.get_frame(f) for f in frange(start, end, sample_inc))

    # create starting matrix of zeros
    sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    prev_f = np.zeros(shape=(height, width, 3), dtype=int)
    sum_delta_fs = np.zeros(shape=(height, width, 3), dtype=int)

    n_frames = 0
    for f in frame_generator:
        delta = f - prev_f
        sum_delta_fs += delta
        sum_fs += f

        ma_sum_fs += f
        if divmod(n_frames, 100)[1] == 0 and n_frames > 0:
            ma_f = ma_sum_fs / 100
            Image.fromarray(ma_f.astype(np.uint8))\
                .save(os.path.join(outpath, 'movavg_{}.png'.format(n_frames)))
            ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)

        n_frames += 1
        prev_f = f

    # average out the values for each frame
    average_delta_f = sum_delta_fs / n_frames
    average_f = sum_fs / n_frames

    # Create images
    delta_img = Image.fromarray(average_delta_f.astype(np.uint8))
    delta_img.save(os.path.join(outpath, 'average_delta.png'))
    final_img = Image.fromarray(average_f.astype(np.uint8))
    final_img.save(os.path.join(outpath, 'average.png'))
Developer ID: nicodv, Project: research, Lines of code: 52, Source: frameprocessor.py

Example 3: video2rollscan

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Alternatively: from moviepy.editor.VideoFileClip import get_frame [as alias]
def video2rollscan(videofile, focus, start=0, end=None, savefile=None):
    """
    
    Makes a scan of the roll from the video.
    Requires the Python module MoviePy.
    
    Parameters
    -----------
    
    videofile
        Any videofile that MoviePy (FFMPEG) can read.
        
    focus
        A function ( f(image)->rectangular image ). For instance
        if the line of interest is defined by y=15 and x=10...230
        
        >>> focus = lambda im : im[ [15], 10:230 ]
        
    start,end
        Where to start and stop, each one either in seconds, or in
        format `(minutes, seconds)`. By default `start=0` and `end`
        is the end of the video.
        
    savefile
        If provided, the scan image will be saved under this name.
    
    Returns
    --------
    
      A W*H*3 RGB picture of the piano roll made by stacking the focus
      lines of the different frames under one another.
    """

    from moviepy.editor import VideoFileClip

    video = VideoFileClip(videofile, audio=False)

    if end is None:
        end = video.duration

    video = video.subclip(start, end)

    tt = np.arange(0, video.duration, 1.0 / video.fps)
    result = np.vstack([focus(video.get_frame(t)) for t in tt])

    if savefile:
        import matplotlib.pyplot as plt

        plt.imsave(savefile, result)

    return result
Developer ID: 99plus2, Project: unroll, Lines of code: 51, Source: video.py
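A hypothetical call of video2rollscan (the file names are placeholders, and the focus line mirrors the docstring example):

focus = lambda im: im[[15], 10:230]               # one-pixel-high strip of the roll
scan = video2rollscan("piano_roll.mp4", focus,
                      start=(0, 5), end=(1, 30),  # from 0:05 to 1:30
                      savefile="roll_scan.png")
print(scan.shape)                                 # (number of frames, 220, 3)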

Example 4: average_video

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Alternatively: from moviepy.editor.VideoFileClip import get_frame [as alias]
def average_video(filepath, outpath, start=None, end=None, sample_every=1):
    """Calculate average of video frames"""

    # Load video
    vid = VideoFileClip(filepath, audio=False).resize(width=66)
    width = vid.w
    height = vid.h

    if start is None and end is None:
        frame_generator = vid.iter_frames(progress_bar=True, dtype=np.uint8)
    else:
        if start is None:
            start = 0
        if end is None:
            end = vid.duration
        # compute time increment for sampling by frames
        sample_inc = sample_every / vid.fps
        frame_generator = tqdm(vid.get_frame(f) for f in frange(start, end, sample_inc))

    # create starting matrix of zeros
    sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    prev_f = np.zeros(shape=(height, width, 3), dtype=int)
    sum_delta_fs = np.zeros(shape=(height, width, 3), dtype=int)

    n_frames = 0
    for f in frame_generator:
        #delta = f - prev_f
        #sum_delta_fs += delta
        #sum_fs += f

        #ma_sum_fs += f
        #if divmod(n_frames, 100)[1] == 0 and n_frames > 0:
        #    ma_f = ma_sum_fs / 100
        #    Image.fromarray(ma_f.astype(np.uint8))\
        #        .save(os.path.join(outpath, 'movavg_{}.png'.format(n_frames)))
        #    ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)

        #n_frames += 1
        #prev_f = f
        print(len(f))
        time.sleep(1.0/float(sample_every))
Developer ID: cheetahray, Project: Shanghai, Lines of code: 44, Source: frameprocessor.py

Example 5: run_moving_crash

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Alternatively: from moviepy.editor.VideoFileClip import get_frame [as alias]
def run_moving_crash(args, target, outfile):
    """Runs a moving crash based on moving (gif/mp4) inputs"""
    video = VideoFileClip(target)
    img = video.get_frame(t=0)  # first frame of the video
    bounds = foreground.get_fg_bounds(img.shape[1], args.max_depth)
    max_depth = bounds.max_depth
    crash_params = crash.CrashParams(
        max_depth, args.threshold, args.bg_value, args.rgb_select)
    options = _options(args.reveal_foreground, args.reveal_background,
                       args.crash, args.reveal_quadrants, args.bg_value)
    frames = video.iter_frames(fps=video.fps)

    def make_frame(_):
        frame = next(frames)
        fg, bounds = foreground.find_foreground(frame, crash_params)
        return _process_img(frame, fg, bounds, options)

    output_video = VideoClip(
        make_frame, duration=video.duration-(4/video.fps))  # trim last 4 frms
    output_video.write_videofile(
        outfile, preset=args.compression, fps=video.fps,
        threads=args.in_parallel)
Developer ID: TadLeonard, Project: crash-kiss, Lines of code: 24, Source: kiss.py

Example 6: split_video

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Alternatively: from moviepy.editor.VideoFileClip import get_frame [as alias]
def split_video():

  movie_title = os.path.split(args.source_path)[-1]
  offset_csv = os.path.join(args.target_folder, 'offsets.csv')
  offsets = []
  video = VideoFileClip(args.source_path, audio=False)
  framerate = video.fps
  width = (np.size(video.get_frame(0), 1) - args.middle_gap_pixel_size) // 2
  left_video = moviepy.video.fx.all.crop(video, x1=0, width=width)
  right_video = moviepy.video.fx.all.crop(video, x1=width + args.middle_gap_pixel_size, width=width)
  right_frame_iterator = right_video.iter_frames()
  output_ind = args.output_starting_ind

  for ind, left_frame in enumerate(left_video.iter_frames()):
    left_frame = rgb2gray(left_frame)
    right_frame = rgb2gray(next(right_frame_iterator))
    if (ind % 20 == 0): # INITIALIZE
      left_frames = []
      right_frames = []
      offset_frames = []
      first_start = ind
      offset = randint(1,10)
      second_start = first_start + offset
      offset_left = randint(0, 1) == 1
    if (ind >= first_start and ind < first_start + 10): # ADD FRAMES
      right_frames.append(right_frame)
      left_frames.append(left_frame)
    if (ind >= second_start and ind < second_start + 10): # ADD OFFSET FRAMES
      if (offset_left):
        offset_frames.append(left_frame)
      else:
        offset_frames.append(right_frame)
    if (ind % 20 == 19): # SAVE SEGMENT FRAMES TO JPEG
      if args.output_images:
        assert len(left_frames) == 10, 'Only added ' + str(len(left_frames)) + ' left frames on segment ' + str(output_ind) + '. Should have 10.'
        assert len(right_frames) == 10, 'Only added ' + str(len(right_frames)) + ' right frames on segment ' + str(output_ind) + '. Should have 10.'
        assert len(offset_frames) == 10, 'Only added ' + str(len(offset_frames)) + ' offset frames on segment ' + str(output_ind) + '. Should have 10.'
        for frame_ind, left_frame in enumerate(left_frames):
          misc.toimage(left_frame, cmin=np.min(left_frame), cmax=np.max(left_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-left.jpeg').format(output_ind, frame_ind)))
        for frame_ind, right_frame in enumerate(right_frames):
          misc.toimage(right_frame, cmin=np.min(right_frame), cmax=np.max(right_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-right.jpeg').format(output_ind, frame_ind)))
      else:
        left_video_out = ImageSequenceClip(left_frames, fps=framerate)
        left_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-left.mp4'.format(output_ind)), codec='libx264', audio=False)
        right_video_out = ImageSequenceClip(right_frames, fps=framerate)
        right_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-right.mp4'.format(output_ind)), codec='libx264', audio=False)
      offsets.append({ 'id': '%06d' % output_ind, 'offset_frames': 0 })
      output_ind += 1
      if (offset_left):
        if args.output_images:
          for frame_ind, offset_frame in enumerate(offset_frames):
            misc.toimage(offset_frame, cmin=np.min(left_frame), cmax=np.max(left_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-left.jpeg').format(output_ind, frame_ind)))
          for frame_ind, right_frame in enumerate(right_frames):
            misc.toimage(right_frame, cmin=np.min(right_frame), cmax=np.max(right_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-right.jpeg').format(output_ind, frame_ind)))
        else:
          left_video_out = ImageSequenceClip(offset_frames, fps=framerate)
          left_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-left.mp4'.format(output_ind)), codec='libx264', audio=False)
          right_video_out = ImageSequenceClip(right_frames, fps=framerate)
          right_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-right.mp4'.format(output_ind)), codec='libx264', audio=False)
      else:
        if args.output_images:
          for frame_ind, left_frame in enumerate(left_frames):
            misc.toimage(left_frame, cmin=np.min(left_frame), cmax=np.max(left_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-left.jpeg').format(output_ind, frame_ind)))
          for frame_ind, offset_frame in enumerate(offset_frames):
            misc.toimage(offset_frame, cmin=np.min(right_frame), cmax=np.max(right_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-right.jpeg').format(output_ind, frame_ind)))
        else:
          left_video_out = ImageSequenceClip(left_frames, fps=framerate)
          left_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-left.mp4'.format(output_ind)), codec='libx264', audio=False)
          right_video_out = ImageSequenceClip(offset_frames, fps=framerate)
          right_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-right.mp4'.format(output_ind)), codec='libx264', audio=False)
      offsets.append({ 'id': '{:06d}'.format(output_ind), 'offset_frames': offset })
      output_ind += 1
    if (ind % 1000 == 0):
      print('Finished processing {:d} datapoints.'.format(output_ind))
  os.remove(offset_csv)
  with open(offset_csv, 'w') as offset_csv_file:
    w = csv.DictWriter(offset_csv_file, fieldnames=['id', 'offset_frames'])
    w.writeheader()
    w.writerows(offsets)
  return True
Developer ID: MosNicholas, Project: audio-video-alignment, Lines of code: 82, Source: create_left_right_dataset.py

Example 7: MyPaintWidget

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Alternatively: from moviepy.editor.VideoFileClip import get_frame [as alias]

#......... part of the code omitted .........
			return None
		# create default txt filename
		self.txtFilename = filename[:filename.rfind(".")] + "_label.txt"
		# display txt filename
		self.ids.PathSaveFile.text = self.txtFilename
		# create temp directory for frames from video
		# self.dirPath = mkdtemp()
		self.dirPath = "asdf"
		makedirs("asdf")
		print(self.video.duration)
		# set number of frame
		self.numberOfFrame = 0
		# change winow size
		Window.size = (self.video.size[0], self.video.size[1] + pad)
		self.size = (self.video.size[0], self.video.size[1] + pad)

		self.firstFrame = True
		# find rectangle
		for obj in self.canvas.children:
			if type(obj) == Rectangle:
				self.rect = obj
		# set rectangle size
		self.rect.size = (self.size[0], self.size[1] - pad)
		# set frame to rectangle
		self.rect.source = self.GetFrameFilename()

	# return filename of frame from video
	def GetFrameFilename(self):
		# check if video open
		if (self.video == None):
			print "Error! Video doesn't open!"
			return None
		if (self.firstFrame):
			imsave(self.dirPath + "/temp0.png", self.video.get_frame(self.numberOfFrame / self.video.fps))
			self.firstFrame = False
			self.dictionary[self.numberOfFrame] = {}
			self.dictionary[self.numberOfFrame]["smoke"] = []
			self.dictionary[self.numberOfFrame]["fire"] = []
			return self.dirPath + "/temp0.png"
		else:
			# increase number of frame
			self.numberOfFrame = int(self.numberOfFrame) + speed
			if (self.numberOfFrame >= self.video.duration * self.video.fps):
				self.numberOfFrame = int(self.numberOfFrame) - speed
				self.video = None
				return None
			self.dictionary[self.numberOfFrame] = {}
			self.dictionary[self.numberOfFrame]["smoke"] = []
			self.dictionary[self.numberOfFrame]["fire"] = []
			# save frame from video
			imsave(self.dirPath + "/temp" + str(self.numberOfFrame) + ".png",
			       self.video.get_frame(int(self.numberOfFrame / self.video.fps)))
			print(self.numberOfFrame / (self.video.fps * self.video.duration) * 100)
			return self.dirPath + "/temp" + str(self.numberOfFrame) + ".png"

	def DrawSmoke(self):
		# check if video open
		if (self.video == None):
			print "Error! Video doesn't open!"
			return None
		self.smoke = True
		self.fire = False

	def DrawFire(self):
		# check if video open
		if (self.video == None):
Developer ID: allgrom, Project: VKR, Lines of code: 70, Source: LabelData.py

Example 8: average_video

# Required import: from moviepy.editor import VideoFileClip [as alias]
# Alternatively: from moviepy.editor.VideoFileClip import get_frame [as alias]
def average_video(filepath, outpath, start=None, end=None, sample_every=1):
    global sb1
    global sb2
    global sb3
    global sb4
    global sb5
    global sb6
    global sb7
    global sb8
    global sb9
    global sb10
    global sb11
    global sb12
    global sb13
    global sb14
    global sb15
    global sb16
    global sb17
    global sb18
    global sb19
    global sb20
    global sb21
    global sb22
    global sb23
    global sb24
    global sb25
    global sb26
    global sb27
    global sb28
    global sb29
    global sb30
    global sb31
    global sb32
    global sb33
    global sb34
    global sb35
    global sb36
    global sb37
    global sb38
    global sb39
    global sb40
    global sb41
    global sb42
    global sb43
    global sb44
    global sb45
    global sb46
    global sb47
    global sb48
    global sb49
    global sb50
    global sb51
    global sb52
    global sb53
    global sb54
    global sb55
    global sb56
    global sb57
    global sb58
    global sb59
    global sb60
    global sb61
    global sb62
    global sb63
    global sb64
    global sb65
    global sb66
    """Calculate average of video frames"""

    # Load video
    vid = VideoFileClip(filepath, audio=False).resize(width=66)
    width = vid.w
    height = vid.h

    if start is None and end is None:
        frame_generator = vid.iter_frames(progress_bar=True, dtype=np.uint8)
    else:
        if start is None:
            start = 0
        if end is None:
            end = vid.duration
        # compute time increment for sampling by frames
        sample_inc = sample_every / vid.fps
        frame_generator = tqdm(vid.get_frame(f) for f in frange(start, end, sample_inc))

    # create starting matrix of zeros
    sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    prev_f = np.zeros(shape=(height, width, 3), dtype=int)
    sum_delta_fs = np.zeros(shape=(height, width, 3), dtype=int)

    n_frames = 0
    for f in frame_generator:
        #delta = f - prev_f
        #sum_delta_fs += delta
        #sum_fs += f

        #ma_sum_fs += f
        #if divmod(n_frames, 100)[1] == 0 and n_frames > 0:
        #    ma_f = ma_sum_fs / 100
#......... part of the code omitted .........
Developer ID: cheetahray, Project: Shanghai, Lines of code: 103, Source: broadspi.py


Note: The moviepy.editor.VideoFileClip.get_frame examples in this article were collected by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many programmers, and copyright of the source code remains with the original authors. When distributing or using the code, please follow the license of the corresponding project. Do not reproduce this article without permission.