

Python editor.VideoFileClip method code examples

This article collects typical code examples of the moviepy.editor.VideoFileClip method in Python. If you are wondering how editor.VideoFileClip is used in practice, what to pass to it, or what real-world usage looks like, the hand-picked examples below may help. You can also explore further usage examples from the moviepy.editor module.


The sections below show 15 code examples of the editor.VideoFileClip method, sorted by popularity by default.
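Before the examples, here is a minimal sketch of typical VideoFileClip usage (the file names are placeholders, not taken from any of the projects below):

from moviepy.editor import VideoFileClip

clip = VideoFileClip("input.mp4")           # open a video file
print(clip.duration, clip.fps, clip.size)   # basic metadata
excerpt = clip.subclip(0, 5)                # first five seconds
excerpt.write_videofile("excerpt.mp4", codec="libx264", audio_codec="aac")
clip.close()                                # release the underlying reader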

Example 1: create_supercut

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def create_supercut(composition, outputfile, padding):
    """Concatenate video clips together and output finished video file to the
    output directory.
    """
    print("[+] Creating clips.")
    demo_supercut(composition, padding)

    # add padding when necessary
    for (clip, nextclip) in zip(composition, composition[1:]):
        if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
            nextclip['start'] += padding

    # put all clips together:
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]

    print("[+] Concatenating clips.")
    final_clip = concatenate(cut_clips)

    print("[+] Writing ouput file.")
    final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac') 
Author: antiboredom, Project: videogrep, Lines of code: 24, Source file: videogrep.py
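Judging from how the dictionary keys are read in the function above, composition appears to be a list of dicts with 'file', 'start' and 'end' entries (times in seconds, since they are passed to subclip), and padding is also in seconds. A hypothetical call, with file names and timestamps invented purely for illustration (and assuming the other videogrep helpers such as demo_supercut are available):

composition = [
    {'file': 'interview.mp4', 'start': 12.0, 'end': 14.5},   # hypothetical clips
    {'file': 'interview.mp4', 'start': 14.8, 'end': 16.0},
    {'file': 'broll.mp4', 'start': 5.0, 'end': 7.0},
]
create_supercut(composition, 'supercut.mp4', padding=0.5)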

Example 2: write_wav

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def write_wav(self, video_obj, target_wav_file):
        '''
        Writes the audio stream of a video as a wav suitable as input to HTK

        Parameters
        ----------
        video_obj: a moviepy VideoFileClip

        target_wav_file: path to write the wav file to

        Returns
        -------
        None

        '''
        assert isinstance(video_obj, VideoFileClip), "video needs to be an instance of VideoFileClip"

        # Write audio stream of video to file in the desired format
        video_obj.audio.write_audiofile(target_wav_file, fps=16000,  # Set fps to 16k
                                        codec='pcm_s16le',
                                        ffmpeg_params=['-ac', '1'])  # Convert to mono 
Author: znaoya, Project: aenet, Lines of code: 22, Source file: __init__.py

Example 3: get_samples

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def get_samples(subject_id):
    arousal_label_path = root_dir / 'Ratings_affective_behaviour_CCC_centred/arousal/{}.csv'.format(subject_id)
    valence_label_path = root_dir / 'Ratings_affective_behaviour_CCC_centred/valence/{}.csv'.format(subject_id)

    clip = VideoFileClip(str(root_dir / "Video_recordings_MP4/{}.mp4".format(subject_id)))

    subsampled_audio = clip.audio.set_fps(16000)

    audio_frames = []
    for i in range(1, 7501):
        time = 0.04 * i

        audio = np.array(list(subsampled_audio.subclip(time - 0.04, time).iter_frames()))
        audio = audio.mean(1)[:640]

        audio_frames.append(audio.astype(np.float32))

    arousal = np.loadtxt(str(arousal_label_path), delimiter=',')[:, 1][1:]
    valence = np.loadtxt(str(valence_label_path), delimiter=',')[:, 1][1:]

    return audio_frames, np.dstack([arousal, valence])[0].astype(np.float32) 
Author: tzirakis, Project: Multimodal-Emotion-Recognition, Lines of code: 23, Source file: data_generator.py

Example 4: _read_video_thumbnail

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def _read_video_thumbnail(data: bytes, video_ext: str = "mp4", frame_ext: str = "png",
                          max_size: Tuple[int, int] = (1024, 720)) -> Tuple[bytes, int, int]:
    with tempfile.NamedTemporaryFile(prefix="mxtg_video_", suffix=f".{video_ext}") as file:
        # We don't have any way to read the video from memory, so save it to disk.
        file.write(data)

        # Read temp file and get frame
        frame = VideoFileClip(file.name).get_frame(0)

    # Convert to png and save to BytesIO
    image = Image.fromarray(frame).convert("RGBA")

    thumbnail_file = BytesIO()
    if max_size:
        image.thumbnail(max_size, Image.ANTIALIAS)
    image.save(thumbnail_file, frame_ext)

    w, h = image.size
    return thumbnail_file.getvalue(), w, h 
Author: tulir, Project: mautrix-telegram, Lines of code: 21, Source file: file_transfer.py
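A hypothetical way to call this helper on an existing file (the file name is a placeholder):

# Read the raw bytes of a video and get back a PNG thumbnail plus its size.
with open("clip.mp4", "rb") as f:
    png_bytes, width, height = _read_video_thumbnail(f.read())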

Example 5: create_summary

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def create_summary(filename, regions):
    """ Join segments

    Args:
        filename(str): path to the input video
        regions(list): list of (start, end) time pairs, in seconds
    Returns:
        VideoFileClip: joined subclips in segment

    """
    subclips = []
    input_video = VideoFileClip(filename)
    last_end = 0
    for (start, end) in regions:
        subclip = input_video.subclip(start, end)
        subclips.append(subclip)
        last_end = end
    return concatenate_videoclips(subclips) 
Author: OpenGenus, Project: vidsum, Lines of code: 20, Source file: sum.py
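A hypothetical call, with the file name and regions invented for illustration; each region is a (start, end) pair in seconds, and concatenate_videoclips is assumed to be imported from moviepy.editor as in the source project:

summary = create_summary('lecture.mp4', [(0, 10), (65, 80), (300, 320)])
summary.write_videofile('lecture_summary.mp4')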

Example 6: movie_in_movie

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def movie_in_movie(movie1_fname, movie2_fname, output_fname, pos=('right', 'bottom'), movie2_ratio=(1/3, 1/3),
                   margin=6, margin_color=(255, 255, 255), audio=False, fps=24, codec='libx264'):
    from moviepy import editor
    movie1 = editor.VideoFileClip(movie1_fname, audio=audio)
    w, h = movie1.size

    # movie2 is downsized, given a coloured margin (white by default),
    # and composited over movie1 at the requested position
    movie2 = (editor.VideoFileClip(movie2_fname, audio=False).
             resize((w * movie2_ratio[0], h * movie2_ratio[1])).  # one third of the total screen
             margin(margin, color=margin_color).  # white margin
             margin(bottom=20, right=20, top=20, opacity=0).  # transparent
             set_pos(pos))

    final = editor.CompositeVideoClip([movie1, movie2])
    final.write_videofile(output_fname, fps=fps, codec=codec) 
Author: pelednoam, Project: mmvt, Lines of code: 18, Source file: movies_utils.py

Example 7: add_text_to_movie

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def add_text_to_movie(movie_fol, movie_name, out_movie_name, subs, fontsize=50, txt_color='red', font='Xolonium-Bold',
                      subs_delim=' ', bg_color=None):
    # Requires ImageMagick
    # For centos6: https://www.vultr.com/docs/install-imagemagick-on-centos-6
    # For centos7: http://helostore.com/blog/install-imagemagick-on-centos-7
    from moviepy import editor

    def annotate(clip, txt, txt_color=txt_color, fontsize=fontsize):
        """ Writes a text at the bottom of the clip. """
        # For this code to work, ImageMagick's policy.xml must be edited:
        #   identify -list policy
        #   sudo gedit /etc/ImageMagick/policy.xml &
        # Comment out the TEXT and LABEL lines
        txtclip = editor.TextClip(txt, fontsize=fontsize, color=txt_color)  # font=font
        # txtclip = txtclip.on_color((clip.w, txtclip.h + 6), color=(0, 0, 255), pos=(6, 'center'))
        cvc = editor.CompositeVideoClip([clip, txtclip.set_pos(('center', 'bottom'))])
        return cvc.set_duration(clip.duration)

    if isinstance(subs, str):
        subs = import_subs(movie_fol, subs, subs_delim)
    video = editor.VideoFileClip(op.join(movie_fol, movie_name))
    annotated_clips = [annotate(video.subclip(from_t, to_t), txt) for (from_t, to_t), txt in subs]
    final_clip = editor.concatenate_videoclips(annotated_clips)
    final_clip.write_videofile(op.join(movie_fol, out_movie_name)) 
Author: pelednoam, Project: mmvt, Lines of code: 26, Source file: movies_utils.py
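Judging from the unpacking in the list comprehension above, subs (when not passed as a file name) is a sequence of ((from_t, to_t), text) pairs. A hypothetical call, with the folder, timings and captions invented for illustration:

subs = [((0, 2.5), 'Opening scene'), ((2.5, 6.0), 'First result')]
add_text_to_movie('/path/to/movies', 'input.mp4', 'annotated.mp4', subs)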

Example 8: one_pic_to_video

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def one_pic_to_video(image_path, output_video_path, fps, time):
    """
    一張圖片合成視頻
    one_pic_to_video('./../source/1.jpeg', './../source/output.mp4', 25, 10)
    :param path: 圖片文件路徑
    :param output_video_path:合成視頻的路徑
    :param fps:幀率
    :param time:時長
    :return:
    """

    image_clip = ImageClip(image_path)
    img_width, img_height = image_clip.w, image_clip.h

    # total number of frames
    frame_num = int(fps * time)

    img_size = (int(img_width), int(img_height))

    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')

    video = cv2.VideoWriter(output_video_path, fourcc, fps, img_size)

    for index in range(frame_num):
        frame = cv2.imread(image_path)
        # resize directly to the target size
        frame_suitable = cv2.resize(frame, (img_size[0], img_size[1]), interpolation=cv2.INTER_CUBIC)

        # write the frame into the video
        # (the same frame is written frame_num times)
        video.write(frame_suitable)

    # release resources
    video.release()

    return VideoFileClip(output_video_path) 
Author: xingag, Project: tools_python, Lines of code: 38, Source file: img_utils.py

Example 9: get_image

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def get_image(video, dst, frame_list):
	'''
	Grab frames from the video at the given times (in seconds),
	e.g. frame_list = [1, 3, 5, 7, 9]
	'''
	clip = VideoFileClip(video)

	for t in frame_list:
		imgpath = os.path.join(dst, '{}.jpg'.format(t))
		clip.save_frame(imgpath, t) 
Author: ChengZhongShen, Project: Advanced_Lane_Lines, Lines of code: 12, Source file: gen_video.py

Example 10: create_mtg_gif

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def create_mtg_gif(name, id, border):
    if border == 'm':   # Modern (post-8th Ed)
        card_upper_corner = (19, 38)
        gif_width = 202 - card_upper_corner[0]
        gif_height = 172 - card_upper_corner[1]
    elif border == 'c':   # Current (post-Magic 2015)
        card_upper_corner = (17, 34)
        gif_width = 204 - card_upper_corner[0]
        gif_height = 173 - card_upper_corner[1]
    else:   # Old (pre-8th Ed)
        card_upper_corner = (25, 30)
        gif_width = 196 - card_upper_corner[0]
        gif_height = 168 - card_upper_corner[1]

    mtg_card = Image.open(BytesIO(requests.get(get_mtg_image(id)).content))
    mtg_card = ImageClip(np.asarray(mtg_card)).resize((222, 310))

    get_giphy_gif(name)
    giphy_gif = (VideoFileClip('giphy_gif.mp4',
                               target_resolution=(gif_height, gif_width))
                 .set_pos(card_upper_corner)

                 )

    if giphy_gif.duration < 2:
        giphy_gif = giphy_gif.fx(loop, n=1+int(2 // giphy_gif.duration))

    mtg_gif = CompositeVideoClip([mtg_card, giphy_gif])
    mtg_gif = mtg_gif.set_start(0).set_duration(giphy_gif.duration)
    # mtg_gif.write_gif("mtg_gif.gif")
    mtg_gif.write_videofile("mtg_gif.mp4", codec='libx264',
                            bitrate=str(np.power(10, 7)), verbose=False,
                            progress_bar=False,
                            audio=False, ffmpeg_params=['-pix_fmt', 'yuv420p']) 
Author: minimaxir, Project: magic-the-gifening, Lines of code: 36, Source file: utils.py

Example 11: edit_video

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def edit_video(video):
  clips = [mp.VideoFileClip(video['file_or_url'])]

  for effect in video['effects']:
    clips.extend(get_effects(clips[0], effect))
 
  video = mp.CompositeVideoClip(clips)

  return video 
Author: google, Project: starthinker, Lines of code: 11, Source file: run.py

Example 12: configureVideo

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def configureVideo(self, upload_id, video, thumbnail, caption=''):
        clip = VideoFileClip(video)
        self.uploadPhoto(photo=thumbnail, caption=caption, upload_id=upload_id)
        data = json.dumps({
            'upload_id': upload_id,
            'source_type': 3,
            'poster_frame_index': 0,
            'length': 0.00,
            'audio_muted': False,
            'filter_type': 0,
            'video_result': 'deprecated',
            'clips': {
                'length': clip.duration,
                'source_type': '3',
                'camera_position': 'back',
            },
            'extra': {
                'source_width': clip.size[0],
                'source_height': clip.size[1],
            },
            'device': self.DEVICE_SETTINTS,
            '_csrftoken': self.token,
            '_uuid': self.uuid,
            '_uid': self.username_id,
            'caption': caption,
        })
        return self.SendRequest('media/configure/?video=1', self.generateSignature(data)) 
Author: Datalux, Project: Osintgram, Lines of code: 29, Source file: InstagramAPI.py

Example 13: create_supercut

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def create_supercut(regions):
    subclips = []
    filenames = set(filename for filename, _ in regions)
    video_files = {filename: VideoFileClip(filename) for filename in filenames}
    for filename, region in regions:
        subclip = video_files[filename].subclip(*region)
        subclips.append(subclip)
    if not subclips: return None
    return concatenate_videoclips(subclips) 
Author: agermanidis, Project: thingscoop, Lines of code: 11, Source file: utils.py

Example 14: create_compilation

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def create_compilation(filename, index):
    dims = get_video_dimensions(filename)
    subclips = []
    video_file = VideoFileClip(filename)
    for label in sorted(index.keys()):
        label_img_filename = create_title_frame(label_as_title(label), dims)
        label_clip = ImageClip(label_img_filename, duration=2)
        os.remove(label_img_filename)
        subclips.append(label_clip)
        for region in index[label]:
            subclip = video_file.subclip(*region)
            subclips.append(subclip)
    if not subclips: return None
    return concatenate_videoclips(subclips) 
Author: agermanidis, Project: thingscoop, Lines of code: 16, Source file: utils.py

Example 15: create_supercut_in_batches

# Required import: from moviepy import editor [as alias]
# Or: from moviepy.editor import VideoFileClip [as alias]
def create_supercut_in_batches(composition, outputfile, padding):
    """Create & concatenate video clips in groups of size BATCH_SIZE and output
    finished video file to output directory.
    """
    total_clips = len(composition)
    start_index = 0
    end_index = BATCH_SIZE
    batch_comp = []
    while start_index < total_clips:
        filename = outputfile + '.tmp' + str(start_index) + '.mp4'
        try:
            create_supercut(composition[start_index:end_index], filename, padding)
            batch_comp.append(filename)
            gc.collect()
            start_index += BATCH_SIZE
            end_index += BATCH_SIZE
        except Exception:
            start_index += BATCH_SIZE
            end_index += BATCH_SIZE
            continue

    clips = [VideoFileClip(filename) for filename in batch_comp]
    video = concatenate(clips)
    video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')


    # remove partial video files
    for filename in batch_comp:
        os.remove(filename)

    cleanup_log_files(outputfile) 
Author: antiboredom, Project: videogrep, Lines of code: 33, Source file: videogrep.py


Note: The moviepy.editor.VideoFileClip examples in this article were collected by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their authors, and copyright of the source code remains with the original authors; please consult the corresponding projects' licenses before redistributing or reusing the code. Do not republish without permission.