本文整理汇总了Python中moviepy.editor.VideoFileClip.subclip方法的典型用法代码示例。如果您正苦于以下问题:Python VideoFileClip.subclip方法的具体用法?Python VideoFileClip.subclip怎么用?Python VideoFileClip.subclip使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类moviepy.editor.VideoFileClip
的用法示例。
在下文中一共展示了VideoFileClip.subclip方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_clip
# 需要导入模块: from moviepy.editor import VideoFileClip [as 别名]
# 或者: from moviepy.editor.VideoFileClip import subclip [as 别名]
def get_clip(video_title, start=(0,0), seconds=5):
    """Return a `seconds`-long subclip of the video at `video_title`.

    `start` is a (minutes, seconds) tuple; the end point is derived by
    adding `seconds` to the seconds component (moviepy accepts such
    tuples directly, and normalizes seconds >= 60).
    """
    source = VideoFileClip(video_title)
    stop = (start[0], start[1] + seconds)
    return source.subclip(start, stop)
示例2: processProjectVideo
# 需要导入模块: from moviepy.editor import VideoFileClip [as 别名]
# 或者: from moviepy.editor.VideoFileClip import subclip [as 别名]
def processProjectVideo():
    """Annotate ./project_video.mp4 in CLIPLENGTH-second chunks.

    Splits the video into consecutive clips, runs `process_image` over
    every frame of each clip via `fl_image`, and writes each annotated
    chunk to ./output_images/video/ProjectVideoOutput_<start>-<stop>.mp4
    (no audio). Relies on module-level `VideoFileClip` and `process_image`.
    """
    import math

    PROJECTVIDEOOUTPUT = './output_images/video/ProjectVideoOutput_%04d-%04d.mp4'
    CLIPLENGTH = 10  # process the video in 10-second clips

    video = VideoFileClip('./project_video.mp4')
    duration = video.duration
    # ceil, not int(round(x + 0.5)): the old formula produced an extra
    # zero-length clip whenever duration was an exact multiple of
    # CLIPLENGTH (e.g. 30 s -> round(3.5) == 4 clips).
    numberOfClips = math.ceil(duration / float(CLIPLENGTH))
    print("processProjectVideo-duration:", duration,
          ", CLIPLENGTH:", CLIPLENGTH,
          ", numberOfClips:", numberOfClips)
    for clipNumber in range(numberOfClips):
        clipStart = clipNumber * CLIPLENGTH
        # Clamp the final clip to the actual end of the video.
        clipStop = min((clipNumber + 1) * CLIPLENGTH, duration)
        print("processProjectVideo-clipNumber:", clipNumber,
              ", clipStart:", clipStart, ", clipStop:", clipStop)
        videoClip = video.subclip(clipStart, clipStop)
        annotatedClip = videoClip.fl_image(process_image)
        videoFileName = PROJECTVIDEOOUTPUT % (clipStart, clipStop)
        print("processProjectVideo-videoFileName:", videoFileName)
        annotatedClip.write_videofile(videoFileName, audio=False)
示例3: create_compilation
# 需要导入模块: from moviepy.editor import VideoFileClip [as 别名]
# 或者: from moviepy.editor.VideoFileClip import subclip [as 别名]
def create_compilation(filename, index):
    """Assemble a labelled compilation from `filename`.

    For each label of `index` (in sorted order) a 2-second title card is
    inserted, followed by the subclips of the source video listed under
    that label (each entry is a (start, end) region). Returns the joined
    clip, or None when the index produced no clips at all.
    """
    dims = get_video_dimensions(filename)
    source = VideoFileClip(filename)
    pieces = []
    for label in sorted(index):
        title_png = create_title_frame(label_as_title(label), dims)
        title_card = ImageClip(title_png, duration=2)
        # The image data is loaded into the clip, so the file can go.
        os.remove(title_png)
        pieces.append(title_card)
        pieces.extend(source.subclip(*region) for region in index[label])
    return concatenate_videoclips(pieces) if pieces else None
示例4: create_trump_dataset
# 需要导入模块: from moviepy.editor import VideoFileClip [as 别名]
# 或者: from moviepy.editor.VideoFileClip import subclip [as 别名]
def create_trump_dataset(movie_path, target_folder):
    """Cut `movie_path` at the hard-coded `cuts` timestamps (seconds).

    Writes each piece as <target_folder>/video/china-NNN.mp4 (video only,
    libx264) and <target_folder>/audio/china-NNN.m4a (AAC audio track).
    Returns True on completion.

    moviepy help: http://zulko.github.io/blog/2014/06/21/some-more-videogreping-with-python/
    https://zulko.github.io/moviepy/ref/ref.html
    """
    cuts = [(1.7, 2.5), (4.2, 4.6), (4.7, 5.2), (5.35, 5.93), (5.95, 6.45), (6.45, 6.95), (7, 7.34), (7.38, 7.82), (7.85, 8.24), (8.44, 9.04), (9.43, 9.7), (16.44, 16.7), (16.77, 17), (17, 17.31), (17.39, 17.67), (17.9, 18), (18.56, 18.8), (19, 19.4), (19.41, 19.75), (19.78, 20), (20.75, 21), (21, 21.52), (21.9, 22.41), (23, 23.52), (23.7, 23.96), (24.4, 24.7), (24.73, 24.98), (25, 25.38), (26.63, 27.15), (30, 30.36), (31.3, 31.77), (31.9, 32.16), (32.2, 32.5), (32.9, 33.16), (33.23, 33.4), (33.47, 33.79), (33.81, 34.25), (34.3, 34.65), (34.75, 35.23), (35.27, 35.95), (36.03, 36.59), (36.63, 37.04), (38.66, 39.1), (39.85, 40.3), (40.4, 40.75), (40.83, 41.271), (41.59, 41.95), (42.96, 43.33), (43.58, 43.88), (44, 44.6), (47, 47.48), (50.45, 50.75), (51, 51.33), (52.15, 52.48), (58.3, 58.55), (59, 59.4), (60, 60.4), (61.35, 61.71), (62.44, 62.8), (64.3, 64.6), (65.15, 65.58), (67.45, 67.8), (68.8, 69.15), (69.27, 69.6), (70.63, 70.97), (71, 71.4), (72.35, 72.8), (73.3, 73.7), (74.2, 74.61), (76, 76.9), (80.3, 80.65), (81.1, 81.4), (82.4, 82.75), (83.52, 84), (84.14, 84.49), (85.3, 85.6), (86.1, 86.4), (86.8, 87), (87.1, 87.48), (88, 88.2), (88.9, 89.37), (90.3, 90.7), (90.9, 91.2), (91.3, 91.5), (91.55, 91.78), (91.79, 92.06), (92.33, 92.67), (93.3, 93.55), (94.2, 94.5), (96.6, 96.96), (98, 98.44), (98.9, 99.1), (99.14, 99.53), (100.68, 100.92), (100.93, 101.25), (101.45, 101.8), (102.7, 102.96), (103.7, 104), (105.2, 105.7), (105.88, 106.1), (106.2, 106.6), (106.65, 107), (107.05, 107.85), (108.57, 109), (109.1, 109.48), (110.24, 110.74), (113.5, 113.85), (115.12, 115.4), (115.8, 116.25), (116.56, 116.95), (117.95, 118.35), (118.9, 119.3), (119.6, 120.2), (120.4, 120.9), (121.48, 121.9), (122.95, 123.25), (124.25, 124.65), (125, 125.39), (129.58, 129.9), (130.9, 131.3), (131.8, 132.15), (135, 135.5), (135.75, 136.1), (136.2, 136.65), (137, 137.4), (138.55, 138.8), (145.3, 145.75), (152.1, 152.5), (154.8, 155.25), (156.68, 156.95), (157.3, 157.8), (159.4, 159.78),
            (159.8, 160), (160.46, 160.8), (162.6, 163), (163.9, 164.18), (164.25, 164.63), (164.64, 165.1), (165.33, 165.7), (165.73, 166.1), (166.28, 166.58), (166.6, 167.06), (167.27, 167.65), (167.69, 168), (168.05, 168.45), (168.93, 169.25), (169.28, 169.6), (169.7, 170.15), (171.82, 172.24), (172.8, 173.1), (173.2, 173.6), (174.6, 175.04), (175.2, 175.6), (177, 177.35), (178.55, 178.97)]
    video = VideoFileClip(movie_path)
    subclips = [video.subclip(start, end) for (start, end) in cuts]
    # enumerate replaces the Python-2-only xrange(len(...)) index loop;
    # behavior is identical on both Python 2 and 3.
    for i, clip in enumerate(subclips):
        video_outfile = os.path.join(target_folder, 'video', 'china-%03d.mp4' % i)
        audio_outfile = os.path.join(target_folder, 'audio', 'china-%03d.m4a' % i)
        clip.write_videofile(video_outfile, codec='libx264', audio=False)
        clip.audio.write_audiofile(audio_outfile, codec='aac')
    return True
示例5: create_summary
# 需要导入模块: from moviepy.editor import VideoFileClip [as 别名]
# 或者: from moviepy.editor.VideoFileClip import subclip [as 别名]
def create_summary(filename, regions):
    """Join selected segments of a video into one clip.

    Args:
        filename (str): path to the source video file.
        regions (iterable): (start, end) pairs in seconds selecting the
            segments to keep, in output order.

    Returns:
        VideoClip: the selected subclips concatenated back to back.
    """
    input_video = VideoFileClip(filename)
    # (The original kept a `last_end` variable that was never read; it
    # has been dropped.)
    subclips = [input_video.subclip(start, end) for (start, end) in regions]
    return concatenate_videoclips(subclips)
示例6: excerpt_and_compile_video_file
# 需要导入模块: from moviepy.editor import VideoFileClip [as 别名]
# 或者: from moviepy.editor.VideoFileClip import subclip [as 别名]
def excerpt_and_compile_video_file(src_path, dest_path, timestamps,
                                   left_padding=0.01,
                                   right_padding=0.01,
                                   video_codec=DEFAULT_VIDEO_CODEC,
                                   audio_codec=DEFAULT_VIDEO_AUDIO_CODEC):
    """
    creates a new video compiled from cuts of `src_path`

    timestamps (list): a sequence of tuples, in (start, end) (in seconds);
    each cut is widened by the padding values but clamped to the source
    video's [0, duration] range.
    """
    video = VideoFileClip(src_path)
    duration = video.duration
    clips = [
        video.subclip(max(0, start - left_padding),
                      min(duration, end + right_padding))
        for start, end in timestamps
    ]
    compiled = concatenate_videoclips(clips)
    compiled.write_videofile(dest_path, codec=video_codec, audio_codec=audio_codec)
    return dest_path
示例7: make_clip
# 需要导入模块: from moviepy.editor import VideoFileClip [as 别名]
# 或者: from moviepy.editor.VideoFileClip import subclip [as 别名]
def make_clip(path, out_dir, framerate, start, end):
    """
    create gifs - given path and timecode-

    Cuts [start, end] out of the video at `path` (audio stripped),
    rejects clips whose frame at t=1s is too dark (mean Rec.709 luma
    below 40), and otherwise writes clip_<start>.mp4 and clip_<start>.gif
    into `out_dir`, skipping files that already exist.

    Returns 0 for rejected dark clips, otherwise 0.5 per file that was
    newly written (so up to 1.0).
    """
    import numpy as np

    video = VideoFileClip(path)
    print("loaded")
    clip = video.subclip(start, end).without_audio()
    frame = clip.get_frame(1)  # RGB frame sampled 1 second into the clip
    # Mean Rec.709 luma, vectorized: the original looped over every pixel
    # in pure Python, which is far too slow for full-resolution frames.
    luma = np.dot(frame, [0.2126, 0.7152, 0.0722])
    average_luma = round(float(luma.mean()), 2)
    if average_luma < 40:  # gets rid of dark scenes (bad scenes)
        return 0
    mp4_file = os.path.join(out_dir, 'clip_{0}.mp4'.format(start))
    gif_file = os.path.join(out_dir, 'clip_{0}.gif'.format(start))
    score = 0
    print("creating clip from {0} to {1}".format(start, end))
    if os.path.isfile(mp4_file) == False:
        clip.write_videofile(mp4_file, fps=framerate)
        score += 0.5
    if os.path.isfile(gif_file) == False:
        clip.write_gif(gif_file,
                       fps=framerate,
                       program="ffmpeg")
        score += 0.5
    return score
示例8: createHighlights
# 需要导入模块: from moviepy.editor import VideoFileClip [as 别名]
# 或者: from moviepy.editor.VideoFileClip import subclip [as 别名]
def createHighlights(inDir, inFile, inExt, compression_rate):
    """Build a highlight reel for the video at <inDir><inFile><inExt>.

    Around every audio peak, searches for scene cuts shortly before and
    after the peak (via getCutScenes) and uses the best pair as clip
    boundaries; the resulting clips are concatenated and written to
    results/<inFile>/hl.mp4.

    Relies on module-level helpers defined elsewhere in this file:
    getAudioPeaks, getCutScenes, current_milli_time, concatenate,
    plus cv2 and VideoFileClip.
    """
    inDir = inDir + inFile + inExt  # reassembled full path of the input video
    cap = cv2.VideoCapture(inDir)
    clip = VideoFileClip(inDir)
    fps = int(cap.get(5))    # property 5 == CAP_PROP_FPS
    num_frames = cap.get(7)  # property 7 == CAP_PROP_FRAME_COUNT (returned as float)
    audiopeaks = getAudioPeaks(inDir)  # presumably peak times in seconds — see getAudioPeaks
    # NOTE(review): for a value named num_seconds, frames*fps looks
    # dimensionally inverted (frames/fps would give seconds) — confirm
    # the intended formula before relying on the window size.
    num_seconds = num_frames*fps*compression_rate
    # Half-window (in frames) allotted to each peak's cut search.
    max_interval = ( num_seconds/(2*len(audiopeaks)) )*fps
    audiopeaks[:] = [fps*x for x in audiopeaks]  # convert peak times to frame numbers in place
    print("FPS: " + str(fps) + " \n#Frames: " + str(num_frames))
    print('Found audio peaks at frames:\n\t' + str(audiopeaks))
    best_cuts = []
    for peak in audiopeaks:
        print("\tFinding before and after cuts for audio peak: " + str(peak))
        currt = current_milli_time()  # wall-clock start, for the per-peak timing log
        start = max(0, peak - max_interval)
        end = min(int(num_frames), peak + max_interval)
        print("\t\tbefore:")
        before_cuts = getCutScenes(inDir, start, peak, 5, True)
        print("\t\t\t" + str(before_cuts))
        print("\t\tafter:")
        after_cuts = getCutScenes(inDir, peak, end, 5, True)
        print("\t\t\t" + str(after_cuts))
        # Default clip boundaries: first detected cut on each side, or the
        # raw window edge when no cut scene was found.
        best_cut = [before_cuts[0] if len(before_cuts) > 0 else start, after_cuts[0] if len(after_cuts) > 0 else end]
        extra_interval_allowance = max_interval*0.667
        print("\t\t\Initial bestcuts for audiopeak:" + str(best_cut))
        # Walk the before-cuts, moving the start boundary toward the peak
        # while each step stays within the extra allowance.
        curr = peak
        for cut in before_cuts:
            if(curr - cut < extra_interval_allowance and curr - cut > 1):
                best_cut[0] = cut
                curr = cut
                print("\t\t\tUpdated beforecut to frame:" + str(curr))
        # Same walk on the after-cuts for the end boundary.
        curr = peak
        for cut in after_cuts:
            if(cut - curr < extra_interval_allowance and cut - curr > 1):
                best_cut[1] = cut
                curr = cut
                print("\t\t\tUpdated aftercut to frame:" + str(curr))
        print("\t\tBestcuts for audiopeak:")
        print("\t\t\t" + str(best_cut))
        best_cuts.append(best_cut)
        print("\tFinished finding cuts for " + str(peak) + " in " + str((current_milli_time() - currt) / 1000) + " secs")
    print("\n\nBest cuts:")
    print("\t" + str(best_cuts))
    # Convert frame numbers back to seconds for moviepy's subclip.
    final = concatenate([clip.subclip(s*1.0/fps,e*1.0/fps) for s,e in best_cuts])
    directory = 'results/' + inFile
    if not os.path.exists(directory):
        os.makedirs(directory)
    final.to_videofile(directory + '/hl.mp4') # low quality is the default
示例9: concatenate
# 需要导入模块: from moviepy.editor import VideoFileClip [as 别名]
# 或者: from moviepy.editor.VideoFileClip import subclip [as 别名]
"""
increases = np.diff(smooth_volumes)[:-1] >= 0
decreases = np.diff(smooth_volumes)[1:] <= 0
peak_times = (increases * decreases).nonzero()[0]
peak_volumes = smooth_volumes[peak_times]
peak_times = peak_times[peak_volumes > np.percentile(peak_volumes,90)]
"""
For at least sporting events we can refine the peak times to
group those that are less than one minute apart. The assumption
is that these times most likely correspond to the same event
"""
highlight_times = [peak_times[0]]
for time in peak_times:
if(time - highlight_times[-1]) < 60:
if smooth_volumes[time] > smooth_volumes[highlight_times[-1]]:
highlight_times[-1] = time #use the time with the highest volume in chunks of 60 sec
else:
highlight_times.append(time)
"""
Final times contains the times in seconds of the most important
events based on this naive sound model. For each event, we can now
cut the original video 5 seconds before its time and stop 5 seconds
after its time to get 11 second clips for each event.
TODO: play around with this span
"""
final_highlights = concatenate([clip.subclip(max(time-5,0),min(time+5,clip.duration))
for time in highlight_times])
final_highlights.to_videofile('barca_madrid_highlights.mp4',fps=60)
print "Reely is done generating highlight for the video"