This article collects typical usage examples of the Python method pydub.AudioSegment.silent. If you are unsure what AudioSegment.silent does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also read further about the containing class, pydub.AudioSegment.
Below are 8 code examples of the AudioSegment.silent method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
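Before the examples, a minimal sketch of the method itself: AudioSegment.silent(duration=1000, frame_rate=11025) returns a silent segment of the given length in milliseconds. The snippet below pads a clip with silence; the file names are placeholders.

from pydub import AudioSegment

# one second of silence (duration is in milliseconds)
pad = AudioSegment.silent(duration=1000)

# pad a hypothetical clip with half a second of silence on each side
clip = AudioSegment.from_wav("voice.wav")
padded = AudioSegment.silent(duration=500) + clip + AudioSegment.silent(duration=500)
padded.export("voice_padded.wav", format="wav")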
Example 1: addFrameWithTransitionAndPause
# Required import: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import silent [as alias]
# Also requires: from pymediainfo import MediaInfo; import soundfile as sf
def addFrameWithTransitionAndPause(self, image_file, audio_file, transition_file, pause):
    media_info = MediaInfo.parse(transition_file)
    duration_in_ms = media_info.tracks[0].duration
    audio_file = r"%s" % audio_file
    f = sf.SoundFile(audio_file)
    try:
        audio_clip = AudioSegment.from_wav(audio_file)
    except Exception:
        print("error with frame audio transition pause for %s" % audio_file)
        # fall back to pure silence if the frame audio cannot be read
        audio_clip = AudioSegment.silent(duration=pause)
    duration = len(f) / f.samplerate
    audio_clip_with_pause = audio_clip
    self.imageframes.append(image_file)
    self.audiofiles.append(audio_clip_with_pause)
    self.durations.append(duration + (pause / 1000))
    self.transitions.append((transition_file, len(self.imageframes) - 1, (duration_in_ms / 1000) + (pause / 1000)))
Author: HA6Bots | Project: Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader | Lines: 18 | Source: generatemovie.py
Example 2: extract_words
# Required import: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import silent [as alias]
def extract_words(files):
    ''' Extracts individual words from files and exports them to individual files. '''
    output_directory = 'extracted_words'
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    for f in files:
        file_format = None
        source_segment = None
        if f.lower().endswith('.mp3'):
            file_format = 'mp3'
            source_segment = AudioSegment.from_mp3(f)
        elif f.lower().endswith('.wav'):
            file_format = 'wav'
            source_segment = AudioSegment.from_wav(f)
        if not file_format or source_segment is None:
            print('Unsupported audio format for ' + f)
            continue
        sentences = convert_timestamps(files)
        for s in sentences:
            for word in s['words']:
                start = float(word[1]) * 1000
                end = float(word[2]) * 1000
                word = word[0]
                total_time = end - start
                # start from silence so the exported clip is exactly the word's duration
                audio = AudioSegment.silent(duration=total_time)
                audio = audio.overlay(source_segment[start:end])
                number = 0
                output_path = None
                while True:
                    output_filename = word
                    if number:
                        output_filename += "_" + str(number)
                    output_filename = output_filename + '.' + file_format
                    output_path = os.path.join(output_directory, output_filename)
                    if not os.path.exists(output_path):
                        # this file doesn't exist, so we can continue
                        break
                    # file already exists, increment name and try again
                    number += 1
                print('Exporting to: ' + output_path)
                audio.export(output_path, format=file_format)
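For reference, a sketch of the sentences structure this function iterates over; the field layout is inferred from the indexing above, and the values are hypothetical:

sentences = [
    {'words': [('hello', '0.32', '0.71'), ('world', '0.74', '1.20')]},
]
# each word entry is (text, start_seconds, end_seconds)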
Example 3: addFrameWithPause
# Required import: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import silent [as alias]
# Also requires: import soundfile as sf
def addFrameWithPause(self, image_file, audio_file, pause):
    audio_file = audio_file.replace("\\", "/")
    f = sf.SoundFile(audio_file)
    audio_clip = AudioSegment.from_wav(audio_file)
    duration = (len(f) / f.samplerate) + pause / 1000
    # append the pause as trailing silence so the audio matches the frame duration
    audio_clip_with_pause = audio_clip + AudioSegment.silent(duration=pause)
    self.imageframes.append(image_file)
    self.audiofiles.append(audio_clip_with_pause)
    self.durations.append(duration)
Author: HA6Bots | Project: Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader | Lines: 11 | Source: generatemovie.py
Example 4: synthesize
# Required import: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import silent [as alias]
def synthesize(self, text, src, dst):
    """
    Synthesize a .wav from text.
    src is the folder that contains all syllable .wav files
    dst is the destination folder to save the synthesized file
    """
    print("Synthesizing ...")
    delay = 0
    increment = 355  # milliseconds between successive syllables
    pause = 500  # pause for punctuation
    syllables = lazy_pinyin(text, style=pypinyin.TONE3)
    # initialize to complete silence; each character takes up ~500 ms
    result = AudioSegment.silent(duration=500 * len(text))
    for syllable in syllables:
        path = src + syllable + ".wav"  # src is expected to end with a path separator
        sound_file = Path(path)
        # insert 500 ms of silence for punctuation marks
        if syllable in TextToSpeech.punctuation:
            short_silence = AudioSegment.silent(duration=pause)
            result = result.overlay(short_silence, position=delay)
            delay += increment
            continue
        # skip sound files that don't exist
        if not sound_file.is_file():
            continue
        segment = AudioSegment.from_wav(path)
        result = result.overlay(segment, position=delay)
        delay += increment
    directory = dst
    if not os.path.exists(directory):
        os.makedirs(directory)
    result.export(directory + "generated.wav", format="wav")
    print("Exported.")
Example 5: augment_sample
# Required import: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import silent [as alias]
def augment_sample(augmentation):
    index, src_file, dst_file, overlays, gain = augmentation
    orig_seg = AudioSegment.from_file(src_file, format="wav")
    # build the overlay mix on a silent canvas the same length as the original
    aug_seg = AudioSegment.silent(duration=len(orig_seg))
    for overlay in overlays:
        offset, overlay_file = overlay
        overlay_seg = AudioSegment.from_file(overlay_file, format="wav")
        # a negative offset trims the head of the overlay instead of shifting it
        if offset < 0:
            overlay_seg = overlay_seg[-offset:]
            offset = 0
        aug_seg = aug_seg.overlay(overlay_seg, position=offset)
    # match the original's loudness, then apply the requested gain in dB
    aug_seg = aug_seg + (orig_seg.dBFS - aug_seg.dBFS + gain)
    orig_seg = orig_seg.overlay(aug_seg)
    orig_seg.export(dst_file, format="wav")
    return (index, dst_file)
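A sketch of the augmentation tuple this function unpacks, with hypothetical file paths; offsets are in milliseconds (pydub's position unit) and gain is in dB:

augmentation = (
    0,                       # index, returned unchanged for bookkeeping
    "clean/sample.wav",      # source file (placeholder path)
    "augmented/sample.wav",  # destination file (placeholder path)
    [(2500, "noise/cafe.wav"), (-800, "noise/street.wav")],  # (offset_ms, overlay_file)
    -6,                      # gain in dB applied to the overlay mix
)
augment_sample(augmentation)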
Example 6: synthesize
# Required import: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import silent [as alias]
def synthesize(self, input_text='', output_wav_path=''):
    """
    Synthesize a .wav from text.
    input_text: the text to synthesize
    output_wav_path: the destination path for the synthesized .wav file
    """
    delay = 0
    increment = 355  # milliseconds between successive syllables
    pause = 500  # pause for punctuation
    syllables = lazy_pinyin(input_text, style=pypinyin.TONE3)
    # initialize to complete silence; each character takes up ~500 ms
    result = AudioSegment.silent(duration=500 * len(input_text))
    for syllable in syllables:
        path = os.path.join(self.syllables_dir, syllable + ".wav")
        sound_file = Path(path)
        # insert 500 ms of silence for punctuation marks
        if syllable in self.punctuation:
            short_silence = AudioSegment.silent(duration=pause)
            result = result.overlay(short_silence, position=delay)
            delay += increment
            continue
        # skip sound files that don't exist
        if not sound_file.is_file():
            continue
        segment = AudioSegment.from_wav(path)
        result = result.overlay(segment, position=delay)
        delay += increment
    if not output_wav_path:
        output_wav_path = 'out.wav'
    result.export(output_wav_path, format="wav")
    default_logger.debug("Exported: " + output_wav_path)
    return result
Example 7: compose
# Required import: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import silent [as alias]
def compose(segments, out='out.mp3', padding=0, crossfade=0, layer=False):
    '''Stitches together a new audio track'''
    files = {}
    working_segments = []
    audio = AudioSegment.empty()
    if layer:
        # with layering, start from a silent bed as long as the longest segment
        total_time = max([s['end'] - s['start'] for s in segments]) * 1000
        audio = AudioSegment.silent(duration=total_time)
    for i, s in enumerate(segments):
        try:
            start = s['start'] * 1000
            end = s['end'] * 1000
            f = s['file'].replace('.transcription.txt', '')
            if f not in files:
                if f.endswith('.wav'):
                    files[f] = AudioSegment.from_wav(f)
                elif f.endswith('.mp3'):
                    files[f] = AudioSegment.from_mp3(f)
            segment = files[f][start:end]
            print(start, end, f)
            if layer:
                audio = audio.overlay(segment, times=1)
            else:
                if i > 0:
                    audio = audio.append(segment, crossfade=crossfade)
                else:
                    audio = audio + segment
            if padding > 0:
                audio = audio + AudioSegment.silent(duration=padding)
            s['duration'] = len(segment)
            working_segments.append(s)
        except Exception:
            # skip segments whose source file is missing or unreadable
            continue
    audio.export(out, format=os.path.splitext(out)[1].replace('.', ''))
    return working_segments
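A usage sketch with hypothetical segment dicts; start and end are in seconds, as implied by the * 1000 conversions above:

segments = [
    {'start': 1.5, 'end': 3.2, 'file': 'interview.wav'},
    {'start': 0.0, 'end': 2.0, 'file': 'interview.wav'},
]
compose(segments, out='mix.mp3', padding=250, crossfade=50)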
Example 8: renderVideo
# Required import: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import silent [as alias]
def renderVideo(self):
    clips = self.videoformat.renderClips(self.content, self.title)
    self.videoformat.createMovie(clips, self)
    self.background_music_name = self.videoformat.music
    clips = []
    for i, transition in enumerate(self.transitions):
        print("Putting together clip (%s/%s)" % (i + 1, len(self.transitions)))
        transition_file_name = transition[0]
        last_image_index = transition[1]
        transition_duration = transition[2]
        # each clip covers every image frame after the previous transition,
        # followed by the transition video itself
        prev_image_index = self.transitions[i - 1][1] if i > 0 else -1
        clip = ImageSequenceClip(self.imageframes[prev_image_index + 1:last_image_index + 1],
                                 durations=self.durations[prev_image_index + 1:last_image_index + 1])
        combined_sounds = sum(self.audiofiles[prev_image_index + 1:last_image_index + 1])
        audio_name = "%s/%s%s.wav" % (settings.tempPath, "atestaudio", i)
        combined_sounds.export(audio_name, format="wav")
        video_clip = VideoFileClip(transition_file_name).fx(afx.volumex, settings.voice_volume)
        audio_clip = AudioFileClip(audio_name)
        clip = clip.set_audio(audio_clip)
        clip_with_interval = concatenate_videoclips([clip, video_clip])
        clips.append(clip_with_interval)
    # total duration of everything after the first clip, used to size the music loop
    main_vid_duration = 0
    for i in range(1, len(clips), 1):
        main_vid_duration += clips[i].duration
    print("Generating Audio Loop (%s)" % main_vid_duration)
    print("Using Audio Loop %s" % self.background_music_name)
    music_loop = afx.audio_loop(AudioFileClip(self.background_music_name).fx(afx.volumex, settings.background_music_volume),
                                duration=int(main_vid_duration))
    music_loop.to_audiofile("%s/music-loop.wav" % settings.tempPath)
    # delay the music with leading silence so it starts after the first clip
    pause_time = int(clips[0].duration * 1000)
    print("Adding pause to start of Audio Loop (%s)" % (pause_time / 1000))
    audio_clip = AudioSegment.from_wav("%s/music-loop.wav" % settings.tempPath)
    new_audio = AudioSegment.silent(duration=pause_time) + audio_clip
    new_audio.export("%s/music-loop2.wav" % settings.tempPath, format='wav')
    # combine all video clips and mix the delayed music under the voice track
    print("Combining all Video Clips %s" % (pause_time / 1000))
    main_vid_combined = concatenate_videoclips(clips)
    main_vid_with_audio = main_vid_combined.set_audio(CompositeAudioClip([main_vid_combined.audio, AudioFileClip("%s/music-loop2.wav" % settings.tempPath)]))
    folder_location = settings.finishedvideosdirectory + "/vid%s" % self.scriptno
    if not os.path.exists(folder_location):
        os.makedirs(folder_location)
    print("Writing video to location %s" % folder_location)
    main_vid_with_audio.write_videofile("%s/%s.mp4" % (folder_location, "vid%s" % self.scriptno), threads=4,
                                        fps=settings.movieFPS, temp_audiofile=settings.currentPath + "\\temp.mp3")
    return folder_location
Author: HA6Bots | Project: Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader | Lines: 63 | Source: generatemovie.py