This article collects typical usage examples of the Python method pydub.AudioSegment.empty. If you have been wondering what AudioSegment.empty does, how to call it, or what real-world uses look like, the curated code samples below should help. You can also explore further usage examples of the containing class, pydub.AudioSegment.
Fifteen code examples of the AudioSegment.empty method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
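All of the examples share one core pattern: AudioSegment.empty() returns a zero-length segment that acts as a neutral accumulator for concatenation and export. A minimal sketch of that pattern, with hypothetical file names:

from pydub import AudioSegment

combined = AudioSegment.empty()              # zero-length accumulator
for path in ["intro.mp3", "body.mp3"]:       # hypothetical input files
    combined += AudioSegment.from_mp3(path)
combined.export("combined.mp3", format="mp3")

pydub synchronizes segment parameters on concatenation, so the zero-length accumulator simply adopts the sample rate and channel count of the real audio added to it.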
Example 1: process_sounds
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def process_sounds(sounds, file_format, bucket, s3_extension, sample_duration, fade_duration, sample_start):
    preview = AudioSegment.empty()
    sample_filenames = []
    for count, sound in enumerate(sounds, 1):
        print('\nDownloading and sampling {} of {}, {:.0f}% complete'.format(count, len(sounds), (count / len(sounds)) * 100))
        print(sound['name'], sound['url'])
        key = bucket.get_key(sound['id'] + s3_extension if s3_extension else sound['id'])
        source_filename = tempfile.NamedTemporaryFile(prefix='/tmp/', suffix='.{}'.format(file_format)).name
        sample_filename = tempfile.NamedTemporaryFile(prefix='/tmp/', suffix='.{}'.format(file_format)).name
        get_sample_from_key.delay(source_filename, sample_filename, key, file_format, sound, sample_start, sample_duration)
        sample_filenames.append(sample_filename)
    wait(get_sample_from_key)
    for count, sample_filename in enumerate(sample_filenames, 1):
        print('\nProcessing {} of {}, {:.0f}% complete'.format(count, len(sounds), (count / len(sounds)) * 100))
        print(sample_filename)
        sample = AudioSegment.from_file(sample_filename, format=file_format)
        # Append the sample with a crossfade; the first sample seeds preview directly,
        # since an empty segment is too short to absorb a crossfade
        preview = preview.append(sample, crossfade=fade_duration * config.one_second) if preview else sample
    return preview
Example 2: txt_to_voice
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def txt_to_voice(text, name='test', export_path=EXPORT_PATH):
    """
    Convert text to audio.
    :param text: the text to convert
    :param name: file name for the generated audio
    :return:
    """
    pinyin_list = lazy_pinyin(text, style=TONE3)
    new = AudioSegment.empty()
    for piny in pinyin_list:
        piny_song = VOICE_DICT.get(piny)
        if piny_song is None and piny and piny[-1] not in '0123456789':
            # No tone number: treat it as the neutral tone (tone 5)
            piny = piny + '5'
            piny_song = VOICE_DICT.get(piny, silent)
        # Crossfade variant:
        # with_style = beginning.append(end, crossfade=1500)
        # crossfade blends one clip smoothly into the next; crossfade=1500 means the transition lasts 1.5 seconds.
        # if new and piny_song:
        #     crossfade = min(len(new), len(piny_song), 1500) / 60
        #     new = new.append(piny_song, crossfade=crossfade)
        if not piny_song:
            continue
        new += piny_song
    new.export(os.path.join(export_path, "{}.mp3".format(name)), format='mp3')
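For context on the tone handling above: with style=TONE3, pypinyin appends the tone number to each syllable, and neutral-tone syllables come back with no digit at all, which is why the code appends '5' before retrying the lookup. A quick illustration (assuming pypinyin is installed; in recent versions the constant may live at pypinyin.Style.TONE3):

from pypinyin import lazy_pinyin, TONE3
print(lazy_pinyin('你好吗', style=TONE3))  # ['ni3', 'hao3', 'ma'] — the neutral-tone 'ma' carries no digit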
Example 3: createSoundFile
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def createSoundFile(morse):
    dot = AudioSegment.from_wav(r"C:\Users\Gaurav Keswani\Documents\Eclipse\Morse-Code-Generator\src\resources\sound\dot.wav")
    dash = AudioSegment.from_wav(r"C:\Users\Gaurav Keswani\Documents\Eclipse\Morse-Code-Generator\src\resources\sound\dash.wav")
    # word_gap = AudioSegment.from_wav(r"C:\Users\Gaurav Keswani\Documents\Eclipse\Morse-Code-Generator\src\resources\sound\void.wav")
    sound_config = AudioSegment.empty()
    # Split the morse sentence into word codes
    codes = morse.split(" ")
    for morseWord in codes:
        # Split each word code into individual symbols
        for item in morseWord:
            # Dot sound for a zero
            if item == "0":
                sound_config += dot
            # Dash sound for a one
            elif item == "1":
                sound_config += dash
            # Otherwise add a 300 ms pause between letters
            else:
                sound_config += AudioSegment.silent(300)
        sound_config += dot[0.1:0.2]
    # Export the sound file as morse.wav
    sound_config.export(r"C:\Users\Gaurav Keswani\Documents\Eclipse\Morse-Code-Generator\src\resources\sound\morse.wav", format="wav")
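A caveat when reading this one: pydub slices by milliseconds, so dot[0.1:0.2] selects a fraction of a millisecond and contributes essentially no audio. If an audible gap between words was intended, an explicit silence is clearer (a sketch, not the original author's code):

sound_config += AudioSegment.silent(duration=100)  # e.g. a 100 ms gap between words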
Example 4: run
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def run(key, yin_list):
    new = AudioSegment.empty()
    for name in yin_list:
        index = wushiyintu.index(name)
        new += words[index] + silent
    # Save each column as its own audio file
    new.export(os.path.join(EXPORT_PATH, "{}.mp3".format(key)), format='mp3')
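This function leans on module-level state that the snippet does not show. The usage implies roughly the following shape (a hypothetical reconstruction, not the original source):

# wushiyintu = ['a', 'i', 'u', ...]            # kana names in gojūon (fifty-sounds) table order
# words = [AudioSegment.from_mp3(...), ...]    # one clip per kana, in the same order
# silent = AudioSegment.silent(duration=100)   # short gap appended after each clip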
Example 5: processRangeForLines
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def processRangeForLines(range, lines, number_sounds):  # 'range' shadows the builtin; kept as in the source
    from pydub import AudioSegment  # imported here because the function is shipped off to pp workers
    from datetime import datetime

    def append_string_to_audio_segment(string, segment):
        """Helper func to translate a string of digits to sound"""
        for num in string:
            segment = segment + number_sounds[int(num)]
        return segment

    TOTAL_LINES = len(lines)
    TOTAL_LINES = 10  # hard-coded override left in the source; discards the len(lines) value above
    PERCENT_DENOM = TOTAL_LINES / 5  # how often a % status is reported
    audio = AudioSegment.empty()  # init an output
    counter = 0
    for i in range:
        line = lines[i]
        counter += 1
        if counter % PERCENT_DENOM == 0:  # prints a % status every so often
            print(round((counter / TOTAL_LINES) * 100), "% concatenating.", datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        audio = append_string_to_audio_segment(line, audio)
    return audio
Example 6: test
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def test():
    x = urllib.request.urlopen('http://audio2.broadcastify.com/505577348.mp3')
    members = inspect.getmembers(AudioSegment)  # renamed from 'list' to avoid shadowing the builtin
    for each in members:
        print(each[0])
    # x = urllib.request.urlopen('file:/Users/Matthew/Downloads/test_recording_cut.wav')
    # print(x.read(10))
    # print(x.read(10))
    # print(x.read(10))
    # urllib.request.urlretrieve('http://audio2.broadcastify.com/505577348.mp3', "/Users/Matthew/Documents/test1.mp3")
    # t = threading.Thread(target=capture, args=('http://audio2.broadcastify.com/505577348.mp3',))
    writing = True
    numSilent = 0
    fileNum = 1
    numInRow = 0
    segFile = open('/Users/Matthew/Documents/segFile.mp3', 'wb')
    currentPath = "/Users/Matthew/Documents/file1.mp3"
    currSeg = AudioSegment.empty()
    while fileNum < 4:
        segFile.write(x.read(1000))
        segFile.flush()  # flush so from_file below sees the bytes just written
        seg = AudioSegment.from_file('/Users/Matthew/Documents/segFile.mp3')
        print(seg.dBFS, end=" ")
        numInRow += 1
        if numInRow > 4:
            print()
            numInRow = 0
        if seg.dBFS > -30:
            writing, numSilent = True, 0
        if writing:
            currSeg += seg  # append() returns a new segment; accumulate with +=
            if seg.dBFS <= -30:
                numSilent += 1
                if numSilent > 5:
                    writing = False
                    currSeg.export(currentPath, format="mp3")  # was 'currentSeg', an undefined name
                    currSeg = AudioSegment.empty()
                    fileNum += 1
                    currentPath = "/Users/Matthew/Documents/file%d.mp3" % fileNum  # fixed 'Docmuents' typo
    # t.start()
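For what it's worth, pydub also ships helpers that encapsulate this kind of dBFS thresholding; a hedged alternative to the hand-rolled silence counter (parameter values are illustrative):

from pydub.silence import detect_nonsilent
# Returns [[start_ms, end_ms], ...] for spans louder than the threshold
ranges = detect_nonsilent(seg, min_silence_len=500, silence_thresh=-30)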
Example 7: create_combined_audio
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def create_combined_audio(self, manifest):
    if manifest.get_issue_hash(self) == self.hash:
        print("%s: Combined audio already exists" % self)
        return
    if len(self.articles) < manifest.get_article_count(self):
        print("%s: Feed has fewer articles (%s) than the preexisting combined audio (%s)." % (self, len(self.articles), manifest.get_article_count(self)))
        return
    # Download each article's MP3 file
    for article in self.articles:
        article.download()
    # Combine the MP3 files into a single file
    combined = AudioSegment.empty()
    chapters = []
    _chap_start = 0
    _chap_end = 0
    for article in self.articles:
        print("%s: Found %s with length of %s seconds" % (self, article, article.audio.duration_seconds))
        _chap_end = _chap_start + (article.audio.duration_seconds * 1000)
        combined += article.audio
        chapters.append((article.title, int(_chap_start), int(_chap_end)))
        _chap_start = _chap_end
    # Export the new combined file
    combined.export(self.local, format=FORMAT, bitrate="128k")
    # Extract the cover image from the first article's MP3
    cover_id3 = eyed3.load(self.articles[0].local)
    cover_img_frame = cover_id3.tag.images.get('')
    # Set the cover image on the combined file
    id3 = eyed3.load(self.local)
    id3.tag.images._fs[b'APIC'] = cover_img_frame
    # Add chapter markers to the combined file
    index = 0
    child_ids = []
    for chapter in chapters:
        element_id = ("chp{}".format(index)).encode()
        title, start_time, end_time = chapter
        new_chap = id3.tag.chapters.set(element_id, (start_time, end_time))
        new_chap.sub_frames.setTextFrame(b"TIT2", "{}".format(title))
        child_ids.append(element_id)
        index += 1
    id3.tag.table_of_contents.set(b"toc", toplevel=True, ordered=True, child_ids=child_ids)
    # Update the manifest with the new info
    manifest.save_issue(self, combined)
    print("%s: Created combined audio with length of %s seconds" % (self, combined.duration_seconds))
    print("%s: Saved to %s" % (self, self.local))
    print("%s: Chapters:" % self)
    for chap in id3.tag.chapters:
        print("%s: - %s" % (self, chap.sub_frames.get(b"TIT2")[0]._text))
    # Save the ID3 tags
    id3.tag.save()
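Assigning to id3.tag.images._fs[b'APIC'] reaches into eyed3 internals to copy the raw APIC frame byte-for-byte. For reference, eyed3 also exposes a public setter, roughly as sketched below (image_data and the MIME type are assumptions; the raw-frame copy above has the advantage of preserving the original frame exactly):

from eyed3.id3.frames import ImageFrame

id3.tag.images.set(ImageFrame.FRONT_COVER, image_data, "image/jpeg")
id3.tag.save()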
Example 8: main
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def main():
    parser = OptionParser(usage="usage: %prog [options] arg [audio files]", description="Merge and join audio files")
    parser.add_option("-m", "--merge", action="store_true",
                      help="Merge the audio files passed as args; all files will start at 0 s")
    parser.add_option("-j", "--join", action="store_true",
                      help="Join the audio files passed as args in the given order")
    parser.add_option("-d", "--destination", action="store",
                      help="Set the destination path; current folder by default")
    parser.add_option("-o", "--output", action="store", help="Output file name; a date string by default")
    try:
        opts, args = parser.parse_args()
    except OptParseError as e:
        print(e)
        sys.exit(2)
    # Input verification
    if opts.join is None and opts.merge is None:
        print("Choose -j to join two or more files, or -m to merge (overlay) them")
        sys.exit(1)
    if opts.output is None:
        opts.output = str(datetime.now()) + '.mp3'
    if opts.destination is None:
        opts.destination = os.getcwd()
    # If the merge option was chosen
    if opts.merge is True and opts.join is None:
        audio_list = {}
        # Read the audio segments and order them from longest to shortest
        index = 0
        for arg in args:
            segment = AudioSegment.from_file(arg, format=str(arg).split(".")[1])
            audio_list[index] = [float(segment.duration_seconds), segment]
            index += 1
        ordered_segments = OrderedDict(sorted(audio_list.items(), key=lambda t: t[1][0], reverse=True))
        first = True
        for k, (dur, seg) in ordered_segments.items():
            if first is True:
                # Create a silent base track as long as the longest loaded segment
                audio = AudioSegment.silent(duration=dur * 1000)
                first = False
            audio = audio.overlay(seg.set_channels(2))
        # Save the result; os.path.join avoids missing-separator bugs when -d lacks a trailing slash
        audio.export(os.path.join(opts.destination, opts.output), format=str(opts.output).split(".")[1])
        # Print the full path of the created audio file
        print(os.path.join(opts.destination, opts.output))
    if opts.join is True and opts.merge is None:
        # Start from a zero-length base segment
        audio = AudioSegment.empty()
        for arg in args:
            # Load and append each file given as an arg
            audio += AudioSegment.from_file(arg, format=str(arg).split(".")[1])
        # Save the result
        audio.export(os.path.join(opts.destination, opts.output), format=str(opts.output).split(".")[1])
        # Print the full path of the created audio file
        print(os.path.join(opts.destination, opts.output))
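Assuming the script is saved as, say, audio_tool.py (a hypothetical name), joining would look like `python audio_tool.py -j -o out.mp3 first.mp3 second.wav`, while `-m` overlays the same inputs starting at 0 s instead of concatenating them.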
Example 9: generate_combined_audio
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def generate_combined_audio(urls):
    main_audio = AudioSegment.empty()
    silence = AudioSegment.silent(duration=1000)
    tmp_path = os.path.join("/tmp", "tempsong.mp3")
    target_path = "./Combined.mp3"
    for url in urls:
        urllib.request.urlretrieve(url, tmp_path)  # Python 3; the original used the Python 2 urllib.urlretrieve
        song = AudioSegment.from_mp3(tmp_path)
        main_audio = main_audio + silence + song
    main_audio.export(target_path, format="mp3")
    os.remove(tmp_path)
Example 10: render_dialog
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def render_dialog(self, file_list=None):
    """Uses a file list to render the dialog"""
    if file_list is not None:
        self.file_list = file_list
    # Load all mp3 files
    full_dialog = AudioSegment.empty()
    silence = AudioSegment.silent(100)
    for i, filename in enumerate(self.file_list):
        full_dialog += AudioSegment.from_mp3(filename) + silence
    # md5 needs bytes in Python 3, hence the .encode()
    rendered_file_name = self.get_render_path() + hashlib.md5("-".join(self.file_list).encode()).hexdigest() + ".ogg"
    full_dialog.export(rendered_file_name, format="ogg", codec="libvorbis")
    return rendered_file_name
Example 11: process
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def process(fileName):
    client = MongoClient()
    db = client.audiograins
    grainEntries = db.grains
    song = AudioSegment.empty()
    songFile = open(fileName)
    print("converting to mono")
    monoFilename = "mono" + fileName
    subprocess.check_call(["ffmpeg -i " + fileName + " -ac 1 -threads 2 " + monoFilename], close_fds=True, shell=True)
    grains = granulizer.chopSound(monoFilename, 20, "inputGrains", "sample")
    labels = ["chorus", "hip-hop", "latin", "orchestra", "pop", "rock", "country", "jazz", "opera", "piano", "reggae", "techno"]
    classifications = [0] * 12
    totalGrains = 0
    normalizer = pickle.load(open("normalizer.pickle", "rb"))
    classifier = pickle.load(open("classifier.pickle", "rb"))
    indexToIds = pickle.load(open("indexToIds.pickle", "rb"))
    for grain in tqdm(grains):
        # Analyze all stats for the grain
        totalGrains += 1
        dataPoint = np.empty([1, 16])
        mfccs = analyzer.analyzeMFCC(grain)
        dataPoint[0][0:13] = mfccs[0:13]  # the first 13 features are MFCC coefficients
        dataPoint[0][13] = analyzer.analyzePitch(grain)
        dataPoint[0][14] = analyzer.analyzeEnergy(grain)
        # dataPoint[15] = kurtosis, [16] = skewness, [17] = spread, [18] = centroid (unused in the source)
        dataPoint[0][15] = analyzer.analyzeZeroCrossingRate(grain)
        classifications[int(classifier.predict(normalizer.transform(dataPoint))[0])] += 1
        # print("Prediction: " + str(classifier.predict(normalizer.transform(dataPoint))[0]))
    print("Genre:")
    for i in range(0, 12):
        print("\t" + labels[i] + ": " + str((float(classifications[i]) / float(totalGrains)) * 100.0) + "%")
Example 12: compose
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def compose(segments, out="out.mp3", padding=0, crossfade=0, layer=False):
    """Stitches together a new audio track"""
    files = {}
    working_segments = []
    audio = AudioSegment.empty()
    if layer:
        total_time = max([s["end"] - s["start"] for s in segments]) * 1000
        audio = AudioSegment.silent(duration=total_time)
    for i, s in enumerate(segments):
        try:
            start = s["start"] * 1000
            end = s["end"] * 1000
            f = s["file"].replace(".transcription.txt", "")
            if f not in files:
                if f.endswith(".wav"):
                    files[f] = AudioSegment.from_wav(f)
                elif f.endswith(".mp3"):
                    files[f] = AudioSegment.from_mp3(f)
            segment = files[f][start:end]
            print(start, end, f)
            if layer:
                audio = audio.overlay(segment, times=1)
            else:
                if i > 0:
                    audio = audio.append(segment, crossfade=crossfade)
                else:
                    audio = audio + segment
            if padding > 0:
                audio = audio + AudioSegment.silent(duration=padding)
            s["duration"] = len(segment)
            working_segments.append(s)
        except Exception:
            continue
    audio.export(out, format=os.path.splitext(out)[1].replace(".", ""))
    return working_segments
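compose expects each segment dict to carry start/end times in seconds plus the source file (a .transcription.txt suffix, if present, is stripped to locate the audio). A minimal hedged call, with hypothetical files and times:

segments = [
    {"start": 0.0, "end": 2.5, "file": "intro.wav"},
    {"start": 10.0, "end": 12.0, "file": "interview.mp3"},
]
compose(segments, out="cut.mp3", padding=250)  # 250 ms of silence after each clip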
Example 13: compose
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def compose(segments, out='out.mp3', padding=0, crossfade=0, layer=False):
    '''Stitches together a new audio track'''
    files = {}
    working_segments = []
    audio = AudioSegment.empty()
    if layer:
        total_time = max([s['end'] - s['start'] for s in segments]) * 1000
        audio = AudioSegment.silent(duration=total_time)
    for i, s in enumerate(segments):
        try:
            start = s['start'] * 1000
            end = s['end'] * 1000
            f = s['file'].replace('.transcription.txt', '')
            if f not in files:
                if f.endswith('.wav'):
                    files[f] = AudioSegment.from_wav(f)
                elif f.endswith('.mp3'):
                    files[f] = AudioSegment.from_mp3(f)
            segment = files[f][start:end]
            print(start, end, f)
            if layer:
                audio = audio.overlay(segment, times=1)
            else:
                if i > 0:
                    audio = audio.append(segment, crossfade=crossfade)
                else:
                    audio = audio + segment
            if padding > 0:
                audio = audio + AudioSegment.silent(duration=padding)
            s['duration'] = len(segment)
            working_segments.append(s)
        except Exception:
            continue
    audio.export(out, format=os.path.splitext(out)[1].replace('.', ''))
    return working_segments
Example 14: conmbine
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def conmbine(singer_item='zhangxulong', dir='album'):
    combine_songs = []
    for parent, dirname, filename in os.walk(dir):
        for file in filename:
            path = os.path.join(parent, file)
            singer_name = path.split('/')[1]  # assumes a layout like album/<singer>/<track>
            if singer_name == singer_item:
                combine_songs.append(path)
                # print(singer_name)
    all_sound = AudioSegment.empty()
    for sound in combine_songs:
        all_sound += AudioSegment.from_mp3(sound)
    output_dir = "combined/" + singer_item + ".wav"
    if not os.path.exists("combined/"):
        os.makedirs("combined/")
    single_sound = all_sound.set_channels(1)
    single_sound.export(output_dir, format="wav")
    # single_sound.export(output_dir, format="wav", bitrate="160k")
    return 0
Example 15: compile_supercut
# Required imports: from pydub import AudioSegment [as alias]
# Or: from pydub.AudioSegment import empty [as alias]
def compile_supercut(prnc_dict, word, file_name, show_name):
    """
    Creates a supercut of all the instances of a particular pronunciation of a
    given word in a podcast series.
    """
    pad_length = 500.  # in milliseconds
    prev_file = None
    word_dict = prnc_dict[word]
    silence = AudioSegment.silent(duration=1000)
    for prnc in word_dict.keys():
        supercut = AudioSegment.empty()
        locations = np.asarray(word_dict[prnc]['locations'])
        timesteps = np.asarray(word_dict[prnc]['timesteps']) * 1000.
        for ((line, file_id), (start, stop)) in zip(locations[:30], timesteps[:30]):
            if prev_file == file_id:
                pass
            else:
                path = './alignment_data/seg_audio/{}.wav'.format(file_id)
                audio = AudioSegment.from_file(path, format='wav', channels=1,
                                               sample_width=2)
                seg_len = (audio.frame_count() / audio.frame_rate) * 1000.
                prev_file = file_id
            if start < pad_length:
                start = pad_length
            if stop + pad_length > seg_len:
                stop = seg_len - pad_length
            supercut += silence + audio[start - pad_length:stop + pad_length]
        supercut_fp = "./alignment_data/supercuts/{}_{}_{}.wav"\
            .format(word, prnc, file_name)
        tag_dict = {"word": word, "pronunciation": prnc, "show": show_name}
        supercut.export(supercut_fp, format="wav", tags=tag_dict)
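The code implies a particular shape for prnc_dict: each word maps to its pronunciations, and each pronunciation holds parallel locations and timesteps lists. A hypothetical instance, for illustration only:

prnc_dict = {
    "data": {                                  # the word
        "D EY1 T AH0": {                       # one pronunciation (hypothetical key format)
            "locations": [(12, "ep001"), (40, "ep002")],   # (line, file_id)
            "timesteps": [(3.2, 3.6), (71.0, 71.5)],       # (start, stop) in seconds
        },
    },
}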