This article collects typical usage examples of the pydub.AudioSegment class in Python. If you have been wondering what AudioSegment is for, how to use it, or what real-world code built on it looks like, the selected class code examples below should help.
15 AudioSegment code examples are shown below, sorted by popularity by default; all of them assume "from pydub import AudioSegment" (and, for non-WAV formats, an ffmpeg/avconv binary on the PATH). You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
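Before the examples, here is a minimal, self-contained sketch of the core AudioSegment workflow (sample.mp3 is a placeholder file name, and an ffmpeg binary is assumed to be installed):

from pydub import AudioSegment

# Load a file; the format can be passed explicitly or inferred from the name.
song = AudioSegment.from_file("sample.mp3", format="mp3")

# Slices are in milliseconds, gains are in dB.
clip = song[:10000].apply_gain(+6)

# Re-encode to another container/codec.
clip.export("clip.wav", format="wav")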

Example 1: render

def render(mix_id, *args):
    # Each arg is expected to be [start_ms, filename, gain_db].
    # Shift all start times so the earliest track begins at 0.
    minimum = sys.maxint
    for arg in args:
        if arg[0] < minimum:
            minimum = arg[0]
    for arg in args:
        arg[0] -= minimum
    # Mix the tracks in order of their start position.
    prio_queue = Queue.PriorityQueue()
    for arg in args:
        prio_queue.put(arg)
    base = prio_queue.get(0)
    base_track = AudioSegment.from_file(base[1], "m4a")
    gain = base[2]
    base_track = base_track.apply_gain(gain)
    while not prio_queue.empty():
        overlay = prio_queue.get(0)
        overlay_track = AudioSegment.from_file(overlay[1], "m4a")
        gain = overlay[2]
        if gain != 0:
            overlay_track = overlay_track.apply_gain(gain)
        base_track = base_track.overlay(overlay_track, position=overlay[0])
    base_track.export('mix.wav', format='wav')
    command = 'ffmpeg -b 66k -y -f wav -i ./mix.wav ./mix.aac'
    subprocess.call(command, shell=True)
    os.remove('mix.wav')
# concaac(mix_id, [0, "test1.m4a", 0], [5000, "test2.m4a", -10], [10000, "test3.m4a", 5])
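
As an aside, the final shell-out to ffmpeg is not strictly needed: export() can pass the container format, codec and bitrate to ffmpeg itself. A rough sketch, assuming the locally installed ffmpeg provides the "adts" muxer and an "aac" encoder (mix.wav stands for the intermediate WAV above):

from pydub import AudioSegment

mix = AudioSegment.from_wav("mix.wav")
# Write an AAC (ADTS) stream at roughly 66 kbit/s straight from pydub.
mix.export("mix.aac", format="adts", codec="aac", bitrate="66k")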

Example 2: main

def main():
    converted_files = []
    if not os.path.exists(OUTPUT_DIR):
        try:
            os.makedirs(OUTPUT_DIR)
        except Exception as e:
            now = datetime.now().strftime('%Y.%m.%d %H:%M')
            logger.error(
                "{} Error while creating the directory, details: {}".format(now, e)
            )
            sys.exit(1)
    while True:
        files = [f for f in os.listdir(DIRECTORY) if os.path.isfile(
            os.path.join(DIRECTORY, f))]
        for f in files:
            if f.split('.')[1] == EXTENSION and f not in converted_files:
                new_name = f.split('.')[0] + '.mp3'
                now = datetime.now().strftime('%Y.%m.%d %H:%M')
                try:
                    AudioSegment.from_wav(os.path.join(DIRECTORY, f)).export(
                        os.path.join(OUTPUT_DIR, new_name), format="mp3")
                    converted_files.append(f)
                    logger.debug(
                        "{} Successfully converted file {}".format(now, f)
                    )
                except Exception as e:
                    logger.error(
                        "{} Error while converting file {}, details: {}".
                        format(now, f, e)
                    )
                    sys.exit(1)
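
One detail worth noting: f.split('.')[1] misbehaves for names with more than one dot (my.recording.wav would be compared against "recording" and renamed to my.mp3). os.path.splitext is the safer way to split off the extension; a small sketch with a hypothetical file name:

import os

filename = "my.recording.wav"
stem, ext = os.path.splitext(filename)   # -> ("my.recording", ".wav")
if ext.lstrip('.').lower() == "wav":
    new_name = stem + ".mp3"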

Example 3: start

def start(self):
    # Server runs until killed
    while True:
        # If we have a request, play it
        if len(self.request_list) != 0:
            self.current_song = AudioSegment.from_mp3("../songs/" + self.request_list.popleft())
        # Otherwise, play a random song
        else:
            self.current_song = AudioSegment.from_mp3("../songs/" + random.choice(self.songlist))
        self.new_song()
        # Stream the entire song (iterating an AudioSegment yields 1 ms sub-segments)
        for chunk in self.current_song:
            # Simply skip the time for the client
            if not self.has_client:
                sleep(0.001)
            else:
                # Stream chunk to first client
                client, address = self.clients[0]
                try:
                    chunk = chunk.raw_data
                    chunk = chunk[:self.chunk_size].ljust(self.chunk_size)
                    chunk_length = str(len(chunk))
                    client.sendto(bytes("SC" + chunk_length + (4 - len(chunk_length)) * " ", "UTF-8"), address)
                    client.sendto(chunk, address)
                # Disconnects will be handled, just maybe not in time to avoid
                # this error a few times. We just ignore the error
                except BrokenPipeError:
                    pass
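
Iterating over an AudioSegment yields one-millisecond slices, which is very fine-grained for network streaming. pydub.utils.make_chunks cuts a segment into fixed-length pieces instead; a minimal sketch (song.mp3 and the 50 ms chunk length are arbitrary choices):

from pydub import AudioSegment
from pydub.utils import make_chunks

song = AudioSegment.from_mp3("song.mp3")
for chunk in make_chunks(song, 50):   # 50 ms AudioSegment pieces
    payload = chunk.raw_data          # raw PCM bytes for this chunk
    # ... send payload over the socket ...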

Example 4: export_wav

def export_wav(self, filename):
    n = self.song.num_tracks
    self.song.export_song("temp/song.abc")
    sounds = ["--syn_a", "--syn_b", "--syn_s", "--syn_e"]
    # Render each track of the ABC file to its own WAV file with a random synth voice.
    for i in range(n):
        os.system(
            "python read_abc.py temp/song.abc "
            + str(i + 1)
            + " temp/out_"
            + str(i + 1)
            + ".wav "
            + random.choice(sounds)
        )
    os.remove("temp/song.abc")
    # Mix every additional track on top of the first one and export the result.
    combined = AudioSegment.from_file("temp/out_1.wav")
    if n >= 2:
        for i in range(1, n):
            sound = AudioSegment.from_file("temp/out_" + str(i + 1) + ".wav")
            combined = combined.overlay(sound)
    combined.export(filename, format="wav")
    for i in range(n):
        os.remove("temp/out_" + str(i + 1) + ".wav")

Example 5: get_data

def get_data(path):
    """
    Gets the data associated with an audio file, converting to wav when necessary.
    :param path: path to audio file
    :return: sample rate, audio data
    """
    if path.endswith(".wav"):
        bee_rate, bee_data = read(path)
    else:
        # Convert flac/mp3 input to a temporary wav file first.
        temp = tempfile.NamedTemporaryFile(suffix=".wav")
        temp.close()
        if path.endswith(".flac"):
            sound = AudioSegment.from_file(path, "flac")
            sound.export(temp.name, format="wav")
        elif path.endswith(".mp3"):
            sound = AudioSegment.from_file(path, "mp3")
            sound.export(temp.name, format="wav")
        bee_rate, bee_data = read(temp.name)
        os.remove(temp.name)
    # Rescale the integer samples to floats in [-1.0, 1.0].
    data_type = np.iinfo(bee_data.dtype)
    dmin = data_type.min
    dmax = data_type.max
    bee_data = bee_data.astype(np.float64)
    bee_data = 2.0 * ((bee_data - dmin) / (dmax - dmin)) - 1.0
    bee_data = bee_data.astype(np.float32)
    return bee_rate, bee_data
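
The temporary-file round trip can usually be avoided: export() accepts any file-like object, so the converted WAV can stay in memory. A minimal sketch, assuming read is scipy.io.wavfile.read (as in the example above) and clip.flac is a placeholder path:

import io
from pydub import AudioSegment
from scipy.io.wavfile import read

buf = io.BytesIO()
AudioSegment.from_file("clip.flac", "flac").export(buf, format="wav")
buf.seek(0)
bee_rate, bee_data = read(buf)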

Example 6: audiodata_getter

def audiodata_getter(path, date, filedate, filename, index):
    # Check to see if it's a wav file. If not, convert in a temp file.
    splitname = os.path.splitext(filename)[0]
    if os.path.splitext(filename)[1] != ".wav":
        temp = tempfile.NamedTemporaryFile(suffix=".wav")
        if os.path.splitext(filename)[1] == ".mp3":
            if "mp3" in path and date is None:
                sound = AudioSegment.from_file(path + filedate[index] + "/" + filename, "mp3")
            else:
                sound = AudioSegment.from_file(path + filename, "mp3")
            sound.export(temp.name, format="wav")
        if os.path.splitext(filename)[1] == ".flac":
            if "mp3" in path and date is None:
                sound = AudioSegment.from_file(path + filedate[index] + "/" + filename, "flac")
            else:
                sound = AudioSegment.from_file(path + filename, "flac")
            # Export as wav so the standard wave module can read the temp file.
            sound.export(temp.name, format="wav")
        try:
            wav = wave.open(temp, 'r')
            return wav
        except:
            print(filename + " corrupted or not an audio file.")
    else:
        try:
            # Open the .wav file and get the vital information
            wav = wave.open(path + "/audio/" + filename, 'r')
            return wav
        except:
            print(filename + " corrupted or not an audio file.")

Example 7: test_direct_instantiation_with_bytes

def test_direct_instantiation_with_bytes(self):
    # A 44-byte RIFF/WAVE header plus one frame of 16-bit stereo PCM at 32 kHz.
    seg = AudioSegment(
        b'RIFF\x28\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01\x00\x02\x00\x00}\x00\x00\x00\xf4\x01\x00\x04\x00\x10\x00data\x04\x00\x00\x00\x00\x00\x00\x00')
    self.assertEqual(seg.frame_count(), 1)
    self.assertEqual(seg.channels, 2)
    self.assertEqual(seg.sample_width, 2)
    self.assertEqual(seg.frame_rate, 32000)
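
Hand-crafting a RIFF header, as this test does, is rarely necessary: AudioSegment can also be built from raw PCM bytes by spelling out the stream parameters. A minimal sketch producing the same one-frame segment:

from pydub import AudioSegment

# One 16-bit stereo frame of silence at 32 kHz: 2 channels * 2 bytes = 4 bytes.
seg = AudioSegment(data=b"\x00\x00\x00\x00", sample_width=2, frame_rate=32000, channels=2)
assert seg.frame_count() == 1 and seg.channels == 2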

Example 8: talklang

def talklang(phrase, lang='FR'):
    try:
        language_dict = {"FR": 'fr-FR',
                         "US": 'en-US',
                         "GB": 'en-GB',
                         "DE": 'de-DE',
                         "ES": 'es-ES',
                         "IT": 'it-IT'
                         }
        language = language_dict[lang]
        phrase = phrase.encode('utf-8')
        cachepath = os.path.dirname(os.path.dirname(__file__))
        file = 'tts'
        filename = os.path.join(cachepath, file + '.wav')
        filenamemp3 = os.path.join(cachepath, file + '.mp3')
        # Synthesize the phrase to wav with pico2wave, then re-encode it to mp3.
        os.system('pico2wave -l ' + language + ' -w ' + filename + ' "' + phrase + '"')
        song = AudioSegment.from_wav(filename)
        songmodified = song
        songmodified.export(filenamemp3, format="mp3", bitrate="128k", tags={'albumartist': 'Talkie', 'title': 'TTS', 'artist': 'Talkie'}, parameters=["-ar", "44100", "-vol", "200"])
        song = AudioSegment.from_mp3(filenamemp3)
        cmd = ['mplayer']
        cmd.append(filenamemp3)
        # Wait until GPIO 17 is free, then raise GPIO 18 while the message plays.
        if GPIO.input(17) != 0:
            print 'GPIO 17 currently in use'
            while GPIO.input(17) != 0:
                time.sleep(0.5)
            print 'GPIO 17 free'
        GPIO.output(18, 1)
        print 'GPIO 18 ON, playing the synthesized message'
        with open(os.devnull, 'wb') as nul:
            subprocess.call(cmd, stdout=nul, stderr=subprocess.STDOUT)
        GPIO.output(18, 0)
        print 'Synthesis finished, GPIO 18 OFF'
    except Exception, e:
        return str(e)

Example 9: generateFile

def generateFile(self):
    wav = default_storage.open('songs/' + str(self.pk) + '.wav', 'wb')
    final = None
    pitches = map(int, self.pitches.split(','))
    durations = map(int, self.durations.split(','))
    # Concatenate one pre-rendered pitch sample per note, trimmed to its duration.
    for pitch, duration in zip(pitches, durations):
        fn = 'pitches/' + pitchTable[pitch] + '.wav'
        pf = default_storage.open(fn)
        if final is None:
            final = AudioSegment(pf)[0:durationTable[duration]]
        else:
            final += AudioSegment(pf)[0:durationTable[duration]]
    # Copied from the AudioSegment source...
    # I should have changed AudioSegment (getWaveFileContents() or something) and
    # submitted a pull request, but I have a deadline.
    # Possibly optimize to just have a string packed with data, then use ContentFile instead of File below.
    wave_data = wave.open(wav, 'wb')
    wave_data.setnchannels(final.channels)
    wave_data.setsampwidth(final.sample_width)
    wave_data.setframerate(final.frame_rate)
    wave_data.setnframes(int(final.frame_count()))
    wave_data.writeframesraw(final._data)
    wave_data.close()
    wav.close()  # ?
    wav_rb = default_storage.open('songs/' + str(self.pk) + '.wav', 'rb')
    self.wav.save('songs/' + str(self.pk) + '.wav', File(wav_rb))
    wav_rb.close()
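
The hand-rolled wave.open block can normally be replaced by export(), which writes a complete WAV file (header included) to any writable file-like object. A short sketch of that simpler path, with AudioSegment.silent standing in for the concatenated notes:

from pydub import AudioSegment

final = AudioSegment.silent(duration=1000)   # stand-in for the concatenated notes
with open("song.wav", "wb") as out:          # or a Django storage handle, as above
    final.export(out, format="wav")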

Example 10: overdub

def overdub(_files, _returnPath):
    # Mix the two wav files together, write the result, and delete the inputs.
    s1, s2 = AudioSegment.from_wav(_files[0]), AudioSegment.from_wav(_files[1])
    _dubbed = s1.overlay(s2)
    _dubbed.export(_returnPath, format='wav')
    os.remove(_files[0])
    os.remove(_files[1])
    return True
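
Note that overlay() never extends the base segment: the result is exactly as long as s1, and any part of s2 beyond that point is dropped. If both tracks must be heard in full, pad the shorter one with silence first; a small sketch with placeholder file names:

from pydub import AudioSegment

a = AudioSegment.from_wav("a.wav")
b = AudioSegment.from_wav("b.wav")
if len(b) > len(a):
    a += AudioSegment.silent(duration=len(b) - len(a))
a.overlay(b).export("mixed.wav", format="wav")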

Example 11: outputTrack

def outputTrack(playList):
    au_file(name='master.au', freq=0, dur=playList[len(playList) - 1][0][1], vol=0.2)
    masterSong = AudioSegment.from_file("master.au", "au")
    for item in playList:
        # get the length of this segment
        longitudDelSegmento = int(item[0][1]) - int(item[0][0])
        # get whether it loops
        loops = item[2]
        # create the sounds for this section
        sonidoNum = 1  # counter for the sounds
        # create a temporary sound that will hold this whole section
        au_file(name="instrumento.au", freq=0, dur=longitudDelSegmento, vol=1)
        for itemSonido in item[1]:
            nombre = 'sound' + str(sonidoNum) + ".au"
            # print(nombre, itemSonido[2], itemSonido[1], float(itemSonido[0]))
            au_file(name=nombre, freq=int(itemSonido[2]), dur=int(itemSonido[1]), vol=float(itemSonido[0]))
            sonidoNum += 1
        instrumento = AudioSegment.from_file("instrumento.au", "au")
        for i in range(1, sonidoNum):
            nombre = 'sound' + str(i) + ".au"
            # open the file
            temp = AudioSegment.from_file(nombre, "au")
            # layer it into the instrument
            instrumento = instrumento.overlay(temp, position=0, loop=loops)
        # trim the instrument to the segment length
        instrumento = instrumento[:longitudDelSegmento]
        # overlay this section onto the master track
        masterSong = masterSong.overlay(instrumento, position=int(item[0][0]))
    # final = masterSong*2
    masterSong.export("testingSong.emepetres", format="mp3")

Example 12: main

def main():
    wav_pat = re.compile(r'\.wav$')
    # print('End wav path:', end_path)
    # sound_end = AudioSegment.from_wav(end_path).set_frame_rate(16000)
    for pair in wav_folders:
        folder_name = pair[0]
        input_folder = input_folder_prefix + '/' + folder_name + '/' + input_folder_suffix
        output_folder = output_folder_prefix + '/' + folder_name + '/' + output_folder_suffix
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        # find all files with wav suffix
        files = list(filter(lambda x: wav_pat.search(x), os.listdir(input_folder)))
        num_file = len(files)
        last_wav_pat = re.compile(str(num_file) + r'\.wav$')
        for filename in files:
            # run_single(input_folder + '/' + filename, output_folder + '/' + filename)
            print('------')
            print('Processing %s...' % (input_folder + '/' + filename))
            sound_input = AudioSegment.from_wav(input_folder + '/' + filename)
            # Use a different pool of end tones for the last file in the folder.
            if last_wav_pat.search(filename):
                end_filename = random_pick(end_wavs2)
            else:
                end_filename = random_pick(end_wavs1)
            print('End tone filename: %s' % (end_filename))
            sound_end = AudioSegment.from_wav(end_filename).set_frame_rate(16000)
            # "+" concatenates the input recording and the end tone.
            sound_combined = sound_input + sound_end
            sound_combined.export(output_folder + '/' + filename, format="wav")
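
Concatenation with "+" simply butts the two segments together. If the joint should be smoothed, append() takes a crossfade length in milliseconds; a small sketch with placeholder file names and an arbitrary 100 ms crossfade:

from pydub import AudioSegment

voice = AudioSegment.from_wav("prompt.wav")
tone = AudioSegment.from_wav("end_tone.wav")

smoothed = voice.append(tone, crossfade=100)   # "+" is equivalent to crossfade=0
smoothed.export("prompt_with_tone.wav", format="wav")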

Example 13: GetVoice

def GetVoice(word):  # https://tts.voicetech.yandex.net/generate?text=text&key=3f874a4e-723d-48cd-a791-7401169035a0&format=mp3&speaker=zahar&emotion=good
    req = ('https://tts.voicetech.yandex.net/generate?ie=UTF-8&text=' + word + '&key=' + API_KEY_VOICE + '&format=mp3&speaker=ermil&emotion=neutral')
    response = requests.get(req, stream=True)
    # Download the synthesized mp3, then convert it to ogg with pydub.
    with open("yasound.mp3", "wb") as handle:
        for data in tqdm(response.iter_content()):
            handle.write(data)
    AudioSegment.from_file('yasound.mp3').export("yasound.ogg", format="ogg")
def run(self):
current_files = []
while True:
for file in os.listdir(self.scan_directory):
if file.endswith('.wav') and file not in current_files:
AudioSegment.from_wav(self.scan_directory+file).export(self.mp3_directory + file[:-3] + 'mp3', format='mp3')
current_files.append(file)
示例15: main
def main():
global background
global prettyGirls
global oyeahs
global marsOyeah
global girlsPretty
createBackground(song)
prettyGirls(song)
# we just so pretty
soPretty = song[19990:21250]
soPretty.export('soPretty.wav', 'wav')
soPretty = wave.open('soPretty.wav', 'r')
soPrettySlow = wave.open('soPrettySlow.wav', 'w')
soPrettySlow.setparams(soPretty.getparams())
writeFrames = soPretty.readframes(soPretty.getnframes())
soPrettySlow.setframerate(soPretty.getframerate() / 2)
soPrettySlow.writeframes(writeFrames)
soPrettySlow.close()
soPrettySlow = AudioSegment.from_wav('soPrettySlow.wav')
#combine last two
silent5 = AudioSegment.silent(duration=22000)
smallSilent = AudioSegment.silent(90)
girlsPretty = prettyGirls.append(smallSilent).append(soPrettySlow).append(silent5)
ohYeah(song)
mars(song)
drums(song)
delete()
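
The slow-down step can also be done without leaving pydub: spawning a copy with an overridden frame_rate changes the playback speed (and pitch), and set_frame_rate then resamples back to a normal rate. A sketch of that common idiom; it relies on the private but widely used _spawn helper, and soPretty.wav is the clip exported above:

from pydub import AudioSegment

clip = AudioSegment.from_wav("soPretty.wav")

# Pretend the data was recorded at half its real rate: half speed, one octave down.
slow = clip._spawn(clip.raw_data, overrides={"frame_rate": int(clip.frame_rate / 2)})

# Resample back to the original rate so players and exporters handle it normally.
slow = slow.set_frame_rate(clip.frame_rate)
slow.export("soPrettySlow.wav", format="wav")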