本文整理汇总了Python中pydub.AudioSegment.from_wav方法的典型用法代码示例。如果您正苦于以下问题:Python AudioSegment.from_wav方法的具体用法?Python AudioSegment.from_wav怎么用?Python AudioSegment.from_wav使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pydub.AudioSegment
的用法示例。
在下文中一共展示了AudioSegment.from_wav方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def run(self):
    """Poll ``self.scan_directory`` forever, exporting each newly seen
    .wav file as an mp3 into ``self.mp3_directory``.

    Already-converted names are remembered for the lifetime of the
    thread so each file is converted at most once.
    """
    seen = []
    while True:
        for name in os.listdir(self.scan_directory):
            if not name.endswith('.wav') or name in seen:
                continue
            source = AudioSegment.from_wav(self.scan_directory + name)
            # Replace the ".wav" suffix with "mp3" (directories are
            # assumed to already end with a path separator).
            source.export(self.mp3_directory + name[:-3] + 'mp3', format='mp3')
            seen.append(name)
示例2: main
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def main():
    """Watch DIRECTORY forever, converting new EXTENSION files to mp3 in OUTPUT_DIR.

    Creates OUTPUT_DIR on first run.  Any directory-creation or
    conversion error is logged and terminates the process with status 1.
    Successfully converted names are remembered so each file is
    processed only once.
    """
    converted_files = []
    if not os.path.exists(OUTPUT_DIR):
        try:
            os.makedirs(OUTPUT_DIR)
        except Exception as e:
            now = datetime.now().strftime('%Y.%m.%d %H:%M')
            logger.error(
                "{} Ошибка при создании директории, текст: {}".format(now, e)
            )
            sys.exit(1)
    while True:
        files = [f for f in os.listdir(DIRECTORY) if os.path.isfile(
            os.path.join(DIRECTORY, f))]
        for f in files:
            # Bug fix: the original `f.split('.')[1]` raised IndexError for
            # names without a dot and compared the wrong fragment for names
            # with several dots; os.path.splitext handles both correctly.
            base, ext = os.path.splitext(f)
            if ext.lstrip('.') == EXTENSION and f not in converted_files:
                new_name = base + '.mp3'
                now = datetime.now().strftime('%Y.%m.%d %H:%M')
                try:
                    AudioSegment.from_wav(os.path.join(DIRECTORY, f)).export(
                        os.path.join(OUTPUT_DIR, new_name), format="mp3")
                    converted_files.append(f)
                    logger.debug(
                        "{} Успешно переконвертировали файл {} ".format(now, f)
                    )
                except Exception as e:
                    logger.error(
                        "{} Ошибка при конвертации файла {}, текст: {}".
                        format(now, f, e)
                    )
                    sys.exit(1)
示例3: createSoundFile
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def createSoundFile(morse, sound_dir=r"C:\Users\Gaurav Keswani\Documents\Eclipse\Morse-Code-Generator\src\resources\sound"):
    """Render a morse string to a wav file named ``morse.wav`` in *sound_dir*.

    *morse* is a space-separated sequence of word codes whose characters
    are "0" (dot), "1" (dash), or anything else (letter gap).

    Generalized: the previously hard-coded resource directory is now the
    defaulted *sound_dir* parameter, so existing callers are unaffected
    while the function works outside the original machine.
    """
    dot = AudioSegment.from_wav(sound_dir + "\\dot.wav")
    dash = AudioSegment.from_wav(sound_dir + "\\dash.wav")
    #word_gap = AudioSegment.from_wav(sound_dir + "\\void.wav")
    sound_config = AudioSegment.empty()
    # Splitting the morse sentence into various word codes
    codes = morse.split(" ")
    for morseWord in codes:
        # Splitting each word code into individual codes
        for item in morseWord:
            if item == "0":
                # Adding dot sound for zero
                sound_config += dot
            elif item == "1":
                # Adding dash sound for one
                sound_config += dash
            else:
                # Adding a pause between each alphabet
                sound_config += AudioSegment.silent(300)
                # NOTE(review): pydub slices in milliseconds, so
                # dot[0.1:0.2] is an essentially empty segment — confirm
                # this was not meant to be dot[100:200].
                sound_config += dot[0.1:0.2]
    # Exporting the sound file as morse.wav
    sound_config.export(sound_dir + "\\morse.wav", format="wav")
示例4: overdub
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def overdub(_files, _returnPath):
    """Mix the two wav files in *_files*, write the result to
    *_returnPath*, delete both source files, and return True."""
    first = AudioSegment.from_wav(_files[0])
    second = AudioSegment.from_wav(_files[1])
    mixed = first.overlay(second)
    mixed.export(_returnPath, format='wav')
    os.remove(_files[0])
    os.remove(_files[1])
    return True
示例5: main
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def main():
    """For every configured folder, append a randomly chosen end tone
    (resampled to 16 kHz) to each .wav file and write the result to the
    corresponding output folder.  The numerically last file gets its
    tone from ``end_wavs2``; all others from ``end_wavs1``."""
    wav_re = re.compile(r'\.wav$')
    #print('End wav path:', end_path)
    #sound_end = AudioSegment.from_wav(end_path).set_frame_rate(16000)
    for pair in wav_folders:
        name = pair[0]
        src_dir = '/'.join([input_folder_prefix, name, input_folder_suffix])
        dst_dir = '/'.join([output_folder_prefix, name, output_folder_suffix])
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
        # Keep only the .wav files in the source folder.
        wav_files = [f for f in os.listdir(src_dir) if wav_re.search(f)]
        # Matches the file whose basename equals the file count, i.e. the
        # last one in a 1..N numbering scheme.
        last_re = re.compile(str(len(wav_files)) + r'\.wav$')
        for fname in wav_files:
            #run_single(input_folder + '/' + filename, output_folder + '/' + filename)
            print('------')
            print('Processing %s...' % (src_dir + '/' + fname))
            clip = AudioSegment.from_wav(src_dir + '/' + fname)
            pool = end_wavs2 if last_re.search(fname) else end_wavs1
            tone_name = random_pick(pool)
            print('End tone filename:%s' % (tone_name))
            tone = AudioSegment.from_wav(tone_name).set_frame_rate(16000)
            (clip + tone).export(dst_dir + '/' + fname, format="wav")
示例6: manipulate
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def manipulate(filename, output):
    """Overlay two fixed backing tracks and write the mix to *output*.

    The millisecond offsets skip the intros of the two stock files.
    *filename* is accepted for interface compatibility but unused — the
    commented-out line shows it was once meant to be the source track.
    """
    # song = AudioSegment.from_wav(filename)
    # song = AudioSegment.from_mp3("never_gonna_give_you_up.mp3")
    animals = AudioSegment.from_wav("files/payphone128.wav")[16462:]
    blame = AudioSegment.from_wav("files/bg128.wav")[8584:]
    music = animals.overlay(blame, position=0)
    # Bug fix: honour the *output* argument instead of always writing
    # the hard-coded "algo.wav".
    music.export(output, format="wav")
示例7: test3
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def test3():
    """Smoke-test findSound across the full joined clip and print the
    smallest distance recorded in the global D."""
    haystack = AudioSegment.from_wav(r'Output_audio\part_from_join_short.wav')
    needle = AudioSegment.from_wav(r'Output_audio\part_from_file.wav')
    total = len(haystack)
    r = findSound(haystack, 0, total, needle)
    print(min(D))
    return True
示例8: match
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def match(filename1, filename2):
    """Beat-match *filename2* into *filename1*.

    Song 1 is attenuated by 7 dB; song 2 is trimmed past its 3 s peak,
    faded in/out over 2 s, and boosted 6 dB before being overlaid onto
    song 1 from its 9 s peak.  Returns the mixed AudioSegment with a
    2 s fade-in.
    """
    base = AudioSegment.from_wav(filename1) - 7
    incoming_full = AudioSegment.from_wav(filename2)
    bpm1, peaks1 = bpm_detection.bpm_detection(filename1, 3)
    bpm2, peaks2 = bpm_detection.bpm_detection(filename2, 3)
    cut = 9000 + peaks1[1]
    incoming = incoming_full[3000 + peaks2[1] :].fade_in(2000).fade_out(2000) + 6
    mixed = base[:cut].append(base[cut:].overlay(incoming, position=0))
    return mixed.fade_in(2000)
示例9: setUp
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def setUp(self):
    """Lazily load the shared wav fixtures once per process, then bind
    them to this test instance as seg1/seg4."""
    global test1wav, test4wav
    test1wav = test1wav or AudioSegment.from_wav(os.path.join(data_dir, 'test1.wav'))
    test4wav = test4wav or AudioSegment.from_wav(os.path.join(data_dir, 'test4.wav'))
    self.seg1 = test1wav
    self.seg4 = test4wav
示例10: test4
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def test4():
    """Check that findSound locates the known snippet at [101, 1728]
    inside the joined clip; raises on mismatch, returns True on success."""
    src = AudioSegment.from_wav(r'Output_audio\part_from_join_short.wav')
    l = len(src)
    seek = AudioSegment.from_wav(r'Output_audio\part_from_file.wav')
    fr, till = findSound(src, 0, l, seek)
    # Bug fix: the original used `and`, which only failed when BOTH
    # values were wrong, and then hit a NameError on `print(r)` since
    # `r` was never defined.
    if fr != 101 or till != 1728:
        raise Exception('Test4 not passed ')
    print(fr, till)
    return True
示例11: run_single
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def run_single(input_filename, output_filename, tone_path, insert_silence = True):
    """Append the end tone at *tone_path* (forced to 16 kHz) to the input
    wav, optionally padding with 1 s of leading and 3 s of trailing
    silence, and export the result to *output_filename*."""
    tone = AudioSegment.from_wav(tone_path).set_frame_rate(16000)
    print('------')
    print('Processing %s...' % (input_filename))
    body = AudioSegment.from_wav(input_filename)
    if not insert_silence:
        combined = body + tone
    else:
        # NOTE(review): the silence is generated at 44100 Hz while the
        # tone is forced to 16000 Hz — confirm this mix of frame rates
        # is intentional.
        lead = AudioSegment.silent(duration=1000, frame_rate = 44100)
        gap = AudioSegment.silent(duration=3000, frame_rate = 44100)
        combined = lead + body + gap + tone
    combined.export(output_filename, format="wav")
示例12: main
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def main():
    """Generate a carnatic song with a Markov model, synthesise the melody
    and a constant drone line to wav via pysynth, and mix them into
    final_output2.wav.  (Python 2 code — uses print statements.)
    """
    (read_file_handles, qpm, output_filename, octave, num_notes, width, output_score_file, pysynth_module, raaga) = GetOptions()
    make_wav = ImportPysynthModule(pysynth_module)
    carnatic_songs= []
    # Read and parse every input score into a note sequence.
    for f in read_file_handles:
        s = f.read()
        song = carnatic_util.CollectNotes(carnatic_util.PreProcessScore(s))
        carnatic_songs.append(song)
        f.close()
    markov_song_generator = markov_analyser.MarkovAnalyser(width, raaga)
    print "Reading Songs.."
    for song in carnatic_songs:
        markov_song_generator.AddSong(song)
    print "Analysing Songs.."
    markov_song_generator.MarkovAnalyse()
    print "Generating Song.."
    markov_song_generator.MarkovGenerate(num_notes)
    generated_song = markov_song_generator.GetGeneratedSong(output_score_file)
    generated_song = carnatic_util.ConvertLengthToTempo(generated_song)
    print "Converting to WAV.."
    english_notes = []
    base_note = raagas.Base_Note()
    base_line = []
    total_length = 5
    # Translate each generated note to its english name and build a drone
    # line of matching note lengths.  total_length is only read by the
    # commented-out alternative below.
    for (note, length) in generated_song:
        english_note = raagas.Translate(note, octave)
        english_notes.append((english_note, length))
        total_length+=1
        base_line.append((base_note, length))
        #base_line.append((base_note, total_length))
    print english_notes
    make_wav(english_notes, fn=output_filename, bpm = qpm)
    make_wav(base_line, fn="base_line.wav", bpm = qpm)
    sound1 = AudioSegment.from_wav(output_filename)
    sound2 = AudioSegment.from_wav("base_line.wav")
    # mix sound2 with sound1, starting at 5000ms into sound1)
    output = sound1.overlay(sound2)
    # output = sound1
    # save the result
    output.export('final_output2.wav', format="wav")
    return
示例13: export_digital
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def export_digital(item):
    """Export *item* as a digital track.

    "std" items: transcode the wav at ``item.path`` to
    ``item.digital_ext`` at 192k with basic tags; mp3 output also gets
    the thumbnail embedded as ID3 cover art.  Anything else is handed
    to the deluxe/45 splitter.
    """
    if "std" in item.option:
        digital_track = AudioSegment.from_wav(item.path)
        digital_track_path = os.path.join(DIGITAL_FOLDER, item.name) + "." + item.digital_ext
        # NOTE(review): "title" is tagged with item.album rather than
        # item.name — confirm this is intended.
        digital_track.export(
            out_f=digital_track_path,
            format=item.digital_ext,
            bitrate="192k",
            tags={"title": item.album, "artist": item.artist, "album": item.album},
        )
        # Add cover art
        if item.thumb and (item.digital_ext == "mp3"):
            mutagen_audio = MP3(digital_track_path, ID3=ID3)
            try:
                # Add ID3 tag if it doesn't exist
                mutagen_audio.add_tags()
            except error:
                pass
            mutagen_audio.tags.add(
                APIC(
                    encoding=3, # 3 is for utf-8
                    mime="image/jpeg", # image/jpeg or image/png
                    type=3, # 3 is for the cover image
                    desc=u"Cover",
                    data=open(item.thumb_path, "rb").read(),
                )
            )
            mutagen_audio.save()
        elif item.thumb:
            # Cover embedding is only implemented for mp3 so far.
            raise Exception("Time to implement FLAC cover images!")
        else:
            logging.warning("No cover found for item {0}".format(item.name))
    # Deluxe / 45
    else:
        split.split_item(item, DIGITAL_FOLDER)
示例14: splitMonologues
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def splitMonologues(danpass, monos):
    """Split each monologue wav in *monos* into roughly equal-sized
    pieces (driven by the global approxFileSize) and export them, with
    label files, into targetDirectoryMono.

    Transcription timing from the danpass spreadsheet determines the
    exact split points.
    """
    sheet = loadWorksheet(op.join(danpass, "monologues.xlsx"))
    words, transcriptions, phonetics = extractMonologueData([sheet])
    filesUtility.createDirectory(targetDirectoryMono)
    for key in transcriptions:
        file = op.join(monos, key+".wav")
        # Skip transcriptions that have no corresponding audio file.
        if not op.isfile(file):
            continue
        sound = AudioSegment.from_wav(file)
        size = op.getsize(file)
        splitTimes = []
        if (size <= 1.5 * approxFileSize):
            print ("%sFile '%s' is too small for meaningful split.%s" % (bcolors.OKBLUE, file, bcolors.ENDC))
        else:
            # Choose split points (in seconds) from the transcription so
            # cuts fall between utterances, not mid-word.
            numberOfSplits = int(size / approxFileSize)
            soundDuration = len(sound)
            # print ("Number of splits: %s" % (numberOfSplits))
            approxLength = int(soundDuration / (numberOfSplits+1))
            splitTimes = splitSingleMono(approxLength, transcriptions[key])
        if (len(splitTimes) == 0):
            # No splits.
            print ("%sNo splits.%s" % (bcolors.OKBLUE, bcolors.ENDC))
            sound.export(targetDirectoryMono + key + ".wav", format="wav")
        else:
            # Cut the audio at each split time (seconds -> milliseconds)
            # and export the numbered segments plus their labels.
            previousSplit = 0
            segments = []
            for splitPoint in splitTimes:
                mSec = int(round(splitPoint * 1000.0))
                segments.append(sound[previousSplit:mSec])
                previousSplit = mSec
            segments.append(sound[previousSplit:len(sound)])
            for i in range(0, len(segments)):
                segments[i].export(targetDirectoryMono + key + "(" + str(i) + ").wav", format="wav")
            # NOTE(review): indentation was lost in this copy; labels are
            # placed here so they are written only when the file was
            # actually split — confirm against the original source.
            createLabels(targetDirectoryMono+key, splitTimes, transcriptions[key])
示例15: get_text
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def get_text(self):
    """Return the recognised text for the buffered recording, memoised in
    ``self.text`` when already available.

    Transcodes the wav to a temporary FLAC, posts it to Google's legacy
    v1 speech API, and returns the most likely utterance.  Raises
    ConnectionLostException on network failure and NotUnderstoodException
    when no hypothesis comes back.  (Python 2 code — uses a print
    statement.)
    """
    if not self.text is None:
        return self.text
    # convert wav file to FLAC
    (_,stt_flac_filename) = tempfile.mkstemp('.flac')
    sound = AudioSegment.from_wav(self.audio.filename())
    sound.export(stt_flac_filename, format="flac")
    # send to Google to interpret into text
    g_url = "http://www.google.com/speech-api/v1/recognize?lang=en"
    headers = {'Content-Type': 'audio/x-flac; rate= %d;' % self.rec_rate}
    recording_flac_data = open(stt_flac_filename, 'rb').read()
    try:
        r = requests.post(g_url, data=recording_flac_data, headers=headers)
    except requests.exceptions.ConnectionError:
        raise ConnectionLostException()
    # Clean up the temporary FLAC regardless of recognition outcome.
    os.remove(stt_flac_filename)
    self.audio.housekeeping()
    response = r.text
    # Cheap substring check before paying for a full JSON parse.
    if not 'hypotheses' in response:
        raise NotUnderstoodException()
    # we are only interested in the most likely utterance
    phrase = json.loads(response)['hypotheses'][0]['utterance']
    print "Heard: " + phrase
    return str(phrase)