本文整理汇总了Python中pydub.AudioSegment.silent方法的典型用法代码示例。如果您正苦于以下问题:Python AudioSegment.silent方法的具体用法?Python AudioSegment.silent怎么用?Python AudioSegment.silent使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pydub.AudioSegment
的用法示例。
在下文中一共展示了AudioSegment.silent方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def main():
    """Assemble the remix: background bed, vocal chops, a slowed clip, drums."""
    global background
    global prettyGirls
    global oyeahs
    global marsOyeah
    global girlsPretty
    createBackground(song)
    prettyGirls(song)
    # Grab the "we just so pretty" phrase and bounce it to disk.
    pretty_clip = song[19990:21250]
    pretty_clip.export('soPretty.wav', 'wav')
    # Slow the phrase to half speed by halving the frame rate via `wave`.
    reader = wave.open('soPretty.wav', 'r')
    writer = wave.open('soPrettySlow.wav', 'w')
    writer.setparams(reader.getparams())
    frames = reader.readframes(reader.getnframes())
    writer.setframerate(reader.getframerate() / 2)
    writer.writeframes(frames)
    writer.close()
    slowed = AudioSegment.from_wav('soPrettySlow.wav')
    # Chain: phrase + tiny gap + slowed copy + long tail of silence.
    tail_silence = AudioSegment.silent(duration=22000)
    tiny_gap = AudioSegment.silent(90)
    girlsPretty = prettyGirls.append(tiny_gap).append(slowed).append(tail_silence)
    ohYeah(song)
    mars(song)
    drums(song)
    delete()
示例2: morsesound
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def morsesound(sentence, freq=1000, length=100, path ='output\\'):
"""Turn *sentence* into a Morse-code MP3 and return the output filename.

NOTE(review): indentation was lost in this listing, so the nesting below
is ambiguous — in particular whether the trailing ``result += sil1`` runs
once per symbol (inside the loop) or once after it. Confirm against the
original source before restructuring.
"""
mor = morse(sentence)
from pydub.generators import Sine
from pydub import AudioSegment
import re
# One Morse "unit" is `length` ms of tone at `freq` Hz; a dash is 3 units.
dot = Sine(freq).to_audio_segment(length)
dash = Sine(freq).to_audio_segment(length*3)
sil1 = AudioSegment.silent(length)
sil3 = AudioSegment.silent(length*3)
result = AudioSegment.silent(length)
for a in mor:
if a == ".":
result += dot
elif a == "-":
result += dash
elif a == "/":
# presumably a separator emitted by morse() — verify its output format
result += sil1
else:
result += sil3
result += sil1
# Strip characters that are illegal/awkward in filenames before exporting.
filename = path + re.sub(r'[/\?!:*|",.]','',sentence) + '.mp3'
result.export(filename,format="mp3")
return filename
示例3: interpret
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def interpret(file, bpm, metronome, dondokos):
# Render a drum "song" file into one mixed AudioSegment.
# NOTE(review): indentation was lost in this listing; loop/branch nesting
# below is reconstructed from context — confirm against the original.
dirstack = file.split(os.sep)[:-1]
song = []
now = 0
def play_voice(name, bpm, metronome, dondokos):
global PADDING
# Load `<name>.drum` from the same directory as the song file.
drum_file = os.sep.join(dirstack + [name]) + '.drum'
pattern = read_pattern(open(drum_file))
voice = play(pattern, bpm, metronome, dondokos)
# NOTE(review): the PADDING-rounded beat count below is immediately
# overwritten by the next line (dead code) — confirm which was intended.
beats = (((len(pattern) + PADDING - 1) / PADDING) * PADDING) / 4
beats = len(pattern) / 4
return voice, beats
for line in read_song(open(file)):
if ':' in line:
# key:value lines adjust playback settings for subsequent voices.
key, value = line.split(':')
if key == 'bpm':
# NOTE(review): bpm is *incremented* by the value, not replaced.
bpm += int(value)
elif key == 'metronome':
try:
metronome = int(value)
except:
# non-numeric value disables the metronome
metronome = None
elif key == 'dondokos':
try:
dondokos = int(value)
except:
dondokos = None
else:
print('bad key/value pair:', [key, value])
continue
if '|' in line:
# 'a|b|c' plays several voices simultaneously, mixed over a silent bed.
voices = []
beats = 0
for name in [n.strip() for n in line.split('|')]:
print(name, end='', flush=True)
v, b = play_voice(name, bpm, metronome, dondokos)
voices.append(v)
beats = max(beats, b)
voice = AudioSegment.silent(duration=(60000.0 / bpm)*beats+2000)
for v in voices:
voice = voice.overlay(v)
else:
voice, beats = play_voice(line, bpm, metronome, dondokos)
song.append((now, voice))
# Advance the song cursor by this line's duration in milliseconds.
time = beats * (60000.0 / bpm)
now += time
# Overlay every rendered line at its recorded offset onto a silent master.
master_mix = AudioSegment.silent(duration=now+2000)
for when, voice in song:
master_mix = master_mix.overlay(voice, position=when)
return master_mix
示例4: drums
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def drums(song):
    """Loop a drum hit sampled from `song`, mix it with the other layers, export."""
    global girlsPretty
    lead_in = AudioSegment.silent(duration=13200)
    tiny_gap = AudioSegment.silent(90)
    # Sample the drum hit and thicken it with a copy of its own slice.
    hit = song[196090:198400]
    hit = hit.append(tiny_gap)
    hit = hit.overlay(hit[500:1000], position=1300)
    # Repeat the hit 45 times and pull it down 5 dB.
    hit = hit * 45
    hit = hit.apply_gain(-5)
    hit = lead_in.append(hit)
    mixed = background.overlay(hit).overlay(marsOyeah, times=3).overlay(girlsPretty, position=35200, times=3)
    mixed.export('output.wav', 'wav')
示例5: recursiveEcho
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def recursiveEcho(track, echoCount=2, delayLength=200):
    """Layer `echoCount` progressively delayed, quieter copies onto `track`."""
    # Base case: no echoes left to add.
    if echoCount <= 0:
        return track
    # Prepend `delayLength` ms of silence so this level starts later.
    delayed = AudioSegment.silent(duration=delayLength) + track
    # Attenuate the next level by 20 // echoCount dB before recursing.
    quieter = delayed - 20 // echoCount
    return delayed.overlay(recursiveEcho(quieter, echoCount - 1, delayLength))
示例6: add_audio_segment
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def add_audio_segment(self, new_segment,
                      time=None,
                      gain_to_background=None):
    """Overlay `new_segment` onto this object's audio track.

    time -- start offset in seconds; defaults to the current end of the
            track.  Negative offsets raise.
    gain_to_background -- dB gain applied to the existing audio while the
            new segment plays (passed through to pydub's overlay).
    """
    # Lazily create the backing segment on first use.
    if not self.includes_sound:
        self.includes_sound = True
        self.create_audio_segment()
    base = self.audio_segment
    current_end = base.duration_seconds
    start = current_end if time is None else time
    if start < 0:
        raise Exception("Adding sound at timestamp < 0")
    # If the new clip runs past the current end, pad with silence so the
    # overlay is not truncated (pydub overlays never extend the base).
    overshoot = (start + new_segment.duration_seconds) - current_end
    if overshoot > 0:
        padding = AudioSegment.silent(int(np.ceil(overshoot * 1000)))
        base = base.append(padding, crossfade=0)
    self.audio_segment = base.overlay(
        new_segment,
        position=int(1000 * start),
        gain_during_overlay=gain_to_background,
    )
示例7: main
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def main():
    """Stitch spoken-digit clips for each line of the big-number file, in parallel."""
    print("Gettings raw number sound bites.")
    # Cache each digit's clip so the jobs can index it by the digit itself.
    for digit in range(10):
        number_sounds.append(AudioSegment.from_ogg("sound_bites/%i.ogg" % digit))
    lines = loadBigNumFileToList()
    print("Creating blank audio file in memory.")
    output = AudioSegment.silent(duration=500)  # 'blank' slate to append to
    job_server = pp.Server()
    print("Splitting labor, and starting")
    # Two parallel-python jobs, each rendering half of the first 20 lines.
    first_job = job_server.submit(processRangeForLines, (range(0, 10), lines, number_sounds))
    second_job = job_server.submit(processRangeForLines, (range(10, 20), lines, number_sounds))
    # Calling a job object blocks until it finishes and yields its result.
    first_audio = first_job()
    second_audio = second_job()
    print("Final concatenation.")
    output += first_audio + second_audio
    print("Done making, now exporting... it make take a while.")
    file_handle = output.export("output.ogg", format="ogg", bitrate="64k", tags={"artist": "Keely Hill", "comments":"Made proudly."})
    print("\033[92m\033[1mComplete!\033[0m")
示例8: createSoundFile
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def createSoundFile(morse):
# Convert an encoded morse string ('0' = dot, '1' = dash, anything else a
# gap) into morse.wav using pre-recorded dot/dash samples.
# NOTE(review): indentation was lost in this listing; it is ambiguous at
# which loop level the final `sound_config += dot[0.1:0.2]` line runs
# (per item vs per word) — confirm against the original source.
dot = AudioSegment.from_wav(r"C:\Users\Gaurav Keswani\Documents\Eclipse\Morse-Code-Generator\src\resources\sound\dot.wav")
dash = AudioSegment.from_wav(r"C:\Users\Gaurav Keswani\Documents\Eclipse\Morse-Code-Generator\src\resources\sound\dash.wav")
#word_gap = AudioSegment.from_wav(r"C:\Users\Gaurav Keswani\Documents\Eclipse\Morse-Code-Generator\src\resources\sound\void.wav")
sound_config = AudioSegment.empty()
#Splitting the morse sentence into various word codes
codes = morse.split(" ")
for morseWord in codes:
#Splitting each word code into individual codes
for item in morseWord:
#Adding dot sound for zero
if item == "0":
sound_config += dot
#Adding dash sound for one
elif item == "1":
sound_config += dash
#Adding a 100ms wait between each alphabet
else:
sound_config += AudioSegment.silent(300)
# NOTE(review): dot[0.1:0.2] slices a 0.1 ms sliver — verify intent.
sound_config += dot[0.1:0.2]
#Exporting the sound file as output.wav
sound_config.export(r"C:\Users\Gaurav Keswani\Documents\Eclipse\Morse-Code-Generator\src\resources\sound\morse.wav", format="wav")
示例9: compose
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def compose(segments, out='out.mp3', padding=0, crossfade=0, layer=False):
'''Stitches together a new audio track from transcript segments.

Each segment dict carries 'start'/'end' (seconds) and 'file' (a
.transcription.txt path mapped back to its .wav/.mp3 source).  Returns
the segments that were successfully placed, each annotated with its
rendered 'duration' in ms.  Python 2 source (bare print below).
'''
files = {}
working_segments = []
audio = AudioSegment.empty()
if layer:
# Layered mode: overlay clips onto a silent bed sized for the longest
# segment (times are seconds; pydub wants milliseconds).
total_time = max([s['end'] - s['start'] for s in segments]) * 1000
audio = AudioSegment.silent(duration=total_time)
for i, s in enumerate(segments):
try:
start = s['start'] * 1000
end = s['end'] * 1000
# Map the transcription filename back to its source audio file,
# loading and caching each source at most once.
f = s['file'].replace('.transcription.txt', '')
if f not in files:
if f.endswith('.wav'):
files[f] = AudioSegment.from_wav(f)
elif f.endswith('.mp3'):
files[f] = AudioSegment.from_mp3(f)
segment = files[f][start:end]
print start, end, f
if layer:
audio = audio.overlay(segment, times=1)
else:
if i > 0:
audio = audio.append(segment, crossfade=crossfade)
else:
audio = audio + segment
if padding > 0:
audio = audio + AudioSegment.silent(duration=padding)
s['duration'] = len(segment)
working_segments.append(s)
except:
# NOTE(review): bare except silently drops any segment that fails.
continue
# Infer export format from the output file extension.
audio.export(out, format=os.path.splitext(out)[1].replace('.', ''))
return working_segments
示例10: compose
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def compose(segments, out="out.mp3", padding=0, crossfade=0, layer=False):
"""Stitches together a new audio track from transcript segments.

Each segment dict carries 'start'/'end' (seconds) and 'file' (a
.transcription.txt path mapped back to its .wav/.mp3 source).  Returns
the segments that were successfully placed, each annotated with its
rendered 'duration' in ms.  Python 2 source (bare print below).
"""
files = {}
working_segments = []
audio = AudioSegment.empty()
if layer:
# Layered mode: overlay clips onto a silent bed sized for the longest
# segment (times are seconds; pydub wants milliseconds).
total_time = max([s["end"] - s["start"] for s in segments]) * 1000
audio = AudioSegment.silent(duration=total_time)
for i, s in enumerate(segments):
try:
start = s["start"] * 1000
end = s["end"] * 1000
# Map the transcription filename back to its source audio file,
# loading and caching each source at most once.
f = s["file"].replace(".transcription.txt", "")
if f not in files:
if f.endswith(".wav"):
files[f] = AudioSegment.from_wav(f)
elif f.endswith(".mp3"):
files[f] = AudioSegment.from_mp3(f)
segment = files[f][start:end]
print start, end, f
if layer:
audio = audio.overlay(segment, times=1)
else:
if i > 0:
audio = audio.append(segment, crossfade=crossfade)
else:
audio = audio + segment
if padding > 0:
audio = audio + AudioSegment.silent(duration=padding)
s["duration"] = len(segment)
working_segments.append(s)
except:
# NOTE(review): bare except silently drops any segment that fails.
continue
# Infer export format from the output file extension.
audio.export(out, format=os.path.splitext(out)[1].replace(".", ""))
return working_segments
示例11: get_sound_moved
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def get_sound_moved(sound_base, move):
    """Shift `sound_base` in time by `move` ms, keeping its total length.

    move > 0: trims `move` ms from the end and pads the front with silence
    (audio plays later).  move < 0: trims `move` ms from the front and pads
    the end (audio plays earlier).  move == 0 returns the segment padded
    with two zero-length silences (effectively unchanged).
    """
    dur_left = 0
    dur_right = 0
    sound_trimmed = sound_base
    if move > 0:
        dur_left = math.fabs(move)
        sound_trimmed = sound_base[0:-dur_left]
    elif move < 0:
        dur_right = math.fabs(move)
        # FIX: was sound_base[dur_right-1:-1], which started 1 ms early and
        # dropped the final 1 ms of audio; slice cleanly from dur_right on.
        sound_trimmed = sound_base[dur_right:]
    sound_sil_left = AudioSegment.silent(duration=dur_left)
    sound_sil_right = AudioSegment.silent(duration=dur_right)
    sound_moved = sound_sil_left + sound_trimmed + sound_sil_right
    return sound_moved
示例12: make_audio_from_sections
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def make_audio_from_sections(sections):
    """Mix every moment of every section, back to back, onto a silent bed."""
    # The bed spans the combined duration of all sections.
    total = sum(section_duration(s) for s in sections)
    mix = Audio.silent(duration=total)
    # Walk the moments in section order, overlaying each at the running offset.
    position = 0
    for section in sections:
        for moment in section:
            mix = mix.overlay(moment.audio, position=position)
            position += moment.duration
    return mix
示例13: run_single
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def run_single(input_filename, output_filename, tone_path, insert_silence = True):
    """Append an end tone to a recording, optionally padding with silence.

    The tone is resampled to 16 kHz; with insert_silence, 1 s of lead-in and
    a 3 s gap (both generated at 44.1 kHz) surround the recording.
    """
    end_tone = AudioSegment.from_wav(tone_path).set_frame_rate(16000)
    print('------')
    print('Processing %s...' % (input_filename))
    recording = AudioSegment.from_wav(input_filename)
    if insert_silence:
        lead_in = AudioSegment.silent(duration=1000, frame_rate = 44100)
        gap = AudioSegment.silent(duration=3000, frame_rate = 44100)
        combined = lead_in + recording + gap + end_tone
    else:
        combined = recording + end_tone
    combined.export(output_filename, format="wav")
示例14: test_array_type
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def test_array_type(self):
    """16-bit segments report array type 'h'; widths 1 and 4 map to 'b'/'i'."""
    # Every 16-bit fixture segment should expose a signed-short array type.
    for seg in (self.seg1, self.seg2, self.seg3, self.mp3_seg_party):
        self.assertEqual(seg.array_type, "h")
    # Fresh silence defaults to 16-bit; resampling the width changes the type.
    silence = AudioSegment.silent(50)
    self.assertEqual(silence.array_type, "h")
    self.assertEqual(silence.set_sample_width(1).array_type, "b")
    self.assertEqual(silence.set_sample_width(4).array_type, "i")
示例15: download_and_cut_song
# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import silent [as 别名]
def download_and_cut_song(song_name):
    """Download a song from f.muzis.ru and export three 10 s excerpts joined by silence."""
    response = requests.get("http://f.muzis.ru/{}".format(song_name))
    with open("data/music/full/{}".format(song_name), 'wb') as file:
        file.write(response.content)
    song = AudioSegment.from_mp3("data/music/full/{}".format(song_name))
    half_second = AudioSegment.silent(duration=0.5 * 1000)
    total = len(song)
    # Excerpts: the opening, one third in, and two thirds in — 10 s each.
    opening = song[:10 * 1000]
    middle = song[total / 3:total / 3 + 10 * 1000]
    late = song[total / 3 * 2:total / 3 * 2 + 10 * 1000]
    stitched = opening + half_second + middle + half_second + late
    stitched.export("data/music/cut/cut_{}".format(song_name), format='mp3')