当前位置: 首页>>代码示例>>Python>>正文


Python AudioSegment.from_wav方法代码示例

本文整理汇总了Python中pydub.AudioSegment.from_wav方法的典型用法代码示例。如果您正苦于以下问题:Python AudioSegment.from_wav方法的具体用法?Python AudioSegment.from_wav怎么用?Python AudioSegment.from_wav使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在pydub.AudioSegment的用法示例。


在下文中一共展示了AudioSegment.from_wav方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: addFrameWithTransition

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def addFrameWithTransition(self, image_file, audio_file, transition_file):
        """Append a slide frame plus a transition clip to the movie plan.

        image_file: path of the still image shown while the audio plays.
        audio_file: path of the narration .wav; on any load error the
            bundled pause.wav asset is substituted.
        transition_file: path of the transition video; its duration is read
            via MediaInfo and recorded alongside the new frame index.
        """
        media_info = MediaInfo.parse(transition_file)
        duration_in_ms = media_info.tracks[0].duration
        # Normalize Windows separators so downstream tooling sees POSIX paths.
        audio_file = audio_file.replace("\\", "/")
        try:
            audio_clip = AudioSegment.from_wav(audio_file)
            sound = sf.SoundFile(audio_file)
        except Exception as e:
            # Best-effort fallback: substitute the silence asset so the
            # video build can continue even if one narration file is bad.
            print(e)
            audio_clip = AudioSegment.from_wav("%s/pause.wav" % settings.assetPath)
            sound = sf.SoundFile("%s/pause.wav" % settings.assetPath)
        try:
            duration = len(sound) / sound.samplerate
        finally:
            # fix: the SoundFile handle was previously never closed (leak)
            sound.close()
        self.imageframes.append(image_file)
        self.audiofiles.append(audio_clip)
        self.durations.append(duration)
        self.transitions.append((transition_file, len(self.imageframes) - 1, duration_in_ms / 1000))
开发者ID:HA6Bots,项目名称:Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader,代码行数:19,代码来源:generatemovie.py

示例2: addFrameWithTransitionAndPause

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def addFrameWithTransitionAndPause(self, image_file, audio_file, transition_file, pause):
        """Append a slide frame, transition, and a trailing pause (ms).

        image_file: path of the still image for this frame.
        audio_file: path of the narration .wav; on load failure a silent
            clip of `pause` milliseconds is substituted.
        transition_file: transition video whose duration is read via MediaInfo.
        pause: extra silence appended to both the frame duration and the
            transition timestamp, in milliseconds.
        """
        media_info = MediaInfo.parse(transition_file)
        duration_in_ms = media_info.tracks[0].duration
        try:
            # fix: SoundFile was previously opened *before* the try block,
            # so an unreadable file crashed before the fallback could run;
            # it was also never closed.
            audio_clip = AudioSegment.from_wav(audio_file)
            sound = sf.SoundFile(audio_file)
            try:
                duration = len(sound) / sound.samplerate
            finally:
                sound.close()
        except Exception:
            print("error with frame audio transition pause for %s" % audio_file)
            audio_clip = AudioSegment.silent(duration=pause)
            # The silent clip contributes no narration time of its own; the
            # pause component is added below.
            duration = 0
        self.imageframes.append(image_file)
        self.audiofiles.append(audio_clip)
        self.durations.append(duration + (pause/1000))
        self.transitions.append((transition_file, len(self.imageframes) - 1, (duration_in_ms / 1000) + (pause/1000)))
开发者ID:HA6Bots,项目名称:Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader,代码行数:18,代码来源:generatemovie.py

示例3: segment_audio

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def segment_audio(self, file_name, segments):
        """Cut fixed-length windows out of a .wav around each segment.

        file_name: source .wav file.
        segments: iterable of (start_ms, end_ms) pairs; each window of
            self.audio_length milliseconds is centered on the segment's
            midpoint (clamped at 0) and exported to self.output_dir.
        """
        audio_data = AudioSegment.from_wav(file_name)
        # fix: compute the prefix once up front — the original rebound
        # `file_name` to the *output* path inside the loop, so every
        # export after the first used a corrupted prefix.
        file_prefix = os.path.basename(file_name).split('.')[0]

        for segment in segments:
            assert segment[0] < segment[1]
            center = round((segment[0] + segment[1]) / 2)

            padding = round(self.audio_length / 2)
            # Clamp so the window never starts before the file begins.
            if center < padding:
                start_time = 0
            else:
                start_time = center - padding

            end_time = start_time + self.audio_length

            audio_segment = audio_data[start_time:end_time]

            out_path = os.path.join(self.output_dir, file_prefix + "_" + str(start_time) + "~" + str(end_time) + ".wav")
            print(out_path)

            audio_segment.export(out_path, format="wav")
开发者ID:castorini,项目名称:honk,代码行数:25,代码来源:youtube_processor.py

示例4: extract_words

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def extract_words(files):
    ''' Extracts individual words from files and exports them to individual files.

    files: list of .mp3/.wav paths that have matching transcription data
        readable by convert_timestamps(). Each word is rendered as its own
        file under extracted_words/, with a numeric suffix appended on
        name collisions.
    '''
    output_directory = 'extracted_words'
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    for f in files:
        file_format = None
        source_segment = None
        if f.lower().endswith('.mp3'):
            file_format = 'mp3'
            source_segment = AudioSegment.from_mp3(f)
        elif f.lower().endswith('.wav'):
            file_format = 'wav'
            source_segment = AudioSegment.from_wav(f)
        # fix: the original condition was `if not file_format or source_segment:`
        # which fired for *supported* files and never skipped unsupported
        # ones (crashing later on source_segment=None).
        if source_segment is None:
            print('Unsupported audio format for ' + f)
            continue
        sentences = convert_timestamps(files)
        for s in sentences:
            for word in s['words']:
                # word is (text, start_seconds, end_seconds); times → ms.
                start = float(word[1]) * 1000
                end = float(word[2]) * 1000
                word = word[0]
                total_time = end - start
                audio = AudioSegment.silent(duration=total_time)
                audio = audio.overlay(source_segment[start:end])
                number = 0
                output_path = None
                while True:
                    output_filename = word
                    if number:
                        output_filename += "_" + str(number)
                    output_filename = output_filename + '.' + file_format
                    output_path = os.path.join(output_directory, output_filename)
                    if not os.path.exists(output_path):
                        # this file doesn't exist, so we can continue
                        break
                    # file already exists, increment name and try again
                    number += 1
                print('Exporting to: ' + output_path)
                audio.export(output_path, format=file_format)
开发者ID:antiboredom,项目名称:audiogrep,代码行数:43,代码来源:audiogrep.py

示例5: addFrame

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def addFrame(self, image_file, audio_file):
        """Append a slide frame whose duration matches its narration audio.

        image_file: path of the still image for this frame.
        audio_file: path of the narration .wav; on any load error the
            bundled pause.wav asset is substituted.
        """
        # Normalize Windows separators so downstream tooling sees POSIX paths.
        audio_file = audio_file.replace("\\", "/")
        try:
            audio_clip = AudioSegment.from_wav(audio_file)
            sound = sf.SoundFile(audio_file)
        except Exception as e:
            # Best-effort fallback so one bad narration file doesn't abort
            # the whole video build.
            print(e)
            audio_clip = AudioSegment.from_wav("%s/pause.wav" % settings.assetPath)
            sound = sf.SoundFile("%s/pause.wav" % settings.assetPath)

        try:
            duration = len(sound) / sound.samplerate
        finally:
            # fix: the SoundFile handle was previously never closed (leak)
            sound.close()
        self.imageframes.append(image_file)
        self.audiofiles.append(audio_clip)
        self.durations.append(duration)
开发者ID:HA6Bots,项目名称:Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader,代码行数:16,代码来源:generatemovie.py

示例6: addFrameWithPause

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def addFrameWithPause(self, image_file, audio_file, pause):
        """Append a slide frame followed by `pause` milliseconds of silence.

        image_file: path of the still image for this frame.
        audio_file: path of the narration .wav.
        pause: trailing silence appended to the clip, in milliseconds.
        """
        # Normalize Windows separators so downstream tooling sees POSIX paths.
        audio_file = audio_file.replace("\\", "/")
        audio_clip = AudioSegment.from_wav(audio_file)
        sound = sf.SoundFile(audio_file)
        try:
            duration = (len(sound) / sound.samplerate) + pause / 1000
        finally:
            # fix: the SoundFile handle was previously never closed (leak)
            sound.close()
        audio_clip_with_pause = audio_clip + AudioSegment.silent(duration=pause)
        self.imageframes.append(image_file)
        self.audiofiles.append(audio_clip_with_pause)
        self.durations.append(duration)
开发者ID:HA6Bots,项目名称:Automatic-Youtube-Reddit-Text-To-Speech-Video-Generator-and-Uploader,代码行数:11,代码来源:generatemovie.py

示例7: audio_generator

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def audio_generator(dict_dir, text, output_dest):
    """Concatenate per-word .wav clips into one audio file.

    dict_dir: directory containing myDict.py (a literal dict mapping word
        -> wav filename) and the referenced .wav files.
    text: space-separated words to synthesize; words missing from the
        dictionary are silently skipped.
    output_dest: directory where outputN.wav is written, N chosen as the
        first unused suffix.

    Raises:
        Exception: if none of the requested words exist in the dictionary.
    """
    with open(dict_dir + "/myDict.py") as f:
        myDict = ast.literal_eval(f.read())

    # Collect a clip per known word, preserving input order.
    # (idiom: membership test on the dict directly, not .keys())
    mainList = [AudioSegment.from_wav(dict_dir + "/" + myDict[word])
                for word in text.split(" ") if word in myDict]

    # Check to see if at least one word was generated
    if not mainList:
        raise Exception('\033[91m' + "None of the words you entered was" +
                        " spoken by your figure." + '\033[0m')

    # If a file with the default name exists, create a new name with a
    # new suffix
    res = 0
    while os.path.exists(output_dest + "/output" + str(res) + ".wav"):
        res += 1

    # Concatenate selected audio words
    mainAudio = mainList[0]
    for clip in mainList[1:]:
        mainAudio += clip

    # Export the joined audio
    mainAudio.export(output_dest + '/output' + str(res) + '.wav', format="wav")

    if os.path.exists(output_dest + "/output" + str(res) + ".wav"):
        print('\033[94m' + "Speech-Hacker: " +
              "Your audio was generated at: " + output_dest + "/output" +
              str(res) + ".wav" + '\033[0m')
    else:
        print("Speech-Hacker: " '\033[91m' +
              "Failed to generate your requested audio." + '\033[0m')
开发者ID:ParhamP,项目名称:Speech-Hacker,代码行数:42,代码来源:generator.py

示例8: synthesize

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def synthesize(self, text, src, dst):
        """
        Synthesize .wav from text
        src is the folder that contains all syllables .wav files
        dst is the destination folder to save the synthesized file
        """
        print("Synthesizing ...")
        cursor = 0            # playback position for the next overlay (ms)
        step = 355            # milliseconds advanced per syllable
        pause = 500           # silence inserted for punctuation (ms)
        syllables = lazy_pinyin(text, style=pypinyin.TONE3)

        # Start from pure silence long enough for the whole text
        # (each character budgeted ~500 ms).
        result = AudioSegment.silent(duration=500 * len(text))
        for syllable in syllables:
            path = src + syllable + ".wav"
            # Punctuation: overlay a 500 ms gap instead of a recording.
            if syllable in TextToSpeech.punctuation:
                result = result.overlay(AudioSegment.silent(duration=pause),
                                        position=cursor)
                cursor += step
                continue
            # Syllables without a recording are simply skipped.
            if not Path(path).is_file():
                continue
            result = result.overlay(AudioSegment.from_wav(path), position=cursor)
            cursor += step

        if not os.path.exists(dst):
            os.makedirs(dst)

        result.export(dst + "generated.wav", format="wav")
        print("Exported.")
开发者ID:junzew,项目名称:HanTTS,代码行数:38,代码来源:main.py

示例9: load_audio_file

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def load_audio_file(filename, resize=False):
    """Load an audio file and normalize it to mono, 16-bit, `samplerate` Hz.

    filename: path whose extension selects the pydub loader
        (.mp3/.wav/.ogg/.flac/.3gp/.3g are supported).
    resize: accepted for interface compatibility; not used here.

    Returns:
        (sound, duration_seconds) on success, (None, None) on any failure
        or unsupported extension.
    """
    sound = None
    try:
        lower = filename.lower()
        if lower.endswith('.mp3'):
            sound = AudioSegment.from_mp3(filename)
        elif lower.endswith('.wav'):
            sound = AudioSegment.from_wav(filename)
        elif lower.endswith('.ogg'):
            sound = AudioSegment.from_ogg(filename)
        elif lower.endswith('.flac'):
            sound = AudioSegment.from_file(filename, "flac")
        elif lower.endswith('.3gp') or lower.endswith('.3g'):
            sound = AudioSegment.from_file(filename, "3gp")

        # fix: unsupported extensions previously left sound=None and the
        # resulting AttributeError was swallowed by a bare except, making
        # "unsupported format" indistinguishable from a real load error.
        if sound is None:
            print("Couldn't load file")
            return None, None

        sound = sound.set_frame_rate(samplerate)
        sound = sound.set_channels(1)
        sound = sound.set_sample_width(2)
        duration = sound.duration_seconds
    except Exception:
        # Narrowed from a bare except: so Ctrl-C / SystemExit propagate.
        print("Couldn't load file")
        return None, None

    return sound, duration
开发者ID:nyumaya,项目名称:nyumaya_audio_recognition,代码行数:29,代码来源:test_accuracy.py

示例10: sinsyFix

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def sinsyFix(wavPath, tempo):
	"""Strip the four silent lead-in beats Sinsy adds, rewriting the file in place."""
	lead_in_ms = int(1000 * 4 * 60 / tempo)  # duration of 4 beats at this tempo
	trimmed = AudioSegment.from_wav(wavPath)[lead_in_ms:]
	trimmed.export(wavPath, format="wav")
开发者ID:mathigatti,项目名称:midi2voice,代码行数:6,代码来源:midi2voice.py

示例11: get_durations_from_dir

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def get_durations_from_dir(audio_dir, file_extension='.wav'):
    """Collect the duration (seconds) of every matching audio file.

    audio_dir: directory walked recursively.
    file_extension: suffix that selects files (default '.wav').

    Returns:
        np.ndarray of per-file durations in seconds.
    """
    durations = list()
    for root, dirs, filenames in os.walk(audio_dir):
        for file_name in filenames:
            # fix: substring test (`in`) also matched names like
            # 'foo.wav.bak'; only a true suffix match is intended.
            if file_name.endswith(file_extension):
                file_path = os.path.join(root, file_name)
                audio = AudioSegment.from_wav(file_path)
                duration = audio.duration_seconds
                durations.append(duration)
    return np.array(durations)
开发者ID:dessa-oss,项目名称:fake-voice-detection,代码行数:12,代码来源:utils.py

示例12: slice_audio

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def slice_audio(audio_file, end=3, num_slices=10):
    """Slice a .wav into `num_slices` consecutive chunks of `end` seconds.

    audio_file: path of the source .wav.
    end: chunk length in seconds (default 3, matching the original API).
    num_slices: how many consecutive chunks to take (default 10, the
        previously hard-coded count).

    Returns:
        list of pydub AudioSegment chunks.
    """
    chunk_ms = end * 1000
    audio = AudioSegment.from_wav(audio_file)
    slices = []

    start = 0
    for _ in range(num_slices):
        # fix: the original always advanced by a hard-coded 3000 ms, so any
        # `end` other than 3 produced overlapping or gapped chunks; step by
        # the actual chunk length instead.
        slices.append(audio[start:start + chunk_ms])
        start += chunk_ms

    return slices
开发者ID:nlopez99,项目名称:DeepMusicClassification,代码行数:16,代码来源:dataset_tools.py

示例13: synthesize

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def synthesize(self, input_text='', output_wav_path=''):
        """
        Synthesize .wav from text
        input_text: the folder that contains all syllables .wav files
        output_wav_path: the destination folder to save the synthesized file
        """
        cursor = 0            # overlay position for the next syllable (ms)
        step = 355            # milliseconds advanced per syllable
        pause = 500           # silence inserted for punctuation (ms)
        syllables = lazy_pinyin(input_text, style=pypinyin.TONE3)

        # Start from pure silence long enough for the whole text
        # (each character budgeted ~500 ms).
        result = AudioSegment.silent(duration=500 * len(input_text))
        for syllable in syllables:
            path = os.path.join(self.syllables_dir, syllable + ".wav")
            # Punctuation marks become a 500 ms gap rather than a recording.
            if syllable in self.punctuation:
                result = result.overlay(AudioSegment.silent(duration=pause),
                                        position=cursor)
                cursor += step
                continue
            # Syllables without a recording are simply skipped.
            if not Path(path).is_file():
                continue
            result = result.overlay(AudioSegment.from_wav(path), position=cursor)
            cursor += step

        if not output_wav_path:
            output_wav_path = 'out.wav'

        result.export(output_wav_path, format="wav")
        default_logger.debug("Exported:" + output_wav_path)
        return result
开发者ID:shibing624,项目名称:parrots,代码行数:36,代码来源:tts.py

示例14: segment_mosei

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def segment_mosei(args):
    """Cut MOSEI utterance segments out of full-session wavs into 16 kHz mono flac.

    args.output_path must already contain mosei_no_semi.csv (one row per
    utterance with key/filename/start/end columns); args.data_path holds
    the source .wav files. Interactive: prompts before removing an
    existing flac/ output directory.
    """
    output_dir = args.output_path
    mosei_summary = os.path.join(output_dir, 'mosei_no_semi.csv')
    flac_dir = os.path.join(output_dir, 'flac')
    assert os.path.exists(mosei_summary), 'Output path should already be created with a mosei_no_semi.csv inside it'
    # Recreate the output tree, asking the user before destroying old data.
    for target_dir in [flac_dir]:
        if os.path.exists(target_dir):
            decision = input(f'{target_dir} already exists. Remove it? [Y/N]: ')
            if decision.upper() == 'Y':
                shutil.rmtree(target_dir)
                print(f'{target_dir} removed')
            else:
                print('Abort')
                exit(0)
        os.makedirs(target_dir)

    df = pd.read_csv(mosei_summary)

    # One row per utterance: slice [start, end) seconds out of the session
    # wav and export as mono 16-bit 16 kHz flac named by the utterance key.
    for index, row in df.iterrows():
        underscore = row.key
        wavname = f'{row.filename}.wav'
        wavpath = os.path.join(args.data_path, wavname)
        assert os.path.exists(wavpath), f'wav not exists: {wavpath}'
        wav = AudioSegment.from_wav(wavpath)

        # pydub slices in milliseconds; the CSV stores seconds.
        start = int(row.start * 1000)
        end = int(row.end * 1000)
        assert start >= 0, f'{underscore} has negative start time'
        assert end >= 0, f'{underscore} has negative end time'
        seg_wav = wav[start:end]
        seg_flacpath = os.path.join(flac_dir, f'{underscore}.flac')
        seg_wav.export(seg_flacpath, format='flac', parameters=['-ac', '1', '-sample_fmt', 's16', '-ar', '16000'])


########
# MAIN #
######## 
开发者ID:andi611,项目名称:Self-Supervised-Speech-Pretraining-and-Representation-Learning,代码行数:39,代码来源:segment_mosei.py

示例15: compose

# 需要导入模块: from pydub import AudioSegment [as 别名]
# 或者: from pydub.AudioSegment import from_wav [as 别名]
def compose(segments, out='out.mp3', padding=0, crossfade=0, layer=False):
    '''Stitches together a new audiotrack.

    segments: dicts with 'start'/'end' (seconds) and 'file' (source path,
        possibly suffixed with '.transcription.txt'); each is cut from its
        source and appended (or overlaid when layer=True).
    out: output path; its extension selects the export format.
    padding: silence (ms) appended after each segment.
    crossfade: crossfade (ms) between consecutive segments.
    layer: overlay all segments on a silent bed instead of concatenating.

    Returns the list of segments that were successfully placed, each with a
    'duration' key (ms) added.
    '''
    files = {}

    working_segments = []

    audio = AudioSegment.empty()

    if layer:
        # Silent bed long enough for the longest segment.
        total_time = max([s['end'] - s['start'] for s in segments]) * 1000
        audio = AudioSegment.silent(duration=total_time)

    for i, s in enumerate(segments):
        try:
            start = s['start'] * 1000
            end = s['end'] * 1000
            f = s['file'].replace('.transcription.txt', '')
            # Load each source file at most once.
            if f not in files:
                if f.endswith('.wav'):
                    files[f] = AudioSegment.from_wav(f)
                elif f.endswith('.mp3'):
                    files[f] = AudioSegment.from_mp3(f)

            segment = files[f][start:end]

            print(start, end, f)

            if layer:
                audio = audio.overlay(segment, times=1)
            else:
                if i > 0:
                    audio = audio.append(segment, crossfade=crossfade)
                else:
                    audio = audio + segment

            if padding > 0:
                audio = audio + AudioSegment.silent(duration=padding)

            s['duration'] = len(segment)
            working_segments.append(s)
        except Exception:
            # fix: was a bare except, which also swallowed KeyboardInterrupt
            # and SystemExit. Deliberately best-effort: a bad or unsupported
            # segment is skipped rather than aborting the whole compose.
            continue

    audio.export(out, format=os.path.splitext(out)[1].replace('.', ''))
    return working_segments
开发者ID:antiboredom,项目名称:audiogrep,代码行数:48,代码来源:audiogrep.py


注:本文中的pydub.AudioSegment.from_wav方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。