

Python wave.open Method Code Examples

This article compiles typical usage examples of Python's wave.open method. If you are wondering how wave.open is used in practice, how to call it, or what real-world examples look like, the curated code samples below should help. You can also explore other usage examples from the wave module.


The following 15 code examples of wave.open are shown, sorted by popularity by default.
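Before the collected examples, here is a minimal sketch of the read/write pattern most of them follow (the file names "input.wav" and "copy.wav" are placeholders): open a WAV file in 'rb' mode, query its parameters, read the frames, and write them back out with the same parameters. Note that wave.open objects support the with statement in Python 3.4+.

import wave

# Minimal sketch of typical wave.open usage; file names are placeholders.
with wave.open("input.wav", "rb") as src:          # read mode
    nchannels = src.getnchannels()                 # number of channels
    sampwidth = src.getsampwidth()                 # bytes per sample
    framerate = src.getframerate()                 # sample rate in Hz
    frames = src.readframes(src.getnframes())      # raw PCM bytes

with wave.open("copy.wav", "wb") as dst:           # write mode
    dst.setnchannels(nchannels)
    dst.setsampwidth(sampwidth)
    dst.setframerate(framerate)
    dst.writeframes(frames)                        # header is finalized when the file closes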

Example 1: play_audio_file

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def play_audio_file(fname=DETECT_DING):
    """Simple callback function to play a wave file. By default it plays
    a Ding sound.

    :param str fname: wave file name
    :return: None
    """
    ding_wav = wave.open(fname, 'rb')
    ding_data = ding_wav.readframes(ding_wav.getnframes())
    audio = pyaudio.PyAudio()
    stream_out = audio.open(
        format=audio.get_format_from_width(ding_wav.getsampwidth()),
        channels=ding_wav.getnchannels(),
        rate=ding_wav.getframerate(), input=False, output=True)
    stream_out.start_stream()
    stream_out.write(ding_data)
    time.sleep(0.2)
    stream_out.stop_stream()
    stream_out.close()
    audio.terminate() 
Developer: warchildmd, Project: google-assistant-hotword-raspi, Lines: 22, Source: snowboydecoder.py

Example 2: load_settings

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def load_settings(session_path):
    """
    Load PyBpod Settings files (.json).

    :param session_path: Absolute path of session folder
    :type session_path: str
    :return: Settings dictionary
    :rtype: dict
    """
    if session_path is None:
        return
    path = Path(session_path).joinpath("raw_behavior_data")
    path = next(path.glob("_iblrig_taskSettings.raw*.json"), None)
    if not path:
        return None
    with open(path, 'r') as f:
        settings = json.load(f)
    if 'IBLRIG_VERSION_TAG' not in settings.keys():
        settings['IBLRIG_VERSION_TAG'] = ''
    return settings 
Developer: int-brain-lab, Project: ibllib, Lines: 24, Source: raw_data_loaders.py

Example 3: load_mic

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def load_mic(session_path):
    """
    Load Microphone wav file to np.array of len nSamples

    :param session_path: Absolute path of session folder
    :type session_path: str
    :return: An array of values of the sound waveform
    :rtype: numpy.array
    """
    if session_path is None:
        return
    path = Path(session_path).joinpath("raw_behavior_data")
    path = next(path.glob("_iblrig_micData.raw*.wav"), None)
    if not path:
        return None
    fp = wave.open(str(path))  # wave.open expects a file name string or file object, not a pathlib.Path
    nchan = fp.getnchannels()
    N = fp.getnframes()
    dstr = fp.readframes(N * nchan)
    data = np.frombuffer(dstr, np.int16)
    data = np.reshape(data, (-1, nchan))
    return data 
Developer: int-brain-lab, Project: ibllib, Lines: 24, Source: raw_data_loaders.py

Example 4: play_wav

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def play_wav(fname, chunk=CHUNK):
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()

    # open stream based on the wave object which has been input.
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)

    # read data (based on the chunk size)
    data = wf.readframes(chunk)

    # play stream (looping from beginning of file to the end)
    while len(data) > 0:
        # writing to the stream is what *actually* plays the sound.
        stream.write(data)
        data = wf.readframes(chunk)

    # cleanup stuff
    stream.close()
    p.terminate() 
Developer: gigagenie, Project: ai-makers-kit, Lines: 25, Source: _audio.py

Example 5: __enter__

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def __enter__(self):
		self._audio_interface = pyaudio.PyAudio()
		self._audio_stream = self._audio_interface.open(
			format=pyaudio.paInt16,
			channels=1, rate=self._rate,
			input=True, frames_per_buffer=self._chunk,
			# Run the audio stream asynchronously to fill the buffer object.
			# This is necessary so that the input device's buffer doesn't
			# overflow while the calling thread makes network requests, etc.
			stream_callback=self._fill_buffer,
		)

		self.closed = False

		return self

	#def __exit__(self, type, value, traceback): 
Developer: gigagenie, Project: ai-makers-kit, Lines: 19, Source: proj2_yt_mvp.py

Example 6: getText2VoiceStream

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def getText2VoiceStream(inText, inFileName):

    channel = grpc.secure_channel('{}:{}'.format(HOST, PORT), getCredentials())
    stub = gigagenieRPC_pb2_grpc.GigagenieStub(channel)

    message = gigagenieRPC_pb2.reqText()
    message.lang = 0
    message.mode = 0
    message.text = inText
    writeFile = open(inFileName, 'wb')
    for response in stub.getText2VoiceStream(message):
        if response.HasField("resOptions"):
            print ("ResVoiceResult: %d" %(response.resOptions.resultCd))
        if response.HasField("audioContent"):
            print ("Audio Stream")
            writeFile.write(response.audioContent)
    writeFile.close() 
Developer: gigagenie, Project: ai-makers-kit, Lines: 19, Source: ex4_getText2VoiceStream.py

Example 7: play_file

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def play_file(fname):
    # create an audio object
    wf = wave.open(fname, 'rb')
    p = pyaudio.PyAudio()
    chunk = 1024

    # open stream based on the wave object which has been input.
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)

    # read data (based on the chunk size)
    data = wf.readframes(chunk)

    # play stream (looping from beginning of file to the end)
    while len(data) > 0:
        # writing to the stream is what *actually* plays the sound.
        stream.write(data)
        data = wf.readframes(chunk)

    # cleanup stuff
    stream.close()
    p.terminate() 
Developer: gigagenie, Project: ai-makers-kit, Lines: 26, Source: ex4_getText2VoiceStream.py

Example 8: _play_audio

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def _play_audio(path, delay):
        try:
            time.sleep(delay)
            wf = wave.open(path, 'rb')
            p = pyaudio.PyAudio()
            stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                            channels=wf.getnchannels(),
                            rate=wf.getframerate(),
                            output=True)
            
            data = wf.readframes(TextToSpeech.CHUNK)
            
            while data:
                stream.write(data)
                data = wf.readframes(TextToSpeech.CHUNK)
        
            stream.stop_stream()
            stream.close()

            p.terminate()
            return
        except:
            pass 
Developer: junzew, Project: HanTTS, Lines: 25, Source: main.py

Example 9: save_wav

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def save_wav(self, chunk_id, model, body, frame_rate):
        checksum = hashlib.md5(body).hexdigest()  # hashlib replaces the legacy Python 2 md5 module
        directory = "%s/%s" % (model, checksum[:2])
        self.create_directories_if_needed(self.path + "/" + directory)

        path = '%s/%s/%s.wav' % (self.path, directory, checksum)
        url = '/static/data/%s/%s.wav' % (directory, checksum)

        wav = wave.open(path, 'w')
        wav.setnchannels(1)
        wav.setsampwidth(2)
        wav.setframerate(frame_rate)
        wav.writeframes(body)
        wav.close()

        return (path, url) 
Developer: UFAL-DSG, Project: cloud-asr, Lines: 18, Source: models.py

Example 10: cutoff

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def cutoff(input_wav, output_wav):
    '''
    input_wav --- input wav file path
    output_wav --- output wav file path
    '''

    # read input wave file and get parameters.
    with wave.open(input_wav, 'r') as fw:
        params = fw.getparams()
        # print(params)
        nchannels, sampwidth, framerate, nframes = params[:4]

        strData = fw.readframes(nframes)
        waveData = np.frombuffer(strData, dtype=np.int16)  # np.fromstring is deprecated for binary data

        max_v = np.max(abs(waveData))
        for i in range(waveData.shape[0]):
            if abs(waveData[i]) > 0.08 * max_v:
                break

        for j in range(waveData.shape[0] - 1, 0, -1):
            if abs(waveData[j]) > 0.08 * max_v:
                break

    # write new wav file
    with wave.open(output_wav, 'w') as fw:
        params = list(params)
        params[3] = nframes - i - (waveData.shape[0] - 1 - j)
        fw.setparams(params)
        fw.writeframes(strData[2 * i:2 * (j + 1)]) 
Developer: KinglittleQ, Project: GST-Tacotron, Lines: 32, Source: cutoff.py

Example 11: _get_timit

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def _get_timit(directory):
  """Extract TIMIT datasets to directory unless directory/timit exists."""
  if os.path.exists(os.path.join(directory, "timit")):
    return

  assert FLAGS.timit_paths
  for path in FLAGS.timit_paths.split(","):
    with tf.gfile.GFile(path) as f:
      with tarfile.open(fileobj=f, mode="r:gz") as timit_compressed:
        timit_compressed.extractall(directory) 
Developer: akzaidi, Project: fine-lm, Lines: 12, Source: audio.py

Example 12: _get_audio_data

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def _get_audio_data(filepath):
  # Construct a true .wav file.
  out_filepath = filepath[:-len(".WAV")] + ".wav"  # swap the extension; str.strip would drop characters, not the suffix
  # Assumes sox is installed on system. Sox converts from NIST SPHERE to WAV.
  call(["sox", filepath, out_filepath])
  wav_file = wave.open(out_filepath, "rb")  # open in binary mode
  frame_count = wav_file.getnframes()
  byte_array = wav_file.readframes(frame_count)
  data = list(byte_array)  # in Python 3, iterating bytes yields ints directly
  return data, frame_count, wav_file.getsampwidth(), wav_file.getnchannels() 
Developer: akzaidi, Project: fine-lm, Lines: 12, Source: audio.py

Example 13: get_cur_audio_length

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def get_cur_audio_length():
    wav_file = wave.open(f"{dir_utils.get_perm_med_dir()}/sound_board/{settings.current_track}.wav", 'r')
    frames = wav_file.getnframes()
    rate = wav_file.getframerate()
    duration = frames / float(rate)
    wav_file.close()
    return duration 
Developer: DuckBoss, Project: JJMumbleBot, Lines: 9, Source: sound_board_utility.py

Example 14: get_audio_length

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def get_audio_length(file_name):
    try:
        wav_file = wave.open(f"{dir_utils.get_perm_med_dir()}/sound_board/{file_name}.wav", 'r')
        frames = wav_file.getnframes()
        rate = wav_file.getframerate()
        duration = frames / float(rate)
        wav_file.close()
        if not duration:
            return -1
    except Exception:
        return -1
    return duration 
Developer: DuckBoss, Project: JJMumbleBot, Lines: 14, Source: sound_board_utility.py

Example 15: get_cur_audio_length

# Required module: import wave [as alias]
# Or: from wave import open [as alias]
def get_cur_audio_length():
    wav_file = wave.open(f"{dir_utils.get_perm_med_dir()}/text_to_speech/{settings.current_track}.oga", 'r')
    frames = wav_file.getnframes()
    rate = wav_file.getframerate()
    duration = frames / float(rate)
    wav_file.close()
    return duration 
Developer: DuckBoss, Project: JJMumbleBot, Lines: 9, Source: text_to_speech_utility.py


Note: The wave.open examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code; do not reproduce this article without permission.