This article collects typical usage examples of the Python method pyaudio.PyAudio.get_default_host_api_info. If you are wondering what PyAudio.get_default_host_api_info does, how to call it, or want to see it used in real code, the curated examples below may help. You can also explore further usage examples of its containing class, pyaudio.PyAudio.
Three code examples of PyAudio.get_default_host_api_info are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
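As a quick orientation before the full examples (a minimal sketch, not one of the collected examples): get_default_host_api_info() returns a dictionary describing the default host API, and the examples below read its 'defaultInputDevice' and 'defaultOutputDevice' entries to choose a device for PyAudio.open().

from pyaudio import PyAudio

pa = PyAudio()
info = pa.get_default_host_api_info()
# info is a dict describing the default host API, e.g. its name, device count,
# and the global indices of its default input and output devices.
print(info['name'], info['deviceCount'])
print(info['defaultInputDevice'], info['defaultOutputDevice'])
pa.terminate()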
Example 1: Audio_play
# Required import: from pyaudio import PyAudio [as alias]
# Or: from pyaudio.PyAudio import get_default_host_api_info [as alias]
import wave
import logging
from pyaudio import PyAudio

def Audio_play(filepath):
    '''
    Play a wav file on the default output device of the default host API.
    '''
    CHUNK = 1024
    wf = wave.open(filepath, 'rb')
    pa = PyAudio()
    # Look up the default output device of the default host API.
    default_output = pa.get_default_host_api_info().get('defaultOutputDevice')
    stream = pa.open(format=pa.get_format_from_width(wf.getsampwidth()),
                     channels=wf.getnchannels(),
                     rate=wf.getframerate(),
                     output=True,
                     output_device_index=default_output)
    # Play at most 15 seconds of audio.
    NUM = int(wf.getframerate() / CHUNK * 15)
    logging.info(">> START TO PLAY AUDIO")
    while NUM:
        data = wf.readframes(CHUNK)
        if not data:          # end of file
            break
        stream.write(data)
        NUM -= 1
    stream.stop_stream()
    stream.close()
    pa.terminate()
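A minimal usage sketch for this example (the wav path is a placeholder):

logging.basicConfig(level=logging.INFO)
Audio_play("test.wav")   # hypothetical path to a wav file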
Example 2: Audio_record_play
# Required import: from pyaudio import PyAudio [as alias]
# Or: from pyaudio.PyAudio import get_default_host_api_info [as alias]
import wave
import logging
from pyaudio import PyAudio, paInt16

def Audio_record_play(seconds, play, filename):
    '''
    Record audio from the default input device and save it to `filename`.
    If `play` is True, a source wav file is played back while recording.
    Sample rate: 44100 Hz, 16 bit.
    '''
    CHUNK = 1024
    CHANNELS = 2
    SAMPLING_RATE = 44100
    FORMAT = paInt16
    NUM = int(SAMPLING_RATE / CHUNK * seconds)
    save_buffer = []
    if play is True:
        # autohandle_directory is a module-level path defined elsewhere in the source.
        source_file = autohandle_directory + '/audio_lib/' + 'source1.wav'
        swf = wave.open(source_file, 'rb')
    # Open the audio stream on the default input device of the default host API.
    pa = PyAudio()
    default_input = pa.get_default_host_api_info().get('defaultInputDevice')
    stream = pa.open(
        format=FORMAT,
        channels=CHANNELS,
        rate=SAMPLING_RATE,
        input=True,
        output=play,
        frames_per_buffer=CHUNK,
        input_device_index=default_input
    )
    logging.info(">> START TO RECORD AUDIO")
    while NUM:
        save_buffer.append(stream.read(CHUNK))
        NUM -= 1
        if play is True:
            data = swf.readframes(CHUNK)
            if not data:      # source file exhausted
                break
            stream.write(data)
    # Close the stream.
    stream.stop_stream()
    stream.close()
    pa.terminate()

    # Save the recorded frames as a wav file.
    def save_wave_file(filename, data):
        wf_save = wave.open(filename, 'wb')
        wf_save.setnchannels(CHANNELS)
        wf_save.setsampwidth(pa.get_sample_size(FORMAT))
        wf_save.setframerate(SAMPLING_RATE)
        wf_save.writeframes(b"".join(data))
        wf_save.close()

    save_wave_file(filename, save_buffer)
    del save_buffer[:]
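A minimal usage sketch (duration and filename are placeholders; pass True as the second argument to also play back the source file while recording):

logging.basicConfig(level=logging.INFO)
Audio_record_play(5, False, "recorded.wav")   # record 5 seconds, no playback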
Example 3: MainWindow
# Required import: from pyaudio import PyAudio [as alias]
# Or: from pyaudio.PyAudio import get_default_host_api_info [as alias]
import time
import numpy
from pyaudio import PyAudio, paFloat32, paContinue
from PyQt4 import QtGui  # Qt4-style bindings (PyQt4 or PySide)
# FigureWidget, APIListModel, DeviceListModel and
# device_index_to_host_api_device_index are defined elsewhere in the source module.

class MainWindow(QtGui.QMainWindow):
    """ A Qt QMainWindow that is home to a matplotlib figure and two combo
    boxes. The combo boxes allow the selection of a sound card by API and
    name. The figure will show the waveform of the audio input of that sound
    card.
    """
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        # Monkey patch missing methods into PyAudio.
        PyAudio.device_index_to_host_api_device_index = (
            device_index_to_host_api_device_index)
        self.pyaudio = PyAudio()
        # Create the UI widgets.
        central_widget = QtGui.QWidget(self)
        self.setCentralWidget(central_widget)
        main_layout = QtGui.QVBoxLayout(central_widget)
        self.figure = FigureWidget()
        main_layout.addWidget(self.figure)
        horizontal_layout = QtGui.QHBoxLayout()
        main_layout.addLayout(horizontal_layout)
        api_list = QtGui.QComboBox()
        api_list.setModel(APIListModel(self.pyaudio))
        horizontal_layout.addWidget(api_list)
        device_list = QtGui.QComboBox()
        device_list_model = DeviceListModel(self.pyaudio)
        device_list.setModel(device_list_model)
        horizontal_layout.addWidget(device_list)
        # Connect the moving parts.
        api_list.currentIndexChanged.connect(device_list_model.set_api_index)
        api_list.currentIndexChanged.connect(self.change_api_index)
        device_list.currentIndexChanged.connect(self.change_device_index)
        # Tell all widgets to use the default audio device.
        default_api_index = (
            self.pyaudio.get_default_input_device_info()["hostApi"])
        default_device_index = (
            self.pyaudio.device_index_to_host_api_device_index(
                self.pyaudio.get_default_host_api_info()["defaultInputDevice"],
                default_api_index))
        self.api_index = default_api_index
        self.device_index = default_device_index
        self.stream = None
        api_list.setCurrentIndex(default_api_index)
        device_list_model.set_api_index(default_api_index)
        device_list.setCurrentIndex(default_device_index)

    def closeEvent(self, event):
        """ Called by Qt when the program quits. Stops audio processing. """
        if self.stream:
            self.stream.close()
        # Wait for audio processing to clear its buffers.
        time.sleep(0.1)

    def change_api_index(self, api_index):
        """ Restarts audio processing with the new API index. """
        self.api_index = api_index
        self.restart_audio()

    def change_device_index(self, device_index):
        """ Restarts audio processing with the new device index. """
        self.device_index = device_index
        self.restart_audio()

    def restart_audio(self):
        """ Restarts audio processing with current API and device indices. """
        device_info = (
            self.pyaudio.get_device_info_by_host_api_device_index(
                self.api_index, self.device_index))
        self.num_channels = device_info['maxInputChannels']
        if self.stream:
            self.stream.close()
        self.stream = self.pyaudio.open(
            rate=int(device_info['defaultSampleRate']),
            channels=self.num_channels,
            input_device_index=device_info['index'],
            format=paFloat32,
            input=True,
            stream_callback=self.audio_callback)
        self.figure.create_plots(self.num_channels)

    def audio_callback(self, in_data, frame_count, time_info, status_flags):
        """ Called by pyaudio whenever audio data is available.
        Updates the matplotlib figure.
        """
        data = numpy.frombuffer(in_data, dtype=numpy.float32)
        data = data.reshape(-1, self.num_channels)
        self.figure.draw(data)
        return (None, paContinue)
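A minimal launcher sketch for this example, assuming FigureWidget, APIListModel, DeviceListModel, and device_index_to_host_api_device_index are available from the original module (the application setup itself is hypothetical and mirrors standard Qt4 boilerplate):

import sys
from PyQt4 import QtGui  # Qt4-style bindings, as assumed above

if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    window = MainWindow()   # requires the helper classes from the original module
    window.show()
    sys.exit(app.exec_())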