This article collects typical usage examples of pyaudio.paInt16 in Python. If you have been wondering what pyaudio.paInt16 is for and how to use it, the hand-picked code examples below should help. Note that paInt16 is not a method but the 16-bit signed-integer sample-format constant defined at the top level of the pyaudio module; see the pyaudio module itself for further usage examples.
The 15 code examples below are ordered by popularity by default.
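Before the collected examples, here is a minimal self-contained sketch of the most common paInt16 pattern: blocking capture of a few seconds of 16-bit mono audio. The rate, chunk size, and duration are illustrative values, not taken from any example below.

import pyaudio

RATE = 16000   # samples per second
CHUNK = 1024   # frames per buffer

p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,   # 16-bit signed integer samples
                channels=1,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK)

frames = []
for _ in range(int(RATE / CHUNK * 3)):    # roughly 3 seconds of audio
    frames.append(stream.read(CHUNK))

stream.stop_stream()
stream.close()
p.terminate()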
Example 1: __enter__

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __enter__(self):
    self._audio_interface = pyaudio.PyAudio()
    self._audio_stream = self._audio_interface.open(
        # format=pyaudio.paInt16,
        format=pyaudio.paFloat32,
        # The API currently only supports 1-channel (mono) audio
        # https://goo.gl/z757pE
        channels=1,
        rate=self._rate,
        input=True,
        frames_per_buffer=self._chunk,
        input_device_index=self._device,
        # Run the audio stream asynchronously to fill the buffer object.
        # This is necessary so that the input device's buffer doesn't
        # overflow while the calling thread makes network requests, etc.
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self
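Several of these examples pass a `_fill_buffer` method as `stream_callback` but never show it. For reference, a minimal sketch of such a callback (the `_buff` queue attribute is an assumption, not taken from the example above) simply enqueues the raw bytes and tells PortAudio to keep streaming:

def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
    # Store the incoming audio bytes and signal pyaudio to continue capturing.
    self._buff.put(in_data)
    return None, pyaudio.paContinue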
Example 2: __enter__

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __enter__(self):
    self._audio_interface = pyaudio.PyAudio()
    self._audio_stream = self._audio_interface.open(
        format=pyaudio.paInt16,
        channels=1, rate=self._rate,
        input=True, frames_per_buffer=self._chunk,
        # Run the audio stream asynchronously to fill the buffer object.
        # This is necessary so that the input device's buffer doesn't
        # overflow while the calling thread makes network requests, etc.
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self

#def __exit__(self, type, value, traceback):
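The example above cuts off at a commented-out `__exit__` stub, which is left as-is. For context, a typical counterpart for this kind of microphone-stream context manager might look like the following sketch; the attribute names are taken from the `__enter__` above, everything else is an assumption rather than the original implementation:

def __exit__(self, type, value, traceback):
    # Stop and release the PortAudio stream, then shut down the interface.
    self._audio_stream.stop_stream()
    self._audio_stream.close()
    self.closed = True
    # If a consumer blocks on a queue fed by the stream callback, a None
    # sentinel is commonly pushed here so it can exit cleanly.
    self._audio_interface.terminate()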
Example 3: run

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def run(self):
    self.mic_stream = self.p.open(format=pyaudio.paInt16,
                                  channels=self.channels,
                                  rate=self.sample_rate,
                                  input=True,
                                  frames_per_buffer=self.blocksize)
    self.running = True
    while self.mic_stream and self.running:
        input_data = self.mic_stream.read(self.blocksize)
        if input_data:
            self.audio_buffer.write(input_data)
    # Shutdown record command
    if self.mic_stream:
        self.mic_stream.close()
        self.mic_stream = None
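One caveat with the blocking `read()` loop above: if the consumer of `audio_buffer` stalls, PortAudio's input buffer can overflow and `read()` raises an IOError. pyaudio's `Stream.read` accepts an `exception_on_overflow` flag (also used in Example 7 below) that lets you drop overflowed frames instead of raising; a one-line sketch of that variant:

        # Drop overflowed frames silently instead of raising from read().
        input_data = self.mic_stream.read(self.blocksize, exception_on_overflow=False)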
Example 4: __init__

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __init__(self):
    # Audio stream input setup
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000
    self.CHUNK = 4096
    self.audio = pyaudio.PyAudio()
    self.stream = self.audio.open(format=FORMAT, channels=CHANNELS,
                                  rate=RATE, input=True,
                                  frames_per_buffer=self.CHUNK,
                                  stream_callback=self.get_data)
    self._buff = Queue.Queue()  # Buffer to hold audio data
    self.closed = False
    # ROS Text Publisher
    self.text_pub = rospy.Publisher('/google_client/text', String, queue_size=10)
    # Context clues in yaml file
    rospack = rospkg.RosPack()
    yamlFileDir = rospack.get_path('dialogflow_ros') + '/config/context.yaml'
    with open(yamlFileDir, 'r') as f:
        self.context = yaml.load(f)
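The `get_data` callback and the `_buff` queue above suggest the usual pattern for feeding a streaming speech API: the callback enqueues raw chunks on pyaudio's thread, and a generator drains the queue for the request. A minimal sketch of that pair, under the assumption that `get_data` follows pyaudio's standard callback signature and that a `generator` method exists elsewhere in the class:

def get_data(self, in_data, frame_count, time_info, status):
    # Called by pyaudio on its own thread for every captured chunk.
    self._buff.put(in_data)
    return None, pyaudio.paContinue

def generator(self):
    # Yield buffered chunks until the stream is marked closed.
    while not self.closed:
        chunk = self._buff.get()
        if chunk is None:
            return
        yield chunk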
Example 5: __init__

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __init__(self):
    # Audio stream input setup
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000
    self.CHUNK = 4096
    self.audio = pyaudio.PyAudio()
    self.stream = self.audio.open(format=FORMAT, channels=CHANNELS,
                                  rate=RATE, input=True,
                                  frames_per_buffer=self.CHUNK,
                                  stream_callback=self._get_data)
    self._buff = Queue.Queue()  # Buffer to hold audio data
    self.closed = False
    # ROS Text Publisher
    text_topic = rospy.get_param('/text_topic', '/dialogflow_text')
    self.text_pub = rospy.Publisher(text_topic, String, queue_size=10)
Example 6: __init__

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __init__(self):
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000
    CHUNK = 4096
    self.audio = pyaudio.PyAudio()
    self.stream = self.audio.open(format=FORMAT, channels=CHANNELS, rate=RATE,
                                  input=True, frames_per_buffer=CHUNK,
                                  stream_callback=self._callback)
    self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.read_list = [self.serversocket]
    self._server_name = rospy.get_param('/dialogflow_client/server_name',
                                        '127.0.0.1')
    self._port = rospy.get_param('/dialogflow_client/port', 4444)
    rospy.loginfo("DF_CLIENT: Audio Server Started!")
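The socket and `read_list` above imply a small select()-based audio server that forwards captured chunks to connected clients. The `_callback` below is only a sketch of how that might be wired up; the connection handling, the `import select`, and the bind/listen setup (not shown in the original __init__) are all assumptions:

def _callback(self, in_data, frame_count, time_info, status):
    # Accept any pending clients, then fan the audio chunk out to them.
    readable, _, _ = select.select(self.read_list, [], [], 0)
    for sock in readable:
        if sock is self.serversocket:
            conn, _ = self.serversocket.accept()
            self.read_list.append(conn)
    for conn in self.read_list[1:]:
        try:
            conn.send(in_data)
        except socket.error:
            self.read_list.remove(conn)
    return None, pyaudio.paContinue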
Example 7: run

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def run(self):
    self.logger.debug("Start to recording...")
    self.logger.debug(" Time = %s" % self.time)
    self.logger.debug(" Sample Rate = %s" % self.sr)
    self.start_time = time.time()
    pa = PyAudio()
    stream = pa.open(format=paInt16, channels=1, rate=self.sr, input=True,
                     frames_per_buffer=self.frames_per_buffer)
    my_buf = []
    count = 0
    if self.time is None:
        total_count = 1e10
    else:
        total_count = self.time * self.sr / self.batch_num
    while count < total_count and self.__running.isSet():
        datawav = stream.read(self.batch_num, exception_on_overflow=True)
        datause = np.fromstring(datawav, dtype=np.short)
        for w in datause:
            self.buffer.put(w)
        count += 1
    stream.close()
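A note on the decoding line above: `np.fromstring` is deprecated for binary input in recent NumPy releases. The drop-in replacement for raw pyaudio bytes is `np.frombuffer`, for example:

        # Equivalent, non-deprecated way to view the raw 16-bit samples.
        datause = np.frombuffer(datawav, dtype=np.int16)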
Example 8: __init__

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __init__(self):
    self.open = True
    self.rate = 44100
    self.frames_per_buffer = 1024
    self.channels = 2
    self.format = pyaudio.paInt16
    self.audio_filename = "temp_audio.wav"
    self.audio = pyaudio.PyAudio()
    self.stream = self.audio.open(format=self.format,
                                  channels=self.channels,
                                  rate=self.rate,
                                  input=True,
                                  frames_per_buffer=self.frames_per_buffer)
    self.audio_frames = []

# Audio starts being recorded
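The trailing comment suggests a recording method follows in the original class, which is not shown here. A minimal sketch of what such a record-and-save routine usually looks like for this setup, assuming `import wave` and the `record`/`stop` method names (both assumptions, not the original code):

def record(self):
    # Pull frames from the open input stream until stop() flips self.open.
    while self.open:
        data = self.stream.read(self.frames_per_buffer)
        self.audio_frames.append(data)
    self.stream.stop_stream()
    self.stream.close()
    # Write the captured frames out as a 16-bit stereo WAV file.
    wf = wave.open(self.audio_filename, 'wb')
    wf.setnchannels(self.channels)
    wf.setsampwidth(self.audio.get_sample_size(self.format))
    wf.setframerate(self.rate)
    wf.writeframes(b''.join(self.audio_frames))
    wf.close()
    self.audio.terminate()

def stop(self):
    self.open = False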
Example 9: __enter__

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __enter__(self):
    with SuperManager.getInstance().commons.shutUpAlsaFFS():
        self._audio_interface = pyaudio.PyAudio()
        self._audio_stream = self._audio_interface.open(
            format=pyaudio.paInt16,
            # The API currently only supports 1-channel (mono) audio
            # https://goo.gl/z757pE
            channels=1, rate=self._rate,
            input=True, frames_per_buffer=self._chunk,
            # Run the audio stream asynchronously to fill the buffer object.
            # This is necessary so that the input device's buffer doesn't
            # overflow while the calling thread makes network requests, etc.
            stream_callback=self._fill_buffer,
        )
    self.closed = False
    return self
Example 10: main

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def main():
    # prepare audio recorder
    p = pyaudio.PyAudio()
    stream = p.open(
        format=pyaudio.paInt16,
        channels=1,
        rate=16000,
        input=True,
        stream_callback=callback)
    stream.start_stream()
    # prepare keyboard listener
    with keyboard.Listener(
            on_press=on_press, on_release=on_release) as listener:
        listener.join()
    # close up
    stream.stop_stream()
    stream.close()
    p.terminate()
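The `callback`, `on_press`, and `on_release` functions are defined elsewhere in the original script and are not shown. As a rough sketch of how such pynput handlers are commonly written (the push-to-talk `recording` flag is an assumption):

recording = False

def on_press(key):
    # Start capturing while the key is held down.
    global recording
    recording = True

def on_release(key):
    # Stop capturing; releasing Esc also stops the listener.
    global recording
    recording = False
    if key == keyboard.Key.esc:
        return False  # returning False stops keyboard.Listener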
Example 11: __enter__

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __enter__(self):
    self._audio_interface = pyaudio.PyAudio()
    self._audio_stream = self._audio_interface.open(
        format=pyaudio.paInt16,
        # The API currently only supports 1-channel (mono) audio
        # https://goo.gl/z757pE
        channels=1, rate=self._rate,
        input=True, frames_per_buffer=self._chunk,
        # Run the audio stream asynchronously to fill the buffer object.
        # This is necessary so that the input device's buffer doesn't
        # overflow while the calling thread makes network requests, etc.
        stream_callback=self._fill_buffer,
    )
    self.closed = False
    return self
Example 12: predict_file

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
# Note: the `pyaudio` parameter shadows the module of the same name; the
# default `format=pyaudio.paInt16` is still resolved against the module,
# since default values are evaluated when the function is defined.
def predict_file(dec, pyaudio, path, frames, args, rate=16000, format=pyaudio.paInt16, save=False):
    wf = wave.open(path, 'wb')
    wf.setnchannels(1)
    wf.setsampwidth(pyaudio.get_sample_size(format))
    wf.setframerate(rate)
    # this code works only for pulseaudio
    # wf.writeframes(b''.join(frames))
    wf.writeframes(frames)
    wf.close()
    results = dec.predict_file(path, feat_mode=args.feat_mode, feat_dim=args.feat_dim, three_d=args.three_d)
    if save == False:
        os.remove(path)
    if args.predict_mode == 0:
        task_outputs = dec.returnDiff(results)
    elif args.predict_mode == 1:
        task_outputs = dec.returnLabel(results)
    else:
        task_outputs = dec.returnClassDist(results)
    return task_outputs

# main loop for speech emotion recognition
Example 13: startStream

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def startStream(self):
    self.stream = py_audio.open(format=pyaudio.paInt16,
                                channels=1,
                                rate=self.device_rate,
                                input=True,
                                input_device_index=self.device_id,
                                frames_per_buffer=self.frames_per_buffer)
    # overflows = 0
    # prev_ovf_time = time.time()
    while True:
        try:
            y = np.fromstring(self.stream.read(self.frames_per_buffer), dtype=np.int16)
            y = y.astype(np.float32)
            self.callback_func(y)
        except IOError:
            pass
            # overflows += 1
            # if time.time() > prev_ovf_time + 1:
            #     prev_ovf_time = time.time()
            #     if config.settings["configuration"]["USE_GUI"]:
            #         gui.label_error.setText('Audio buffer has overflowed {} times'.format(overflows))
            #     else:
            #         print('Audio buffer has overflowed {} times'.format(overflows))
Example 14: get_microphone_level

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def get_microphone_level():
    """
    source: http://stackoverflow.com/questions/26478315/getting-volume-levels-from-pyaudio-for-use-in-arduino
    audioop.max is an alternative to audioop.rms
    """
    chunk = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 44100
    p = pyaudio.PyAudio()
    s = p.open(format=FORMAT,
               channels=CHANNELS,
               rate=RATE,
               input=True,
               frames_per_buffer=chunk)
    global levels
    while True:
        data = s.read(chunk)
        mx = audioop.rms(data, 2)
        if len(levels) >= 100:
            levels = []
        levels.append(mx)
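Since get_microphone_level never returns, callers typically run it on a daemon thread and poll the module-level `levels` list from elsewhere (a UI loop, for instance). A minimal sketch, assuming `levels` is defined at module scope as in the original snippet:

import threading

levels = []

# Run the capture loop in the background; read levels[-1] from the consumer.
t = threading.Thread(target=get_microphone_level, daemon=True)
t.start()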
Example 15: run

# Required module: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def run(self):
    pya = PyAudio()
    self._stream = pya.open(
        format=paInt16,
        channels=1,
        rate=SAMPLE_RATE,
        input=True,
        frames_per_buffer=WINDOW_SIZE,
        stream_callback=self._process_frame,
    )
    self._stream.start_stream()
    # raw_input() blocks until Enter is pressed (Python 2; use input() on Python 3)
    while self._stream.is_active() and not raw_input():
        time.sleep(0.1)
    self._stream.stop_stream()
    self._stream.close()
    pya.terminate()