

Python pyaudio.paInt16 Method Code Examples

This article collects typical usage examples of the pyaudio.paInt16 method in Python. If you are wondering what exactly pyaudio.paInt16 does, how to use it, or what real-world usage looks like, the curated code examples here may help. You can also explore further usage examples from the pyaudio module, where this method is defined.


The following presents 15 code examples of the pyaudio.paInt16 method, sorted by popularity by default.
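Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the rate and chunk values are assumptions for illustration) showing what pyaudio.paInt16 does: it tells PyAudio to deliver 16-bit signed integer samples when a stream is opened.

import pyaudio

RATE = 16000   # sample rate in Hz (assumed for illustration)
CHUNK = 1024   # frames read per call

p = pyaudio.PyAudio()
# paInt16 selects 16-bit signed integer samples: 2 bytes per sample
stream = p.open(format=pyaudio.paInt16,
                channels=1,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK)
data = stream.read(CHUNK)   # bytes object holding CHUNK frames, 2 bytes each
stream.stop_stream()
stream.close()
p.terminate()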

Example 1: __enter__

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __enter__(self):
        self._audio_interface = pyaudio.PyAudio()
        self._audio_stream = self._audio_interface.open(
            # format=pyaudio.paInt16,
            format=pyaudio.paFloat32,
            # The API currently only supports 1-channel (mono) audio
            # https://goo.gl/z757pE
            channels=1,
            rate=self._rate,
            input=True,
            frames_per_buffer=self._chunk,
            input_device_index=self._device,
            # Run the audio stream asynchronously to fill the buffer object.
            # This is necessary so that the input device's buffer doesn't
            # overflow while the calling thread makes network requests, etc.
            stream_callback=self._fill_buffer,
        )

        self.closed = False

        return self 
Developer: pytorch, Project: audio, Lines: 23, Source: vad.py
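Example 1 (and several examples below) passes stream_callback=self._fill_buffer so that PortAudio fills a buffer in a background thread instead of blocking the caller. The _fill_buffer method itself is not part of the excerpt; a minimal sketch of such a callback, with assumed names that may differ from the pytorch/audio implementation, could look like:

import queue
import pyaudio

class _BufferSketch:
    # Hypothetical holder for the callback; not taken from any project above.
    def __init__(self):
        self._buff = queue.Queue()

    def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
        # Store the raw audio bytes and tell PortAudio to keep streaming.
        self._buff.put(in_data)
        return None, pyaudio.paContinue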

Example 2: __enter__

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __enter__(self):
		self._audio_interface = pyaudio.PyAudio()
		self._audio_stream = self._audio_interface.open(
			format=pyaudio.paInt16,
			channels=1, rate=self._rate,
			input=True, frames_per_buffer=self._chunk,
			# Run the audio stream asynchronously to fill the buffer object.
			# This is necessary so that the input device's buffer doesn't
			# overflow while the calling thread makes network requests, etc.
			stream_callback=self._fill_buffer,
		)

		self.closed = False

		return self

	#def __exit__(self, type, value, traceback): 
Developer: gigagenie, Project: ai-makers-kit, Lines: 19, Source: proj2_yt_mvp.py

Example 3: run

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def run(self):
		self.mic_stream = self.p.open(format=pyaudio.paInt16,
			channels=self.channels,
			rate=self.sample_rate,
			input=True,
			frames_per_buffer=self.blocksize)
			
		self.running = True
		while self.mic_stream and self.running:
			input_data = self.mic_stream.read(self.blocksize)
			if input_data:
				self.audio_buffer.write(input_data)

		#Shutdown record command
		if(self.mic_stream):
			self.mic_stream.close()
			self.mic_stream = None 
Developer: nyumaya, Project: nyumaya_audio_recognition, Lines: 19, Source: cross_record.py

Example 4: __init__

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __init__(self):
        # Audio stream input setup
        FORMAT = pyaudio.paInt16
        CHANNELS = 1
        RATE = 16000
        self.CHUNK = 4096
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(format=FORMAT, channels=CHANNELS,
                                      rate=RATE, input=True,
                                      frames_per_buffer=self.CHUNK,
                                      stream_callback=self.get_data)
        self._buff = Queue.Queue()  # Buffer to hold audio data
        self.closed = False

        # ROS Text Publisher
        self.text_pub = rospy.Publisher('/google_client/text', String, queue_size=10)

        # Context clues in yaml file
        rospack = rospkg.RosPack()
        yamlFileDir = rospack.get_path('dialogflow_ros') + '/config/context.yaml'
        with open(yamlFileDir, 'r') as f:
            self.context = yaml.load(f) 
Developer: piraka9011, Project: dialogflow_ros, Lines: 24, Source: google_client.py

Example 5: __init__

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __init__(self):
        # Audio stream input setup
        FORMAT = pyaudio.paInt16
        CHANNELS = 1
        RATE = 16000
        self.CHUNK = 4096
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(format=FORMAT, channels=CHANNELS,
                                      rate=RATE, input=True,
                                      frames_per_buffer=self.CHUNK,
                                      stream_callback=self._get_data)
        self._buff = Queue.Queue()  # Buffer to hold audio data
        self.closed = False

        # ROS Text Publisher
        text_topic = rospy.get_param('/text_topic', '/dialogflow_text')
        self.text_pub = rospy.Publisher(text_topic, String, queue_size=10) 
Developer: piraka9011, Project: dialogflow_ros, Lines: 19, Source: mic_client.py

Example 6: __init__

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __init__(self):
        FORMAT = pyaudio.paInt16
        CHANNELS = 1
        RATE = 16000
        CHUNK = 4096
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(format=FORMAT, channels=CHANNELS, rate=RATE,
                                      input=True, frames_per_buffer=CHUNK,
                                      stream_callback=self._callback)
        self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.read_list = [self.serversocket]

        self._server_name = rospy.get_param('/dialogflow_client/server_name',
                                            '127.0.0.1')
        self._port = rospy.get_param('/dialogflow_client/port', 4444)

        rospy.loginfo("DF_CLIENT: Audio Server Started!") 
Developer: piraka9011, Project: dialogflow_ros, Lines: 19, Source: audio_server.py

Example 7: run

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def run(self):
        self.logger.debug("Start to recording...")
        self.logger.debug("  Time = %s"%self.time)
        self.logger.debug("  Sample Rate = %s"%self.sr)
        self.start_time = time.time()
        pa=PyAudio()
        stream=pa.open(format = paInt16,channels=1, rate=self.sr,input=True, frames_per_buffer=self.frames_per_buffer)
        my_buf=[]
        count=0
        if self.time is None:
            total_count = 1e10
        else:
            total_count = self.time * self.sr / self.batch_num
        while count< total_count and self.__running.isSet():
            datawav = stream.read(self.batch_num, exception_on_overflow = True)
            datause = np.fromstring(datawav,dtype = np.short)
            for w in datause:
                self.buffer.put(w)
            count+=1
        stream.close() 
Developer: mhy12345, Project: rcaudio, Lines: 22, Source: core_recorder.py
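Example 7 converts the recorded bytes with np.fromstring, which newer NumPy releases deprecate for binary input. A hedged equivalent using np.frombuffer, keeping the same 16-bit interpretation implied by paInt16, would be:

import numpy as np

# datawav stands for the bytes returned by stream.read(...); stand-in value here.
datawav = b'\x00\x00\xff\x7f'
datause = np.frombuffer(datawav, dtype=np.int16)  # one int16 per 2 bytes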

Example 8: __init__

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __init__(self):
        
        self.open = True
        self.rate = 44100
        self.frames_per_buffer = 1024
        self.channels = 2
        self.format = pyaudio.paInt16
        self.audio_filename = "temp_audio.wav"
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(format=self.format,
                                      channels=self.channels,
                                      rate=self.rate,
                                      input=True,
                                      frames_per_buffer = self.frames_per_buffer)
        self.audio_frames = []


    # Audio starts being recorded 
Developer: JRodrigoF, Project: AVrecordeR, Lines: 20, Source: AVrecordeR.py
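Example 8 only sets up the stream; the actual recording loop lives elsewhere in AVrecordeR. As a rough, hypothetical sketch (not the AVrecordeR implementation) of how such a paInt16 stream is typically drained and written out with the standard wave module:

import wave
import pyaudio

def record_to_wav(seconds=3, rate=44100, channels=2, chunk=1024,
                  filename="temp_audio.wav"):
    # Hypothetical helper; parameter values mirror Example 8 for illustration.
    audio = pyaudio.PyAudio()
    stream = audio.open(format=pyaudio.paInt16, channels=channels, rate=rate,
                        input=True, frames_per_buffer=chunk)
    frames = [stream.read(chunk) for _ in range(int(rate / chunk * seconds))]
    stream.stop_stream()
    stream.close()
    audio.terminate()

    wf = wave.open(filename, 'wb')
    wf.setnchannels(channels)
    wf.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))  # 2 bytes
    wf.setframerate(rate)
    wf.writeframes(b''.join(frames))
    wf.close()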

Example 9: __enter__

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __enter__(self):
		with SuperManager.getInstance().commons.shutUpAlsaFFS():
			self._audio_interface = pyaudio.PyAudio()

		self._audio_stream = self._audio_interface.open(
			format=pyaudio.paInt16,
			# The API currently only supports 1-channel (mono) audio
			# https://goo.gl/z757pE
			channels=1, rate=self._rate,
			input=True, frames_per_buffer=self._chunk,
			# Run the audio stream asynchronously to fill the buffer object.
			# This is necessary so that the input device's buffer doesn't
			# overflow while the calling thread makes network requests, etc.
			stream_callback=self._fill_buffer,
		)

		self.closed = False

		return self 
Developer: project-alice-assistant, Project: ProjectAlice, Lines: 21, Source: MicrophoneStream.py

Example 10: main

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def main():
    # prepare audio recorder
    p = pyaudio.PyAudio()
    stream = p.open(
        format=pyaudio.paInt16,
        channels=1,
        rate=16000,
        input=True,
        stream_callback=callback)
    stream.start_stream()

    # prepare keyboard listener
    with keyboard.Listener(
            on_press=on_press, on_release=on_release) as listener:
        listener.join()

    # close up
    stream.stop_stream()
    stream.close()
    p.terminate() 
Developer: Pelhans, Project: ZASR_tensorflow, Lines: 22, Source: demo_client.py

Example 11: __enter__

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def __enter__(self):
        self._audio_interface = pyaudio.PyAudio()
        self._audio_stream = self._audio_interface.open(
            format=pyaudio.paInt16,
            # The API currently only supports 1-channel (mono) audio
            # https://goo.gl/z757pE
            channels=1, rate=self._rate,
            input=True, frames_per_buffer=self._chunk,
            # Run the audio stream asynchronously to fill the buffer object.
            # This is necessary so that the input device's buffer doesn't
            # overflow while the calling thread makes network requests, etc.
            stream_callback=self._fill_buffer,
        )

        self.closed = False

        return self 
Developer: GoogleCloudPlatform, Project: python-docs-samples, Lines: 19, Source: transcribe_streaming_mic.py

Example 12: predict_file

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def predict_file(dec, pyaudio, path, frames, args, rate = 16000, format = pyaudio.paInt16, save = False):
    wf = wave.open(path, 'wb')
    wf.setnchannels(1)
    wf.setsampwidth(pyaudio.get_sample_size(format))
    wf.setframerate(rate)
    #this code works for only for pulseaudio
    #wf.writeframes(b''.join(frames))
    wf.writeframes(frames)
    wf.close()

    results = dec.predict_file(path, feat_mode = args.feat_mode, feat_dim = args.feat_dim, three_d = args.three_d)
    
    if save == False:
        os.remove(path)
    if args.predict_mode == 0:
        task_outputs = dec.returnDiff(results)
    elif args.predict_mode == 1:
        task_outputs = dec.returnLabel(results)
    else:
        task_outputs = dec.returnClassDist(results)
    return task_outputs

#main loop for speech emotion recognition 
Developer: batikim09, Project: LIVE_SER, Lines: 25, Source: offline_ser.py

Example 13: startStream

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def startStream(self):
        self.stream = py_audio.open(format = pyaudio.paInt16,
                                    channels = 1,
                                    rate = self.device_rate,
                                    input = True,
                                    input_device_index = self.device_id,
                                    frames_per_buffer = self.frames_per_buffer)
            
        # overflows = 0
        # prev_ovf_time = time.time()
        while True:
            try:
                y = np.fromstring(self.stream.read(self.frames_per_buffer), dtype=np.int16)
                y = y.astype(np.float32)
                self.callback_func(y)
            except IOError:
                pass
                # overflows += 1
                # if time.time() > prev_ovf_time + 1:
                #     prev_ovf_time = time.time()
                #     if config.settings["configuration"]["USE_GUI"]:
                #         gui.label_error.setText('Audio buffer has overflowed {} times'.format(overflows))
                #     else:
                #         print('Audio buffer has overflowed {} times'.format(overflows)) 
Developer: not-matt, Project: Systematic-LEDs, Lines: 26, Source: main.py

Example 14: get_microphone_level

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def get_microphone_level():
    """
    source: http://stackoverflow.com/questions/26478315/getting-volume-levels-from-pyaudio-for-use-in-arduino
    audioop.max alternative to audioop.rms
    """
    chunk = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 44100
    p = pyaudio.PyAudio()

    s = p.open(format=FORMAT,
               channels=CHANNELS,
               rate=RATE,
               input=True,
               frames_per_buffer=chunk)
    global levels
    while True:
        data = s.read(chunk)
        mx = audioop.rms(data, 2)
        if len(levels) >= 100:
            levels = []
        levels.append(mx) 
Developer: atuldo, Project: real-time-plot-microphone-kivy, Lines: 25, Source: main.py
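The docstring in Example 14 notes that audioop.max can be used as an alternative to audioop.rms; both take the raw paInt16 bytes plus a sample width of 2. A small sketch (the chunk bytes are a stand-in; note that the standard-library audioop module is deprecated in recent Python 3 releases):

import audioop

chunk_bytes = b'\x00\x10\x00\xf0'          # stand-in for stream.read(chunk)
rms_level = audioop.rms(chunk_bytes, 2)    # root-mean-square of 16-bit samples
peak_level = audioop.max(chunk_bytes, 2)   # absolute peak of 16-bit samples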

Example 15: run

# Required module import: import pyaudio [as alias]
# Or: from pyaudio import paInt16 [as alias]
def run(self):
        pya = PyAudio()
        self._stream = pya.open(
            format=paInt16,
            channels=1,
            rate=SAMPLE_RATE,
            input=True,
            frames_per_buffer=WINDOW_SIZE,
            stream_callback=self._process_frame,
        )
        self._stream.start_stream()

        while self._stream.is_active() and not raw_input():
            time.sleep(0.1)

        self._stream.stop_stream()
        self._stream.close()
        pya.terminate() 
Developer: aniawsz, Project: rtmonoaudio2midi, Lines: 20, Source: audiostream.py


Note: The pyaudio.paInt16 examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code. Do not reproduce without permission.