

Python PyAudio.get_sample_size Method Code Examples

This article collects typical usage examples of the pyaudio.PyAudio.get_sample_size method in Python. If you are wondering what PyAudio.get_sample_size does, how to call it, or where it is used in practice, the curated examples below may help. You can also explore further usage examples of the class it belongs to, pyaudio.PyAudio.


Ten code examples of the PyAudio.get_sample_size method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
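
Before the project-specific examples, here is a minimal sketch of the pattern most of them share, written for Python 3 (the excerpts that follow are Python 2 code from their original projects); the constants and the output file name are placeholder values chosen for illustration. The key point is that get_sample_size(format) converts a PyAudio sample-format constant into the sample width in bytes that wave's setsampwidth() expects (2 bytes for paInt16).

import wave

from pyaudio import PyAudio, paInt16

CHUNK = 1024      # frames read per buffer
RATE = 44100      # sampling rate in Hz
SECONDS = 3       # length of the recording
FORMAT = paInt16  # 16-bit signed integer samples

audio = PyAudio()
stream = audio.open(format=FORMAT, channels=1, rate=RATE,
                    input=True, frames_per_buffer=CHUNK)
frames = [stream.read(CHUNK) for _ in range(RATE * SECONDS // CHUNK)]
stream.stop_stream()
stream.close()

with wave.open("example.wav", "wb") as wf:
    wf.setnchannels(1)
    wf.setsampwidth(audio.get_sample_size(FORMAT))  # sample width in bytes
    wf.setframerate(RATE)
    wf.writeframes(b"".join(frames))

audio.terminate()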

Example 1: record

# Required module: from pyaudio import PyAudio [as alias]
# Or: from pyaudio.PyAudio import get_sample_size [as alias]
 def record(self, time):
     audio = PyAudio()
     stream = audio.open(input_device_index=self.device_index,
                         output_device_index=self.device_index,
                         format=self.format,
                         channels=self.channel,
                         rate=self.rate,
                         input=True,
                         frames_per_buffer=self.chunk
                         )
     print "Recording..."
     frames = []
     for i in range(0, self.rate / self.chunk * time):
         data = stream.read(self.chunk)
         frames.append(data)
     stream.stop_stream()
     print "Recording Complete"
     stream.close()
     audio.terminate()
     write_frames = open_audio(self.file, 'wb')
     write_frames.setnchannels(self.channel)
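      # get_sample_size(self.format) returns the width in bytes of one sample for the chosen format (e.g. 2 for paInt16)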
     write_frames.setsampwidth(audio.get_sample_size(self.format))
     write_frames.setframerate(self.rate)
     write_frames.writeframes(''.join(frames))
     write_frames.close()
     self.convert()
Developer: AnanthaRajuC, Project: homecontrol, Lines: 28, Source: pygsr.py

Example 2: button_record_Click

# Required module: from pyaudio import PyAudio [as alias]
# Or: from pyaudio.PyAudio import get_sample_size [as alias]
def button_record_Click():
    global sampwidth, wav_in, stream_in, state

    state = 1   # change the state flag into "recording"

    # update button states
    button_stop.configure(state = NORMAL)
    button_play.configure(state = DISABLED)
    button_back.configure(state = DISABLED)
    button_next.configure(state = DISABLED)
    button_record.configure(state = DISABLED)

    wav_in = PyAudio()
    stream_in = wav_in.open(format = FORMAT, channels = CHANNELS, rate = RATE, input = True, frames_per_buffer = CHUNK) 
    sampwidth = wav_in.get_sample_size(FORMAT)
    echo_text.configure(text = "Recording...", bg = 'red', fg = 'white', font = ("Helvetica", 50))
    record_wav()
Developer: smilett, Project: PyRecorder, Lines: 19, Source: pyrecorder.py

Example 3: record

# Required module: from pyaudio import PyAudio [as alias]
# Or: from pyaudio.PyAudio import get_sample_size [as alias]
 def record(self, time=5):
     audio = PyAudio()
     stream = audio.open(format=self.format, channels=self.channel,
                         rate=self.rate, input=True,
                         frames_per_buffer=self.chunk)
     print "RECORDING START"
     frames = []
     for i in range(0, self.rate / self.chunk * time):
         data = stream.read(self.chunk)
         frames.append(data)
     stream.stop_stream()
     stream.close()
     audio.terminate()
     print "RECORDING STOP"
     write_frames = open_audio(self.audio_file, 'wb')
     write_frames.setnchannels(self.channel)
     write_frames.setsampwidth(audio.get_sample_size(self.format))
     write_frames.setframerate(self.rate)
     write_frames.writeframes(''.join(frames))
     write_frames.close()
     self.convert()
Developer: xSAVIKx, Project: py-audio-recognition, Lines: 23, Source: recognizer.py

Example 4: record

# Required module: from pyaudio import PyAudio [as alias]
# Or: from pyaudio.PyAudio import get_sample_size [as alias]
 def record(self, time, device_i=None):
     audio = PyAudio()
     print audio.get_device_info_by_index(1)
     stream = audio.open(input_device_index=device_i,output_device_index=device_i,format=self.format, channels=self.channel,
                         rate=self.rate, input=True,
                         frames_per_buffer=self.chunk)
     playDing()
     print "REC: "
     frames = []
     for i in range(0, self.rate / self.chunk * time):
         data = stream.read(self.chunk)
         frames.append(data)
     stream.stop_stream()
     print "END"
     stream.close()
     playDing()
     audio.terminate()
     write_frames = open_audio(self.file, 'wb')
     write_frames.setnchannels(self.channel)
     write_frames.setsampwidth(audio.get_sample_size(self.format))
     write_frames.setframerate(self.rate)
     write_frames.writeframes(''.join(frames))
     write_frames.close()
Developer: sageoffroy, Project: Pyranha, Lines: 25, Source: vox.py

Example 5: Listener

# Required module: from pyaudio import PyAudio [as alias]
# Or: from pyaudio.PyAudio import get_sample_size [as alias]

#......... part of the code is omitted here .........
                    data = []
                    continue

                # The sound level has been below the specified level for
                # `max_silence_secs`, so the sound data needs to be
                # processed and recording stopped.
                print "***** done recording *****", time_total, max(Q_silence)
                is_recording = False

                # If the total recording time is less than the minimum
                # command time, then we should not process the data.
                if time_total >= self.min_cmd_secs:
                    self.process_data_sound(data, worker)

                # Reset the time, silence queue, and data.
                time_total = 0.0
                Q_silence.clear()
                data = []

        return True

    def process_data_sound(self, data, worker):
        # Add silence to the end of the recording since some
        # speech at the end tends to get cut off during the
        # recognition process.
        data.extend(self.data_silence)

        self.save_wav(data)
        flac = self.convert_wav_flac()
        res = self.stt_google(flac)

        if res:
            # if res[0]['confidence'] > 0.75:
                # print res[0]
                # return worker.transition(
                    # [res[0]["utterance"].lower()], True)
            # else:
            utters = []
            for item in res:
                utters.append(item["utterance"].lower())

            return worker.transition(utters, True)

        return False

    def stt_google(self, data):
        lang_code='en-US'
        googl_speech_url = 'https://www.google.com/speech-api/v1/recognize?xjerr=1&client=chromium&pfilter=2&lang=%s&maxresults=6' % (lang_code)
        hrs = {"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.63 Safari/535.7",'Content-type': 'audio/x-flac; rate=16000'}
        req = urllib2.Request(googl_speech_url, data=data, headers=hrs)
        p = urllib2.urlopen(req)
        j = json.loads(p.read())

        if "hypotheses" in j and len(j["hypotheses"]) > 0:
            return j["hypotheses"]

        return ""

    def save_wav(self, data, filename = "temp.wav"):
        f = wave.open(filename, 'wb')
        f.setnchannels(self.nchannels)
        f.setsampwidth(self.paudio.get_sample_size(self.aformat))
        f.setframerate(self.bitrate)
        f.writeframes(b''.join(data))
        f.close()

        return True

    def convert_wav_flac(self, filename = "temp.wav"):
        name, ext = path.splitext(filename)
        flac = "%s.flac" % name

        audiotools.open(filename).convert(flac, audiotools.FlacAudio)

        f = open(flac)
        data = f.read()
        f.close()

        return data

    def check_data_silence(self):
        if not path.exists(self.data_silence_name):
            print "Warning: Silence buffer not found! Generate one."
            return False

        f = wave.open(self.data_silence_name, 'rb')
        self.data_silence = f.readframes(f.getnframes())
        f.close()

        return True

    def generate_data_silence(self):
        data = []

        print "***** recording " + "*"*64
        for i in range(0, self.fragment):
            data.append(stream.read(self.chunk))
        print "***** done recording " + "*"*59

        return self.save_wav(data, self.data_silence_name)
Developer: Pent00, Project: Olivia, Lines: 104, Source: listener.py

Example 6: PyAudio

# Required module: from pyaudio import PyAudio [as alias]
# Or: from pyaudio.PyAudio import get_sample_size [as alias]

# INIT
p = PyAudio()
stream = p.open(format=FORMAT, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)

plot_range = CHUNK_SIZE/2 + 1
		
		
# RECORD
print 'recording started'

while record:
	chunk = stream.read(CHUNK_SIZE)
	npdata = fromstring(chunk, dtype=int16)
	sample_width = p.get_sample_size(FORMAT)
	#print npdata, sample_width
	
	# silence check
	npdata = silence(npdata)
	
	Y = fft.rfft(npdata, CHUNK_SIZE)
	#print X, len(X)
	
	Y_abs = nplog(absolute(Y) + 1)
	
	#print Y_abs, len(Y_abs)
	#print max(Y_abs)
	
	
	record = quit()
Developer: ricsirke, Project: SoundFreqPlotter, Lines: 32, Source: freqAnal.py

Example 7: len

# Required module: from pyaudio import PyAudio [as alias]
# Or: from pyaudio.PyAudio import get_sample_size [as alias]
#zplot.add_img_plot(zname="blah", zdata=mag_vec)#z, ydata=linspace(0, len(anr)-1, len(anr)), xdata=linspace(0, len(yok)-1, len(yok)))

#plot.add_plot("cross_sec", yname="Macvec1", ydata=c)
#    plot.add_plot("cross_se2", yname="Macvec2", ydata=mag_vec[:, 75])
plot.show()
for x in xrange(NUMBEROFFRAMES):
    WAVEDATA = WAVEDATA+chr(c[x])

#fill remainder of frameset with silence
for x in xrange(RESTFRAMES):
    WAVEDATA = WAVEDATA+chr(128)

p = PyAudio()
FORMAT=p.get_format_from_width(1)
stream = p.open(format = p.get_format_from_width(1),
                channels = 1,
                rate = BITRATE,
                output = True)
stream.write(WAVEDATA)
stream.stop_stream()
stream.close()
p.terminate()

if 0:
    import wave
    wf = wave.open('short_pulse.wav', 'wb')
    wf.setnchannels(1)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(BITRATE)
    wf.writeframes(WAVEDATA)
    wf.close()
Developer: thomasaref, Project: Side_project_software, Lines: 33, Source: Soundplayer.py

Example 8: listen

# Required module: from pyaudio import PyAudio [as alias]
# Or: from pyaudio.PyAudio import get_sample_size [as alias]
  def listen(self, level = 1000, timeout = 1, ignore_shoter_than = 0.5, ignore_longer_than = 5, language = "sv_SE", device_i=None):
    audio = PyAudio()
    #print audio.get_device_info_by_index(1)
    stream = audio.open(input_device_index=device_i, output_device_index=device_i,
                        format=self.format, channels=self.channel,
                        rate=self.rate, input=True,
                        frames_per_buffer=self.chunk)

    timeout_chuncks = self.rate / self.chunk * timeout
    minmessage = self.rate / self.chunk * ignore_shoter_than
    maxmessage = self.rate / self.chunk * ignore_longer_than

    try:
        while True:
            print "Start listening... "
            frames = []
            data = ""
            olddata = ""
            self.count_silence = 0
            self.active = False

            while True:  #for i in range(0, self.rate / self.chunk * time):
                data = stream.read(self.chunk)
                rms = audioop.rms(data, 2)
                #print str(rms) + '\r'

                # There is some noise: start recording
                if rms > level:
                    self.count_silence = 0
                    if self.active == False:
                        print "Recording..."
                        self.active = True
                        self.count_silence = 0
                        frames.append(olddata)

                if self.active:
                    frames.append(data)

                if rms < level and self.active:
                    self.count_silence += 1

                # If we have enough silence, send for processing
                if (self.count_silence > timeout_chuncks) and self.active == True:
                    self.active = False
                    #print len(frames) #10 12
                    #print self.count_silence #8
                    if not len(frames) > self.count_silence + minmessage:
                        print "Disregarding noise"
                        frames = []
                        continue
                    if len(frames) > self.count_silence + maxmessage:
                        print "Ignoring too long recording"
                        frames = []
                        continue

                    print "Processing..."
                    break

                olddata = data

            write_frames = open_audio(self.file, 'wb')
            write_frames.setnchannels(self.channel)
            write_frames.setsampwidth(audio.get_sample_size(self.format))
            write_frames.setframerate(self.rate)
            write_frames.writeframes(''.join(frames))
            write_frames.close()
            self.convert()

            try:
                phrase, complete_response = self.speech_to_text(language)  # select the language
            except:
                phrase = ""

            print phrase

    except KeyboardInterrupt:
        # quit
        stream.stop_stream()
        #print "END"
        stream.close()
        audio.terminate()
        sys.exit()

    return
Developer: Anton04, Project: Speech2MQTT, Lines: 89, Source: Speech2MQTT.py

Example 9: main

# Required module: from pyaudio import PyAudio [as alias]
# Or: from pyaudio.PyAudio import get_sample_size [as alias]
def main():
    class Beatcounter(object):
        def __init__(self, threshold = THRES, dtype=FORMAT):
            self.prev_envelope = 0
            self.envelope = 0

        def onset(self, signal):
            signal = fromstring(signal, FORMAT)
            self.envelope = 0
            for i in arange(len(signal)):
                sample = signal[i]
                self.envelope += abs(sample)
            if self.envelope - self.prev_envelope > THRES:
                self.prev_envelope = self.envelope
                return True
            else:
                self.prev_envelope = self.envelope
                return False


    def callback(in_data, frame_count, time_info, flag):
        if flag:
            print("Playback Error: %i" % flag)
        played_frames = callback.counter
        callback.counter += frame_count
        wf.writeframes(b''.join(in_data))
        if beatcounter.onset(in_data):
            callback.tempo = 60 / (time_info['current_time'] - callback.prev_time)
            if callback.tempo > 250:
                return in_data, paContinue
            callback.tapcounter += 1
            callback.prev_time = time_info['current_time']
            if callback.tapcounter != 1:
                print callback.tapcounter, callback.tempo
            else:
                print callback.tapcounter, "N/A"
            if callback.tapcounter >= 4:
                return in_data, paComplete
        return in_data, paContinue

    callback.counter = 0
    callback.tapcounter = 0
    callback.prev_time = 0
    pa = PyAudio()
    beatcounter = Beatcounter(THRES)
    wf = wave.open(WAVE_TEMPO_FILENAME, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(pa.get_sample_size(paInt16))
    wf.setframerate(FS)
    sleep(0.5)
    print("Tap four beat\n============")
    stream = pa.open(format = paInt16,
                     channels = CHANNELS,
                     input = True,
                     rate = FS,
                     frames_per_buffer = BLOCK_LENGTH,
                     output = False,
                     stream_callback = callback)
    while stream.is_active():
        sleep(0.1)
    stream.close()
    pa.terminate()
    wf.close()
    sleep(60 / callback.tempo)
    print("Record after four beat\n============")
    pa2 = PyAudio()
    wf2 = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
    wf2.setnchannels(CHANNELS)
    wf2.setsampwidth(pa.get_sample_size(paInt16))
    wf2.setframerate(FS)
    for i in arange(4):
        print('%d\a' % (4-i))
        sleep(60 / callback.tempo)
    print("Go\n=======")
    RECORD_SECONDS = 60 / callback.tempo * 8
    stream2 = pa2.open(format = paInt16,
                     channels = CHANNELS,
                     input = True,
                     rate = FS,
                     frames_per_buffer = BLOCK_LENGTH,
                     output = False)
    print("* recording")

    frames = []
    for i in range(0, int(FS / BLOCK_LENGTH * RECORD_SECONDS)):
        data = stream2.read(BLOCK_LENGTH)
        frames.append(data)

    print("* done recording")

    stream2.stop_stream()
    stream2.close()
    pa2.terminate()
    wf2.writeframes(b''.join(frames))
    wf2.close()
Developer: cheyuanl, Project: MLSP-Final, Lines: 97, Source: recorder.py

Example 10: __init__

# Required module: from pyaudio import PyAudio [as alias]
# Or: from pyaudio.PyAudio import get_sample_size [as alias]
class AudioTool:
    '''
    This class provides record and play. If you want to play a file while
    recording, set play to True.
    Sample rate: 44100
    Bits per sample: 16
    '''
    def __init__(self):
        self.chunk = 1024
        self.channels = 2
        self.samplerate = 44100
        self.format = paInt16
        #open audio stream
        self.pa = PyAudio()
        self.save_buffer = []
    
    def record_play(self,seconds,play=False,file_play_path=None,file_save_path=None):

        NUM = int((self.samplerate/float(self.chunk)) * seconds)

        if play is True:
            swf = wave.open(file_play_path, 'rb')
        
        stream = self.pa.open(
                        format   = self.format, 
                        channels = self.channels, 
                        rate     = self.samplerate, 
                        input    = True,
                        output   = play,
                        frames_per_buffer  = self.chunk
                        )
        # wave_data = []
        while NUM:
            data = stream.read(self.chunk)
            self.save_buffer.append(data)
            wave_data=np.fromstring(data, dtype = np.short)
            wave_data.shape = -1,2
            wave_data = wave_data.T #transpose multiprocessing.Process
            # print int(data)
            print wave_data
            NUM -= 1
            if play is True:
                data = swf.readframes(self.chunk)

                stream.write(data)
                if data == " ": break

        if play is True:
            swf.close()
        #stop stream
        stream.stop_stream()
        stream.close()

        # save wav file
        def _save_wave_file(filename,data):
            wf_save = wave.open(filename, 'wb')
            wf_save.setnchannels(self.channels)
            wf_save.setsampwidth(self.pa.get_sample_size(self.format))
            wf_save.setframerate(self.samplerate)
            wf_save.writeframes("".join(data))
            wf_save.close()

        _save_wave_file(file_save_path, self.save_buffer)
        del self.save_buffer[:]
        print file_save_path, " Record Successful!"

    

    def play(self,filepath):

        wf = wave.open(filepath, 'rb')

        stream =self.pa.open(
                        format   = self.pa.get_format_from_width(wf.getsampwidth()), 
                        channels = wf.getnchannels(), 
                        rate     = wf.getframerate(), 
                        output   = True,
                        )

        NUM = int(wf.getframerate()/self.chunk * 15)

        print "playing.."
        while NUM:
            data = wf.readframes(self.chunk)
            if data == " ": break
            stream.write(data)
            NUM -= 1
        stream.stop_stream()
        del data
        stream.close()

    def close(self):
        
        self.pa.terminate()
Developer: Hoohaha, Project: Auana-P, Lines: 96, Source: tool.py


Note: The pyaudio.PyAudio.get_sample_size examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; please consult each project's license before distributing or using the code, and do not reproduce this page without permission.