This page collects typical usage examples of the Python method wit.Wit.post_speech. If you are unsure what Wit.post_speech does, how to call it, or what realistic uses look like, the hand-picked code samples below should help. You can also explore further usage examples of the class the method belongs to, wit.Wit.
The following shows 5 code examples of the Wit.post_speech method, ordered roughly by popularity.
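Before the individual examples, here is a minimal, self-contained sketch of the call they all revolve around. It assumes the older pywit-style client that these snippets use (a Wit class constructed from a token and exposing post_speech); the token value, the WAV file name and the 'audio/wav' content type are placeholders rather than values taken from the examples.

from pprint import pprint
from wit import Wit   # older pywit-style client, as imported in the examples below

WIT_TOKEN = 'YOUR_WIT_TOKEN'   # placeholder: your Wit.AI server access token

wit = Wit(WIT_TOKEN)

# post_speech accepts either a file-like object (Examples 1, 2 and 4)
# or raw audio data together with an explicit content_type (Examples 3 and 5).
with open('utterance.wav', 'rb') as audio:   # placeholder file name
    response = wit.post_speech(audio, content_type='audio/wav')

# The response is a dict; the examples below read keys such as
# response['outcome'] and response['msg_body'] from it.
pprint(response)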
Example 1: event_loop
# Required import: from wit import Wit [as alias]
# Or: from wit.Wit import post_speech [as alias]
def event_loop():
    wit = Wit(wit_token())
    my_mic = Mic(DEFAULT_DICT, DEFAULT_LANG, DEFAULT_DICT, DEFAULT_LANG)
    while True:
        # listen for activation hotword
        try:
            threshold, text = my_mic.passiveListen(PERSONA)
        except:
            continue
        # detected hotword
        if threshold:
            audio_file = activeListenFile(threshold)
            if audio_file:
                data = None
                try:
                    # retrieve wit intent (open the recording in binary mode)
                    data = wit.post_speech(open(audio_file, 'rb'))
                    # send to handler service
                    raise NotImplementedError('no handler code yet')
                except Exception as e:
                    print "Exception in audio_file handling:"
                    print str(e)
                if data:
                    print "Data: "
                    pprint(data)
Example 2: DecodeWaveFile
# Required import: from wit import Wit [as alias]
# Or: from wit.Wit import post_speech [as alias]
def DecodeWaveFile(self, waveFileName):
    """Build a speech decode request around Wit"""
    # Form a query for Wit speech recognition
    w = Wit(self.witToken)
    try:
        # open the recording in binary mode and hand the file object to Wit
        audio = open(waveFileName, 'rb')
        return WitAiQueryResponse(w.post_speech(audio))
    except:
        raise
Example 3: Recorder
# Required import: from wit import Wit [as alias]
# Or: from wit.Wit import post_speech [as alias]
class Recorder(threading.Thread):
    def __init__(self, lisa_client, listener):
        # Init thread class
        threading.Thread.__init__(self)
        self._stopevent = threading.Event()
        self.lisa_client = lisa_client
        self.configuration = ConfigManagerSingleton.get().getConfiguration()
        self.pipeline = listener.get_pipeline()
        self.capture_buffers = deque([])
        self.running_state = False
        self.wit = Wit(self.configuration['wit_token'])
        self.wit_confidence = 0.5
        if self.configuration.has_key('wit_confidence'):
            self.wit_confidence = self.configuration['wit_confidence']
        self.record_time_start = 0
        self.record_time_end = 0
        # Get app sink
        self.rec_sink = self.pipeline.get_by_name('rec_sink')
        self.rec_sink.connect('new-buffer', self._capture_audio_buffer)
        # Configure vader
        # Use the vader on the pocketsphinx branch rather than a vader on the record branch,
        # because vader forces the stream to 8KHz, so record quality would be worse
        vader = self.pipeline.get_by_name('vad_asr')
        vader.connect('vader-start', self._vader_start)
        vader.connect('vader-stop', self._vader_stop)
        # Get elements to connect/disconnect pocketsphinx during record
        self.asr_tee = self.pipeline.get_by_name('asr_tee')
        self.asr_sink = self.pipeline.get_by_name('asr_sink')
        self.asr = self.pipeline.get_by_name('asr')
        self.asr_tee.unlink(self.asr_sink)
        # Start thread
        self.start()

    def stop(self):
        # Raise stop event
        self.running_state = False
        self._stopevent.set()

    def get_running_state(self):
        """
        Is the recorder recording?
        """
        return self.running_state

    def set_running_state(self, running):
        """
        Start/Stop a voice record
        """
        if running == True and self.running_state == False:
            self.running_state = True
            # Disconnect pocketsphinx from the pipeline
            self.asr_tee.link(self.asr_sink)
            self.asr_tee.unlink(self.asr)
        elif running == True and self.running_state == True:
            self.running_state = False

    def run(self):
        """
        Recorder main loop
        """
        CONTENT_TYPE = 'audio/mpeg3'
        result = ""
        retry = 1
        # Thread loop
        while not self._stopevent.isSet():
            # Wait for a record order
            if self.running_state == False:
                sleep(.1)
                continue
            # Activate capture, wait for 2s of silence before cancelling
            wit_e = None
            self.record_time_start = 0
            self.record_time_end = time.time() + 2
            self.capture_buffers.clear()
            result = ""
            print '\n [Recording]' + ' ' * 20 + '[Recording]'
            # Send captured voice to wit
            try:
                result = self.wit.post_speech(data=self._read_audio_buffer(), content_type=CONTENT_TYPE)
            except Exception as e:
                wit_e = e
            # If record was not stopped during recording
            if self.running_state == True:
                # If Wit did not succeed
                if len(result) == 0 or result.has_key('outcome') == False or result['outcome'].has_key('confidence') == False or result['outcome']['confidence'] < self.wit_confidence:
                    if wit_e is not None:
                        log.err("Wit exception : " + str(wit_e))
                    # If retry is available and vader detected an utterance
#......... portion of the code omitted .........
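The listing above breaks off exactly where the Recorder decides what to do with a low-confidence answer. As a rough, standalone illustration of that acceptance check (a hypothetical helper, not code from the project; the 'outcome' and 'confidence' keys mirror what Examples 3 and 5 check in the response), the pattern boils down to:

def accept_wit_result(result, min_confidence=0.5):
    """Return True when a Wit response looks usable, mirroring the
    checks made in Examples 3 and 5 (hypothetical helper)."""
    if not result:   # empty string or empty dict
        return False
    outcome = result.get('outcome')
    if not isinstance(outcome, dict) or 'confidence' not in outcome:
        return False   # unexpected response shape
    return outcome['confidence'] >= min_confidence

Example 3 additionally retries when the confidence is too low but the vader element did detect an utterance; that retry branch is the part elided above.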
Example 4: raw_input
# Required import: from wit import Wit [as alias]
# Or: from wit.Wit import post_speech [as alias]
# Standard Modules
import sys
import os

# Modules from the GitHub
from wit import Wit

# The chunks
from recorder import Recorder
from interpreter import Interpreter

# Constants
SECONDS = 4

if __name__ == '__main__':
    # Set the Wit.AI token from an environment variable
    if 'WIT_TOKEN' not in os.environ:
        os.environ['WIT_TOKEN'] = raw_input("Enter your Wit.AI token: ")
    witToken = os.environ['WIT_TOKEN']
    # Instantiate the chunks
    aRecording = Recorder(SECONDS)
    anInterpreting = Interpreter()
    witty = Wit(witToken)
    # Run with it
    audio_file = aRecording.record()
    result = witty.post_speech(audio_file.getvalue())
    anInterpreting.interpret(result)
    # And we're done
    sys.exit(0)
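Example 4 posts audio_file.getvalue(), which suggests that Recorder.record() returns an in-memory buffer rather than a path on disk. As a rough stand-in for that flow (hypothetical helper; the sample rate, channel count and sample width are assumptions, not values from the project), already-captured PCM frames can be wrapped into an in-memory WAV container like this:

import io
import wave

def frames_to_wav_buffer(frames, rate=16000, channels=1, sample_width=2):
    """Wrap raw PCM frames in a WAV container held in memory, so that
    buf.getvalue() yields bytes suitable for Wit.post_speech, much like
    Example 4 (hypothetical stand-in for Recorder.record())."""
    buf = io.BytesIO()
    wav = wave.open(buf, 'wb')
    wav.setnchannels(channels)
    wav.setsampwidth(sample_width)   # 2 bytes per sample -> 16-bit PCM
    wav.setframerate(rate)
    wav.writeframes(frames)
    wav.close()
    return buf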
Example 5: Recorder
# Required import: from wit import Wit [as alias]
# Or: from wit.Wit import post_speech [as alias]
#......... earlier portion of this code omitted .........
                continue
            # Send activated record to Wit
            wit_e = None
            result = ""
            try:
                if self.configuration['asr'] == "ispeech":
                    for b in self._readAudioBuffer(file_mode=True):
                        pass
                    params = {}
                    params["action"] = "recognize"
                    params["apikey"] = "developerdemokeydeveloperdemokey"
                    params["freeform"] = "3"
                    params["locale"] = "fr-FR"
                    params["output"] = "json"
                    params["content-type"] = "speex"
                    params["speexmode"] = "2"
                    params["audio"] = base64.b64encode(open(self.temp_file, 'rt').read()).replace(b'\n', b'')
                    result = requests.get("http://api.ispeech.org/api/rest?" + urlencode(params))
                    result = self.wit.get_message(query=result.json()['text'], context=self.wit_context)
                elif self.configuration['asr'] == "google":
                    for b in self._readAudioBuffer(file_mode=True):
                        pass
                    url = 'https://www.google.com/speech-api/v2/recognize?output=json&lang=fr-fr&key=AIzaSyCQv4U1mTaw_r_j1bWb1peeaTihzV0q-EQ'
                    audio = open(self.temp_file, "rb").read()
                    header = {"Content-Type": "audio/x-flac; rate=16000"}
                    post = urlopen(Request(url, audio, header))
                    result = loads(post.read().split("\n")[1])['result'][0]['alternative'][0]['transcript']
                    result = self.wit.get_message(query=result, context=self.wit_context)
                # Default Wit
                else:
                    result = self.wit.post_speech(data=self._readAudioBuffer(), content_type=CONTENT_TYPE, context=self.wit_context)
                    result['msg_body'] = result['msg_body'].encode("utf-8")
            except Exception as e:
                wit_e = e
            # If record was stopped during Wit access
            if self._stopevent.isSet():
                break
            # Question mode
            if len(result) > 0 and self.continuous_mode == True and result.has_key('msg_body') == True and len(result['msg_body']) > 0:
                # Send answer
                self.factory.sendChatToServer(message=result['msg_body'], outcome=result['outcome'])
            # If Wit did not succeed
            elif len(result) == 0 or result.has_key('outcome') == False or result['outcome'].has_key('confidence') == False or result['outcome']['confidence'] < self.configuration['wit_confidence']:
                if wit_e is not None:
                    log.err("Wit exception : {0}".format(str(wit_e)))
                elif len(result) == 0:
                    log.err("No response from Wit")
                elif result.has_key('outcome') == False or result['outcome'].has_key('confidence') == False:
                    log.err("Wit response syntax error")
                    log.err("result : {0}".format(result))
                elif result['outcome']['confidence'] < self.configuration['wit_confidence']:
                    log.err("Wit confidence {confidence} too low : {result}".format(confidence=result['outcome']['confidence'], result=result['msg_body']))
                else:
                    log.err("Error response from Wit : {0}".format(result['msg_body']))
            # Send recognized intent to the server
            else:
                log.msg("Wit result : {0}".format(result['msg_body']))
                self.factory.sendChatToServer(message=result['msg_body'], outcome=result['outcome'])