本文整理汇总了Python中siriObjects.uiObjects.UIAssistantUtteranceView.text方法的典型用法代码示例。如果您正苦于以下问题:Python UIAssistantUtteranceView.text方法的具体用法?Python UIAssistantUtteranceView.text怎么用?Python UIAssistantUtteranceView.text使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类siriObjects.uiObjects.UIAssistantUtteranceView
的用法示例。
在下文中一共展示了UIAssistantUtteranceView.text方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: process_recognized_speech
# 需要导入模块: from siriObjects.uiObjects import UIAssistantUtteranceView [as 别名]
# 或者: from siriObjects.uiObjects.UIAssistantUtteranceView import text [as 别名]
def process_recognized_speech(self, googleJson, requestId, dictation):
    """Handle the speech-recognition result returned by Google for a request.

    Builds a ``SpeechRecognized`` object from the best hypothesis and then
    either (a) dispatches the utterance to a matching plugin, (b) hands it to
    a plugin that is blocked waiting for a user response, or (c) reports the
    transcription and, for unmatched non-dictation input, shows a
    "not recognized" view with a web-search button.

    Args:
        googleJson: parsed JSON from the recognition service; assumed to hold
            a ``'hypotheses'`` list whose entries have ``'utterance'`` and
            ``'confidence'`` keys (schema inferred from usage -- TODO confirm).
        requestId: identifier of the Siri request being answered.
        dictation: True for plain dictation -- no plugin dispatch and no
            error view are produced in that mode.
    """
    possible_matches = googleJson['hypotheses']
    if not possible_matches:
        # Nothing recognized at all; original code silently did nothing.
        return
    best_match = possible_matches[0]['utterance']
    # Capitalize the first character; the slice is empty for 1-char input,
    # and the guard avoids an IndexError on an empty utterance.
    if best_match:
        best_match = best_match[0].upper() + best_match[1:]
    best_match_confidence = possible_matches[0]['confidence']
    self.logger.info(u"Best matching result: \"{0}\" with a confidence of {1}%".format(best_match, round(float(best_match_confidence) * 100, 2)))
    # construct a SpeechRecognized
    token = Token(best_match, 0, 0, 1000.0, True, True)
    interpretation = Interpretation([token])
    phrase = Phrase(lowConfidence=False, interpretations=[interpretation])
    recognition = Recognition([phrase])
    recognized = SpeechRecognized(requestId, recognition)

    if dictation:
        # Dictation mode: just report the transcription.
        self.send_object(recognized)
        self.send_object(RequestCompleted(requestId))
        return

    if self.current_running_plugin is None:
        plugin = PluginManager.getPluginForImmediateExecution(
            self.assistant.assistantId, best_match, self.assistant.language,
            (self.send_object, self.send_plist, self.assistant, self.current_location))
        if plugin is not None:
            # Hand the request over to the matching plugin.
            plugin.refId = requestId
            plugin.connection = self
            self.current_running_plugin = plugin
            self.send_object(recognized)
            self.current_running_plugin.start()
        else:
            # No plugin matched: tell the user and offer a web search.
            self.send_object(recognized)
            self._send_not_recognized_view(requestId, best_match)
            self.send_object(RequestCompleted(requestId))
    elif self.current_running_plugin.waitForResponse is not None:
        # do we need to send a speech recognized here? i.d.k
        # A plugin is blocked waiting for the user's answer; wake it up.
        self.current_running_plugin.response = best_match
        self.current_running_plugin.refId = requestId
        self.current_running_plugin.waitForResponse.set()
    else:
        # A plugin is running but not waiting; just report the result.
        self.send_object(recognized)
        self.send_object(RequestCompleted(requestId))

def _send_not_recognized_view(self, requestId, best_match):
    """Send a localized "not recognized" utterance view plus a web-search button."""
    view = UIAddViews(requestId)
    # Fall back to en-US when no localization exists for the assistant's language.
    not_recognized = SiriProtocolHandler.__not_recognized
    errorText = not_recognized.get(self.assistant.language, not_recognized["en-US"])
    errorView = UIAssistantUtteranceView()
    errorView.text = errorText.format(best_match)
    errorView.speakableText = errorText.format(best_match)
    view.views = [errorView]
    websearch = SiriProtocolHandler.__websearch
    websearchText = websearch.get(self.assistant.language, websearch["en-US"])
    button = UIButton()
    button.text = websearchText
    cmd = SendCommands()
    cmd.commands = [StartRequest(utterance=u"^webSearchQuery^=^{0}^^webSearchConfirmation^=^Yes^".format(best_match))]
    button.commands = [cmd]
    view.views.append(button)
    self.send_object(view)