本文整理汇总了Python中mycroft.session.SessionManager.touch方法的典型用法代码示例。如果您正苦于以下问题:Python SessionManager.touch方法的具体用法?Python SessionManager.touch怎么用?Python SessionManager.touch使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mycroft.session.SessionManager
的用法示例。
在下文中一共展示了SessionManager.touch方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: process_wake_up
# 需要导入模块: from mycroft.session import SessionManager [as 别名]
# 或者: from mycroft.session.SessionManager import touch [as 别名]
def process_wake_up(self, audio):
    """Leave sleep mode when the wake-up phrase is heard in *audio*.

    Refreshes the session, clears the sleeping flag, and confirms to the
    user; no-op when the wake-up phrase is not recognized.
    """
    recognized = self.wakeup_recognizer.is_recognized(audio.frame_data,
                                                      self.metrics)
    if not recognized:
        return
    SessionManager.touch()
    self.state.sleeping = False
    self.__speak("I'm awake.")  # TODO: Localization
    self.metrics.increment("mycroft.wakeup")
示例2: process_wake_word
# 需要导入模块: from mycroft.session import SessionManager [as 别名]
# 或者: from mycroft.session.SessionManager import touch [as 别名]
def process_wake_word(self, audio, timer):
    """Detect the wake word in *audio* and forward surrounding speech to STT.

    When the recognizer's hypothesis contains the wake word, the audio on
    either side of it is extracted, timing/length metrics are recorded, a
    wakeword event is emitted, and the remaining speech is transcribed.
    """
    hypothesis = self.mycroft_recognizer.transcribe(audio.frame_data,
                                                    self.metrics)
    if not self.mycroft_recognizer.contains(hypothesis):
        return

    # Locate the wake word's position inside the captured audio.
    extractor = WordExtractor(audio, self.mycroft_recognizer, self.metrics)
    timer.lap()
    extractor.calculate_range()
    self.metrics.timer("mycroft.recognizer.extractor.time_s", timer.lap())

    audio_before = extractor.get_audio_data_before()
    audio_after = extractor.get_audio_data_after()
    # Record the length of the speech on each side of the wake word.
    for segment in (audio_before, audio_after):
        self.metrics.timer("mycroft.recognizer.audio_extracted.length_s",
                           self._audio_length(segment))

    SessionManager.touch()
    self.emitter.emit("recognizer_loop:wakeword", {
        'utterance': hypothesis.hypstr,
        'session': SessionManager.get().session_id,
        'pos_begin': extractor.begin,
        'pos_end': extractor.end
    })

    try:
        self.transcribe([audio_before, audio_after])
    except sr.UnknownValueError:
        # Nothing intelligible around the wake word: prompt the user and
        # accept the next utterance without requiring the wake word again.
        self.__speak("Go ahead")
        self.state.skip_wakeword = True
    self.metrics.increment("mycroft.wakeword")
示例3: wake_up
# 需要导入模块: from mycroft.session import SessionManager [as 别名]
# 或者: from mycroft.session.SessionManager import touch [as 别名]
def wake_up(self, audio):
    """Exit sleep mode when the wake-up phrase is recognized in *audio*.

    Speaks a localized confirmation (via ``mycroft.dialog``) instead of a
    hard-coded string.
    """
    if not self.wakeup_recognizer.is_recognized(audio.frame_data,
                                                self.metrics):
        return
    SessionManager.touch()
    self.state.sleeping = False
    self.__speak(mycroft.dialog.get("i am awake", self.stt.lang))
    self.metrics.increment("mycroft.wakeup")
示例4: process
# 需要导入模块: from mycroft.session import SessionManager [as 别名]
# 或者: from mycroft.session.SessionManager import touch [as 别名]
def process(self, audio):
    """Emit a wakeword event, transcribe *audio*, and publish the result.

    Audio shorter than ``MIN_AUDIO_SIZE`` is dropped with a warning.
    Timing for the STT call is always reported (with an empty
    transcription marker when STT failed).
    """
    SessionManager.touch()
    self.emitter.emit("recognizer_loop:wakeword", {
        'utterance': self.wakeword_recognizer.key_phrase,
        'session': SessionManager.get().session_id,
    })

    if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
        LOG.warning("Audio too short to be processed")
        return

    stopwatch = Stopwatch()
    with stopwatch:
        transcription = self.transcribe(audio)

    if transcription:
        # STT succeeded, send the transcribed speech on for processing
        ident = str(stopwatch.timestamp) + str(hash(transcription))
        self.emitter.emit("recognizer_loop:utterance", {
            'utterances': [transcription],
            'lang': self.stt.lang,
            'session': SessionManager.get().session_id,
            'ident': ident
        })
        self.metrics.attr('utterances', [transcription])
    else:
        ident = str(stopwatch.timestamp)

    # Report timing metrics
    report_timing(ident, 'stt', stopwatch,
                  {'transcription': transcription,
                   'stt': self.stt.__class__.__name__})
示例5: process_skip_wake_word
# 需要导入模块: from mycroft.session import SessionManager [as 别名]
# 或者: from mycroft.session.SessionManager import touch [as 别名]
def process_skip_wake_word(self, audio):
    """Handle an utterance captured while the wake word is being skipped.

    Transcribes *audio* directly; when recognition fails, apologizes to
    the user and records a recognizer error metric. The skip flag is
    always cleared so the next interaction requires the wake word again.
    """
    SessionManager.touch()
    try:
        self.transcribe([audio])
    except sr.UnknownValueError:
        # Fix: Logger.warn() is a deprecated alias -- use warning().
        logger.warning("Speech Recognition could not understand audio")
        self.__speak("Sorry, I didn't catch that.")
        self.metrics.increment("mycroft.recognizer.error")
    self.state.skip_wakeword = False
示例6: process_audio
# 需要导入模块: from mycroft.session import SessionManager [as 别名]
# 或者: from mycroft.session.SessionManager import touch [as 别名]
def process_audio(self, audio):
    """Emit a wakeword event for *audio* and attempt to transcribe it.

    On recognition failure the user is told the speech was not
    understood.
    """
    SessionManager.touch()
    payload = {
        'utterance': self.mycroft_recognizer.key_phrase,
        'session': SessionManager.get().session_id,
    }
    self.emitter.emit("recognizer_loop:wakeword", payload)
    try:
        self.transcribe([audio])
    except sr.UnknownValueError:  # TODO: Localization
        # Fix: Logger.warn() is a deprecated alias -- use warning().
        logger.warning("Speech Recognition could not understand audio")
        self.__speak("Sorry, I didn't catch that.")
示例7: process
# 需要导入模块: from mycroft.session import SessionManager [as 别名]
# 或者: from mycroft.session.SessionManager import touch [as 别名]
def process(self, audio):
    """Announce the wakeword event, then transcribe *audio* if it is long
    enough to be worth processing.
    """
    SessionManager.touch()
    self.emitter.emit("recognizer_loop:wakeword", {
        'utterance': self.wakeword_recognizer.key_phrase,
        'session': SessionManager.get().session_id,
    })
    if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
        LOG.warning("Audio too short to be processed")
    else:
        self.transcribe(audio)
示例8: process
# 需要导入模块: from mycroft.session import SessionManager [as 别名]
# 或者: from mycroft.session.SessionManager import touch [as 别名]
def process(self, audio):
    """Announce the wakeword event and transcribe *audio* when possible.

    Skips audio shorter than ``MIN_AUDIO_SIZE`` and tells the user when
    there is no Internet connection (STT requires connectivity here).
    """
    SessionManager.touch()
    payload = {
        'utterance': self.mycroft_recognizer.key_phrase,
        'session': SessionManager.get().session_id,
    }
    self.emitter.emit("recognizer_loop:wakeword", payload)
    if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
        # Fix: Logger.warn() is deprecated; sibling code paths already
        # use warning().
        LOG.warning("Audio too short to be processed")
    elif connected():
        self.transcribe(audio)
    else:
        self.__speak("Mycroft seems not to be connected to the Internet")
示例9: try_consume_audio
# 需要导入模块: from mycroft.session import SessionManager [as 别名]
# 或者: from mycroft.session.SessionManager import touch [as 别名]
def try_consume_audio(self):
    """Pull one audio chunk off the queue and run it through recognition.

    Three paths, chosen by the current state:
    - sleeping: only the phrase "wake up" is listened for; on a match the
      session is refreshed, sleep ends, and the user is told.
    - awake, wake word heard: the audio around the wake word is extracted,
      a wakeword event is emitted, and the surrounding speech is sent to
      STT ("Go ahead" + skip flag when nothing intelligible was found).
    - awake, skip flag set: the whole chunk is transcribed directly; the
      skip flag is always cleared afterwards.
    Otherwise accumulated metrics are cleared; metrics are flushed at the
    end of every call.
    """
    timer = Stopwatch()
    hyp = None
    audio = self.queue.get()
    self.metrics.timer("mycroft.recognizer.audio.length_s", self._audio_length(audio))
    # Mark the queue item done immediately; processing happens after.
    self.queue.task_done()
    timer.start()
    if self.state.sleeping:
        # While sleeping, only the dedicated wake-up recognizer runs.
        hyp = self.wakeup_recognizer.transcribe(audio.get_wav_data(), metrics=self.metrics)
        if hyp and hyp.hypstr:
            logger.debug("sleeping recognition: " + hyp.hypstr)
        if hyp and hyp.hypstr.lower().find("wake up") >= 0:
            SessionManager.touch()
            self.state.sleeping = False
            self.__speak("I'm awake.")  # TODO: Localization
            self.metrics.increment("mycroft.wakeup")
    else:
        if not self.state.skip_wakeword:
            hyp = self.ww_recognizer.transcribe(audio.get_wav_data(), metrics=self.metrics)
        if hyp and hyp.hypstr.lower().find("mycroft") >= 0:
            # Wake word heard: locate it and split off the surrounding audio.
            extractor = WakewordExtractor(audio, self.ww_recognizer, self.metrics)
            timer.lap()
            extractor.calculate_range()
            self.metrics.timer("mycroft.recognizer.extractor.time_s", timer.lap())
            audio_before = extractor.get_audio_data_before()
            self.metrics.timer("mycroft.recognizer.audio_extracted.length_s", self._audio_length(audio_before))
            audio_after = extractor.get_audio_data_after()
            self.metrics.timer("mycroft.recognizer.audio_extracted.length_s", self._audio_length(audio_after))
            SessionManager.touch()
            payload = {
                'utterance': hyp.hypstr,
                'session': SessionManager.get().session_id,
                'pos_begin': int(extractor.range.begin),
                'pos_end': int(extractor.range.end)
            }
            self.emitter.emit("recognizer_loop:wakeword", payload)
            try:
                self.transcribe([audio_before, audio_after])
            except sr.UnknownValueError:
                # No usable speech around the wake word: prompt and let the
                # next chunk through without the wake word.
                self.__speak("Go ahead")
                self.state.skip_wakeword = True
            self.metrics.increment("mycroft.wakeword")
        elif self.state.skip_wakeword:
            # Previous turn said "Go ahead": transcribe this chunk directly.
            SessionManager.touch()
            try:
                self.transcribe([audio])
            except sr.UnknownValueError:
                logger.warn("Speech Recognition could not understand audio")
                self.__speak("Sorry, I didn't catch that.")
                self.metrics.increment("mycroft.recognizer.error")
            self.state.skip_wakeword = False
        else:
            # NOTE(review): no wake word and not skipping -- discard the
            # metrics gathered for this chunk.
            self.metrics.clear()
    self.metrics.flush()
示例10: wake_up
# 需要导入模块: from mycroft.session import SessionManager [as 别名]
# 或者: from mycroft.session.SessionManager import touch [as 别名]
def wake_up(self, audio):
    """Leave sleep mode and notify listeners when the wake word is found.

    Emits ``recognizer_loop:awoken`` on the bus rather than speaking
    directly.
    """
    if not self.wakeup_recognizer.found_wake_word(audio.frame_data):
        return
    SessionManager.touch()
    self.state.sleeping = False
    self.emitter.emit('recognizer_loop:awoken')
    self.metrics.increment("mycroft.wakeup")