This article collects typical code examples of the Python class mycroft.session.SessionManager. If you have been asking yourself what SessionManager is for, or how to use it, the curated examples below should help.
The 15 code examples of the SessionManager class shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
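Before the examples, here is a minimal sketch of the pattern they all share: SessionManager.touch() marks the current session as active (extending its lifetime), and SessionManager.get().session_id stamps an outgoing payload with that session. This sketch is inferred from the calls used in the examples below rather than from the full SessionManager API, and the payload contents are invented for illustration.

from mycroft.session import SessionManager

# Keep the current session alive so the following events are grouped
# under the same interaction.
SessionManager.touch()

# Stamp an event payload with the current session id, mirroring the
# pattern used throughout the examples below.
payload = {
    'utterance': "hey mycroft",  # illustrative data, not from the excerpts
    'session': SessionManager.get().session_id,
}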
Example 1: process_wake_word
def process_wake_word(self, audio, timer):
    hyp = self.mycroft_recognizer.transcribe(audio.frame_data,
                                             self.metrics)
    if self.mycroft_recognizer.contains(hyp):
        extractor = WordExtractor(audio, self.mycroft_recognizer,
                                  self.metrics)
        timer.lap()
        extractor.calculate_range()
        self.metrics.timer("mycroft.recognizer.extractor.time_s",
                           timer.lap())
        audio_before = extractor.get_audio_data_before()
        self.metrics.timer("mycroft.recognizer.audio_extracted.length_s",
                           self._audio_length(audio_before))
        audio_after = extractor.get_audio_data_after()
        self.metrics.timer("mycroft.recognizer.audio_extracted.length_s",
                           self._audio_length(audio_after))

        SessionManager.touch()
        payload = {
            'utterance': hyp.hypstr,
            'session': SessionManager.get().session_id,
            'pos_begin': extractor.begin,
            'pos_end': extractor.end
        }
        self.emitter.emit("recognizer_loop:wakeword", payload)

        try:
            self.transcribe([audio_before, audio_after])
        except sr.UnknownValueError:
            self.__speak("Go ahead")
            self.state.skip_wakeword = True

        self.metrics.increment("mycroft.wakeword")
Example 2: wake_up
def wake_up(self, audio):
    if self.wakeup_recognizer.is_recognized(audio.frame_data,
                                            self.metrics):
        SessionManager.touch()
        self.state.sleeping = False
        self.__speak(mycroft.dialog.get("i am awake", self.stt.lang))
        self.metrics.increment("mycroft.wakeup")
Example 3: process_wake_up
def process_wake_up(self, audio):
    if self.wakeup_recognizer.is_recognized(audio.frame_data,
                                            self.metrics):
        SessionManager.touch()
        self.state.sleeping = False
        self.__speak("I'm awake.")  # TODO: Localization
        self.metrics.increment("mycroft.wakeup")
Example 4: process
def process(self, audio):
    SessionManager.touch()
    payload = {
        'utterance': self.wakeword_recognizer.key_phrase,
        'session': SessionManager.get().session_id,
    }
    self.emitter.emit("recognizer_loop:wakeword", payload)

    if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
        LOG.warning("Audio too short to be processed")
    else:
        stopwatch = Stopwatch()
        with stopwatch:
            transcription = self.transcribe(audio)
        if transcription:
            ident = str(stopwatch.timestamp) + str(hash(transcription))
            # STT succeeded, send the transcribed speech on for processing
            payload = {
                'utterances': [transcription],
                'lang': self.stt.lang,
                'session': SessionManager.get().session_id,
                'ident': ident
            }
            self.emitter.emit("recognizer_loop:utterance", payload)
            self.metrics.attr('utterances', [transcription])
        else:
            ident = str(stopwatch.timestamp)
        # Report timing metrics
        report_timing(ident, 'stt', stopwatch,
                      {'transcription': transcription,
                       'stt': self.stt.__class__.__name__})
Example 5: process_skip_wake_word
def process_skip_wake_word(self, audio):
    SessionManager.touch()
    try:
        self.transcribe([audio])
    except sr.UnknownValueError:
        logger.warn("Speech Recognition could not understand audio")
        self.__speak("Sorry, I didn't catch that.")
        self.metrics.increment("mycroft.recognizer.error")
    self.state.skip_wakeword = False
Example 6: process_audio
def process_audio(self, audio):
    SessionManager.touch()
    payload = {
        'utterance': self.mycroft_recognizer.key_phrase,
        'session': SessionManager.get().session_id,
    }
    self.emitter.emit("recognizer_loop:wakeword", payload)
    try:
        self.transcribe([audio])
    except sr.UnknownValueError:  # TODO: Localization
        logger.warn("Speech Recognition could not understand audio")
        self.__speak("Sorry, I didn't catch that.")
Example 7: process
def process(self, audio):
    SessionManager.touch()
    payload = {
        'utterance': self.wakeword_recognizer.key_phrase,
        'session': SessionManager.get().session_id,
    }
    self.emitter.emit("recognizer_loop:wakeword", payload)
    if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
        LOG.warning("Audio too short to be processed")
    else:
        self.transcribe(audio)
Example 8: process
def process(self, audio):
    SessionManager.touch()
    payload = {
        'utterance': self.mycroft_recognizer.key_phrase,
        'session': SessionManager.get().session_id,
    }
    self.emitter.emit("recognizer_loop:wakeword", payload)
    if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
        LOG.warn("Audio too short to be processed")
    elif connected():
        self.transcribe(audio)
    else:
        self.__speak("Mycroft seems not to be connected to the Internet")
Example 9: transcribe
def transcribe(self, audio):
    text = None
    try:
        # Invoke the STT engine on the audio clip
        text = self.stt.execute(audio).lower().strip()
        LOG.debug("STT: " + text)
    except sr.RequestError as e:
        LOG.error("Could not request Speech Recognition {0}".format(e))
    except ConnectionError as e:
        LOG.error("Connection Error: {0}".format(e))
        self.emitter.emit("recognizer_loop:no_internet")
    except HTTPError as e:
        if e.response.status_code == 401:
            text = "pair my device"  # phrase to start the pairing process
            LOG.warning("Access Denied at mycroft.ai")
    except Exception as e:
        LOG.error(e)
        LOG.error("Speech Recognition could not understand audio")
    if text:
        # STT succeeded, send the transcribed speech on for processing
        payload = {
            'utterances': [text],
            'lang': self.stt.lang,
            'session': SessionManager.get().session_id
        }
        self.emitter.emit("recognizer_loop:utterance", payload)
        self.metrics.attr('utterances', [text])
Example 10: transcribe
def transcribe(self, audio_segments):
    utterances = []
    threads = []
    if connected():
        # Fan each audio segment out to its own remote STT worker thread
        for audio in audio_segments:
            if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
                logger.debug("Audio too short to send to STT")
                continue
            target = self._create_remote_stt_runnable(audio, utterances)
            t = threading.Thread(target=target)
            t.start()
            threads.append(t)

        for thread in threads:
            thread.join()
        if len(utterances) > 0:
            payload = {
                'utterances': utterances,
                'session': SessionManager.get().session_id
            }
            self.emitter.emit("recognizer_loop:utterance", payload)
            self.metrics.attr('utterances', utterances)
        else:
            raise sr.UnknownValueError
    else:  # TODO: Localization
        self.__speak("This device is not connected to the Internet")
Example 11: publish
def publish(self, events):
    if 'session_id' not in events:
        session_id = SessionManager.get().session_id
        events['session_id'] = session_id
    if self.enabled:
        requests.post(
            self.url,
            headers={'Content-Type': 'application/json'},
            data=json.dumps(events), verify=False)
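As a usage note for Example 11: the excerpt assumes an object carrying url and enabled attributes. A self-contained, hypothetical wiring might look like the sketch below; the MetricsPublisher name, its constructor, and the URL are assumptions for illustration, not part of the excerpt above.

import json
import requests
from mycroft.session import SessionManager

class MetricsPublisher:
    # Hypothetical holder for the attributes that publish() relies on.
    def __init__(self, url, enabled=True):
        self.url = url
        self.enabled = enabled

    def publish(self, events):
        # Stamp the payload with the current session before posting.
        if 'session_id' not in events:
            events['session_id'] = SessionManager.get().session_id
        if self.enabled:
            requests.post(
                self.url,
                headers={'Content-Type': 'application/json'},
                data=json.dumps(events), verify=False)

publisher = MetricsPublisher("https://example.com/metrics")  # hypothetical URL
publisher.publish({'event': 'mycroft.wakeword'})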
Example 12: try_consume_audio
def try_consume_audio(self):
    timer = Stopwatch()
    hyp = None
    audio = self.queue.get()
    self.metrics.timer("mycroft.recognizer.audio.length_s", self._audio_length(audio))
    self.queue.task_done()
    timer.start()
    if self.state.sleeping:
        # While sleeping, only listen for the "wake up" phrase
        hyp = self.wakeup_recognizer.transcribe(audio.get_wav_data(), metrics=self.metrics)
        if hyp and hyp.hypstr:
            logger.debug("sleeping recognition: " + hyp.hypstr)
        if hyp and hyp.hypstr.lower().find("wake up") >= 0:
            SessionManager.touch()
            self.state.sleeping = False
            self.__speak("I'm awake.")  # TODO: Localization
            self.metrics.increment("mycroft.wakeup")
    else:
        if not self.state.skip_wakeword:
            hyp = self.ww_recognizer.transcribe(audio.get_wav_data(), metrics=self.metrics)

        if hyp and hyp.hypstr.lower().find("mycroft") >= 0:
            # Wake word heard: extract the audio around it and time the work
            extractor = WakewordExtractor(audio, self.ww_recognizer, self.metrics)
            timer.lap()
            extractor.calculate_range()
            self.metrics.timer("mycroft.recognizer.extractor.time_s", timer.lap())
            audio_before = extractor.get_audio_data_before()
            self.metrics.timer("mycroft.recognizer.audio_extracted.length_s", self._audio_length(audio_before))
            audio_after = extractor.get_audio_data_after()
            self.metrics.timer("mycroft.recognizer.audio_extracted.length_s", self._audio_length(audio_after))

            SessionManager.touch()
            payload = {
                'utterance': hyp.hypstr,
                'session': SessionManager.get().session_id,
                'pos_begin': int(extractor.range.begin),
                'pos_end': int(extractor.range.end)
            }
            self.emitter.emit("recognizer_loop:wakeword", payload)

            try:
                self.transcribe([audio_before, audio_after])
            except sr.UnknownValueError:
                # Wake word with no command: prompt and skip it next time
                self.__speak("Go ahead")
                self.state.skip_wakeword = True

            self.metrics.increment("mycroft.wakeword")
        elif self.state.skip_wakeword:
            # Previous clip was only the wake word; treat this one as the command
            SessionManager.touch()
            try:
                self.transcribe([audio])
            except sr.UnknownValueError:
                logger.warn("Speech Recognition could not understand audio")
                self.__speak("Sorry, I didn't catch that.")
                self.metrics.increment("mycroft.recognizer.error")
            self.state.skip_wakeword = False
        else:
            self.metrics.clear()
    self.metrics.flush()
Example 13: transcribe
def transcribe(self, audio):
    text = None
    try:
        text = self.stt.execute(audio).lower().strip()
        LOG.debug("STT: " + text)
    except sr.RequestError as e:
        LOG.error("Could not request Speech Recognition {0}".format(e))
    except HTTPError as e:
        if e.response.status_code == 401:
            text = "pair my device"
            LOG.warn("Access Denied at mycroft.ai")
    except Exception as e:
        LOG.error(e)
        LOG.error("Speech Recognition could not understand audio")
        self.__speak("Sorry, I didn't catch that")
    if text:
        payload = {
            'utterances': [text],
            'session': SessionManager.get().session_id
        }
        self.emitter.emit("recognizer_loop:utterance", payload)
        self.metrics.attr('utterances', [text])
Example 14: _upload_wake_word
def _upload_wake_word(self, audio):
    ww_module = self.wake_word_recognizer.__class__.__name__
    if ww_module == 'PreciseHotword':
        # Hash the Precise model file so uploads can be tied to the model
        model_path = self.wake_word_recognizer.precise_model
        with open(model_path, 'rb') as f:
            model_hash = md5(f.read()).hexdigest()
    else:
        model_hash = '0'

    metadata = {
        'name': self.wake_word_name.replace(' ', '-'),
        'engine': md5(ww_module.encode('utf-8')).hexdigest(),
        'time': str(int(1000 * get_time())),
        'sessionId': SessionManager.get().session_id,
        'accountId': self.account_id,
        'model': str(model_hash)
    }
    requests.post(
        self.upload_url, files={
            'audio': BytesIO(audio.get_wav_data()),
            'metadata': StringIO(json.dumps(metadata))
        }
    )
Example 15: wake_up
def wake_up(self, audio):
    if self.wakeup_recognizer.found_wake_word(audio.frame_data):
        SessionManager.touch()
        self.state.sleeping = False
        self.emitter.emit('recognizer_loop:awoken')
        self.metrics.increment("mycroft.wakeup")