

Python SessionManager.get Method Code Examples

This article collects typical usage examples of the Python method mycroft.session.SessionManager.get. If you have been wondering what SessionManager.get does or how to use it in your own code, the curated examples below should help. You can also explore further usage examples of the containing class, mycroft.session.SessionManager.


A total of 14 code examples of the SessionManager.get method are shown below, sorted by popularity by default.
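All of the examples follow the same basic pattern: call SessionManager.touch() to refresh the current session, then read SessionManager.get().session_id and attach it to an outgoing message payload. The sketch below distills that pattern; it is illustrative only and not taken from mycroft-core itself (the emit_wakeword function name and the emitter parameter are placeholders for the message-bus client that the listener classes in the examples hold as self.emitter).

from mycroft.session import SessionManager

def emit_wakeword(emitter, key_phrase):
    # Refresh the current session so it does not expire, then tag the
    # outgoing payload with its session_id (the pattern used in every
    # example on this page).
    SessionManager.touch()
    payload = {
        'utterance': key_phrase,
        'session': SessionManager.get().session_id,
    }
    emitter.emit("recognizer_loop:wakeword", payload)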

Example 1: process

# Required module: from mycroft.session import SessionManager [as alias]
# Or: from mycroft.session.SessionManager import get [as alias]
    def process(self, audio):
        SessionManager.touch()
        payload = {
            'utterance': self.wakeword_recognizer.key_phrase,
            'session': SessionManager.get().session_id,
        }
        self.emitter.emit("recognizer_loop:wakeword", payload)

        if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
            LOG.warning("Audio too short to be processed")
        else:
            stopwatch = Stopwatch()
            with stopwatch:
                transcription = self.transcribe(audio)
            if transcription:
                ident = str(stopwatch.timestamp) + str(hash(transcription))
                # STT succeeded, send the transcribed speech on for processing
                payload = {
                    'utterances': [transcription],
                    'lang': self.stt.lang,
                    'session': SessionManager.get().session_id,
                    'ident': ident
                }
                self.emitter.emit("recognizer_loop:utterance", payload)
                self.metrics.attr('utterances', [transcription])
            else:
                ident = str(stopwatch.timestamp)
            # Report timing metrics
            report_timing(ident, 'stt', stopwatch,
                          {'transcription': transcription,
                           'stt': self.stt.__class__.__name__})
Developer: Dark5ide, Project: mycroft-core, Lines: 33, Source: listener.py

Example 2: process_wake_word

# Required module: from mycroft.session import SessionManager [as alias]
# Or: from mycroft.session.SessionManager import get [as alias]
    def process_wake_word(self, audio, timer):
        hyp = self.mycroft_recognizer.transcribe(audio.frame_data,
                                                 self.metrics)

        if self.mycroft_recognizer.contains(hyp):
            extractor = WordExtractor(audio, self.mycroft_recognizer,
                                      self.metrics)
            timer.lap()
            extractor.calculate_range()
            self.metrics.timer("mycroft.recognizer.extractor.time_s",
                               timer.lap())
            audio_before = extractor.get_audio_data_before()
            self.metrics.timer("mycroft.recognizer.audio_extracted.length_s",
                               self._audio_length(audio_before))
            audio_after = extractor.get_audio_data_after()
            self.metrics.timer("mycroft.recognizer.audio_extracted.length_s",
                               self._audio_length(audio_after))

            SessionManager.touch()
            payload = {
                'utterance': hyp.hypstr,
                'session': SessionManager.get().session_id,
                'pos_begin': extractor.begin,
                'pos_end': extractor.end
            }
            self.emitter.emit("recognizer_loop:wakeword", payload)

            try:
                self.transcribe([audio_before, audio_after])
            except sr.UnknownValueError:
                self.__speak("Go ahead")
                self.state.skip_wakeword = True
                self.metrics.increment("mycroft.wakeword")
Developer: Irrelon, Project: mycroft-core, Lines: 35, Source: listener.py

Example 3: transcribe

# Required module: from mycroft.session import SessionManager [as alias]
# Or: from mycroft.session.SessionManager import get [as alias]
 def transcribe(self, audio):
     text = None
     try:
         # Invoke the STT engine on the audio clip
         text = self.stt.execute(audio).lower().strip()
         LOG.debug("STT: " + text)
     except sr.RequestError as e:
         LOG.error("Could not request Speech Recognition {0}".format(e))
     except ConnectionError as e:
         LOG.error("Connection Error: {0}".format(e))
         self.emitter.emit("recognizer_loop:no_internet")
     except HTTPError as e:
         if e.response.status_code == 401:
             text = "pair my device"  # phrase to start the pairing process
             LOG.warning("Access Denied at mycroft.ai")
     except Exception as e:
         LOG.error(e)
         LOG.error("Speech Recognition could not understand audio")
     if text:
         # STT succeeded, send the transcribed speech on for processing
         payload = {
             'utterances': [text],
             'lang': self.stt.lang,
             'session': SessionManager.get().session_id
         }
         self.emitter.emit("recognizer_loop:utterance", payload)
         self.metrics.attr('utterances', [text])
Developer: aatchison, Project: mycroft-core, Lines: 29, Source: listener.py

Example 4: transcribe

# Required module: from mycroft.session import SessionManager [as alias]
# Or: from mycroft.session.SessionManager import get [as alias]
    def transcribe(self, audio_segments):
        utterances = []
        threads = []
        if connected():
            for audio in audio_segments:
                if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
                    logger.debug("Audio too short to send to STT")
                    continue

                target = self._create_remote_stt_runnable(audio, utterances)
                t = threading.Thread(target=target)
                t.start()
                threads.append(t)

            for thread in threads:
                thread.join()
            if len(utterances) > 0:
                payload = {
                    'utterances': utterances,
                    'session': SessionManager.get().session_id
                }
                self.emitter.emit("recognizer_loop:utterance", payload)
                self.metrics.attr('utterances', utterances)
            else:
                raise sr.UnknownValueError
        else:  # TODO: Localization
            self.__speak("This device is not connected to the Internet")
Developer: bisaysavath, Project: mycroft-core, Lines: 29, Source: listener.py

Example 5: try_consume_audio

# Required module: from mycroft.session import SessionManager [as alias]
# Or: from mycroft.session.SessionManager import get [as alias]
    def try_consume_audio(self):
        timer = Stopwatch()
        hyp = None
        audio = self.queue.get()
        self.metrics.timer("mycroft.recognizer.audio.length_s", self._audio_length(audio))
        self.queue.task_done()
        timer.start()
        if self.state.sleeping:
            hyp = self.wakeup_recognizer.transcribe(audio.get_wav_data(), metrics=self.metrics)
            if hyp and hyp.hypstr:
                logger.debug("sleeping recognition: " + hyp.hypstr)
            if hyp and hyp.hypstr.lower().find("wake up") >= 0:
                SessionManager.touch()
                self.state.sleeping = False
                self.__speak("I'm awake.")  # TODO: Localization
                self.metrics.increment("mycroft.wakeup")
        else:
            if not self.state.skip_wakeword:
                hyp = self.ww_recognizer.transcribe(audio.get_wav_data(), metrics=self.metrics)

            if hyp and hyp.hypstr.lower().find("mycroft") >= 0:
                extractor = WakewordExtractor(audio, self.ww_recognizer, self.metrics)
                timer.lap()
                extractor.calculate_range()
                self.metrics.timer("mycroft.recognizer.extractor.time_s", timer.lap())
                audio_before = extractor.get_audio_data_before()
                self.metrics.timer("mycroft.recognizer.audio_extracted.length_s", self._audio_length(audio_before))
                audio_after = extractor.get_audio_data_after()
                self.metrics.timer("mycroft.recognizer.audio_extracted.length_s", self._audio_length(audio_after))

                SessionManager.touch()
                payload = {
                    'utterance': hyp.hypstr,
                    'session': SessionManager.get().session_id,
                    'pos_begin': int(extractor.range.begin),
                    'pos_end': int(extractor.range.end)
                }
                self.emitter.emit("recognizer_loop:wakeword", payload)

                try:
                    self.transcribe([audio_before, audio_after])
                except sr.UnknownValueError:
                    self.__speak("Go ahead")
                    self.state.skip_wakeword = True
                    self.metrics.increment("mycroft.wakeword")

            elif self.state.skip_wakeword:
                SessionManager.touch()
                try:
                    self.transcribe([audio])
                except sr.UnknownValueError:
                    logger.warn("Speech Recognition could not understand audio")
                    self.__speak("Sorry, I didn't catch that.")
                    self.metrics.increment("mycroft.recognizer.error")
                self.state.skip_wakeword = False
            else:
                self.metrics.clear()
        self.metrics.flush()
Developer: BK-University, Project: mycroft-core, Lines: 60, Source: listener.py

Example 6: publish

# Required module: from mycroft.session import SessionManager [as alias]
# Or: from mycroft.session.SessionManager import get [as alias]
 def publish(self, events):
     if 'session_id' not in events:
         session_id = SessionManager.get().session_id
         events['session_id'] = session_id
     if self.enabled:
         requests.post(
             self.url,
             headers={'Content-Type': 'application/json'},
             data=json.dumps(events), verify=False)
Developer: seymour-bootay, Project: mycroft-core, Lines: 11, Source: __init__.py

Example 7: process_audio

# Required module: from mycroft.session import SessionManager [as alias]
# Or: from mycroft.session.SessionManager import get [as alias]
 def process_audio(self, audio):
     SessionManager.touch()
     payload = {
         'utterance': self.mycroft_recognizer.key_phrase,
         'session': SessionManager.get().session_id,
     }
     self.emitter.emit("recognizer_loop:wakeword", payload)
     try:
         self.transcribe([audio])
     except sr.UnknownValueError:  # TODO: Localization
         logger.warn("Speech Recognition could not understand audio")
         self.__speak("Sorry, I didn't catch that.")
Developer: Acidburn0zzz, Project: mycroft-core, Lines: 14, Source: listener.py

Example 8: process

# Required module: from mycroft.session import SessionManager [as alias]
# Or: from mycroft.session.SessionManager import get [as alias]
    def process(self, audio):
        SessionManager.touch()
        payload = {
            'utterance': self.wakeword_recognizer.key_phrase,
            'session': SessionManager.get().session_id,
        }
        self.emitter.emit("recognizer_loop:wakeword", payload)

        if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
            LOG.warning("Audio too short to be processed")
        else:
            self.transcribe(audio)
Developer: aatchison, Project: mycroft-core, Lines: 14, Source: listener.py

Example 9: process

# Required module: from mycroft.session import SessionManager [as alias]
# Or: from mycroft.session.SessionManager import get [as alias]
    def process(self, audio):
        SessionManager.touch()
        payload = {
            'utterance': self.mycroft_recognizer.key_phrase,
            'session': SessionManager.get().session_id,
        }
        self.emitter.emit("recognizer_loop:wakeword", payload)

        if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
            LOG.warn("Audio too short to be processed")
        elif connected():
            self.transcribe(audio)
        else:
            self.__speak("Mycroft seems not to be connected to the Internet")
Developer: forslund, Project: mycroft-core, Lines: 16, Source: listener.py

Example 10: transcribe

# Required module: from mycroft.session import SessionManager [as alias]
# Or: from mycroft.session.SessionManager import get [as alias]
 def transcribe(self, audio):
     text = None
     try:
         text = self.stt.execute(audio).lower().strip()
         LOG.debug("STT: " + text)
     except sr.RequestError as e:
         LOG.error("Could not request Speech Recognition {0}".format(e))
     except HTTPError as e:
         if e.response.status_code == 401:
             text = "pair my device"
             LOG.warn("Access Denied at mycroft.ai")
     except Exception as e:
         LOG.error(e)
         LOG.error("Speech Recognition could not understand audio")
         self.__speak("Sorry, I didn't catch that")
     if text:
         payload = {
             'utterances': [text],
             'session': SessionManager.get().session_id
         }
         self.emitter.emit("recognizer_loop:utterance", payload)
         self.metrics.attr('utterances', [text])
Developer: forslund, Project: mycroft-core, Lines: 24, Source: listener.py

Example 11: _upload_wake_word

# Required module: from mycroft.session import SessionManager [as alias]
# Or: from mycroft.session.SessionManager import get [as alias]
    def _upload_wake_word(self, audio):
        ww_module = self.wake_word_recognizer.__class__.__name__
        if ww_module == 'PreciseHotword':
            model_path = self.wake_word_recognizer.precise_model
            with open(model_path, 'rb') as f:
                model_hash = md5(f.read()).hexdigest()
        else:
            model_hash = '0'

        metadata = {
            'name': self.wake_word_name.replace(' ', '-'),
            'engine': md5(ww_module.encode('utf-8')).hexdigest(),
            'time': str(int(1000 * get_time())),
            'sessionId': SessionManager.get().session_id,
            'accountId': self.account_id,
            'model': str(model_hash)
        }
        requests.post(
            self.upload_url, files={
                'audio': BytesIO(audio.get_wav_data()),
                'metadata': StringIO(json.dumps(metadata))
            }
        )
Developer: Dark5ide, Project: mycroft-core, Lines: 25, Source: mic.py

Example 12: __speak

# Required module: from mycroft.session import SessionManager [as alias]
# Or: from mycroft.session.SessionManager import get [as alias]
 def __speak(self, utterance):
     payload = {
         'utterance': utterance,
         'session': SessionManager.get().session_id
     }
     self.emitter.emit("speak", Message("speak", metadata=payload))
Developer: Irrelon, Project: mycroft-core, Lines: 8, Source: listener.py

Example 13: target

# Required module: from mycroft.session import SessionManager [as alias]
# Or: from mycroft.session.SessionManager import get [as alias]
 def target():
     self.emitter.emit(
         "speak",
         Message("speak",
                 metadata={'utterance': utterance,
                           'session': SessionManager.get().session_id}))
Developer: Alphacodeclub, Project: mycroft-core, Lines: 8, Source: listener.py

Example 14: _wait_until_wake_word

# Required module: from mycroft.session import SessionManager [as alias]
# Or: from mycroft.session.SessionManager import get [as alias]
    def _wait_until_wake_word(self, source, sec_per_buffer):
        """Listen continuously on source until a wake word is spoken

        Args:
            source (AudioSource):  Source producing the audio chunks
            sec_per_buffer (float):  Fractional number of seconds in each chunk
        """
        num_silent_bytes = int(self.SILENCE_SEC * source.SAMPLE_RATE *
                               source.SAMPLE_WIDTH)

        silence = '\0' * num_silent_bytes

        # bytearray to store audio in
        byte_data = silence

        buffers_per_check = self.SEC_BETWEEN_WW_CHECKS / sec_per_buffer
        buffers_since_check = 0.0

        # Max bytes for byte_data before audio is removed from the front
        max_size = self.sec_to_bytes(self.SAVED_WW_SEC, source)
        test_size = self.sec_to_bytes(self.TEST_WW_SEC, source)

        said_wake_word = False

        # Rolling buffer to track the audio energy (loudness) heard on
        # the source recently.  An average audio energy is maintained
        # based on these levels.
        energies = []
        idx_energy = 0
        avg_energy = 0.0
        energy_avg_samples = int(5 / sec_per_buffer)  # avg over last 5 secs

        counter = 0

        while not said_wake_word and not self._stop_signaled:
            if self._skip_wake_word():
                break
            chunk = self.record_sound_chunk(source)

            energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
            if energy < self.energy_threshold * self.multiplier:
                self._adjust_threshold(energy, sec_per_buffer)

            if len(energies) < energy_avg_samples:
                # build the average
                energies.append(energy)
                avg_energy += float(energy) / energy_avg_samples
            else:
                # maintain the running average and rolling buffer
                avg_energy -= float(energies[idx_energy]) / energy_avg_samples
                avg_energy += float(energy) / energy_avg_samples
                energies[idx_energy] = energy
                idx_energy = (idx_energy + 1) % energy_avg_samples

                # maintain the threshold using average
                if energy < avg_energy * 1.5:
                    if energy > self.energy_threshold:
                        # bump the threshold to just above this value
                        self.energy_threshold = energy * 1.2

            # Periodically output energy level stats.  This can be used to
            # visualize the microphone input, e.g. a needle on a meter.
            if counter % 3:
                with open(self.mic_level_file, 'w') as f:
                    f.write("Energy:  cur=" + str(energy) + " thresh=" +
                            str(self.energy_threshold))
                f.close()
            counter += 1

            # At first, the buffer is empty and must fill up.  After that
            # just drop the first chunk bytes to keep it the same size.
            needs_to_grow = len(byte_data) < max_size
            if needs_to_grow:
                byte_data += chunk
            else:  # Remove beginning of audio and add new chunk to end
                byte_data = byte_data[len(chunk):] + chunk

            buffers_since_check += 1.0
            if buffers_since_check > buffers_per_check:
                buffers_since_check -= buffers_per_check
                chopped = byte_data[-test_size:] \
                    if test_size < len(byte_data) else byte_data
                audio_data = chopped + silence
                said_wake_word = \
                    self.wake_word_recognizer.found_wake_word(audio_data)
                # If the wake word was successfully detected, save the audio
                # clip to a file.
                if self.save_wake_words and said_wake_word:
                    audio = self._create_audio_data(byte_data, source)
                    stamp = str(int(1000 * get_time()))
                    uid = SessionManager.get().session_id
                    if not isdir(self.save_wake_words_dir):
                        mkdir(self.save_wake_words_dir)

                    dr = self.save_wake_words_dir
                    ww = self.wake_word_name.replace(' ', '-')
                    filename = join(dr, ww + '.' + stamp + '.' + uid + '.wav')
                    with open(filename, 'wb') as f:
                        f.write(audio.get_wav_data())

#......... some of this method's code has been omitted .........
Developer: aatchison, Project: mycroft-core, Lines: 103, Source: mic.py
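The rolling-average energy tracking inside _wait_until_wake_word can be hard to follow inline, so here is a standalone sketch of just that bookkeeping. It is illustrative only; the helper name and signature are not part of mycroft-core.

def update_rolling_energy(energies, idx, avg, energy, window):
    # Maintain a fixed-size rolling buffer of recent energy readings and
    # their running average, mirroring the logic in the example above.
    if len(energies) < window:
        # Still filling the buffer: accumulate toward the average.
        energies.append(energy)
        avg += float(energy) / window
    else:
        # Replace the oldest sample and adjust the running average.
        avg -= float(energies[idx]) / window
        avg += float(energy) / window
        energies[idx] = energy
        idx = (idx + 1) % window
    return energies, idx, avg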


Note: The mycroft.session.SessionManager.get examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Refer to the corresponding project's license before distributing or reusing the code; do not reproduce this page without permission.