

C# SpeechRecognitionEngine.UpdateRecognizerSetting Method Code Examples

This article collects typical usage examples of the C# SpeechRecognitionEngine.UpdateRecognizerSetting method from open-source projects. If you have been wondering what SpeechRecognitionEngine.UpdateRecognizerSetting does or how to call it, the curated examples below should help. You can also browse further usage examples of the SpeechRecognitionEngine class.


Ten code examples of SpeechRecognitionEngine.UpdateRecognizerSetting are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C# code examples.
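Before the examples, here is a minimal sketch of the basic call pattern. This is an illustrative sketch, not taken from any of the projects below; the two setting names are the ones that recur throughout the examples, and support for a given setting depends on the installed recognizer, so the calls are wrapped defensively.

using System;
using System.Speech.Recognition;

class UpdateSettingSketch
{
    static void Main()
    {
        using (var sre = new SpeechRecognitionEngine())
        {
            try
            {
                // raise the confidence threshold below which CFG matches are
                // rejected (commonly documented as a 0-100 scale)
                sre.UpdateRecognizerSetting("CFGConfidenceRejectionThreshold", 60);

                // turn off acoustic-model adaptation, as the Kinect examples below do
                sre.UpdateRecognizerSetting("AdaptationOn", 0);
            }
            catch (Exception ex)
            {
                // which settings are supported (and which exception an unsupported
                // name raises) varies by recognizer and engine version
                Console.WriteLine("Setting not supported: " + ex.Message);
            }
        }
    }
}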

Example 1: SpeechRecognizer

    /*
     * SpeechRecognizer
     *
     * @param GName - grammar file name
     */
    public SpeechRecognizer(string GName, int minConfidence)
    {
        //creates the speech recognizer engine
        sr = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("pt-PT"));
        sr.SetInputToDefaultAudioDevice();
        Console.WriteLine("confiança : " + minConfidence);
        sr.UpdateRecognizerSetting("CFGConfidenceRejectionThreshold", minConfidence);

        Grammar gr = null;

        //verifies that the file exists and loads the Grammar file, else reports an error
        if (System.IO.File.Exists(GName))
        {
            gr = new Grammar(GName);
            gr.Enabled = true;

            //load Grammar to speech engine (only when one was actually created;
            //LoadGrammar throws on a null grammar)
            sr.LoadGrammar(gr);
        }
        else
            Console.WriteLine("Can't read grammar file");

        //assigns a method, to execute when speech is recognized
        sr.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);

        //assigns a method, to execute when speech is NOT recognized
        sr.SpeechRecognitionRejected +=
          new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRecognitionRejected);

        // Start asynchronous, continuous speech recognition.
        sr.RecognizeAsync(RecognizeMode.Multiple);
    }
Author: Felgueiras, Project: IM-Calculator, Lines: 37, Source: SpeechRecognizer.cs
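A note on Example 1: UpdateRecognizerSetting has a read counterpart, QueryRecognizerSetting, which is useful for confirming what a threshold was before and after changing it. A minimal sketch, not part of the original project:

using System;
using System.Speech.Recognition;

class QuerySettingSketch
{
    static void Main()
    {
        using (var sr = new SpeechRecognitionEngine())
        {
            // read the current value, update it, then read it back
            object before = sr.QueryRecognizerSetting("CFGConfidenceRejectionThreshold");
            sr.UpdateRecognizerSetting("CFGConfidenceRejectionThreshold", 70);
            object after = sr.QueryRecognizerSetting("CFGConfidenceRejectionThreshold");
            Console.WriteLine("threshold: {0} -> {1}", before, after);
        }
    }
}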

Example 2: NMInput

        //I know that GrammarLoaded is bad, but there's no good way to get the delegate surfaced out of AerInput into AerTalk yet.
        // This could be solved with a service registry, but I haven't thought that through yet.
        // It could also be solved by using RecognitionEngine.LoadGrammar() instead of the async version again, but
        // I rather like the async version.
        public NMInput(string pathToGrammar = @"Grammars\", EventHandler<LoadGrammarCompletedEventArgs> GrammarLoaded = null)
        {
            LoadSettings();
            try
            {
                RecognitionEngine = new SpeechRecognitionEngine(new CultureInfo(_CultureInfo));
            }
            catch (ArgumentException e)
            {
                NMDebug.LogError("Could not load speech recognizer with the current Culture settings (" + _CultureInfo + "), using default");
                RecognitionEngine = new SpeechRecognitionEngine();
            }

            RecognitionEngine.SetInputToDefaultAudioDevice();
            LoadGrammar(pathToGrammar, GrammarLoaded);
            RecognitionEngine.SpeechRecognized += this.SpeechRecognized_Handler;

            RecognitionEngine.UpdateRecognizerSetting("ResponseSpeed", _ResponseSpeed);
            NewInput = false;
            RecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
Author: antrys, Project: Navigation-Matrix, Lines: 25, Source: NMInput.cs
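The comment at the top of Example 2 alludes to loading grammars asynchronously. For context, here is a minimal sketch of the engine's async loading pattern; the grammar path is hypothetical, and LoadGrammarAsync raises LoadGrammarCompleted once the grammar is ready:

using System;
using System.Speech.Recognition;

class AsyncGrammarSketch
{
    static void Main()
    {
        var engine = new SpeechRecognitionEngine();
        engine.LoadGrammarCompleted += (sender, e) =>
        {
            // e.Error is non-null if loading failed; e.Grammar is the loaded grammar
            Console.WriteLine(e.Error == null
                ? "Loaded grammar: " + e.Grammar.Name
                : "Load failed: " + e.Error.Message);
        };
        engine.LoadGrammarAsync(new Grammar(@"Grammars\commands.grxml")); // hypothetical path
    }
}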

Example 3: CreateSpeechRecognizer

 private void CreateSpeechRecognizer(KinectSensor sensor)
 {
     if (this.speechEngine.ContainsKey(sensor)) return;
     RecognizerInfo ri = SpeechRecognitionEngine.InstalledRecognizers()
         .Where(r => r.Culture.Name == "en-US").FirstOrDefault();
     if (ri == null)
     {
         return;
     }
     SpeechRecognitionEngine speech = new SpeechRecognitionEngine(ri.Id);
     var words = new Choices();
     words.Add("start");
     words.Add("next");
     words.Add("continue");
     words.Add("pause");
     words.Add("stop");
     var gb = new GrammarBuilder();
     gb.Culture = ri.Culture;
     gb.Append(words);
     var g = new Grammar(gb);
     speech.LoadGrammar(g);
     speech.SpeechRecognized += SpeechRecognized;
     // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
     // This will prevent recognition accuracy from degrading over time.
     speech.UpdateRecognizerSetting("AdaptationOn", 0);
     speech.SetInputToAudioStream(sensor.AudioSource.Start(), new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
     speech.RecognizeAsync(RecognizeMode.Multiple);
     this.speechEngine[sensor] = speech;
     this.helpbox.Text = "Keyboard shortcuts: space; Speech commands: start, next, pause, continue, stop.";
 }
Author: Zillode, Project: OSCeleton-KinectSDK, Lines: 30, Source: MainWindow.xaml.cs
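Several of the examples (3 through 6, 8, and 10) pass the same literal arguments to SetInputToAudioStream. For readability, this is what those numbers mean, collected into a small helper. This is a sketch using the System.Speech types (Example 4 uses the equivalent Microsoft.Speech types); the values describe the 16 kHz, 16-bit, mono PCM stream the Kinect audio source produces:

using System.Speech.AudioFormat;

static class KinectAudioFormat
{
    public static SpeechAudioFormatInfo Create()
    {
        return new SpeechAudioFormatInfo(
            EncodingFormat.Pcm,
            16000,  // samples per second
            16,     // bits per sample
            1,      // channels (mono)
            32000,  // average bytes per second = 16000 samples * 2 bytes
            2,      // block align: bytes per sample frame
            null);  // no format-specific data
    }
}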

Example 4: launchVoiceRecognizer

        public void launchVoiceRecognizer()
        {
            //Get the info of the voice recognizer engine the user wants to use
            //RecognizerInfo recognizer = GetKinectRecognizer();
            RecognizerInfo recognizer = null;
            ReadOnlyCollection<RecognizerInfo> allRecognizers = SpeechRecognitionEngine.InstalledRecognizers();
            for (int i = 0; i < allRecognizers.Count; i++)
            {
                if (allRecognizers[i].Id == server.serverMasterOptions.audioOptions.recognizerEngineID)
                {
                    recognizer = allRecognizers[i];
                    break;
                }
            }
            if (recognizer == null)
            {
                throw new Exception("Couldn't find voice recognizer core.");
            }

            //Wait 4 seconds for the Kinect to be ready, may not be necessary, but the sample does this
            //Thread.Sleep(4000);

            engine = new SpeechRecognitionEngine(server.serverMasterOptions.audioOptions.recognizerEngineID);
            Choices vocab = new Choices();
            for (int i = 0; i < server.serverMasterOptions.voiceCommands.Count; i++)
            {
                vocab.Add(server.serverMasterOptions.voiceCommands[i].recognizedWord);
            }

            GrammarBuilder gb = new GrammarBuilder { Culture = recognizer.Culture };
            gb.Append(vocab);
            Grammar grammar = new Grammar(gb);
            engine.LoadGrammar(grammar);

            //Setup events
            engine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(engine_SpeechRecognized);
            engine.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(engine_SpeechHypothesized);
            engine.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(engine_SpeechRecognitionRejected);

            //According to the speech recognition sample, this turns off adaptation of the acoustic model; adaptation can degrade recognizer accuracy over long sessions
            engine.UpdateRecognizerSetting("AdaptationOn", 0);

            if (server.serverMasterOptions.audioOptions.sourceID >= 0 && server.serverMasterOptions.audioOptions.sourceID < server.kinects.Count)
            {
                KinectAudioSource source = server.kinects[server.serverMasterOptions.audioOptions.sourceID].kinect.AudioSource;
                audioStream = source.Start();
                engine.SetInputToAudioStream(audioStream, new Microsoft.Speech.AudioFormat.SpeechAudioFormatInfo(Microsoft.Speech.AudioFormat.EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            }
            else
            {
                engine.SetInputToDefaultAudioDevice();
            }

            engine.RecognizeAsync(RecognizeMode.Multiple);
        }
Author: simonmssu, Project: KiwiVR, Lines: 55, Source: VoiceRecogCore.cs

Example 5: KinectInterface

        public KinectInterface()
        {
            called = false;
            // Look through all sensors and start the first connected one.
            // This requires that a Kinect is connected at the time of app startup.
            // To make your app robust against plug/unplug,
            // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit (See components in Toolkit Browser).
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (null != this.sensor)
            {
                try
                {
                    // Start the sensor!
                    this.sensor.Start();
                }
                catch (IOException)
                {
                    // Some other application is streaming from the same Kinect sensor
                    this.sensor = null;
                }
            }

            RecognizerInfo ri = GetKinectRecognizer();

            if (null != ri)
            {
                this.speechEngine = new SpeechRecognitionEngine(ri.Id);

                var choices = new Choices();
                choices.Add(new SemanticResultValue("turn lamp on", "LAMP ON"));
                choices.Add(new SemanticResultValue("turn the lamp on", "LAMP ON"));
                choices.Add(new SemanticResultValue("turn light on", "LAMP ON"));
                choices.Add(new SemanticResultValue("turn the light on", "LAMP ON"));
                choices.Add(new SemanticResultValue("turn the Table Lamp on", "LAMP ON"));
                choices.Add(new SemanticResultValue("turn Table Lamp on", "LAMP ON"));
                choices.Add(new SemanticResultValue("turn lamp off", "LAMP OFF"));
                choices.Add(new SemanticResultValue("turn the lamp of", "LAMP OFF"));
                choices.Add(new SemanticResultValue("turn light off", "LAMP OFF"));
                choices.Add(new SemanticResultValue("turn the light off", "LAMP OFF"));
                choices.Add(new SemanticResultValue("turn the Table Lamp off", "LAMP OFF"));
                choices.Add(new SemanticResultValue("turn Table Lamp off", "LAMP OFF"));
                choices.Add(new SemanticResultValue("turn printer on", "PRINTER ON"));
                choices.Add(new SemanticResultValue("turn the printer on", "PRINTER ON"));
                choices.Add(new SemanticResultValue("turn 3D printer on", "PRINTER ON"));
                choices.Add(new SemanticResultValue("turn the 3D printer on", "PRINTER ON"));
                choices.Add(new SemanticResultValue("turn printer off", "PRINTER OFF"));
                choices.Add(new SemanticResultValue("turn the printer off", "PRINTER OFF"));
                choices.Add(new SemanticResultValue("turn 3D printer off", "PRINTER OFF"));
                choices.Add(new SemanticResultValue("turn the 3D printer off", "PRINTER OFF"));
                choices.Add(new SemanticResultValue("computer", "COMPUTER"));
                choices.Add(new SemanticResultValue("house", "COMPUTER"));
                choices.Add(new SemanticResultValue("home", "COMPUTER"));

                var gb = new GrammarBuilder { Culture = ri.Culture };
                gb.Append(choices);

                var g = new Grammar(gb);

                speechEngine.LoadGrammar(g);

                /*// Create a grammar from grammar definition XML file.
                using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
                {
                    var g = new Grammar(memoryStream);
                    speechEngine.LoadGrammar(g);
                }*/

                speechEngine.SpeechRecognized += SpeechRecognized;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                speechEngine.SetInputToAudioStream(
                    sensor.AudioSource.Start(), new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
        }
Author: KyleARector, Project: SmartHome, Lines: 86, Source: Program.cs
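Example 5 maps many phrasings onto a handful of semantic values, but its SpeechRecognized handler is not shown. For completeness, a sketch of how such values are typically consumed; the handler body is illustrative, not from the original project:

private void SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    // when the grammar is built from SemanticResultValue choices,
    // the mapped value surfaces on Result.Semantics
    string command = (string)e.Result.Semantics.Value;
    Console.WriteLine("{0} (confidence {1:F2})", command, e.Result.Confidence);
}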

Example 6: KinectStart

        /// <summary>
        /// Initialize Kinect with words.
        /// </summary>
        protected void KinectStart()
        {
            RecognizerInfo ri = GetKinectRecognizer();

            if (null != ri)
            {
                //Populate the speech engine with keywords we are interested in.
                this.speechEngine = new SpeechRecognitionEngine(ri.Id);

                var gb = new GrammarBuilder { Culture = ri.Culture };

                //Build the grammar from the XML rule file if one was given,
                //otherwise fall back to the dictionary of words
                if (pathToXML != null)
                {
                    //resolve the XML grammar file against the current directory
                    string path = Path.Combine(Directory.GetCurrentDirectory(), pathToXML);
                    gb.AppendRuleReference(path);
                }
                else if (dictionary != null)
                {
                    gb.Append(dictionary);
                }
                else
                {
                    throw new NullReferenceException("Neither pathToXML nor dictionary is set.");
                }

                var g = new Grammar(gb);

                speechEngine.LoadGrammar(g);

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
                // This will prevent recognition accuracy from degrading over time.
                speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                this.audioStream = sensor.AudioSource.Start();

                speechEngine.SetInputToAudioStream(
                   this.audioStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                //speechEngine.RecognizeAsync(RecognizeMode.Multiple);
                kinnectStatus = true;
            }
            else
            {
                //Speech recognizer not found
            }
        }
Author: Wisc-HCI, Project: robot-gaze-aversion, Lines: 48, Source: KinectAudio.cs

Example 7: GetEngine

        public SpeechRecognitionEngine GetEngine()
        {
            if (recognizer != null) {
                return recognizer;
            }

            logInfo("ENGINE", "Init recognizer");
            recognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo(WSRConfig.GetInstance().language));
            recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
            recognizer.RecognizeCompleted += new EventHandler<RecognizeCompletedEventArgs>(recognizer_RecognizeCompleted);
            recognizer.AudioStateChanged += new EventHandler<AudioStateChangedEventArgs>(recognizer_AudioStateChanged);
            recognizer.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(recognizer_SpeechHypothesized);

            // Alternates
            recognizer.MaxAlternates = 2;
            logInfo("ENGINE", "MaxAlternates: " + recognizer.MaxAlternates);

            // Deep configuration: optionally disable acoustic-model adaptation
            if (!WSRConfig.GetInstance().adaptation) {
                recognizer.UpdateRecognizerSetting("AdaptationOn", 0);
            }

            // Set the input to the recognizer.
            if (!SetupDevice(recognizer)) {
                try {
                    recognizer.SetInputToDefaultAudioDevice();
                }
                catch (InvalidOperationException ex) {
                    logInfo("ENGINE", "No default input device: " + ex.Message);
                }
            }

            return recognizer;
        }
Author: Oniric75, Project: WSRMacro, Lines: 34, Source: WSRMacro.cs

Example 8: Main

        static void Main(string[] args)
        {
            AppDomain.CurrentDomain.UnhandledException += new UnhandledExceptionEventHandler(CurrentDomain_UnhandledException);

            voice = new Voice();

            commandProcessor = ConfigureCommands().CreateCommandProcessor();
            commandProcessor.CommandRecognized += sound.NotifyRecognizedCommandAsync;
            commandProcessor.CommandRejected += sound.NotifyUnrecognizedCommandAsync;

            Console.WriteLine("Attached PIR-1 devices:");
            foreach (var pir in PIRDriver.Instance.QueryAttachedDevices())
                Console.WriteLine("\t{0}", pir);

            ConfigureLightShow();
            Console.WriteLine("Configured LightShow");

            var recognizer = GetKinectRecognizer();
            using (var sensor = GetKinectSensor())
            {
                /* Skeleton-based beam control is disabled due to an OOM issue when long running.
                var beamController = new SkeletonBasedBeamControl();
                beamController.AttentionGestureDetected += delegate(SkeletonBasedBeamControl controller)
                {
                    sound.NotifyAttentionGestureRecognized();
                };
                beamController.Start(sensor);
                */

                sensor.Start();
                var source = sensor.AudioSource;

                source.AutomaticGainControlEnabled = false;
                source.EchoCancellationMode = EchoCancellationMode.None;
                source.NoiseSuppression = true;

                Console.WriteLine("Using: {0}", recognizer.Name);

                using (Stream s = source.Start())
                {
                    SpeechRecognitionEngine sre = null;
                    var sreLock = new object();

                    EventHandler<SpeechDetectedEventArgs> SreSpeechDetected = delegate(object sender, SpeechDetectedEventArgs dea) { SpeechDetected(source, dea); };

                    Action startRecognizer = delegate()
                    {
                        SpeechRecognitionEngine oldSre = null;

                        lock (sreLock)
                        {
                            if (sre != null)
                            {
                                oldSre = sre;
                            }
                            sre = new SpeechRecognitionEngine(recognizer.Id);
                            sre.UpdateRecognizerSetting("AdaptationOn", 1);
                            sre.UpdateRecognizerSetting("PersistedBackgroundAdaptation", 1);
                            sre.LoadGrammar(commandProcessor.CreateGrammar());

                            sre.SpeechDetected += SreSpeechDetected;
                            sre.SpeechHypothesized += SreSpeechHypothesized;
                            sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
                            sre.AudioSignalProblemOccurred += SreAudioSignalProblemOccurred;

                            sre.EndSilenceTimeoutAmbiguous = TimeSpan.FromMilliseconds(AmbiguousSilenceTimeout);
                            sre.EndSilenceTimeout = TimeSpan.FromMilliseconds(UnambiguousSilenceTimeout);

                            sre.SpeechRecognized += delegate(object sender, SpeechRecognizedEventArgs r)
                            {
                                Console.WriteLine("Handling text {0} in command processor", r.Result.Text);
                                try
                                {
                                    commandProcessor.ProcessSpeech(r.Result);
                                }
                                catch (Exception ex)
                                {
                                    Console.WriteLine("Command handler failed: " + ex.ToString());
                                    voice.SpeakAsync("Failed to execute command. Sorry!");
                                }
                            };

                            sre.SetInputToAudioStream(s,
                                                      new SpeechAudioFormatInfo(
                                                          EncodingFormat.Pcm, 16000, 16, 1,
                                                          32000, 2, null));
                            sre.RecognizeAsync(RecognizeMode.Multiple);
                            Trace.TraceInformation("New recognizer started");

                            if (oldSre != null)
                            {
                                oldSre.RecognizeAsyncStop();

                                oldSre.SpeechDetected -= SreSpeechDetected;
                                oldSre.SpeechHypothesized -= SreSpeechHypothesized;
                                oldSre.SpeechRecognitionRejected -= SreSpeechRecognitionRejected;
                                oldSre.AudioSignalProblemOccurred -= SreAudioSignalProblemOccurred;

                                oldSre.Dispose();
                                Trace.TraceInformation("Old recognizer disposed");
//......... remainder of the code omitted .........
Author: rdodgen, Project: Ezri, Lines: 101, Source: Program.cs

Example 9: InitializeSpeechEngine

        void InitializeSpeechEngine(SpeechRecognitionEngine sre)
        {
            // Log function entrance
            TraceLog.TraceFunction();

            try
            {
                // initialize and cache format info
                formatInfo = new SpeechAudioFormatInfo(defaultSampleRate, defaultBitsPerSample, defaultAudioChannels);

                // allow the engine to accept compiled CFG grammars from a trusted source
                sre.UpdateRecognizerSetting("AssumeCFGFromTrustedSource", 1);

                string fileName = @"TELLME-SMS-LM.cfgp";
                string appDataPath = HttpContext.Current.Server.MapPath("~/Content/grammars");
                string grammarPath = Path.Combine(appDataPath, fileName);
                TraceLog.TraceInfo("Grammar path: " + grammarPath);

                // make sure the grammar files are copied over from the approot directory to the appDataPath
                InitializeGrammar(grammarPath, appDataPath, fileName);

                // initialize and load the grammar
                Grammar grammar = new Grammar(grammarPath);
                grammar.Enabled = true;
                sre.LoadGrammar(grammar);
            }
            catch (Exception ex)
            {
                TraceLog.TraceError("Speech Engine initialization failed: " + ex.Message);
            }
        }
Author: ogazitt, Project: zaplify, Lines: 31, Source: SpeechResource.cs

Example 10: MainWindow


//......... earlier part of the code omitted .........

                GrammarBuilder mouse = new GrammarBuilder { Culture = ri.Culture };
                // Any window
                mouse.Append(new Choices("mouse mode"));
                var mg = new Grammar(mouse);
                this.speechEngine.LoadGrammar(mg);

                GrammarBuilder click = new GrammarBuilder { Culture = ri.Culture };
                click.Append(new Choices("click", "double click", "right click"));
                var clickGram = new Grammar(click);
                this.speechEngine.LoadGrammar(clickGram);

                GrammarBuilder go = new GrammarBuilder { Culture = ri.Culture };
                go.Append(new Choices("lets hack", "shut it down"));
                var goGram = new Grammar(go);
                this.speechEngine.LoadGrammar(goGram);

                this.speechEngine.SpeechRecognized += this.SpeechRecognized;
                this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;

                // let the convertStream know speech is going active
                this.convertStream.SpeechActive = true;

                // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model. 
                // This will prevent recognition accuracy from degrading over time.
                speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

                this.speechEngine.SetInputToAudioStream(
                    this.convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            else
            {
                this.StatusText = "No recognizer";
            }

            // Create the drawing group we'll use for drawing
            this.drawingGroup = new DrawingGroup();

            // Create an image source that we can use in our image control
            this.imageSource = new DrawingImage(this.drawingGroup);

            // use the window object as the view model in this simple example
            this.DataContext = this;

            // initialize the components (controls) of the window
            this.InitializeComponent();

            KnockSegment1 knockSegment1 = new KnockSegment1();
            KnockSegment2 knockSegment2 = new KnockSegment2();
            KnockSegment3 knockSegment3 = new KnockSegment3();
            SlapSegment1 slapSegment1 = new SlapSegment1();
            SlapSegment2 slapSegment2 = new SlapSegment2();
            PokeSegment1 pokeSegment1 = new PokeSegment1();
            PokeSegment2 pokeSegment2 = new PokeSegment2();

            IGestureSegment[] knock = new IGestureSegment[]
            {
Author: TomWerner, Project: HandsOnInterface, Lines: 67, Source: MainWindow.xaml.cs


Note: The SpeechRecognitionEngine.UpdateRecognizerSetting method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are governed by each project's license. Do not reproduce without permission.