C# SpeechRecognizer.CompileConstraintsAsync Method Code Examples

This article collects typical usage examples of the C# SpeechRecognizer.CompileConstraintsAsync method. If you are wondering what CompileConstraintsAsync does or how to call it, the selected examples below should help. You can also explore further usage examples of the SpeechRecognizer class that this method belongs to.


The following shows 15 code examples of the SpeechRecognizer.CompileConstraintsAsync method, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better C# code examples.
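Before the examples, here is a minimal sketch of the typical call pattern: create a SpeechRecognizer, add one or more constraints, compile them with CompileConstraintsAsync, check the compilation status, and only then start recognition. The class name, phrase list, and error handling below are illustrative assumptions and are not taken from any of the projects cited on this page.

// Minimal usage sketch (assumes a UWP project with the Microphone capability declared).
using System;
using System.Threading.Tasks;
using Windows.Media.SpeechRecognition;

public static class CompileConstraintsSketch
{
    public static async Task<string> RecognizeColorAsync()
    {
        using (var recognizer = new SpeechRecognizer())
        {
            // Constrain recognition to a short list of phrases.
            recognizer.Constraints.Add(
                new SpeechRecognitionListConstraint(new[] { "red", "green", "blue" }, "colors"));

            // Constraints must be compiled before any recognition call.
            SpeechRecognitionCompilationResult compilation = await recognizer.CompileConstraintsAsync();
            if (compilation.Status != SpeechRecognitionResultStatus.Success)
                throw new InvalidOperationException("Constraint compilation failed: " + compilation.Status);

            // Run a single recognition pass and return the recognized text, if any.
            SpeechRecognitionResult result = await recognizer.RecognizeAsync();
            return result.Status == SpeechRecognitionResultStatus.Success ? result.Text : string.Empty;
        }
    }
}

The 15 examples below follow variations of this pattern, differing mainly in the constraint type used (phrase list, dictation or web-search topic, or grammar file) and in whether a single RecognizeAsync/RecognizeWithUIAsync call or a ContinuousRecognitionSession drives recognition.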

Example 1: StartVoiceRecognition

		private async void StartVoiceRecognition()
		{
			await SpeakText( "Say Captains Log at any time to create a log entry." );

			speechRecognizerCaptainsLogCommand = new SpeechRecognizer();

			// Add and compile the "Captains Log" command constraint once, outside the loop,
			// so each pass doesn't add a duplicate constraint and recompile.
			ISpeechRecognitionConstraint commandConstraint =
				new SpeechRecognitionListConstraint( new[] { "Captains Log", "Computer Captains Log" } );
			speechRecognizerCaptainsLogCommand.Constraints.Add( commandConstraint );
			await speechRecognizerCaptainsLogCommand.CompileConstraintsAsync();

			while ( !cancellationSource.IsCancellationRequested )
			{
				// Listen for the user to say "Captains Log"
				SpeechRecognitionResult commandResult = await speechRecognizerCaptainsLogCommand.RecognizeAsync();

				if ( commandResult.Status != SpeechRecognitionResultStatus.Success
					|| commandResult.Confidence == SpeechRecognitionConfidence.Rejected
					|| cancellationSource.IsCancellationRequested )
				{
					continue;
				}
				// Recognized user saying "Captains Log"

				// Listen for the user's dictation entry
				var captainsLogDictationRecognizer = new SpeechRecognizer();

				ISpeechRecognitionConstraint dictationConstraint = 
					new SpeechRecognitionTopicConstraint( 
						SpeechRecognitionScenario.Dictation, "LogEntry", "LogEntryDictation" );

				captainsLogDictationRecognizer.Constraints.Add( dictationConstraint );

				await captainsLogDictationRecognizer.CompileConstraintsAsync();

				captainsLogDictationRecognizer.UIOptions.ExampleText = "Boldly going where no man or woman has gone before.";
				captainsLogDictationRecognizer.UIOptions.AudiblePrompt = "Go ahead";
				captainsLogDictationRecognizer.UIOptions.IsReadBackEnabled = true;
				captainsLogDictationRecognizer.UIOptions.ShowConfirmation = true;

				SpeechRecognitionResult dictationResult = await captainsLogDictationRecognizer.RecognizeWithUIAsync();

				if ( dictationResult.Status != SpeechRecognitionResultStatus.Success
					|| dictationResult.Confidence == SpeechRecognitionConfidence.Rejected
					|| string.IsNullOrWhiteSpace( dictationResult.Text )
					|| cancellationSource.IsCancellationRequested )
				{
					captainsLogDictationRecognizer.Dispose();

					continue;
				}
				// Recognized user's dictation entry

				AddLogEntry( dictationResult.Text );

				captainsLogDictationRecognizer.Dispose();
			}

			speechRecognizerCaptainsLogCommand.Dispose();
		}
Developer: dannydwarren, Project: Cortana-Location-UWP, Lines: 60, Source: RecordNotesPage.xaml.cs

Example 2: InitSpeechRecognition

        private async void InitSpeechRecognition()
        {
            try
            {

                if (speechRecognizerContinuous == null)
                {
                    speechRecognizerContinuous = new SpeechRecognizer();
                    speechRecognizerContinuous.Constraints.Add(
                        new SpeechRecognitionListConstraint(
                            new List<String>() { "Start Listening" }, "start"));
                    SpeechRecognitionCompilationResult contCompilationResult =
                        await speechRecognizerContinuous.CompileConstraintsAsync();


                    if (contCompilationResult.Status != SpeechRecognitionResultStatus.Success)
                    {
                        throw new Exception("Speech constraint compilation failed.");
                    }
                    speechRecognizerContinuous.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
                }

                await speechRecognizerContinuous.ContinuousRecognitionSession.StartAsync();
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine(ex.Message);
            }
        }
Developer: arkiq, Project: myCortana, Lines: 29, Source: gamePage.xaml.cs

Example 3: SpeechRecognitionService

 private SpeechRecognitionService()
 {
     _recognizer = new SpeechRecognizer();
     _recognizer.Constraints.Add(new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch,
         "webSearch"));
     // Blocks the constructor until constraint compilation completes.
     _recognizer.CompileConstraintsAsync().AsTask().Wait();
     _recognizer.ContinuousRecognitionSession.ResultGenerated += RecognitionFound;
 }
Developer: veler, Project: MirrorSUPINFO, Lines: 8, Source: SpeechRecognitionService.cs

Example 4: MainPage

 public MainPage()
 {
     this.InitializeComponent();
     var recognizer = new SpeechRecognizer();
     var topicconstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "Development");
     recognizer.Constraints.Add(topicconstraint);
     // Note: CompileConstraintsAsync returns an IAsyncOperation that is not awaited here.
     var result = recognizer.CompileConstraintsAsync();
 }
Developer: Karthik777, Project: chatbot, Lines: 9, Source: MainPage.xaml.cs

Example 5: StartListening

        public async void StartListening(object sender, EventArgs e)
        {
            try
            {
                //args = e;
                speechRecognizer = new SpeechRecognizer();
                StorageFolder folder = ApplicationData.Current.LocalFolder;
                var uri = new System.Uri("ms-appx:///Assets/TestGrammar.xml");
                var file = await Windows.Storage.StorageFile.GetFileFromApplicationUriAsync(uri);
                speechRecognizer.Constraints.Clear();
                speechRecognizer.Constraints.Add(new SpeechRecognitionGrammarFileConstraint(file));
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                    throw new Exception("Grammar compilation failed");

                speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
                speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;

                Debug.WriteLine("Listener initialized");
                isListening = true;
                await speechRecognizer.ContinuousRecognitionSession.StartAsync();
                uri = new System.Uri("ms-appx:///Assets/ResponseTemplates.xml");
                file = await Windows.Storage.StorageFile.GetFileFromApplicationUriAsync(uri);
                var t = new DialogueManager(file);
                var qq = t.GenerateResponse(new Dictionary<string, string>() { { "ACTION", "DESTINATION" }, { "DESTINATION", "COFFEE_SHOP" } }, ref args);
                Debug.WriteLine(qq);
                await Windows.ApplicationModel.Core.CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
               () =>
               {
                   Speak(qq);
               });
            }
            catch (Exception ex)
            {
                Debug.WriteLine(ex);
                isListening = false;
            }

            //return "I was returned";
        }
Developer: Tapanito, Project: F20CA, Lines: 47, Source: VoiceSpeech.cs

Example 6: listenIn

        private async void listenIn() {
            SpeechRecognizer speechRecognizer = new SpeechRecognizer();
            speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(new List<String>() { "note finished" }));

            SpeechRecognitionCompilationResult comResult = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += Con_Result;

            await speechRecognizer.ContinuousRecognitionSession.StartAsync();
        }
Developer: AgentPierce, Project: Surgical-Band, Lines: 10, Source: MainPage.xaml.cs

Example 7: listenIn

        private async void listenIn()
        {
            SpeechRecognizer speechRecognizer = new SpeechRecognizer();
            speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(new List<String>() { "sponge in", "sponge out", "instrument in", "needle in","needle out", "instrument out", "going to close" }));

            SpeechRecognitionCompilationResult comResult = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += Con_Result;

            await speechRecognizer.ContinuousRecognitionSession.StartAsync();
        }
Developer: AgentPierce, Project: Surgical-Band, Lines: 11, Source: EquipTrack.xaml.cs

Example 8: InitSpeech

        /// <summary>
        /// When activating the scenario, ensure we have permission from the user to access their microphone, and
        /// provide an appropriate path for the user to enable access to the microphone if they haven't
        /// given explicit permission for it.
        /// </summary>
        private async Task InitSpeech()
        {
            // Save the UI thread dispatcher to allow speech status messages to be shown on the UI.
            dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

            bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
            if (permissionGained)
            {
                // Enable the recognition buttons.
                button.IsEnabled = true;

                if (speechRecognizer != null)
                {
                    // cleanup prior to re-initializing this scenario.
                    //speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;

                    this.speechRecognizer.Dispose();
                    this.speechRecognizer = null;
                }

                // Create an instance of SpeechRecognizer.
                speechRecognizer = new SpeechRecognizer();

                // Provide feedback to the user about the state of the recognizer.
                //speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;

                // Compile the dictation topic constraint, which optimizes for dictated speech.
                var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
                speechRecognizer.Constraints.Add(dictationConstraint);
                SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();

                speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;

                // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                {
                    // Disable the recognition buttons.
                    button.IsEnabled = false;

                    // Let the user know that the grammar didn't compile properly.
                    //resultTextBlock.Visibility = Visibility.Visible;
                    //resultTextBlock.Text = "Unable to compile grammar.";
                }

            }
            else
            {
                // "Permission to access capture resources was not given by the user; please set the application setting in Settings->Privacy->Microphone.";
                button.IsEnabled = false;
            }

            await Task.Yield();
        }
Developer: gsantopaolo, Project: BingImageSearchSample, Lines: 59, Source: ToolBarViewOLD.xaml.cs

Example 9: MainWindow

        public MainWindow()
        {
            InitializeComponent();

            recognizer = new SpeechRecognizer();
            List<String> constraints = new List<string>();
            //recognizer.Constraints.Add(new SpeechRecognitionListConstraint(constraints));
            IAsyncOperation<SpeechRecognitionCompilationResult> op = recognizer.CompileConstraintsAsync();
            resultGenerated = new TypedEventHandler<SpeechContinuousRecognitionSession, SpeechContinuousRecognitionResultGeneratedEventArgs>(UpdateTextBox);
            recognizer.ContinuousRecognitionSession.ResultGenerated += resultGenerated;
            OnStateChanged = new TypedEventHandler<SpeechRecognizer, SpeechRecognizerStateChangedEventArgs>(onStateChanged);
            recognizer.StateChanged += OnStateChanged;
            op.Completed += HandleCompilationCompleted;
        }
Developer: kirocuto, Project: CortanaCommandExtension, Lines: 14, Source: MainWindow.xaml.cs

Example 10: answerYN

 async Task<bool> answerYN(string question)
 {
     var language = SpeechRecognizer.SystemSpeechLanguage;
     speakString(question);
     string[] yn = {"Yes", "No"};
     SpeechRecognizer speechRecognizer = new SpeechRecognizer();
     SpeechRecognitionListConstraint list = new SpeechRecognitionListConstraint(yn, "yesOrNo");
     speechRecognizer.Constraints.Add(list);
     await speechRecognizer.CompileConstraintsAsync();
     SpeechRecognitionResult answerResult = await speechRecognizer.RecognizeWithUIAsync();
     return answerResult.Text == "Yes";
 }
Developer: Madpixel6, Project: Niewidomy, Lines: 15, Source: SpeechFunctions1.cs

Example 11: LoadRecognizerAsync

 public async Task LoadRecognizerAsync()
 {
     var permission = await Template10.Utils.AudioUtils.RequestMicrophonePermission();
     if (permission && _SpeechRecognizer == null)
     {
         _SpeechRecognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);
         var constraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
         _SpeechRecognizer.Constraints.Add(constraint);
         var compilation = await _SpeechRecognizer.CompileConstraintsAsync();
         if (compilation.Status != SpeechRecognitionResultStatus.Success)
             throw new Exception(compilation.Status.ToString());
     }
     else if (!permission)
     {
         throw new Exception("RequestMicrophonePermission returned false");
     }
 }
Developer: timothius2005, Project: Template10, Lines: 17, Source: SpeechService.cs

Example 12: Init

		private async void Init(Windows.Globalization.Language language)
		{
			ListenButton.IsEnabled = false;
			bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
			if (!permissionGained)
			{
				MessageDialog("Permission to access capture resources was not given by the user; please reset the application setting in Settings->Privacy->Microphone.");
			}

			var recognizer = new SpeechRecognizer(language);
			var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
			recognizer.Constraints.Add(topicConstraint);
			var compilationResult = await recognizer.CompileConstraintsAsync();

			_SpeechRecognizer = recognizer;
			ListenButton.IsEnabled = true;
		}
Developer: halllo, Project: GastManager, Lines: 17, Source: MainPage.xaml.cs

Example 13: InitRecognizer

        public async static Task<SpeechRecognizer>  InitRecognizer()
        {
            try
            {
                if (null != recognizer)
                {                   
                    recognizer.Dispose();
                    recognizer = null;
                }
                recognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);
                recognizer.Constraints.Add(
                    new SpeechRecognitionListConstraint(
                        new List<string>()
                        {
                        speechResourceMap.GetValue("account page", speechContext).ValueAsString,
                        speechResourceMap.GetValue("audit page", speechContext).ValueAsString,
                        speechResourceMap.GetValue("finace page", speechContext).ValueAsString,
                        speechResourceMap.GetValue("transfer page", speechContext).ValueAsString
                        }, "goto"));


                SpeechRecognitionCompilationResult compilationResult = await recognizer.CompileConstraintsAsync();
                if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                {
                    recognizer.Dispose();
                    recognizer = null;
                }

                //string uiOptionsText = string.Format("Try saying '{0}', '{1}' or '{2}'",
                //        speechResourceMap.GetValue("account page", speechContext).ValueAsString,
                //        speechResourceMap.GetValue("audit page", speechContext).ValueAsString,
                //        speechResourceMap.GetValue("audit page", speechContext).ValueAsString);
                //recognizer.UIOptions.ExampleText = uiOptionsText;
                return recognizer;
            }
            catch(Exception e)
            {             
                return null;
            }
           
        }
Developer: DXChinaTE, Project: UWP-Speech-recognition-Demo, Lines: 41, Source: ResourceHelper.cs

Example 14: OnNavigatedTo

        protected override async void OnNavigatedTo(NavigationEventArgs e)
        {
            base.OnNavigatedTo(e);

            MediaElementCtrl.MediaEnded += MediaElementCtrl_MediaEnded;

            this.dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;
            this.speechRecognizer = new SpeechRecognizer();

            SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();

            speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;

            #region TTS
            try
            {
                _voice = (from voiceInformation
                            in Windows.Media.SpeechSynthesis.SpeechSynthesizer.AllVoices
                          select voiceInformation).First();

                _speechSynthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
                _speechSynthesizer.Voice = _voice;
            }
            catch (Exception exception)
            {
                var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
                await messageDialog.ShowAsync();
            }
            #endregion

            StartConversation();

            //#if DEBUG
            //            _questions.Last().Value = "1";
            //            EndConversation();
            //            return;
            //#endif
        }
Developer: Cellenza, Project: smart-apero, Lines: 39, Source: MainPage.xaml.cs

Example 15: InitializeSpeechRecognizer

        private async void InitializeSpeechRecognizer()
        {
            if (speechRecognizer != null)
            {
                this.speechRecognizer.Dispose();
                this.speechRecognizer = null;
            }
            speechRecognizer = new SpeechRecognizer();
            var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
            speechRecognizer.Constraints.Add(topicConstraint);
            await speechRecognizer.CompileConstraintsAsync();

            var operation = await speechRecognizer.RecognizeAsync();
            if (!this.Completed && operation.Status == SpeechRecognitionResultStatus.Success)
            {
                this.Completed = true;
                ResultGenerated(operation.Text);
                // Dispose the recognizer once the result has been handled.
                speechRecognizer.Dispose();
                speechRecognizer = null;
            }
        }
Developer: AdrianDiaz81, Project: Codemotion2015, Lines: 22, Source: MainPage.xaml.cs


Note: The SpeechRecognizer.CompileConstraintsAsync method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.