This article collects typical usage examples of the C# SpeechRecognizer.RecognizeAsync method. If you have been wondering how to call SpeechRecognizer.RecognizeAsync, or what it is used for, the hand-picked code examples below may help. You can also explore further usage examples for the containing class, SpeechRecognizer.
The following shows 15 code examples of SpeechRecognizer.RecognizeAsync, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# examples.
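Before the individual examples, here is a minimal, hedged sketch of the usual UWP call sequence (create the recognizer, compile constraints, then await RecognizeAsync). The type and method names come from the Windows.Media.SpeechRecognition namespace; the wrapper method name RecognizeOnceAsync is our own:
// Requires: using System; using System.Threading.Tasks; using Windows.Media.SpeechRecognition;
private async Task<string> RecognizeOnceAsync()
{
    using (var recognizer = new SpeechRecognizer())
    {
        // CompileConstraintsAsync must complete successfully before
        // RecognizeAsync is called, even when no explicit constraints are added.
        SpeechRecognitionCompilationResult compilation = await recognizer.CompileConstraintsAsync();
        if (compilation.Status != SpeechRecognitionResultStatus.Success)
            return string.Empty;
        SpeechRecognitionResult result = await recognizer.RecognizeAsync();
        return result.Status == SpeechRecognitionResultStatus.Success ? result.Text : string.Empty;
    }
}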
Example 1: StartRecognizeAsync
private async Task StartRecognizeAsync()
{
    try
    {
        var speechRecognizer = new SpeechRecognizer();
        speechRecognizer.Grammars.AddGrammarFromList(
            "answer",
            _words);
        while (!_pleaseFinish)
        {
            var result = await speechRecognizer.RecognizeAsync();
            if (result.TextConfidence != SpeechRecognitionConfidence.Rejected)
            {
                ProcessResult(result);
            }
            else
            {
                Debug.WriteLine("No text!");
            }
        }
    }
    finally
    {
        _isRunning = false;
    }
}
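Note: Example 1 (like Examples 2 and 9) targets the Windows Phone 8 Windows.Phone.Speech.Recognition API, where phrase lists are added through speechRecognizer.Grammars and the result exposes TextConfidence. Most of the remaining examples target the WinRT/UWP Windows.Media.SpeechRecognition API, where constraints are added to speechRecognizer.Constraints and must be compiled with CompileConstraintsAsync before RecognizeAsync is called.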
Example 2: Button_Click_2
private async void Button_Click_2(object sender, RoutedEventArgs e)
{
    SpeechRecognizer speechRecognizer = new SpeechRecognizer();
    speechRecognizer.Grammars.AddGrammarFromList("color", new List<string>
    {
        "红色", // red
        "白色", // white
        "蓝色", // blue
        "绿色"  // green
    });
    try
    {
        var result = await speechRecognizer.RecognizeAsync();
        if (result.TextConfidence == SpeechRecognitionConfidence.Rejected)
        {
            MessageBox.Show("语音识别不到"); // "Speech could not be recognized"
        }
        else
        {
            MessageBox.Show(result.Text);
        }
    }
    catch (Exception err)
    {
        // "Please check whether the speech privacy policy has been accepted"
        MessageBox.Show("请检查是否接收语音隐私协议" + err.Message + err.HResult);
    }
}
Example 3: StartVoiceRecognition
private async void StartVoiceRecognition()
{
    await SpeakText("Say Captains Log at any time to create a log entry.");
    speechRecognizerCaptainsLogCommand = new SpeechRecognizer();
    while (!cancellationSource.IsCancellationRequested)
    {
        // Listen for user to say "Captains Log"
        ISpeechRecognitionConstraint commandConstraint =
            new SpeechRecognitionListConstraint(new[] { "Captains Log", "Computer Captains Log" });
        speechRecognizerCaptainsLogCommand.Constraints.Add(commandConstraint);
        await speechRecognizerCaptainsLogCommand.CompileConstraintsAsync();
        SpeechRecognitionResult commandResult = await speechRecognizerCaptainsLogCommand.RecognizeAsync();
        if (commandResult.Status != SpeechRecognitionResultStatus.Success
            || commandResult.Confidence == SpeechRecognitionConfidence.Rejected
            || cancellationSource.IsCancellationRequested)
        {
            continue;
        }
        // Recognized user saying "Captains Log"
        // Listen for the user's dictation entry
        var captainsLogDictationRecognizer = new SpeechRecognizer();
        ISpeechRecognitionConstraint dictationConstraint =
            new SpeechRecognitionTopicConstraint(
                SpeechRecognitionScenario.Dictation, "LogEntry", "LogEntryDictation");
        captainsLogDictationRecognizer.Constraints.Add(dictationConstraint);
        await captainsLogDictationRecognizer.CompileConstraintsAsync();
        captainsLogDictationRecognizer.UIOptions.ExampleText = "Boldly going where no man or woman has gone before.";
        captainsLogDictationRecognizer.UIOptions.AudiblePrompt = "Go ahead";
        captainsLogDictationRecognizer.UIOptions.IsReadBackEnabled = true;
        captainsLogDictationRecognizer.UIOptions.ShowConfirmation = true;
        SpeechRecognitionResult dictationResult = await captainsLogDictationRecognizer.RecognizeWithUIAsync();
        if (dictationResult.Status != SpeechRecognitionResultStatus.Success
            || dictationResult.Confidence == SpeechRecognitionConfidence.Rejected
            || string.IsNullOrWhiteSpace(dictationResult.Text)
            || cancellationSource.IsCancellationRequested)
        {
            captainsLogDictationRecognizer.Dispose();
            continue;
        }
        // Recognized user's dictation entry
        AddLogEntry(dictationResult.Text);
        captainsLogDictationRecognizer.Dispose();
    }
    speechRecognizerCaptainsLogCommand.Dispose();
}
Example 4: RecognizeVoiceCommand
public static async Task<string> RecognizeVoiceCommand()
{
    try
    {
        speechRecognizer = await ResourceHelper.InitRecognizer();
        if (null == speechRecognizer)
        {
            _command = ResourceHelper.GetString("Sys Err"); // "system error"
            return _command;
        }
        recognitionOperation = speechRecognizer.RecognizeAsync();
        SpeechRecognitionResult speechRecognitionResult = await recognitionOperation;
        // If successful, display the recognition result. A cancelled task should do nothing.
        if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
        {
            if (speechRecognitionResult.Confidence == SpeechRecognitionConfidence.Rejected)
            {
                _command = ResourceHelper.GetString("invalid"); // "sorry, your command could not be recognized"
            }
            else
            {
                string tag = "unknown";
                if (speechRecognitionResult.Constraint != null)
                {
                    // Only attempt to retrieve the tag if we didn't hit the garbage rule.
                    tag = speechRecognitionResult.Constraint.Tag;
                }
                _command = speechRecognitionResult.Text;
            }
        }
        return _command;
    }
    catch (Exception e)
    {
        return e.Message;
    }
}
Example 5: speech
public async void speech(string option)
{
    var r = "";
    try
    {
        var _recognizer = new SpeechRecognizer();
        var _recOperation = _recognizer.RecognizeAsync();
        var recoResult = await _recOperation;
        r = recoResult.Text;
    }
    catch (Exception e)
    {
        r = "Exception: " + e.ToString();
    }
    DispatchCommandResult(new PluginResult(PluginResult.Status.OK, "Everything went as planned, this is a result that is passed to the success handler. " + r));
}
Example 6: InitRecognitionEngine
private async void InitRecognitionEngine()
{
    try
    {
        speechRecognizer = new SpeechRecognizer(new Language(languageTag));
    }
    catch
    {
        // Fall back to the default language if languageTag is not available.
        speechRecognizer = new SpeechRecognizer();
    }
    speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(GetCommandsText(), "tag1"));
    //// Earlier blocking variant, kept for reference:
    //var op = speechRecognizer.CompileConstraintsAsync();
    //op.AsTask().Wait();
    ////var a = op.GetResults();
    //var op2 = speechRecognizer.RecognizeAsync();
    //op2.AsTask().Wait();
    //SpeechRecognitionResult result = op2.GetResults();
    //if (result.Status == SpeechRecognitionResultStatus.Success)
    //{
    //}
    SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();
    SpeechRecognitionResult result = await speechRecognizer.RecognizeAsync();
    //if (result.Status == SpeechRecognitionResultStatus.Success)
    //    phoneNumber = result.Text;

    //// Legacy System.Speech.Recognition (desktop) variant, kept for reference:
    //var cultureInfo = new CultureInfo("ru-RU");
    ////var cultureInfo = new CultureInfo("en-US");
    //Thread.CurrentThread.CurrentCulture = cultureInfo;
    //Thread.CurrentThread.CurrentUICulture = cultureInfo;
    ///*
    //•en-GB. English (United Kingdom)
    //•en-US. English (United States)
    //•de-DE. German (Germany)
    //•es-ES. Spanish (Spain)
    //•fr-FR. French (France)
    //•ja-JP. Japanese (Japan)
    //•zh-CN. Chinese (China)
    //•zh-TW. Chinese (Taiwan)
    //*/
    //var commands = GetCommandsText();
    //var choices = new Choices(commands);
    //var builder = new GrammarBuilder(choices);
    //builder.Culture = cultureInfo;
    //recognitionEngine = new SpeechRecognitionEngine(); // (cultureInfo);
    //recognitionEngine.SetInputToDefaultAudioDevice();
    //recognitionEngine.UnloadAllGrammars();
    //recognitionEngine.LoadGrammar(new Grammar(builder));
    ////recognitionEngine.LoadGrammar(new DictationGrammar()); // any text
    //recognitionEngine.SpeechRecognized += recognitionEngine_SpeechRecognized;
    //recognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
}
Example 7: InitializeSpeechRecognizer
private async void InitializeSpeechRecognizer()
{
    if (speechRecognizer != null)
    {
        this.speechRecognizer.Dispose();
        this.speechRecognizer = null;
    }
    speechRecognizer = new SpeechRecognizer();
    var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
    speechRecognizer.Constraints.Add(topicConstraint);
    await speechRecognizer.CompileConstraintsAsync();
    var result = await speechRecognizer.RecognizeAsync();
    if (!this.Completed && result.Status == SpeechRecognitionResultStatus.Success)
    {
        this.Completed = true;
        ResultGenerated(result.Text);
        // The original sample called speechRecognizer.RecognizeAsync().Cancel() here,
        // which starts (and immediately cancels) a brand-new recognition session;
        // the session awaited above has already completed, so no cancellation is needed.
        speechRecognizer.Dispose();
        speechRecognizer = null;
    }
}
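If cancellation of an in-flight recognition is actually needed, the IAsyncOperation returned by RecognizeAsync can be stored and cancelled instead of starting a new session. A minimal sketch under that assumption (the field name pendingRecognition and both method names are hypothetical):
// Requires: using Windows.Foundation; using Windows.Media.SpeechRecognition;
private IAsyncOperation<SpeechRecognitionResult> pendingRecognition;

private void StartListening(SpeechRecognizer recognizer)
{
    // Keep a handle to the operation so it can be cancelled later.
    pendingRecognition = recognizer.RecognizeAsync();
}

private void StopListening()
{
    // Completes the operation with AsyncStatus.Canceled; code awaiting
    // it observes a TaskCanceledException.
    pendingRecognition?.Cancel();
}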
Example 8: VoiceRecognizer
private async void VoiceRecognizer()
{
    voiceRecognizer = new SpeechRecognizer();
    SpeechRecognitionTopicConstraint topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "development");
    voiceRecognizer.Constraints.Add(topicConstraint);
    SpeechRecognitionCompilationResult result = await voiceRecognizer.CompileConstraintsAsync();
    SpeechRecognitionResult speechRecognitionResult = await voiceRecognizer.RecognizeAsync();
    //voiceRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
    //voiceRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
    //await voiceRecognizer.ContinuousRecognitionSession.StartAsync();
    if (pname == "Lorenzo")
    {
        if (speechRecognitionResult.Text.Contains("expensive") || speechRecognitionResult.Text.Contains("expense"))
        {
            //speechText.Text = "So much expensive";
            ReadVoice(Error.Not_Found);
            //pageView.Navigate(new Uri("http://www.americanas.com.br/produto/113151382/carro-eletrico-sport-car-vermelho-6v"));
        }
        else
        {
            ReadVoice(Error.Not_Found);
        }
    }
    else
    {
        ReadVoice(Error.Not_Found);
    }
}
Example 9: Button_Click_4
private async void Button_Click_4(object sender, RoutedEventArgs e)
{
    SpeechRecognizer speechRecognizer = new SpeechRecognizer();
    speechRecognizer.Grammars.AddGrammarFromUri("music", new Uri("ms-appx:///SRGSGrammar1.xml"));
    try
    {
        var result = await speechRecognizer.RecognizeAsync();
        if (result.TextConfidence == SpeechRecognitionConfidence.Rejected)
        {
            MessageBox.Show("语音识别不到"); // "Speech could not be recognized"
        }
        else
        {
            string music = "";
            if (result.Semantics.Keys.Contains("music"))
            {
                music = result.Semantics["music"].Value.ToString();
            }
            MessageBox.Show(result.Text + "|" + music);
        }
    }
    catch (Exception err)
    {
        // "Please check whether the speech privacy policy has been accepted"
        MessageBox.Show("请检查是否接收语音隐私协议" + err.Message + err.HResult);
    }
}
Example 10: lineRecog
private async void lineRecog()
{
    SpeechRecognizer speechRecognizer = new SpeechRecognizer();
    // Compile the default dictation grammar.
    SpeechRecognitionCompilationResult compilationResult =
        await speechRecognizer.CompileConstraintsAsync();
    // Start recognizing.
    // Note: you can also use RecognizeWithUIAsync().
    SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();
    result = speechRecognitionResult.Text;
}
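As the comment in Example 10 says, RecognizeWithUIAsync() is a drop-in alternative that shows the system listening dialog; a brief sketch (the UIOptions values are illustrative, and the call pattern matches Example 3):
var speechRecognizer = new SpeechRecognizer();
await speechRecognizer.CompileConstraintsAsync();
// Same flow as RecognizeAsync, but the system renders a listening UI.
speechRecognizer.UIOptions.ExampleText = "What's the weather like?";
speechRecognizer.UIOptions.ShowConfirmation = true;
SpeechRecognitionResult result = await speechRecognizer.RecognizeWithUIAsync();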
Example 11: VoiceButton_Click
private async void VoiceButton_Click(object sender, RoutedEventArgs e)
{
    try
    {
        // Get the top user-preferred language and its display name.
        var topUserLanguage = Windows.System.UserProfile.GlobalizationPreferences.Languages[0];
        var language = new Windows.Globalization.Language(topUserLanguage);
        firstStopAttemptDone = false;
        listening = true;
        using (speechRecognizer = new SpeechRecognizer(language))
        {
            var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, WEB_SEARCH);
            speechRecognizer.Constraints.Add(dictationConstraint);
            SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();
            // Set timeouts.
            speechRecognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(4.0);
            speechRecognizer.Timeouts.BabbleTimeout = TimeSpan.FromSeconds(4.0);
            speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(1.0);
            speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
            if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
                return;
            VisualStateManager.GoToState(this, VISUAL_STATE_LISTENING, true);
            this.IsReadOnly = true;
            this.Text = LISTENING_TEXT;
            SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeAsync();
            if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
                Text = speechRecognitionResult.Text;
            else
                Text = SPEECH_RECOGNITION_FAILED;
        }
    }
    catch (Exception ex)
    {
        System.Diagnostics.Debug.WriteLine(ex.Message);
        Text = string.Empty;
    }
    finally
    {
        timer.Stop();
        hypotesis = string.Empty;
        VisualStateManager.GoToState(this, VISUAL_STATE_NOT_LISTENING, true);
        this.IsReadOnly = false;
        listening = false;
    }
}
Example 12: Run
public async void Run(IBackgroundTaskInstance taskInstance)
{
    // The deferral must be retrieved before subscribing to events that use it;
    // it keeps the background task alive while the loop below runs.
    BackgroundTaskDeferral deferral = taskInstance.GetDeferral();
    using (MopidyClient client = new MopidyClient())
    {
        await client.Open();
        await client.Play("spotify:track:1hKdDCpiI9mqz1jVHRKG0E");
        var speechRecognizer = new SpeechRecognizer(SpeechRecognizer.SystemSpeechLanguage);
        var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");
        speechRecognizer.Constraints.Add(webSearchGrammar);
        SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();
        // Check that the constraints were in a proper format and the recognizer was able to compile them.
        if (compilationResult.Status == SpeechRecognitionResultStatus.Success)
        {
            while (true)
            {
                var recognitionOperation = speechRecognizer.RecognizeAsync();
                SpeechRecognitionResult speechRecognitionResult = await recognitionOperation;
                if (speechRecognitionResult.Status == SpeechRecognitionResultStatus.Success)
                {
                    if (speechRecognitionResult.Text.StartsWith("play", StringComparison.OrdinalIgnoreCase))
                    {
                        string playSearchString = speechRecognitionResult.Text.Substring(4).Trim();
                        string uri;
                        if (playSearchString.StartsWith("artist", StringComparison.OrdinalIgnoreCase))
                        {
                            uri = await client.SearchArtist(playSearchString.Substring(6).Trim());
                        }
                        else
                        {
                            uri = await client.Search(playSearchString);
                        }
                        if (uri != null)
                        {
                            await client.Play(uri);
                        }
                    }
                    else if (speechRecognitionResult.Text.StartsWith("stop", StringComparison.OrdinalIgnoreCase))
                    {
                        await client.Stop();
                    }
                    else if (speechRecognitionResult.Text.StartsWith("louder", StringComparison.OrdinalIgnoreCase))
                    {
                        int volume = await client.GetVolume();
                        volume = Math.Min(volume + 10, 100);
                        await client.SetVolume(volume);
                    }
                    else if (speechRecognitionResult.Text.StartsWith("quieter", StringComparison.OrdinalIgnoreCase))
                    {
                        int volume = await client.GetVolume();
                        volume = Math.Max(volume - 10, 0);
                        await client.SetVolume(volume);
                    }
                    else if (speechRecognitionResult.Text.StartsWith("mute", StringComparison.OrdinalIgnoreCase))
                    {
                        await client.SetVolume(0);
                    }
                }
                else
                {
                    //resultTextBlock.Visibility = Visibility.Visible;
                    //resultTextBlock.Text = string.Format("Speech Recognition Failed, Status: {0}", speechRecognitionResult.Status.ToString());
                }
            }
        }
    }
}
Example 13: Recognize
//......... part of the code has been omitted here .........
if (isRecognizing && recognizer != null)
{
    await recognizer.StopRecognitionAsync();
}
recognizer = new SpeechRecognizer();
//if (recognizer != null)
//{
//}
//else
//{
//    //recognizer.Constraints?.Clear();
//    //await recognizer.CompileConstraintsAsync();
//}
if (grammarFileConstraint != null)
{
    recognizer.Constraints.Add(grammarFileConstraint);
}
SpeechRecognitionResult recognize = null;
try
{
    isRecognizing = false;
    SpeechStatusChanged?.Invoke(this, new SpeechArgs { Status = SpeechStatus.None });
    await recognizer.CompileConstraintsAsync();
    isRecognizing = true;
    SpeechStatusChanged?.Invoke(this, new SpeechArgs { Status = SpeechStatus.Listening });
    recognize = await (ui ? recognizer.RecognizeWithUIAsync() : recognizer.RecognizeAsync());
}
catch (Exception e)
{
    Debug.WriteLine(e.GetType() + ": " + e.Message);
    if (recognize != null)
    {
        result.status = recognize.Status;
    }
    result.confidence = 5;
    return result;
}
finally
{
    isRecognizing = false;
    SpeechStatusChanged?.Invoke(this, new SpeechArgs { Status = isUserStopped ? SpeechStatus.Stopped : SpeechStatus.None });
}
result.status = isUserStopped ? SpeechRecognitionResultStatus.UserCanceled : recognize.Status;
if (constraints == null)
{
    result.text = recognize.Text;
    return result;
}
result.confidence = (int)recognize.Confidence;
var text = recognize.Text.ToUpperInvariant();
var items2 = constraints.Split(';');
Example 14: btnSearch_Click
private async void btnSearch_Click(object sender, RoutedEventArgs e)
{
    this.txtCortanaMessages.Text = "Je vous écoute..."; // "I'm listening..."
    Windows.Globalization.Language langFR = new Windows.Globalization.Language("fr-FR");
    SpeechRecognizer recognizer = new SpeechRecognizer(langFR);
    SpeechRecognitionTopicConstraint topicConstraint
        = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
    recognizer.Constraints.Add(topicConstraint);
    await recognizer.CompileConstraintsAsync(); // Required before RecognizeAsync.
    var recognition = recognizer.RecognizeAsync();
    recognition.Completed += this.Recognition_Completed;
}
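Example 14 does not await the operation; it reads the result later in a Completed handler. Only the delegate signature below is fixed by IAsyncOperation<SpeechRecognitionResult>; the handler body is a hypothetical sketch:
// Requires: using Windows.Foundation; using Windows.Media.SpeechRecognition;
private void Recognition_Completed(
    IAsyncOperation<SpeechRecognitionResult> asyncInfo, AsyncStatus asyncStatus)
{
    if (asyncStatus != AsyncStatus.Completed)
        return; // Canceled or Error: there is no result to read.
    SpeechRecognitionResult result = asyncInfo.GetResults();
    System.Diagnostics.Debug.WriteLine(result.Text);
}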
Example 15: SetState
/// <summary>
/// Move to a new state.
/// </summary>
private async Task SetState(SpeechDialogBoxState state)
{
    // Do not interrupt while speaking.
    while (this.state == SpeechDialogBoxState.Speaking)
    {
        await Task.Delay(200);
    }
    this.state = state;
    await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, new DispatchedHandler(
        async () =>
        {
            // Hide all.
            this.DefaultState.Visibility = Visibility.Collapsed;
            this.TypingState.Visibility = Visibility.Collapsed;
            this.ListeningState.Visibility = Visibility.Collapsed;
            this.ThinkingState.Visibility = Visibility.Collapsed;
            switch (this.state)
            {
                case SpeechDialogBoxState.Default:
                    this.DefaultState.Visibility = Visibility.Visible;
                    break;
                case SpeechDialogBoxState.Typing:
                    this.TypingState.Visibility = Visibility.Visible;
                    break;
                case SpeechDialogBoxState.Listening:
                    this.ListeningState.Visibility = Visibility.Visible;
                    this.MediaElement.Source = new Uri("ms-appx:///Assets//Listening.wav");
                    SpeechRecognizer recognizer = new SpeechRecognizer();
                    foreach (var constraint in this.Constraints)
                    {
                        recognizer.Constraints.Add(constraint);
                    }
                    await recognizer.CompileConstraintsAsync();
                    var reco = recognizer.RecognizeAsync();
                    reco.Completed += this.SpeechRecognition_Completed;
                    break;
                case SpeechDialogBoxState.Thinking:
                    this.ThinkingState.Visibility = Visibility.Visible;
                    break;
                default:
                    break;
            }
        }));
}