本文整理汇总了C#中SpeechRecognizer类的典型用法代码示例。如果您正苦于以下问题:C# SpeechRecognizer类的具体用法?C# SpeechRecognizer怎么用?C# SpeechRecognizer使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
SpeechRecognizer类属于相应的语音识别命名空间(例如 Windows.Media.SpeechRecognition),在下文中一共展示了SpeechRecognizer类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: StartVoiceRecognition
/// <summary>
/// Continuously listens for the "Captains Log" trigger phrase and, when it is
/// recognized, opens a system dictation UI whose text becomes a new log entry.
/// Loops until <c>cancellationSource</c> is cancelled. <c>async void</c> is
/// tolerated here only because this is a top-level fire-and-forget entry point.
/// </summary>
private async void StartVoiceRecognition()
{
    await SpeakText( "Say Captains Log at any time to create a log entry." );

    speechRecognizerCaptainsLogCommand = new SpeechRecognizer();

    // BUGFIX: the list constraint was previously added and recompiled on every
    // loop iteration, accumulating duplicate constraints in the recognizer.
    // Build and compile the command grammar exactly once, before the loop.
    ISpeechRecognitionConstraint commandConstraint =
        new SpeechRecognitionListConstraint( new[] { "Captains Log", "Computer Captains Log" } );
    speechRecognizerCaptainsLogCommand.Constraints.Add( commandConstraint );
    await speechRecognizerCaptainsLogCommand.CompileConstraintsAsync();

    while ( !cancellationSource.IsCancellationRequested )
    {
        // Listen for user to say "Captains Log".
        SpeechRecognitionResult commandResult = await speechRecognizerCaptainsLogCommand.RecognizeAsync();
        if ( commandResult.Status != SpeechRecognitionResultStatus.Success
            || commandResult.Confidence == SpeechRecognitionConfidence.Rejected
            || cancellationSource.IsCancellationRequested )
        {
            continue;
        }

        // Trigger phrase recognized — capture the dictated log entry.
        // BUGFIX: `using` guarantees the dictation recognizer is disposed even
        // when RecognizeWithUIAsync throws (previously it leaked on exception).
        using ( var captainsLogDictationRecognizer = new SpeechRecognizer() )
        {
            ISpeechRecognitionConstraint dictationConstraint =
                new SpeechRecognitionTopicConstraint(
                    SpeechRecognitionScenario.Dictation, "LogEntry", "LogEntryDictation" );
            captainsLogDictationRecognizer.Constraints.Add( dictationConstraint );
            await captainsLogDictationRecognizer.CompileConstraintsAsync();

            captainsLogDictationRecognizer.UIOptions.ExampleText = "Boldly going where no man or woman has gone before.";
            captainsLogDictationRecognizer.UIOptions.AudiblePrompt = "Go ahead";
            captainsLogDictationRecognizer.UIOptions.IsReadBackEnabled = true;
            captainsLogDictationRecognizer.UIOptions.ShowConfirmation = true;

            SpeechRecognitionResult dictationResult = await captainsLogDictationRecognizer.RecognizeWithUIAsync();
            if ( dictationResult.Status != SpeechRecognitionResultStatus.Success
                || dictationResult.Confidence == SpeechRecognitionConfidence.Rejected
                || string.IsNullOrWhiteSpace( dictationResult.Text )
                || cancellationSource.IsCancellationRequested )
            {
                continue;
            }

            // Dictation accepted — persist the entry.
            AddLogEntry( dictationResult.Text );
        }
    }
    speechRecognizerCaptainsLogCommand.Dispose();
}
示例2: InitSpeechRecognition
/// <summary>
/// Lazily creates the continuous recognizer with a single "Start Listening"
/// list constraint, then (re)starts its continuous recognition session.
/// Any failure is logged to the debugger output; best-effort by design.
/// </summary>
private async void InitSpeechRecognition()
{
    try
    {
        if (speechRecognizerContinuous == null)
        {
            speechRecognizerContinuous = new SpeechRecognizer();
            speechRecognizerContinuous.Constraints.Add(
                new SpeechRecognitionListConstraint(
                    new List<String>() { "Start Listening" }, "start"));
            SpeechRecognitionCompilationResult contCompilationResult =
                await speechRecognizerContinuous.CompileConstraintsAsync();
            if (contCompilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                // BUGFIX: was a bare `throw new Exception()` — the catch below
                // logged an empty message, hiding why initialization failed.
                throw new InvalidOperationException(
                    $"Unable to compile speech constraints. Status: {contCompilationResult.Status}");
            }
            speechRecognizerContinuous.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
        }
        await speechRecognizerContinuous.ContinuousRecognitionSession.StartAsync();
    }
    catch (Exception ex)
    {
        System.Diagnostics.Debug.WriteLine(ex.Message);
    }
}
示例3: Button_Click_2
/// <summary>
/// Recognizes one of four spoken color names (WP8 Grammars API) and shows the
/// result in a message box; a rejected or failed recognition is reported too.
/// </summary>
private async void Button_Click_2(object sender, RoutedEventArgs e)
{
    var recognizer = new SpeechRecognizer();

    // Grammar: the only utterances accepted are these four color names.
    var colorChoices = new List<string> { "红色", "白色", "蓝色", "绿色" };
    recognizer.Grammars.AddGrammarFromList("color", colorChoices);

    try
    {
        var recognition = await recognizer.RecognizeAsync();
        var message = recognition.TextConfidence == SpeechRecognitionConfidence.Rejected
            ? "语音识别不到"
            : recognition.Text;
        MessageBox.Show(message);
    }
    catch (Exception err)
    {
        // RecognizeAsync throws when the speech privacy policy was declined.
        MessageBox.Show("请检查是否接收语音隐私协议" + err.Message + err.HResult);
    }
}
示例4: Initialize
/// <summary>
/// One-time setup of the shared speech objects: recognizer, synthesizer and
/// recognizer UI. Selects the first installed female German (de-DE) voice for
/// synthesis and records success/failure in isolated storage.
/// </summary>
/// <exception cref="Exception">Rethrown when any part of initialization fails.</exception>
public static void Initialize()
{
    try
    {
        if (Speech.initialized)
        {
            return;
        }
        Speech.recognizer = new SpeechRecognizer();
        Speech.synthesizer = new SpeechSynthesizer();
        Speech.recognizerUI = new SpeechRecognizerUI();

        // Pick a female de-DE voice; ElementAt(0) throws if none is installed,
        // which is caught below and reported via the package-state flag.
        IEnumerable<VoiceInformation> deVoices = from voice in InstalledVoices.All
                                                 where voice.Gender == VoiceGender.Female
                                                 && voice.Language == "de-DE"
                                                 select voice;
        Speech.synthesizer.SetVoice(deVoices.ElementAt(0));

        Speech.initialized = true;
        IsolatedStorageSettingsHelper.SetSpeechPackageState(true);
    }
    catch (Exception)
    {
        IsolatedStorageSettingsHelper.SetSpeechPackageState(false);
        // BUGFIX: was `throw new Exception()`, which discarded the original
        // exception and its stack trace; rethrow preserves both.
        throw;
    }
}
示例5: InitializeSR
/// <summary>
/// Creates the shared System.Speech recognizer, enables it, and hooks the
/// recognition callback.
/// </summary>
private void InitializeSR()
{
    spRecognizer = new SpeechRecognizer();
    spRecognizer.Enabled = true;
    // Method-group subscription — identical to the explicit EventHandler<> wrap.
    spRecognizer.SpeechRecognized += spRecognizer_SpeechRecognized;
}
示例6: MainWindow
/// <summary>
/// Constructor initializes necessary variables and reads in saved constraints from text file.
/// Also wires the continuous background recognizer and a tray icon that
/// restores the window on double-click.
/// </summary>
public MainWindow()
{
    InitializeComponent();

    // Constraints are persisted in a text file next to the working directory.
    string fileName = @"Stored_Constraints.txt";
    Debug.WriteLine(DateTime.Now.ToString());
    filePath = System.IO.Path.Combine(Directory.GetCurrentDirectory(), fileName);
    dateTimesForConstraints = new Dictionary<string, string>();

    // Background recognizer raises blResultGenerated for each recognized phrase.
    backgroundListener = new SpeechRecognizer();
    constraints = new List<string>();
    BLResultGenerated = new TypedEventHandler<SpeechContinuousRecognitionSession, SpeechContinuousRecognitionResultGeneratedEventArgs>(blResultGenerated);
    backgroundListener.ContinuousRecognitionSession.ResultGenerated += BLResultGenerated;

    constraints = readInConstraintsFromFile();
    currentlyStoredConstraints = constraints.ToList();
    updateConstraintsWindow(constraints);
    this.Closing += OnAppClosing;

    // NOTE(review): this busy-wait spins the UI thread until loadOnStart()
    // completes — consider awaiting it from an async Loaded handler instead.
    var waitOn = loadOnStart();
    while (waitOn.Status != AsyncStatus.Completed) { }

    // Fire-and-forget: continuous recognition is started and the returned
    // operation is intentionally left unobserved.
    var ff = backgroundListener.ContinuousRecognitionSession.StartAsync();

    // Tray icon lets the user hide the window and restore it by double-click.
    notifyIcon = new NotifyIcon();
    notifyIcon.Icon = new System.Drawing.Icon("trayImage.ico");
    notifyIcon.Visible = true;
    notifyIcon.DoubleClick +=
        delegate (object sender, EventArgs args)
        {
            this.Show();
            this.WindowState = WindowState.Normal;
        };
}
示例7: ZenMode
/// <summary>
/// Sets up the zen view: binds the current cloud as DataContext, auto-scrolls
/// on new messages, and prepares a speech recognizer.
/// </summary>
public ZenMode() {
    InitializeComponent();
    cloud = Connection.CurrentCloud;
    DataContext = cloud;
    // Keep the newest message visible as the collection grows.
    cloud.Controller.Messages.CollectionChanged += ScrollDown;
    recognizer = new SpeechRecognizer();
    doneWithZen = false;
}
示例8: SR_AudioLevelChanged
/// <summary>
/// Maps the recognizer's reported audio level onto the volume meter's opacity.
/// </summary>
void SR_AudioLevelChanged(SpeechRecognizer sender,
    SpeechRecognitionAudioLevelChangedEventArgs args)
{
    var level = args.AudioLevel;
    // Positive levels scale directly; non-positive levels are folded into a
    // second range (same arithmetic as the original implementation).
    VolumeMeter.Opacity = level > 0
        ? level / 50
        : Math.Abs((level - 50) / 100);
}
示例9: MainWindow
/// <summary>
/// Initializes the window, an empty color-name list, and the speech
/// recognizer; remaining setup happens in MainWindow_Loaded.
/// </summary>
public MainWindow()
{
    InitializeComponent();
    ColorsList = new List<string>();
    speechRecognizer = new SpeechRecognizer();
    Loaded += MainWindow_Loaded;
}
示例10: InitializeRecognizer
/// <summary>
/// Initialize Speech Recognizer with a web-search topic constraint and
/// compile it, after verifying microphone access.
/// </summary>
/// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
/// <returns>Awaitable task.</returns>
/// <exception cref="Exception">
/// Thrown when microphone access is denied or grammar compilation fails.
/// </exception>
private async Task InitializeRecognizer(Language recognizerLanguage)
{
    MicrophoneAccessStatus status = await AudioCapturePermissions.RequestMicrophoneAccessAsync();
    if (status != MicrophoneAccessStatus.Allowed)
    {
        // Tell the user why recognition cannot start, then abort.
        string prompt = status == MicrophoneAccessStatus.NoCaptureDevices ?
            "没有检测到音频捕获设备,请检查设备后重试" :
            "您没有允许本应用访问麦克风,请在 设置 -> 隐私 -> 麦克风 中设置";
        await new MessageDialog(prompt).ShowAsync();
        throw new Exception($"Request microphone access failed. Status: {status}");
    }

    // Release any previously created recognizer before building a new one.
    Dispose();

    _speechRecognizer = new SpeechRecognizer(recognizerLanguage);
    _speechRecognizer.Constraints.Add(
        new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch"));

    // RecognizeWithUIAsync allows developers to customize the prompts.
    _speechRecognizer.UIOptions.AudiblePrompt = "请说出您想搜索的东西";
    _speechRecognizer.UIOptions.ExampleText = "例如:“你好,美女”";

    // Fail loudly if the constraint did not compile.
    SpeechRecognitionCompilationResult compilationResult = await _speechRecognizer.CompileConstraintsAsync();
    if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
        throw new Exception($"Unable to compile grammar. Status: {compilationResult.Status}");
}
示例11: SpeechButton_Click
/// <summary>
/// Runs one UI-driven recognition against a fixed command list and shows the
/// recognized command in a dialog; the "go home" variants get a special reply.
/// </summary>
private async void SpeechButton_Click(object sender, RoutedEventArgs e)
{
    // Create an instance of SpeechRecognizer.
    this.speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();

    // You could create this array dynamically.
    string[] responses = { "Start", "Stop", "Go left", "Go right", "Go home", "Go to home", "Go to base" };

    // Add a list constraint to the recognizer.
    var listConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "yesOrNo");

    // BUGFIX: the example text previously showed "Yes"/"No", which are not in
    // the constraint list at all; show commands the recognizer actually accepts.
    speechRecognizer.UIOptions.ExampleText = @"Ex. 'Start', 'Go home'";
    speechRecognizer.Constraints.Add(listConstraint);

    // Compile the constraint.
    await speechRecognizer.CompileConstraintsAsync();

    // Start recognition.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await this.speechRecognizer.RecognizeWithUIAsync();
    string recognizedText = speechRecognitionResult.Text;
    var messageDialog = new Windows.UI.Popups.MessageDialog(recognizedText, "Command received");

    // All three "home" phrasings are treated as the same command.
    if (recognizedText.Equals("Go home") || recognizedText.Equals("Go to home") || recognizedText.Equals("Go to base"))
    {
        messageDialog = new Windows.UI.Popups.MessageDialog("Okay, heading home now..", "Text spoken");
    }
    await messageDialog.ShowAsync();
}
示例12: StartRecognizeAsync
/// <summary>
/// Runs a recognition loop against the "answer" word-list grammar until the
/// stop flag is raised; each accepted result is forwarded to ProcessResult.
/// Always clears the running flag on exit.
/// </summary>
private async Task StartRecognizeAsync()
{
    try
    {
        var recognizer = new SpeechRecognizer();
        recognizer.Grammars.AddGrammarFromList("answer", _words);

        // Keep listening until an external flag asks the loop to stop.
        while (!_pleaseFinish)
        {
            var recognition = await recognizer.RecognizeAsync();
            if (recognition.TextConfidence == SpeechRecognitionConfidence.Rejected)
            {
                Debug.WriteLine("No text!");
            }
            else
            {
                ProcessResult(recognition);
            }
        }
    }
    finally
    {
        // Signal that the recognition loop has fully stopped.
        _isRunning = false;
    }
}
示例13: SpeechRecognitionService
/// <summary>
/// Builds the recognizer with a web-search topic constraint, compiles it
/// synchronously, and subscribes to continuous-recognition results.
/// Private constructor — instances are created inside this class only.
/// </summary>
private SpeechRecognitionService()
{
    _recognizer = new SpeechRecognizer();
    _recognizer.Constraints.Add(new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch,
        "webSearch"));
    // NOTE(review): blocking Wait() on an async WinRT call can deadlock if this
    // constructor ever runs on a UI thread — confirm construction is off-UI,
    // or move compilation to an async initialization method.
    _recognizer.CompileConstraintsAsync().AsTask().Wait();
    _recognizer.ContinuousRecognitionSession.ResultGenerated += RecognitionFound;
}
示例14: SpeechInterpreter
/// <summary>
/// Initializes a new instance of <see cref="SpeechInterpreter"/> and wires the
/// recognizer's state and continuous-recognition callbacks.
/// </summary>
/// <param name="container">The IoC container</param>
public SpeechInterpreter(Container container)
{
    m_Container = container;

    // Default system language; an explicit Language could be passed instead.
    m_Recognizer = new SpeechRecognizer(/*new Language("en-US")*/);
    m_Recognizer.StateChanged += RecognizerStateChanged;
    m_Recognizer.ContinuousRecognitionSession.ResultGenerated += RecognizerResultGenerated;
}
示例15: MainPage
/// <summary>
/// Page constructor: creates a recognizer with a web-search topic constraint
/// and kicks off constraint compilation.
/// </summary>
public MainPage()
{
    this.InitializeComponent();
    // NOTE(review): this recognizer is a local — it is never stored, started,
    // or disposed, so it is unreachable once the constructor returns.
    var recognizer = new SpeechRecognizer();
    var topicconstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "Development");
    recognizer.Constraints.Add(topicconstraint);
    // NOTE(review): CompileConstraintsAsync is fire-and-forget here (a ctor
    // cannot await); its result/status is never observed.
    var result =
        recognizer.CompileConstraintsAsync();
}