本文整理汇总了C#中SpeechRecognitionEngine.Dispose方法的典型用法代码示例。如果您正苦于以下问题:C# SpeechRecognitionEngine.Dispose方法的具体用法?C# SpeechRecognitionEngine.Dispose怎么用?C# SpeechRecognitionEngine.Dispose使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类SpeechRecognitionEngine
的用法示例。
在下文中一共展示了SpeechRecognitionEngine.Dispose方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: TestMethod1
/// <summary>
/// Verifies that a <see cref="SpeechRecognitionEngine"/> for the en-US culture
/// can be created, loaded with a dictation grammar, and disposed without error.
/// </summary>
public void TestMethod1()
{
    // The engine is IDisposable; a using block guarantees Dispose() runs
    // even if LoadGrammar throws (the original leaked the engine in that case).
    using (SpeechRecognitionEngine engine = new SpeechRecognitionEngine(new CultureInfo("en-US")))
    {
        engine.LoadGrammar(new DictationGrammar());
    }
}
示例2: button1_Click
/// <summary>
/// Starts continuous speech recognition for the phrases "test" and "exit" on the
/// default audio device, then blocks until <c>_completed</c> is signaled.
/// NOTE(review): presumably <c>_recognizer_SpeechRecognized</c> sets
/// <c>_completed</c> when "exit" is heard — confirm against the handler.
/// </summary>
/// <param name="sender">The button that raised the click event.</param>
/// <param name="e">Event data for the click.</param>
private void button1_Click(object sender, EventArgs e)
{
    _completed = new ManualResetEvent(false);

    // The engine is IDisposable; a using block guarantees Dispose() runs even
    // if grammar loading, audio-device setup, or the wait throws (the original
    // leaked the engine on any exception before the final Dispose call).
    using (SpeechRecognitionEngine _recognizer = new SpeechRecognitionEngine())
    {
        _recognizer.RequestRecognizerUpdate();                            // request a safe point before loading a grammar
        _recognizer.LoadGrammar(new Grammar(new GrammarBuilder("test"))); // phrase grammar: "test"
        _recognizer.RequestRecognizerUpdate();                            // request another safe point
        _recognizer.LoadGrammar(new Grammar(new GrammarBuilder("exit"))); // phrase grammar: "exit"
        _recognizer.SpeechRecognized += _recognizer_SpeechRecognized;
        try
        {
            _recognizer.SetInputToDefaultAudioDevice();      // listen on the default audio device
            _recognizer.RecognizeAsync(RecognizeMode.Multiple); // keep recognizing until explicitly stopped
            _completed.WaitOne();                            // block until the handler signals completion
        }
        finally
        {
            // Detach the handler so the form does not keep the (disposed)
            // engine reachable through the event subscription.
            _recognizer.SpeechRecognized -= _recognizer_SpeechRecognized;
        }
    }
}
示例3: Main
static void Main(string[] args)
{
AppDomain.CurrentDomain.UnhandledException += new UnhandledExceptionEventHandler(CurrentDomain_UnhandledException);
voice = new Voice();
commandProcessor = ConfigureCommands().CreateCommandProcessor();
commandProcessor.CommandRecognized += sound.NotifyRecognizedCommandAsync;
commandProcessor.CommandRejected += sound.NotifyUnrecognizedCommandAsync;
Console.WriteLine("Attached PIR-1 devices:");
foreach (var pir in PIRDriver.Instance.QueryAttachedDevices())
Console.WriteLine("\t{0}", pir);
ConfigureLightShow();
Console.WriteLine("Configured LightShow");
var recognizer = GetKinectRecognizer();
using (var sensor = GetKinectSensor())
{
/* Skeleton-based beam control is disabled due to an OOM issue when long running.
var beamController = new SkeletonBasedBeamControl();
beamController.AttentionGestureDetected += delegate(SkeletonBasedBeamControl controller)
{
sound.NotifyAttentionGestureRecognized();
};
beamController.Start(sensor);
*/
sensor.Start();
var source = sensor.AudioSource;
source.AutomaticGainControlEnabled = false;
source.EchoCancellationMode = EchoCancellationMode.None;
source.NoiseSuppression = true;
Console.WriteLine("Using: {0}", recognizer.Name);
using (Stream s = source.Start())
{
SpeechRecognitionEngine sre = null;
var sreLock = new object();
EventHandler<SpeechDetectedEventArgs> SreSpeechDetected = delegate(object sender, SpeechDetectedEventArgs dea) { SpeechDetected(source, dea); };
Action startRecognizer = delegate()
{
SpeechRecognitionEngine oldSre = null;
lock (sreLock)
{
if (sre != null)
{
oldSre = sre;
}
sre = new SpeechRecognitionEngine(recognizer.Id);
sre.UpdateRecognizerSetting("AdaptationOn", 1);
sre.UpdateRecognizerSetting("PersistedBackgroundAdaptation", 1);
sre.LoadGrammar(commandProcessor.CreateGrammar());
sre.SpeechDetected += SreSpeechDetected;
sre.SpeechHypothesized += SreSpeechHypothesized;
sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
sre.AudioSignalProblemOccurred += SreAudioSignalProblemOccurred;
sre.EndSilenceTimeoutAmbiguous = TimeSpan.FromMilliseconds(AmbiguousSilenceTimeout);
sre.EndSilenceTimeout = TimeSpan.FromMilliseconds(UnambiguousSilenceTimeout);
sre.SpeechRecognized += delegate(object sender, SpeechRecognizedEventArgs r)
{
Console.WriteLine("Handling text {0} in command processor", r.Result.Text);
try
{
commandProcessor.ProcessSpeech(r.Result);
}
catch (Exception ex)
{
Console.WriteLine("Command handler failed: " + ex.ToString());
voice.SpeakAsync("Failed to execute command. Sorry!");
}
};
sre.SetInputToAudioStream(s,
new SpeechAudioFormatInfo(
EncodingFormat.Pcm, 16000, 16, 1,
32000, 2, null));
sre.RecognizeAsync(RecognizeMode.Multiple);
Trace.TraceInformation("New recognizer started");
if (oldSre != null)
{
oldSre.RecognizeAsyncStop();
oldSre.SpeechDetected -= SreSpeechDetected;
oldSre.SpeechHypothesized -= SreSpeechHypothesized;
oldSre.SpeechRecognitionRejected -= SreSpeechRecognitionRejected;
oldSre.AudioSignalProblemOccurred -= SreAudioSignalProblemOccurred;
oldSre.Dispose();
Trace.TraceInformation("Old recognizer disposed");
//.........这里部分代码省略.........