本文整理汇总了C#中SpeechRecognitionEngine.LoadGrammarAsync方法的典型用法代码示例。如果您正苦于以下问题:C# SpeechRecognitionEngine.LoadGrammarAsync方法的具体用法?C# SpeechRecognitionEngine.LoadGrammarAsync怎么用?C# SpeechRecognitionEngine.LoadGrammarAsync使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类SpeechRecognitionEngine的用法示例。
在下文中一共展示了SpeechRecognitionEngine.LoadGrammarAsync方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: StartDesign
/// <summary>
/// Sets up an in-process speech recognizer for the circuit designer, loads the
/// "Clear"/"Insert"/"Connect" command grammars, shows the main form, and then
/// keeps performing blocking one-shot recognitions.
/// </summary>
public void StartDesign()
{
    // In-process speech recognizer reading from the default microphone.
    SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine();
    recognizer.LoadGrammarCompleted += new EventHandler<LoadGrammarCompletedEventArgs>(LoadGrammarCompleted);
    recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);
    recognizer.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRejected);
    recognizer.SetInputToDefaultAudioDevice();

    // Word lists shared by the "Insert" and "Connect" phrases.
    Choices gates = new Choices(new string[] { "and", "or", "not", "ex or", "nor", "nand" });
    Choices columns = new Choices(new string[] { "one", "too", "three", "four", "five", "six", "seven", "eight" });
    Choices rows = new Choices(new string[] { "one", "too", "three", "four", "five" });
    Choices orientation = new Choices(new string[] { "left", "right", "up", "down" });

    // "Clear" is a single-word command.
    GrammarBuilder clearPhrase = new GrammarBuilder("Clear");

    // Phrase shape: "Insert <gate> <column> <row> towards <orientation>".
    GrammarBuilder insertPhrase = new GrammarBuilder("Insert");
    insertPhrase.Append(gates);
    insertPhrase.Append(columns);
    insertPhrase.Append(rows);
    insertPhrase.Append("towards");
    insertPhrase.Append(orientation);

    // Phrase shape: "Connect output <column> <row> to input <column> <row>".
    GrammarBuilder connectPhrase = new GrammarBuilder("Connect");
    connectPhrase.Append("output");
    connectPhrase.Append(columns);
    connectPhrase.Append(rows);
    connectPhrase.Append("to");
    connectPhrase.Append("input");
    connectPhrase.Append(columns);
    connectPhrase.Append(rows);

    recognizer.LoadGrammarAsync(new Grammar(clearPhrase));
    recognizer.LoadGrammarAsync(new Grammar(insertPhrase));
    recognizer.LoadGrammarAsync(new Grammar(connectPhrase));

    Application.EnableVisualStyles();
    Application.SetCompatibleTextRenderingDefault(false);
    Application.Run(new Form1());
    //recognizer.RecognizeAsync(RecognizeMode.Multiple);

    // After the form closes, keep doing blocking one-shot recognitions.
    while (true)
    {
        recognizer.Recognize();
    }
}
示例2: Main
/// <summary>
/// Console entry point: announces that the assistant is awake, then listens
/// continuously for the start/stop voice commands until the recognition
/// handler sets <c>done</c>.
/// </summary>
static void Main(string[] args)
{
    try
    {
        ss.SetOutputToDefaultAudioDevice();
        Console.WriteLine("\n(Speaking: I am awake)");
        ss.Speak("I am awake");

        // en-US recognizer reading from the default microphone.
        CultureInfo ci = new CultureInfo("en-us");
        sre = new SpeechRecognitionEngine(ci);
        sre.SetInputToDefaultAudioDevice();
        sre.SpeechRecognized += sre_SpeechRecognized;

        // The only phrases this grammar accepts.
        Choices ch_StartStopCommands = new Choices();
        ch_StartStopCommands.Add("Alexa record");
        ch_StartStopCommands.Add("speech off");
        ch_StartStopCommands.Add("klatu barada nikto");
        GrammarBuilder gb_StartStop = new GrammarBuilder();
        gb_StartStop.Append(ch_StartStopCommands);
        Grammar g_StartStop = new Grammar(gb_StartStop);
        sre.LoadGrammarAsync(g_StartStop);
        sre.RecognizeAsync(RecognizeMode.Multiple); // multiple grammars

        // FIX: the original polled with a tight empty loop
        // ("while (done == false) { ; }"), pegging one CPU core; sleep briefly
        // between polls instead. Behavior is otherwise unchanged.
        while (done == false)
        {
            System.Threading.Thread.Sleep(50);
        }

        Console.WriteLine("\nHit <enter> to close shell\n");
        Console.ReadLine();
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.Message);
        Console.ReadLine();
    }
}
示例3: SetupSpeech
/// <summary>
/// Creates the synthesizer and recognizer, loads a free-form dictation
/// grammar, and starts continuous background recognition from the default
/// microphone.
/// </summary>
void SetupSpeech()
{
    speechTalk = new SpeechSynthesizer();

    speechEngine = new SpeechRecognitionEngine();
    speechEngine.SetInputToDefaultAudioDevice();
    speechEngine.SpeechRecognized += speechEngine_SpeechRecognized;

    // Free-form dictation rather than a fixed command list.
    speechEngine.LoadGrammarAsync(new DictationGrammar());
    speechEngine.RecognizeAsync(RecognizeMode.Multiple);
}
示例4: VoiceCommandEngine
/// <summary>
/// Builds a recognizer that listens on the default microphone for
/// "<c>callName</c> <c>command</c>" phrases and raises events as audio arrives.
/// </summary>
/// <param name="callName">Wake word that must prefix every command.</param>
public VoiceCommandEngine(string callName)
{
    this.callName = callName;

    engine = new SpeechRecognitionEngine();
    engine.SetInputToDefaultAudioDevice();

    // Every spoken command the engine understands.
    string[] commandWords =
    {
        "open", "open file",
        "mute", "unmute",
        "increase volume", "raise volume", "volume up",
        "decrease volume", "lower volume", "volume down",
        "hide", "show", "help", "stop listening", "close",
        "play", "pause", "rewind", "stop",
        "next chapter", "skip chapter", "previous chapter",
        "next", "next file", "previous", "previous file",
        "fullscreen", "view fullscreen", "go fullscreen",
        "exit fullscreen", "leave fullscreen",
        "whats playing"
    };
    var commands = new Choices(commandWords);

    // Phrase shape: "<callName>" followed by exactly one command.
    var phrase = new GrammarBuilder(callName);
    phrase.Append(commands.ToGrammarBuilder());

    // add the grammars
    engine.LoadGrammarAsync(new Grammar(phrase));

    // adds handlers for the grammar's speech recognized event.
    engine.SpeechRecognized += recognizer_SpeechRecognized;
    engine.AudioLevelUpdated += engine_AudioLevelUpdated;
}
示例5: Main
// Initialize an in-process speech recognition engine.
/// <summary>
/// Loads a 42-word command grammar, reads the target .wav file name from
/// Input.txt, and runs asynchronous recognition over that entire wave file,
/// waiting until the RecognizeCompleted handler sets <c>completed</c>.
/// </summary>
static void Main(string[] args)
{
    using (SpeechRecognitionEngine recognizer =
        new SpeechRecognitionEngine())
    {
        // Create and load a grammar.
        string[] myWords = new string[] { "Me", "Kiss", "Fluff", "Yell", "Kind", "Crack", "Hope", "Check", "Lake", "Steep", "Shell", "Bark", "Tooth", "Mouse", "Force", "Fringe", "Flight", "Haunt", "Asked", "Going", "Table", "Giant", "Bully", "Treated", "Spying", "Wiggle", "Shredded", "Picnic", "Decoy", "Slaying", "Scheming", "Happier", "Joyous", "Riotous", "Chow", "Cookie", "Feud", "Eighty", "Host", "Weather", "Crawl", "Stew" }; //Sets the words that it will be listening for
        Choices commands = new Choices(); //Usually the system is sets up the words expected to be commands. Our "commands" will be the 42 words given.
        commands.Add(myWords); //This adds my 42 words to the commands to be recognized (commands being a list of words).
        GrammarBuilder gBuilder = new GrammarBuilder(); //This is setting up the system that will understand the words
        gBuilder.Append(commands);
        Grammar grammar = new Grammar(gBuilder);
        recognizer.LoadGrammarAsync(grammar);
        //Grammar dictation = new DictationGrammar();
        //dictation.Name = "Dictation Grammar";
        // recognizer.LoadGrammar(dictation);

        // FIX: the StreamReader was never disposed, leaking the file handle
        // for the life of the process; wrap it in a using block.
        // NOTE(review): ReadToEnd keeps any trailing newline — confirm
        // Input.txt holds a bare file name.
        string line;
        using (StreamReader sr = new StreamReader(@"c:\Users\Taylor\Desktop\AllVoiceSamples\Input.txt"))
        {
            // Read the input file to a string
            line = sr.ReadToEnd();
        }

        // Configure the input to the recognizer. (FIX: the original also
        // called SetInputToDefaultAudioDevice first, which this call
        // immediately replaced; the redundant call is removed.)
        recognizer.SetInputToWaveFile(@"c:\Users\Taylor\Desktop\AllVoiceSamples\" + line);

        // Attach event handlers for the results of recognition.
        recognizer.SpeechRecognized +=
            new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
        recognizer.RecognizeCompleted +=
            new EventHandler<RecognizeCompletedEventArgs>(recognizer_RecognizeCompleted);

        // Perform recognition on the entire file.
        Console.WriteLine("Starting asynchronous recognition...");
        completed = false;
        recognizer.RecognizeAsync();

        // Keep the console window open until the completed handler fires.
        while (!completed)
        {
            Console.ReadLine();
        }
        Console.WriteLine("Done.");
    }
    Console.WriteLine();
    Console.WriteLine("Press any key to exit...");
    Console.ReadKey();
}
示例6: Main
/// <summary>
/// Demo entry point: loads a "<c>AssistantName</c> <c>command</c>" grammar,
/// emulates the phrase "Nigel Lights On", cancels recognition after 30
/// seconds, then waits for the RecognizeCompleted handler to set
/// <c>_completed</c>.
/// </summary>
static void Main(string[] args)
{
    // Create an in-process en-US recognizer (disposed on exit).
    using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine(new CultureInfo("en-US")))
    {
        // Phrase shape: "<assistant name>" followed by one command.
        Choices commandChoices = new Choices(new string[] { "Lights On", "Lights Off", "All Off", "Say Time"});
        GrammarBuilder phrase = new GrammarBuilder();
        phrase.Append(Properties.Settings.Default.AssistantName);
        phrase.Append(commandChoices);

        // Construct a Grammar object and load it into the recognizer.
        Grammar commandChooser = new Grammar(phrase);
        commandChooser.Name = ("Command Chooser");
        recognizer.LoadGrammarAsync(commandChooser);

        // Subscribe to every stage of the recognition pipeline.
        recognizer.SpeechDetected += SpeechDetectedHandler;
        recognizer.SpeechHypothesized += SpeechHypothesizedHandler;
        recognizer.SpeechRecognitionRejected += SpeechRecognitionRejectedHandler;
        recognizer.SpeechRecognized += SpeechRecognizedHandler;
        recognizer.RecognizeCompleted += RecognizeCompletedHandler;

        // Assign input to the recognizer and start asynchronous recognition.
        recognizer.SetInputToDefaultAudioDevice();
        _completed = false;
        Console.WriteLine("Starting asynchronous recognition...");
        //recognizer.RecognizeAsync(RecognizeMode.Multiple);
        recognizer.EmulateRecognizeAsync("Nigel Lights On");

        // Give the emulated phrase 30 seconds, then cancel the async pass.
        Thread.Sleep(TimeSpan.FromSeconds(30));
        recognizer.RecognizeAsyncCancel();

        // Wait for the operation to complete.
        while (!_completed)
        {
            Thread.Sleep(333);
        }
        Console.WriteLine("Done.");
    }
    Console.WriteLine();
    Console.WriteLine("Press any key to exit...");
    Console.ReadKey();
}
示例7: SpeechRecognition_Initialize
/// <summary>
/// Configures the recognizer with the assistant's fixed conversation phrases
/// and starts continuous recognition from the default microphone.
/// </summary>
private void SpeechRecognition_Initialize()
{
    recognitionEngine = new SpeechRecognitionEngine();

    // The only phrases the assistant reacts to.
    Choices commands = new Choices();
    string[] choices = {"hi zira", "how are you today?", "i feel sick", "good bye zira", "yes" };
    commands.Add(choices);
    GrammarBuilder grammarBuilder = new GrammarBuilder();
    grammarBuilder.Append(commands);
    Grammar grammar = new Grammar(grammarBuilder);
    recognitionEngine.LoadGrammarAsync(grammar);
    recognitionEngine.SetInputToDefaultAudioDevice();

    // FIX: subscribe to SpeechRecognized BEFORE starting recognition. The
    // original attached the handler after RecognizeAsync, so a phrase spoken
    // immediately after startup could be recognized without being handled.
    recognitionEngine.SpeechRecognized += recognitionEngine_SpeechRecognized;
    recognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
}
示例8: InitializeSpeechRecognition
/// <summary>
/// Builds a recognizer over the cached command phrases and enables speech
/// input; silently leaves speech disabled when recognition is unavailable.
/// </summary>
private void InitializeSpeechRecognition()
{
    try
    {
        // One grammar containing every cached command phrase.
        var commandChoices = new Choices(_cache.Commands.Keys.ToArray());
        var builder = new GrammarBuilder(commandChoices);
        var commandGrammar = new Grammar(builder);

        _rec = new SpeechRecognitionEngine
        {
            InitialSilenceTimeout = TimeSpan.FromSeconds(3)
        };
        _rec.SpeechHypothesized += OnSpeechHypothesized;
        _rec.SpeechRecognitionRejected += OnSpeechRecognitionRejected;
        // NOTE(review): OnSpeechRecognized is wired to RecognizeCompleted,
        // not SpeechRecognized — confirm this pairing is intentional.
        _rec.RecognizeCompleted += OnSpeechRecognized;
        _rec.LoadGrammarAsync(commandGrammar);
        _rec.SetInputToDefaultAudioDevice();
        _isEnabled = true;
    }
    catch { /* Speech Recognition hasn't been enabled on Windows */ }
}
示例9: LoadGrammar
/// <summary>
/// Rebuilds each Choices field from the keys of its phrase dictionary,
/// combines the active sets into the recognizer's grammars, and subscribes
/// the speech events.
/// </summary>
private void LoadGrammar(SpeechRecognitionEngine speechRecognitionEngine)
{
    // Each Choices field is repopulated from its phrase dictionary's keys.
    startListeningChoices = new Choices();
    foreach (var entry in this.startListeningPhrases)
    {
        startListeningChoices.Add(entry.Key);
    }

    stopListeningChoices = new Choices();
    foreach (var entry in this.stopListeningPhrases)
    {
        stopListeningChoices.Add(entry.Key);
    }

    booleanChoices = new Choices();
    foreach (var entry in this.booleanPhrases)
    {
        booleanChoices.Add(entry.Key);
    }

    kinectMotorChoices = new Choices();
    foreach (var entry in this.kinectMotorPhrases)
    {
        kinectMotorChoices.Add(entry.Key);
    }

    startScreenChoices = new Choices();
    foreach (var entry in this.startScreenPhrases)
    {
        startScreenChoices.Add(entry.Key);
    }

    instrumentChoices = new Choices();
    foreach (var entry in this.instrumentPhrases)
    {
        instrumentChoices.Add(entry.Key);
    }

    wallChoices = new Choices();
    foreach (var entry in this.wallPhrases)
    {
        wallChoices.Add(entry.Key);
    }

    /*
     * ADD NEW GRAMMARS HERE
     * Copy one of the loops above and place it just above this comment,
     * then amend "allChoices" below to include the new dictionary and add
     * it to "allDicts" further down.
     */

    // Only the start-screen and Kinect-motor phrases follow the trigger word.
    var allChoices = new Choices();
    allChoices.Add(startScreenChoices);
    allChoices.Add(kinectMotorChoices);

    // Using the recognizer's own culture makes this work on machines with any
    // culture, not just en-us.
    var triggerBuilder = new GrammarBuilder(startListeningChoices) { Culture = speechRecognitionEngine.RecognizerInfo.Culture };
    triggerBuilder.Append(allChoices);

    speechRecognitionEngine.LoadGrammarAsync(new Grammar(triggerBuilder));
    speechRecognitionEngine.LoadGrammarAsync(new Grammar(startListeningChoices));

    speechRecognitionEngine.SpeechRecognized += this.SreSpeechRecognized;
    speechRecognitionEngine.SpeechHypothesized += this.SreSpeechHypothesized;
    speechRecognitionEngine.SpeechRecognitionRejected += this.SreSpeechRecognitionRejected;
}
示例10: backgroundWorker1_DoWork
/// <summary>
/// Background worker: when either control flag is set, runs a continuous
/// en-IN recognizer for "Navi <c>question</c>" phrases and then blocks on
/// console input indefinitely.
/// </summary>
private void backgroundWorker1_DoWork(object sender, DoWorkEventArgs e)
{
    // Nothing to do unless one of the control flags is non-zero.
    if (controlvar == 0 && lastcontrolvar == 0)
    {
        return;
    }

    using (SpeechRecognitionEngine recognizer =
        new SpeechRecognitionEngine(
            new System.Globalization.CultureInfo("en-IN")))
    {
        // Phrase shape: "Navi" followed by one question word.
        Choices questions = new Choices(new string[] {"Where", "Read", "Repeat"});
        GrammarBuilder findServices = new GrammarBuilder("Navi");
        findServices.Append(questions);
        recognizer.LoadGrammarAsync(new Grammar(findServices));

        recognizer.SpeechRecognized += recognizer_SpeechRecognized;
        recognizer.SetInputToDefaultAudioDevice();
        recognizer.RecognizeAsync(RecognizeMode.Multiple);

        // Keep the recognizer alive; this loop never exits.
        while (true)
        {
            Console.ReadLine();
        }
    }
}
示例11: CreateSRE
/// <summary>
/// Builds a recognizer for the given culture that continuously listens on the
/// default microphone for the supplied choices, invoking the given handler on
/// each recognition.
/// </summary>
/// <param name="culture">Culture name, e.g. "en-US".</param>
/// <param name="choices">Phrases the grammar should accept.</param>
/// <param name="speechRecognizedEvent">Callback invoked on recognized speech.</param>
/// <returns>The started recognition engine.</returns>
private SpeechRecognitionEngine CreateSRE(string culture, Choices choices, Action<object, SpeechRecognizedEventArgs> speechRecognizedEvent)
{
    var engine = new SpeechRecognitionEngine(new CultureInfo(culture));

    // Wrap the choices in a grammar and hand it to the engine.
    var builder = new GrammarBuilder();
    builder.Append(choices);
    engine.LoadGrammarAsync(new Grammar(builder));

    //engine.InitialSilenceTimeout = TimeSpan.FromSeconds(1);
    //engine.BabbleTimeout = TimeSpan.FromSeconds(1);
    engine.EndSilenceTimeout = TimeSpan.FromSeconds(1);
    engine.EndSilenceTimeoutAmbiguous = TimeSpan.FromSeconds(.5);

    // Forward recognitions to the caller-supplied handler.
    engine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(speechRecognizedEvent);
    engine.SetInputToDefaultAudioDevice();
    engine.RecognizeAsync(RecognizeMode.Multiple);
    return engine;
}
示例12: Flow_StateChanged
/// <summary>
/// Reacts to media-flow state changes: when the flow becomes Active, attaches
/// a speech recognition connector and starts continuous "previous"/"next"
/// recognition; otherwise detaches any existing connector.
/// </summary>
void Flow_StateChanged(object sender, MediaFlowStateChangedEventArgs e)
{
    Log("ControlAVCall Flow_StateChanged PreviousState=" + e.PreviousState + " State=" + e.State);
    AudioVideoFlow avFlow = (AudioVideoFlow)sender;

    if (avFlow.State != MediaFlowState.Active)
    {
        // Flow went inactive: disconnect the recognizer's audio source.
        if (avFlow.SpeechRecognitionConnector != null)
        {
            avFlow.SpeechRecognitionConnector.DetachFlow();
        }
        return;
    }

    // Pull a recognition stream off the newly active flow.
    SpeechRecognitionConnector connector = new SpeechRecognitionConnector();
    connector.AttachFlow(avFlow);
    SpeechRecognitionStream stream = connector.Start();

    _speechRecognitionEngine = new SpeechRecognitionEngine();
    _speechRecognitionEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(_speechRecognitionEngine_SpeechRecognized);
    _speechRecognitionEngine.LoadGrammarCompleted += new EventHandler<LoadGrammarCompletedEventArgs>(_speechRecognitionEngine_LoadGrammarCompleted);

    // Only two navigation phrases are recognized.
    Choices pathChoice = new Choices(new string[] { "previous", "next" });
    _speechRecognitionEngine.LoadGrammarAsync(new Grammar(new GrammarBuilder(pathChoice)));

    // 8 kHz, 16-bit, mono audio format for the recognizer's input stream.
    SpeechAudioFormatInfo format = new SpeechAudioFormatInfo(8000, AudioBitsPerSample.Sixteen, Microsoft.Speech.AudioFormat.AudioChannel.Mono);
    _speechRecognitionEngine.SetInputToAudioStream(stream, format);
    _speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
}
示例13: initializeSpeech
/// <summary>
/// Starts speech input for the letter-guessing game: picks the first
/// installed recognizer, builds a "Guess <c>letter</c>" grammar over A-Z,
/// and loads it asynchronously.
/// </summary>
private void initializeSpeech()
{
    inSpeech = true;
    System.Console.Write("Initialize speech");

    // FIX: FirstOrDefault returns null when no recognizer is installed; the
    // original then crashed with a NullReferenceException at ri.Id. Guard and
    // back out of speech mode instead.
    SS.Recognition.RecognizerInfo ri = SS.Recognition.SpeechRecognitionEngine.InstalledRecognizers().FirstOrDefault();
    if (ri == null)
    {
        System.Console.Write("No speech recognizer installed; speech disabled");
        inSpeech = false;
        return;
    }
    sre = new SpeechRecognitionEngine(ri.Id);

    // Phrase shape: "Guess" followed by a single letter A-Z.
    Choices letters = new Choices(new string[] { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" });
    GrammarBuilder gb = new GrammarBuilder("Guess");
    gb.Append(letters);
    Grammar grammar = new Grammar(gb);
    grammar.Name = "DisK of Demise";

    sre.LoadGrammarCompleted += new EventHandler<LoadGrammarCompletedEventArgs>(LoadGrammarCompleted);
    sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);
    sre.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRejected);
    //sre.SetInputToDefaultAudioDevice();
    sre.LoadGrammarAsync(grammar);
}
示例14: AddInputMic
/// <summary>
/// Adds a new microphone instance and starts recognition for it when requested.
/// </summary>
/// <param name="instance">The instance id of the microphone</param>
/// <param name="client">The UDP client supplying the microphone's audio stream</param>
/// <param name="status">The status of the microphone</param>
/// <param name="shouldBeOn">Whether the speech recognition engine should be turned on</param>
public void AddInputMic(string instance, UDPClient client, string status, bool shouldBeOn)
{
    try
    {
        // en-US recognizer fed from the client's 16 kHz, 16-bit, mono stream.
        var engine = new SpeechRecognitionEngine(new CultureInfo("en-US"));
        engine.SetInputToAudioStream(client.AudioStream, new SpeechAudioFormatInfo(16000, AudioBitsPerSample.Sixteen, AudioChannel.Mono));
        engine.SpeechRecognized += RecognitionHandler;
        engine.SpeechRecognitionRejected += RecognitionRejectedHandler;

        // Always-on dictation grammar loaded synchronously up front.
        var dictation = new DictationGrammar("grammar:dictation")
        {
            Name = "dictation",
            Enabled = true
        };
        engine.LoadGrammar(dictation);

        mics.Add(instance, new Microphone(engine, client, status, shouldBeOn, port));

        // Also load every registered combined command grammar.
        foreach (var g in grammars)
        {
            engine.LoadGrammarAsync(new CombinedGrammar(g.Key, g.Value).compiled);
        }

        if (shouldBeOn)
        {
            engine.RecognizeAsync(RecognizeMode.Multiple);
        }
    }
    catch (IOException)
    {
        //negotiating connection with mic failed.
    }
}
示例15: RecognizeSpeech
/// <summary>
/// Configures an in-process recognizer with the "Command"/"Clear"/"Insert"/
/// "Connect" grammars for the circuit designer and loops over blocking
/// one-shot recognitions forever.
/// </summary>
public void RecognizeSpeech()
{
    SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine();
    recognizer.LoadGrammarCompleted += new EventHandler<LoadGrammarCompletedEventArgs>(LoadGrammarCompleted);
    recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);
    recognizer.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRejected);
    recognizer.SetInputToDefaultAudioDevice();

    // Word lists shared by the phrases below.
    Choices gates = new Choices(new string[] { "and", "or", "not", "exor", "nor", "nand" });
    Choices columns = new Choices(new string[] { "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen" });
    Choices rows = new Choices(new string[] { "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine" });
    //Choices orientation = new Choices(new string[] { "left", "right", "up", "down" });

    // Single-word starter commands.
    GrammarBuilder commandStarter = new GrammarBuilder("Command");
    GrammarBuilder clearPhrase = new GrammarBuilder("Clear");

    // Phrase shape: "Insert <gate> R <row> C <column>".
    GrammarBuilder insertPhrase = new GrammarBuilder("Insert");
    insertPhrase.Append(gates);
    insertPhrase.Append("R");
    insertPhrase.Append(rows);
    insertPhrase.Append("C");
    insertPhrase.Append(columns);
    //insertPhrase.Append("towards");
    //insertPhrase.Append(orientation);

    // Phrase shape: "Connect output <column> <row> to input <column> <row>".
    GrammarBuilder connectPhrase = new GrammarBuilder("Connect");
    connectPhrase.Append("output");
    connectPhrase.Append(columns);
    connectPhrase.Append(rows);
    connectPhrase.Append("to");
    connectPhrase.Append("input");
    connectPhrase.Append(columns);
    connectPhrase.Append(rows);

    recognizer.LoadGrammarAsync(new Grammar(clearPhrase));
    recognizer.LoadGrammarAsync(new Grammar(insertPhrase));
    recognizer.LoadGrammarAsync(new Grammar(connectPhrase));
    recognizer.LoadGrammarAsync(new Grammar(commandStarter));

    // Blocking one-shot recognitions, forever.
    while (true)
    {
        recognizer.Recognize();
        //recognizer.RecognizeAsync(RecognizeMode.Multiple);
    }
}