本文整理汇总了C#中SpeechRecognitionEngine.LoadGrammar方法的典型用法代码示例。如果您正苦于以下问题:C# SpeechRecognitionEngine.LoadGrammar方法的具体用法?C# SpeechRecognitionEngine.LoadGrammar怎么用?C# SpeechRecognitionEngine.LoadGrammar使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类SpeechRecognitionEngine的用法示例。
在下文中一共展示了SpeechRecognitionEngine.LoadGrammar方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: SpeechConversation
public SpeechConversation(SpeechSynthesizer speechSynthesizer = null, SpeechRecognitionEngine speechRecognition = null)
{
    // Fresh per-conversation state bag.
    SessionStorage = new SessionStorage();

    // No synthesizer supplied: build one that speaks through the OS default device.
    if (speechSynthesizer == null)
    {
        speechSynthesizer = new SpeechSynthesizer();
        speechSynthesizer.SetOutputToDefaultAudioDevice();
    }
    _speechSynthesizer = speechSynthesizer;

    // No recognizer supplied: build an en-US engine preloaded with the two
    // stock dictation grammars (free-form and letter-by-letter spelling).
    if (speechRecognition == null)
    {
        speechRecognition = new SpeechRecognitionEngine(
            new System.Globalization.CultureInfo("en-US")
        );

        var freeFormDictation = new DictationGrammar
        {
            Name = "default dictation",
            Enabled = true
        };
        speechRecognition.LoadGrammar(freeFormDictation);

        var spellingDictation = new DictationGrammar("grammar:dictation#spelling")
        {
            Name = "spelling dictation",
            Enabled = true
        };
        speechRecognition.LoadGrammar(spellingDictation);

        // Listen on the OS default microphone.
        speechRecognition.SetInputToDefaultAudioDevice();
    }
    _speechRecognition = speechRecognition;
}
示例2: Start
public void Start(ListenContext listenCtx, SpeechRecognitionEngine sre)
{
    _listenCtx = listenCtx;

    // First grammar is loaded from the embedded resource (ASCII SRGS text).
    using (var resourceStream = new MemoryStream(Encoding.ASCII.GetBytes(Resources.RecogPlay)))
    {
        sre.LoadGrammar(new Grammar(resourceStream));
    }

    // Second grammar is assembled at runtime from the configured choices.
    var builder = new GrammarBuilder { Culture = new CultureInfo("en-US") };
    builder.Append(_choices);
    sre.LoadGrammar(new Grammar(builder));

    // Reset the last successfully recognized phrase.
    _lastKnownGood = null;
}
示例3: AddGrammars
void AddGrammars(SpeechRecognitionEngine recognizer)
{
    // Build every grammar the recognizer needs, then load them in order:
    // dictation, mulligan commands (digits 1-4 + "confirm", up to 99 repeats),
    // move commands, and card removal.
    Grammar[] grammars =
    {
        BuildGrammar(),
        MakeRepeatedGrammar(new string[] { "mulligan" }, new string[] { "1", "2", "3", "4", "confirm" }, 99),
        MakeMoveGrammar(),
        RemoveCardGrammar(),
    };
    foreach (Grammar grammar in grammars)
    {
        recognizer.LoadGrammar(grammar);
    }
}
示例4: button1_Click
private void button1_Click(object sender, EventArgs e)
{
    // Signalled by the SpeechRecognized handler when the user says "exit".
    _completed = new ManualResetEvent(false);

    // BUG FIX: the original called Dispose() manually at the end, so the
    // engine leaked if grammar loading, device setup, or the wait threw.
    // `using` guarantees disposal on every path.
    using (SpeechRecognitionEngine _recognizer = new SpeechRecognitionEngine())
    {
        _recognizer.RequestRecognizerUpdate(); // request for recognizer update
        _recognizer.LoadGrammar(new Grammar(new GrammarBuilder("test"))); // load a grammar
        _recognizer.RequestRecognizerUpdate(); // request for recognizer update
        _recognizer.LoadGrammar(new Grammar(new GrammarBuilder("exit"))); // load a "exit" grammar
        _recognizer.SpeechRecognized += _recognizer_SpeechRecognized;
        _recognizer.SetInputToDefaultAudioDevice(); // listen on the default microphone
        _recognizer.RecognizeAsync(RecognizeMode.Multiple); // keep recognizing until cancelled

        // NOTE(review): this blocks the UI thread until the handler sets the
        // event — consider an async flow if the UI must stay responsive.
        _completed.WaitOne();
    }
}
示例5: InitializeSRE
public static SpeechRecognitionEngine InitializeSRE()
{
    // Create the speech recognition engine.
    SpeechRecognitionEngine sre = new SpeechRecognitionEngine();
    // BUG FIX: the original wrapped the engine in `using (sre) { ... }` and
    // then returned it, so every caller received an already-disposed engine.
    // The engine must stay alive for async recognition to keep running.

    // Set the audio device to the OS default.
    sre.SetInputToDefaultAudioDevice();
    // Reset the grammar set before loading.
    sre.UnloadAllGrammars();
    // Load the plugins, then each plugin's grammar.
    LoadPlugins();
    foreach (IJarvisPlugin plugin in _plugins)
        sre.LoadGrammar(plugin.getGrammar());
    // Wire the recognition handler BEFORE starting recognition so early
    // results are not missed, then recognize continuously.
    sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(Engine.SpeechRecognized);
    sre.RecognizeAsync(RecognizeMode.Multiple);

    // Block until a stop is requested. The short sleep replaces the
    // original's empty loop, which pinned a CPU core at 100%.
    while (!Jarvis.JarvisMain.stop)
    {
        System.Threading.Thread.Sleep(50);
    }
    return sre;
}
示例6: CreateSpeechRecognizer
// Build a Kinect-backed recognizer whose vocabulary maps each spoken
// accessory name to an upper-case semantic tag.
private SpeechRecognitionEngine CreateSpeechRecognizer()
{
    RecognizerInfo recognizerInfo = GetKinectRecognizer();
    var engine = new SpeechRecognitionEngine(recognizerInfo.Id);

    // Phrase -> semantic tag pairs the program must recognise.
    var phrasePairs = new[]
    {
        new { Phrase = "moustache", Tag = "MOUSTACHE" },
        new { Phrase = "top hat", Tag = "TOP HAT" },
        new { Phrase = "glasses", Tag = "GLASSES" },
        new { Phrase = "sunglasses", Tag = "SUNGLASSES" },
        new { Phrase = "tie", Tag = "TIE" },
        new { Phrase = "bow", Tag = "BOW" },
        new { Phrase = "bear", Tag = "BEAR" },
    };
    var vocabulary = new Choices();
    foreach (var pair in phrasePairs)
    {
        vocabulary.Add(new SemanticResultValue(pair.Phrase, pair.Tag));
    }

    // Grammar must use the recognizer's own culture.
    var builder = new GrammarBuilder { Culture = recognizerInfo.Culture };
    builder.Append(vocabulary);
    engine.LoadGrammar(new Grammar(builder));

    // Handlers for accepted and rejected speech.
    engine.SpeechRecognized += SreSpeechRecognized;
    engine.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
    return engine;
}
示例7: SpeechRecognizer
public SpeechRecognizer(string file, KinectSensor sensor)
{
    this.grammarFile = file;
    this.kinectSensor = sensor;

    // Configure the Kinect microphone array before starting it.
    audioSource = kinectSensor.AudioSource;
    audioSource.AutomaticGainControlEnabled = false;
    audioSource.BeamAngleMode = BeamAngleMode.Adaptive;

    // Pick the first installed recognizer that both advertises Kinect
    // support and targets the en-US culture.
    var recognizerInfo = SpeechRecognitionEngine.InstalledRecognizers()
        .FirstOrDefault(r =>
        {
            string kinectFlag;
            r.AdditionalInfo.TryGetValue("Kinect", out kinectFlag);
            return "True".Equals(kinectFlag, StringComparison.InvariantCultureIgnoreCase)
                && "en-US".Equals(r.Culture.Name, StringComparison.InvariantCultureIgnoreCase);
        });

    // No suitable recognizer installed: leave the engine unconstructed.
    if (recognizerInfo == null)
        return;

    speechRecognitionEngine = new SpeechRecognitionEngine(recognizerInfo.Id);
    speechRecognitionEngine.LoadGrammar(new Grammar(grammarFile));

    // Start the Kinect audio stream and feed it to the engine as
    // 16 kHz / 16-bit / mono PCM.
    audioStream = audioSource.Start();
    speechRecognitionEngine.SetInputToAudioStream(audioStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
    speechRecognitionEngine.AudioStateChanged += onAudioStateChanged;
    speechRecognitionEngine.SpeechRecognized += onSpeechRecognized;
    speechRecognitionEngine.RecognizeCompleted += onSpeechRecognizeCompleted;
    speechRecognitionEngine.EmulateRecognizeCompleted += onEmulateRecognizeCompleted;
}
示例8: MainWindow
public MainWindow()
{
InitializeComponent();
// Persist a (default) config file under %APPDATA%\LeagueTag.
var config = new JsonConfigHandler( System.IO.Path.Combine( Environment.GetFolderPath( Environment.SpecialFolder.ApplicationData ), "LeagueTag" ) );
//config.Populate();
config.Save();
//config.Save(
// NOTE(review): this early return makes everything below unreachable — the
// speech engine is never created and no rendering/data-context hookup runs.
// It looks like leftover debugging; confirm whether it should be removed.
return;
// Dead code from here down (see the return above): build a grammar of the
// form "tag (baron|dragon)" and recognize continuously on the default mic.
var engine = new SpeechRecognitionEngine();
var builder = new GrammarBuilder();
builder.Append( "tag" );
builder.Append( new Choices( "baron", "dragon" ) );
engine.RequestRecognizerUpdate();
engine.LoadGrammar( new Grammar( builder ) );
engine.SpeechRecognized += engine_SpeechRecognized;
engine.SetInputToDefaultAudioDevice();
engine.RecognizeAsync( RecognizeMode.Multiple );
CompositionTarget.Rendering += CompositionTarget_Rendering;
this.DataContext = this;
}
示例9: initRS
public void initRS()
{
    try
    {
        // en-US engine listening for four single-word commands.
        var engine = new SpeechRecognitionEngine(new CultureInfo("en-US"));

        var commandWords = new Choices();
        foreach (string word in new[] { "Hello", "Jump", "Left", "Right" })
        {
            commandWords.Add(word);
        }

        var builder = new GrammarBuilder();
        builder.Culture = new System.Globalization.CultureInfo("en-US");
        builder.Append(commandWords);

        engine.LoadGrammar(new Grammar(builder));
        engine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
        engine.SetInputToDefaultAudioDevice();
        engine.RecognizeAsync(RecognizeMode.Multiple);
    }
    catch (Exception e)
    {
        // Surface any initialization failure on the form's label.
        label1.Text = "init RS Error : " + e.ToString();
    }
}
示例10: load_listen
public void load_listen(VI_Profile profile, VI_Settings settings, ListView statusContainer)
{
    this.profile = profile;
    this.settings = settings;
    this.statusContainer = statusContainer;

    // Bind the profile's synthesizer to the configured voice.
    vi_syn = profile.synth;
    vi_syn.SelectVoice(settings.voice_info);

    vi_sre = new SpeechRecognitionEngine(settings.recognizer_info);

    // Gather every trigger phrase defined in the profile.
    var triggerPhrases = new List<string>();
    foreach (VI_Phrase trigger in profile.Profile_Triggers)
    {
        triggerPhrases.Add(trigger.value);
    }

    // Nothing to recognize: tell the user and bail out without starting.
    if (triggerPhrases.Count == 0)
    {
        MessageBox.Show("You need to add at least one Trigger");
        return;
    }

    var phraseBuilder = new GrammarBuilder();
    phraseBuilder.Append(new Choices(triggerPhrases.ToArray()));
    vi_sre.LoadGrammar(new Grammar(phraseBuilder));

    // Wire recognition callbacks, then listen continuously on the default mic.
    vi_sre.SpeechRecognized += phraseRecognized;
    vi_sre.SpeechRecognitionRejected += _recognizer_SpeechRecognitionRejected;
    vi_sre.SetInputToDefaultAudioDevice();
    vi_sre.RecognizeAsync(RecognizeMode.Multiple);
}
示例11: VoiceSelect
public VoiceSelect()
{
    precision = .5;
    newWordReady = false;

    RecognizerInfo kinectRecognizer = GetKinectRecognizer();
    var engine = new SpeechRecognitionEngine(kinectRecognizer.Id);

    // Menu vocabulary: each row lists the alternative spellings accepted
    // for the same command.
    string[][] phraseGroups =
    {
        new[] { "select one", "SELECT ONE", "Select One" },
        new[] { "select two", "SELECT TWO", "Select Two" },
        new[] { "pause", "PAUSE" },
        new[] { "exit", "EXIT" },
        new[] { "single player", "SINGLE PLAYER" },
        new[] { "co op mode", "CO OP MODE" },
        new[] { "settings", "SETTINGS" },
        new[] { "instructions", "INSTRUCTIONS" },
        new[] { "statistics", "STATISTICS" },
        new[] { "Main Menu", "MAIN MENU" },
        new[] { "resume", "RESUME" },
        new[] { "restart level", "RESTART LEVEL" },
        new[] { "replay", "REPLAY" },
        new[] { "next", "NEXT" },
        new[] { "Easy", "EASY" },
        new[] { "Hard", "HARD" },
    };
    var vocabulary = new Choices();
    foreach (string[] group in phraseGroups)
    {
        vocabulary.Add(group);
    }
    /* Currently disabled phrases (kept for reference):
       level one .. level six,
       player one left/right, player two left/right,
       room low/medium/high, sounds on/off, reset stats
    */

    // Create the actual Grammar instance (using the recognizer's own
    // culture), and then load it into the speech recognizer.
    var builder = new GrammarBuilder { Culture = kinectRecognizer.Culture };
    builder.Append(vocabulary);
    engine.LoadGrammar(new Grammar(builder));

    // Handlers for recognized, hypothesized, and rejected speech.
    engine.SpeechRecognized += phraseRecognized;
    engine.SpeechHypothesized += phraseHyphothesized;
    engine.SpeechRecognitionRejected += phraseRejected;
    speechRec = engine;
}
示例12: worker_DoWork
// Background-worker entry point: bring up Kinect v1 audio and start
// continuous speech recognition on its stream. The bring-up order matters:
// engine + handlers first, then start the audio source, then bind the
// stream, then start recognizing.
private void worker_DoWork(object sender, DoWorkEventArgs e)
{
Thread.CurrentThread.Name = "Kinect audio thread";
// Only Kinect v1 devices are handled here; other device types are skipped.
if(_device.Type == DeviceType.KINECT_1)
{
SpeechRecognizer = new SpeechRecognitionEngine(recognizerInfo.Id);
SpeechRecognizer.LoadGrammar(GetCurrentGrammar());
// Hook handlers before recognition starts so no early events are missed.
SpeechRecognizer.SpeechRecognized += SreSpeechRecognized;
SpeechRecognizer.SpeechHypothesized += SreSpeechHypothesized;
SpeechRecognizer.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
//set sensor audio source to variable
audioSource = _device.sensor.AudioSource;
//Set the beam angle mode - the direction the audio beam is pointing
//we want it to be set to adaptive
audioSource.BeamAngleMode = BeamAngleMode.Adaptive;
//start the audiosource
var kinectStream = audioSource.Start();
//configure incoming audio stream: 16 kHz / 16-bit / mono PCM
SpeechRecognizer.SetInputToAudioStream(
kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
//make sure the recognizer does not stop after completing
SpeechRecognizer.RecognizeAsync(RecognizeMode.Multiple);
//reduce background and ambient noise for better accuracy
// NOTE(review): these two flags are set AFTER recognition starts —
// confirm that is intentional and takes effect on the running stream.
_device.sensor.AudioSource.EchoCancellationMode = EchoCancellationMode.None;
_device.sensor.AudioSource.AutomaticGainControlEnabled = false;
audioStarted = true;
}
Console.WriteLine("END OF WORKER AUDIO");
}
示例13: VoiceInput
public VoiceInput()
{
    // en-US engine listening on the default microphone.
    recognizer = new SpeechRecognitionEngine(new CultureInfo("en-US"));
    recognizer.SetInputToDefaultAudioDevice();

    // Vocabulary = the configured command list plus the three
    // listening-control phrases.
    var vocabulary = new Choices();
    foreach (String command in commands)
    {
        vocabulary.Add(command);
    }
    vocabulary.Add(startListening);
    vocabulary.Add(stopListening);
    vocabulary.Add(stop);
    /* Previously hard-coded phrases (now supplied via `commands`):
       Close, Left, Right, Tilt Left, Tilt Right, Move, Back, Move Up,
       Down, Exit, Stop, Start Listening, Stop Listening */

    recognizer.LoadGrammar(new Grammar(new GrammarBuilder(vocabulary)));
    recognizer.SpeechRecognized += SpeechRecognized;
    recognizer.RecognizeAsync(RecognizeMode.Multiple);
}
示例14: CreateSpeechRecognizer
// Build a Kinect-backed recognizer for the recording workflow's six
// voice commands.
private SpeechRecognitionEngine CreateSpeechRecognizer()
{
    // Recognizer metadata (id + culture) for the Kinect engine.
    RecognizerInfo recognizerInfo = GetKinectRecognizer();
    var engine = new SpeechRecognitionEngine(recognizerInfo.Id);

    // Words the program must recognise.
    var vocabulary = new Choices();
    foreach (string word in new[] { "Record", "Store", "Replay", "Stop", "Learn", "Finish" })
    {
        vocabulary.Add(word);
    }

    // Grammar uses the recognizer's own culture (language, country/region).
    var builder = new GrammarBuilder { Culture = recognizerInfo.Culture };
    builder.Append(vocabulary);
    engine.LoadGrammar(new Grammar(builder));

    // Handlers for recognized, hypothesized, and rejected speech.
    engine.SpeechRecognized += SreSpeechRecognized;
    engine.SpeechHypothesized += SreSpeechHypothesized;
    engine.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
    return engine;
}
示例15: Transcribe
public void Transcribe(MediaSegment segment)
{
    using (var engine = new SpeechRecognitionEngine())
    {
        // Free-form dictation over the segment's WAV file.
        engine.LoadGrammar(new DictationGrammar());
        engine.SetInputToWaveFile(segment.File.FullName);

        var result = engine.Recognize();
        // BUG FIX: Recognize() returns null when no speech could be
        // recognized before the input ends; the original dereferenced
        // result.Audio unconditionally and threw a NullReferenceException.
        if (result == null || result.Audio == null)
            return;

        // Timestamps are relative to the whole media file, so shift the
        // in-segment audio position by the segment's offset.
        var metaDatum = new Metadata();
        metaDatum.Start = result.Audio.AudioPosition.TotalMilliseconds + segment.OffsetMs;
        metaDatum.End = metaDatum.Start + segment.DurationMs;
        metaDatum.EngineMetadata = new SpeechResults
        {
            Text = result.Text,
            Confidence = result.Confidence
        };

        // Append this segment's metadata to the per-file list (create the
        // list on first sight of this FileId).
        _concurrentDictionary.AddOrUpdate(segment.FileId, new List<Metadata> {metaDatum}, (key, list) =>
        {
            list.Add(metaDatum);
            return list;
        });
    }
}