本文整理汇总了C#中SpeechRecognitionEngine类的典型用法代码示例。如果您正苦于以下问题:C# SpeechRecognitionEngine类的具体用法?C# SpeechRecognitionEngine怎么用?C# SpeechRecognitionEngine使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
SpeechRecognitionEngine类属于System.Speech.Recognition命名空间,在下文中一共展示了SpeechRecognitionEngine类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: CreateSpeechRecognizer
//Speech recognizer
private SpeechRecognitionEngine CreateSpeechRecognizer()
{
RecognizerInfo ri = GetKinectRecognizer();
SpeechRecognitionEngine sre;
sre = new SpeechRecognitionEngine(ri.Id);
//words we need the program to recognise
var grammar = new Choices();
grammar.Add(new SemanticResultValue("moustache", "MOUSTACHE"));
grammar.Add(new SemanticResultValue("top hat", "TOP HAT"));
grammar.Add(new SemanticResultValue("glasses", "GLASSES"));
grammar.Add(new SemanticResultValue("sunglasses", "SUNGLASSES"));
grammar.Add(new SemanticResultValue("tie", "TIE"));
grammar.Add(new SemanticResultValue("bow", "BOW"));
grammar.Add(new SemanticResultValue("bear", "BEAR"));
//etc
var gb = new GrammarBuilder { Culture = ri.Culture };
gb.Append(grammar);
var g = new Grammar(gb);
sre.LoadGrammar(g);
//Events for recognising and rejecting speech
sre.SpeechRecognized += SreSpeechRecognized;
sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
return sre;
}
示例2: SpeechRecognizer
// Wires a Kinect audio source to a Kinect-capable en-US speech recognizer
// loaded from an external grammar file. If no suitable recognizer is
// installed, the constructor returns early and the engine stays null.
public SpeechRecognizer(string file, KinectSensor sensor)
{
    this.grammarFile = file;
    this.kinectSensor = sensor;

    // Configure the Kinect microphone array.
    audioSource = kinectSensor.AudioSource;
    audioSource.AutomaticGainControlEnabled = false;
    audioSource.BeamAngleMode = BeamAngleMode.Adaptive;

    // Pick an installed recognizer that is both Kinect-capable and en-US.
    RecognizerInfo recognizerInfo = SpeechRecognitionEngine.InstalledRecognizers().FirstOrDefault(r =>
    {
        string kinectFlag;
        r.AdditionalInfo.TryGetValue("Kinect", out kinectFlag);
        return "True".Equals(kinectFlag, StringComparison.InvariantCultureIgnoreCase)
            && "en-US".Equals(r.Culture.Name, StringComparison.InvariantCultureIgnoreCase);
    });
    if (recognizerInfo == null)
        return; // no suitable recognizer installed

    speechRecognitionEngine = new SpeechRecognitionEngine(recognizerInfo.Id);
    speechRecognitionEngine.LoadGrammar(new Grammar(grammarFile));

    // Start capture and feed the engine 16 kHz, 16-bit, mono PCM.
    audioStream = audioSource.Start();
    speechRecognitionEngine.SetInputToAudioStream(audioStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

    speechRecognitionEngine.AudioStateChanged += onAudioStateChanged;
    speechRecognitionEngine.SpeechRecognized += onSpeechRecognized;
    speechRecognitionEngine.RecognizeCompleted += onSpeechRecognizeCompleted;
    speechRecognitionEngine.EmulateRecognizeCompleted += onEmulateRecognizeCompleted;
}
示例3: InitializeSRE
// Creates and starts the shared speech recognition engine, loading one
// grammar per plugin, then blocks until the application signals shutdown.
// Returns the live engine to the caller.
public static SpeechRecognitionEngine InitializeSRE()
{
    // BUG FIX: the original wrapped the engine in `using (sre) { ... }` and
    // then returned it — the caller received an already-disposed engine.
    // The engine must outlive this method, so it is no longer disposed here.
    SpeechRecognitionEngine sre = new SpeechRecognitionEngine();

    // Set the audio device to the OS default.
    sre.SetInputToDefaultAudioDevice();

    // Reset any previously loaded grammars, then load one per plugin.
    sre.UnloadAllGrammars();
    LoadPlugins();
    foreach (IJarvisPlugin plugin in _plugins)
        sre.LoadGrammar(plugin.getGrammar());

    // BUG FIX: subscribe before starting recognition so an early result
    // cannot be missed (the original subscribed after RecognizeAsync).
    sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(Engine.SpeechRecognized);

    // Continuous recognition until the stop flag is raised.
    sre.RecognizeAsync(RecognizeMode.Multiple);
    while (!Jarvis.JarvisMain.stop)
    {
        // BUG FIX: the original empty loop busy-waited on a full core.
        System.Threading.Thread.Sleep(50);
    }
    return sre;
}
示例4: MainWindow
// Window entry point: builds the JSON config, then (currently dead code —
// see NOTE below) would set up a "tag baron/dragon" voice command grammar.
public MainWindow()
{
InitializeComponent();
// Config lives under %APPDATA%\LeagueTag.
var config = new JsonConfigHandler( System.IO.Path.Combine( Environment.GetFolderPath( Environment.SpecialFolder.ApplicationData ), "LeagueTag" ) );
//config.Populate();
config.Save();
//config.Save(
// NOTE(review): this early `return;` makes everything below unreachable —
// the speech engine, rendering hook, and DataContext are never set up.
// Looks like temporary debugging; confirm with the author whether the
// return should be removed or the dead code deleted.
return;
var engine = new SpeechRecognitionEngine();
// Grammar: the word "tag" followed by either "baron" or "dragon".
var builder = new GrammarBuilder();
builder.Append( "tag" );
builder.Append( new Choices( "baron", "dragon" ) );
engine.RequestRecognizerUpdate();
engine.LoadGrammar( new Grammar( builder ) );
engine.SpeechRecognized += engine_SpeechRecognized;
engine.SetInputToDefaultAudioDevice();
engine.RecognizeAsync( RecognizeMode.Multiple );
// Per-frame rendering callback for the overlay.
CompositionTarget.Rendering += CompositionTarget_Rendering;
this.DataContext = this;
}
示例5: SpeechRecogniser
// Builds a recognizer for the instrument phrase list and starts DMO audio
// capture on a background thread. If the expected recognizer (RecognizerId)
// is not installed, the instance is left in its invalid state.
public SpeechRecogniser()
{
    RecognizerInfo recognizer = SpeechRecognitionEngine.InstalledRecognizers()
        .FirstOrDefault(r => r.Id == RecognizerId);
    if (recognizer == null)
        return; // recognizer not installed; 'valid' stays false

    sre = new SpeechRecognitionEngine(recognizer.Id);

    // One choice per instrument phrase key.
    var instrumentChoices = new Choices();
    foreach (var phrase in InstrumentPhrases)
        instrumentChoices.Add(phrase.Key);

    var objectChoices = new Choices();
    objectChoices.Add(instrumentChoices);

    var actionBuilder = new GrammarBuilder();
    actionBuilder.Append(objectChoices);

    var grammarBuilder = new GrammarBuilder();
    grammarBuilder.Append(actionBuilder);
    sre.LoadGrammar(new Grammar(grammarBuilder));

    // Recognition lifecycle callbacks.
    sre.SpeechRecognized += sre_SpeechRecognized;
    sre.SpeechHypothesized += sre_SpeechHypothesized;
    sre.SpeechRecognitionRejected += sre_SpeechRecognitionRejected;

    // DMO audio capture runs on its own thread.
    new Thread(StartDMO).Start();
    valid = true;
}
示例6: MainWindow
// Window entry point: verifies an en-US speech recognizer is installed,
// shows the alphabet cheat-sheet, and starts the game loop.
public MainWindow()
{
    InitializeComponent();
    NewGame();
    try
    {
        // Probe only: constructing the engine throws if no en-US recognizer
        // is installed. BUG FIX: the original leaked the probe instance;
        // it is now disposed immediately.
        using (new SpeechRecognitionEngine(new CultureInfo("en-US")))
        {
        }
    }
    catch (Exception)
    {
        MessageBox.Show("Печаль, печаль, печаль.\nНе хочу тебя расстраивать дружище, но кажется на твоем компе не установлен Speech Recognition для английского языка.\nУстанови его, и может быть удача улыбнется тебе.");
        Close();
        // BUG FIX: the original fell through after Close() and kept
        // initializing the window; bail out instead.
        return;
    }
    allCommand.Text = @"Aa [ ei ] [эй]
Bb [ bi: ] [би]
Cc [ si: ] [си]
Dd [ di: ] [ди]
Ee [ i: ] [и]
Ff [ ef ] [эф]
Gg [ dʒi: ] [джи]
Hh [ eitʃ ] [эйч]
Ii [ ai ] [ай]
Jj [ dʒei ] [джей]";
    Task.Factory.StartNew(Run);
}
示例7: CreateSpeechRecognizer
// Creates the Kinect speech recognizer with a two-word vocabulary
// ("hello" / "goodbye") and attaches all recognition lifecycle callbacks.
private SpeechRecognitionEngine CreateSpeechRecognizer()
{
    // Recognizer metadata (id + culture) comes from the Kinect helper.
    RecognizerInfo recognizerInfo = GetKinectRecognizer();
    var engine = new SpeechRecognitionEngine(recognizerInfo.Id);

    // Words the program must recognise.
    var vocabulary = new Choices("hello", "goodbye");

    // Grammar is built in the recognizer's own culture (language + region).
    var builder = new GrammarBuilder { Culture = recognizerInfo.Culture };
    builder.Append(vocabulary);
    engine.LoadGrammar(new Grammar(builder));

    // Callbacks for recognized, hypothesized, and rejected speech.
    engine.SpeechRecognized += SreSpeechRecognized;
    engine.SpeechHypothesized += SreSpeechHypothesized;
    engine.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
    return engine;
}
示例8: InitializeSpeechRecognitionEngine
// Builds a dictation recognizer that transcribes the given WAV file while
// Windows Media Player plays it back for the user. Failures are logged to
// the console and otherwise swallowed (best-effort behavior preserved).
public void InitializeSpeechRecognitionEngine(String filePath)
{
    MySpeechRecognitionEngine = new SpeechRecognitionEngine();
    MySpeechRecognitionEngine.UnloadAllGrammars();
    try
    {
        MySpeechRecognitionEngine.SetInputToWaveFile(filePath);
        // NOTE(review): hard-coded WMP path; breaks on non-default installs.
        Process.Start("C:\\Program Files\\Windows Media Player\\wmplayer.exe", ("\"" + filePath + "\""));
        MySpeechRecognitionEngine.LoadGrammar(new DictationGrammar());

        // BUG FIX: subscribe to every event BEFORE starting recognition;
        // the original called RecognizeAsync first and could miss early
        // AudioLevelUpdated / SpeechRecognized notifications.
        MySpeechRecognitionEngine.AudioLevelUpdated += MySpeechRecognitionEngine_AudioLevelUpdated;
        MySpeechRecognitionEngine.SpeechRecognized += MySpeechRecognitionEnginee_SpeechRecognized;
        MySpeechRecognitionEngine.AudioStateChanged += MySpeechRecognitionEnginee_AudioStateChanged;
        MySpeechRecognitionEngine.RecognizeCompleted += MySpeechRecognitionEngine_RecognizeCompleted;

        MySpeechRecognitionEngine.RecognizeAsync(RecognizeMode.Single);
    }
    catch (Exception ex)
    {
        // ex.Message is already a string; redundant ToString() removed.
        Console.Write(ex.Message);
    }
}
示例9: initRS
// Initializes an en-US recognizer with a four-command vocabulary and starts
// continuous recognition; any setup failure is reported via label1.
public void initRS()
{
    try
    {
        var engine = new SpeechRecognitionEngine(new CultureInfo("en-US"));

        // Command vocabulary.
        var commands = new Choices("Hello", "Jump", "Left", "Right");

        var builder = new GrammarBuilder();
        builder.Culture = new System.Globalization.CultureInfo("en-US");
        builder.Append(commands);

        engine.LoadGrammar(new Grammar(builder));
        engine.SpeechRecognized += sre_SpeechRecognized;
        engine.SetInputToDefaultAudioDevice();
        engine.RecognizeAsync(RecognizeMode.Multiple);
    }
    catch (Exception e)
    {
        label1.Text = "init RS Error : " + e.ToString();
    }
}
示例10: StartListening
// Starts Kinect speech recognition using the grammar XML embedded in the
// assembly's resources. No-op if no recognizer info (_ri) was found earlier.
public void StartListening()
{
if (null != _ri)
{
_speechEngine = new SpeechRecognitionEngine(_ri.Id);
// Create a grammar from the grammar-definition XML embedded resource.
using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(KAIT.Kinect.Service.Properties.Resources.SpeechGrammar)))
{
var g = new Grammar(memoryStream);
_speechEngine.LoadGrammar(g);
}
// Callbacks for accepted and rejected speech.
_speechEngine.SpeechRecognized += _speechEngine_SpeechRecognized;
_speechEngine.SpeechRecognitionRejected += _speechEngine_SpeechRecognitionRejected;
// Let the convert stream know speech is going active BEFORE wiring it
// as engine input — keep this ordering.
_convertStream.SpeechActive = true;
// 16 kHz, 16-bit, mono PCM from the Kinect conversion stream.
_speechEngine.SetInputToAudioStream(
_convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
// Continuous (multi-utterance) recognition.
_speechEngine.RecognizeAsync(RecognizeMode.Multiple);
}
}
示例11: InicializeSpeechRecognize
// Builds the Kinect speech engine and loads a grammar assembled from every
// registered command phrase. Throws RecognizerNotFoundException when no
// Kinect recognizer is installed; engine-creation failures are logged and
// rethrown.
public void InicializeSpeechRecognize()
{
    RecognizerInfo ri = GetKinectRecognizer();
    if (ri == null)
    {
        throw new RecognizerNotFoundException();
    }
    try
    {
        _sre = new SpeechRecognitionEngine(ri.Id);
    }
    catch (Exception e)
    {
        Console.WriteLine(e.Message);
        // BUG FIX: `throw e;` resets the stack trace; bare `throw;`
        // rethrows the original exception with its trace intact.
        throw;
    }

    // One choice per registered command.
    var choices = new Choices();
    foreach (CommandSpeechRecognition cmd in _commands.Values)
    {
        choices.Add(cmd.Choise);
    }

    var gb = new GrammarBuilder { Culture = ri.Culture };
    gb.Append(choices);
    _sre.LoadGrammar(new Grammar(gb));

    // Recognition lifecycle callbacks.
    _sre.SpeechRecognized += SreSpeechRecognized;
    _sre.SpeechHypothesized += SreSpeechHypothesized;
    _sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
}
示例12: BuildSpeechEngine
// Builds the planet-name recognizer from the given recognizer info and
// starts audio streaming on a dedicated thread.
void BuildSpeechEngine(RecognizerInfo rec)
{
    _speechEngine = new SpeechRecognitionEngine(rec.Id);

    // Celestial-body vocabulary.
    var vocabulary = new Choices("venus", "mars", "earth", "jupiter", "sun");

    var grammarBuilder = new GrammarBuilder { Culture = rec.Culture };
    grammarBuilder.Append(vocabulary);
    _speechEngine.LoadGrammar(new Grammar(grammarBuilder));

    // Fired when input may be a component of multiple grammar phrases.
    _speechEngine.SpeechHypothesized += SpeechEngineSpeechHypothesized;
    // Fired when input matches a loaded, enabled grammar.
    _speechEngine.SpeechRecognized += _speechEngineSpeechRecognized;
    // Fired when input matches none of the loaded grammars.
    _speechEngine.SpeechRecognitionRejected += _speechEngineSpeechRecognitionRejected;

    // Audio streaming runs on its own thread: calling RecognizeAsync from
    // this (MTA by default) thread would raise a COM exception.
    var audioThread = new Thread(StartAudioStream);
    audioThread.Start();
}
示例13: SpeechConversation
// Creates a conversation wrapper. Callers may inject their own synthesizer
// and/or recognizer; when omitted, defaults are built: a synthesizer on the
// default audio device, and an en-US recognizer with normal + spelling
// dictation grammars reading from the default microphone.
public SpeechConversation(SpeechSynthesizer speechSynthesizer = null, SpeechRecognitionEngine speechRecognition = null)
{
    SessionStorage = new SessionStorage();

    if (speechSynthesizer == null)
    {
        speechSynthesizer = new SpeechSynthesizer();
        speechSynthesizer.SetOutputToDefaultAudioDevice();
    }
    _speechSynthesizer = speechSynthesizer;

    if (speechRecognition == null)
    {
        speechRecognition = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US"));

        // Free-form dictation.
        var defaultDictation = new DictationGrammar
        {
            Name = "default dictation",
            Enabled = true
        };
        speechRecognition.LoadGrammar(defaultDictation);

        // Letter-by-letter spelling dictation.
        var spellingDictation = new DictationGrammar("grammar:dictation#spelling")
        {
            Name = "spelling dictation",
            Enabled = true
        };
        speechRecognition.LoadGrammar(spellingDictation);

        // Default microphone as recognizer input.
        speechRecognition.SetInputToDefaultAudioDevice();
    }
    _speechRecognition = speechRecognition;
}
示例14: GetSpeechRecognitionEngine
// Builds a dictation recognizer on the default audio device that echoes
// recognized text in green, or a placeholder message in red when no text
// is available.
private static SpeechRecognitionEngine GetSpeechRecognitionEngine()
{
    var engine = new SpeechRecognitionEngine();
    engine.LoadGrammar(new DictationGrammar());
    engine.SetInputToDefaultAudioDevice();
    engine.SpeechRecognized += (sender, args) =>
    {
        string recognizedText = args.Result == null ? null : args.Result.Text;
        if (String.IsNullOrEmpty(recognizedText))
        {
            using (new ConsoleForegroundColor(ConsoleColor.Red))
            {
                Console.WriteLine("Recognized text not available.");
            }
        }
        else
        {
            using (new ConsoleForegroundColor(ConsoleColor.Green))
            {
                Console.WriteLine(recognizedText);
            }
        }
    };
    return engine;
}
示例15: SpeechInput
// Wires up voice control for the music player: validates inputs, builds the
// command set and grammar, and starts continuous recognition. A failure to
// start recognition is reported to the user but not fatal.
public SpeechInput(Settings settings, MusicList musicCollection, string playerPath) {
    // BUG FIX: fail fast — the original allocated the timer, RNG, engine and
    // player before discovering musicCollection was null.
    if(musicCollection == null) {
        throw new ArgumentNullException(nameof(musicCollection));
    }
    ModeTimer = new CommandModeTimer();
    RNG = new Random();
    AppSettings = settings;
    SRecognize = new SpeechRecognitionEngine();
    Player = new Aimp3Player(playerPath);
    MusicCollection = musicCollection;
    InitCommands();
    // BUG FIX: subscribe BEFORE RecognizeAsync starts; the original attached
    // the handler after recognition was already running and could miss an
    // early result. Grammar reloads follow song-list updates.
    SRecognize.SpeechRecognized += SRecognize_SpeechRecognized;
    MusicCollection.SongListUpdated += (s, a) => LoadGrammar();
    try {
        LoadGrammar();
        SRecognize.SetInputToDefaultAudioDevice();
        SRecognize.RecognizeAsync(RecognizeMode.Multiple);
    } catch(Exception e) {
        System.Windows.Forms.MessageBox.Show("Error while starting SpeechInput\n" + e.ToString());
    }
}