本文整理汇总了C#中SpeechRecognitionEngine.UnloadAllGrammars方法的典型用法代码示例。如果您正苦于以下问题:C# SpeechRecognitionEngine.UnloadAllGrammars方法的具体用法?C# SpeechRecognitionEngine.UnloadAllGrammars怎么用?C# SpeechRecognitionEngine.UnloadAllGrammars使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类SpeechRecognitionEngine
的用法示例。
在下文中一共展示了SpeechRecognitionEngine.UnloadAllGrammars方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: InitializeSpeechRecognitionEngine
/// <summary>
/// Creates a fresh recognition engine, points it at the given WAV file,
/// launches Windows Media Player to play the same file, and starts one
/// asynchronous recognition pass using a free dictation grammar.
/// </summary>
/// <param name="filePath">Full path to the .wav file to recognize.</param>
public void InitializeSpeechRecognitionEngine(String filePath)
{
    MySpeechRecognitionEngine = new SpeechRecognitionEngine();
    //MySpeechRecognitionEngine.SetInputToDefaultAudioDevice();
    MySpeechRecognitionEngine.UnloadAllGrammars();
    try
    {
        MySpeechRecognitionEngine.SetInputToWaveFile(filePath);
        // NOTE(review): hard-coded WMP install path — breaks on machines with a
        // non-default Program Files location; confirm this is acceptable.
        Process.Start("C:\\Program Files\\Windows Media Player\\wmplayer.exe", ("\"" + filePath + "\""));
        MySpeechRecognitionEngine.LoadGrammar(new DictationGrammar());

        // FIX: subscribe BEFORE starting recognition. The original called
        // RecognizeAsync first, so events fired in the opening moments of the
        // file could be raised before any handler was attached and be lost.
        MySpeechRecognitionEngine.AudioLevelUpdated += MySpeechRecognitionEngine_AudioLevelUpdated;
        MySpeechRecognitionEngine.SpeechRecognized += MySpeechRecognitionEnginee_SpeechRecognized;
        MySpeechRecognitionEngine.AudioStateChanged += MySpeechRecognitionEnginee_AudioStateChanged;
        MySpeechRecognitionEngine.RecognizeCompleted += MySpeechRecognitionEngine_RecognizeCompleted;

        MySpeechRecognitionEngine.RecognizeAsync(RecognizeMode.Single);
    }
    catch (Exception ex)
    {
        // Message is already a string; the original's .ToString() was redundant.
        Console.Write(ex.Message);
    }
}
示例2: InitializeSRE
/// <summary>
/// Creates and configures the speech recognition engine: default microphone
/// input, all plugin grammars loaded, continuous recognition started. Blocks
/// until <c>Jarvis.JarvisMain.stop</c> is set, then returns the live engine.
/// </summary>
/// <returns>The configured, still-usable recognition engine.</returns>
public static SpeechRecognitionEngine InitializeSRE()
{
    // FIX: the original wrapped the engine in `using (sre) { ... }` and then
    // returned it — the engine was disposed at the closing brace, so every
    // caller received an already-disposed object. The caller now owns
    // disposal of the returned engine.
    SpeechRecognitionEngine sre = new SpeechRecognitionEngine();
    //Set the audio device to the OS default
    sre.SetInputToDefaultAudioDevice();
    // Reset the Grammar
    sre.UnloadAllGrammars();
    // Load the plugins
    LoadPlugins();
    //Load all of the grammars
    foreach (IJarvisPlugin plugin in _plugins)
        sre.LoadGrammar(plugin.getGrammar());
    //Set the recognition mode
    sre.RecognizeAsync(RecognizeMode.Multiple);
    //Add an event Handler
    sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(Engine.SpeechRecognized);
    // FIX: sleep inside the wait loop — the original spun on an empty body,
    // pinning a CPU core for the lifetime of the application.
    while (!Jarvis.JarvisMain.stop)
    {
        Thread.Sleep(100);
    }
    return sre;
}
示例3: Server
/// <summary>
/// Wires up the whole server: the per-pixel-alpha overlay form, the client
/// process and its pipes, speech synthesis and recognition, and Kinect
/// body- plus HD-face-tracking with smoothing filters.
/// </summary>
public Server()
{
// Overlay window.
Form = new CustomPerPixelAlphaForm();
FormSetProperties();
FormDock();
Form.Show();
// Launch the client executable minimized from its build directory.
// NOTE(review): the deep relative path assumes a fixed solution layout — confirm.
var clientBuildDirectory = Environment.CurrentDirectory + "\\..\\..\\..\\..\\..\\Reflecta.Client\\bin";
var clientStartInfo = new ProcessStartInfo
{
FileName = clientBuildDirectory + "\\Client.exe",
WorkingDirectory = clientBuildDirectory,
WindowStyle = ProcessWindowStyle.Minimized
};
Client = Process.Start(clientStartInfo);
OpenPipes();
// Text-to-speech: female voice with start/viseme/completed callbacks.
SpeechSynthesizer = new SpeechSynthesizer();
SpeechSynthesizer.SelectVoiceByHints(VoiceGender.Female);
SpeechSynthesizer.SpeakStarted += SpeechSynthesizer_SpeakStarted;
SpeechSynthesizer.VisemeReached += SpeechSynthesizer_VisemeReached;
SpeechSynthesizer.SpeakCompleted += SpeechSynthesizer_SpeakCompleted;
// Speech recognition restricted to the KnownCommands phrase set,
// listening continuously on the default microphone.
SpeechRecognitionEngine = new SpeechRecognitionEngine();
SpeechRecognitionEngine.UnloadAllGrammars();
SpeechRecognitionEngine.LoadGrammar(new Grammar(new GrammarBuilder(KnownCommands)));
SpeechRecognitionEngine.SpeechRecognized += SpeechRecognitionEngine_SpeechRecognized;
SpeechRecognitionEngine.SetInputToDefaultAudioDevice();
SpeechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
// Kinect body tracking: one DESPQuaternion filter per skeletal bone.
KinectSensor = KinectSensor.GetDefault();
KinectSensor.Open();
BodyFrameSource = KinectSensor.BodyFrameSource;
BodyFrameReader = BodyFrameSource.OpenReader();
BodyFrameReader.FrameArrived += BodyFrameReader_FrameArrived;
Bodies = null;
BodyDESP = new DESPQuaternion[(int) MoCapKinectBone.Count];
for (var i = 0; i < (int) MoCapKinectBone.Count; i++)
BodyDESP[i] = new DESPQuaternion();
// High-definition face tracking: one DESPFloat per facial expression.
HighDefinitionFaceFrameSource = new HighDefinitionFaceFrameSource(KinectSensor);
HighDefinitionFaceFrameSource.TrackingQuality = FaceAlignmentQuality.High;
HighDefinitionFaceFrameReader = HighDefinitionFaceFrameSource.OpenReader();
HighDefinitionFaceFrameReader.FrameArrived += HighDefinitionFaceFrameReader_FrameArrived;
FaceAlignment = new FaceAlignment();
FaceDESP = new DESPQuaternion();
FaceExpressionDESP = new DESPFloat[(int) MoCapKinectFacialExpression.Count];
for (var i = 0; i < (int) MoCapKinectFacialExpression.Count; i++)
FaceExpressionDESP[i] = new DESPFloat();
}
示例4: MarathonForm
/// <summary>
/// Builds the quiz's borderless full-screen window, configures continuous
/// speech recognition for the answer choices, connects the Wiimote, and
/// lays out the score/game/question panels.
/// </summary>
public MarathonForm()
{
// Borderless, maximized, font-auto-scaled main window.
AutoScaleDimensions = new SizeF(6F, 13F);
AutoScaleMode = AutoScaleMode.Font;
FormBorderStyle = FormBorderStyle.None;
WindowState = FormWindowState.Maximized;
Name = "Marathon des millions";
// Add the answer choices to the program's grammar.
var grammar = new GrammarBuilder();
grammar.Append(new Choices("1", "2", "3", "4", "Yes", "No"));
recognizer = new SpeechRecognitionEngine();
recognizer.SetInputToDefaultAudioDevice();
recognizer.UnloadAllGrammars();
recognizer.LoadGrammar(new Grammar(grammar));
// Handlers are attached before RecognizeAsync, so no early events are lost.
recognizer.SpeechRecognized += SpeechRecognized;
recognizer.AudioLevelUpdated += AudioLevelUpdated;
recognizer.RecognizeAsync(RecognizeMode.Multiple);
// create a new instance of the Wiimote
wm = new Wiimote();
scorePanel = new ScorePanel();
gamePanel = new GamePanel(wm);
questionPanel = new QuestionPanel(gamePanel);
// setup the event to handle state changes
wm.WiimoteChanged += WiimoteChanged;
// setup the event to handle insertion/removal of extensions
wm.WiimoteExtensionChanged += WiimoteExtensionChanged;
// connect to the Wiimote
wm.Connect();
// set the report type to return the IR sensor and accelerometer data (buttons always come back)
wm.SetReportType(InputReport.IRAccel, IRSensitivity.WiiLevel5, true);
Layout += MarathonLayout;
Controls.Add(questionPanel);
Controls.Add(scorePanel);
Controls.Add(gamePanel);
questionPanel.Start();
}
示例5: Base
/// <summary>
/// Sets up the recognition engine, phrase dictionary and synthesizer, builds
/// one sequential grammar from the dictionary's phrase list (each entry is a
/// set of alternatives), and routes input to the default audio device.
/// </summary>
public Base()
{
    mySpeechEngine = new SpeechRecognitionEngine();
    mySpeechDictionary = new SpeechDictionary();
    mySpeaker = new SpeechSynthesizer();
    mySpeechEngine.UnloadAllGrammars();

    // Append one Choices element per phrase group, in dictionary order.
    GrammarBuilder phraseBuilder = new GrammarBuilder();
    foreach (string[] alternatives in mySpeechDictionary.GetPhraseList())
    {
        phraseBuilder.Append(new Choices(alternatives));
    }

    myGrammar = new Grammar(phraseBuilder);
    mySpeechEngine.LoadGrammar(myGrammar);
    mySpeechEngine.SetInputToDefaultAudioDevice();
}
示例6: button1_Click
/// <summary>
/// Runs one synchronous dictation pass from the default microphone and shows
/// the recognized text on the button itself.
/// </summary>
private void button1_Click(object sender, EventArgs e)
{
    // FIXES vs. original: the engine is IDisposable and was leaked (now in a
    // using block); Recognize() returns null on silence/timeout and the
    // original dereferenced result.Text unconditionally; "aduio" typo fixed
    // in the error message.
    using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine())
    {
        Grammar dictationGrammar = new DictationGrammar();
        recognizer.LoadGrammar(dictationGrammar);
        try
        {
            button1.Text = "Speak Now";
            recognizer.SetInputToDefaultAudioDevice();
            RecognitionResult result = recognizer.Recognize();
            button1.Text = result != null ? result.Text : "No speech recognized.";
        }
        catch (InvalidOperationException exception)
        {
            button1.Text = String.Format("Could not recognize input from default audio device. Is a microphone or sound card available?\r\n{0} - {1}.", exception.Source, exception.Message);
        }
        finally
        {
            recognizer.UnloadAllGrammars();
        }
    }
}
示例7: button2_Click
/// <summary>
/// Runs one synchronous dictation pass, copies the recognized text into
/// label1 and textBox1, then triggers the button1 handler.
/// </summary>
private void button2_Click(object sender, EventArgs e)
{
    // FIXES vs. original: the engine is IDisposable and was leaked (now in a
    // using block); Recognize() returns null on silence/timeout and the
    // original dereferenced result.Text unconditionally.
    using (SpeechRecognitionEngine engineSpeech = new SpeechRecognitionEngine())
    {
        Grammar gram = new DictationGrammar();
        engineSpeech.LoadGrammar(gram);
        try
        {
            engineSpeech.SetInputToDefaultAudioDevice();
            RecognitionResult result = engineSpeech.Recognize();
            if (result != null)
            {
                label1.Text = result.Text;
                textBox1.Text = result.Text;
                button1_Click(new Object(), new EventArgs());
            }
        }
        catch (Exception)
        {
            // Deliberate best-effort: recognition failures are ignored
            // (original behavior preserved).
        }
        finally
        {
            engineSpeech.UnloadAllGrammars();
        }
    }
}
示例8: recogCity
// Handler fired when a city name has been recognized. Ticks the matching
// city checkbox (c1..c5), then builds a fresh yes/no grammar and starts a
// second recognition pass whose result is handled by cityConf.
void recogCity(Object sender, SpeechRecognizedEventArgs e)
{
// A brand-new engine is created for the confirmation step.
recognizer = new SpeechRecognitionEngine();
recognizer.SetInputToDefaultAudioDevice();
obj.SpeakAsync("You have Chosen: " + e.Result.Text);
city = e.Result.Text;
// Tick whichever checkbox matches the recognized city (case-insensitive).
if (c1.Content.ToString().Equals(city.ToString(), StringComparison.OrdinalIgnoreCase))
{
c1.IsChecked = true;
}
else if (c2.Content.ToString().Equals(city.ToString(), StringComparison.OrdinalIgnoreCase))
{
c2.IsChecked = true;
}
else if (c3.Content.ToString().Equals(city.ToString(), StringComparison.OrdinalIgnoreCase))
{
c3.IsChecked = true;
}
else if (c4.Content.ToString().Equals(city.ToString(), StringComparison.OrdinalIgnoreCase))
{
c4.IsChecked = true;
}
else if (c5.Content.ToString().Equals(city.ToString(), StringComparison.OrdinalIgnoreCase))
{
c5.IsChecked = true;
}
else
{
// No checkbox matched the recognized city.
Console.Write("nai mila");
}
// Confirmation pass: only "yes"/"no" are valid utterances.
Choices choices = new Choices("yes", "no");
GrammarBuilder grammarBuilder = new GrammarBuilder(choices);
obj.SpeakAsync("Say yes or no");
Grammar grammar = new Grammar(grammarBuilder);
recognizer.UnloadAllGrammars();
recognizer.LoadGrammar(grammar);
// Presumably gives the async voice prompt time to finish before listening.
Thread.Sleep(4000);
recognizer.RecognizeAsync(RecognizeMode.Single);
// NOTE(review): the handler is subscribed after RecognizeAsync starts — a
// very fast utterance could be missed; consider subscribing first.
recognizer.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(cityConf);
}
开发者ID:rayedbajwa,项目名称:Customer-Enquiry-and-Response-System---Voice-Recognition-system-with-Car-Deals-Search-Algortihms-,代码行数:42,代码来源:call.xaml.cs
示例9: SpeechToText
/// <summary>
/// Reason : To get speech to text data for given no of files.
/// Runs dictation recognition over numbered .wav files and stores the
/// accumulated text (built up in audioContentMessage) into audioMessage.
/// </summary>
/// <param name="audioFilePath">Path prefix the file number and ".wav" are appended to.</param>
/// <param name="noOfAudioFiles">Upper bound of the file-index loop.</param>
/// <param name="audioMessage">Receives the accumulated recognized text.</param>
private void SpeechToText(string audioFilePath,int noOfAudioFiles, ref string audioMessage)
{
    _recognizer = new SpeechRecognitionEngine();
    Grammar dictationGrammar = new DictationGrammar();
    _recognizer.LoadGrammar(dictationGrammar);
    audioContentMessage = "";
    try
    {
        // NOTE(review): the loop runs i = 1 .. noOfAudioFiles-1, so the file
        // numbered noOfAudioFiles is never processed — confirm whether "<="
        // was intended.
        for (int i = 1; i < noOfAudioFiles; i++)
        {
            // FIX: copy the loop variable before capturing it. The lambda
            // below captured `i` by reference; a task that outlived its
            // Wait(timeSpan) timeout would read a mutated `i` and process
            // the wrong file.
            int fileIndex = i;
            try
            {
                // Recognize each file on a worker task, bounded by timeSpan.
                Task task = Task.Factory.StartNew(() => codeBlock(audioFilePath + fileIndex + ".wav", noOfAudioFiles, _recognizer));
                task.Wait(timeSpan);
            }
            catch
            {
                // Deliberate best-effort: per-file failures are ignored so the
                // remaining files are still processed.
            }
        }
        audioMessage = audioContentMessage;
    }
    catch (InvalidOperationException)
    {
        audioMessage = "Could not recognize input audio.\r\n";
    }
    finally
    {
        _recognizer.UnloadAllGrammars();
    }
}
示例10: button10_Click_1
/// <summary>
/// Runs one synchronous recognition pass against a small fixed word list,
/// shows the result in label7, and forwards "red" to the button3 handler.
/// </summary>
private void button10_Click_1(object sender, EventArgs e)
{
    // FIXES vs. original: the engine is IDisposable and was leaked (now in a
    // using block); "aduio" typo fixed in the error message.
    using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine())
    {
        Choices colors = new Choices();
        colors.Add(new string[] { "red", "green", "blue", "who", "mehak" });
        GrammarBuilder gb = new GrammarBuilder();
        gb.Append(colors);
        // Create the Grammar instance and load it into the speech recognition engine.
        Grammar g = new Grammar(gb);
        recognizer.LoadGrammar(g);
        try
        {
            recognizer.SetInputToDefaultAudioDevice();
            RecognitionResult result = recognizer.Recognize();
            if (result == null)
            {
                // Nothing recognized (silence or timeout).
                label7.Text = "";
            }
            else
            {
                label7.Text = result.Text;
                if (label7.Text == "red")
                {
                    button3.PerformClick();
                }
            }
        }
        catch (InvalidOperationException exception)
        {
            // NOTE(review): the error is shown on button1 while results go to
            // label7 — confirm that is intentional.
            button1.Text = String.Format("Could not recognize input from default audio device. Is a microphone or sound card available?\r\n{0} - {1}.", exception.Source, exception.Message);
        }
        finally
        {
            recognizer.UnloadAllGrammars();
        }
    }
}
示例11: InitRecognitionEngine
// Would configure Russian-culture command recognition on the default audio
// device and start continuous recognition.
// NOTE(review): the early `return;` disables this entire method — everything
// below it is unreachable dead code, presumably left in place for later
// re-enabling. Confirm before removing.
private void InitRecognitionEngine()
{
//using (SpeechRecognizer recognizer = new System.Windows.Media.SpeechRecognition.SpeechRecognizer())
return;
var cultureInfo = new CultureInfo("ru-RU");
//var cultureInfo = new CultureInfo("en-US");
Thread.CurrentThread.CurrentCulture = cultureInfo;
Thread.CurrentThread.CurrentUICulture = cultureInfo;
/*
•en-GB. English (United Kingdom)
•en-US. English (United States)
•de-DE. German (Germany)
•es-ES. Spanish (Spain)
•fr-FR. French (France)
•ja-JP. Japanese (Japan)
•zh-CN. Chinese (China)
•zh-TW. Chinese (Taiwan)
*/
// Build a grammar from the configured command phrases.
var commands = LoadAllCommands();
var choices = new Choices(commands);
var builder = new GrammarBuilder(choices);
builder.Culture = cultureInfo;
recognitionEngine = new SpeechRecognitionEngine();// (cultureInfo);
recognitionEngine.SetInputToDefaultAudioDevice();
recognitionEngine.UnloadAllGrammars();
recognitionEngine.LoadGrammar(new Grammar(builder));
//recognitionEngine.LoadGrammar(new DictationGrammar()); // any free-form text
recognitionEngine.SpeechRecognized += recognitionEngine_SpeechRecognized;
recognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
}
示例12: enqRecog
// Handler fired when an enquiry number has been recognized; stores it in
// enq_check and starts a yes/no confirmation pass handled by enqCheck.
void enqRecog(object sender, SpeechRecognizedEventArgs e)
{
obj.SpeakAsync("Chosen:"+e.Result.Text);
// NOTE(review): int.Parse throws FormatException if the active grammar can
// ever yield a non-numeric phrase — confirm it is digits-only.
enq_check = int.Parse( e.Result.Text);
// A brand-new engine is created for the confirmation step.
recognizer = new SpeechRecognitionEngine();
recognizer.SetInputToDefaultAudioDevice();
obj.SpeakAsync("Say yes or no");
recognizer.RecognizeAsyncStop();
// Confirmation pass: only "yes"/"no" are valid utterances.
Choices choices = new Choices("yes", "no");
GrammarBuilder grammarBuilder = new GrammarBuilder(choices);
Grammar grammar = new Grammar(grammarBuilder);
recognizer.UnloadAllGrammars();
recognizer.LoadGrammar(grammar);
// Presumably gives the async voice prompt time to finish before listening.
Thread.Sleep(3000);
recognizer.RecognizeAsync(RecognizeMode.Single);
// NOTE(review): the handler is subscribed after RecognizeAsync starts — a
// very fast answer could be missed; consider subscribing first.
recognizer.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(enqCheck);
}
开发者ID:rayedbajwa,项目名称:Customer-Enquiry-and-Response-System---Voice-Recognition-system-with-Car-Deals-Search-Algortihms-,代码行数:20,代码来源:call.xaml.cs
示例13: ConstructorSetup
/// <summary>
/// Creates the recognition engine for the requested culture and clears any
/// grammars it carries by default.
/// </summary>
/// <param name="idioma">Culture the engine should recognize.</param>
private void ConstructorSetup(CultureInfo idioma)
{
    var engine = new SpeechRecognitionEngine(idioma);
    _speechRecognitionEngine = engine;
    engine.UnloadAllGrammars();
}
示例14: MainForm_Load
// Form-load handler: docks the UI panels, initializes the generic server
// wrapper, then sets up voice recognition/synthesis, logging any failure
// and surfacing it to the user via a message box.
private void MainForm_Load(object sender, EventArgs e)
{
// Fill-dock the split containers and output views.
this.splitContainer1.Dock = DockStyle.Fill;
this.splitContainer2.Dock = DockStyle.Fill;
this.lvMessages.Dock = DockStyle.Fill;
this.tbLog.Dock = DockStyle.Fill;
this.btnDisconnect.Enabled = false;
//generic server
_logger.Log("Initializing Client Module...");
_server = new CGenericServerWrapper();
_server.NetworkID = 1;
_server.Init("Voice.log");
_logger.Log("Client Module initialized successfuly");
//voice
bool error = false;
_logger.Log("Initializing Voice recognition Module...");
recognizer = new SpeechRecognitionEngine();
synthesizer = new SpeechSynthesizer();
// NOTE(review): the installed-voices list is retrieved but never used here —
// presumably SetVoice() re-queries it; confirm.
System.Collections.ObjectModel.ReadOnlyCollection<InstalledVoice> voices = synthesizer.GetInstalledVoices();
try
{
recognizer.SetInputToDefaultAudioDevice();
recognizer.UnloadAllGrammars();
recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
SetVoice();
}
catch (ArgumentException ae)
{
// Expected configuration errors (e.g. bad voice/device argument).
error = true;
_logger.Log(String.Format("Error in Voice recognition module initialization: {0}. ", ae.Message));
MessageBox.Show(ae.Message);
}
catch (Exception ex)
{
// Anything else: log and show with an error icon.
error = true;
_logger.Log(String.Format("Error in Voice recognition module initialization: {0}. ", ex.Message));
MessageBox.Show(ex.Message, "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
}
if (!error)
{
_logger.Log("Voice recognition Module initialized successfuly");
}
}
示例15: icers_carnamerecognized
// Handler fired when a car model name has been recognized. Ticks every
// matching model checkbox, then builds a fresh yes/no grammar and starts a
// confirmation pass whose result is handled by car_check.
void icers_carnamerecognized(object sender, SpeechRecognizedEventArgs e)
{
// A brand-new engine is created for the confirmation step.
recognizer = new SpeechRecognitionEngine();
recognizer.SetInputToDefaultAudioDevice();
obj.SpeakAsync("You have chosen: " + e.Result.Text);
obj.SpeakAsync("Say yes or no");
recognizer.RecognizeAsyncStop();
carmodel = e.Result.Text;
// Tick each model checkbox whose label matches (case-insensitive).
// NOTE(review): unlike the city handler, these are independent ifs rather
// than else-if — multiple boxes can be ticked if labels repeat; confirm.
if (model1.Content.ToString().Equals(e.Result.Text.ToString(), StringComparison.OrdinalIgnoreCase))
{
model1.IsChecked = true;
}
if (model2.Content.ToString().Equals(e.Result.Text.ToString(), StringComparison.OrdinalIgnoreCase))
{
model2.IsChecked = true;
}
if (model3.Content.ToString().Equals(e.Result.Text.ToString(), StringComparison.OrdinalIgnoreCase))
{
model3.IsChecked = true;
}
if (model4.Content.ToString().Equals(e.Result.Text.ToString(), StringComparison.OrdinalIgnoreCase))
{
model4.IsChecked = true;
}
if (model5.Content.ToString().Equals(e.Result.Text.ToString(), StringComparison.OrdinalIgnoreCase))
{
model5.IsChecked = true;
}
// Confirmation pass: only "yes"/"no" are valid utterances.
Choices choices = new Choices("yes", "no");
GrammarBuilder grammarBuilder = new GrammarBuilder(choices);
Grammar grammar = new Grammar(grammarBuilder);
recognizer.UnloadAllGrammars();
recognizer.LoadGrammar(grammar);
// Presumably gives the async voice prompts time to finish before listening.
Thread.Sleep(4000);
recognizer.RecognizeAsync(RecognizeMode.Single);
// NOTE(review): the handler is subscribed after RecognizeAsync starts — a
// very fast answer could be missed; consider subscribing first.
recognizer.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(car_check);
}
开发者ID:rayedbajwa,项目名称:Customer-Enquiry-and-Response-System---Voice-Recognition-system-with-Car-Deals-Search-Algortihms-,代码行数:40,代码来源:call.xaml.cs