本文整理汇总了C#中SpeechRecognitionEngine.RecognizeAsyncStop方法的典型用法代码示例。如果您正苦于以下问题:C# SpeechRecognitionEngine.RecognizeAsyncStop方法的具体用法?C# SpeechRecognitionEngine.RecognizeAsyncStop怎么用?C# SpeechRecognitionEngine.RecognizeAsyncStop使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类SpeechRecognitionEngine
的用法示例。
在下文中一共展示了SpeechRecognitionEngine.RecognizeAsyncStop方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: Main
/// <summary>
/// Configures the Kinect microphone array, loads a small command grammar
/// ("activate", "off", …) and runs continuous recognition until ENTER is pressed.
/// </summary>
static void Main()
{
    using (var source = new KinectAudioSource())
    {
        source.FeatureMode = true;
        source.AutomaticGainControl = false;              // must be off for speech recognition
        source.SystemMode = SystemMode.OptibeamArrayOnly; // no AEC for this sample

        RecognizerInfo recognizerInfo = GetKinectRecognizer();
        if (recognizerInfo == null)
        {
            Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
            return;
        }
        Console.WriteLine("Using: {0}", recognizerInfo.Name);

        using (var engine = new SpeechRecognitionEngine(recognizerInfo.Id))
        {
            // Command vocabulary for the drone-style controls.
            var commands = new Choices();
            foreach (string word in new[] { "activate", "off", "open", "manual", "hold", "land", "stabilize" })
            {
                commands.Add(word);
            }

            // Match the grammar culture to the recognizer's in case we run under a different culture.
            var builder = new GrammarBuilder { Culture = recognizerInfo.Culture };
            builder.Append(commands);
            engine.LoadGrammar(new Grammar(builder));

            engine.SpeechRecognized += SreSpeechRecognized;
            engine.SpeechRecognitionRejected += SreSpeechRejected;

            using (Stream audio = source.Start())
            {
                engine.SetInputToAudioStream(
                    audio,
                    new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
                Console.WriteLine("Recognizing... Press ENTER to stop");
                engine.RecognizeAsync(RecognizeMode.Multiple);
                Console.ReadLine();
                Console.WriteLine("Stopping recognizer ...");
                engine.RecognizeAsyncStop();
            }
        }
    }
}
示例2: main_menu
/// <summary>
/// Speaks the main-menu prompt and then listens for a single menu choice
/// ("one", "two" or "three"), dispatching the result to <c>mainRecog</c>.
/// </summary>
public void main_menu()
{
    recognizer = new SpeechRecognitionEngine();
    obj.SpeakAsync("Hello My name is ICERS, What can i do for you: 1 for checking old enquiries, 2 for entering new enquiry, and 3 for latest car analytics...");
    Thread.Sleep(10000); // wait for the spoken prompt to finish before listening

    // Build the menu grammar.
    GrammarBuilder gb = new GrammarBuilder();
    Choices menu = new Choices();
    menu.Add(new string[] { "one", "two", "three" });
    gb.Append(menu);
    // Create the Grammar instance and load it into the speech recognition engine.
    Grammar g = new Grammar(gb);
    recognizer.LoadGrammar(g);
    recognizer.SetInputToDefaultAudioDevice();

    // BUG FIX: subscribe to SpeechRecognized BEFORE starting recognition so an
    // early result cannot be missed. (The original also called RecognizeAsyncStop()
    // on a freshly created, never-started engine — a no-op that has been removed.)
    recognizer.SpeechRecognized +=
        new EventHandler<SpeechRecognizedEventArgs>(mainRecog);
    recognizer.RecognizeAsync(RecognizeMode.Single);
}
开发者ID:rayedbajwa,项目名称:Customer-Enquiry-and-Response-System---Voice-Recognition-system-with-Car-Deals-Search-Algortihms-,代码行数:24,代码来源:call.xaml.cs
示例3: SpeechRecognizer
/// <summary>
/// Creates the speech recognizer for the given Kinect sensor, loads a color-word
/// grammar ("red"/"green"/"blue") and runs recognition until ENTER is pressed.
/// </summary>
/// <param name="kinect">Sensor whose audio source supplies the input stream.</param>
private SpeechRecognizer(KinectSensor kinect)
{
    RecognizerInfo ri = GetKinectRecognizer();

    // BUG FIX: check for a missing recognizer BEFORE using it. The original
    // constructed the engine from 'ri' first, which threw NullReferenceException
    // whenever no Kinect speech recognizer was installed.
    if (ri == null)
    {
        Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
        return;
    }

    this.speechRecognizer = new SpeechRecognitionEngine(ri);

    // Obtain the KinectAudioSource to do audio capture.
    KinectAudioSource source = kinect.AudioSource;
    source.EchoCancellationMode = EchoCancellationMode.None; // No AEC for this sample
    source.AutomaticGainControlEnabled = false; // Important to turn this off for speech recognition

    Console.WriteLine("Using: {0}", ri.Name);

    // NOTE: Need to wait 4 seconds for device to be ready right after initialization.
    int wait = 4;
    while (wait > 0)
    {
        Console.Write("Device will be ready for speech recognition in {0} second(s).\r", wait--);
        Thread.Sleep(1000);
    }

    // Build the color grammar; match the recognizer's culture in case we are
    // running in a different culture.
    var colors = new Choices();
    colors.Add("red");
    colors.Add("green");
    colors.Add("blue");
    var gb = new GrammarBuilder { Culture = ri.Culture };
    gb.Append(colors);
    var g = new Grammar(gb);
    speechRecognizer.LoadGrammar(g);

    speechRecognizer.SpeechRecognized += SreSpeechRecognized;
    speechRecognizer.SpeechHypothesized += SreSpeechHypothesized;
    speechRecognizer.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
    speechRecognizer.SpeechDetected += new EventHandler<SpeechDetectedEventArgs>(SpeechDetectedHandler);
    Console.WriteLine("IN Speech Reconizer load function");

    // CLEANUP: a large block of commented-out alternative Main()/LoadGrammar()
    // code from the original has been removed.
    using (Stream s = kinect.AudioSource.Start())
    {
        speechRecognizer.SetInputToAudioStream(
            s, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        Console.WriteLine("Recognizing speech. Say: 'red', 'green' or 'blue'. Press ENTER to stop");
        speechRecognizer.RecognizeAsync(RecognizeMode.Multiple);
        Console.ReadLine();
        Console.WriteLine("Stopping recognizer ...");
        speechRecognizer.RecognizeAsyncStop();
    }
}
示例4: Main
/// <summary>
/// Runs a Kinect dictation demo: finds the ja-JP recognizer, loads a
/// "Start Dictation … Stop Dictation" semantic grammar and recognizes
/// continuously until ENTER is pressed.
/// </summary>
static void Main( string[] args )
{
    try {
        using ( var source = new KinectAudioSource() ) {
            source.FeatureMode = true;
            source.AutomaticGainControl = false; //Important to turn this off for speech recognition
            source.SystemMode = SystemMode.OptibeamArrayOnly; //No AEC for this sample

            RecognizerInfo ri = SpeechRecognitionEngine.InstalledRecognizers().Where( r => "ja-JP".Equals( r.Culture.Name, StringComparison.InvariantCultureIgnoreCase ) ).FirstOrDefault();
            if ( ri == null ) {
                Console.WriteLine( "Could not find speech recognizer: {0}. Please refer to the sample requirements.", RecognizerId );
                return;
            }
            Console.WriteLine( "Using: {0}", ri.Name );

            using ( var sre = new SpeechRecognitionEngine( ri.Id ) ) {
                // Dictation grammar: "Start Dictation" <free dictation> "Stop Dictation".
                GrammarBuilder dictaphoneGB = new GrammarBuilder();
                GrammarBuilder dictation = new GrammarBuilder();
                dictation.AppendDictation();
                dictaphoneGB.Append( new SemanticResultKey( "StartDictation", new SemanticResultValue( "Start Dictation", true ) ) );
                dictaphoneGB.Append( new SemanticResultKey( "dictationInput", dictation ) );
                dictaphoneGB.Append( new SemanticResultKey( "EndDictation", new SemanticResultValue( "Stop Dictation", false ) ) );

                // CLEANUP: the original also built a spelling grammar and combined it
                // with the dictation grammar via GrammarBuilder.Add, but the combined
                // builder was never loaded into the engine; that dead code has been
                // removed. Only the dictation grammar below was ever active.
                Grammar grammar = new Grammar( new SemanticResultKey( "Dictation", dictaphoneGB ) );
                grammar.Enabled = true;
                grammar.Name = "Dictaphone and Spelling ";
                sre.LoadGrammar( grammar ); // Exception thrown here

                sre.SpeechRecognized += SreSpeechRecognized;
                sre.SpeechHypothesized += SreSpeechHypothesized;
                sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

                using ( Stream s = source.Start() ) {
                    sre.SetInputToAudioStream( s, new SpeechAudioFormatInfo(
                        EncodingFormat.Pcm, 16000, 16, 1,
                        32000, 2, null ) );
                    Console.WriteLine( "Recognizing. Say: 'red', 'green' or 'blue'. Press ENTER to stop" );
                    sre.RecognizeAsync( RecognizeMode.Multiple );
                    Console.ReadLine();
                    Console.WriteLine( "Stopping recognizer ..." );
                    sre.RecognizeAsyncStop();
                }
            }
        }
    }
    catch ( Exception ex ) {
        Console.WriteLine( ex.Message );
    }
}
示例5: RecognitionStart
/// <summary>
/// Feeds Kinect audio into the recognizer and runs continuous recognition
/// until the user presses ENTER. (The console prompts below are mojibake in
/// the original source and are reproduced verbatim.)
/// </summary>
private static void RecognitionStart(KinectAudioSource source, SpeechRecognitionEngine sre)
{
    using (Stream audioStream = source.Start())
    {
        var format = new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null);
        sre.SetInputToAudioStream(audioStream, format);
        Console.WriteLine("�л� �A�n��? �� �A�X��?�C ���U ENTER �������");
        sre.RecognizeAsync(RecognizeMode.Multiple);
        Console.ReadLine();
        Console.WriteLine("������� ...");
        sre.RecognizeAsyncStop();
    }
}
示例6: Window_Loaded
/// <summary>
/// Initializes the Kinect runtime, wires up speech recognition for
/// "up"/"down"/"center", and opens the video and depth streams.
/// </summary>
private void Window_Loaded(object sender, EventArgs e)
{
    nui = new Runtime();
    try
    {
        nui.Initialize(RuntimeOptions.UseDepthAndPlayerIndex | RuntimeOptions.UseSkeletalTracking | RuntimeOptions.UseColor);
    }
    catch (InvalidOperationException)
    {
        System.Windows.MessageBox.Show("Runtime initialization failed. Please make sure Kinect device is plugged in.");
        return;
    }
    consoleFrame.Text = "Window_Loaded";

    using (var source = new KinectAudioSource())
    {
        source.FeatureMode = true;
        source.AutomaticGainControl = false; //Important to turn this off for speech recognition
        source.SystemMode = SystemMode.OptibeamArrayOnly; //No AEC for this sample

        RecognizerInfo ri = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == RecognizerId).FirstOrDefault();
        if (ri == null)
        {
            Console.WriteLine("Could not find speech recognizer: {0}. Please refer to the sample requirements.", RecognizerId);
            return;
        }
        consoleFrame.Text = "Using:" + ri.Name;

        sre = new SpeechRecognitionEngine(ri.Id);
        var orders = new Choices();
        orders.Add("up");
        orders.Add("down");
        orders.Add("center");
        var gb = new GrammarBuilder();
        gb.Culture = ri.Culture;
        gb.Append(orders);
        var g = new Grammar(gb);
        sre.LoadGrammar(g);
        sre.SpeechRecognized += SreSpeechRecognized;
        sre.SpeechHypothesized += SreSpeechHypothesized;
        sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

        var s = source.Start();
        sre.SetInputToAudioStream(s,
            new SpeechAudioFormatInfo(
                EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        sre.RecognizeAsync(RecognizeMode.Multiple);
        // BUG FIX: the original called RecognizeAsyncStop() immediately after
        // RecognizeAsync(), cancelling recognition before it could hear anything.
        // NOTE(review): 'source' is disposed when this using-block exits, which
        // likely ends the audio stream anyway — confirm the intended lifetime.
    }
    consoleFrame.Text += "\n Recognizing started. Say up, down or center";

    try
    {
        nui.VideoStream.Open(ImageStreamType.Video, 2, ImageResolution.Resolution640x480, ImageType.Color);
        nui.DepthStream.Open(ImageStreamType.Depth, 2, ImageResolution.Resolution320x240, ImageType.DepthAndPlayerIndex);
    }
    catch (InvalidOperationException)
    {
        System.Windows.MessageBox.Show("Failed to open stream. Please make sure to specify a supported image type and resolution.");
        return;
    }
    lastTime = DateTime.Now;
    nui.DepthFrameReady += new EventHandler<ImageFrameReadyEventArgs>(nui_DepthFrameReady);
    nui.SkeletonFrameReady += new EventHandler<SkeletonFrameReadyEventArgs>(nui_SkeletonFrameReady);
    nui.VideoFrameReady += new EventHandler<ImageFrameReadyEventArgs>(nui_ColorFrameReady);
}
示例7: speechRecognitionGenerator
/// <summary>
/// Loads a password grammar ("password"/"oscar"/"zeus"), streams Kinect audio
/// into the recognizer, and keeps recognizing until <c>ShouldRun</c> clears.
/// </summary>
private void speechRecognitionGenerator()
{
    RecognizerInfo recognizerInfo = GetKinectRecognizer();
    using (var engine = new SpeechRecognitionEngine(recognizerInfo.Id))
    {
        // Accepted pass-phrases.
        var passwordChoices = new Choices();
        foreach (string phrase in new[] { "password", "oscar", "zeus" })
        {
            passwordChoices.Add(phrase);
        }

        // Specify the culture to match the recognizer in case we are running in a different culture.
        var grammarBuilder = new GrammarBuilder { Culture = recognizerInfo.Culture };
        grammarBuilder.Append(passwordChoices);
        engine.LoadGrammar(new Grammar(grammarBuilder));

        engine.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(sre_SpeechHypothesized);
        engine.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(sre_SpeechRecognitionRejected);
        engine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);

        Stream audioStream = this.KinectDevice.AudioSource.Start();
        var format = new Microsoft.Speech.AudioFormat.SpeechAudioFormatInfo(
            Microsoft.Speech.AudioFormat.EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null);
        engine.SetInputToAudioStream(audioStream, format);
        engine.RecognizeAsync(RecognizeMode.Multiple);

        // Poll until an external flag asks us to shut down.
        while (ShouldRun)
        {
            Thread.Sleep(1000);
        }
        engine.RecognizeAsyncStop();
    }
}
示例8: Main
//.........这里部分代码省略.........
// We get a source obj and initialize context specific variables
using (KinectAudioSource source = new KinectAudioSource()) {
source.FeatureMode = true;
source.AutomaticGainControl = false;
source.SystemMode = SystemMode.OptibeamArrayOnly;
RecognizerInfo recognizer_info = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == SpeechRecognitionID).FirstOrDefault();
// Make sure we got the hook
if (recognizer_info == null)
ExitProgram("There's no speech recognizer on your system. Please install one from the README.");
// Get a hook into iTunes using the COM library
iTunesApp itunes_application = new iTunesApp();
if (itunes_application == null)
ExitProgram("There was a problem getting access to iTunes.");
using (SpeechRecognitionEngine speech_recognizer = new SpeechRecognitionEngine(recognizer_info.Id)) {
// First, we create a grammar with basic iTunes instructions
Choices basic_itunes_options = new Choices();
basic_itunes_options.Add("itunes play");
basic_itunes_options.Add("itunes pause");
basic_itunes_options.Add("itunes stop");
basic_itunes_options.Add("itunes next");
basic_itunes_options.Add("itunes previous");
basic_itunes_options.Add("itunes mute");
basic_itunes_options.Add("itunes volume up");
basic_itunes_options.Add("itunes volume down");
GrammarBuilder basic_itunes_grammar = new GrammarBuilder();
basic_itunes_grammar.Append(basic_itunes_options);
// Next, we make an iTunes library-specific set of grammars for granular control
// The following is inspired by but not directly lifted from KinecTunes. Credit
// is due for inspiration though
Choices dynamic_itunes_options = new Choices();
IITLibraryPlaylist itunes_library = itunes_application.LibraryPlaylist;
// The library is one-based so we go through each track and pull out relevant data into the grammar
// We maintain lists to avoid duplicate grammars, which can cause errors with the Kinect
List<string> artists = new List<string>();
List<string> songs = new List<string>();
List<string> albums = new List<string>();
for (int i = 1; i <= itunes_library.Tracks.Count; i++) {
IITTrack track = itunes_library.Tracks[i];
if (track != null && track.KindAsString.Contains("audio")) {
if (track.Name != null && !artists.Contains(track.Name)) {
dynamic_itunes_options.Add(string.Format("itunes play {0}", track.Name));
dynamic_itunes_options.Add(string.Format("itunes play song {0}", track.Name));
songs.Add(track.Name);
}
if (track.Artist != null && !artists.Contains(track.Artist)) {
dynamic_itunes_options.Add(string.Format("itunes play {0}", track.Artist));
dynamic_itunes_options.Add(string.Format("itunes play artist {0}", track.Artist));
artists.Add(track.Artist);
}
if (track.Album != null && !albums.Contains(track.Album)) {
dynamic_itunes_options.Add(string.Format("itunes play {0}", track.Album));
dynamic_itunes_options.Add(string.Format("itunes play album {0}", track.Album));
albums.Add(track.Album);
}
}
}
// Treat the playlists specially
List<string> playlists = new List<string>();
for (int i = 1; i <= itunes_application.LibrarySource.Playlists.Count; i++) {
var playlist = itunes_application.LibrarySource.Playlists[i];
if (playlist.Name != null && !playlists.Contains(playlist.Name)) {
playlists.Add(playlist.Name);
dynamic_itunes_options.Add(string.Format("itunes play {0}", playlist.Name));
dynamic_itunes_options.Add(string.Format("itunes play playlist {0}", playlist.Name));
}
}
GrammarBuilder dynamic_itunes_grammar = new GrammarBuilder();
dynamic_itunes_grammar.Append(dynamic_itunes_options);
// Load all the grammars into a grammar object, then our speech recognition engine
Grammar itunes_grammar_one = new Grammar(basic_itunes_grammar);
Grammar itunes_grammar_two = new Grammar(dynamic_itunes_grammar);
// Notice that we don't care when the speech is hypothesized or rejected, only accepted
speech_recognizer.LoadGrammar(itunes_grammar_one);
speech_recognizer.LoadGrammar(itunes_grammar_two);
speech_recognizer.SpeechRecognized += SpeechWasRecognized;
using (Stream s = source.Start()) {
speech_recognizer.SetInputToAudioStream(s, new SpeechAudioFormatInfo(EncodingFormat.Pcm,
16000, 16, 1, 32000,
2, null));
Console.Write("Kinect has loaded iTunes Library. Initializing speech recognition...");
// Why is signal handling so difficult in C#? Whatever, let's just use any keystrokes for interrupt
speech_recognizer.RecognizeAsync(RecognizeMode.Multiple);
Console.WriteLine("OK.\nPress any key to exit...");
Console.ReadLine();
speech_recognizer.RecognizeAsyncStop();
}
}
}
}
示例9: startAudio
/// <summary>
/// Starts Kinect audio capture and recognizes the color words
/// 'Red', 'Green', 'Blue' and 'Yellow' until ENTER is pressed.
/// </summary>
private static void startAudio()
{
    if (sensor == null)
    {
        Console.ForegroundColor = ConsoleColor.Red;
        Console.WriteLine("No Kinect sensors are attached to this computer");
        return;
    }

    // Get the Kinect Audio Source and tune it for speech.
    KinectAudioSource audioSource = sensor.AudioSource;
    audioSource.AutomaticGainControlEnabled = false;
    audioSource.NoiseSuppression = true;

    RecognizerInfo recognizerInfo = GetKinectRecognizer();
    if (recognizerInfo == null)
    {
        Console.ForegroundColor = ConsoleColor.Red;
        Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
        return;
    }
    Console.ForegroundColor = ConsoleColor.Green;
    Console.WriteLine("Using: {0}", recognizerInfo.Name);

    // NOTE: Need to wait 4 seconds for device to be ready right after initialization
    for (int remaining = 4; remaining > 0; remaining--)
    {
        Console.ForegroundColor = ConsoleColor.Yellow;
        Console.WriteLine("Device will be ready for speech recognition in {0} second(s).\r", remaining);
        Thread.Sleep(1000);
    }

    using (var engine = new SpeechRecognitionEngine(recognizerInfo.Id))
    {
        var colorWords = new Choices();
        foreach (string word in new[] { "Red", "Green", "Blue", "Yellow" })
        {
            colorWords.Add(word);
        }

        // Specify the culture to match the recognizer in case we are running in a different culture.
        var builder = new GrammarBuilder { Culture = recognizerInfo.Culture };
        builder.Append(colorWords);
        engine.LoadGrammar(new Grammar(builder));

        engine.SpeechRecognized += SreSpeechRecognized;
        engine.SpeechHypothesized += SreSpeechHypothesized;
        engine.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

        using (Stream audioStream = audioSource.Start())
        {
            engine.SetInputToAudioStream(
                audioStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            Console.ForegroundColor = ConsoleColor.Blue;
            Console.WriteLine("Recognizing speech. Read: 'Red', 'Green', 'Blue', 'Yellow'");
            engine.RecognizeAsync(RecognizeMode.Multiple);
            Console.ReadLine();
            Console.WriteLine("Stopping recognizer ...");
            engine.RecognizeAsyncStop();
        }
    }
}
示例10: BuildGrammarforRecognizer
/// <summary>
/// Builds and loads the German command grammar, then synchronously recognizes
/// speech in 5-second windows for as long as <c>keepRunning</c> is set.
/// </summary>
/// <param name="recognizerInfo">A <see cref="RecognizerInfo"/> identifying the engine to use.</param>
private void BuildGrammarforRecognizer(object recognizerInfo)
{
    EnableKinectAudioSource();

    // Grammar with the supported German commands.
    var newGrammarBuilder = new GrammarBuilder();
    newGrammarBuilder.Append(new Choices("Schließe die Anwendung", "Ich hasse euch alle", "nächsten Folie"));
    var grammarClose = new Grammar(newGrammarBuilder);

    // PCM format parameters for the Kinect audio stream.
    int SamplesPerSecond = 16000;
    int bitsPerSample = 16;
    int channels = 1;
    int averageBytesPerSecond = 32000;
    int blockAlign = 2;

    using (var speechRecognizer = new SpeechRecognitionEngine((recognizerInfo as RecognizerInfo).Id))
    {
        speechRecognizer.LoadGrammar(grammarClose);
        speechRecognizer.SpeechRecognized += SreSpeechRecognized;
        speechRecognizer.SpeechHypothesized += SreSpeechHypothesized;
        speechRecognizer.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

        using (Stream s = source.Start())
        {
            speechRecognizer.SetInputToAudioStream(
                s, new SpeechAudioFormatInfo(EncodingFormat.Pcm, SamplesPerSecond, bitsPerSample, channels, averageBytesPerSecond, blockAlign, null));

            // Synchronous recognition loop; results are delivered via the
            // SpeechRecognized event, so the return value is not needed.
            while (keepRunning)
            {
                speechRecognizer.Recognize(new TimeSpan(0, 0, 5));
            }
            // CLEANUP: the original created an unused culture-bearing GrammarBuilder,
            // stored each Recognize() result in an unused local, and called
            // RecognizeAsyncStop() although RecognizeAsync was never started;
            // those no-ops have been removed.
        }
    }
}
示例11: Carname_Load
public void Carname_Load()
{
recognizer = new SpeechRecognitionEngine();
recognizer.SetInputToDefaultAudioDevice();
modelPic.Visibility = System.Windows.Visibility.Visible;
recognizer.RecognizeAsyncStop();
model.Visibility = System.Windows.Visibility.Visible;
if (title == "BMW")
{
obj.SpeakAsync("Choose from BMW cars");
obj.SpeakAsync("3 Series");
obj.SpeakAsync("5 Series");
obj.SpeakAsync("7 Series");
obj.SpeakAsync("C class");
mod.Content = "BMW";
model1.Content = "3 Series";
model2.Content = "5 Series";
model3.Content = "7 Series";
model4.Content = "C Class";
model1.Visibility = System.Windows.Visibility.Visible;
model2.Visibility = System.Windows.Visibility.Visible;
model3.Visibility = System.Windows.Visibility.Visible;
model4.Visibility = System.Windows.Visibility.Visible;
Choices models = new Choices();
models.Add(new string[] { "3 series", "5 series", "7 series", "c class" });
// Create a GrammarBuilder object and append the Choices object.
GrammarBuilder gb = new GrammarBuilder();
gb.Append(models);
// Create the Grammar instance and load it into the speech recognition engine.
Grammar g = new Grammar(gb);
recognizer.LoadGrammar(g);
Thread.Sleep(10000);
recognizer.RecognizeAsync(RecognizeMode.Single);
recognizer.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(icers_carnamerecognized);
}
else if (title == "Toyota")
{
// Image image = Image.FromFile("toyota.jpg");
//pictureBox6.Image = image;
obj.SpeakAsync("Choose from TOYOTA cars");
obj.SpeakAsync("Corolla");
obj.SpeakAsync("Camry");
obj.SpeakAsync("Prado");
obj.SpeakAsync("prius");
obj.SpeakAsync("hilux");
mod.Content = "Toyota";
model1.Content = "Corolla";
model2.Content = "Camry";
model3.Content = "Prado";
model4.Content = "Prius";
model5.Content = "Hilux";
model1.Visibility = System.Windows.Visibility.Visible;
model2.Visibility = System.Windows.Visibility.Visible;
model3.Visibility = System.Windows.Visibility.Visible;
model4.Visibility = System.Windows.Visibility.Visible;
model5.Visibility = System.Windows.Visibility.Visible;
Choices toyomodels = new Choices();
toyomodels.Add(new string[] { "Corolla", "Camry", "Prado", "Prius", "Hilux" });
// Create a GrammarBuilder object and append the Choices object.
GrammarBuilder gb = new GrammarBuilder();
gb.Append(toyomodels);
// Create the Grammar instance and load it into the speech recognition engine.
Grammar g = new Grammar(gb);
recognizer.LoadGrammar(g);
Thread.Sleep(10000);
recognizer.RecognizeAsync(RecognizeMode.Single);
// Register a handler for the SpeechRecognized event.
recognizer.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(icers_carnamerecognized);
}
else if (title == "Suzuki")
{
obj.SpeakAsync("Choose from suzuki cars");
obj.SpeakAsync("mehran");
obj.SpeakAsync("cultus");
obj.SpeakAsync("bolan");
obj.SpeakAsync("swift");
obj.SpeakAsync("liana");
mod.Content = "Suzuki";
model1.Content = "Mehran";
model2.Content = "Cultus";
model3.Content = "Bolan";
model4.Content = "Swift";
model5.Content = "Liana";
model1.Visibility = System.Windows.Visibility.Visible;
//.........这里部分代码省略.........
开发者ID:rayedbajwa,项目名称:Customer-Enquiry-and-Response-System---Voice-Recognition-system-with-Car-Deals-Search-Algortihms-,代码行数:101,代码来源:call.xaml.cs
示例12: SpeechRecognizer
/// <summary>
/// Configures Kinect audio and runs continuous recognition of navigation
/// commands and spelling letters until ENTER is pressed.
/// </summary>
public SpeechRecognizer()
{
    using (var source = new KinectAudioSource())
    {
        source.FeatureMode = true;
        source.AutomaticGainControl = false; //Important to turn this off for speech recognition
        source.SystemMode = SystemMode.OptibeamArrayOnly; //No AEC for this sample

        RecognizerInfo ri = GetKinectRecognizer();
        if (ri == null)
        {
            Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
            return;
        }
        Console.WriteLine("Using: {0}", ri.Name);

        using (var sre = new SpeechRecognitionEngine(ri.Id))
        {
            // Navigation / destination commands.
            var commands = new Choices();
            foreach (string command in new[]
            {
                "Xbox Route", "Xbox Next Direction", "Xbox Previous Direction", "Xbox Spell",
                "Stanford", "San Jose", "Home", "650 Escondido Road", "California",
                "San Jose International Airport"
            })
            {
                commands.Add(command);
            }

            // Single letters used for spelling a destination.
            var letters = new Choices();
            foreach (string letter in new[]
            {
                "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
                "N", "O", "P", "Q", "R", "S", "T", "U", "V", "X", "W", "Y", "Z"
            })
            {
                letters.Add(letter);
            }

            //Specify the culture to match the recognizer in case we are running in a different culture.
            var gb = new GrammarBuilder();
            gb.Culture = ri.Culture;
            gb.Append(commands);
            var gbletter = new GrammarBuilder();
            gbletter.Culture = ri.Culture;
            gbletter.Append(letters);

            // Create the actual Grammar instances, and then load them into the speech recognizer.
            var g = new Grammar(gb);
            var gbl = new Grammar(gbletter);
            sre.LoadGrammar(g);
            // BUG FIX: the letter grammar was built but never loaded in the
            // original, so the advertised "Xbox Spell" mode could never
            // recognize any letters.
            sre.LoadGrammar(gbl);

            sre.SpeechRecognized += SreSpeechRecognized;
            sre.SpeechHypothesized += SreSpeechHypothesized;
            sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

            using (Stream s = source.Start())
            {
                sre.SetInputToAudioStream(s,
                    new SpeechAudioFormatInfo(
                        EncodingFormat.Pcm, 16000, 16, 1,
                        32000, 2, null));
                Console.WriteLine("Recognizing. Say: 'Xbox Route', 'Xbox Next Direction', 'Xbox Previous Direction' or 'Xbox Spell (to spell your point)'. Press ENTER to stop");
                sre.RecognizeAsync(RecognizeMode.Multiple);
                Console.ReadLine();
                Console.WriteLine("Stopping recognizer ...");
                sre.RecognizeAsyncStop();
            }
        }
    }
}
示例13: srs_SpeechRecognized
/// <summary>
/// Handles the brand-selection result: confirms the chosen brand aloud,
/// updates the UI radio buttons, then listens for a yes/no confirmation
/// dispatched to <c>carname</c>.
/// </summary>
void srs_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    recognizer = new SpeechRecognitionEngine();
    recognizer.SetInputToDefaultAudioDevice();

    if (e.Result.Text == "Toyota")
    {
        obj.SpeakAsync("You have chosen Toyota");
        title = "Toyota";
        toyo_rb.IsChecked = true;
    }
    else if (e.Result.Text == "Honda")
    {
        obj.SpeakAsync("You have chosen Honda");
        title = "Honda";
        hon_rb.IsChecked = true;
    }
    else if (e.Result.Text == "Suzuki")
    {
        obj.SpeakAsync("You have chosen Pakistani SUZUKI");
        title = "Suzuki";
        suzu_rb.IsChecked = true;
    }
    else if (e.Result.Text == "Kia")
    {
        obj.SpeakAsync("You have chosen KIA");
        title = "Kia";
        kia_rb.IsChecked = true;
    }
    else if (e.Result.Text == "BMW")
    {
        title = "BMW";
        obj.SpeakAsync("You have chosen B M W");
        bmw_rb.IsChecked = true;
    }
    else
    {
        obj.SpeakAsync("Please choose from the list.");
    }
    obj.SpeakAsync("Say yes or no");

    // Build the yes/no confirmation grammar.
    Choices choices = new Choices("yes", "no");
    GrammarBuilder grammarBuilder = new GrammarBuilder(choices);
    Grammar grammar = new Grammar(grammarBuilder);
    recognizer.UnloadAllGrammars();
    recognizer.LoadGrammar(grammar);
    Thread.Sleep(4000); // let the spoken prompt finish before listening

    // BUG FIX: subscribe to SpeechRecognized BEFORE starting recognition so an
    // early answer cannot be missed. (The original also called RecognizeAsyncStop()
    // twice on an engine that had never been started; those no-ops were removed.)
    recognizer.SpeechRecognized +=
        new EventHandler<SpeechRecognizedEventArgs>(carname);
    recognizer.RecognizeAsync(RecognizeMode.Single);
}
开发者ID:rayedbajwa,项目名称:Customer-Enquiry-and-Response-System---Voice-Recognition-system-with-Car-Deals-Search-Algortihms-,代码行数:59,代码来源:call.xaml.cs
示例14: car_model
/// <summary>
/// Shows the brand-selection view, prompts the user, and listens for one of
/// the supported car brands, dispatching the result to <c>srs_SpeechRecognized</c>.
/// </summary>
public void car_model()
{
    main.Visibility = System.Windows.Visibility.Hidden;
    brand.Visibility = System.Windows.Visibility.Visible;
    recognizer = new SpeechRecognitionEngine();
    obj.SpeakAsync("Please Choose the Car Brand....");

    // Build the brand grammar.
    GrammarBuilder gb = new GrammarBuilder();
    Choices models = new Choices();
    models.Add(new string[] { "Toyota", "Suzuki", "Honda", "Kia", "BMW" });
    gb.Append(models);
    // Create the Grammar instance and load it into the speech recognition engine.
    Grammar g = new Grammar(gb);
    recognizer.LoadGrammar(g);
    recognizer.SetInputToDefaultAudioDevice();
    Thread.Sleep(3000); // let the spoken prompt finish before listening

    // BUG FIX: subscribe to SpeechRecognized BEFORE starting recognition so an
    // early answer cannot be missed. (The original also called RecognizeAsyncStop()
    // on a freshly created, never-started engine — a no-op that has been removed.)
    recognizer.SpeechRecognized +=
        new EventHandler<SpeechRecognizedEventArgs>(srs_SpeechRecognized);
    recognizer.RecognizeAsync(RecognizeMode.Single);
}
开发者ID:rayedbajwa,项目名称:Customer-Enquiry-and-Response-System---Voice-Recognition-system-with-Car-Deals-Search-Algortihms-,代码行数:25,代码来源:call.xaml.cs
示例15: match_enq
/// <summary>
/// Looks up sellers matching the current enquiry (<c>enq_check</c>) in the
/// local SQL CE database, reads each match aloud, and returns to the main
/// menu; on any failure it apologizes and restarts the enquiry check.
/// </summary>
private void match_enq()
{
    try
    {
        con = new SqlCeConnection(@"Data Source=C:\Users\RnBz\Documents\Visual Studio 2012\Projects\Speech Recognition2\SpeechRecognition\bin\Debug\icers.sdf");
        con.Open();

        // NOTE(review): enq_check is concatenated directly into SQL; sel_id below
        // is a parsed int, but parameterized queries would still be safer —
        // confirm enq_check is always numeric.
        SqlCeDataAdapter da = new SqlCeDataAdapter("select distinct(sel_id) from matches where enq_id=" + enq_check, con);
        DataTable dt = new DataTable();
        da.Fill(dt);

        if (dt.Rows.Count == 0)
        {
            // BUG FIX: corrected the spoken-message typo "Finf" -> "find".
            obj.SpeakAsync("I can not find any matching results...");
            Thread.Sleep(1500);
        }
        else
        {
            foreach (DataRow row in dt.Rows)
            {
                int sel_id = int.Parse(row["sel_id"].ToString());
                Console.WriteLine(row["sel_id"].ToString());

                SqlCeDataAdapter da1 = new SqlCeDataAdapter("select * from sellers where id=" + sel_id, con);
                DataTable dt_sel = new DataTable();
                da1.Fill(dt_sel);

                recognizer = new SpeechRecognitionEngine();
                recognizer.RecognizeAsyncStop();

                GrammarBuilder gb = new GrammarBuilder();
                Choices id = new Choices();
                foreach (DataRow row2 in dt_sel.Rows)
                {
                    obj.SpeakAsync("Unique ID " + row2["id"].ToString() + ": " + row2["title"].ToString());
                    obj.SpeakAsync(",Contact Number... : " + row2["contact"].ToString());
                    obj.SpeakAsync("I repeat: " + row2["contact"].ToString());
                    id.Add(row2["id"].ToString());
                }
                gb.Append(id);
                // Create the Grammar instance and load it into the speech recognition engine.
                Grammar g = new Grammar(gb);
                recognizer.LoadGrammar(g);
                recognizer.SetInputToDefaultAudioDevice();

                // Wait out any speech currently being captured before moving on.
                while (recognizer.AudioState.Equals(AudioState.Speech))
                {
                    Thread.Sleep(100);
                }
            }
        }
        Thread.Sleep(500);
        obj.SpeakAsync("Thank you for using my help, Please come back again.....");
        Thread.Sleep(4000);
        mainmenu hey = new mainmenu();
        hey.Show();
        this.Close();
    }
    catch (Exception ea)
    {
        Console.WriteLine(ea.Message);
        obj.SpeakAsync("I can not find any such results, Please try another enquiry...");
        Thread.Sleep(5000);
        check_enq();
    }
    finally { con.Close(); }
}
开发者ID:rayedbajwa,项目名称:Customer-Enquiry-and-Response-System---Voice-Recognition-system-with-Car-Deals-Search-Algortihms-,代码行数:82,代码来源:call.xaml.cs