This article collects typical usage examples of the C# SpeechRecognitionEngine.RecognizeAsync method. If you are unsure how to call SpeechRecognitionEngine.RecognizeAsync, or want to see how it is used in practice, the curated examples below may help; they also illustrate the broader usage of the containing SpeechRecognitionEngine class.
Fifteen code examples of SpeechRecognitionEngine.RecognizeAsync follow, sorted by popularity by default.
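Before the collected examples, a minimal, self-contained sketch of the typical call sequence may help orientation. This is an illustrative sketch (the class name and grammar contents are ours, not from any example below) using only documented System.Speech APIs: attach handlers, set the input, then call RecognizeAsync. RecognizeMode.Single returns after one phrase; RecognizeMode.Multiple keeps listening until RecognizeAsyncStop or RecognizeAsyncCancel is called.

using System;
using System.Speech.Recognition; // reference the System.Speech assembly

class MinimalRecognizeAsyncDemo
{
    static void Main()
    {
        using (var recognizer = new SpeechRecognitionEngine(
            new System.Globalization.CultureInfo("en-US")))
        {
            // Build a tiny command grammar: "start" or "stop".
            var builder = new GrammarBuilder(new Choices("start", "stop"));
            recognizer.LoadGrammar(new Grammar(builder));

            // Attach handlers BEFORE starting recognition so no event is missed.
            recognizer.SpeechRecognized += (s, e) =>
                Console.WriteLine("Recognized: " + e.Result.Text);

            recognizer.SetInputToDefaultAudioDevice();

            // Multiple = keep recognizing until explicitly stopped.
            recognizer.RecognizeAsync(RecognizeMode.Multiple);

            Console.WriteLine("Listening; press Enter to stop.");
            Console.ReadLine();

            // Let any utterance in progress finish, then stop.
            recognizer.RecognizeAsyncStop();
        }
    }
}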
Example 1: InitializeSpeechRecognitionEngine
public void InitializeSpeechRecognitionEngine(String filePath)
{
    MySpeechRecognitionEngine = new SpeechRecognitionEngine();
    MySpeechRecognitionEngine.UnloadAllGrammars();
    try
    {
        // Use the wave file as input (alternatively, SetInputToDefaultAudioDevice() for a live microphone).
        MySpeechRecognitionEngine.SetInputToWaveFile(filePath);
        Process.Start("C:\\Program Files\\Windows Media Player\\wmplayer.exe", ("\"" + filePath + "\""));
        MySpeechRecognitionEngine.LoadGrammar(new DictationGrammar());
        // Attach event handlers BEFORE starting recognition so no events are missed.
        MySpeechRecognitionEngine.AudioLevelUpdated += MySpeechRecognitionEngine_AudioLevelUpdated;
        MySpeechRecognitionEngine.SpeechRecognized += MySpeechRecognitionEngine_SpeechRecognized;
        MySpeechRecognitionEngine.AudioStateChanged += MySpeechRecognitionEngine_AudioStateChanged;
        MySpeechRecognitionEngine.RecognizeCompleted += MySpeechRecognitionEngine_RecognizeCompleted;
        MySpeechRecognitionEngine.RecognizeAsync(RecognizeMode.Single);
    }
    catch (Exception ex)
    {
        Console.Write(ex.Message);
    }
}
Example 2: Transcribe
private string Transcribe(MemoryStream audioFile)
{
    using (var recognizer = new SpeechRecognitionEngine())
    {
        // Create and load a dictation grammar.
        var dictation = new DictationGrammar
        {
            Name = "Dictation Grammar"
        };
        recognizer.LoadGrammar(dictation);

        // Configure the input to the recognizer.
        recognizer.SetInputToWaveStream(audioFile);

        // Attach event handlers for the results of recognition.
        recognizer.SpeechRecognized +=
            new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
        recognizer.RecognizeCompleted +=
            new EventHandler<RecognizeCompletedEventArgs>(recognizer_RecognizeCompleted);

        // Perform recognition on the entire stream.
        Console.WriteLine("Starting asynchronous recognition...");
        completed = false;
        recognizer.RecognizeAsync(RecognizeMode.Single);

        // Busy-wait until recognizer_RecognizeCompleted sets the completed flag.
        while (!completed)
        {
        }
    }
    return TranscribedText;
}
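The empty while loop above spins a CPU core while it waits. A minimal alternative sketch, assuming the same recognizer_RecognizeCompleted handler name and a System.Threading import, blocks on a ManualResetEvent instead:

// Hypothetical reworking of the wait in Transcribe; the field name is illustrative.
// Requires: using System.Threading;
private readonly ManualResetEvent recognitionDone = new ManualResetEvent(false);

private void recognizer_RecognizeCompleted(object sender, RecognizeCompletedEventArgs e)
{
    recognitionDone.Set(); // wake up the waiting thread
}

// In Transcribe, instead of the busy-wait loop:
recognitionDone.Reset();
recognizer.RecognizeAsync(RecognizeMode.Single);
recognitionDone.WaitOne(); // blocks without spinning until RecognizeCompleted fires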
Example 3: MainWindow
public MainWindow()
{
    InitializeComponent();
    var config = new JsonConfigHandler( System.IO.Path.Combine( Environment.GetFolderPath( Environment.SpecialFolder.ApplicationData ), "LeagueTag" ) );
    config.Save();
    // Build a two-part command grammar: the word "tag" followed by "baron" or "dragon".
    var engine = new SpeechRecognitionEngine();
    var builder = new GrammarBuilder();
    builder.Append( "tag" );
    builder.Append( new Choices( "baron", "dragon" ) );
    // RequestRecognizerUpdate only matters once the engine is running (see the note after this example).
    engine.RequestRecognizerUpdate();
    engine.LoadGrammar( new Grammar( builder ) );
    engine.SpeechRecognized += engine_SpeechRecognized;
    engine.SetInputToDefaultAudioDevice();
    engine.RecognizeAsync( RecognizeMode.Multiple );
    CompositionTarget.Rendering += CompositionTarget_Rendering;
    this.DataContext = this;
}
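RequestRecognizerUpdate is meant for changing grammars while the engine is already recognizing: request an update, then do the grammar work inside the RecognizerUpdateReached event, where the engine has paused at a safe point. A minimal sketch of that documented pattern (the replacement grammar here is illustrative):

// Swap grammars on a running engine at a safe pause point.
engine.RecognizerUpdateReached += (s, e) =>
{
    engine.UnloadAllGrammars();
    engine.LoadGrammar(new DictationGrammar()); // illustrative replacement grammar
};
engine.RequestRecognizerUpdate();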
示例4: SpeechRecognizer
/*
* SpeechRecognizer
*
* @param GName - grammar file name
*/
public SpeechRecognizer(string GName, int minConfidence)
{
//creates the speech recognizer engine
sr = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("pt-PT"));
sr.SetInputToDefaultAudioDevice();
Console.WriteLine("confiança : " + minConfidence);
sr.UpdateRecognizerSetting("CFGConfidenceRejectionThreshold", minConfidence);
Grammar gr = null;
//verifies if file exist, and loads the Grammar file, else load defualt grammar
if (System.IO.File.Exists(GName))
{
gr = new Grammar(GName);
gr.Enabled = true;
}
else
Console.WriteLine("Can't read grammar file");
//load Grammar to speech engine
sr.LoadGrammar(gr);
//assigns a method, to execute when speech is recognized
sr.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);
//assigns a method, to execute when speech is NOT recognized
sr.SpeechRecognitionRejected +=
new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRecognitionRejected);
// Start asynchronous, continuous speech recognition.
sr.RecognizeAsync(RecognizeMode.Multiple);
}
Example 5: StartListening
public void StartListening()
{
    if (null != _ri)
    {
        _speechEngine = new SpeechRecognitionEngine(_ri.Id);

        // Create a grammar from the grammar definition XML file.
        using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(KAIT.Kinect.Service.Properties.Resources.SpeechGrammar)))
        {
            var g = new Grammar(memoryStream);
            _speechEngine.LoadGrammar(g);
        }

        _speechEngine.SpeechRecognized += _speechEngine_SpeechRecognized;
        _speechEngine.SpeechRecognitionRejected += _speechEngine_SpeechRecognitionRejected;

        // Let the convert stream know speech is going active.
        _convertStream.SpeechActive = true;

        // For long recognition sessions (a few hours or more), it may be beneficial to turn off
        // adaptation of the acoustic model to prevent accuracy from degrading over time:
        ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

        _speechEngine.SetInputToAudioStream(
            _convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        _speechEngine.RecognizeAsync(RecognizeMode.Multiple);
    }
    //else
    //    throw new InvalidOperationException("RecognizerInfo cannot be null");
}
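The positional SpeechAudioFormatInfo arguments used by the Kinect examples are easy to misread. Annotated parameter by parameter, the same 16 kHz mono PCM format reads:

// The same format as above, with each constructor argument annotated.
var format = new SpeechAudioFormatInfo(
    EncodingFormat.Pcm, // encoding
    16000,              // samples per second (16 kHz)
    16,                 // bits per sample
    1,                  // channels (mono)
    32000,              // average bytes per second = 16000 samples/s * 2 bytes/sample
    2,                  // block align: bytes per sample frame (1 channel * 16 bits)
    null);              // no format-specific data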
Example 6: StartListening
public void StartListening()
{
    if (null != _ri)
    {
        _speechEngine = new SpeechRecognitionEngine(_ri.Id);

        // Create a grammar from the grammar definition XML file.
        using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(KAIT.Kinect.Service.Properties.Resources.SpeechGrammar)))
        {
            var g = new Grammar(memoryStream);
            _speechEngine.LoadGrammar(g);
        }

        _speechEngine.SpeechRecognized += _speechEngine_SpeechRecognized;
        _speechEngine.SpeechRecognitionRejected += _speechEngine_SpeechRecognitionRejected;

        // Let the convert stream know speech is going active.
        _convertStream.SpeechActive = true;

        _speechEngine.SetInputToAudioStream(
            _convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        _speechEngine.RecognizeAsync(RecognizeMode.Multiple);
    }
}
Example 7: VoiceRecognizer
public VoiceRecognizer()
{
    try
    {
        // Create a new SpeechRecognitionEngine instance for US English.
        voiceEngine = new SpeechRecognitionEngine(new CultureInfo("en-US"));

        // Set up the audio device.
        voiceEngine.SetInputToDefaultAudioDevice();

        // Create the Grammar instance and load it into the speech recognition engine.
        Grammar g = new Grammar(CommandPool.BuildSrgsGrammar());
        voiceEngine.LoadGrammar(g);
        //voiceEngine.EndSilenceTimeout = new TimeSpan(0, 0, 1);

        // Register a handler for the SpeechRecognized event.
        voiceEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);

        // Start listening in multiple mode (that is, don't quit after a single recognition).
        voiceEngine.RecognizeAsync(RecognizeMode.Multiple);
        IsSetup = true;
    }
    catch (Exception e)
    {
        Console.WriteLine("Voice recognizer setup failed: " + e.Message);
        IsSetup = false;
    }
}
Example 8: btn_connect_Click
private void btn_connect_Click(object sender, EventArgs e)
{
    ushort port;
    if (!ushort.TryParse(txt_port.Text, out port))
    {
        MessageBox.Show("Invalid port number");
        return;
    }
    try
    {
        current_player = new AssPlayer(players[cmb_players.SelectedItem.ToString()], txt_host.Text, port);
    }
    catch (Exception ex)
    {
        MessageBox.Show("Could not connect: " + ex.Message);
        return;
    }
    voice_threshold = (float)num_voice_threshold.Value;

    // Build a grammar from the player's command set and start continuous recognition.
    recognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US"));
    Grammar player_grammar = prepare_grammar(current_player.commands);
    recognizer.LoadGrammar(player_grammar);
    recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
    recognizer.SetInputToDefaultAudioDevice();
    recognizer.RecognizeAsync(RecognizeMode.Multiple);
    taskbar_icon.Visible = true;
    Hide();
}
示例9: MainWindow
/// <summary>
/// Initializes a new instance of the <see cref="MainWindow"/> class.
/// </summary>
public MainWindow()
{
InitializeComponent();
try
{
// create the engine
//speechRecognitionEngine = createSpeechEngine("de-DE");
//speechRecognitionEngine = createSpeechEngine(CultureInfo.CurrentCulture.Name);
speechRecognitionEngine = createSpeechEngine("es-ES");
// hook to events
speechRecognitionEngine.AudioLevelUpdated += new EventHandler<AudioLevelUpdatedEventArgs>(engine_AudioLevelUpdated);
// Create and load a dictation grammar.
speechRecognitionEngine.LoadGrammar(new DictationGrammar());
speechRecognitionEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(engine_SpeechRecognized);
// use the system's default microphone
speechRecognitionEngine.SetInputToDefaultAudioDevice();
// start listening
speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
}
catch (Exception ex)
{
MessageBox.Show(ex.Message, "Voice recognition failed");
}
}
示例10: Main
static void Main(string[] args)
{
try
{
ss.SetOutputToDefaultAudioDevice();
Console.WriteLine("\n(Speaking: I am awake)");
ss.Speak("I am awake");
CultureInfo ci = new CultureInfo("en-us");
sre = new SpeechRecognitionEngine(ci);
sre.SetInputToDefaultAudioDevice();
sre.SpeechRecognized += sre_SpeechRecognized;
Choices ch_StartStopCommands = new Choices();
ch_StartStopCommands.Add("Alexa record");
ch_StartStopCommands.Add("speech off");
ch_StartStopCommands.Add("klatu barada nikto");
GrammarBuilder gb_StartStop = new GrammarBuilder();
gb_StartStop.Append(ch_StartStopCommands);
Grammar g_StartStop = new Grammar(gb_StartStop);
sre.LoadGrammarAsync(g_StartStop);
sre.RecognizeAsync(RecognizeMode.Multiple); // multiple grammars
while (done == false) { ; }
Console.WriteLine("\nHit <enter> to close shell\n");
Console.ReadLine();
}
catch (Exception ex)
{
Console.WriteLine(ex.Message);
Console.ReadLine();
}
}
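A handler for the "speech off" command above would typically end the asynchronous session. A sketch of what sre_SpeechRecognized might contain (the done flag matches the example; the body is otherwise illustrative):

static void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    if (e.Result.Text == "speech off")
    {
        // RecognizeAsyncCancel() aborts immediately and discards the current utterance;
        // RecognizeAsyncStop() would let the utterance in progress finish first.
        sre.RecognizeAsyncCancel();
        done = true; // releases the busy-wait loop in Main
    }
}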
Example 11: initRS
public void initRS()
{
    try
    {
        SpeechRecognitionEngine sre = new SpeechRecognitionEngine(new CultureInfo("en-US"));

        // Build a small command vocabulary.
        var words = new Choices();
        words.Add("Hello");
        words.Add("Jump");
        words.Add("Left");
        words.Add("Right");

        var gb = new GrammarBuilder();
        gb.Culture = new System.Globalization.CultureInfo("en-US");
        gb.Append(words);
        Grammar g = new Grammar(gb);
        sre.LoadGrammar(g);

        sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
        sre.SetInputToDefaultAudioDevice();
        sre.RecognizeAsync(RecognizeMode.Multiple);
    }
    catch (Exception e)
    {
        label1.Text = "init RS Error : " + e.ToString();
    }
}
Example 12: load_listen
public void load_listen(VI_Profile profile, VI_Settings settings, ListView statusContainer)
{
    this.profile = profile;
    this.settings = settings;
    this.statusContainer = statusContainer;

    vi_syn = profile.synth;
    vi_syn.SelectVoice(settings.voice_info);
    vi_sre = new SpeechRecognitionEngine(settings.recognizer_info);

    // Collect the profile's trigger phrases into a glossary for the grammar.
    GrammarBuilder phrases_grammar = new GrammarBuilder();
    List<string> glossary = new List<string>();
    foreach (VI_Phrase trigger in profile.Profile_Triggers)
    {
        glossary.Add(trigger.value);
    }
    if (glossary.Count == 0)
    {
        MessageBox.Show("You need to add at least one Trigger");
        return;
    }
    phrases_grammar.Append(new Choices(glossary.ToArray()));
    vi_sre.LoadGrammar(new Grammar(phrases_grammar));

    // Set event handlers for recognized and rejected speech.
    vi_sre.SpeechRecognized += phraseRecognized;
    vi_sre.SpeechRecognitionRejected += _recognizer_SpeechRecognitionRejected;
    vi_sre.SetInputToDefaultAudioDevice();
    vi_sre.RecognizeAsync(RecognizeMode.Multiple);
}
示例13: VoiceControl
/// <summary>
/// Starts up the SkeletonSlam class.
/// </summary>
public VoiceControl()
{
kinectSensor = KinectSensor.KinectSensors[0];
TransformSmoothParameters smoothingParam = new TransformSmoothParameters();
{
smoothingParam.Smoothing = 0.5f;
smoothingParam.Correction = 0.5f;
smoothingParam.Prediction = 0.5f;
smoothingParam.JitterRadius = 0.05f;
smoothingParam.MaxDeviationRadius = 0.04f;
};
kinectSensor.SkeletonStream.Enable(smoothingParam);
kinectSensor.SkeletonFrameReady += getSkeleton;
sre = CreateSpeechRecognizer();
kinectSensor.Start();
sre.SetInputToAudioStream(kinectSensor.AudioSource.Start(),
new SpeechAudioFormatInfo(
EncodingFormat.Pcm, 16000, 16, 1,
32000, 2, null));
sre.RecognizeAsync(RecognizeMode.Multiple);
reset();
}
示例14: Start
public void Start()
{
try
{
// create the engine
speechRecognitionEngine = createSpeechEngine("en-US");
// hook to event
speechRecognitionEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(engine_SpeechRecognized);
// load dictionary
loadGrammarAndCommands();
// use the system's default microphone
speechRecognitionEngine.SetInputToDefaultAudioDevice();
// start listening
speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
//Create the speech synthesizer
speechSynthesizer = new SpeechSynthesizer();
speechSynthesizer.Rate = -5;
}
catch (Exception ex)
{
Console.WriteLine("Voice recognition failed " + ex.Message);
}
//Keeps the command prompt going until you say jarvis quit
while(lastCommand.ToLower() != "quit")
{
}
}
Example 15: worker_DoWork
private void worker_DoWork(object sender, DoWorkEventArgs e)
{
    Thread.CurrentThread.Name = "Kinect audio thread";
    if (_device.Type == DeviceType.KINECT_1)
    {
        SpeechRecognizer = new SpeechRecognitionEngine(recognizerInfo.Id);
        SpeechRecognizer.LoadGrammar(GetCurrentGrammar());
        SpeechRecognizer.SpeechRecognized += SreSpeechRecognized;
        SpeechRecognizer.SpeechHypothesized += SreSpeechHypothesized;
        SpeechRecognizer.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

        // Use the sensor's audio source, with the beam angle adapting to the speaker's direction.
        audioSource = _device.sensor.AudioSource;
        audioSource.BeamAngleMode = BeamAngleMode.Adaptive;

        // Start the audio source and configure the incoming stream.
        var kinectStream = audioSource.Start();
        SpeechRecognizer.SetInputToAudioStream(
            kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

        // Make sure the recognizer does not stop after completing one phrase.
        SpeechRecognizer.RecognizeAsync(RecognizeMode.Multiple);

        // Kinect audio settings commonly used for speech recognition:
        // no echo cancellation, automatic gain control disabled.
        _device.sensor.AudioSource.EchoCancellationMode = EchoCancellationMode.None;
        _device.sensor.AudioSource.AutomaticGainControlEnabled = false;
        audioStarted = true;
    }
    Console.WriteLine("END OF WORKER AUDIO");
}
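None of the fifteen examples shows teardown. As a closing note, a hypothetical cleanup method for Example 15's worker might look like the sketch below; the field names match the example, while the method itself and its call order are the illustrative part.

// Illustrative cleanup for Example 15's worker; not part of the original source.
private void StopListening()
{
    if (SpeechRecognizer != null)
    {
        // Cancel the asynchronous session before releasing the audio source.
        SpeechRecognizer.RecognizeAsyncCancel();
        SpeechRecognizer.SpeechRecognized -= SreSpeechRecognized;
        SpeechRecognizer.Dispose(); // SpeechRecognitionEngine implements IDisposable
    }
    if (audioSource != null)
    {
        audioSource.Stop(); // stop the Kinect audio stream
    }
    audioStarted = false;
}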