This article collects typical usage examples of the C# SpeechRecognitionEngine.Recognize method. If you are wondering what SpeechRecognitionEngine.Recognize does, how to call it, or what it looks like in real code, the curated examples below should help. You can also read further about the containing class, SpeechRecognitionEngine.
Below are 15 code examples of SpeechRecognitionEngine.Recognize, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# examples.
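Before the examples, here is a minimal, self-contained sketch of the basic synchronous pattern. It is not taken from the examples below; it assumes a reference to System.Speech and an installed en-US recognizer:

using System;
using System.Speech.Recognition;

class MinimalRecognize
{
    static void Main()
    {
        using (var recognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US")))
        {
            recognizer.LoadGrammar(new DictationGrammar());
            recognizer.SetInputToDefaultAudioDevice();

            // Recognize() blocks until something is recognized or the input ends,
            // and returns null when nothing could be recognized.
            RecognitionResult result = recognizer.Recognize();
            Console.WriteLine(result != null ? result.Text : "(no recognition)");
        }
    }
}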
Example 1: Transcribe
public void Transcribe(MediaSegment segment)
{
    using (var engine = new SpeechRecognitionEngine())
    {
        engine.LoadGrammar(new DictationGrammar());
        engine.SetInputToWaveFile(segment.File.FullName);

        var result = engine.Recognize();
        if (result == null)
            return; // Recognize() returns null when nothing could be recognized

        var metaDatum = new Metadata();
        metaDatum.Start = result.Audio.AudioPosition.TotalMilliseconds + segment.OffsetMs;
        metaDatum.End = metaDatum.Start + segment.DurationMs;
        metaDatum.EngineMetadata = new SpeechResults
        {
            Text = result.Text,
            Confidence = result.Confidence
        };

        // Append to this file's metadata list, creating the list on first use.
        _concurrentDictionary.AddOrUpdate(segment.FileId, new List<Metadata> { metaDatum }, (x, y) =>
        {
            y.Add(metaDatum);
            return y;
        });
    }
}
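MediaSegment, Metadata and SpeechResults are this project's own types, not part of System.Speech. A minimal sketch of what the snippet assumes about them (shapes inferred only from the usage above; the real project's definitions may differ):

using System.IO;

// Hypothetical shapes, inferred from how Example 1 uses these types.
public class MediaSegment
{
    public FileInfo File { get; set; }
    public double OffsetMs { get; set; }
    public double DurationMs { get; set; }
    public string FileId { get; set; }
}

public class SpeechResults
{
    public string Text { get; set; }
    public float Confidence { get; set; } // RecognitionResult.Confidence is a float
}

public class Metadata
{
    public double Start { get; set; }
    public double End { get; set; }
    public SpeechResults EngineMetadata { get; set; }
}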
Example 2: codeBlock
/// <summary>
/// The Recognize method can loop indefinitely when the source video file contains
/// no audio track. To work around this, run this method on a worker thread and
/// Join that thread with a timeout so the call cannot block the caller forever.
/// </summary>
/// <param name="audioFilePath"></param>
/// <param name="noOfAudioFiles"></param>
/// <param name="recognizer"></param>
private void codeBlock(string audioFilePath, int noOfAudioFiles, SpeechRecognitionEngine recognizer)
{
    try
    {
        recognizer.SetInputToWaveFile(audioFilePath);
        RecognitionResult result = recognizer.Recognize(timeSpan);
        if (result != null)
            audioContentMessage += "\r\n" + result.Text;
    }
    catch (Exception)
    {
        // Swallow recognition errors; a failed file simply contributes no text.
    }
}
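The summary above describes running this method under a watchdog. A sketch of that pattern (the variable names mirror the example; the 30-second timeout is an assumption):

// Run the potentially hanging recognition on a background worker thread.
var worker = new System.Threading.Thread(
    () => codeBlock(audioFilePath, noOfAudioFiles, recognizer));
worker.IsBackground = true; // a hung worker will not keep the process alive
worker.Start();

// Wait a bounded amount of time instead of blocking forever.
if (!worker.Join(TimeSpan.FromSeconds(30)))
{
    // The recognizer did not return in time; give up on this file and move on.
}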
Example 3: StartDesign
public void StartDesign()
{
    // In-process speech recognizer
    SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine();
    recognizer.LoadGrammarCompleted += new EventHandler<LoadGrammarCompletedEventArgs>(LoadGrammarCompleted);
    recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);
    recognizer.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRejected);
    recognizer.SetInputToDefaultAudioDevice();

    GrammarBuilder clear = new GrammarBuilder("Clear");
    GrammarBuilder insert = new GrammarBuilder("Insert");
    // "too" and "ex or" appear to be deliberate phonetic spellings of "two" and "xor".
    Choices gates = new Choices(new string[] { "and", "or", "not", "ex or", "nor", "nand" });
    Choices columns = new Choices(new string[] { "one", "too", "three", "four", "five", "six", "seven", "eight" });
    Choices rows = new Choices(new string[] { "one", "too", "three", "four", "five" });
    Choices orientation = new Choices(new string[] { "left", "right", "up", "down" });

    insert.Append(gates);
    insert.Append(columns);
    insert.Append(rows);
    insert.Append("towards");
    insert.Append(orientation);

    GrammarBuilder connect = new GrammarBuilder("Connect");
    connect.Append("output");
    connect.Append(columns);
    connect.Append(rows);
    connect.Append("to");
    connect.Append("input");
    connect.Append(columns);
    connect.Append(rows);

    Grammar _clear_grammar = new Grammar(clear);
    Grammar _insert_grammar = new Grammar(insert);
    Grammar _connect_grammar = new Grammar(connect);
    recognizer.LoadGrammarAsync(_clear_grammar);
    recognizer.LoadGrammarAsync(_insert_grammar);
    recognizer.LoadGrammarAsync(_connect_grammar);

    Application.EnableVisualStyles();
    Application.SetCompatibleTextRenderingDefault(false);
    Application.Run(new Form1()); // blocks here until Form1 closes

    //recognizer.RecognizeAsync(RecognizeMode.Multiple);
    // Note: because Application.Run blocks, this loop only starts once the form
    // has closed; results are delivered through the event handlers wired above.
    while (true)
    {
        recognizer.Recognize();
    }
}
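The commented-out RecognizeAsync call hints at the event-driven alternative to the blocking Recognize() loop. A sketch (reusing the example's recognizer and handlers; these lines would stand in for the Application.Run call and the loop above):

// RecognizeMode.Multiple keeps the engine listening across utterances and
// delivers results through the SpeechRecognized event instead of return values.
recognizer.RecognizeAsync(RecognizeMode.Multiple);
Application.Run(new Form1());    // the message loop runs while recognition continues
recognizer.RecognizeAsyncStop(); // stop listening once the form closes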
Example 4: Loop
public override void Loop()
{
    return; // Early return: the recognition loop below is currently disabled.

    using (var recognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US")))
    {
        recognizer.LoadGrammar(new DictationGrammar());
        recognizer.SetInputToDefaultAudioDevice(); // the input only needs to be set once
        while (true)
        {
            var result = recognizer.Recognize();
            if (result == null) continue; // nothing recognized this time around
            Handle(result.Text);
        }
    }
}
Example 5: decode
protected void decode()
{
    try
    {
        // Renamed the locals, which originally shadowed their own type names.
        var engine = new SpeechRecognitionEngine();
        var dictationGrammar = new DictationGrammar();
        engine.LoadGrammar(dictationGrammar);
        engine.SetInputToWaveFile(Input);

        RecognitionResult result = engine.Recognize();
        if (result != null)
            mOutput = result.Text; // Recognize() returns null when nothing was recognized
    }
    catch (Exception E)
    {
        MessageBox.Show(E.Message);
    }
}
Example 6: RecognizeSpeech
public string RecognizeSpeech(byte[] speechToParse, int sampleRate)
{
    SpeechRecognitionEngine sre = new SpeechRecognitionEngine();
    if (_grammar == null)
        InitializeGrammar();
    sre.LoadGrammar(_grammar);

    // Wrap the raw PCM bytes in a stream and describe their format to the engine.
    // (The original set ms = null, which does not release the stream; using does.)
    using (var ms = new MemoryStream(speechToParse))
    {
        var formatInfo = new SpeechAudioFormatInfo(sampleRate, AudioBitsPerSample.Sixteen, AudioChannel.Mono);
        sre.SetInputToAudioStream(ms, formatInfo);

        var result = sre.Recognize();
        return result == null ? "Unable to recognize speech" : result.Text;
    }
}
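A hypothetical call site (the speechService instance, file path and 16 kHz rate are all assumptions; the byte array must contain 16-bit mono PCM at the stated rate):

// Hypothetical usage of Example 6's method; requires using System.IO.
byte[] pcm = File.ReadAllBytes(@"C:\samples\command.raw"); // raw 16-bit mono PCM
string text = speechService.RecognizeSpeech(pcm, 16000);
Console.WriteLine(text);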
Example 7: Record
void Record()
{
    KinectAudioSource source = kinectSensor.AudioSource;

    Func<RecognizerInfo, bool> matchingFunc = r =>
    {
        string value;
        r.AdditionalInfo.TryGetValue("Kinect", out value);
        return
            "True".Equals(value, StringComparison.InvariantCultureIgnoreCase) &&
            "en-US".Equals(r.Culture.Name, StringComparison.InvariantCultureIgnoreCase);
    };
    var recognizerInfo = SpeechRecognitionEngine.InstalledRecognizers().Where(matchingFunc).FirstOrDefault();
    if (recognizerInfo == null)
        return;

    speechRecognitionEngine = new SpeechRecognitionEngine(recognizerInfo.Id);
    var gb = new GrammarBuilder { Culture = recognizerInfo.Culture };
    gb.Append(choices);
    var grammar = new Grammar(gb);
    speechRecognitionEngine.LoadGrammar(grammar);

    source.AutomaticGainControlEnabled = false;
    source.BeamAngleMode = BeamAngleMode.Adaptive;
    using (Stream sourceStream = source.Start())
    {
        speechRecognitionEngine.SetInputToAudioStream(sourceStream,
            new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
        while (isRunning)
        {
            RecognitionResult result = speechRecognitionEngine.Recognize();
            if (result != null && OrderDetected != null && result.Confidence > 0.7)
                OrderDetected(result.Text);
        }
    }
}
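The positional SpeechAudioFormatInfo arguments recur in all the Kinect examples in this list. Restated with the constructor's parameter names for readability (same values), they describe 16 kHz, 16-bit, mono PCM:

// averageBytesPerSecond = 16000 samples/s * 2 bytes per sample;
// blockAlign = channels * bytes per sample.
var kinectFormat = new SpeechAudioFormatInfo(
    encodingFormat: EncodingFormat.Pcm,
    samplesPerSecond: 16000,
    bitsPerSample: 16,
    channelCount: 1,
    averageBytesPerSecond: 32000,
    blockAlign: 2,
    formatSpecificData: null);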
Example 8: button1_Click
private void button1_Click(object sender, EventArgs e)
{
    SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine();
    Grammar dictationGrammar = new DictationGrammar();
    recognizer.LoadGrammar(dictationGrammar);
    try
    {
        button1.Text = "Speak Now";
        recognizer.SetInputToDefaultAudioDevice();
        RecognitionResult result = recognizer.Recognize();
        if (result != null)
            button1.Text = result.Text;
    }
    catch (InvalidOperationException exception)
    {
        button1.Text = String.Format("Could not recognize input from default audio device. Is a microphone or sound card available?\r\n{0} - {1}.", exception.Source, exception.Message);
    }
    finally
    {
        recognizer.UnloadAllGrammars();
    }
}
Example 9: button2_Click
private void button2_Click(object sender, EventArgs e)
{
    SpeechRecognitionEngine engineSpeech = new SpeechRecognitionEngine();
    Grammar gram = new DictationGrammar();
    engineSpeech.LoadGrammar(gram);
    try
    {
        engineSpeech.SetInputToDefaultAudioDevice();
        RecognitionResult result = engineSpeech.Recognize();
        label1.Text = result.Text;
        textBox1.Text = result.Text;
        button1_Click(new Object(), new EventArgs()); // chain into the button1 handler
    }
    catch (Exception)
    {
        // Do nothing: a null result or missing audio device simply leaves the UI unchanged.
    }
    finally
    {
        engineSpeech.UnloadAllGrammars();
    }
}
Example 10: Record
void Record()
{
    using (KinectAudioSource source = new KinectAudioSource
    {
        FeatureMode = true,
        AutomaticGainControl = false,
        SystemMode = SystemMode.OptibeamArrayOnly
    })
    {
        RecognizerInfo recognizerInfo = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == RecognizerId).FirstOrDefault();
        if (recognizerInfo == null)
            return;

        SpeechRecognitionEngine speechRecognitionEngine = new SpeechRecognitionEngine(recognizerInfo.Id);
        var gb = new GrammarBuilder { Culture = recognizerInfo.Culture };
        gb.Append(choices);
        var grammar = new Grammar(gb);
        speechRecognitionEngine.LoadGrammar(grammar);

        using (Stream sourceStream = source.Start())
        {
            speechRecognitionEngine.SetInputToAudioStream(sourceStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            isRunning = true;
            while (isRunning)
            {
                RecognitionResult result = speechRecognitionEngine.Recognize();
                if (result != null && OrderDetected != null && result.Confidence > 0.7)
                    OrderDetected(result.Text);
            }
        }
    }
}
Example 11: testingAlgorithmDiscriminative
//......... part of the code is omitted here .........
                                                gc.LogGrammar("<item><token sapi:pron=\"" + pronunciations[data.listOfWords[randomWordType]][i] + "\">" + data.listOfWords[randomWordType] + "_" + i + "</token></item>");
                                            }
                                            wordCount++;
                                        }
                                        else
                                        {
                                            for (int k = 1; k <= MWordTypes; k++)
                                            {
                                                randomWordType = k;
                                                // we don't add the "correct" word
                                                if (randomWordType != iWordType)
                                                {
                                                    wordTypesChosen.Add(randomWordType);
                                                    for (int i = 0; i < pronunciations[data.listOfWords[randomWordType]].Count; i++)
                                                    {
                                                        for (int j = 0; j < MSample; j++)
                                                        {
                                                            ConfusionMatrix[NTrainingSpeakers - 1, iTestSpeaker, MSample * (iWordType - 1) + j, numberOfAlternates * (randomWordType - 1) + i].den++;
                                                        }
                                                        gc.LogGrammar("<item><token sapi:pron=\"" + pronunciations[data.listOfWords[randomWordType]][i] + "\">" + data.listOfWords[randomWordType] + "_" + i + "</token></item>");
                                                    }
                                                    //System.Diagnostics.Debug.WriteLine("word chosen: " + randomWordType);
                                                }
                                            }
                                            break;
                                        }
                                        //ConfusionMatrix[NTrainingSpeakers - 1, iTestSpeaker, iWordType - 1, randomWordType - 1].den+=MSample;
                                        /*foreach (string pronun in pronunciations[data.listOfWords[randomWordType]])
                                        {
                                            gc.LogGrammar("<item><token sapi:pron=\"" + pronun + "\">" + data.listOfWords[randomWordType] + "</token></item>");
                                        }*/
                                        //wordCount++;
                                    }
                                    gc.boundrule(false, "");
                                    gc.boundgr(false);
                                    // setup recognizer
                                    SpeechRecognitionEngine rec = new SpeechRecognitionEngine();
                                    rec.LoadGrammar(gc.getGrammar());
                                    // recognizing
                                    for (int iSample = 1; iSample <= MSample; iSample++)
                                    {
                                        rec.SetInputToWaveFile(data.getAudioName(iTestSpeaker, iWordType, iSample));
                                        RecognitionResult result;
                                        try
                                        {
                                            result = rec.Recognize();
                                        }
                                        catch (Exception e)
                                        {
                                            System.Diagnostics.Debug.WriteLine(e.Message);
                                            System.Diagnostics.Debug.WriteLine(data.getAudioName(iTestSpeaker, iWordType, iSample));
                                            continue;
                                        }
                                        //System.Diagnostics.Debug.WriteLine("recognising");
                                        if (result == null)
                                        {
                                            ConfusionMatrix[NTrainingSpeakers - 1, iTestSpeaker, MSample * (iWordType - 1) + iSample - 1, numberOfAlternates * MWordTypes].num++;
                                        }
                                        else
                                        {
                                            string[] word = result.Alternates[0].Words[0].Text.Split('_');
                                            word = wordTypeResult(2, result);
                                            //if (result.Alternates.Count > 2)
                                            //    System.Diagnostics.Debug.WriteLine("No. of results found for wordType " + iWordType + "sample " + iSample + " = " + result.Alternates[2].Confidence);
                                            ConfusionMatrix[NTrainingSpeakers - 1, iTestSpeaker, MSample * (iWordType - 1) + iSample - 1, numberOfAlternates * (Array.IndexOf(data.listOfWords.ToArray(), word[0]) - 1) + int.Parse(word[1])].num++;
                                            if (word[0].Equals(data.listOfWords[iWordType]))
                                                SampleAccuracy[NTrainingSpeakers - 1, NTrainingSamplesPerSpeaker - 1, Array.IndexOf(V, vocabSize), iTestSpeaker, iWordType - 1, iSample - 1].num++;
                                        }
                                        /*
                                        if (result != null && result.Alternates[0].Words[0].Text.Split('_')[0].Equals(data.listOfWords[iWordType]))
                                        {
                                            SampleAccuracy[NTrainingSpeakers - 1, NTrainingSamplesPerSpeaker - 1, Array.IndexOf(V, vocabSize), iTestSpeaker, iWordType - 1, iSample - 1].num++;
                                            //System.Diagnostics.Debug.WriteLine(NTrainingSpeakers + "_" + NTrainingSamplesPerSpeaker + "_" + Array.IndexOf(V, vocabSize) + "_" + iTestSpeaker + "_" + iWordType + "_" + iSample);
                                        }*/
                                        SampleAccuracy[NTrainingSpeakers - 1, NTrainingSamplesPerSpeaker - 1, Array.IndexOf(V, vocabSize), iTestSpeaker, iWordType - 1, iSample - 1].den++;
                                    }
                                    gc.Destroy();
                                }
                            }
                        }
                    }
                    // end testing
                }
            }
        }
    }
}
Example 12: RecognizeSpeech
public void RecognizeSpeech()
{
    SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine();
    recognizer.LoadGrammarCompleted += new EventHandler<LoadGrammarCompletedEventArgs>(LoadGrammarCompleted);
    recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);
    recognizer.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRejected);
    recognizer.SetInputToDefaultAudioDevice();

    GrammarBuilder commandStarter = new GrammarBuilder("Command");
    GrammarBuilder clear = new GrammarBuilder("Clear");
    GrammarBuilder insert = new GrammarBuilder("Insert");
    Choices gates = new Choices(new string[] { "and", "or", "not", "exor", "nor", "nand" });
    Choices columns = new Choices(new string[] { "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen" });
    Choices rows = new Choices(new string[] { "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine" });
    //Choices orientation = new Choices(new string[] { "left", "right", "up", "down" });

    insert.Append(gates);
    insert.Append("R");
    insert.Append(rows);
    insert.Append("C");
    insert.Append(columns);
    //insert.Append("towards");
    //insert.Append(orientation);

    GrammarBuilder connect = new GrammarBuilder("Connect");
    connect.Append("output");
    connect.Append(columns);
    connect.Append(rows);
    connect.Append("to");
    connect.Append("input");
    connect.Append(columns);
    connect.Append(rows);

    Grammar _clear_grammar = new Grammar(clear);
    Grammar _insert_grammar = new Grammar(insert);
    Grammar _connect_grammar = new Grammar(connect);
    Grammar _command_starter = new Grammar(commandStarter);
    recognizer.LoadGrammarAsync(_clear_grammar);
    recognizer.LoadGrammarAsync(_insert_grammar);
    recognizer.LoadGrammarAsync(_connect_grammar);
    recognizer.LoadGrammarAsync(_command_starter);

    while (true)
    {
        recognizer.Recognize();
        //recognizer.RecognizeAsync(RecognizeMode.Multiple);
    }
}
Example 13: BuildGrammarforRecognizer
private void BuildGrammarforRecognizer(object recognizerInfo)
{
    EnableKinectAudioSource();
    var grammarBuilder = new GrammarBuilder { Culture = (recognizerInfo as RecognizerInfo).Culture }; // currently unused

    // Create another grammar and load it
    var newGrammarBuilder = new GrammarBuilder();
    newGrammarBuilder.Append(new Choices("Schließe die Anwendung", "Ich hasse euch alle", "nächsten Folie"));
    var grammarClose = new Grammar(newGrammarBuilder);

    // 16 kHz, 16-bit, mono PCM
    int SamplesPerSecond = 16000;
    int bitsPerSample = 16;
    int channels = 1;
    int averageBytesPerSecond = 32000;
    int blockAlign = 2;

    using (var speechRecognizer = new SpeechRecognitionEngine((recognizerInfo as RecognizerInfo).Id))
    {
        speechRecognizer.LoadGrammar(grammarClose);
        speechRecognizer.SpeechRecognized += SreSpeechRecognized;
        speechRecognizer.SpeechHypothesized += SreSpeechHypothesized;
        speechRecognizer.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
        using (Stream s = source.Start())
        {
            speechRecognizer.SetInputToAudioStream(
                s, new SpeechAudioFormatInfo(EncodingFormat.Pcm, SamplesPerSecond, bitsPerSample, channels, averageBytesPerSecond, blockAlign, null));
            while (keepRunning)
            {
                // Recognize(TimeSpan) returns null if nothing is recognized within
                // five seconds; results are delivered via the event handlers above.
                RecognitionResult result = speechRecognizer.Recognize(new TimeSpan(0, 0, 5));
            }
            speechRecognizer.RecognizeAsyncStop();
        }
    }
}
Example 14: CommandThreadFunc
static void CommandThreadFunc()
{
    while (cameraThread != null && !cameraFixed)
    {
        Thread.Sleep(3000);
    }
    if (cameraThread != null)
    {
        cameraThread.Join();
    }
    using (var source = new KinectAudioSource())
    {
        source.FeatureMode = true;
        source.AutomaticGainControl = false; // Important to turn this off for speech recognition
        source.SystemMode = SystemMode.OptibeamArrayOnly; // No AEC for this sample

        RecognizerInfo ri = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == RecognizerId).FirstOrDefault();
        if (ri == null)
        {
            Console.WriteLine("Could not find speech recognizer: {0}. Please refer to the sample requirements.", RecognizerId);
            return;
        }
        Console.WriteLine("Using: {0}", ri.Name);

        using (var sre = new SpeechRecognitionEngine(ri.Id))
        {
            var i4c3dCommand = new Choices();
            i4c3dCommand.Add(COMMAND_INITIALIZE);
            i4c3dCommand.Add(COMMAND_ZOOM_IN);
            i4c3dCommand.Add(COMMAND_ZOOM_OUT);
            i4c3dCommand.Add(COMMAND_STOP);
            i4c3dCommand.Add(COMMAND_LEFT);
            i4c3dCommand.Add(COMMAND_RIGHT);
            i4c3dCommand.Add(COMMAND_UP);
            i4c3dCommand.Add(COMMAND_DOWN);
            //i4c3dCommand.Add(COMMAND_ALIAS);
            //i4c3dCommand.Add(COMMAND_MAYA);
            //i4c3dCommand.Add(COMMAND_RTT);
            //i4c3dCommand.Add(COMMAND_SHOWCASE);

            var gb = new GrammarBuilder();
            // Specify the culture to match the recognizer in case we are running in a different culture.
            gb.Culture = ri.Culture;
            gb.Append(i4c3dCommand);

            // Create the actual Grammar instance, and then load it into the speech recognizer.
            var g = new Grammar(gb);
            sre.LoadGrammar(g);
            sre.SpeechRecognized += SreSpeechRecognizedI4C3D;
            sre.SpeechHypothesized += SreSpeechHypothesized;
            sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

            using (Stream s = source.Start())
            {
                sre.SetInputToAudioStream(s,
                    new SpeechAudioFormatInfo(
                        EncodingFormat.Pcm, 16000, 16, 1,
                        32000, 2, null));
                Console.WriteLine("Recognizing. Say: 'Start' to start camera move");
                while (!i4c3dStarted) Thread.Sleep(1); // wait for the start command
                while (!exit)
                {
                    // The original passed new TimeSpan(100000), i.e. 100,000 ticks (10 ms),
                    // although the "// 1sec" comment shows one second was intended.
                    sre.Recognize(TimeSpan.FromSeconds(1));
                    //sre.Recognize();
                }
                Console.WriteLine("Stopping recognizer ...");
            }
        }
    }
}
Example 15: Listen
public void Listen()
{
    source.EchoCancellationMode = EchoCancellationMode.None; // No AEC for this sample
    source.AutomaticGainControlEnabled = false; // Important to turn this off for speech recognition

    RecognizerInfo ri = GetKinectRecognizer();
    var sre = new SpeechRecognitionEngine(ri.Id);
    sre.SpeechRecognized += SreSpeechRecognized;
    sre.SpeechHypothesized += SreSpeechHypothesized;
    sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

    System.IO.Stream s = source.Start();
    sre.SetInputToAudioStream(s, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
    while (!done)
    {
        // Pick up any grammar the cell has produced since the last pass, then
        // listen for up to half a second; results arrive via the event handlers.
        Grammar newGrammar = cell.GetNewGrammar();
        if (newGrammar != null)
            sre.LoadGrammar(newGrammar);
        sre.Recognize(new TimeSpan(0, 0, 0, 0, 500));
    }
}