本文整理汇总了C#中SpeechRecognitionEngine.SetInputToWaveFile方法的典型用法代码示例。如果您正苦于以下问题:C# SpeechRecognitionEngine.SetInputToWaveFile方法的具体用法?C# SpeechRecognitionEngine.SetInputToWaveFile怎么用?C# SpeechRecognitionEngine.SetInputToWaveFile使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类SpeechRecognitionEngine
的用法示例。
在下文中一共展示了SpeechRecognitionEngine.SetInputToWaveFile方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: InitializeSpeechRecognitionEngine
/// <summary>
/// Creates a fresh recognizer, points it at the given .wav file, launches the
/// file in Windows Media Player, and starts a single asynchronous
/// dictation-recognition pass.
/// </summary>
/// <param name="filePath">Full path to the .wav file to recognize.</param>
public void InitializeSpeechRecognitionEngine(String filePath)
{
MySpeechRecognitionEngine = new SpeechRecognitionEngine();
//MySpeechRecognitionEngine.SetInputToDefaultAudioDevice();
MySpeechRecognitionEngine.UnloadAllGrammars();
try
{
MySpeechRecognitionEngine.SetInputToWaveFile(filePath);
// Quote the path so WMP handles spaces in the file name.
Process.Start("C:\\Program Files\\Windows Media Player\\wmplayer.exe", ("\"" + filePath + "\""));
MySpeechRecognitionEngine.LoadGrammar(new DictationGrammar());
// FIX: subscribe the event handlers BEFORE starting recognition.
// The original called RecognizeAsync first, so events raised during
// startup could fire before any handler was attached and be lost.
MySpeechRecognitionEngine.AudioLevelUpdated += MySpeechRecognitionEngine_AudioLevelUpdated;
MySpeechRecognitionEngine.SpeechRecognized += MySpeechRecognitionEnginee_SpeechRecognized;
MySpeechRecognitionEngine.AudioStateChanged += MySpeechRecognitionEnginee_AudioStateChanged;
MySpeechRecognitionEngine.RecognizeCompleted += MySpeechRecognitionEngine_RecognizeCompleted;
MySpeechRecognitionEngine.RecognizeAsync(RecognizeMode.Single);
}
catch (Exception ex)
{
// ex.Message is already a string; ToString() on it was redundant.
Console.Write(ex.Message);
}
}
示例2: Transcribe
/// <summary>
/// Runs synchronous dictation recognition over one media segment's wave file
/// and records the result (text, confidence, absolute start/end times) in the
/// shared per-file metadata dictionary.
/// </summary>
/// <param name="segment">Segment whose wave file is transcribed; its offset and
/// duration position the result on the original media timeline.</param>
public void Transcribe(MediaSegment segment)
{
using (var engine = new SpeechRecognitionEngine())
{
engine.LoadGrammar(new DictationGrammar());
engine.SetInputToWaveFile(segment.File.FullName);
var result = engine.Recognize();
// FIX: Recognize() returns null when nothing could be recognized
// (e.g. silence or an unreadable segment). The original dereferenced
// result.Audio unconditionally and threw NullReferenceException.
if (result == null)
{
return;
}
var metaDatum = new Metadata();
// Audio position is relative to the segment file; add the segment's
// offset to get a position on the original media timeline.
metaDatum.Start = result.Audio.AudioPosition.TotalMilliseconds + segment.OffsetMs;
metaDatum.End = metaDatum.Start + segment.DurationMs;
metaDatum.EngineMetadata = new SpeechResults
{
Text = result.Text,
Confidence = result.Confidence
};
// Append to the existing list for this file, or start a new one.
_concurrentDictionary.AddOrUpdate(segment.FileId, new List<Metadata> {metaDatum}, (x, y) =>
{
y.Add(metaDatum);
return y;
});
}
}
示例3: SpeechRecognizer
/// <summary>
/// Builds the recognizer, wires its input to either the default microphone
/// (when no path is supplied) or the given .wav file, and starts continuous
/// asynchronous recognition.
/// </summary>
/// <param name="phrases">Phrases the recognizer should listen for.</param>
/// <param name="wavFilePath">Optional .wav file path; null/empty selects the
/// default audio device instead.</param>
public SpeechRecognizer(List<string> phrases, string wavFilePath)
{
_phrases = phrases;
_speechRecognitionEngine = CreateSpeechRecognizer();

bool useDefaultDevice = string.IsNullOrEmpty(wavFilePath);
if (useDefaultDevice)
{
_speechRecognitionEngine.SetInputToDefaultAudioDevice();
}
else
{
_speechRecognitionEngine.SetInputToWaveFile(wavFilePath);
}

_speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
}
示例4: codeBlock
/// <summary>
/// Recognizes speech in a single extracted audio file and appends the text to
/// <c>audioContentMessage</c>. Recognize can hang when the source video has no
/// audio track, so the caller joins this worker thread with a timeout; here we
/// additionally pass a per-call timeout and guard against a null result.
/// </summary>
/// <param name="audioFilePath">Path to the .wav file to recognize.</param>
/// <param name="noOfAudioFiles">Total number of audio files (unused in this method; kept for signature compatibility).</param>
/// <param name="recognizer">Recognizer already configured with grammars.</param>
private void codeBlock(string audioFilePath, int noOfAudioFiles, SpeechRecognitionEngine recognizer)
{
try
{
recognizer.SetInputToWaveFile(audioFilePath);
RecognitionResult result = recognizer.Recognize(timeSpan);
// FIX: Recognize(timeout) returns null when nothing is recognized
// within the window; the original dereferenced result.Text and relied
// on the empty catch below to swallow the NullReferenceException.
if (result != null)
{
audioContentMessage += "\r\n" + result.Text;
}
}
catch (Exception)
{
// Deliberate best-effort: a recognition failure on one file must not
// abort processing of the remaining files.
}
}
示例5: Main
// Initialize an in-process speech recognition engine, load a fixed 42-word
// grammar, and asynchronously recognize the .wav file named in Input.txt.
static void Main(string[] args)
{
using (SpeechRecognitionEngine recognizer =
new SpeechRecognitionEngine())
{
// Create and load a grammar limited to the 42 expected words.
string[] myWords = new string[] { "Me", "Kiss", "Fluff", "Yell", "Kind", "Crack", "Hope", "Check", "Lake", "Steep", "Shell", "Bark", "Tooth", "Mouse", "Force", "Fringe", "Flight", "Haunt", "Asked", "Going", "Table", "Giant", "Bully", "Treated", "Spying", "Wiggle", "Shredded", "Picnic", "Decoy", "Slaying", "Scheming", "Happier", "Joyous", "Riotous", "Chow", "Cookie", "Feud", "Eighty", "Host", "Weather", "Crawl", "Stew" }; //Sets the words that it will be listening for
Choices commands = new Choices();
commands.Add(myWords);
GrammarBuilder gBuilder = new GrammarBuilder();
gBuilder.Append(commands);
Grammar grammar = new Grammar(gBuilder);
recognizer.LoadGrammarAsync(grammar);
// NOTE: the original also called SetInputToDefaultAudioDevice() here;
// it was immediately overridden by SetInputToWaveFile below, so it has
// been removed as redundant.
//Grammar dictation = new DictationGrammar();
//dictation.Name = "Dictation Grammar";
// recognizer.LoadGrammar(dictation);

// Read the sample file name from Input.txt.
// FIX: the original leaked the StreamReader (never disposed) and used
// the raw ReadToEnd() result — a trailing newline in Input.txt would
// corrupt the wave-file path, so the name is trimmed.
string line;
using (StreamReader sr = new StreamReader(@"c:\Users\Taylor\Desktop\AllVoiceSamples\Input.txt"))
{
line = sr.ReadToEnd().Trim();
}

// Configure the input to the recognizer.
recognizer.SetInputToWaveFile(@"c:\Users\Taylor\Desktop\AllVoiceSamples\" + line);
// Attach event handlers for the results of recognition.
recognizer.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
recognizer.RecognizeCompleted +=
new EventHandler<RecognizeCompletedEventArgs>(recognizer_RecognizeCompleted);
// Perform recognition on the entire file.
Console.WriteLine("Starting asynchronous recognition...");
completed = false;
recognizer.RecognizeAsync();
// Keep the console window open until RecognizeCompleted sets the flag.
while (!completed)
{
Console.ReadLine();
}
Console.WriteLine("Done.");
}
Console.WriteLine();
Console.WriteLine("Press any key to exit...");
Console.ReadKey();
}
示例6: decode
/// <summary>
/// Transcribes the wave file referenced by <c>Input</c> using free dictation
/// and stores the recognized text in <c>mOutput</c>.
/// </summary>
protected void decode()
{
try
{
// FIX: the original local was named "SpeechRecognitionEngine", shadowing
// the type name; renamed. The engine is IDisposable, so wrap it in using.
using (SpeechRecognitionEngine engine = new SpeechRecognitionEngine())
{
DictationGrammar dictation = new DictationGrammar();
engine.LoadGrammar(dictation);
engine.SetInputToWaveFile(Input);
RecognitionResult result = engine.Recognize();
// FIX: Recognize() returns null when nothing was recognized; the
// original dereferenced Result.Text and relied on the catch below
// to show the resulting NullReferenceException to the user.
mOutput = result != null ? result.Text : string.Empty;
}
}
catch (Exception E)
{
MessageBox.Show(E.Message);
}
}
示例7: testingAlgorithmDiscriminative
//.........这里部分代码省略.........
gc.LogGrammar("<item><token sapi:pron=\"" + pronunciations[data.listOfWords[randomWordType]][i] + "\">" + data.listOfWords[randomWordType] + "_" + i + "</token></item>");
}
wordCount++;
}
else
{
for (int k = 1; k <= MWordTypes; k++)
{
randomWordType = k;
// we don't add the "correct" word
if (randomWordType != iWordType)
{
wordTypesChosen.Add(randomWordType);
for (int i = 0; i < pronunciations[data.listOfWords[randomWordType]].Count; i++)
{
for (int j = 0; j < MSample; j++)
{
ConfusionMatrix[NTrainingSpeakers - 1, iTestSpeaker, MSample * (iWordType - 1) + j, numberOfAlternates * (randomWordType - 1) + i].den++;
}
gc.LogGrammar("<item><token sapi:pron=\"" + pronunciations[data.listOfWords[randomWordType]][i] + "\">" + data.listOfWords[randomWordType] + "_" + i + "</token></item>");
}
//System.Diagnostics.Debug.WriteLine("word chosen: " + randomWordType);
}
}
break;
}
//ConfusionMatrix[NTrainingSpeakers - 1, iTestSpeaker, iWordType - 1, randomWordType - 1].den+=MSample;
/*foreach (string pronun in pronunciations[data.listOfWords[randomWordType]])
{
gc.LogGrammar("<item><token sapi:pron=\"" + pronun + "\">" + data.listOfWords[randomWordType] + "</token></item>");
}*/
//wordCount++;
}
gc.boundrule(false, "");
gc.boundgr(false);
// setup recognizer
SpeechRecognitionEngine rec = new SpeechRecognitionEngine();
rec.LoadGrammar(gc.getGrammar());
// recognizing
for (int iSample = 1; iSample <= MSample; iSample++)
{
rec.SetInputToWaveFile(data.getAudioName(iTestSpeaker, iWordType, iSample));
RecognitionResult result;
try
{
result = rec.Recognize();
}
catch (Exception e)
{
System.Diagnostics.Debug.WriteLine(e.Message);
System.Diagnostics.Debug.WriteLine(data.getAudioName(iTestSpeaker, iWordType, iSample));
continue;
}
//System.Diagnostics.Debug.WriteLine("recognising");
if (result == null)
{
ConfusionMatrix[NTrainingSpeakers - 1, iTestSpeaker, MSample * (iWordType - 1) + iSample - 1, numberOfAlternates * MWordTypes].num++;
}
else
{
string[] word = result.Alternates[0].Words[0].Text.Split('_');
word = wordTypeResult(2, result);
//if (result.Alternates.Count > 2)
// System.Diagnostics.Debug.WriteLine("No. of results found for wordType " + iWordType + "sample " + iSample + " = " + result.Alternates[2].Confidence);
ConfusionMatrix[NTrainingSpeakers - 1, iTestSpeaker, MSample * (iWordType - 1) + iSample - 1, numberOfAlternates * (Array.IndexOf(data.listOfWords.ToArray(), word[0]) - 1) + int.Parse(word[1])].num++;
if (word[0].Equals(data.listOfWords[iWordType]))
SampleAccuracy[NTrainingSpeakers - 1, NTrainingSamplesPerSpeaker - 1, Array.IndexOf(V, vocabSize), iTestSpeaker, iWordType - 1, iSample - 1].num++;
}
/*
if (result != null && result.Alternates[0].Words[0].Text.Split('_')[0].Equals(data.listOfWords[iWordType]))
{
SampleAccuracy[NTrainingSpeakers - 1, NTrainingSamplesPerSpeaker - 1, Array.IndexOf(V, vocabSize), iTestSpeaker, iWordType - 1, iSample - 1].num++;
//System.Diagnostics.Debug.WriteLine(NTrainingSpeakers + "_" + NTrainingSamplesPerSpeaker + "_" + Array.IndexOf(V, vocabSize) + "_" + iTestSpeaker + "_" + iWordType + "_" + iSample);
}*/
SampleAccuracy[NTrainingSpeakers - 1, NTrainingSamplesPerSpeaker - 1, Array.IndexOf(V, vocabSize), iTestSpeaker, iWordType - 1, iSample - 1].den++;
}
gc.Destroy();
}
}
}
}
// end testing
}
}
}
}
}
示例8: Recognize
/// <summary>
/// Recognizes the fixed phrase "宝塔镇河妖" from d:\output.mp3 using a zh-CN
/// recognizer and returns the recognized text, or an empty string when nothing
/// was recognized.
/// </summary>
/// <returns>The recognized text, or <see cref="string.Empty"/>.</returns>
private string Recognize()
{
// NOTE(review): SetInputToWaveFile expects WAV-format audio; pointing it
// at an .mp3 file will normally fail at runtime — confirm the file format.
using (SpeechRecognitionEngine sre = new SpeechRecognitionEngine(new CultureInfo("zh-CN")))
{
// Build a grammar containing only the single expected phrase.
GrammarBuilder builder = new GrammarBuilder();
builder.Append(new Choices(new string[] { "宝塔镇河妖" }));

// Create the Grammar instance.
Grammar grammar = new Grammar(builder);
sre.LoadGrammar(grammar);

// Configure the input to the recognizer.
sre.SetInputToWaveFile("d:\\output.mp3");

RecognitionResult result = sre.Recognize();
if (result == null)
{
return string.Empty;
}
return result.Text;
}
}
示例9: button3_Click
/// <summary>
/// Transcribes a fixed .wav file with free dictation and shows the recognized
/// text (or an error message) on the button itself.
/// </summary>
private void button3_Click(object sender, EventArgs e)
{
// FIX: the engine is IDisposable and was never disposed; wrap in using.
using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine())
{
Grammar dictationGrammar = new DictationGrammar();
recognizer.LoadGrammar(dictationGrammar);
try
{
button3.Text = "Recognizing";
recognizer.SetInputToWaveFile("c:\\InMind\\temp\\fromClient3.wav");
RecognitionResult result = recognizer.Recognize();
// FIX: Recognize() returns null when nothing is recognized; the
// resulting NullReferenceException on result.Text was NOT covered
// by the InvalidOperationException catch below and crashed the app.
button3.Text = result != null ? result.Text : string.Empty;
}
catch (InvalidOperationException exception)
{
// FIX: corrected "aduio" typo in the user-facing error message.
button3.Text = String.Format("Could not recognize input from default audio device. Is a microphone or sound card available?\r\n{0} - {1}.", exception.Source, exception.Message);
}
finally
{
recognizer.UnloadAllGrammars();
}
}
}