本文整理汇总了C#中SpeechRecognitionEngine.SetInputToDefaultAudioDevice方法的典型用法代码示例。如果您正苦于以下问题:C# SpeechRecognitionEngine.SetInputToDefaultAudioDevice方法的具体用法?C# SpeechRecognitionEngine.SetInputToDefaultAudioDevice怎么用?C# SpeechRecognitionEngine.SetInputToDefaultAudioDevice使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类SpeechRecognitionEngine
的用法示例。
在下文中一共展示了SpeechRecognitionEngine.SetInputToDefaultAudioDevice方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: initRS
// Initializes speech recognition: builds an en-US command grammar
// ("Hello", "Jump", "Left", "Right") and starts continuous recognition
// on the default microphone. Errors are reported via label1.
public void initRS()
{
    try
    {
        // Recognizer bound to the en-US culture.
        var recognizer = new SpeechRecognitionEngine(new CultureInfo("en-US"));

        // Command vocabulary the engine listens for.
        var commands = new Choices("Hello", "Jump", "Left", "Right");

        var builder = new GrammarBuilder
        {
            Culture = new System.Globalization.CultureInfo("en-US")
        };
        builder.Append(commands);
        recognizer.LoadGrammar(new Grammar(builder));

        recognizer.SpeechRecognized += sre_SpeechRecognized;
        recognizer.SetInputToDefaultAudioDevice();

        // Multiple mode: keep recognizing instead of stopping after one result.
        recognizer.RecognizeAsync(RecognizeMode.Multiple);
    }
    catch (Exception e)
    {
        label1.Text = "init RS Error : " + e.ToString();
    }
}
示例2: InitSpeechRecognition
// Creates the recognition engine, points it at the default microphone,
// and wires the recognized-speech callback.
private void InitSpeechRecognition()
{
    // Engine for the system's default recognizer.
    speechRecognizer = new SpeechRecognitionEngine();

    // Capture audio from the default input device.
    speechRecognizer.SetInputToDefaultAudioDevice();

    // Method-group subscription; invoked on each recognized phrase.
    speechRecognizer.SpeechRecognized += speechRecognizer_SpeechRecognized;
}
示例3: Main
// Entry point: announces startup via the synthesizer, then listens for the
// start/stop command phrases until another handler sets `done`.
static void Main(string[] args)
{
    try
    {
        // Route synthesizer output to the default speakers and announce startup.
        ss.SetOutputToDefaultAudioDevice();
        Console.WriteLine("\n(Speaking: I am awake)");
        ss.Speak("I am awake");

        // Recognizer for US English, fed by the default microphone.
        CultureInfo ci = new CultureInfo("en-us");
        sre = new SpeechRecognitionEngine(ci);
        sre.SetInputToDefaultAudioDevice();
        sre.SpeechRecognized += sre_SpeechRecognized;

        // Command phrases the recognizer listens for.
        Choices ch_StartStopCommands = new Choices();
        ch_StartStopCommands.Add("Alexa record");
        ch_StartStopCommands.Add("speech off");
        ch_StartStopCommands.Add("klatu barada nikto");
        GrammarBuilder gb_StartStop = new GrammarBuilder();
        gb_StartStop.Append(ch_StartStopCommands);
        Grammar g_StartStop = new Grammar(gb_StartStop);
        sre.LoadGrammarAsync(g_StartStop);

        sre.RecognizeAsync(RecognizeMode.Multiple); // multiple grammars

        // FIX: the original `while (done == false) { ; }` was a hot spin that
        // pinned a CPU core; sleep briefly between polls of the flag.
        // NOTE(review): `done` is presumably set from the recognition callback
        // thread — consider marking it volatile; confirm its declaration.
        while (done == false)
        {
            System.Threading.Thread.Sleep(50);
        }

        Console.WriteLine("\nHit <enter> to close shell\n");
        Console.ReadLine();
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.Message);
        Console.ReadLine();
    }
}
示例4: SpeechConversation
// Builds a conversation wrapper around a synthesizer and a recognizer.
// Either dependency may be supplied by the caller; when omitted, a default
// is constructed (synthesizer on the default audio output; en-US recognizer
// loaded with plain and spelling dictation grammars on the default mic).
public SpeechConversation(SpeechSynthesizer speechSynthesizer = null, SpeechRecognitionEngine speechRecognition = null)
{
    SessionStorage = new SessionStorage();

    if (speechSynthesizer == null)
    {
        speechSynthesizer = new SpeechSynthesizer();
        speechSynthesizer.SetOutputToDefaultAudioDevice();
    }
    _speechSynthesizer = speechSynthesizer;

    if (speechRecognition == null)
    {
        speechRecognition = new SpeechRecognitionEngine(
            new System.Globalization.CultureInfo("en-US")
        );

        // Free-text dictation grammar.
        DictationGrammar defaultDictationGrammar = new DictationGrammar
        {
            Name = "default dictation",
            Enabled = true
        };
        speechRecognition.LoadGrammar(defaultDictationGrammar);

        // Letter-by-letter spelling dictation grammar.
        DictationGrammar spellingDictationGrammar = new DictationGrammar("grammar:dictation#spelling")
        {
            Name = "spelling dictation",
            Enabled = true
        };
        speechRecognition.LoadGrammar(spellingDictationGrammar);

        // Feed the recognizer from the default microphone.
        speechRecognition.SetInputToDefaultAudioDevice();
    }
    _speechRecognition = speechRecognition;
}
示例5: SpeechInput
// Sets up voice control for the music player: loads the command grammar,
// starts continuous recognition on the default microphone, and reloads the
// grammar whenever the music collection's song list changes.
public SpeechInput(Settings settings, MusicList musicCollection, string playerPath) {
    ModeTimer = new CommandModeTimer();
    RNG = new Random();
    AppSettings = settings;
    SRecognize = new SpeechRecognitionEngine();
    Player = new Aimp3Player(playerPath);
    if(musicCollection != null) {
        MusicCollection = musicCollection;
    } else {
        throw new ArgumentNullException(nameof(musicCollection));
    }
    InitCommands();
    // FIX: subscribe the handler BEFORE RecognizeAsync starts. The original
    // attached it after recognition had begun, so phrases recognized in that
    // window were silently dropped.
    SRecognize.SpeechRecognized += SRecognize_SpeechRecognized;
    try {
        LoadGrammar();
        SRecognize.SetInputToDefaultAudioDevice();
        SRecognize.RecognizeAsync(RecognizeMode.Multiple);
    } catch(Exception e) {
        System.Windows.Forms.MessageBox.Show("Error while starting SpeechInput\n" + e.ToString());
    }
    // Rebuild the grammar when the song list is updated.
    MusicCollection.SongListUpdated += (s, a) => LoadGrammar();
}
示例6: MainWindow
// Window constructor: currently only exercises the JSON config handler;
// the speech-recognition setup below the `return` never runs.
public MainWindow()
{
    InitializeComponent();
    // Persist a default config under %APPDATA%\LeagueTag.
    var config = new JsonConfigHandler( System.IO.Path.Combine( Environment.GetFolderPath( Environment.SpecialFolder.ApplicationData ), "LeagueTag" ) );
    //config.Populate();
    config.Save();
    //config.Save(
    // NOTE(review): this early return makes everything below unreachable dead
    // code — it looks like a debugging leftover for testing config.Save();
    // confirm whether the speech setup should be re-enabled.
    return;
    var engine = new SpeechRecognitionEngine();
    var builder = new GrammarBuilder();
    // Grammar: the word "tag" followed by either "baron" or "dragon".
    builder.Append( "tag" );
    builder.Append( new Choices( "baron", "dragon" ) );
    engine.RequestRecognizerUpdate();
    engine.LoadGrammar( new Grammar( builder ) );
    engine.SpeechRecognized += engine_SpeechRecognized;
    // Default microphone, continuous recognition.
    engine.SetInputToDefaultAudioDevice();
    engine.RecognizeAsync( RecognizeMode.Multiple );
    CompositionTarget.Rendering += CompositionTarget_Rendering;
    this.DataContext = this;
}
示例7: SpeechRecognizer
/*
* SpeechRecognizer
*
* @param GName - grammar file name
*/
public SpeechRecognizer(string GName, int minConfidence)
{
//creates the speech recognizer engine
sr = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("pt-PT"));
sr.SetInputToDefaultAudioDevice();
Console.WriteLine("confiança : " + minConfidence);
sr.UpdateRecognizerSetting("CFGConfidenceRejectionThreshold", minConfidence);
Grammar gr = null;
//verifies if file exist, and loads the Grammar file, else load defualt grammar
if (System.IO.File.Exists(GName))
{
gr = new Grammar(GName);
gr.Enabled = true;
}
else
Console.WriteLine("Can't read grammar file");
//load Grammar to speech engine
sr.LoadGrammar(gr);
//assigns a method, to execute when speech is recognized
sr.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);
//assigns a method, to execute when speech is NOT recognized
sr.SpeechRecognitionRejected +=
new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRecognitionRejected);
// Start asynchronous, continuous speech recognition.
sr.RecognizeAsync(RecognizeMode.Multiple);
}
示例8: MainWindow
/// <summary>
/// Initializes a new instance of the <see cref="MainWindow"/> class and
/// starts continuous Spanish (es-ES) dictation on the default microphone.
/// </summary>
public MainWindow()
{
    InitializeComponent();
    try
    {
        // Spanish recognizer (alternative cultures kept for reference):
        //speechRecognitionEngine = createSpeechEngine("de-DE");
        //speechRecognitionEngine = createSpeechEngine(CultureInfo.CurrentCulture.Name);
        speechRecognitionEngine = createSpeechEngine("es-ES");

        // Track microphone level changes.
        speechRecognitionEngine.AudioLevelUpdated += engine_AudioLevelUpdated;

        // Free-text dictation grammar plus the recognition callback.
        speechRecognitionEngine.LoadGrammar(new DictationGrammar());
        speechRecognitionEngine.SpeechRecognized += engine_SpeechRecognized;

        // Default microphone as input, then listen continuously.
        speechRecognitionEngine.SetInputToDefaultAudioDevice();
        speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message, "Voice recognition failed");
    }
}
示例9: InitializeSRE
// Creates and configures the recognition engine from the plugin grammars,
// starts continuous recognition, blocks until JarvisMain.stop is set, and
// returns the (still live) engine.
public static SpeechRecognitionEngine InitializeSRE()
{
    // FIX: the original wrapped the engine in `using (sre) { ... }`, which
    // disposed it before `return sre;` — callers received a disposed object.
    SpeechRecognitionEngine sre = new SpeechRecognitionEngine();

    //Set the audio device to the OS default
    sre.SetInputToDefaultAudioDevice();
    // Reset the Grammar
    sre.UnloadAllGrammars();
    // Load the plugins
    LoadPlugins();
    //Load all of the grammars
    foreach (IJarvisPlugin plugin in _plugins)
        sre.LoadGrammar(plugin.getGrammar());

    // FIX: attach the handler BEFORE starting recognition so early results
    // are not missed (the original subscribed after RecognizeAsync).
    sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(Engine.SpeechRecognized);

    //Set the recognition mode
    sre.RecognizeAsync(RecognizeMode.Multiple);

    // FIX: sleep between polls instead of the original hot spin loop.
    while (!Jarvis.JarvisMain.stop)
    {
        System.Threading.Thread.Sleep(50);
    }
    return sre;
}
示例10: VoiceRecognizer
// Sets up continuous en-US recognition against the command-pool grammar on
// the default microphone. IsSetup reflects whether initialization succeeded.
public VoiceRecognizer()
{
    try
    {
        // Create a new SpeechRecognitionEngine instance.
        voiceEngine = new SpeechRecognitionEngine(new CultureInfo("en-US"));
        // Setup the audio device
        voiceEngine.SetInputToDefaultAudioDevice();
        // Create the Grammar instance and load it into the speech recognition engine.
        Grammar g = new Grammar(CommandPool.BuildSrgsGrammar());
        voiceEngine.LoadGrammar(g);
        //voiceEngine.EndSilenceTimeout = new TimeSpan(0, 0, 1);
        // Register a handler for the SpeechRecognized event
        voiceEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
        // Start listening in multiple mode (that is, don't quit after a single recongition)
        voiceEngine.RecognizeAsync(RecognizeMode.Multiple);
        IsSetup = true;
    }
    catch (Exception)
    {
        // FIX: the exception was bound to an unused variable (compiler warning
        // CS0168) and discarded; drop the binding.
        // NOTE(review): the failure is still swallowed silently — consider
        // logging it so setup problems are diagnosable.
        IsSetup = false;
    }
}
示例11: btn_connect_Click
// Connects to the selected player at the given host/port, then starts
// continuous en-US voice recognition over the player's command grammar and
// minimizes to the taskbar.
private void btn_connect_Click(object sender, EventArgs e)
{
    // FIX: the TryParse result was ignored, so non-numeric text silently
    // produced port 0; validate and bail out with a message instead.
    ushort port;
    if (!ushort.TryParse(txt_port.Text, out port))
    {
        MessageBox.Show("Invalid port number: " + txt_port.Text);
        return;
    }
    try
    {
        current_player = new AssPlayer(players[cmb_players.SelectedItem.ToString()], txt_host.Text, port);
    }
    catch(Exception ex)
    {
        MessageBox.Show("Could not connect: " + ex.Message);
        return;
    }
    voice_threshold = (float)num_voice_threshold.Value;
    // en-US recognizer fed with the connected player's command grammar.
    recognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US"));
    Grammar player_gramar = prepare_grammar(current_player.commands);
    recognizer.LoadGrammar(player_gramar);
    recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
    recognizer.SetInputToDefaultAudioDevice();
    recognizer.RecognizeAsync(RecognizeMode.Multiple);
    // Hide the window; the app stays reachable via the taskbar icon.
    taskbar_icon.Visible = true;
    Hide();
}
示例12: PhraseBasedSpeechRecognizer
// Prepares the recognizer: an engine listening on the default microphone
// and an empty name→grammar map to be populated later.
public PhraseBasedSpeechRecognizer()
{
    // Engine fed by the system's default input device.
    _speechRecognizer = new SpeechRecognitionEngine();
    _speechRecognizer.SetInputToDefaultAudioDevice();

    // Grammars keyed by name; filled in by later registration calls.
    _grammars = new Dictionary<string, Grammar>();
}
示例13: GetSpeechRecognitionEngine
// Builds a dictation engine on the default microphone whose recognized
// text is echoed to the console: green for a usable result, red otherwise.
private static SpeechRecognitionEngine GetSpeechRecognitionEngine()
{
    var engine = new SpeechRecognitionEngine();
    engine.LoadGrammar(new DictationGrammar());
    engine.SetInputToDefaultAudioDevice();
    engine.SpeechRecognized += (s, e) =>
    {
        var text = e.Result == null ? null : e.Result.Text;
        if (String.IsNullOrEmpty(text))
        {
            // No usable result — report in red.
            using (new ConsoleForegroundColor(ConsoleColor.Red))
            {
                Console.WriteLine("Recognized text not available.");
            }
        }
        else
        {
            // Echo the recognized phrase in green.
            using (new ConsoleForegroundColor(ConsoleColor.Green))
            {
                Console.WriteLine(text);
            }
        }
    };
    return engine;
}
示例14: load_listen
// Wires up the voice interface: selects the configured synthesizer voice,
// builds a grammar from the profile's trigger phrases, and starts
// continuous recognition. Aborts with a message box when the profile has
// no triggers.
public void load_listen(VI_Profile profile, VI_Settings settings, ListView statusContainer)
{
    this.profile = profile;
    this.settings = settings;
    this.statusContainer = statusContainer;

    // Voice output comes from the profile's synthesizer.
    vi_syn = profile.synth;
    vi_syn.SelectVoice(settings.voice_info);

    vi_sre = new SpeechRecognitionEngine(settings.recognizer_info);

    // Collect every trigger phrase defined in the profile.
    var phrases = new List<string>();
    foreach (VI_Phrase trigger in profile.Profile_Triggers)
    {
        phrases.Add(trigger.value);
    }
    if (phrases.Count == 0)
    {
        MessageBox.Show("You need to add at least one Trigger");
        return;
    }

    // One-choice grammar built from the collected phrases.
    var phraseBuilder = new GrammarBuilder();
    phraseBuilder.Append(new Choices(phrases.ToArray()));
    vi_sre.LoadGrammar(new Grammar(phraseBuilder));

    // Wire recognition callbacks, then listen continuously on the default mic.
    vi_sre.SpeechRecognized += phraseRecognized;
    vi_sre.SpeechRecognitionRejected += _recognizer_SpeechRecognitionRejected;
    vi_sre.SetInputToDefaultAudioDevice();
    vi_sre.RecognizeAsync(RecognizeMode.Multiple);
}
示例15: Start
// Starts continuous en-US recognition plus a synthesizer, then blocks the
// calling thread until the "quit" command is recognized.
public void Start()
{
    try
    {
        // create the engine
        speechRecognitionEngine = createSpeechEngine("en-US");
        // hook to event
        speechRecognitionEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(engine_SpeechRecognized);
        // load dictionary
        loadGrammarAndCommands();
        // use the system's default microphone
        speechRecognitionEngine.SetInputToDefaultAudioDevice();
        // start listening
        speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
        // Create the speech synthesizer with a slightly slowed speaking rate.
        speechSynthesizer = new SpeechSynthesizer();
        speechSynthesizer.Rate = -5;
    }
    catch (Exception ex)
    {
        Console.WriteLine("Voice recognition failed " + ex.Message);
    }
    // FIX: the original spun a hot loop (100% CPU) and called ToLower() on
    // lastCommand, which throws NullReferenceException while lastCommand is
    // still null (before the first recognition). Use a null-safe,
    // case-insensitive comparison and sleep between polls.
    while (!string.Equals(lastCommand, "quit", StringComparison.OrdinalIgnoreCase))
    {
        System.Threading.Thread.Sleep(100);
    }
}