本文整理汇总了C#中SpeechRecognitionEngine.SetInputToAudioStream方法的典型用法代码示例。如果您正苦于以下问题:C# SpeechRecognitionEngine.SetInputToAudioStream方法的具体用法?C# SpeechRecognitionEngine.SetInputToAudioStream怎么用?C# SpeechRecognitionEngine.SetInputToAudioStream使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类SpeechRecognitionEngine
的用法示例。
在下文中一共展示了SpeechRecognitionEngine.SetInputToAudioStream方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: StartListening
public void StartListening()
{
    // Speech recognition is unavailable without recognizer info; do nothing.
    if (_ri == null)
    {
        return;
    }

    _speechEngine = new SpeechRecognitionEngine(_ri.Id);

    // Load the grammar from the embedded XML grammar definition resource.
    using (var grammarStream = new MemoryStream(Encoding.ASCII.GetBytes(KAIT.Kinect.Service.Properties.Resources.SpeechGrammar)))
    {
        _speechEngine.LoadGrammar(new Grammar(grammarStream));
    }

    _speechEngine.SpeechRecognized += _speechEngine_SpeechRecognized;
    _speechEngine.SpeechRecognitionRejected += _speechEngine_SpeechRecognitionRejected;

    // Tell the converter stream that speech capture is now active.
    _convertStream.SpeechActive = true;

    // 16 kHz, 16-bit mono PCM input from the Kinect audio pipeline;
    // RecognizeMode.Multiple keeps the engine listening after each result.
    _speechEngine.SetInputToAudioStream(
        _convertStream,
        new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
    _speechEngine.RecognizeAsync(RecognizeMode.Multiple);
}
示例2: KinectSensorInfo
public KinectSensorInfo(SpeechRecognitionEngine speechRecognitionEngine)
{
// Binds the first attached Kinect sensor to the supplied speech recognition
// engine: enables color capture only, tunes the microphone array, then routes
// the Kinect audio stream into the recognizer.
_face = new FaceDetectionRecognition();
_speechRecognitionEngine = speechRecognitionEngine;
_sensor = KinectSensor.KinectSensors.FirstOrDefault();
if (_sensor == null)
{
throw new IDIRuntimeException("Can't find kinect sensor, is it connected?", null);
}
// Only the color stream is needed here; skeleton and depth are explicitly disabled.
_sensor.ColorStream.Enable(ColorImageFormat.RgbResolution1280x960Fps12);
_sensor.SkeletonStream.Disable();
_sensor.DepthStream.Disable();
var audioSource = _sensor.AudioSource;
// Adaptive beam lets the array follow the active speaker; echo cancellation
// with suppression plus noise suppression clean the signal before recognition.
audioSource.BeamAngleMode = BeamAngleMode.Adaptive;
audioSource.EchoCancellationMode = EchoCancellationMode.CancellationAndSuppression;
audioSource.NoiseSuppression = true;
// The sensor is started before its audio source is opened for streaming.
_sensor.Start();
var kinectStream = audioSource.Start();
// Kinect audio format: 16 kHz, 16-bit, mono PCM (32000 B/s average, block align 2).
_speechRecognitionEngine.SetInputToAudioStream(kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
}
示例3: SpeechRecognizer
public SpeechRecognizer(string file, KinectSensor sensor)
{
    this.grammarFile = file;
    this.kinectSensor = sensor;

    audioSource = kinectSensor.AudioSource;
    audioSource.AutomaticGainControlEnabled = false;
    audioSource.BeamAngleMode = BeamAngleMode.Adaptive;

    // A recognizer qualifies when it advertises Kinect support and uses en-US.
    Func<RecognizerInfo, bool> matchingFunc = info =>
    {
        string kinectFlag;
        info.AdditionalInfo.TryGetValue("Kinect", out kinectFlag);
        return "True".Equals(kinectFlag, StringComparison.InvariantCultureIgnoreCase)
            && "en-US".Equals(info.Culture.Name, StringComparison.InvariantCultureIgnoreCase);
    };

    var recognizerInfo = SpeechRecognitionEngine.InstalledRecognizers().FirstOrDefault(matchingFunc);
    if (recognizerInfo == null)
    {
        // No suitable recognizer installed; leave the engine uninitialized.
        return;
    }

    speechRecognitionEngine = new SpeechRecognitionEngine(recognizerInfo.Id);
    speechRecognitionEngine.LoadGrammar(new Grammar(grammarFile));

    // Start the Kinect audio source and feed 16 kHz, 16-bit mono PCM to the engine.
    audioStream = audioSource.Start();
    speechRecognitionEngine.SetInputToAudioStream(
        audioStream,
        new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

    speechRecognitionEngine.AudioStateChanged += onAudioStateChanged;
    speechRecognitionEngine.SpeechRecognized += onSpeechRecognized;
    speechRecognitionEngine.RecognizeCompleted += onSpeechRecognizeCompleted;
    speechRecognitionEngine.EmulateRecognizeCompleted += onEmulateRecognizeCompleted;
}
示例4: StartListening
public void StartListening()
{
    // Nothing to listen with when recognizer info is missing.
    if (_ri == null)
    {
        //throw new InvalidOperationException("RecognizerInfo cannot be null");
        return;
    }

    _speechEngine = new SpeechRecognitionEngine(_ri.Id);

    // Build the grammar from the embedded XML grammar definition resource.
    using (var grammarDefinition = new MemoryStream(Encoding.ASCII.GetBytes(KAIT.Kinect.Service.Properties.Resources.SpeechGrammar)))
    {
        _speechEngine.LoadGrammar(new Grammar(grammarDefinition));
    }

    _speechEngine.SpeechRecognized += _speechEngine_SpeechRecognized;
    _speechEngine.SpeechRecognitionRejected += _speechEngine_SpeechRecognitionRejected;

    // Let the converter stream know speech capture is going active.
    _convertStream.SpeechActive = true;

    // For long recognition sessions (a few hours or more), turning off acoustic
    // model adaptation can prevent recognition accuracy from degrading over time:
    ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);

    // 16 kHz, 16-bit mono PCM input; recognize continuously.
    _speechEngine.SetInputToAudioStream(
        _convertStream,
        new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
    _speechEngine.RecognizeAsync(RecognizeMode.Multiple);
    //_isInTrainingMode = true;
}
示例5: worker_DoWork
private void worker_DoWork(object sender, DoWorkEventArgs e)
{
// Background-worker entry point: sets up and starts continuous Kinect v1 speech
// recognition on a dedicated audio thread. Does nothing for other device types.
Thread.CurrentThread.Name = "Kinect audio thread";
if(_device.Type == DeviceType.KINECT_1)
{
SpeechRecognizer = new SpeechRecognitionEngine(recognizerInfo.Id);
SpeechRecognizer.LoadGrammar(GetCurrentGrammar());
SpeechRecognizer.SpeechRecognized += SreSpeechRecognized;
SpeechRecognizer.SpeechHypothesized += SreSpeechHypothesized;
SpeechRecognizer.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
//set sensor audio source to variable
audioSource = _device.sensor.AudioSource;
//Set the beam angle mode - the direction the audio beam is pointing
//we want it to be set to adaptive
audioSource.BeamAngleMode = BeamAngleMode.Adaptive;
//start the audiosource
var kinectStream = audioSource.Start();
//configure incoming audio stream (16 kHz, 16-bit mono PCM)
SpeechRecognizer.SetInputToAudioStream(
kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
//make sure the recognizer does not stop after completing
SpeechRecognizer.RecognizeAsync(RecognizeMode.Multiple);
//reduce background and ambient noise for better accuracy
// NOTE(review): these audio-source tweaks run after RecognizeAsync has already
// started — confirm the ordering is intentional.
_device.sensor.AudioSource.EchoCancellationMode = EchoCancellationMode.None;
_device.sensor.AudioSource.AutomaticGainControlEnabled = false;
audioStarted = true;
}
Console.WriteLine("END OF WORKER AUDIO");
}
示例6: VoiceControl
/// <summary>
/// Starts up the SkeletonSlam class: enables smoothed skeleton tracking and
/// begins continuous speech recognition from the Kinect microphone array.
/// </summary>
public VoiceControl()
{
kinectSensor = KinectSensor.KinectSensors[0];
// Skeleton smoothing parameters: moderate smoothing/correction/prediction with
// a small jitter radius to damp sensor noise. (The braced block is a plain
// statement block assigning properties, not an object initializer.)
TransformSmoothParameters smoothingParam = new TransformSmoothParameters();
{
smoothingParam.Smoothing = 0.5f;
smoothingParam.Correction = 0.5f;
smoothingParam.Prediction = 0.5f;
smoothingParam.JitterRadius = 0.05f;
smoothingParam.MaxDeviationRadius = 0.04f;
};
kinectSensor.SkeletonStream.Enable(smoothingParam);
kinectSensor.SkeletonFrameReady += getSkeleton;
sre = CreateSpeechRecognizer();
kinectSensor.Start();
// Feed the recognizer 16 kHz, 16-bit mono PCM from the Kinect audio source;
// RecognizeMode.Multiple keeps recognition running after each result.
sre.SetInputToAudioStream(kinectSensor.AudioSource.Start(),
new SpeechAudioFormatInfo(
EncodingFormat.Pcm, 16000, 16, 1,
32000, 2, null));
sre.RecognizeAsync(RecognizeMode.Multiple);
reset();
}
示例7: SpeechManager
SpeechManager(String id, Stream audioStream, Stream grammar)
{
    // Build an engine for the given recognizer id and attach the supplied grammar.
    engine = new SpeechRecognitionEngine(id);
    var loadedGrammar = new Grammar(grammar);
    engine.LoadGrammar(loadedGrammar);

    // Feed the engine 16 kHz, 16-bit mono PCM audio from the provided stream.
    var format = new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null);
    engine.SetInputToAudioStream(audioStream, format);

    engine.SpeechRecognized += OnSpeechRecognized;
}
示例8: Main
static void Main()
{
    using (var source = new KinectAudioSource())
    {
        // AGC must be off for speech recognition; Optibeam array-only mode (no AEC).
        source.FeatureMode = true;
        source.AutomaticGainControl = false;
        source.SystemMode = SystemMode.OptibeamArrayOnly;

        RecognizerInfo ri = GetKinectRecognizer();
        if (ri == null)
        {
            Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
            return;
        }
        Console.WriteLine("Using: {0}", ri.Name);

        using (var sre = new SpeechRecognitionEngine(ri.Id))
        {
            // Voice commands this sample listens for.
            var commands = new Choices();
            foreach (var phrase in new[] { "activate", "off", "open", "manual", "hold", "land", "stabilize" })
            {
                commands.Add(phrase);
            }

            // Match the grammar culture to the recognizer's in case we are running
            // in a different culture.
            var gb = new GrammarBuilder { Culture = ri.Culture };
            gb.Append(commands);

            // Create the actual Grammar instance and load it into the recognizer.
            sre.LoadGrammar(new Grammar(gb));
            sre.SpeechRecognized += SreSpeechRecognized;
            sre.SpeechRecognitionRejected += SreSpeechRejected;

            using (Stream s = source.Start())
            {
                // 16 kHz, 16-bit mono PCM from the Kinect microphone array.
                sre.SetInputToAudioStream(
                    s,
                    new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

                Console.WriteLine("Recognizing... Press ENTER to stop");
                sre.RecognizeAsync(RecognizeMode.Multiple);
                Console.ReadLine();
                Console.WriteLine("Stopping recognizer ...");
                sre.RecognizeAsyncStop();
            }
        }
    }
}
示例9: Main
static void Main(string[] args)
{
    using (var source = new KinectAudioSource())
    {
        source.FeatureMode = true;
        source.AutomaticGainControl = false; // Important to turn this off for speech recognition
        source.SystemMode = SystemMode.OptibeamArrayOnly; // No AEC for this sample

        RecognizerInfo ri = GetKinectRecognizer();
        if (ri == null)
        {
            Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
            return;
        }
        Console.WriteLine("Using: {0}", ri.Name);

        using (var sre = new SpeechRecognitionEngine(ri.Id))
        {
            // The three color words this sample listens for.
            var colors = new Choices();
            foreach (var color in new[] { "red", "green", "blue" })
            {
                colors.Add(color);
            }

            // Match the grammar culture to the recognizer's in case we are running
            // in a different culture.
            var gb = new GrammarBuilder { Culture = ri.Culture };
            gb.Append(colors);

            // Create the actual Grammar instance and load it into the recognizer.
            sre.LoadGrammar(new Grammar(gb));
            sre.SpeechRecognized += SreSpeechRecognized;
            sre.SpeechHypothesized += SreSpeechHypothesized;
            sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

            using (Stream s = source.Start())
            {
                // 16 kHz, 16-bit mono PCM from the Kinect microphone array.
                sre.SetInputToAudioStream(
                    s,
                    new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

                Console.WriteLine("Recognizing. Say: 'red', 'green' or 'blue'. Press ENTER to stop");
                sre.RecognizeAsync(RecognizeMode.Multiple);
                Console.ReadLine();
                Console.WriteLine("Stopping recognizer ...");
                sre.RecognizeAsyncStop();
            }
        }
    }
}
示例10: StartSpeechRecognition
public void StartSpeechRecognition(Grammar grammer, System.EventHandler<System.Speech.Recognition.SpeechRecognizedEventArgs> speechRecognised, EventHandler<SpeechHypothesizedEventArgs> speechHypothesised, EventHandler<SpeechRecognitionRejectedEventArgs> speechRejected)
{
    // Build the recognizer with the supplied grammar and event callbacks.
    _speechRecogniser = CreateSpeechRecognizer(speechRecognised, speechHypothesised, speechRejected, grammer);

    // Start the sensor, point the adaptive beam at the speaker, and open the audio stream.
    _controller.Sensor.Start();
    _controller.AudioSource.BeamAngleMode = BeamAngleMode.Adaptive;
    var micStream = _controller.AudioSource.Start();

    // 16 kHz, 16-bit mono PCM input; recognize continuously.
    _speechRecogniser.SetInputToAudioStream(micStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
    _speechRecogniser.RecognizeAsync(RecognizeMode.Multiple);

    // Disable echo cancellation and automatic gain control after starting,
    // matching the other Kinect speech samples.
    _controller.AudioSource.EchoCancellationMode = EchoCancellationMode.None;
    _controller.AudioSource.AutomaticGainControlEnabled = false;
}
示例11: Start
public void Start()
{
    // Already running; nothing to do.
    if (_sre != null)
        return;

    _sre = StartKinect();
    _sre.SpeechRecognized += SpeechRecognized;
    _sre.SpeechRecognitionRejected += SpeechRejected;

    // Prefer the Kinect microphone array when a sensor is present
    // (16 kHz, 16-bit mono PCM); otherwise fall back to the default audio device.
    if (_sensor == null)
    {
        _sre.SetInputToDefaultAudioDevice();
    }
    else
    {
        _sre.SetInputToAudioStream(_sensor.AudioSource.Start(), new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
    }

    RecogStackClear();
    _listenCtx.BalloonTip(5, "KinectHaus\u2122", string.Format("My name is {0}", RecogIdle.Name), ListenIcon.Info);
    _sre.RecognizeAsync(RecognizeMode.Multiple);
}
示例12: initSpeech
public void initSpeech()
{
    // Configure the Kinect microphone array: AGC off (recommended for speech
    // recognition, see the other samples) and Optibeam array-only mode (no AEC).
    kinectSource = new KinectAudioSource();
    kinectSource.FeatureMode = true;
    kinectSource.AutomaticGainControl = false;
    kinectSource.SystemMode = SystemMode.OptibeamArrayOnly;

    // FIX: FirstOrDefault returns null when no installed recognizer matches
    // RecognizerId; the original dereferenced rec.Id unconditionally and would
    // throw NullReferenceException. Guard and report instead, consistent with
    // the other examples in this file.
    var rec = SpeechRecognitionEngine.InstalledRecognizers()
        .FirstOrDefault(r => r.Id == RecognizerId);
    if (rec == null)
    {
        Console.WriteLine("Could not find speech recognizer with id '{0}'.", RecognizerId);
        return;
    }

    speechEngine = new SpeechRecognitionEngine(rec.Id);

    // Voice command phrases. ("scalpal" looks like a typo for "scalpel" but is
    // kept as-is because the recognized text is matched elsewhere.)
    var choices = new Choices();
    choices.Add("select scalpal");
    choices.Add("select syringe");
    choices.Add("select suction");
    choices.Add("select hand");
    choices.Add("nurse scalpal");
    choices.Add("nurse syringe");
    choices.Add("nurse suction");
    choices.Add("nurse hand");
    choices.Add("show console");
    choices.Add("hide console");
    choices.Add("begin incision");
    choices.Add("end incision");
    choices.Add("inject");

    // Match the grammar culture to the recognizer's culture.
    GrammarBuilder gb = new GrammarBuilder();
    gb.Culture = rec.Culture;
    gb.Append(choices);
    speechEngine.LoadGrammar(new Grammar(gb));

    speechEngine.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(sre_SpeechHypothesized);
    speechEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
    speechEngine.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(sre_SpeechRecognitionRejected);

    Console.WriteLine("Recognizing Speech");

    // Feed 16 kHz, 16-bit mono PCM from the Kinect audio source; recognize continuously.
    stream = kinectSource.Start();
    speechEngine.SetInputToAudioStream(stream,
        new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
    speechEngine.RecognizeAsync(RecognizeMode.Multiple);
}
示例13: InicializarReconhecimentoVoz
public void InicializarReconhecimentoVoz(Stream fluxoAudio)
{
    // Initializes voice recognition from the given audio stream.
    // A recognizer qualifies when it advertises Kinect support and uses en-US.
    Func<RecognizerInfo, bool> encontrarIdioma = reconhecedor =>
    {
        string value;
        reconhecedor.AdditionalInfo.TryGetValue("Kinect", out value);
        return "True".Equals(value, StringComparison.InvariantCultureIgnoreCase)
            && "en-US".Equals(reconhecedor.Culture.Name, StringComparison.InvariantCultureIgnoreCase);
    };

    RecognizerInfo recognizerInfo = SpeechRecognitionEngine.InstalledRecognizers().FirstOrDefault(encontrarIdioma);

    // FIX: FirstOrDefault returns null when no matching recognizer is installed;
    // the original dereferenced recognizerInfo.Id unconditionally and would throw
    // NullReferenceException. Fail with a clear message instead.
    if (recognizerInfo == null)
        throw new InvalidOperationException("No Kinect-enabled en-US speech recognizer is installed.");

    EngenhoReconhecimentoVoz = new SpeechRecognitionEngine(recognizerInfo.Id);
    EngenhoReconhecimentoVoz.LoadGrammar(MetodoGerarGramatica());

    // 16 kHz, 16-bit mono PCM from the Kinect audio stream; recognize continuously.
    EngenhoReconhecimentoVoz.SetInputToAudioStream(fluxoAudio, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
    EngenhoReconhecimentoVoz.RecognizeAsync(RecognizeMode.Multiple);
}
开发者ID:gilgaljunior,项目名称:CrieAplicacoesInterativascomoMicrosoftKinect,代码行数:17,代码来源:InicializadorKinect.cs
示例14: SpeechRecognizer
public SpeechRecognizer(KinectSensor sensor)
{
// Creates a recognizer bound to the given Kinect sensor, loads the grammar
// embedded in application resources, and starts continuous recognition.
this.sensor = sensor;
// NOTE(review): GetKinectRecognizer() may return null if no Kinect speech
// recognizer is installed; the engine constructor would then throw — confirm
// callers only invoke this when the recognizer pack is present.
RecognizerInfo ri = GetKinectRecognizer();
this.speechEngine = new SpeechRecognitionEngine(ri);
// Create a grammar from grammar definition XML file.
using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
{
var g = new Grammar(memoryStream);
speechEngine.LoadGrammar(g);
}
speechEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SpeechRecognized);
speechEngine.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(SpeechRejected);
// Start the Kinect audio source and feed 16 kHz, 16-bit mono PCM to the engine;
// RecognizeMode.Multiple keeps recognition running after each result.
speechEngine.SetInputToAudioStream(
sensor.AudioSource.Start(), new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
speechEngine.RecognizeAsync(RecognizeMode.Multiple);
}
示例15: Record
void Record()
{
    KinectAudioSource source = kinectSensor.AudioSource;

    // A recognizer qualifies when it advertises Kinect support and uses en-US.
    Func<RecognizerInfo, bool> matchingFunc = candidate =>
    {
        string kinectFlag;
        candidate.AdditionalInfo.TryGetValue("Kinect", out kinectFlag);
        return "True".Equals(kinectFlag, StringComparison.InvariantCultureIgnoreCase)
            && "en-US".Equals(candidate.Culture.Name, StringComparison.InvariantCultureIgnoreCase);
    };

    var recognizerInfo = SpeechRecognitionEngine.InstalledRecognizers().FirstOrDefault(matchingFunc);
    if (recognizerInfo == null)
        return;

    speechRecognitionEngine = new SpeechRecognitionEngine(recognizerInfo.Id);

    // Build a grammar from the shared choices, in the recognizer's culture.
    var gb = new GrammarBuilder { Culture = recognizerInfo.Culture };
    gb.Append(choices);
    speechRecognitionEngine.LoadGrammar(new Grammar(gb));

    // AGC off for recognition accuracy; adaptive beam follows the active speaker.
    source.AutomaticGainControlEnabled = false;
    source.BeamAngleMode = BeamAngleMode.Adaptive;

    using (Stream sourceStream = source.Start())
    {
        // 16 kHz, 16-bit mono PCM from the Kinect audio source.
        speechRecognitionEngine.SetInputToAudioStream(
            sourceStream,
            new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

        // Synchronous recognition loop: raise OrderDetected for confident
        // results until the owner clears isRunning.
        while (isRunning)
        {
            RecognitionResult result = speechRecognitionEngine.Recognize();
            if (result != null && OrderDetected != null && result.Confidence > 0.7)
                OrderDetected(result.Text);
        }
    }
}