This article collects typical usage examples of the C# SpeechSynthesizer.SetOutputToAudioStream method. If you have been wondering what SpeechSynthesizer.SetOutputToAudioStream does, how to call it, or what working code looks like, the curated examples below should help. You can also explore the other members of the containing class, SpeechSynthesizer.
Seven code examples of SpeechSynthesizer.SetOutputToAudioStream are shown below, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better C# samples.
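Before looking at the individual examples, here is a minimal, self-contained sketch of the basic pattern. Note that SetOutputToAudioStream writes raw, headerless audio in the format you specify (unlike SetOutputToWaveStream, which writes a complete WAV file), so whoever consumes the stream must already know the format:

using System;
using System.IO;
using System.Speech.AudioFormat;
using System.Speech.Synthesis;

class Sketch
{
    static void Main()
    {
        using (var synth = new SpeechSynthesizer())
        using (var stream = new MemoryStream())
        {
            // 16 kHz, 16-bit mono PCM.
            var format = new SpeechAudioFormatInfo(16000, AudioBitsPerSample.Sixteen, AudioChannel.Mono);
            synth.SetOutputToAudioStream(stream, format);
            synth.Speak("Hello, world.");
            synth.SetOutputToNull(); // detach before reading the buffer
            Console.WriteLine("Rendered {0} bytes of raw PCM.", stream.Length);
        }
    }
}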
Example 1: SpeakMessage

public void SpeakMessage(AudioVideoFlow flow, string message)
{
    try
    {
        SpeechSynthesizer synth = new SpeechSynthesizer();
        SpeechAudioFormatInfo formatInfo = new SpeechAudioFormatInfo(16000, AudioBitsPerSample.Sixteen, Microsoft.Speech.AudioFormat.AudioChannel.Mono);
        SpeechSynthesisConnector connector = new SpeechSynthesisConnector();

        // Route the synthesizer's output into the connector, then attach the
        // connector to the call's audio flow.
        synth.SetOutputToAudioStream(connector.Stream, formatInfo);
        connector.AttachFlow(flow);
        connector.Start();

        // Tear everything down once the message has finished playing.
        synth.SpeakCompleted += new EventHandler<SpeakCompletedEventArgs>(
            (sender, args) =>
            {
                connector.Stop();
                synth.Dispose();
            });

        synth.SpeakAsync(message);
    }
    catch (Exception ex)
    {
        Console.WriteLine("Failed to play the message. {0}", ex);
    }
}
Example 2: SynthToCam

private static void SynthToCam(string text, CameraWindow cw)
{
    // 11,025 samples/s * 2 bytes per 16-bit mono sample = 22,050 average bytes per second.
    var synthFormat = new System.Speech.AudioFormat.SpeechAudioFormatInfo(System.Speech.AudioFormat.EncodingFormat.Pcm, 11025, 16, 1, 22050, 2, null);
    using (var synthesizer = new SpeechSynthesizer())
    {
        using (var waveStream = new MemoryStream())
        {
            //write some silence to the stream to allow camera to initialise properly
            var silence = new byte[1 * 22050];
            waveStream.Write(silence, 0, silence.Length);

            var pbuilder = new PromptBuilder();
            var pStyle = new PromptStyle
            {
                Emphasis = PromptEmphasis.Strong,
                Rate = PromptRate.Slow,
                Volume = PromptVolume.ExtraLoud
            };
            pbuilder.StartStyle(pStyle);
            pbuilder.StartParagraph();
            pbuilder.StartVoice(VoiceGender.Male, VoiceAge.Adult, 2);
            pbuilder.StartSentence();
            pbuilder.AppendText(text);
            pbuilder.EndSentence();
            pbuilder.EndVoice();
            pbuilder.EndParagraph();
            pbuilder.EndStyle();

            synthesizer.SetOutputToAudioStream(waveStream, synthFormat);
            synthesizer.Speak(pbuilder);
            synthesizer.SetOutputToNull();

            //write some silence to the stream to allow camera to end properly
            waveStream.Write(silence, 0, silence.Length);
            waveStream.Seek(0, SeekOrigin.Begin);

            var ds = new DirectStream(waveStream) { RecordingFormat = new WaveFormat(11025, 16, 1) };
            var talkTarget = TalkHelper.GetTalkTarget(cw.Camobject, ds);
            ds.Start();
            talkTarget.Start();
            while (ds.IsRunning)
            {
                Thread.Sleep(100);
            }
            ds.Stop();
            talkTarget.Stop();
            talkTarget = null;
            ds = null;
        }
    }
}
Example 3: AddEndpoint

public void AddEndpoint(string ID, Stream outstream)
{
    SpeechSynthesizer voice = new SpeechSynthesizer();

    // With no stream supplied, fall back to the default audio device;
    // otherwise render 16 kHz 16-bit mono PCM into the caller's stream.
    if (outstream == null)
        voice.SetOutputToDefaultAudioDevice();
    else
        voice.SetOutputToAudioStream(outstream, new System.Speech.AudioFormat.SpeechAudioFormatInfo(
            16000, System.Speech.AudioFormat.AudioBitsPerSample.Sixteen, System.Speech.AudioFormat.AudioChannel.Mono));

    //if (chkIVONA.Checked) voice.SelectVoice("IVONA 2 Amy");
    //else voice.SelectVoice("Microsoft Anna");

    voices.Add(ID, voice);
    outStreams.Add(ID, outstream);
}
Example 4: StartWithWelcome

public void StartWithWelcome(AudioVideoFlow flow, LyncServer server)
{
    _flow = flow;
    _server = server;

    //attach speech synthesis to the audio flow
    _speechSynthesisConnector.AttachFlow(_flow);
    _speechSynthesizer = new SpeechSynthesizer();
    _speechSynthesizer.SetOutputToAudioStream(_speechSynthesisConnector, audioformat);
    _speechSynthesizer.SelectVoice("Microsoft Hazel Desktop"); //slightly more English

    var toneController = new ToneController(); //this is for the DTMF tones
    toneController.AttachFlow(_flow);

    _speechSynthesisConnector.Start();
    _speechSynthesizer.Speak("Welcome to the UCMA IVR Demo!");
    SpeakMenuOptions();
    toneController.ToneReceived += toneController_ToneReceived;
}
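The ToneReceived handler itself is not part of this excerpt. A minimal sketch might look like the following; ToneControllerEventArgs and its Tone property are assumed from UCMA's Microsoft.Rtc.Collaboration.AudioVideo namespace, so verify the exact signature against your UCMA version:

// Sketch only: react to a DTMF digit reported by the ToneController.
private void toneController_ToneReceived(object sender, ToneControllerEventArgs e)
{
    Console.WriteLine("DTMF tone received: {0}", e.Tone);
    if (e.Tone == 1)
    {
        _speechSynthesizer.SpeakAsync("You pressed one.");
    }
}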
Example 5: OnLoad

protected override void OnLoad(EventArgs e)
{
    Visible = false;
    ShowInTaskbar = false;
    base.OnLoad(e);

    /*
     * Get all installed voices
     */
    var voices = speech.GetInstalledVoices();
    string voice = "";
    foreach (InstalledVoice v in voices)
    {
        if (v.Enabled)
            //voice = v.VoiceInfo.Name;
            Console.WriteLine(v.VoiceInfo.Name);
    }

    queuetimer = new System.Timers.Timer(250);
    queuetimer.Elapsed += (object sender, ElapsedEventArgs ev) =>
    {
        TTSRequest r;
        if (Queue.TryDequeue(out r))
        {
            Console.WriteLine("dequeuing off of concurrent queue...");
            if (r.Interrupt)
            {
                // stop current TTS
                if (IsSpeaking)
                {
                    //speech.StopSpeaking();
                }
                if (IsSounding)
                {
                    //sound.Stop();
                    if (sound.PlaybackState == PlaybackState.Playing)
                    {
                        sound.Stop();
                    }
                }
                // clear queue
                SpeechQueue.Clear();
            }
            if (!r.Reset)
            {
                SpeechQueue.Enqueue(r);
            }
            RequestCount++;
        }

        var eventdata = new Hashtable();
        eventdata.Add("ProcessedRequests", RequestCount);
        eventdata.Add("QueuedRequests", SpeechQueue.Count);
        eventdata.Add("IsSpeaking", IsSounding);
        InstrumentationEvent blam = new InstrumentationEvent();
        blam.EventName = "status";
        blam.Data = eventdata;
        NotifyGui(blam.EventMessage());
    };

    // when this timer fires, it will pull off of the speech queue and speak it
    // the long delay also adds a little pause between tts requests.
    speechtimer = new System.Timers.Timer(250);
    speechtimer.Elapsed += (object sender, ElapsedEventArgs ev) =>
    {
        if (!IsSpeaking)
        {
            if (SpeechQueue.Count > 0)
            {
                TTSRequest r = SpeechQueue.Dequeue();
                Console.WriteLine("dequeuing off of speech queue");
                IsSpeaking = true;
                speechtimer.Enabled = false;
                //speech.SpeakAsync(r.Text);
                //using (speech = new SpeechSynthesizer()) {
                speech = new SpeechSynthesizer();
                speech.SpeakCompleted += speech_SpeakCompleted;

                // 8 kHz, 8-bit mono A-law: 8,000 average bytes per second, block align 1.
                format = new SpeechAudioFormatInfo(EncodingFormat.ALaw, 8000, 8, 1, 8000, 1, null);
                //format = new SpeechAudioFormatInfo(11025, AudioBitsPerSample.Sixteen, AudioChannel.Mono);
                // var si = speech.GetType().GetMethod("SetOutputStream", BindingFlags.Instance | BindingFlags.NonPublic);
                stream = new MemoryStream();
                //si.Invoke(speech, new object[] { stream, format, true, true });
                //speech.SetOutputToWaveStream(stream);
                speech.SetOutputToAudioStream(stream, format);
                speech.SelectVoice(config.getVoice(r.Language, r.Voice));

                // Map the request's 0-10 speed onto the synthesizer's -10..10 rate range.
                int rate = (r.Speed * 2 - 10);
                Console.WriteLine(rate);
                try
                {
                    speech.Rate = rate;
                }
                catch (ArgumentOutOfRangeException)
                {
                    speech.Rate = 0;
                }
                speech.SpeakAsync(r.Text);
//......... the rest of the code is omitted here .........
Example 6: GenerateVoiceAnnouncement

private static byte[] GenerateVoiceAnnouncement(string announcement)
{
    var synthesizer = new SpeechSynthesizer();
    var waveStream = new MemoryStream();

    // Prefer a "David" voice when one is installed.
    var firstOrDefault = synthesizer.GetInstalledVoices()
        .FirstOrDefault(x => x.VoiceInfo.Name.ToUpper().Contains("DAVID"));
    if (firstOrDefault != null)
        synthesizer.SelectVoice(firstOrDefault.VoiceInfo.Name);

    // 44.1 kHz, 16-bit stereo PCM: 44,100 * 2 channels * 2 bytes = 176,400
    // average bytes per second, with a block align of 4 bytes per sample frame.
    synthesizer.SetOutputToAudioStream(waveStream,
        new SpeechAudioFormatInfo(EncodingFormat.Pcm,
            44100, 16, 2, 176400, 4, null));
    synthesizer.Volume = 100;
    synthesizer.Rate = 1;
    synthesizer.Speak(announcement);
    synthesizer.SetOutputToNull();
    return waveStream.ToArray();
}
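Because SetOutputToAudioStream produces headerless PCM, the returned byte array is not a playable .wav file by itself. One way to audition it, sketched here with the NAudio library (which other examples on this page also use), is to wrap it in a RawSourceWaveStream:

// Sketch assuming NAudio: wrap the raw 44.1 kHz 16-bit stereo PCM for playback.
byte[] pcm = GenerateVoiceAnnouncement("Now boarding at gate five.");
using (var raw = new RawSourceWaveStream(new MemoryStream(pcm), new WaveFormat(44100, 16, 2)))
using (var player = new WaveOutEvent())
{
    player.Init(raw);
    player.Play();
    while (player.PlaybackState == PlaybackState.Playing)
    {
        Thread.Sleep(100);
    }
}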
Example 7: SendTextToSpeech

private void SendTextToSpeech(object obj) {
    Connection c = (Connection)obj;
    ConnectionStream stream = new ConnectionStream(c);
    SpeechSynthesizer s = new SpeechSynthesizer();
    using (MemoryStream memStream = new MemoryStream()) {
        // Render the text as 8 kHz, 8-bit mono A-law (a common telephony format),
        // then rewind the buffer and push it down the connection.
        s.SetOutputToAudioStream(memStream, new SpeechAudioFormatInfo(EncodingFormat.ALaw,
            8000, 8, 1, 8000, 1, null));
        s.Speak(_textToSpeak);
        memStream.Seek(0, SeekOrigin.Begin);
        using (ConnectionWriter writer = new ConnectionWriter(stream)) {
            writer.Reverse = true;
            writer.Write(memStream);
        }
    }
}
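ConnectionStream and ConnectionWriter are types specific to the surrounding project. To verify the synthesized audio locally before sending it anywhere, one option (again a sketch assuming NAudio) is to decode the A-law buffer back to PCM and save it as a WAV file:

// Sketch assuming NAudio: decode the 8 kHz mono A-law buffer to a playable WAV,
// given a MemoryStream memStream filled by the synthesizer as above.
memStream.Seek(0, SeekOrigin.Begin);
var alaw = new RawSourceWaveStream(memStream, WaveFormat.CreateALawFormat(8000, 1));
using (var pcm = WaveFormatConversionStream.CreatePcmStream(alaw))
{
    WaveFileWriter.CreateWaveFile("announcement.wav", pcm);
}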