本文整理汇总了C#中SpeechSynthesizer.SpeakSsml方法的典型用法代码示例。如果您正苦于以下问题:C# SpeechSynthesizer.SpeakSsml方法的具体用法?C# SpeechSynthesizer.SpeakSsml怎么用?C# SpeechSynthesizer.SpeakSsml使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类SpeechSynthesizer
的用法示例。
在下文中一共展示了SpeechSynthesizer.SpeakSsml方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: TestPhonemes
public void TestPhonemes()
{
    // Synthesizes a fixed SSML phrase containing an IPA <phoneme> element to an
    // in-memory wave stream, then plays it synchronously on the default WASAPI
    // device. The original disposed soundOut/source manually with no try/finally,
    // leaking them (and the wait handle) if playback threw; using blocks fix that.
    using (EventWaitHandle waitHandle = new AutoResetEvent(false))
    using (MemoryStream stream = new MemoryStream())
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SetOutputToWaveStream(stream);
        synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"UTF-8\"?><speak version=\"1.0\" xmlns=\"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>This is your <phoneme alphabet=\"ipa\" ph=\"leɪkɒn\">Lakon</phoneme>.</s></speak>");
        // Rewind so the WaveFileReader can parse the RIFF header the
        // synthesizer just wrote.
        stream.Seek(0, SeekOrigin.Begin);
        using (IWaveSource source = new WaveFileReader(stream))
        using (var soundOut = new WasapiOut())
        {
            soundOut.Stopped += (s, e) => waitHandle.Set();
            soundOut.Initialize(source);
            soundOut.Play();
            // Block until the Stopped event fires so the test is synchronous.
            waitHandle.WaitOne();
        }
    }
}
示例2: Speak
// Synthesizes 'script' to an in-memory wave stream, layers the requested DSP
// effects (chorus/reverb/echo/distortion/compression) on top, and plays the
// result on the default WASAPI device, tracking it in activeSpeeches.
// 'radio' selects a transmit/receive treatment (no reverb/echo, extra
// distortion + compression).
// NOTE(review): this block is truncated by the source page ("部分代码省略") —
// the closing braces and any remaining logic are not visible here.
public void Speak(string script, string voice, int echoDelay, int distortionLevel, int chorusLevel, int reverbLevel, int compressLevel, bool radio)
{
if (script == null) { return; }
try
{
using (SpeechSynthesizer synth = new SpeechSynthesizer())
using (MemoryStream stream = new MemoryStream())
{
// Fall back to the configured standard voice when none was requested.
if (String.IsNullOrWhiteSpace(voice))
{
voice = configuration.StandardVoice;
}
if (voice != null)
{
try
{
synth.SelectVoice(voice);
}
// NOTE(review): voice-selection failures are deliberately swallowed so
// playback continues with the synthesizer's default voice.
catch { }
}
synth.Rate = configuration.Rate;
synth.SetOutputToWaveStream(stream);
string speech = SpeechFromScript(script);
// Scripts containing <phoneme> must be wrapped in a full SSML envelope
// before being handed to SpeakSsml; plain scripts use Speak directly.
if (speech.Contains("<phoneme"))
{
speech = "<?xml version=\"1.0\" encoding=\"UTF-8\"?><speak version=\"1.0\" xmlns=\"http://www.w3.org/2001/10/synthesis\" xml:lang=\"" + locale + "\"><s>" + speech + "</s></speak>";
synth.SpeakSsml(speech);
}
else
{
synth.Speak(speech);
}
// Rewind so the reader below sees the RIFF header.
stream.Seek(0, SeekOrigin.Begin);
// Append a script->speech trace to %AppData%\EDDI\speech.log.
using (System.IO.StreamWriter file = new System.IO.StreamWriter(Environment.GetEnvironmentVariable("AppData") + @"\EDDI\speech.log", true)) { file.WriteLine("" + System.Threading.Thread.CurrentThread.ManagedThreadId + ": Turned script " + script + " in to speech " + speech); }
IWaveSource source = new WaveFileReader(stream);
// We need to extend the duration of the wave source if we have any effects going on
if (chorusLevel != 0 || reverbLevel != 0 || echoDelay != 0)
{
// Add a base of 500ms plus 10ms per effect level over 50
source = source.AppendSource(x => new ExtendedDurationWaveSource(x, 500 + Math.Max(0, (configuration.EffectsLevel - 50) * 10)));
}
// Add various effects...
// We always have chorus
if (chorusLevel != 0)
{
source = source.AppendSource(x => new DmoChorusEffect(x) { Depth = chorusLevel, WetDryMix = Math.Min(100, (int)(180 * ((decimal)configuration.EffectsLevel) / ((decimal)100))), Delay = 16, Frequency = 2, Feedback = 25 });
}
// We only have reverb and echo if we're not transmitting or receiving
if (!radio)
{
if (reverbLevel != 0)
{
// We tone down the reverb level with the distortion level, as the combination is nasty
source = source.AppendSource(x => new DmoWavesReverbEffect(x) { ReverbTime = (int)(1 + 999 * ((decimal)configuration.EffectsLevel) / ((decimal)100)), ReverbMix = Math.Max(-96, -96 + (96 * reverbLevel / 100) - distortionLevel) });
}
if (echoDelay != 0)
{
// We tone down the echo level with the distortion level, as the combination is nasty
source = source.AppendSource(x => new DmoEchoEffect(x) { LeftDelay = echoDelay, RightDelay = echoDelay, WetDryMix = Math.Max(5, (int)(10 * ((decimal)configuration.EffectsLevel) / ((decimal)100)) - distortionLevel), Feedback = Math.Max(0, 10 - distortionLevel / 2) });
}
}
if (configuration.EffectsLevel > 0 && distortionLevel > 0)
{
source = source.AppendSource(x => new DmoDistortionEffect(x) { Edge = distortionLevel, Gain = -6 - (distortionLevel / 2), PostEQBandwidth = 4000, PostEQCenterFrequency = 4000 });
}
// Radio treatment: fixed distortion plus compression to mimic a comms channel.
if (radio)
{
source = source.AppendSource(x => new DmoDistortionEffect(x) { Edge = 7, Gain = -4 - distortionLevel / 2, PostEQBandwidth = 2000, PostEQCenterFrequency = 6000 });
source = source.AppendSource(x => new DmoCompressorEffect(x) { Attack = 1, Ratio = 3, Threshold = -10 });
}
EventWaitHandle waitHandle = new EventWaitHandle(false, EventResetMode.AutoReset);
var soundOut = new WasapiOut();
soundOut.Initialize(source);
soundOut.Stopped += (s, e) => waitHandle.Set();
activeSpeeches.Add(soundOut);
soundOut.Play();
// Add a timeout, in case it doesn't come back
waitHandle.WaitOne(source.GetTime(source.Length));
// It's possible that this has been disposed of, so ensure that it's still there before we try to finish it
lock (activeSpeeches)
{
if (activeSpeeches.Contains(soundOut))
{
activeSpeeches.Remove(soundOut);
//......... (remainder of this method omitted by the source page) .........
示例3: MakeAudioFile
public FileInfo MakeAudioFile(string ssmlToSpeak)
{
    // Renders the given SSML fragment to a uniquely-named wav file in the temp
    // directory and returns its FileInfo.
    // The original built the name from culture-dependent DateTime.Now.ToString()
    // and stripped a few separators by hand; some culture date formats contain
    // other characters that are invalid in file names. An invariant numeric
    // timestamp avoids that entirely, and Path.Combine avoids manual "\\" joins.
    string wavFileName = Path.Combine(
        Path.GetTempPath(),
        DateTime.Now.ToString("yyyyMMddHHmmssfff", System.Globalization.CultureInfo.InvariantCulture) + ".wav");
    // Wrap the fragment in a minimal SSML envelope; the trailing 2s break pads
    // the file so playback is not clipped at the end.
    ssmlToSpeak = "<speak version='1.0' xml:lang='en-US'>" + ssmlToSpeak + "<break time='2000ms'/></speak>";
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SetOutputToWaveFile(wavFileName);
        synth.SpeakSsml(ssmlToSpeak);
    }
    return new FileInfo(wavFileName);
}
示例4: TestPhonemes
public void TestPhonemes()
{
    // Speaks a fixed SSML test phrase and plays it synchronously on the default
    // WASAPI device. The original disposed soundOut/source manually with no
    // try/finally, leaking them (and the wait handle) if playback threw;
    // using blocks guarantee disposal.
    using (EventWaitHandle waitHandle = new AutoResetEvent(false))
    using (MemoryStream stream = new MemoryStream())
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SetOutputToWaveStream(stream);
        synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><speak version = \"1.0\" xmlns = \"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>You are travelling to the Eravate system.</s></speak>");
        // Rewind so the WaveFileReader can parse the RIFF header.
        stream.Seek(0, SeekOrigin.Begin);
        using (IWaveSource source = new WaveFileReader(stream))
        using (var soundOut = new WasapiOut())
        {
            soundOut.Stopped += (s, e) => waitHandle.Set();
            soundOut.Initialize(source);
            soundOut.Play();
            // Block until the Stopped event fires so the test is synchronous.
            waitHandle.WaitOne();
        }
    }
}
示例5: DownloadAndPlayItem
public override void DownloadAndPlayItem(PhraseItem item, string folder)
{
    // Renders the phrase to speech on a background task and writes an MP3
    // (via LAME) plus a wav variant (via ConvertToWav) under 'folder'.
    //
    // Bug fix: '&' is XML-reserved and must be escaped as &amp; in SSML. The
    // original called item.Phrase.Replace("&", "&") — a no-op, almost certainly
    // an HTML-unescaping artifact of Replace("&", "&amp;") — so any phrase
    // containing a raw ampersand produced malformed XML and SpeakSsml threw.
    String SSMLText = String.Format(@"<?xml version=""1.0""?>
<speak xmlns=""http://www.w3.org/2001/10/synthesis"" version=""1.0"" xml:lang=""{1}"">
<p>
<s>{0}</s>
</p>
</speak>
", item.Phrase.Replace("&", "&amp;"), SelectedVoice.Language);
    try
    {
        // Synthesis and encoding run on a background task so the caller is not
        // blocked while the synthesizer and LAME encoder work.
        new Task(() =>
        {
            using (MemoryStream ms = new MemoryStream())
            {
                using (var synth = new SpeechSynthesizer())
                {
                    synth.SelectVoice(SelectedVoice.Name);
                    synth.Volume = Int32.Parse(SelectedDiscreteVolume);
                    synth.Rate = Int32.Parse(SelectedDiscreteSpeed);
                    synth.SetOutputToWaveStream(ms);
                    synth.SpeakSsml(SSMLText);
                }
                // Encode the rendered wave data to MP3 at 128 kbps.
                ms.Seek(0, SeekOrigin.Begin);
                using (WaveFileReader wav = new WaveFileReader(ms))
                {
                    using (FileStream fs = new FileStream(String.Format("{0}\\mp3\\{1}\\{2}.mp3", folder, item.Folder, item.FileName), FileMode.Create))
                    using (var writer = new LameMP3FileWriter(fs, wav.WaveFormat, 128))
                    {
                        wav.CopyTo(writer);
                    }
                }
                // Produce the wav variant as well.
                // NOTE(review): this reader is never passed to ConvertToWav; it is
                // kept only to preserve the original code's disposal behavior.
                ms.Seek(0, SeekOrigin.Begin);
                using (WaveFileReader wav = new WaveFileReader(ms))
                {
                    ConvertToWav(item, folder, true, new String[] { Name, SelectedVoice.Name, SelectedDiscreteSpeed, SelectedDiscreteVolume });
                }
            }
        }).Start();
    }
    catch (Exception Ex)
    {
        Logger.Log(Ex.ToString());
    }
}
示例6: Speak
// Speaks 'speech' (plain text or SSML fragment) on a background thread:
// synthesizes it to an in-memory wave stream, then (below the visible portion)
// applies DSP effects and plays it. SSML is detected by the presence of
// <phoneme>/<break>; if the user disabled SSML, or the voice rejects it, the
// tags are stripped and plain Speak is used instead.
// NOTE(review): this block is truncated by the source page ("部分代码省略") —
// the closing braces and remaining logic are not visible here.
public void Speak(string speech, string voice, int echoDelay, int distortionLevel, int chorusLevel, int reverbLevel, int compressLevel, bool wait = true, int priority = 3)
{
if (speech == null) { return; }
Thread speechThread = new Thread(() =>
{
string finalSpeech = null;
try
{
using (SpeechSynthesizer synth = new SpeechSynthesizer())
using (MemoryStream stream = new MemoryStream())
{
// Fall back to the configured standard voice when none was requested.
if (string.IsNullOrWhiteSpace(voice))
{
voice = configuration.StandardVoice;
}
// "Microsoft Server Speech" voices are skipped — presumably they cannot
// be selected through this synthesizer; TODO confirm against callers.
if (voice != null && !voice.Contains("Microsoft Server Speech Text to Speech Voice"))
{
try
{
Logging.Debug("Selecting voice " + voice);
synth.SelectVoice(voice);
Logging.Debug("Selected voice " + synth.Voice.Name);
}
catch (Exception ex)
{
// Selection failure is logged but non-fatal; the default voice is used.
Logging.Error("Failed to select voice " + voice, ex);
}
}
Logging.Debug("Post-selection");
// NOTE(review): precedence bug — '+' binds tighter than '==', so this
// evaluates ("Configuration is " + configuration) == null, which is always
// false, and the message is always the serialized configuration with no
// "Configuration is " prefix. Intended:
// "Configuration is " + (configuration == null ? "<null>" : JsonConvert.SerializeObject(configuration))
Logging.Debug("Configuration is " + configuration == null ? "<null>" : JsonConvert.SerializeObject(configuration));
synth.Rate = configuration.Rate;
Logging.Debug("Rate is " + synth.Rate);
synth.Volume = configuration.Volume;
Logging.Debug("Volume is " + synth.Volume);
synth.StateChanged += new EventHandler<StateChangedEventArgs>(synth_StateChanged);
Logging.Debug("Tracking state changes");
synth.SetOutputToWaveStream(stream);
Logging.Debug("Output set to stream");
// Treat the input as SSML only if it contains phoneme or break tags.
if (speech.Contains("<phoneme") || speech.Contains("<break"))
{
Logging.Debug("Speech is SSML");
if (configuration.DisableSsml)
{
Logging.Debug("Disabling SSML at user request");
// User has disabled SSML so remove it
finalSpeech = Regex.Replace(speech, "<.*?>", string.Empty);
synth.Speak(finalSpeech);
}
else
{
Logging.Debug("Obtaining best guess culture");
string culture = bestGuessCulture(synth);
Logging.Debug("Best guess culture is " + culture);
// Wrap the fragment in a full SSML envelope tagged with the voice's culture.
finalSpeech = "<?xml version=\"1.0\" encoding=\"UTF-8\"?><speak version=\"1.0\" xmlns=\"http://www.w3.org/2001/10/synthesis\" xml:lang=\"" + bestGuessCulture(synth) + "\"><s>" + speech + "</s></speak>";
Logging.Debug("SSML speech: " + finalSpeech);
try
{
Logging.Debug("Speaking SSML");
synth.SpeakSsml(finalSpeech);
Logging.Debug("Finished speaking SSML");
}
catch (Exception ex)
{
Logging.Error("Best guess culture of " + bestGuessCulture(synth) + " for voice " + synth.Voice.Name + " was incorrect", ex);
Logging.Info("SSML does not work for the chosen voice; falling back to normal speech");
// Try again without Ssml
finalSpeech = Regex.Replace(speech, "<.*?>", string.Empty);
synth.Speak(finalSpeech);
}
}
}
else
{
Logging.Debug("Speech does not contain SSML");
Logging.Debug("Speech: " + speech);
finalSpeech = speech;
Logging.Debug("Speaking normal speech");
synth.Speak(finalSpeech);
Logging.Debug("Finished speaking normal speech");
}
Logging.Debug("Seeking back to the beginning of the stream");
stream.Seek(0, SeekOrigin.Begin);
Logging.Debug("Setting up source from stream");
IWaveSource source = new WaveFileReader(stream);
// We need to extend the duration of the wave source if we have any effects going on
if (chorusLevel != 0 || reverbLevel != 0 || echoDelay != 0)
{
// Add a base of 500ms plus 10ms per effect level over 50
// NOTE(review): the log line below string-concatenates 500 and the Math.Max
// value instead of summing them, so the logged number is misleading
// (e.g. "5000" rather than "500"); the actual extension passed to
// ExtendedDurationWaveSource on the next line is computed correctly.
Logging.Debug("Extending duration by " + 500 + Math.Max(0, (configuration.EffectsLevel - 50) * 10) + "ms");
source = source.AppendSource(x => new ExtendedDurationWaveSource(x, 500 + Math.Max(0, (configuration.EffectsLevel - 50) * 10)));
}
// Add various effects...
Logging.Debug("Effects level is " + configuration.EffectsLevel + ", chorus level is " + chorusLevel + ", reverb level is " + reverbLevel + ", echo delay is " + echoDelay);
// We always have chorus
//......... (remainder of this method omitted by the source page) .........