This article collects typical usage examples of the C# SpeechSynthesizer class. If you are wondering what SpeechSynthesizer is for, how to use it, or simply want to see it in action, the curated class examples below should help.
In most of the examples, SpeechSynthesizer refers to the System.Speech.Synthesis class (a few examples use the WinRT Windows.Media.SpeechSynthesis or Windows Phone equivalent instead). Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# samples.
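Before the examples, here is a minimal sketch of the most common usage pattern, assuming the System.Speech.Synthesis flavour of the class and a project that references the System.Speech assembly:

using System.Speech.Synthesis;

class HelloSpeech
{
    static void Main()
    {
        // The synthesizer is IDisposable; speak synchronously to the default audio device.
        using (var synth = new SpeechSynthesizer())
        {
            synth.SetOutputToDefaultAudioDevice();
            synth.Speak("Hello from the SpeechSynthesizer class.");
        }
    }
}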
Example 1: populate_fields
private void populate_fields()
{
    List<String> localizations = new List<String>();
    foreach (RecognizerInfo ri in SpeechRecognitionEngine.InstalledRecognizers())
    {
        localizations.Add(ri.Culture.Name);
    }
    cbSettingsLanguage.DataSource = localizations;

    List<String> voices = new List<string>();
    SpeechSynthesizer synthesizer = new SpeechSynthesizer();
    foreach (InstalledVoice voice in synthesizer.GetInstalledVoices())
    {
        voices.Add(voice.VoiceInfo.Name);
    }
    cbSettingsSynthesizer.DataSource = voices;

    List<string> audio_devices = new List<string>();
    //TODO: actually enumerate audio devices.
    audio_devices.Add("Default Directsound device");
    cbSettingsRecordingDevice.DataSource = audio_devices;

    List<string> pushtotalk_mode_list = new List<string>();
    pushtotalk_mode_list.Add("Off");
    pushtotalk_mode_list.Add("Hold");
    pushtotalk_mode_list.Add("PressOnce");
    cbSettingsPushToTalkMode.DataSource = pushtotalk_mode_list;
    cbSettingsPushToTalkMode.SelectedItem = vi_settings.pushtotalk_mode;
    cbSettingsPushToTalkKey.DataSource = Enum.GetValues(typeof(Keys)).Cast<Keys>();
    cbSettingsPushToTalkKey.SelectedItem = Enum.Parse(typeof(Keys), vi_settings.pushtotalk_key);
}
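If only the voices for a particular culture are needed, GetInstalledVoices also has an overload that takes a CultureInfo filter; a minimal sketch, reusing the names from the example above:

foreach (InstalledVoice voice in synthesizer.GetInstalledVoices(new System.Globalization.CultureInfo("en-US")))
{
    voices.Add(voice.VoiceInfo.Name);
}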
Example 2: SetupSpeechSynthesizer
/************ Helper Methods ************/
// Helper Methods For Speech Synthesis Engine
private void SetupSpeechSynthesizer()
{
    // Start speech engine
    m_speechSynthesizer = new SpeechSynthesizer();
    // Do cute things with voice here
}
Example 3: MediaElement
MediaElement mediaElement = new MediaElement(); // Memory is allocated only once, dynamically

async void speakString(string text)
{
    var synth = new SpeechSynthesizer();
    SpeechSynthesisStream stream = await synth.SynthesizeTextToStreamAsync(text);
    mediaElement.SetSource(stream, stream.ContentType);
    mediaElement.Play();
}
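Note that this example uses the WinRT SpeechSynthesizer from Windows.Media.SpeechSynthesis rather than the System.Speech.Synthesis class seen in most of the other examples: it renders speech to a stream that is then handed to a MediaElement for playback. Selecting a specific voice with that API looks roughly like the following sketch (assuming at least one matching voice is installed; requires using System.Linq):

var synth = new SpeechSynthesizer();
// AllVoices is a static list of VoiceInformation entries on the WinRT class.
synth.Voice = SpeechSynthesizer.AllVoices.First(v => v.Language == "en-US");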
Example 4: GameplayScreen
/// <summary>
/// Constructor.
/// </summary>
public GameplayScreen(Level lvl)
{
    Narrator = new SpeechSynthesizer();
    TransitionOnTime = TimeSpan.FromSeconds(1.5);
    TransitionOffTime = TimeSpan.FromSeconds(0.5);
    mLevel = lvl;
}
Example 5: btnStop_Click
private void btnStop_Click(object sender, EventArgs e)
{
    synt.Pause();
    synt = new SpeechSynthesizer(); // discard the paused synthesizer and start over with a fresh one
    play = false;
    textBox1.Text = string.Empty;
}
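Pausing and then replacing the synthesizer works, but System.Speech also exposes SpeakAsyncCancelAll, which flushes any queued asynchronous prompts without allocating a new instance; a minimal sketch of that alternative, reusing the same fields:

private void btnStop_Click(object sender, EventArgs e)
{
    synt.SpeakAsyncCancelAll(); // cancel anything still queued for asynchronous speech
    play = false;
    textBox1.Text = string.Empty;
}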
Example 6: Speaker
public Speaker()
{
    if (null == msSpeech)
    {
        msSpeech = new SpeechSynthesizer();
    }
}
Example 7: sayword
/// <summary>
/// Speaks a voice alert.
/// </summary>
public void sayword(string strAlarm)
{
    try
    {
        SpeechSynthesizer Talker = new SpeechSynthesizer();
        Talker.Rate = 2;     // speaking rate, valid range -10 to 10
        Talker.Volume = 100; // volume, 0 to 100
        #region Get the names of all voices installed on this machine
        //string voicestring = "";
        //foreach (InstalledVoice iv in Talker.GetInstalledVoices())
        //{
        //    voicestring += iv.VoiceInfo.Name + ",";
        //}
        //Microsoft Mary,Microsoft Mike,Microsoft Sam,Microsoft Simplified Chinese,SampleTTSVoice
        //Talker.SelectVoice("Microsoft Mary");
        #endregion
        //Talker.SetOutputToWaveFile("c:\\soundfile.wav"); // write the output to a wave file instead of the speakers
        Talker.SelectVoiceByHints(VoiceGender.Male, VoiceAge.Child, 2, System.Globalization.CultureInfo.CurrentCulture);
        Talker.SpeakAsync(strAlarm);
    }
    catch (Exception ex)
    {
        Log.WriteLog("Speech alert control: " + ex.ToString());
    }
}
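The commented-out SetOutputToWaveFile call hints at another common pattern: capturing the synthesized speech into a WAV file instead of playing it through the speakers. A minimal sketch, reusing the strAlarm parameter from the example above and an illustrative file path:

using (SpeechSynthesizer talker = new SpeechSynthesizer())
{
    talker.SetOutputToWaveFile(@"c:\temp\alert.wav"); // route audio to a file
    talker.Speak(strAlarm);                           // synchronous, so the file is complete when this returns
}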
Example 8: TestPhonemes
public void TestPhonemes()
{
    EventWaitHandle waitHandle = new AutoResetEvent(false);
    using (MemoryStream stream = new MemoryStream())
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SetOutputToWaveStream(stream);
        synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"UTF-8\"?><speak version=\"1.0\" xmlns=\"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>This is your <phoneme alphabet=\"ipa\" ph=\"leɪkɒn\">Lakon</phoneme>.</s></speak>");
        //synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><speak version = \"1.0\" xmlns = \"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>You are travelling to the <phoneme alphabet=\"ipa\" ph=\"ˈdɛltə\">delta</phoneme> system.</s></speak>");
        //synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><speak version = \"1.0\" xmlns = \"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>You are travelling to the <phoneme alphabet=\"ipa\" ph=\"bliːiː\">Bleae</phoneme> <phoneme alphabet=\"ipa\" ph=\"θuːə\">Thua</phoneme> system.</s></speak>");
        //synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><speak version = \"1.0\" xmlns = \"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>You are travelling to the Amnemoi system.</s></speak>");
        //synth.Speak("You are travelling to the Barnard's Star system.");
        stream.Seek(0, SeekOrigin.Begin);

        // Play the generated WAV data back (IWaveSource, WaveFileReader and WasapiOut come from the CSCore audio library).
        IWaveSource source = new WaveFileReader(stream);
        var soundOut = new WasapiOut();
        soundOut.Stopped += (s, e) => waitHandle.Set();
        soundOut.Initialize(source);
        soundOut.Play();
        waitHandle.WaitOne();
        soundOut.Dispose();
        source.Dispose();
    }
}
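For simple pronunciation overrides, the full SSML document does not have to be assembled by hand: PromptBuilder can attach an IPA pronunciation to a single word. A hedged sketch of the equivalent of the first SpeakSsml call above:

PromptBuilder builder = new PromptBuilder(new System.Globalization.CultureInfo("en-GB"));
builder.AppendText("This is your ");
builder.AppendTextWithPronunciation("Lakon", "leɪkɒn"); // pronunciation is given as an IPA string
builder.AppendText(".");
synth.Speak(builder);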
Example 9: GenerateTTS
private void GenerateTTS(Configuration cfg)
{
    this.InitDestinationDirectory(cfg);
    using (var synthesizer = new SpeechSynthesizer())
    {
        this.InitializeSpeechSynthesizer(cfg, synthesizer);
        var xmlDoc = this.LoadXmlDocument(cfg);
        var rows = this.LoadRows(cfg, xmlDoc);
        foreach (var row in rows)
        {
            string id = this.GetElementValue(cfg, row, cfg.IDElementName);
            Console.WriteLine(id);
            byte i = 0;
            foreach (var textElementName in cfg.TextElementNames)
            {
                string textElement = this.GetElementValue(cfg, row, textElementName);
                char postFix = (char)((byte)'A' + i);
                string filename = cfg.Mp3FilePrefix + id + postFix + ".mp3";
                string fullFilename = Path.Combine(cfg.DestinationDirectory, filename);
                this.SpeakToMp3(cfg, synthesizer, fullFilename, textElement);
                i++;
            }
        }
    }
}
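SpeakToMp3 and the other helpers are not shown in this example. System.Speech itself can only write WAV data, so the MP3 output implies an external encoder inside that helper; the WAV half of it presumably looks something like this hypothetical sketch:

// Hypothetical helper: write one utterance to a WAV file. Converting the result to MP3
// would require an external encoder (for example LAME via NAudio.Lame) and is not shown.
private void SpeakToWavFile(SpeechSynthesizer synthesizer, string fullFilename, string text)
{
    synthesizer.SetOutputToWaveFile(fullFilename);
    synthesizer.Speak(text);
    synthesizer.SetOutputToNull(); // release the file before the next iteration reuses the synthesizer
}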
Example 10: Sound
static Sound()
{
    voice = new SpeechSynthesizer();
    voice.Volume = 100;
    voice.Rate = 8 * voice.Rate / 3; // Rate ranges from -10 to 10 and defaults to 0, so with the default this evaluates to 0
}
Example 11: Main
static void Main(string[] args)
{
    SpeechSynthesizer speechSynthesizer = new SpeechSynthesizer();
    speechSynthesizer.Speak("Welcome to the speaking performance monitor!");

    #region Performance Counters
    PerformanceCounter performanceCounter_CPU = new PerformanceCounter("Processor Information", "% Processor Time", "_Total");
    PerformanceCounter performanceCounter_RAM = new PerformanceCounter("Memory", "% Committed Bytes In Use");
    PerformanceCounter performanceCounter_TIME = new PerformanceCounter("System", "System Up Time");
    #endregion

    while (true)
    {
        // The first NextValue() call on a rate-based counter returns 0; meaningful readings start with the second sample.
        float currentCPUPercentage = performanceCounter_CPU.NextValue();
        float currentRAMPercentage = performanceCounter_RAM.NextValue();
        Console.WriteLine("CPU Load: {0}%", currentCPUPercentage);
        Console.WriteLine("RAM Usage: {0}%", currentRAMPercentage);
        string cpuLoadVocalMessage = String.Format("The current CPU load is: {0}", currentCPUPercentage);
        string ramLoadVocalMessage = String.Format("The current RAM usage is: {0}", currentRAMPercentage);
        // Presumably the intended use of the messages built above: read them out loud.
        speechSynthesizer.Speak(cpuLoadVocalMessage);
        speechSynthesizer.Speak(ramLoadVocalMessage);
        Thread.Sleep(500);
    }
}
Example 12: SpeakClick
private async void SpeakClick(object sender, RoutedEventArgs e)
{
    if (string.IsNullOrEmpty(inputTextBox.Text))
        MessageBox.Show("Please enter some text to read.");
    else
    {
        try
        {
            SpeechSynthesizer synth = new SpeechSynthesizer();
            var voices = InstalledVoices.All.Where(v => v.Language == "es-ES").OrderByDescending(v => v.Gender);
            VoiceGender gender = VoiceGender.Male;
            if (rbMale.IsChecked == true)
                gender = VoiceGender.Male;
            else
                gender = VoiceGender.Female;
            synth.SetVoice(voices.Where(v => v.Gender == gender).FirstOrDefault());
            await synth.SpeakTextAsync(inputTextBox.Text);
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message);
        }
    }
}
Example 13: SpeechConversation
public SpeechConversation(SpeechSynthesizer speechSynthesizer = null, SpeechRecognitionEngine speechRecognition = null)
{
    SessionStorage = new SessionStorage();
    if (speechSynthesizer == null)
    {
        speechSynthesizer = new SpeechSynthesizer();
        speechSynthesizer.SetOutputToDefaultAudioDevice();
    }
    _speechSynthesizer = speechSynthesizer;

    if (speechRecognition == null)
    {
        speechRecognition = new SpeechRecognitionEngine(
            new System.Globalization.CultureInfo("en-US")
        );

        // Create a default dictation grammar.
        DictationGrammar defaultDictationGrammar = new DictationGrammar();
        defaultDictationGrammar.Name = "default dictation";
        defaultDictationGrammar.Enabled = true;
        speechRecognition.LoadGrammar(defaultDictationGrammar);

        // Create the spelling dictation grammar.
        DictationGrammar spellingDictationGrammar = new DictationGrammar("grammar:dictation#spelling");
        spellingDictationGrammar.Name = "spelling dictation";
        spellingDictationGrammar.Enabled = true;
        speechRecognition.LoadGrammar(spellingDictationGrammar);

        // Configure input to the speech recognizer.
        speechRecognition.SetInputToDefaultAudioDevice();
    }
    _speechRecognition = speechRecognition;
}
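The constructor only wires the two engines up; actually listening would use the SpeechRecognized event and RecognizeAsync. A minimal sketch of a possible companion method (the method name is an assumption, only the _speechRecognition field comes from the example):

// Hypothetical companion method: start continuous recognition and print each result.
public void StartListening()
{
    _speechRecognition.SpeechRecognized += (sender, e) =>
    {
        Console.WriteLine("Recognized: " + e.Result.Text);
    };
    _speechRecognition.RecognizeAsync(RecognizeMode.Multiple); // keep recognizing until cancelled
}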
Example 14: StartAsync
public override async Task StartAsync()
{
    _synth = new SpeechSynthesizer();
    await Log.ReportInfoFormatAsync(CancellationToken, "{0} started", Name);
    NotifyEntityChangeContext.ChangeNotifications<DeviceValue>.OnEntityUpdated += SpeechPlugin_OnEntityUpdated;
    _synth.SpeakAsync("Speech Started!");
}
Example 15: MainForm
public MainForm()
{
    InitializeComponent();
    message = new Dictionary<char, string>() {
        {'+', "increase screen opacity"},
        {'-', "decrease screen opacity"},
        {'1', "change color"},
        {'2', "change color"},
        {'3', "red light on"},
        {'4', "green light on"},
        {'5', "switch to night time"},
        {'6', "switch to day time"},
        {'7', "make snows"},
        {'8', "show rainbow"},
        {'9', "show bunny"},
        {'0', "make the bunny jump"}
    };
    synth = new SpeechSynthesizer();
    synth.SetOutputToDefaultAudioDevice();
    cars = new List<Car>();
    bunny = new Bunny(new Point(20, 20));
    rainbow = new Rainbow(new Point(0, 0));
    snow = new Snow(new Point(0, 0));
    traffic = new Traffic();
    sun = new Sun(new Point(Size.Width - 250, 10));
    moon = new Moon(new Point(Size.Width - 250, 10));
    carSound = new SoundPlayer(@"carSounds.wav");
    windSound = new SoundPlayer(@"Wind.wav");
    brake = new SoundPlayer(@"car-brake.wav");
}
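The message dictionary maps key characters to spoken descriptions and is presumably read by a key handler elsewhere in the form. A hedged sketch of what such a handler might look like (the handler itself is not part of the original example):

// Hypothetical KeyPress handler: speak the description associated with the pressed key.
private void MainForm_KeyPress(object sender, KeyPressEventArgs e)
{
    if (message.TryGetValue(e.KeyChar, out string text))
    {
        synth.SpeakAsync(text); // asynchronous, so the UI stays responsive
    }
}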