

C# SpeechSynthesizer.SetOutputToNull Method Code Examples

This article collects typical usage examples of the C# SpeechSynthesizer.SetOutputToNull method. If you are wondering what SpeechSynthesizer.SetOutputToNull does, how to call it, or what real code that uses it looks like, the hand-picked examples below may help. You can also explore further usage examples of the SpeechSynthesizer class that this method belongs to.


Six code examples of the SpeechSynthesizer.SetOutputToNull method are shown below, sorted by popularity by default.
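As a quick orientation before the examples, here is a minimal sketch of the usual pattern (assuming a project that references the System.Speech assembly; the class name, spoken text, and audio format values are illustrative): the synthesizer is pointed at a MemoryStream via SetOutputToAudioStream, speech is rendered synchronously with Speak, and SetOutputToNull then detaches the synthesizer from the stream so the buffered audio can be read back or the stream disposed safely.

        using System;
        using System.IO;
        using System.Speech.AudioFormat;
        using System.Speech.Synthesis;

        class SetOutputToNullSketch
        {
            static void Main()
            {
                using (var synth = new SpeechSynthesizer())
                using (var stream = new MemoryStream())
                {
                    // Render speech into the MemoryStream as 16 kHz, 16-bit mono PCM.
                    synth.SetOutputToAudioStream(stream,
                        new SpeechAudioFormatInfo(16000, AudioBitsPerSample.Sixteen, AudioChannel.Mono));
                    synth.Speak("Hello from System.Speech.");

                    // Detach the synthesizer from the stream; after this call the
                    // buffered audio can be read back or the stream disposed safely.
                    synth.SetOutputToNull();

                    stream.Position = 0;
                    Console.WriteLine("Rendered {0} bytes of raw PCM.", stream.Length);
                }
            }
        }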

Example 1: SynthToCam

        private static void SynthToCam(string text, CameraWindow cw)
        {
            var synthFormat = new System.Speech.AudioFormat.SpeechAudioFormatInfo(System.Speech.AudioFormat.EncodingFormat.Pcm, 11025, 16, 1, 22100, 2, null);
            using (var synthesizer = new SpeechSynthesizer())
            {
                using (var waveStream = new MemoryStream())
                {

                    //write some silence to the stream to allow camera to initialise properly
                    var silence = new byte[1 * 22050];
                    waveStream.Write(silence, 0, silence.Length);

                    var pbuilder = new PromptBuilder();
                    var pStyle = new PromptStyle
                    {
                        Emphasis = PromptEmphasis.Strong,
                        Rate = PromptRate.Slow,
                        Volume = PromptVolume.ExtraLoud
                    };

                    pbuilder.StartStyle(pStyle);
                    pbuilder.StartParagraph();
                    pbuilder.StartVoice(VoiceGender.Male, VoiceAge.Adult, 2);
                    pbuilder.StartSentence();
                    pbuilder.AppendText(text);
                    pbuilder.EndSentence();
                    pbuilder.EndVoice();
                    pbuilder.EndParagraph();
                    pbuilder.EndStyle();

                    synthesizer.SetOutputToAudioStream(waveStream, synthFormat);
                    synthesizer.Speak(pbuilder);
                    synthesizer.SetOutputToNull();

                    //write some silence to the stream to allow camera to end properly
                    waveStream.Write(silence, 0, silence.Length);
                    waveStream.Seek(0, SeekOrigin.Begin);

                    var ds = new DirectStream(waveStream) { RecordingFormat = new WaveFormat(11025, 16, 1) };
                    var talkTarget = TalkHelper.GetTalkTarget(cw.Camobject, ds); 
                    ds.Start();
                    talkTarget.Start();
                    while (ds.IsRunning)
                    {
                        Thread.Sleep(100);
                    }
                    ds.Stop();
                    talkTarget.Stop();
                    talkTarget = null;
                    ds = null;
                }
            }


        }
Developer: tdhieu, Project: iSpy, Lines of code: 55, Source file: SpeechSynth.cs

Example 2: OnLoad


//......... part of the code is omitted here .........
                            }
                        // clear queue
                        SpeechQueue.Clear();
                    }
                    if(!r.Reset) {
                        SpeechQueue.Enqueue(r);
                    }
                    RequestCount++;
                }
                
                var eventdata = new Hashtable();
                eventdata.Add("ProcessedRequests", RequestCount);
                eventdata.Add("QueuedRequests", SpeechQueue.Count);
                eventdata.Add("IsSpeaking", IsSounding);
                InstrumentationEvent blam = new InstrumentationEvent();
                blam.EventName = "status";
                blam.Data = eventdata;
                NotifyGui(blam.EventMessage());  
            };

            // when this timer fires, it will pull off of the speech queue and speak it
            // the long delay also adds a little pause between tts requests.
            speechtimer = new System.Timers.Timer(250);
            speechtimer.Elapsed += (object sender, ElapsedEventArgs ev) =>
            {
                if (IsSpeaking.Equals(false))
                {
                    if (SpeechQueue.Count > 0)
                    {
                        TTSRequest r = SpeechQueue.Dequeue();
                        Console.WriteLine("dequeuing off of speech queue");
                        IsSpeaking = true;
                        speechtimer.Enabled = false;

                        //speech.SpeakAsync(r.Text);

                        //using (speech = new SpeechSynthesizer()) {
                        speech = new SpeechSynthesizer();
                            speech.SpeakCompleted += speech_SpeakCompleted;
                            format = new SpeechAudioFormatInfo(EncodingFormat.ALaw, 8000, 8, 1, 1, 2, null);
                            //format = new SpeechAudioFormatInfo(11025, AudioBitsPerSample.Sixteen, AudioChannel.Mono);
                           // var si = speech.GetType().GetMethod("SetOutputStream", BindingFlags.Instance | BindingFlags.NonPublic);
                            stream = new MemoryStream();
                            //si.Invoke(speech, new object[] { stream, format, true, true });
                            //speech.SetOutputToWaveStream(stream);
                            speech.SetOutputToAudioStream(stream, format);
                            speech.SelectVoice(config.getVoice (r.Language, r.Voice));
                            int rate = (r.Speed * 2 - 10);
                            
                            Console.WriteLine(rate);
                            try
                            {
                                speech.Rate = rate;
                            }
                            catch (ArgumentOutOfRangeException ex)
                            {
                                speech.Rate = 0;
                            }
                            speech.SpeakAsync(r.Text);
                        //}

                        synthesis.WaitOne();
                        speech.SpeakCompleted -= speech_SpeakCompleted;
                        speech.SetOutputToNull();
                        speech.Dispose();
                        //IsSpeaking = false;
                        IsSounding = true;
                        stream.Position = 0;
                        //WaveFormat.CreateCustomFormat(WaveFormatEncoding.WmaVoice9, 11025, 1, 16000, 2, 16)
                        using(RawSourceWaveStream reader = new RawSourceWaveStream(stream, WaveFormat.CreateALawFormat(8000, 1))) {
                            WaveStream ws = WaveFormatConversionStream.CreatePcmStream(reader);

                            //var waveProvider = new MultiplexingWaveProvider(new IWaveProvider[] { ws }, 4);
                            //waveProvider.ConnectInputToOutput(0, 3);

                            sound = new WaveOutEvent();
                            // set output device *before* init
                            Console.WriteLine("Output Device: " + OutputDeviceId);
                            sound.DeviceNumber = OutputDeviceId;
                            sound.Init(ws);
                            //sound.Init(waveProvider);
                            sound.PlaybackStopped += output_PlaybackStopped;
                           // Console.WriteLine("playing here " + ws.Length);
                            sound.Play();
                        }
                        playback.WaitOne();
                        //IsSounding = false;
                        speechtimer.Enabled = true;
                    }
                }
            };

            queuetimer.Enabled = true;
            queuetimer.Start();
            speechtimer.Enabled = true;
            speechtimer.Start();

            InitHTTPServer();

        }
Developer: humanrights, Project: ventriloquist, Lines of code: 101, Source file: Program.cs

Example 3: GenerateVoiceAnnouncement

        private static byte[] GenerateVoiceAnnouncement(string announcement)
        {
            var synthesizer = new SpeechSynthesizer();
            var waveStream = new MemoryStream();
            var firstOrDefault = synthesizer.GetInstalledVoices()
                .FirstOrDefault(x => x.VoiceInfo.Name.ToUpper().Contains("DAVID"));
            if (firstOrDefault != null)
                synthesizer.SelectVoice(
                    firstOrDefault
                        .VoiceInfo.Name);
            synthesizer.SetOutputToAudioStream(waveStream,
                new SpeechAudioFormatInfo(EncodingFormat.Pcm,
                    44100, 16, 2, 176400, 2, null));
            synthesizer.Volume = 100;
            synthesizer.Rate = 1;
            synthesizer.Speak(announcement);
            synthesizer.SetOutputToNull();

            return waveStream.ToArray();
        }
Developer: KR0SIV, Project: EAS-Encoder, Lines of code: 20, Source file: EASEncoder.cs

Example 4: tts

        public static void tts(string toRead)
        {

            try
            {

                lock (speakingLock)
                {
                    while (speaking)
                    {
                        // wait for previous speech to finish
                        Thread.Sleep(0);
                    }
                    speaking = true;
                }

                try
                {
                    using (SpeechSynthesizer m_speechSynth = new SpeechSynthesizer())
                    {
                        m_speechSynth.Volume = speechVolume;
                        //m_speechSynth.Rate = 2;

                        MemoryStream waveStream = new MemoryStream();
                        m_speechSynth.SetOutputToWaveStream(waveStream);

                        m_speechSynth.Speak(toRead);
                        //m_speechSynth.SpeakAsync(toRead);
                        //m_speechSynth.SpeakCompleted += delegate
                        {
                            try
                            {
                                m_speechSynth.SetOutputToNull();

                                waveStream.Position = 0; // reset counter to start


                                VoiceThroughNetAudio netAudio = new VoiceThroughNetAudio(waveStream, "WAV", FFXIVAPP.Common.Constants.DefaultAudioDevice);


                                lock (TTSQueue)
                                {
                                    TTSQueue.Add(netAudio);
                                }

                            }
                            catch (Exception ex2)
                            {
                                debug("SpeakCompleted", DBMErrorLevel.EngineErrors, ex2);
                            }
                        };
                    }
                }
                catch (Exception ex1)
                {
                    debug("tts2", DBMErrorLevel.EngineErrors, ex1);
                }

                speaking = false;
            }
            catch (Exception ex)
            {
                debug("tts1", DBMErrorLevel.EngineErrors, ex);
            }


        }
Developer: Yaguar666, Project: FFXIVDBM.Plugin, Lines of code: 67, Source file: EncounterController.cs

Example 5: ConvertWithSystemSpeech

 private bool ConvertWithSystemSpeech(string FileContent, string voiceName, VoiceGender gender,CultureInfo ci,int rate, string tempfile)
 {
     Console.WriteLine("Converting with: System Speech");
     try
     {
         EncodingFormat eformat = EncodingFormat.Pcm;
         var speech = new SpeechSynthesizer();
         var installedVoices = speech.GetInstalledVoices();
         VoiceInfo selectedVoice = null;
         foreach (var voice in installedVoices)
         {
             if (voice.VoiceInfo.Name == voiceName)
             {
                 selectedVoice = voice.VoiceInfo;
                 break;
             }
             else if (voice.VoiceInfo.Culture == ci)
             {
                 if (voice.VoiceInfo.Gender == gender)
                 {
                     selectedVoice = voice.VoiceInfo;
                     break;
                 }
             }
         }
         speech.SelectVoice(selectedVoice.Name);
         var safi = new SpeechAudioFormatInfo(eformat, 22000, 16, 1, 22000 * 4, 4, null);
         speech.SetOutputToWaveFile(tempfile, safi);
         speech.Speak(FileContent);
         speech.SetOutputToNull();
         return true;
     }
     catch (Exception e)
     {
         Console.WriteLine(e);
         return false;
     }
 }
Developer: sensusaps, Project: RoboBraille.Web.API, Lines of code: 38, Source file: AudioJobProcessor.cs

Example 6: Button_Click_2

        private void Button_Click_2(object sender, RoutedEventArgs e)
        {
            this.sensor.DepthFrameReady += this.SensorDepthFrameReady;
            if (Globals.value == "0")
            {

                // Initialize a new instance of the speech synthesizer.
                using (SpeechSynthesizer synth = new SpeechSynthesizer())
                using (MemoryStream stream = new MemoryStream())
                {

                    // Create a SoundPlayer instance to play the generated audio.
                    System.Media.SoundPlayer m_SoundPlayer = new System.Media.SoundPlayer();

                    // Configure the synthesizer to write its output into the memory stream.
                    synth.SetOutputToWaveStream(stream);

                    // Speak a phrase.
                    synth.Speak("stop!! obstacle detected.");

                    stream.Position = 0;
                    m_SoundPlayer.Stream = stream;
                    m_SoundPlayer.Play();

                    // Set the synthesizer output to null to release the stream.
                    synth.SetOutputToNull();
                    System.Windows.Forms.MessageBox.Show("press ok");
                    Globals.value = "1";
                    // Insert code to persist or process the stream contents here.
                }

            }
        }
Developer: nitinrgupta, Project: BlindNav, Lines of code: 34, Source file: MainWindow.xaml.cs


Note: The SpeechSynthesizer.SetOutputToNull examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or reusing the code; do not reproduce without permission.