

C# WaveChannel32.Close Method Code Examples

This article collects typical usage examples of the C# method NAudio.Wave.WaveChannel32.Close. If you are asking yourself what WaveChannel32.Close does, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also explore further usage examples of the containing class, NAudio.Wave.WaveChannel32.


Seven code examples of the WaveChannel32.Close method are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C# code examples.
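Before diving into the examples, here is a minimal sketch of the pattern they all share: wrap a WaveStream in a WaveChannel32, read from it, and release it with Close() (or Dispose()) when finished. The file path, buffer size, and class name are illustrative assumptions, not taken from any of the projects listed below.

    using System;
    using NAudio.Wave;

    class WaveChannel32CloseSketch
    {
        static void Main()
        {
            // Hypothetical input file; a 16-bit PCM WAV, which is what WaveChannel32 expects.
            WaveFileReader reader = new WaveFileReader("input.wav");
            WaveChannel32 channel = new WaveChannel32(reader);
            try
            {
                // Read roughly one second of 32-bit float samples per iteration.
                byte[] buffer = new byte[channel.WaveFormat.AverageBytesPerSecond];
                while (channel.Read(buffer, 0, buffer.Length) > 0)
                {
                    // Process the samples in buffer here.
                }
            }
            finally
            {
                // Close() releases the wrapped reader; calling Dispose() has the same effect.
                channel.Close();
                reader.Close();
            }
        }
    }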

Example 1: waveformGenerateWorker_DoWork

        private void waveformGenerateWorker_DoWork(object sender, DoWorkEventArgs e)
        {
            WaveformGenerationParams waveformParams = e.Argument as WaveformGenerationParams;

            /*
            float[] audio = CommonUtils.Audio.NAudio.AudioUtilsNAudio.ReadMonoFromFile(waveformParams.Path, 44100, 0, 0);
            Dispatcher.CurrentDispatcher.Invoke(new Action(() =>
                                                           {
                                                           	WaveformData = audio;
                                                           }));
            return;
             */

            ISampleProvider sampleProvider = new AudioFileReader(waveformParams.Path);
            WaveStream fileWaveStream = (WaveStream) sampleProvider;
            WaveChannel32 waveformInputStream = new WaveChannel32(fileWaveStream);
            waveformInputStream.PadWithZeroes = false;
            waveformInputStream.Sample += waveStream_Sample;

            int frameLength = fftDataSize;
            int frameCount = (int)((double)waveformInputStream.Length / (double)frameLength);
            int waveformLength = frameCount * 2;
            float[] samples = new float[frameLength];
            List<float> floatList = new List<float>();
            while(sampleProvider.Read(samples, 0, samples.Length) > 0)
            {
                if (waveformInputStream.WaveFormat.Channels == 1) {
                    floatList.AddRange(samples);
                } else if (waveformInputStream.WaveFormat.Channels == 2) {
                    switch(stereoProcessing) {
                        case StereoProcessingType.CHANNEL_STEREO_LEFT:
                            for (int i = 0; i < samples.Length; i+=2) {
                                float left = samples[i];
                                float right = samples[i+1];
                                floatList.Add(left);
                            }
                            break;
                        case StereoProcessingType.CHANNEL_STEREO_RIGHT:
                            for (int i = 0; i < samples.Length; i+=2) {
                                float left = samples[i];
                                float right = samples[i+1];
                                floatList.Add(right);
                            }
                            break;
                        case StereoProcessingType.CHANNEL_MONOMIX:
                        default:
                            for (int i = 0; i < samples.Length; i+=2) {
                                float left = samples[i];
                                float right = samples[i+1];
                                // Mix down to mono by averaging the left and right samples.
                                floatList.Add(( (left + right) / 2.0f));
                            }
                            break;
                    }
                }

                if (waveformGenerateWorker.CancellationPending)
                {
                    e.Cancel = true;
                    break;
                }
            }

            Dispatcher.CurrentDispatcher.Invoke(new Action(() =>
                                                           {
                                                           	WaveformData = floatList.ToArray();
                                                           }));

            waveformInputStream.Close();
            waveformInputStream.Dispose();
            waveformInputStream = null;
        }
Developer: remy22, Project: AudioVSTToolbox, Lines: 72, Source: NAudioEngine.cs

Example 2: waveformGenerateWorker_DoWork

        private void waveformGenerateWorker_DoWork(object sender, DoWorkEventArgs e)
        {
            WaveformGenerationParams waveformParams = e.Argument as WaveformGenerationParams;
            Mp3FileReader waveformMp3Stream = new Mp3FileReader(waveformParams.Path);
            WaveChannel32 waveformInputStream = new WaveChannel32(waveformMp3Stream);
            waveformInputStream.Sample += waveStream_Sample;

            int frameLength = fftDataSize;
            int frameCount = (int)((double)waveformInputStream.Length / (double)frameLength);
            int waveformLength = frameCount * 2;
            byte[] readBuffer = new byte[frameLength];
            waveformAggregator = new SampleAggregator(frameLength);

            float maxLeftPointLevel = float.MinValue;
            float maxRightPointLevel = float.MinValue;
            int currentPointIndex = 0;
            float[] waveformCompressedPoints = new float[waveformParams.Points];
            List<float> waveformData = new List<float>();
            List<int> waveMaxPointIndexes = new List<int>();

            for (int i = 1; i <= waveformParams.Points; i++)
            {
                waveMaxPointIndexes.Add((int)Math.Round(waveformLength * ((double)i / (double)waveformParams.Points), 0));
            }
            int readCount = 0;
            while (currentPointIndex * 2 < waveformParams.Points && waveformInputStream.Position < (waveformInputStream.Length - 1024))
            {
                waveformInputStream.Read(readBuffer, 0, readBuffer.Length);

                waveformData.Add(waveformAggregator.LeftMaxVolume);
                waveformData.Add(waveformAggregator.RightMaxVolume);

                if (waveformAggregator.LeftMaxVolume > maxLeftPointLevel)
                    maxLeftPointLevel = waveformAggregator.LeftMaxVolume;
                if (waveformAggregator.RightMaxVolume > maxRightPointLevel)
                    maxRightPointLevel = waveformAggregator.RightMaxVolume;

                if (readCount > waveMaxPointIndexes[currentPointIndex])
                {
                    waveformCompressedPoints[(currentPointIndex * 2)] = maxLeftPointLevel;
                    waveformCompressedPoints[(currentPointIndex * 2) + 1] = maxRightPointLevel;
                    maxLeftPointLevel = float.MinValue;
                    maxRightPointLevel = float.MinValue;
                    currentPointIndex++;
                }
                if (readCount % 3000 == 0)
                {
                    float[] clonedData = (float[])waveformCompressedPoints.Clone();
                    App.Current.Dispatcher.Invoke(new Action(() =>
                    {
                        WaveformData = clonedData;
                    }));
                }

                if (waveformGenerateWorker.CancellationPending)
                {
                    e.Cancel = true;
                    break;
                }
                readCount++;
            }

            float[] finalClonedData = (float[])waveformCompressedPoints.Clone();
            App.Current.Dispatcher.Invoke(new Action(() =>
            {
                fullLevelData = waveformData.ToArray();
                WaveformData = finalClonedData;
            }));
            waveformInputStream.Close();
            waveformInputStream.Dispose();
            waveformInputStream = null;
            waveformMp3Stream.Close();
            waveformMp3Stream.Dispose();
            waveformMp3Stream = null;
        }
Developer: moezRebai, Project: LightMusicPalyer, Lines: 75, Source: NAudioEngine.cs

Example 3: RealMix

        private byte[] RealMix(ReceivedRtp item1, ReceivedRtp item2)
        {
            if (item1 == null || item2 == null) return null;

            if (item1.size == 0 || item2.size == 0) return null;

            byte[] wavSrc1 = new byte[item1.size - headersize];
            byte[] wavSrc2 = new byte[item2.size - headersize];

            Array.Copy(item1.buff, headersize, wavSrc1, 0, (item1.size - headersize));
            Array.Copy(item2.buff, headersize, wavSrc2, 0, (item2.size - headersize));

            WaveMixerStream32 mixer = new WaveMixerStream32();
            // mixer.AutoStop = true;
            MemoryStream memstrem = new MemoryStream(wavSrc1);
            RawSourceWaveStream rawsrcstream = new RawSourceWaveStream(memstrem, this.codec);
            WaveFormatConversionStream conversionstream = new WaveFormatConversionStream(pcmFormat16, rawsrcstream);
            WaveChannel32 channelstream = new WaveChannel32(conversionstream);
            mixer.AddInputStream(channelstream);

            memstrem = new MemoryStream(wavSrc2);
            rawsrcstream = new RawSourceWaveStream(memstrem, this.codec);
            conversionstream = new WaveFormatConversionStream(pcmFormat16, rawsrcstream);
            channelstream = new WaveChannel32(conversionstream);
            mixer.AddInputStream(channelstream);
            mixer.Position = 0;

            Wave32To16Stream to16 = new Wave32To16Stream(mixer);
            var convStm = new WaveFormatConversionStream(pcmFormat8, to16);
            byte[] mixedbytes = new byte[(int)convStm.Length];
            int chk = convStm.Read(mixedbytes, 0, (int)convStm.Length);
            //Buffer.BlockCopy(tobyte, 0, writingBuffer, 0, tobyte.Length);

            memstrem.Close();
            rawsrcstream.Close();
            conversionstream.Close();
            channelstream.Close();

            convStm.Close(); convStm.Dispose(); convStm = null;
            to16.Close(); to16.Dispose(); to16 = null;
            mixer.Close(); mixer.Dispose(); mixer = null;

            return mixedbytes;
        }
Developer: step4u, Project: MiniCRM, Lines: 44, Source: RtpRecordInfo.cs
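A side note on Example 3: every intermediate stream there is released with an explicit Close()/Dispose() call. A sketch of the same mixing chain written with C# using blocks, so that Close() runs automatically even if a Read throws, might look like the following. The 8 kHz, 16-bit mono format is an assumption standing in for the example's pcmFormat16, and the codec-to-PCM conversion step is omitted.

    using System.IO;
    using NAudio.Wave;

    static class MixingSketch
    {
        // Mix two equally sized 16-bit PCM buffers; a hedged rewrite of the cleanup
        // pattern in Example 3 using using-blocks instead of explicit Close() calls.
        public static byte[] Mix(byte[] pcmA, byte[] pcmB)
        {
            // Assumed format: 8 kHz, 16-bit, mono PCM.
            WaveFormat format = new WaveFormat(8000, 16, 1);
            WaveMixerStream32 mixer = new WaveMixerStream32();

            using (var sourceA = new RawSourceWaveStream(new MemoryStream(pcmA), format))
            using (var sourceB = new RawSourceWaveStream(new MemoryStream(pcmB), format))
            using (var channelA = new WaveChannel32(sourceA))
            using (var channelB = new WaveChannel32(sourceB))
            using (mixer)
            using (var to16 = new Wave32To16Stream(mixer))
            {
                mixer.AddInputStream(channelA);
                mixer.AddInputStream(channelB);
                mixer.Position = 0;

                byte[] mixed = new byte[(int)to16.Length];
                to16.Read(mixed, 0, mixed.Length);
                return mixed;  // Dispose()/Close() runs for every stream when the block exits.
            }
        }
    }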

Example 4: waveformGenerateWorker_DoWork

        private void waveformGenerateWorker_DoWork(object sender, DoWorkEventArgs e)
        {
            var waveformParams = (WaveformGenerationParams)e.Argument;

            float[] waveformDataFromCache;
            if (waveformDataCache.TryGet(waveformParams.Path, out waveformDataFromCache))
            {
                Application.Current.Dispatcher.Invoke(() => WaveformData = waveformDataFromCache);
                return;
            }

            Mp3FileReader waveformMp3Stream = new Mp3FileReader(waveformParams.Path);
            WaveChannel32 waveformInputStream = new WaveChannel32(waveformMp3Stream);
            waveformInputStream.Sample += waveStream_Sample;

            int frameLength = fftDataSize;
            int frameCount = (int)((double)waveformInputStream.Length / (double)frameLength);
            int waveformLength = frameCount * 2;
            byte[] readBuffer = new byte[frameLength];
            waveformAggregator = new SampleAggregator(frameLength);

            float maxLeftPointLevel = float.MinValue;
            float maxRightPointLevel = float.MinValue;
            int currentPointIndex = 0;
            float[] waveformCompressedPoints = new float[waveformParams.Points];
            List<float> waveformData = new List<float>();
            List<int> waveMaxPointIndexes = new List<int>();

            for (int i = 1; i <= waveformParams.Points; i++)
            {
                waveMaxPointIndexes.Add((int)Math.Round(waveformLength * ((double)i / (double)waveformParams.Points), 0));
            }
            int readCount = 0;
            while (currentPointIndex * 2 < waveformParams.Points)
            {
                waveformInputStream.Read(readBuffer, 0, readBuffer.Length);

                waveformData.Add(waveformAggregator.LeftMaxVolume);
                waveformData.Add(waveformAggregator.RightMaxVolume);

                if (waveformAggregator.LeftMaxVolume > maxLeftPointLevel)
                    maxLeftPointLevel = waveformAggregator.LeftMaxVolume;
                if (waveformAggregator.RightMaxVolume > maxRightPointLevel)
                    maxRightPointLevel = waveformAggregator.RightMaxVolume;

                if (readCount > waveMaxPointIndexes[currentPointIndex])
                {
                    waveformCompressedPoints[(currentPointIndex * 2)] = maxLeftPointLevel;
                    waveformCompressedPoints[(currentPointIndex * 2) + 1] = maxRightPointLevel;
                    maxLeftPointLevel = float.MinValue;
                    maxRightPointLevel = float.MinValue;
                    currentPointIndex++;
                }
                if (readCount % 3000 == 0)
                {
                    float[] clonedData = (float[])waveformCompressedPoints.Clone();

                    try
                    {
                        App.Current.Dispatcher.Invoke(new Action(() =>
                        {
                            WaveformData = clonedData;
                        }));
                    }
                    catch (TaskCanceledException)
                    {
                        e.Cancel = true;
                        return;
                    }
                    
                }

                if (waveformGenerateWorker.CancellationPending)
                {
                    e.Cancel = true;
                    break;
                }
                readCount++;
            }

            float[] finalClonedData = (float[])waveformCompressedPoints.Clone();

            // Don't cache half-finished waveforms (my CDJ-1000MK3s do this...)
            if (!waveformGenerateWorker.CancellationPending)
                waveformDataCache.Add(waveformParams.Path, finalClonedData);

            App.Current.Dispatcher.Invoke(new Action(() =>
            {
                fullLevelData = waveformData.ToArray();
                WaveformData = finalClonedData;
            }));
            waveformInputStream.Close();
            waveformInputStream.Dispose();
            waveformInputStream = null;
            waveformMp3Stream.Close();
            waveformMp3Stream.Dispose();
            waveformMp3Stream = null;
        }
Developer: rdingwall, Project: mixplanner, Lines: 98, Source: NAudioEngine.cs

Example 5: LoadSound

        private void LoadSound(WaveChannel32 sound, int index)
        {
            int count = 0;
            int read = 0;
            sound.Sample += Sound0_Sample;
            bufferSize = 1024 * sampleRate * 16 / 256000 * Channels;

            byte[] buffer = new byte[bufferSize];

            while (sound.Position < sound.Length)
            {
                max = -1;
                min = 1;

                read = sound.Read(buffer, 0, bufferSize);
                pwfc.WaveFormDisplay.AddValue(max, min);
                count++;
            }

            sound.Close();
            wfr.Close();
            Debug.WriteLine("Sound is " + sound.TotalTime.TotalMilliseconds + "ms long");
            Debug.WriteLine("Sound is " + wfr.Length + " bytes");
            Debug.WriteLine("Called addvalue " + count + " times");
        }
Developer: CaffeineAU, Project: TTSTranslator, Lines: 25, Source: AudioEditor.xaml.cs

Example 6: worker_DoWork

        private void worker_DoWork(object sender, DoWorkEventArgs e)
        {
            Mp3FileReader reader = new Mp3FileReader(FileName);
            WaveChannel32 channel = new WaveChannel32(reader);
            channel.Sample += new EventHandler<SampleEventArgs>(channel_Sample);

            int points = 2000;

            int frameLength = (int)FFTDataSize.FFT2048;
            int frameCount = (int)((double)channel.Length / (double)frameLength);
            int waveformLength = frameCount * 2;
            byte[] readBuffer = new byte[frameLength];

            float maxLeftPointLevel = float.MinValue;
            float maxRightPointLevel = float.MinValue;
            int currentPointIndex = 0;
            float[] waveformCompressedPoints = new float[points];
            List<float> waveformData = new List<float>();
            List<int> waveMaxPointIndexes = new List<int>();

            for (int i = 1; i <= points; i++)
            {
                waveMaxPointIndexes.Add((int)Math.Round(waveformLength * ((double)i / (double)points), 0));
            }
            int readCount = 0;
            while (currentPointIndex * 2 < points)
            {
                channel.Read(readBuffer, 0, readBuffer.Length);

                waveformData.Add(InputSampler.LeftMax);
                waveformData.Add(InputSampler.RightMax);

                if (InputSampler.LeftMax > maxLeftPointLevel)
                    maxLeftPointLevel = InputSampler.LeftMax;
                if (InputSampler.RightMax > maxRightPointLevel)
                    maxRightPointLevel = InputSampler.RightMax;

                if (readCount > waveMaxPointIndexes[currentPointIndex])
                {
                    waveformCompressedPoints[(currentPointIndex * 2)] = maxLeftPointLevel;
                    waveformCompressedPoints[(currentPointIndex * 2) + 1] = maxRightPointLevel;
                    maxLeftPointLevel = float.MinValue;
                    maxRightPointLevel = float.MinValue;
                    currentPointIndex++;
                }
                if (readCount % 3000 == 0)
                {
                    WaveformData = (float[])waveformCompressedPoints.Clone();
                }

                if (worker.CancellationPending)
                {
                    e.Cancel = true;
                    break;
                }
                readCount++;
            }

            FullLevelData = waveformData.ToArray();
            WaveformData = (float[])waveformCompressedPoints.Clone();

            // Cleanup
            channel.Close();
            channel.Dispose();
            channel = null;
            reader.Close();
            reader.Dispose();
            reader = null;
        }
Developer: pbeardshear, Project: TempoMonkey, Lines: 69, Source: WaveformTimeline.cs

Example 7: ProcessMixing2

        private void ProcessMixing2(RcvData data, int dataSize)
        {
            string processingFn = string.Format("d:\\{0}_{1}_{2}.wav", data.seqnum, data.extension, data.peernumber);

            List<RecInfos> ls0 = lExtension0.FindAll(
                        delegate(RecInfos list)
                        {
                            return list.rcvData.Equals(data) && list.isExtension == 0;
                        });

            List<RecInfos> ls1 = lExtension1.FindAll(
                        delegate(RecInfos list)
                        {
                            return list.rcvData.Equals(data) && list.isExtension == 1;
                        });

            IsExtensionComparer isExtensionCompare = new IsExtensionComparer();
            ls0.Sort(isExtensionCompare);
            ls1.Sort(isExtensionCompare);

            int count = 0;
            int count0 = ls0.Count();
            int count1 = ls1.Count();

            if (count0 - count1 < 0)
                count = count0;
            else
                count = count1;

            byte[] buffWriting = new byte[320 * count];

            for (int i = 0; i < count; i++)
            {
                if (ls0[i].seq == ls1[i].seq)
                {
                    // Mixing
                    // The byte length can differ depending on the codec; check the possible cases before implementing this for real.
                    byte[] wavSrc0 = new byte[160];
                    byte[] wavSrc1 = new byte[160];

                    Array.Copy(ls0[i].voice, 12, wavSrc0, 0, wavSrc0.Length);
                    Array.Copy(ls1[i].voice, 12, wavSrc1, 0, wavSrc1.Length);

                    WaveMixerStream32 mixer = new WaveMixerStream32();
                    //mixer.AutoStop = true;

                    WaveChannel32 channelStm = null;

                    MemoryStream memStm = null;
                    BufferedStream bufStm = null;
                    RawSourceWaveStream rawSrcStm = null;
                    WaveFormatConversionStream conversionStm = null;

                    for (int j = 0; j < 2; j++)
                    {
                        if (j == 0)
                            memStm = new MemoryStream(wavSrc0);
                        else
                            memStm = new MemoryStream(wavSrc1);

                        bufStm = new BufferedStream(memStm);
                        rawSrcStm = new RawSourceWaveStream(bufStm, mulawFormat);
                        conversionStm = new WaveFormatConversionStream(pcmFormat, rawSrcStm);

                        channelStm = new WaveChannel32(conversionStm);
                        mixer.AddInputStream(channelStm);
                    }
                    mixer.Position = 0;

                    Wave32To16Stream to16 = new Wave32To16Stream(mixer);
                    var convStm = new WaveFormatConversionStream(pcmFormat, to16);
                    byte[] tobyte = new byte[(int)convStm.Length];
                    int chk = convStm.Read(tobyte, 0, (int)convStm.Length);
                    Buffer.BlockCopy(tobyte, 0, buffWriting, i * tobyte.Length, tobyte.Length);

                    conversionStm.Close();
                    rawSrcStm.Close();
                    bufStm.Close();
                    memStm.Close();

                    convStm.Close();
                    to16.Close();
                    channelStm.Close();
                    mixer.Close();

                    // Remove the processed items
                    lExtension0.Remove(ls0[i]);
                    lExtension1.Remove(ls1[i]);
                }
                else if (ls0[i].seq - ls1[i].seq < 0)
                {
                    // Mix only ls0
                    // Append the byte[] to the ls0 source > convert the source byte[] to a WaveStream > save as a WAV file

                    // Mixing
                    // The byte length can differ depending on the codec; check the possible cases before implementing this for real.
                    byte[] wavSrc0 = new byte[160];
                    byte[] wavSrc1 = new byte[160];

                    Array.Copy(ls0[i].voice, 12, wavSrc0, 0, wavSrc0.Length);
//......... part of the code is omitted here .........
Developer: step4u, Project: CallService, Lines: 101, Source: RTPRecorder.cs


Note: The NAudio.Wave.WaveChannel32.Close method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; please consult each project's License before redistributing or using the code. Do not reproduce without permission.