当前位置: 首页>>代码示例>>C#>>正文


C# WaveFormat类代码示例

本文整理汇总了C#中WaveFormat的典型用法代码示例。如果您正苦于以下问题:C# WaveFormat类的具体用法?C# WaveFormat怎么用?C# WaveFormat使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


在下文中一共展示了WaveFormat类的15个代码示例(该类所属的命名空间因项目而异),这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。

示例1: CreateMediaObject

        /// <summary>
        /// Creates the media object backing this decoder: instantiates the MP3
        /// decoder DMO COM class and wraps its IMediaObject interface pointer.
        /// The format parameters are not needed here; the DMO is configured later.
        /// </summary>
        protected override MediaObject CreateMediaObject(WaveFormat inputFormat, WaveFormat outputFormat)
        {
            _comObj = new DmoMP3DecoderObject();
            IntPtr comInterfacePtr = Marshal.GetComInterfaceForObject(_comObj, typeof(IMediaObject));
            return new MediaObject(comInterfacePtr);
        }
开发者ID:CheViana,项目名称:AudioLab,代码行数:7,代码来源:DmoMP3Decoder.cs

示例2: AACEncoder

        /// <summary>
        /// Creates an AAC encoder that writes encoded audio to <paramref name="targetStream"/>.
        /// </summary>
        /// <param name="sourceFormat">Format of the PCM input data to encode.</param>
        /// <param name="targetStream">Writable stream that receives the encoded output.</param>
        /// <param name="defaultBitrate">Preferred target bitrate; the closest supported media type is chosen.</param>
        /// <param name="containerType">Media Foundation container type GUID for the output.</param>
        public AACEncoder(WaveFormat sourceFormat, Stream targetStream, int defaultBitrate, Guid containerType)
        {
            if (sourceFormat == null)
                throw new ArgumentNullException("sourceFormat"); // fixed: was misspelled "sourceForamt"

            if (targetStream == null)
                throw new ArgumentNullException("targetStream");
            if (!targetStream.CanWrite)
                throw new ArgumentException("Stream is not writeable.");

            if (defaultBitrate <= 0)
                throw new ArgumentOutOfRangeException("defaultBitrate");

            if (containerType == Guid.Empty)
                throw new ArgumentNullException("containerType");

            // Ask Media Foundation for the best AAC media type matching the
            // source's sample rate / channel count at the requested bitrate.
            var targetMediaType = FindBestMediaType(MFMediaTypes.MFAudioFormat_AAC,
                sourceFormat.SampleRate, sourceFormat.Channels, defaultBitrate);

            if (targetMediaType == null)
                throw new NotSupportedException("No AAC-Encoder was found. Check whether your system supports AAC encoding.");

            var sourceMediaType = MediaFoundationCore.MediaTypeFromWaveFormat(sourceFormat);

            SetTargetStream(targetStream, sourceMediaType, targetMediaType, containerType);
        }
开发者ID:CheViana,项目名称:AudioLab,代码行数:26,代码来源:AACEncoder.cs

示例3: StartCapture

        /// <summary>
        /// (Re)starts audio capture at the given sample rate using the supplied
        /// DirectSound capture device (or the default device when null).
        /// Resets all per-session recording state and spawns the capture thread.
        /// </summary>
        /// <param name="sampleRate">Sample rate in Hz for the capture format.</param>
        /// <param name="captureDevice">Capture device to use; null selects the system default.</param>
        public void StartCapture(int sampleRate, Capture captureDevice)
        {
            // Tear down any previous session and flush pending requests first.
            StopCapture();
            EmptyRequest();

            // Reset per-session recording state.
            this.sampleRate = sampleRate;
            readPos = 0;
            IsRecording = false;
            record = null;
            recordTime = 0;
            noRecordTime = 0;
            lastSample = null;
            lastSize = 0;

            capture = (captureDevice == null) ? new Capture() : captureDevice;

            // Capture format: 16-bit mono PCM at the requested rate.
            WaveFormat waveFormat = new WaveFormat();
            waveFormat.BitsPerSample = 16;
            waveFormat.BlockAlign = 2; // 1 channel * 16 bits / 8
            waveFormat.Channels = 1;
            waveFormat.AverageBytesPerSecond = sampleRate * 2; // rate * blockAlign
            waveFormat.SamplesPerSecond = sampleRate;
            waveFormat.FormatTag = WaveFormatTag.Pcm;

            CaptureBufferDescription captureBuffDesc = new CaptureBufferDescription();
            captureBuffDesc.BufferBytes = bufferSize;
            captureBuffDesc.Format = waveFormat;

            captureBuffer = new CaptureBuffer(captureBuffDesc, capture);
            captureBuffer.Start(true); // true = capture loops continuously

            // One thread drains the capture buffer; a second services requests.
            captureThread = new Thread(captureLoop);
            captureThread.Start();
            new Thread(EmptyRequest).Start();
        }
开发者ID:ClusterM,项目名称:google-speech-to-text-api.net,代码行数:35,代码来源:SpeechCapture.cs

示例4: WriteWave

        /// <summary>
        /// Writes a complete RIFF/WAVE file to the stream: a "fmt " chunk
        /// serialized from <paramref name="WaveFormat"/> followed by a "data"
        /// chunk whose payload is produced by <paramref name="Writer"/>.
        /// </summary>
        /// <param name="Stream">Destination stream for the WAVE file.</param>
        /// <param name="Writer">Callback that writes the raw sample data.</param>
        /// <param name="WaveFormat">Format header describing the sample data.</param>
        public void WriteWave(Stream Stream, Action Writer, WaveFormat WaveFormat)
        {
            // Keep references for WriteChunk and subsequent writes.
            this.Stream = Stream;
            this.BinaryWriter = new BinaryWriter(Stream);

            WriteChunk("RIFF", () =>
            {
                // RIFF form type identifier.
                Stream.Write(Encoding.ASCII.GetBytes("WAVE"), 0, 4);

                // "fmt " chunk: header fields serialized in WAVE file order.
                WriteChunk("fmt ", () =>
                {
                    var headerWriter = new BinaryWriter(Stream);
                    headerWriter.Write(WaveFormat.CompressionCode);
                    headerWriter.Write(WaveFormat.NumberOfChannels);
                    headerWriter.Write(WaveFormat.SampleRate);
                    headerWriter.Write(WaveFormat.BytesPerSecond);
                    headerWriter.Write(WaveFormat.BlockAlignment);
                    headerWriter.Write(WaveFormat.BitsPerSample);
                    headerWriter.Write(WaveFormat.Padding);
                });

                // "data" chunk: delegate emits the sample payload.
                WriteChunk("data", Writer);
            });
        }
开发者ID:hermitdave,项目名称:nvorbis,代码行数:26,代码来源:Wav.cs

示例5: MonoToStereoSource

 /// <summary>
 /// Wraps a mono source and exposes it as 32-bit IEEE-float stereo.
 /// </summary>
 /// <param name="source">Underlying wave stream; must have exactly one channel.</param>
 /// <exception cref="ArgumentException">Thrown when the source is not mono.</exception>
 public MonoToStereoSource(IWaveStream source)
     : base(source)
 {
     // A mono-to-stereo converter can only accept single-channel input.
     // (Message fixed: it previously claimed the source had to be "stereo(1 channel)".)
     if (source.WaveFormat.Channels != 1)
         throw new ArgumentException("format of source has to be mono(1 channel)", "source");
     // Output format: same sample rate, 32-bit IEEE float, 2 channels.
     _waveFormat = new WaveFormat(source.WaveFormat.SampleRate, 32, 2, AudioEncoding.IeeeFloat);
 }
开发者ID:CheViana,项目名称:AudioLab,代码行数:7,代码来源:MonoToSteroSource.cs

示例6: XAudio2Renderer

		/// <summary>
		/// Sets up the XAudio2 engine with a stereo 44.1 kHz mastering voice
		/// and a PCM wave format (remaining format fields configured later).
		/// </summary>
		public unsafe XAudio2Renderer()
		{
			waveFormat = new WaveFormat { FormatTag = WaveFormatTag.Pcm };
			xAudio = new XAudio2(XAudio2Flags.None, ProcessorSpecifier.AnyProcessor);
			masteringVoice = new MasteringVoice(xAudio, 2, 44100);
		}
开发者ID:cros107,项目名称:CrystalBoy,代码行数:7,代码来源:XAudio2Renderer.cs

示例7: FromWaveFormat

        /// <summary>
        ///     Creates a MediaType based on a given WaveFormat. Don't forget to call Free() for the returned MediaType.
        /// </summary>
        /// <param name="waveFormat">WaveFormat to create a MediaType from.</param>
        /// <returns>Dmo MediaType</returns>
        public static MediaType FromWaveFormat(WaveFormat waveFormat)
        {
            if (waveFormat == null)
                throw new ArgumentNullException("waveFormat");

            var mediaType = new MediaType();
            NativeMethods.MoInitMediaType(ref mediaType, Marshal.SizeOf(waveFormat));

            mediaType.MajorType = AudioSubTypes.MediaTypeAudio;
            mediaType.SubType = WaveFormatExtensible.SubTypeFromWaveFormat(waveFormat);
            // PCM and IEEE-float streams have fixed-size samples; others may not.
            mediaType.FixedSizeSamples = (mediaType.SubType == AudioSubTypes.IeeeFloat ||
                                          mediaType.SubType == AudioSubTypes.Pcm)
                ? 1
                : 0;
            mediaType.FormatType = FORMAT_WaveFormatEx;

            // Validate BEFORE writing to unmanaged memory (the original code
            // called StructureToPtr first and only then checked the pointer
            // and the reserved format-buffer size).
            if (mediaType.CbFormat < Marshal.SizeOf(waveFormat))
                throw new InvalidOperationException("No memory for Format reserved");

            IntPtr hWaveFormat = Marshal.AllocHGlobal(Marshal.SizeOf(waveFormat));
            if (hWaveFormat == IntPtr.Zero)
                throw new InvalidOperationException("hWaveFormat == IntPtr.Zero");

            // Copy the managed WaveFormat into the unmanaged block; ownership
            // passes to the MediaType (released via Free()).
            Marshal.StructureToPtr(waveFormat, hWaveFormat, false);
            mediaType.PtrFormat = hWaveFormat;

            return mediaType;
        }
开发者ID:hoangduit,项目名称:cscore,代码行数:33,代码来源:MediaType.cs

示例8: AcmStream

        /// <summary>
        /// Creates a new ACM stream to convert one format to another. Note that
        /// not all conversions can be done in one step
        /// </summary>
        /// <param name="sourceFormat">The source audio format</param>
        /// <param name="destFormat">The destination audio format</param>
        public AcmStream(WaveFormat sourceFormat, WaveFormat destFormat)
        {
            try
            {
                streamHandle = IntPtr.Zero;
                this.sourceFormat = sourceFormat;
                // Source buffer: at least 16 KB (or one second of audio),
                // rounded down to a whole number of blocks.
                int sourceBufferSize = Math.Max(16384, sourceFormat.AverageBytesPerSecond);
                sourceBufferSize -= (sourceBufferSize % sourceFormat.BlockAlign);
                // Open the conversion stream; no specific driver (IntPtr.Zero)
                // and no filter, in non-realtime mode.
                MmException.Try(AcmInterop.acmStreamOpen(out streamHandle, IntPtr.Zero, sourceFormat, destFormat, null, 0, 0, AcmStreamOpenFlags.NonRealTime), "acmStreamOpen");
                
                // horrible stuff due to weird Marshalling issues
                /*
                IntPtr sourceFormatPointer = WaveFormat.MarshalToPtr(sourceFormat);
                IntPtr destFormatPointer = WaveFormat.MarshalToPtr(destFormat);
                MmResult result = AcmInterop.acmStreamOpen2(out streamHandle, IntPtr.Zero, sourceFormatPointer, destFormatPointer, null, 0, 0, AcmStreamOpenFlags.NonRealTime);
                Marshal.FreeHGlobal(sourceFormatPointer);
                Marshal.FreeHGlobal(destFormatPointer);
                MmException.Try(result, "acmStreamOpen");*/

                // Destination size is derived from the source size by the
                // converter's own ratio (SourceToDest).
                streamHeader = new AcmStreamHeader(streamHandle, sourceBufferSize, SourceToDest(sourceBufferSize));
                driverHandle = IntPtr.Zero;
            }
            catch
            {
                // suppress the finalise and clean up resources
                Dispose();
                throw;
            }
        }
开发者ID:Punloeu,项目名称:karaoke,代码行数:35,代码来源:AcmStream.cs

示例9: Wa2Input

        /// <summary>
        /// Parses a WA2 audio container: reads the fixed 0x2C-byte header,
        /// validates the "WAVEfmt " signature, extracts the wave format from
        /// fixed offsets, then decodes the compressed payload into a PCM
        /// memory stream. The input stream is disposed when done.
        /// </summary>
        /// <param name="file">Stream positioned at the start of the WA2 data.</param>
        public Wa2Input(Stream file)
            : base(null)
        {
            var header = new byte[0x2C];
            if (header.Length != file.Read (header, 0, header.Length))
                throw new EndOfStreamException();
            // Signature "WAVEfmt " must appear at offset 8.
            if (!Binary.AsciiEqual (header, 8, "WAVEfmt "))
                throw new InvalidFormatException();

            // Wave format fields live at fixed little-endian offsets.
            var format = new WaveFormat();
            format.FormatTag                = LittleEndian.ToUInt16 (header, 0x14);
            format.Channels                 = LittleEndian.ToUInt16 (header, 0x16);
            format.SamplesPerSecond         = LittleEndian.ToUInt32 (header, 0x18);
            format.AverageBytesPerSecond    = LittleEndian.ToUInt32 (header, 0x1C);
            format.BlockAlign               = LittleEndian.ToUInt16 (header, 0x20);
            format.BitsPerSample            = LittleEndian.ToUInt16 (header, 0x22);
            format.ExtraSize                = 0;
            this.Format = format;

            // Decode the whole payload up front into an in-memory PCM buffer.
            uint pcm_size = LittleEndian.ToUInt32 (header, 0x28);
            var pcm = new byte[pcm_size];
            Decode (file, pcm);
            Source = new MemoryStream (pcm);
            this.PcmSize = pcm_size;
            file.Dispose();
        }
开发者ID:Casidi,项目名称:GARbro,代码行数:26,代码来源:AudioWA2.cs

示例10: WmaWriter

        /// <summary>
        /// Create the writer indicating Metadata information
        /// </summary>
        /// <param name="output"><see cref="System.IO.Stream"/> Where resulting WMA string will be written</param>
        /// <param name="format">PCM format of input data received in <see cref="WmaWriter.Write"/> method</param>
        /// <param name="profile">IWMProfile that describe the resulting compressed stream</param>
        /// <param name="metadataAttributes">Array of <see cref="yeti.wma.structs.WM_Attr"/> structures describing the metadata information that will be in the result stream</param>
        public WmaWriter(Stream output, WaveFormat format, IWMProfile profile, IEnumerable<WM_Attr> metadataAttributes)
            : base(output, format)
        {
            // Create the WM writer and register this instance as its sink so
            // encoded output flows back through our base-class stream.
            m_Writer = WM.CreateWriter();
            var wa = (IWMWriterAdvanced)m_Writer;
            wa.AddSink((IWMWriterSink)this);
            m_Writer.SetProfile(profile);
            uint inputs;
            m_Writer.GetInputCount(out inputs);
            // Only single-audio-input profiles are supported.
            if (inputs == 1)
            {
                IWMInputMediaProps inpProps;
                Guid type;
                m_Writer.GetInputProps(0, out inpProps);
                inpProps.GetType(out type);
                if (type == MediaTypes.WMMEDIATYPE_Audio)
                {
                    // Describe our PCM input to the writer.
                    WM_MEDIA_TYPE mt;
                    mt.majortype = MediaTypes.WMMEDIATYPE_Audio;
                    mt.subtype = MediaTypes.WMMEDIASUBTYPE_PCM;
                    mt.bFixedSizeSamples = true;
                    mt.bTemporalCompression = false;
                    mt.lSampleSize = (uint)m_InputDataFormat.nBlockAlign;
                    mt.formattype = MediaTypes.WMFORMAT_WaveFormatEx;
                    mt.pUnk = IntPtr.Zero;
                    mt.cbFormat = (uint)Marshal.SizeOf(m_InputDataFormat);

                    // Pin the managed format struct only for the duration of
                    // the SetMediaType call; the writer copies the data.
                    GCHandle h = GCHandle.Alloc(m_InputDataFormat, GCHandleType.Pinned);
                    try
                    {
                        mt.pbFormat = h.AddrOfPinnedObject();
                        inpProps.SetMediaType(ref mt);
                    }
                    finally
                    {
                        h.Free();
                    }
                    m_Writer.SetInputProps(0, inpProps);
                    // Apply any caller-supplied header metadata before writing begins.
                    if (metadataAttributes != null)
                    {
                        var info = new WMHeaderInfo((IWMHeaderInfo)m_Writer);
                        foreach (WM_Attr attr in metadataAttributes)
                        {
                            info.SetAttribute(attr);
                        }
                        info = null;
                    }
                    m_Writer.BeginWriting();
                    m_Profile = profile;
                }
                else
                {
                    throw new ArgumentException("Invalid profile", "profile");
                }
            }
            else
            {
                throw new ArgumentException("Invalid profile", "profile");
            }
        }
开发者ID:pclancy,项目名称:yeti,代码行数:67,代码来源:WmaWriter.cs

示例11: Sound

		/// <summary>
		/// Sets up DirectSound playback for the given window handle: a
		/// secondary buffer in 44.1 kHz 16-bit stereo PCM. When no device is
		/// supplied, only the managed staging buffer is allocated.
		/// </summary>
		public Sound(IntPtr handle, DirectSound device)
		{
			if (device != null)
			{
				device.SetCooperativeLevel(handle, CooperativeLevel.Priority);

				// 44.1 kHz, 16-bit, stereo PCM.
				var fmt = new WaveFormat
					{
						SamplesPerSecond = 44100,
						BitsPerSample = 16,
						Channels = 2,
						FormatTag = WaveFormatTag.Pcm,
						BlockAlignment = 4
					};
				// bytes/sec = rate * channels * bytes-per-sample
				fmt.AverageBytesPerSecond = fmt.SamplesPerSecond * fmt.Channels * (fmt.BitsPerSample / 8);

				var bufferDesc = new SoundBufferDescription
					{
						Format = fmt,
						Flags = BufferFlags.GlobalFocus
							| BufferFlags.Software
							| BufferFlags.GetCurrentPosition2
							| BufferFlags.ControlVolume,
						SizeInBytes = BufferSize
					};
				DSoundBuffer = new SecondarySoundBuffer(device, bufferDesc);
				ChangeVolume(Global.Config.SoundVolume);
			}

			// The managed staging buffer exists even without a device.
			SoundBuffer = new byte[BufferSize];
			disposed = false;
		}
开发者ID:ddugovic,项目名称:RASuite,代码行数:30,代码来源:Sound.cs

示例12: DmoChannelResampler

        /// <summary>
        /// Initializes a new instance of the <see cref="DmoChannelResampler"/> class.
        /// </summary>
        /// <param name="source">Underlying source which has to get resampled.</param>
        /// <param name="channelMatrix"><see cref="ChannelMatrix" /> which defines how to map each channel.</param>
        /// <param name="outputFormat">Waveformat, which specifies the new format. Note, that by far not all formats are supported.</param>
        /// <exception cref="System.ArgumentNullException">
        /// source
        /// or
        /// channelMatrix
        /// or
        /// outputFormat
        /// </exception>
        /// <exception cref="System.ArgumentException">The number of channels of the source has to be equal to the number of input channels specified by the channelMatrix.</exception>
        public DmoChannelResampler(IWaveSource source, ChannelMatrix channelMatrix, WaveFormat outputFormat)
            : base(source, outputFormat)
        {
            if (source == null)
                throw new ArgumentNullException("source");
            if (channelMatrix == null)
                throw new ArgumentNullException("channelMatrix");
            if(outputFormat == null)
                throw new ArgumentNullException("outputFormat");

            if (source.WaveFormat.Channels != channelMatrix.InputChannelCount)
            {
                throw new ArgumentException(
                    "The number of channels of the source has to be equal to the number of input channels specified by the channelMatrix.");
            }

            var inputFormat = new WaveFormatExtensible(
                source.WaveFormat.SampleRate,
                source.WaveFormat.BitsPerSample,
                source.WaveFormat.Channels,
                WaveFormatExtensible.SubTypeFromWaveFormat(source.WaveFormat),
                channelMatrix.InputMask);

            Outputformat = new WaveFormatExtensible(
                outputFormat.SampleRate,
                outputFormat.BitsPerSample,
                outputFormat.Channels,
                WaveFormatExtensible.SubTypeFromWaveFormat(outputFormat),
                channelMatrix.OutputMask);

            Initialize(inputFormat, Outputformat);
            _channelMatrix = channelMatrix;
            CommitChannelMatrixChanges();
        }
开发者ID:opcon,项目名称:cscore,代码行数:48,代码来源:DmoChannelResampler.cs

示例13: DirectSoundSecondaryBuffer

        /// <summary>
        /// Initializes a new instance of the <see cref="DirectSoundSecondaryBuffer"/> class.
        /// </summary>
        /// <param name="directSound">A <see cref="DirectSoundBase"/> instance which provides the <see cref="DirectSoundBase.CreateSoundBuffer"/> method.</param>
        /// <param name="waveFormat">The <see cref="WaveFormat"/> of the sound buffer.</param>
        /// <param name="bufferSize">The buffer size. Internally, the <see cref="DSBufferDescription.BufferBytes"/> will be set to <paramref name="bufferSize"/> * 2.</param>
        /// <exception cref="System.ArgumentNullException"><paramref name="directSound"/> or <paramref name="waveFormat"/></exception>
        /// <exception cref="ArgumentOutOfRangeException"><paramref name="bufferSize"/> must be a value between 4 and 0x0FFFFFFF.</exception>
        public DirectSoundSecondaryBuffer(DirectSoundBase directSound, WaveFormat waveFormat, int bufferSize)
        {
            if (directSound == null)
                throw new ArgumentNullException("directSound");
            if (waveFormat == null)
                throw new ArgumentNullException("waveFormat");
            if(bufferSize < 4 || bufferSize > 0x0FFFFFFF)
                throw new ArgumentOutOfRangeException("bufferSize");

            DSBufferDescription secondaryBufferDesc = new DSBufferDescription()
            {
                BufferBytes = bufferSize,
                Flags = DSBufferCapsFlags.ControlFrequency | DSBufferCapsFlags.ControlPan |
                          DSBufferCapsFlags.ControlVolume | DSBufferCapsFlags.ControlPositionNotify |
                          DSBufferCapsFlags.GetCurrentPosition2 | DSBufferCapsFlags.GlobalFocus |
                          DSBufferCapsFlags.StickyFocus,
                Reserved = 0,
                Guid3DAlgorithm = Guid.Empty
            };

            secondaryBufferDesc.Size = Marshal.SizeOf(secondaryBufferDesc);
            GCHandle hWaveFormat = GCHandle.Alloc(waveFormat, GCHandleType.Pinned);
            try
            {
                secondaryBufferDesc.PtrFormat = hWaveFormat.AddrOfPinnedObject();
                //Create(directSound, secondaryBufferDesc);
                BasePtr = directSound.CreateSoundBuffer(secondaryBufferDesc, IntPtr.Zero);
            }
            finally
            {
                hWaveFormat.Free();
            }
        }
开发者ID:hoangduit,项目名称:cscore,代码行数:41,代码来源:DirectSoundSecondaryBuffer.cs

示例14: AddMixerInput

 /// <summary>
 /// Adds a new mixer input
 /// </summary>
 /// <param name="mixerInput">Mixer input</param>
 public void AddMixerInput(ISampleProvider mixerInput)
 {
     // Lock only around the Add: we guard against an AddMixerInput racing a
     // Read, not against two concurrent AddMixerInput calls.
     lock (sources)
     {
         if (this.sources.Count >= maxInputs)
         {
             throw new InvalidOperationException("Too many mixer inputs");
         }
         this.sources.Add(mixerInput);
     }

     if (this.waveFormat == null)
     {
         // The first input defines the mixer's output format.
         this.waveFormat = mixerInput.WaveFormat;
         return;
     }

     // Subsequent inputs must match the established format.
     bool sampleRateMatches = this.WaveFormat.SampleRate == mixerInput.WaveFormat.SampleRate;
     bool channelsMatch = this.WaveFormat.Channels == mixerInput.WaveFormat.Channels;
     if (!sampleRateMatches || !channelsMatch)
     {
         throw new ArgumentException("All mixer inputs must have the same WaveFormat");
     }
 }
开发者ID:hanistory,项目名称:hasuite,代码行数:29,代码来源:MixingSampleProvider.cs

示例15: ini

        /// <summary>
        /// Initializes recording for the karaoke file referenced by this form's
        /// Tag: creates a temporary capture WAV ("&lt;name&gt;T.wav"), sets up the
        /// capture device/buffer/notifications, and loads matching lyrics (.lrc)
        /// and wave metadata. On failure, reports the error and closes the form.
        /// </summary>
        public void ini()
        {
            this.Text = this.Tag.ToString();
            file = this.Text;
            mWavFormat = SetWaveFormat();
            try
            {
                f.Hide();

                // Temporary capture target next to the source file.
                // (Path.Combine replaces the hand-built "\\" / "//" concatenations,
                // which previously used inconsistent separators.)
                string tempWav = Path.Combine(
                    Path.GetDirectoryName(this.Text),
                    Path.GetFileNameWithoutExtension(this.Text) + "T.wav");
                if (File.Exists(tempWav))
                    File.Delete(tempWav);
                CreateWaveFile(tempWav);
                CreateCaptuerDevice();
                CreateCaptureBuffer();
                CreateNotification();

                // Load the lyrics file when one exists beside the audio file.
                string lrcPath = Path.Combine(
                    Path.GetDirectoryName(file),
                    Path.GetFileNameWithoutExtension(file) + ".lrc");
                if (File.Exists(lrcPath))
                {
                    lrc = new Lyrics(file);
                }
                wi = new WaveInfo(file);
                progressBar1.Maximum = (int)wi.Second;
            }
            catch (Exception ex)
            {
                // Surface the actual failure instead of a bare "Error"
                // (the caught exception was previously ignored).
                MessageBox.Show("Error: " + ex.Message);
                f.Show();
                this.Close();
            }
        }
开发者ID:764664,项目名称:SimpleKaraoke,代码行数:30,代码来源:Form2.cs


注:本文中的WaveFormat类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。