This article collects typical usage examples of the C# XAudio2 class from SharpDX. If you are wondering what the XAudio2 class is for and how to use it, the curated code examples below may help.
The XAudio2 class belongs to the SharpDX.XAudio2 namespace. Fifteen code examples are shown below, ordered by popularity.
Example 1: SubmixVoice
/// <summary>
/// Creates and configures a submix voice with an effect chain.
/// </summary>
/// <param name="device">an instance of <see cref = "SharpDX.XAudio2.XAudio2" /></param>
/// <param name="inputChannels">[in] Number of channels in the input audio data of the submix voice. InputChannels must be less than or equal to XAUDIO2_MAX_AUDIO_CHANNELS. </param>
/// <param name="inputSampleRate">[in] Sample rate of the input audio data of submix voice. This rate must be a multiple of XAUDIO2_QUANTUM_DENOMINATOR. InputSampleRate must be between XAUDIO2_MIN_SAMPLE_RATE and XAUDIO2_MAX_SAMPLE_RATE. </param>
/// <param name="flags">[in] Flags that specify the behavior of the submix voice. Can be 0 or the following: ValueDescriptionXAUDIO2_VOICE_USEFILTERThe filter effect should be available on this voice.? </param>
/// <param name="processingStage">[in] An arbitrary number that specifies when this voice is processed with respect to other submix voices, if the XAudio2 engine is running other submix voices. The voice is processed after all other voices that include a smaller ProcessingStage value, and before all other voices that include a larger ProcessingStage value. Voices that include the same ProcessingStage value are processed in any order. A submix voice cannot send to another submix voice with a lower or equal ProcessingStage value; this prevents audio being lost due to a submix cycle. </param>
/// <param name="effectDescriptors">[in, optional] Pointer to a list of XAUDIO2_EFFECT_CHAIN structures that describe an effect chain to use in the submix voice.</param>
/// <returns>No documentation.</returns>
/// <unmanaged>HRESULT IXAudio2::CreateSubmixVoice([Out] IXAudio2SubmixVoice** ppSubmixVoice,[None] UINT32 InputChannels,[None] UINT32 InputSampleRate,[None] UINT32 Flags,[None] UINT32 ProcessingStage,[In, Optional] const XAUDIO2_VOICE_SENDS* pSendList,[In, Optional] const XAUDIO2_EFFECT_CHAIN* pEffectChain)</unmanaged>
public SubmixVoice(XAudio2 device, int inputChannels, int inputSampleRate, SubmixVoiceFlags flags, int processingStage, EffectDescriptor[] effectDescriptors)
    : base(IntPtr.Zero)
{
    if (effectDescriptors != null)
    {
        unsafe
        {
            var tempSendDescriptor = new EffectChain();
            var effectDescriptorNatives = new EffectDescriptor.__Native[effectDescriptors.Length];
            for (int i = 0; i < effectDescriptorNatives.Length; i++)
                effectDescriptors[i].__MarshalTo(ref effectDescriptorNatives[i]);
            tempSendDescriptor.EffectCount = effectDescriptorNatives.Length;
            fixed (void* pEffectDescriptors = &effectDescriptorNatives[0])
            {
                tempSendDescriptor.EffectDescriptorPointer = (IntPtr)pEffectDescriptors;
                device.CreateSubmixVoice(this, inputChannels, inputSampleRate, unchecked((int)flags), processingStage, null, tempSendDescriptor);
            }
        }
    }
    else
    {
        device.CreateSubmixVoice(this, inputChannels, inputSampleRate, unchecked((int)flags), processingStage, null, null);
    }
}
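A minimal usage sketch for this constructor follows. The SharpDX.XAPO.Fx.Reverb effect and the stereo/44100 Hz parameters are illustrative assumptions, not part of the example above.

var device = new XAudio2();
var masteringVoice = new MasteringVoice(device);

// Assumption: SharpDX.XAPO.Fx.Reverb wraps the built-in XAudio2 reverb XAPO.
// This is the SharpDX 3.x constructor signature; older versions take no argument.
var reverb = new SharpDX.XAPO.Fx.Reverb(device);
var effects = new[] { new EffectDescriptor(reverb) };

// Route audio through a stereo, 44100 Hz submix voice with the reverb attached.
var submix = new SubmixVoice(device, 2, 44100, SubmixVoiceFlags.None, 0, effects);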
Example 2: XAudioDevice
public XAudioDevice()
{
    if (StackTraceExtensions.StartedFromNUnitConsoleButNotFromNCrunch)
        return;
    XAudio = new XAudio2();
    MasteringVoice = new MasteringVoice(XAudio);
}
Example 3: InitializeMediaPlayer
/// <summary>
/// Initialize the media element for playback
/// </summary>
/// <param name="streamConfig">Object containing stream configuration details</param>
void InitializeMediaPlayer(MoonlightStreamConfiguration streamConfig, AvStreamSource streamSource)
{
    this._streamSource = streamSource;

    _videoMss = new MediaStreamSource(new VideoStreamDescriptor(VideoEncodingProperties.CreateH264()));
    _videoMss.BufferTime = TimeSpan.Zero;
    _videoMss.CanSeek = false;
    _videoMss.Duration = TimeSpan.Zero;
    _videoMss.SampleRequested += _videoMss_SampleRequested;

    // 48 kHz, 16-bit stereo audio pipeline
    XAudio2 xaudio = new XAudio2();
    MasteringVoice masteringVoice = new MasteringVoice(xaudio, 2, 48000);
    WaveFormat format = new WaveFormat(48000, 16, 2);

    // Set for low latency playback
    StreamDisplay.RealTimePlayback = true;
    // Render on the full window to avoid extra compositing
    StreamDisplay.IsFullWindow = true;
    // Disable built-in transport controls
    StreamDisplay.AreTransportControlsEnabled = false;
    // Start playing right away
    StreamDisplay.AutoPlay = true;
    StreamDisplay.SetMediaStreamSource(_videoMss);

    AvStream.SetSourceVoice(new SourceVoice(xaudio, format));
}
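For context, here is a hedged sketch of how decoded PCM could be handed to the SourceVoice created above. The helper name and the pcmBytes input are assumptions for illustration, not part of the Moonlight code.

// Hypothetical helper: submits raw PCM bytes (matching the
// 48 kHz/16-bit/stereo WaveFormat above) to an XAudio2 voice.
static void SubmitPcm(SourceVoice sourceVoice, byte[] pcmBytes)
{
    var dataStream = new SharpDX.DataStream(pcmBytes.Length, true, true);
    dataStream.Write(pcmBytes, 0, pcmBytes.Length);
    dataStream.Position = 0;

    sourceVoice.SubmitSourceBuffer(new AudioBuffer
    {
        Stream = dataStream,
        AudioBytes = pcmBytes.Length,
        Flags = BufferFlags.None
    }, null);
    sourceVoice.Start();
}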
Example 4: AudioPlayer
/// <summary>
/// Initializes a new instance of the <see cref="AudioPlayer" /> class.
/// </summary>
/// <param name="xaudio2">The xaudio2 engine.</param>
/// <param name="audioStream">The input audio stream.</param>
public AudioPlayer(XAudio2 xaudio2, Stream audioStream)
{
    this.xaudio2 = xaudio2;
    audioDecoder = new AudioDecoder(audioStream);
    sourceVoice = new SourceVoice(xaudio2, audioDecoder.WaveFormat, 0, 1.0f);
    localVolume = 1.0f;

    sourceVoice.BufferEnd += sourceVoice_BufferEnd;
    sourceVoice.Start();

    bufferEndEvent = new AutoResetEvent(false);
    playEvent = new ManualResetEvent(false);
    waitForPlayToOutput = new ManualResetEvent(false);
    clock = new Stopwatch();

    // Pre-allocate buffers
    audioBuffersRing = new AudioBuffer[3];
    memBuffers = new DataPointer[audioBuffersRing.Length];
    for (int i = 0; i < audioBuffersRing.Length; i++)
    {
        audioBuffersRing[i] = new AudioBuffer();
        memBuffers[i].Size = 32 * 1024; // default size: 32 KB
        memBuffers[i].Pointer = Utilities.AllocateMemory(memBuffers[i].Size);
    }

    // Initialize to stopped
    State = AudioPlayerState.Stopped;

    // Start the playing thread
    playingTask = Task.Factory.StartNew(PlayAsync, TaskCreationOptions.LongRunning);
}
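A hedged usage sketch for this class: the Play() call is an assumption based on the play event the constructor sets up, and the file path is illustrative; neither appears in the original example.

var xaudio2 = new XAudio2();
var masteringVoice = new MasteringVoice(xaudio2);

using (var stream = File.OpenRead("music.mp3")) // assumed input file
{
    var player = new AudioPlayer(xaudio2, stream);
    player.Play(); // hypothetical: signals playEvent so the PlayAsync loop starts streaming
    Console.ReadLine();
}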
Example 5: WaveManager
public WaveManager()
{
    xAudio = new XAudio2();
    var mastering = new MasteringVoice(xAudio);
    mastering.SetVolume(1, 0); // full volume; the second argument is the XAudio2 operation set
    xAudio.StartEngine();
}
Example 6: InitializeMediaPlayer
/// <summary>
/// Initialize the media element for playback
/// </summary>
/// <param name="streamConfig">Object containing stream configuration details</param>
void InitializeMediaPlayer(MoonlightStreamConfiguration streamConfig, AvStreamSource streamSource)
{
    this._streamSource = streamSource;

    // This code is based upon the MS FFmpegInterop project on GitHub
    VideoEncodingProperties videoProps = VideoEncodingProperties.CreateH264();
    videoProps.ProfileId = H264ProfileIds.High;
    videoProps.Width = (uint)streamConfig.GetWidth();
    videoProps.Height = (uint)streamConfig.GetHeight();
    videoProps.Bitrate = (uint)streamConfig.GetBitrate();

    _videoMss = new MediaStreamSource(new VideoStreamDescriptor(videoProps));
    _videoMss.BufferTime = TimeSpan.Zero;
    _videoMss.CanSeek = false;
    _videoMss.Duration = TimeSpan.Zero;
    _videoMss.SampleRequested += _videoMss_SampleRequested;

    XAudio2 xaudio = new XAudio2();
    MasteringVoice masteringVoice = new MasteringVoice(xaudio, 2, 48000);
    WaveFormat format = new WaveFormat(48000, 16, 2);

    // Set for low latency playback
    StreamDisplay.RealTimePlayback = true;
    // Render on the full window to avoid extra compositing
    StreamDisplay.IsFullWindow = true;
    // Disable built-in transport controls
    StreamDisplay.AreTransportControlsEnabled = false;
    StreamDisplay.SetMediaStreamSource(_videoMss);

    AvStream.SetSourceVoice(new SourceVoice(xaudio, format));
}
Example 7: EffectManager
public EffectManager(XAudio2 xaudio2, int maxInstances, string soundPath)
{
    this.xaudio2 = xaudio2;
    this.maxInstances = maxInstances;
    this.soundPath = soundPath;
    this.instances = new Dictionary<WaveFormat, List<SourceVoice>>();
}
Example 8: MySourceVoice
public MySourceVoice(XAudio2 device, WaveFormat sourceFormat)
{
    // `true` enables voice callbacks so the BufferEnd event can fire
    m_voice = new SourceVoice(device, sourceFormat, true);
    m_voice.BufferEnd += OnStopPlaying;
    m_valid = true;
    Flush();
}
Example 9: HasDeviceChanged
public static unsafe bool HasDeviceChanged(XAudio2 engine, string displayName)
{
    // IXAudio2::GetDeviceDetails sits at vtable slot 4 in the pre-2.8 (DirectX SDK) XAudio2 interface
    const int GetDeviceDetailsMethodOffset = 4;
    XAUDIO2_DEVICE_DETAILS details;
    var result = (Result)NativeCall.Function<int, IntPtr, int, IntPtr>(
        new NativeFunction(engine.NativePointer, GetDeviceDetailsMethodOffset),
        engine.NativePointer, 0, new IntPtr(&details));
    result.CheckError();
    return !displayName.Equals(details.DisplayName, 256);
}
Example 10: MySourceVoice
public MySourceVoice(XAudio2 device, WaveFormat sourceFormat)
{
    m_voice = new SourceVoice(device, sourceFormat, true);
    m_voice.BufferEnd += OnStopPlayingBuffered;
    m_valid = true;
    m_dataStreams = new Queue<DataStream>();
    Flush();
}
Example 11: SoundManager
public SoundManager(int sounds)
{
    _audio = new XAudio2();
    _masteringVoice = new MasteringVoice(_audio);
    _masteringVoice.SetVolume(0.5f);

    _soundStreams = new SoundStream[sounds];
    _audioBuffers = new AudioBuffer[sounds];
    _sourceVoices = new SourceVoice[sounds];
}
Example 12: Audio
public Audio(String fileName)
{
    device = new XAudio2();
    masteringVoice = new MasteringVoice(device);

    stream = new SoundStream(File.OpenRead("Content/" + fileName));
    buffer = new AudioBuffer
    {
        Stream = stream.ToDataStream(),
        AudioBytes = (int)stream.Length,
        Flags = BufferFlags.EndOfStream
    };
    stream.Close();
}
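To actually hear the buffer, the class needs a play step along these lines. This is a minimal sketch assuming the device, stream, and buffer fields from the constructor above; the original example does not show this method.

// Hypothetical Play method for the Audio class above.
public void Play()
{
    var voice = new SourceVoice(device, stream.Format, true);
    // DecodedPacketsInfo is only needed for compressed formats such as xWMA.
    voice.SubmitSourceBuffer(buffer, stream.DecodedPacketsInfo);
    voice.Start();
}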
Example 13: SoundManager
public SoundManager(int cntVoices)
{
    audio = new XAudio2();
    masteringVoice = new MasteringVoice(audio);
    masteringVoice.SetVolume(0.5f);

    voices = new SourceVoice[cntVoices];
    buffers = new AudioBuffer[cntVoices];
    streams = new SoundStream[cntVoices];
}
Example 14: MySourceVoicePool
public MySourceVoicePool(XAudio2 audioEngine, WaveFormat waveformat, MyCueBank owner)
{
    m_audioEngine = audioEngine;
    m_waveFormat = waveformat;
    m_owner = owner;
    m_availableVoices = new MyConcurrentQueue<MySourceVoice>(32);
    m_fadingOutVoices = new List<MySourceVoice>();
    m_currentCount = 0;
}
Example 15: InitializeSoundEffect
internal static void InitializeSoundEffect()
{
    try
    {
        if (Device == null)
        {
#if !WINRT && DEBUG
            try
            {
                // Fails if the XAudio2 SDK is not installed
                Device = new XAudio2(XAudio2Flags.DebugEngine, ProcessorSpecifier.DefaultProcessor);
                Device.StartEngine();
            }
            catch
#endif
            {
                Device = new XAudio2(XAudio2Flags.None, ProcessorSpecifier.DefaultProcessor);
                Device.StartEngine();
            }
        }

        // Just use the default device.
#if WINRT
        string deviceId = null;
#else
        const int deviceId = 0;
#endif

        if (MasterVoice == null)
        {
            // Let Windows autodetect the number of channels and the sample rate.
            MasterVoice = new MasteringVoice(Device, XAudio2.DefaultChannels, XAudio2.DefaultSampleRate, deviceId);
            MasterVoice.SetVolume(_masterVolume, 0);
        }

        // The autodetected value of MasterVoice.ChannelMask corresponds to the speaker layout.
#if WINRT
        Speakers = (Speakers)MasterVoice.ChannelMask;
#else
        var deviceDetails = Device.GetDeviceDetails(deviceId);
        Speakers = deviceDetails.OutputFormat.ChannelMask;
#endif
    }
    catch
    {
        // Release the device and null it as we have no audio support.
        if (Device != null)
        {
            Device.Dispose();
            Device = null;
        }
        MasterVoice = null;
    }
}
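On shutdown, voices should be released before the engine. A minimal teardown sketch under that assumption follows; ShutdownSoundEffect is a hypothetical counterpart to the method above, not part of the original code.

// Hypothetical teardown, mirroring InitializeSoundEffect:
// dispose the mastering voice before stopping and disposing the engine.
internal static void ShutdownSoundEffect()
{
    if (MasterVoice != null)
    {
        MasterVoice.Dispose();
        MasterVoice = null;
    }
    if (Device != null)
    {
        Device.StopEngine();
        Device.Dispose();
        Device = null;
    }
}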