This article compiles typical usage examples of the C++ method FAudioDeviceManager::TrackResource. If you are unsure how FAudioDeviceManager::TrackResource is used in practice, the curated code examples below should help. You can also explore further usage examples of its containing class, FAudioDeviceManager.
Four code examples of the FAudioDeviceManager::TrackResource method are shown below, sorted by popularity by default.
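All four examples follow the same pattern: a platform-specific sound buffer is created and filled from a USoundWave, and is then registered with the engine's audio device manager. A minimal sketch of that shared registration step, lifted from the examples below (Wave and Buffer stand in for the platform-specific objects):

// Shared pattern used by every example on this page; Wave and Buffer are placeholders.
FAudioDeviceManager* AudioDeviceManager = GEngine->GetAudioDeviceManager();
check(AudioDeviceManager != nullptr);            // the manager is expected to exist once GEngine is initialized
AudioDeviceManager->TrackResource(Wave, Buffer); // associate the buffer with the USoundWave that produced it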
Example 1: CreateNativeBuffer
/**
 * Static function used to create an XAudio2 buffer and upload decompressed Ogg Vorbis data into it.
 *
 * @param XAudio2Device audio device to attach the created buffer to
 * @param Wave          USoundWave to use as template and wave source
 * @return FXAudio2SoundBuffer pointer if buffer creation succeeded, NULL otherwise
 */
FXAudio2SoundBuffer* FXAudio2SoundBuffer::CreateNativeBuffer( FXAudio2Device* XAudio2Device, USoundWave* Wave )
{
    // Check whether the decompressor has finished on the other thread
    if( Wave->AudioDecompressor != NULL )
    {
        Wave->AudioDecompressor->EnsureCompletion();

        // Remove the decompressor
        delete Wave->AudioDecompressor;
        Wave->AudioDecompressor = NULL;
    }

    // Create new buffer.
    FXAudio2SoundBuffer* Buffer = new FXAudio2SoundBuffer( XAudio2Device, SoundFormat_PCM );

    // Take ownership of the PCM data
    Buffer->PCM.PCMData = Wave->RawPCMData;
    Buffer->PCM.PCMDataSize = Wave->RawPCMDataSize;
    Wave->RawPCMData = NULL;

    // Keep track of the associated resource name.
    Buffer->InitWaveFormatEx( WAVE_FORMAT_PCM, Wave, true );

    // Register the buffer with the audio device manager
    FAudioDeviceManager* AudioDeviceManager = GEngine->GetAudioDeviceManager();
    check(AudioDeviceManager != nullptr);
    AudioDeviceManager->TrackResource(Wave, Buffer);

    Wave->RemoveAudioResource();

    return( Buffer );
}
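A hypothetical call site for the factory above might look like the following; Device and Wave are illustrative names and are assumed to be a valid FXAudio2Device and a USoundWave whose PCM data has already been decompressed:

// Hypothetical usage sketch; the names are placeholders, not part of the example above.
FXAudio2SoundBuffer* NewBuffer = FXAudio2SoundBuffer::CreateNativeBuffer( Device, Wave );
if( NewBuffer != NULL )
{
    // The buffer now owns Wave->RawPCMData and has already been registered via TrackResource.
}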
Example 2: CreatePreviewBuffer
/**
 * Static function used to create a Core Audio buffer and upload raw PCM data into it.
 *
 * @param CoreAudioDevice audio device to attach the created buffer to
 * @param Wave            USoundWave to use as template and wave source
 * @param Buffer          previously created preview buffer to release, may be NULL
 * @return FCoreAudioSoundBuffer pointer if buffer creation succeeded, NULL otherwise
 */
FCoreAudioSoundBuffer* FCoreAudioSoundBuffer::CreatePreviewBuffer( FCoreAudioDevice* CoreAudioDevice, USoundWave* Wave, FCoreAudioSoundBuffer* Buffer )
{
    FAudioDeviceManager* AudioDeviceManager = GEngine->GetAudioDeviceManager();
    check(AudioDeviceManager != nullptr);

    // Free any existing preview buffer before creating its replacement
    if (Buffer)
    {
        AudioDeviceManager->FreeBufferResource( Buffer );
    }

    // Create new buffer.
    Buffer = new FCoreAudioSoundBuffer( CoreAudioDevice, SoundFormat_PCMPreview );

    // Take ownership of the PCM data
    Buffer->PCMData = Wave->RawPCMData;
    Buffer->PCMDataSize = Wave->RawPCMDataSize;
    Wave->RawPCMData = NULL;

    // Copy over whether this data should be freed on delete
    Buffer->bDynamicResource = Wave->bDynamicResource;

    Buffer->InitAudioStreamBasicDescription( kAudioFormatLinearPCM, Wave, true );

    AudioDeviceManager->TrackResource( Wave, Buffer );

    return( Buffer );
}
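Unlike the other factories on this page, CreatePreviewBuffer accepts an existing buffer and releases it through FAudioDeviceManager::FreeBufferResource before tracking the replacement, which presumably lets a preview path re-audition the same USoundWave without leaking the previous buffer. A hypothetical sketch of that pattern (PreviewBuffer, CoreAudioDevice and Wave are illustrative names):

// Hypothetical re-audition sketch: each call frees the old preview buffer and tracks a new one.
static FCoreAudioSoundBuffer* PreviewBuffer = NULL;
PreviewBuffer = FCoreAudioSoundBuffer::CreatePreviewBuffer( CoreAudioDevice, Wave, PreviewBuffer );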
Example 3: CreateNativeBuffer
FIOSAudioSoundBuffer* FIOSAudioSoundBuffer::CreateNativeBuffer(FIOSAudioDevice* IOSAudioDevice, USoundWave* InWave)
{
    FWaveModInfo WaveInfo;

    // Load the compressed audio data and parse its wave header
    InWave->InitAudioResource(IOSAudioDevice->GetRuntimeFormat(InWave));
    if (!InWave->ResourceData || InWave->ResourceSize <= 0 || !WaveInfo.ReadWaveInfo(InWave->ResourceData, InWave->ResourceSize))
    {
        InWave->RemoveAudioResource();
        return NULL;
    }

    uint32 UncompressedBlockSize = 0;
    uint32 CompressedBlockSize = 0;
    const uint32 PreambleSize = 7;
    const uint32 BlockSize = *WaveInfo.pBlockAlign;

    switch (*WaveInfo.pFormatTag)
    {
        case SoundFormat_ADPCM:
            // (BlockSize - PreambleSize) * 2 (samples per byte) + 2 (preamble samples)
            UncompressedBlockSize = (2 + (BlockSize - PreambleSize) * 2) * sizeof(int16);
            CompressedBlockSize = BlockSize;

            // The compressed data must consist of whole ADPCM blocks
            if ((WaveInfo.SampleDataSize % CompressedBlockSize) != 0)
            {
                InWave->RemoveAudioResource();
                return NULL;
            }
            break;

        case SoundFormat_LPCM:
            break;
    }

    // Create new buffer
    FIOSAudioSoundBuffer* Buffer = new FIOSAudioSoundBuffer(IOSAudioDevice, static_cast<ESoundFormat>(*WaveInfo.pFormatTag));
    Buffer->NumChannels = InWave->NumChannels;
    Buffer->SampleRate = InWave->SampleRate;
    Buffer->UncompressedBlockSize = UncompressedBlockSize;
    Buffer->CompressedBlockSize = CompressedBlockSize;
    Buffer->BufferSize = WaveInfo.SampleDataSize;

    // Copy the sample data into memory owned by the buffer
    Buffer->SampleData = static_cast<int16*>(FMemory::Malloc(Buffer->BufferSize));
    FMemory::Memcpy(Buffer->SampleData, WaveInfo.SampleDataStart, Buffer->BufferSize);

    // Register the buffer with the audio device manager, then drop the source resource
    FAudioDeviceManager* AudioDeviceManager = GEngine->GetAudioDeviceManager();
    check(AudioDeviceManager != nullptr);
    AudioDeviceManager->TrackResource(InWave, Buffer);

    InWave->RemoveAudioResource();
    return Buffer;
}
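To make the ADPCM branch concrete with a worked example: assuming a hypothetical block alignment of 36 bytes (the value is illustrative only), UncompressedBlockSize = (2 + (36 - 7) * 2) * sizeof(int16) = 60 * 2 = 120 bytes and CompressedBlockSize = 36, i.e. every 36-byte compressed block decodes to 60 16-bit samples, and SampleDataSize must be an exact multiple of 36 or the function bails out.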
Example 4: CreateNativeBuffer
FALSoundBuffer* FALSoundBuffer::CreateNativeBuffer( FALAudioDevice* AudioDevice, USoundWave* Wave )
{
    SCOPE_CYCLE_COUNTER( STAT_AudioResourceCreationTime );

    // Can't create a buffer without any source data
    if( Wave == NULL || Wave->NumChannels == 0 )
    {
        return( NULL );
    }

    // This code is not relevant on HTML5 for now, but it is kept for consistency with the other platforms.
    // Check to see if the decompressor has finished on the other thread
    if (Wave->AudioDecompressor != NULL)
    {
        Wave->AudioDecompressor->EnsureCompletion();

        // Remove the decompressor
        delete Wave->AudioDecompressor;
        Wave->AudioDecompressor = NULL;
    }

    Wave->InitAudioResource(AudioDevice->GetRuntimeFormat(Wave));

    FALSoundBuffer* Buffer = NULL;
    FAudioDeviceManager* AudioDeviceManager = GEngine->GetAudioDeviceManager();
    check(AudioDeviceManager != nullptr);

    // Find the existing buffer if any
    if( Wave->ResourceID )
    {
        Buffer = static_cast<FALSoundBuffer*>(AudioDeviceManager->WaveBufferMap.FindRef(Wave->ResourceID));
    }

    if( Buffer == NULL )
    {
        // Create new buffer.
        Buffer = new FALSoundBuffer( AudioDevice );

        alGenBuffers( 1, Buffer->BufferIds );
        AudioDevice->alError( TEXT( "RegisterSound" ) );

        AudioDeviceManager->TrackResource(Wave, Buffer);

        Buffer->InternalFormat = AudioDevice->GetInternalFormat( Wave->NumChannels );
        Buffer->NumChannels = Wave->NumChannels;
        Buffer->SampleRate = Wave->SampleRate;

        if (Wave->RawPCMData)
        {
            // Upload the decompressed PCM data
            Buffer->BufferSize = Wave->RawPCMDataSize;
            alBufferData( Buffer->BufferIds[0], Buffer->InternalFormat, Wave->RawPCMData, Wave->RawPCMDataSize, Buffer->SampleRate );

            // Free up the data if necessary
            if( Wave->bDynamicResource )
            {
                FMemory::Free( Wave->RawPCMData );
                Wave->RawPCMData = NULL;
                Wave->bDynamicResource = false;
            }
        }
        else
        {
            // Get the raw data
            uint8* SoundData = ( uint8* )Wave->RawData.Lock( LOCK_READ_ONLY );
            // It's (possibly) a pointer to a wave file, so skip over the header
            int SoundDataSize = Wave->RawData.GetBulkDataSize();

            // Is there a wave header?
            FWaveModInfo WaveInfo;
            if (WaveInfo.ReadWaveInfo(SoundData, SoundDataSize))
            {
                // If so, adjust the location and size of the sound data based on the header
                SoundData = WaveInfo.SampleDataStart;
                SoundDataSize = WaveInfo.SampleDataSize;
            }

            // Let the buffer know the final size
            Buffer->BufferSize = SoundDataSize;

            // Upload it
            alBufferData( Buffer->BufferIds[0], Buffer->InternalFormat, SoundData, Buffer->BufferSize, Buffer->SampleRate );

            // Unlock the raw data
            Wave->RawData.Unlock();
        }

        if( AudioDevice->alError( TEXT( "RegisterSound (buffer data)" ) ) || ( Buffer->BufferSize == 0 ) )
        {
            Buffer->InternalFormat = 0;
        }

        if( Buffer->InternalFormat == 0 )
        {
            UE_LOG( LogAudio, Log, TEXT( "Audio: sound format not supported for '%s' (%d)" ), *Wave->GetName(), Wave->NumChannels );
            delete Buffer;
            Buffer = NULL;
        }
    }
//.........part of the code is omitted here.........