本文整理汇总了C++中CAStreamBasicDescription类的典型用法代码示例。如果您正苦于以下问题:C++ CAStreamBasicDescription类的具体用法?C++ CAStreamBasicDescription怎么用?C++ CAStreamBasicDescription使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了CAStreamBasicDescription类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: GetOALFormatFromASBD
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Map a Core Audio stream description to the matching OpenAL buffer-format
// constant. Only 8/16-bit integer mono/stereo linear PCM is supported;
// every other layout (including float PCM) yields 0.
UInt32 GetOALFormatFromASBD(CAStreamBasicDescription &inASBD)
{
    if (inASBD.mFormatID != kAudioFormatLinearPCM)
        return 0;                               // only linear PCM can be mapped

    if (inASBD.mFormatFlags & kAudioFormatFlagIsFloat)
        return 0;                               // float currently unsupported

    const UInt32 channels = inASBD.NumberChannels();
    const UInt32 bits     = inASBD.mBitsPerChannel;

    if (bits == 16)
    {
        if (channels == 1) return AL_FORMAT_MONO16;
        if (channels == 2) return AL_FORMAT_STEREO16;
    }
    else if (bits == 8)
    {
        if (channels == 1) return AL_FORMAT_MONO8;
        if (channels == 2) return AL_FORMAT_STEREO8;
    }

    return 0;                                   // unsupported channel/bit-depth combination
}
示例2: AUEffectBase
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// karoke::karoke
//
// Construct the effect unit: build the AU elements, pin input/output
// element 0 to fixed channel counts at the host sample rate, publish the
// indexed parameter set, and allocate one 200-tap low-pass FIR filter
// per channel.
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
karoke::karoke(AudioUnit component)
    : AUEffectBase(component, false)    // NOTE(review): second arg presumably disables in-place processing — confirm against AUEffectBase
{
    CreateElements();   // must run before any element/stream-format access below

    CAStreamBasicDescription streamDescIn;
    streamDescIn.SetCanonical(NUM_INPUTS, false);   // number of input channels, deinterleaved
    streamDescIn.mSampleRate = GetSampleRate();

    CAStreamBasicDescription streamDescOut;
    streamDescOut.SetCanonical(NUM_OUTPUTS, false); // number of output channels, deinterleaved
    streamDescOut.mSampleRate = GetSampleRate();

    Inputs().GetIOElement(0)->SetStreamFormat(streamDescIn);
    Outputs().GetIOElement(0)->SetStreamFormat(streamDescOut);

    Globals()->UseIndexedParameters(kNumberOfParameters);
    SetParameter(kParam_One, kDefaultValue_ParamOne );

#if AU_DEBUG_DISPATCHER
    mDebugDispatcher = new AUDebugDispatcher (this);
#endif

    // One FIR filter per stereo channel; both use the same 200-tap
    // low-pass coefficient table.
    mLeftFilter = new FirFilter(200);
    mLeftFilter->setCoeffecients(lp_200, 200);
    mRightFilter = new FirFilter(200);
    mRightFilter->setCoeffecients(lp_200, 200);
}
示例3: AUEffectBase
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// AUPulseDetector::AUPulseDetector
//
// Construct the detector: mono in/out fixed at 44.1 kHz, five indexed
// global parameters initialized to their defaults, and an array of
// pulse time-stamp records for statistics gathering.
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
AUPulseDetector::AUPulseDetector(AudioUnit component)
    : AUEffectBase(component),
    mChildObject(NULL)
{
    CreateElements();   // must run before GetOutput/GetInput below

    CAStreamBasicDescription monoDesc;
    monoDesc.SetAUCanonical (1, false);     // 1 channel, deinterleaved
    monoDesc.mSampleRate = 44100.;          // fixed rate; not derived from the host
    GetOutput(0)->SetStreamFormat(monoDesc);
    GetInput(0)->SetStreamFormat(monoDesc);

    Globals()->UseIndexedParameters (5);    // NOTE(review): literal 5 should equal the number of parameter ids set below — confirm
    Globals()->SetParameter (kPulseThreshold, kPulseThresholdDefault);
    Globals()->SetParameter (kPulseLength, kPulseLengthDefault);
    Globals()->SetParameter (kPulseRestTime, kPulseRestTimeDefault);
    Globals()->SetParameter (kDoPulseDetection, kDoPulseDetectionDefault);
    Globals()->SetParameter (kWritePulseStats, 0);  // stats writing off by default

    mPulseTimeStats = new PulseTS[kPulseTSSize];

#if AU_DEBUG_DISPATCHER
    mDebugDispatcher = new AUDebugDispatcher (this);
#endif
}
示例4: HasDynamicScope
// Configure a scope that supports a variable number of elements.
// Returns kAudioUnitErr_InvalidProperty when the scope is not dynamic,
// kAudioUnitErr_InvalidPropertyValue when the requested channel total
// exceeds what the unit advertises, and otherwise applies the element
// count and a deinterleaved canonical format per element.
OSStatus CAAudioUnit::ConfigureDynamicScope (AudioUnitScope     inScope,
                                             UInt32             inNumElements,
                                             UInt32             *inChannelsPerElement,
                                             Float64            inSampleRate)
{
    SInt32 maxChannels = 0;
    if (!HasDynamicScope (inScope, maxChannels))
        return kAudioUnitErr_InvalidProperty;

    // Sanity check: a reported count of -1 means the unit can handle
    // "any" number of channels, so skip the total-channel validation.
    if (maxChannels > 0) {
        SInt32 requested = 0;
        for (unsigned int i = 0; i < inNumElements; ++i)
            requested += inChannelsPerElement[i];
        if (requested > maxChannels)
            return kAudioUnitErr_InvalidPropertyValue;
    }

    OSStatus result = SetElementCount (inScope, inNumElements);
    if (result)
        return result;

    CAStreamBasicDescription desc;
    desc.mSampleRate = inSampleRate;
    for (unsigned int i = 0; i < inNumElements; ++i) {
        desc.SetCanonical (inChannelsPerElement[i], false); // deinterleaved
        result = SetFormat (inScope, i, desc);
        if (result)
            return result;
    }
    return noErr;
}
示例5: GetFormat
// Re-apply the element's current stream format with a new channel
// count, preserving its sample rate and interleaving.
OSStatus CAAudioUnit::SetNumberChannels (AudioUnitScope     inScope,
                                         AudioUnitElement   inEl,
                                         UInt32             inChans)
{
    // fetch the element's existing format as the starting point
    CAStreamBasicDescription desc;
    const OSStatus err = GetFormat (inScope, inEl, desc);
    if (err)
        return err;

    desc.SetCanonical (inChans, desc.IsInterleaved());
    return SetFormat (inScope, inEl, desc);
}
示例6:
// Build an AudioFormat describing the AU-canonical (interleaved)
// layout for the requested channel count and sample rate.
//
// BUG FIX: the sampleRate argument was previously ignored and the rate
// hard-coded to 44100.0f; it is now honored. mFrameRate mirrors
// mSampleRate, as in the original.
Minim::AudioFormat::AudioFormat( float sampleRate, int numberOfChannels )
{
    CAStreamBasicDescription streamDesc;
    streamDesc.mSampleRate = sampleRate;                    // was hard-coded 44100.0f
    streamDesc.SetAUCanonical( numberOfChannels, true );    // interleaved canonical format

    mChannels         = streamDesc.mChannelsPerFrame;
    mSampleRate       = streamDesc.mSampleRate;
    mFrameRate        = streamDesc.mSampleRate;
    mFrameSize        = streamDesc.mBytesPerFrame;
    mSampleSizeInBits = streamDesc.mBitsPerChannel;
    mBigEndian        = (streamDesc.mFormatFlags & kLinearPCMFormatFlagIsBigEndian);
}
示例7: sizeof
// Size this buffer list for nFrames frames in the given format: one
// stream when interleaved, one stream per channel otherwise. Grows —
// never shrinks — both the AudioBufferList header/pointer block and the
// backing sample memory. All sizing arithmetic goes through
// SafeMultiplyAddUInt32 to guard against UInt32 overflow.
void AUBufferList::Allocate(const CAStreamBasicDescription &format, UInt32 nFrames)
{
    UInt32 nStreams;
    if (format.IsInterleaved()) {
        nStreams = 1;
    } else {
        nStreams = format.mChannelsPerFrame;
    }

    // careful -- the I/O thread could be running!
    if (nStreams > mAllocatedStreams) {
        size_t theHeaderSize = sizeof(AudioBufferList) - sizeof(AudioBuffer);
        mPtrs = (AudioBufferList *)CA_realloc(mPtrs,
            SafeMultiplyAddUInt32(nStreams, sizeof(AudioBuffer), theHeaderSize));
        mAllocatedStreams = nStreams;
    }

    // round each stream's byte count up to a 16-byte multiple
    UInt32 bytesPerStream = SafeMultiplyAddUInt32(nFrames, format.mBytesPerFrame, 0xF) & ~0xF;
    UInt32 nBytes = SafeMultiplyAddUInt32(nStreams, bytesPerStream, 0);
    if (nBytes > mAllocatedBytes) {
        if (mExternalMemory) {
            // externally supplied memory is not ours to realloc/free;
            // forget it and allocate our own
            mExternalMemory = false;
            mMemory = NULL;
        }
        mMemory = (Byte *)CA_realloc(mMemory, nBytes);
        mAllocatedBytes = nBytes;
    }
    mAllocatedFrames = nFrames;
    mPtrState = kPtrsInvalid;   // buffer pointers must be rebuilt before use
}
示例8: offsetof
// Size this buffer list for nFrames frames in the given format: one
// interleaved stream, or one stream per channel. Grows — never
// shrinks — both the AudioBufferList pointer block and the backing
// sample memory.
// NOTE(review): unlike the SafeMultiplyAddUInt32 variant of this
// routine, the size math here (nFrames * mBytesPerFrame and
// nStreams * bytesPerStream) is unchecked UInt32 arithmetic — confirm
// callers cannot overflow it.
void AUBufferList::Allocate(const CAStreamBasicDescription &format, UInt32 nFrames)
{
    UInt32 nStreams;
    UInt32 channelsPerStream;   // NOTE(review): computed but never read in this routine
    if (format.IsInterleaved()) {
        nStreams = 1;
        channelsPerStream = format.mChannelsPerFrame;
    } else {
        nStreams = format.mChannelsPerFrame;
        channelsPerStream = 1;
    }

    // careful -- the I/O thread could be running!
    if (nStreams > mAllocatedStreams) {
        mPtrs = (AudioBufferList *)CA_realloc(mPtrs, offsetof(AudioBufferList, mBuffers) + nStreams * sizeof(AudioBuffer));
        mAllocatedStreams = nStreams;
    }

    // round each stream's byte count up to a 16-byte multiple
    UInt32 bytesPerStream = (nFrames * format.mBytesPerFrame + 0xF) & ~0xF;
    UInt32 nBytes = nStreams * bytesPerStream;
    if (nBytes > mAllocatedBytes) {
        if (mExternalMemory) {
            // externally supplied memory is not ours to realloc/free;
            // forget it and allocate our own
            mExternalMemory = false;
            mMemory = NULL;
        }
        mMemory = (Byte *)CA_realloc(mMemory, nBytes);
        mAllocatedBytes = nBytes;
    }
    mAllocatedFrames = nFrames;
    mPtrState = kPtrsInvalid;   // buffer pointers must be rebuilt before use
}
示例9: delete
// (Re)build every buffer this view owns for the current channel count:
// the spectrum ring buffer, the fetch/temp CABufferLists, the render
// time stamp, the spectral processor, and the per-channel min/max
// amplitude arrays. Safe to call repeatedly; prior allocations are
// released before being replaced.
void SonogramViewDemo::AllocateBuffers()
{
    mBlockSize = 1024;
    mNumBins = mBlockSize>>1;   // half the block size

    if (mSpectrumBuffer) {
        // delete calls deallocate
        delete (mSpectrumBuffer);
    }
    mSpectrumBuffer = new CARingBuffer();
    // each ring-buffer "frame" is a whole spectral slice: mNumBins Float32s
    mSpectrumBuffer->Allocate(GetNumberOfChannels(), mNumBins*sizeof(Float32), kMaxSonogramLatency);

    CAStreamBasicDescription bufClientDesc;
    bufClientDesc.SetCanonical(GetNumberOfChannels(), false);   // deinterleaved
    bufClientDesc.mSampleRate = GetSampleRate();

    UInt32 frameLength = kDefaultValue_BufferSize*sizeof(Float32);

    if (mFetchingBufferList) {
        mFetchingBufferList->DeallocateBuffers();
        delete(mFetchingBufferList);
    }
    mFetchingBufferList = CABufferList::New("fetch buffer", bufClientDesc );
    mFetchingBufferList->AllocateBuffers(frameLength);

    if (mSpectralDataBufferList) {
        mSpectralDataBufferList->DeallocateBuffers();
        delete(mSpectralDataBufferList);
    }
    mSpectralDataBufferList = CABufferList::New("temp buffer", bufClientDesc );
    mSpectralDataBufferList->AllocateBuffers(frameLength);

    // reset the render clock; only the sample-time field is valid
    memset (&mRenderStamp, 0, sizeof(AudioTimeStamp));
    mRenderStamp.mFlags = kAudioTimeStampSampleTimeValid;

    mSpectralProcessor.free();  // NOTE(review): presumably a smart holder releasing the previous processor — confirm
    mSpectralProcessor = new CASpectralProcessor(mBlockSize, mNumBins, GetNumberOfChannels(), GetMaxFramesPerSlice());

    // per-channel amplitude extrema, zero-initialized by calloc
    if (mMinAmp) free(mMinAmp);
    mMinAmp = (Float32*) calloc(GetNumberOfChannels(), sizeof(Float32));
    if (mMaxAmp) free(mMaxAmp);
    mMaxAmp = (Float32*) calloc(GetNumberOfChannels(), sizeof(Float32));
}
示例10: ChangeStreamFormat
/*! @method ChangeStreamFormat
    Validate that the requested channel count is supported for the given
    scope, drop any stale channel layout when the count changes, then
    defer to the base class. */
OSStatus AUPannerBase::ChangeStreamFormat (
                                AudioUnitScope                      inScope,
                                AudioUnitElement                    inElement,
                                const CAStreamBasicDescription &    inPrevFormat,
                                const CAStreamBasicDescription &    inNewFormat)
{
    const UInt32 newChans = inNewFormat.NumberChannels();

    const bool unsupported =
           (inScope == kAudioUnitScope_Input  && !InputChannelConfigIsSupported(newChans))
        || (inScope == kAudioUnitScope_Output && !OutputChannelConfigIsSupported(newChans));
    if (unsupported)
        return kAudioUnitErr_FormatNotSupported;

    // a channel-count change invalidates any previously assigned layout
    if (newChans != inPrevFormat.NumberChannels())
        RemoveAudioChannelLayout(inScope, inElement);

    return AUBase::ChangeStreamFormat(inScope, inElement, inPrevFormat, inNewFormat);
}
示例11: Stop
// Point the writer at a new output file: stop I/O, drop pending
// buffers, create the file with the requested on-disk data format, then
// install a deinterleaved client format (matching the file's channel
// count and sample rate) on both the file and this writer.
void CAAudioFileWriter::SetFile(const FSRef &parentDir, CFStringRef filename, AudioFileTypeID filetype, const CAStreamBasicDescription &dataFormat, const CAAudioChannelLayout *layout)
{
    Stop();                         // halt I/O before swapping files
    CancelAndDisposeBuffers();

    // NULL first so a throwing CreateNew cannot leave a dangling pointer
    delete mFile; mFile = NULL;
    mFile = new CAAudioFile;
    mFile->CreateNew(parentDir, filename, filetype, dataFormat, layout ? &layout->Layout() : NULL);

    const CAStreamBasicDescription &fileFmt = mFile->GetFileDataFormat();
    CAStreamBasicDescription iofmt;
    iofmt.SetCanonical(fileFmt.mChannelsPerFrame, false); // deinterleaved
    iofmt.mSampleRate = fileFmt.mSampleRate;
    mFile->SetClientFormat(iofmt, NULL);
    SetFormat(iofmt);
}
示例12: RemoveFromWorkerThread
// Create a new output file for this writer: detach from the worker
// thread, flush and close the current file, drop buffers, create the
// new file with the requested on-disk format, install a deinterleaved
// client format matching the file, and rejoin the worker thread.
void ZKMORFileWriter::CreateFile(const FSRef &parentDir, CFStringRef filename, AudioFileTypeID filetype, const CAStreamBasicDescription &dataFormat, const CAAudioChannelLayout *layout)
{
    RemoveFromWorkerThread();   // quiesce async writing before touching mFile
    FlushAndClose();
    DisposeBuffers();

    // NULL first so a throwing CreateNew cannot leave a dangling pointer
    delete mFile; mFile = NULL;
    mFile = new CAAudioFile;
    mFile->CreateNew(parentDir, filename, filetype, dataFormat, layout ? &layout->Layout() : NULL);

    const CAStreamBasicDescription &fileFmt = mFile->GetFileDataFormat();
    CAStreamBasicDescription iofmt;
    iofmt.SetCanonical(fileFmt.mChannelsPerFrame, false); // deinterleaved
    iofmt.mSampleRate = fileFmt.mSampleRate;
    SetClientDataFormat(iofmt);

    AddToWorkerThread();        // resume async writing on the new file
}
示例13: if
// Validate a requested stream-format change: accept 1-2 channels on the
// input scope, exactly 2 channels on the output scope, and reject
// everything else (including other scopes).
//
// Improvement: the scope values were compared against the magic numbers
// 1 and 2; they now use the named AudioUnit constants
// kAudioUnitScope_Input (== 1) and kAudioUnitScope_Output (== 2),
// which have identical values, so behavior is unchanged.
ComponentResult ElCAJAS::ChangeStreamFormat(AudioUnitScope inScope,
                                            AudioUnitElement inElement,
                                            const CAStreamBasicDescription& inPrevFormat,
                                            const CAStreamBasicDescription& inNewFormat)
{
    const int reqChans = inNewFormat.NumberChannels();

    if (inScope == kAudioUnitScope_Input) {         // was: inScope == 1
        // mono or stereo input is fine
        return (reqChans >= 1 && reqChans <= 2) ? noErr : kAudioUnitErr_FormatNotSupported;
    }
    if (inScope == kAudioUnitScope_Output) {        // was: inScope == 2
        // output must be stereo
        return (reqChans == 2) ? noErr : kAudioUnitErr_FormatNotSupported;
    }
    return kAudioUnitErr_FormatNotSupported;        // any other scope is rejected
}
示例14: CARingBuffer
// (Re)allocate the waveform capture buffers for the current channel
// count: a ring buffer written one Float32 at a time plus a fetch
// CABufferList, then reset the render time stamp. Safe to call
// repeatedly; prior allocations are released first.
void WaveformViewDemo::AllocateBuffers()
{
    if (mAudioBuffer) delete (mAudioBuffer);
    mAudioBuffer = new CARingBuffer();
    mAudioBuffer->Allocate(GetNumberOfChannels(), sizeof(Float32), kDefaultValue_BufferSize);
    // unlike the spectral buffers we write one number at a time, the spectral ones do entire analysis at a time

    CAStreamBasicDescription bufClientDesc;
    bufClientDesc.SetCanonical(GetNumberOfChannels(), false);   // deinterleaved
    bufClientDesc.mSampleRate = GetSampleRate();

    if (mFetchingBufferList) {
        mFetchingBufferList->DeallocateBuffers();
        delete(mFetchingBufferList);
    }
    mFetchingBufferList = CABufferList::New("fetch buffer", bufClientDesc );
    mFetchingBufferList->AllocateBuffers(sizeof(Float32) * kDefaultValue_BufferSize);

    // reset the render clock; only the sample-time field is valid
    memset (&mRenderStamp, 0, sizeof(AudioTimeStamp));
    mRenderStamp.mFlags = kAudioTimeStampSampleTimeValid;
}
示例15:
// this should NOT be called while I/O is in process
//
// Adopt an externally supplied buffer as this list's backing memory.
// Adoption happens only when we already own an allocation AND the
// external buffer (truncated to a 16-byte multiple) is at least as
// large; otherwise the offer is ignored. On adoption the old memory is
// freed, the frame capacity is recomputed from the new byte size, and
// mExternalMemory marks the memory as not ours to realloc/free.
void AUBufferList::UseExternalBuffer(const CAStreamBasicDescription &format, const AudioUnitExternalBuffer &buf)
{
    UInt32 alignedSize = buf.size & ~0xF;   // usable size, truncated to a 16-byte multiple
    if (mMemory != NULL && alignedSize >= mAllocatedBytes) {
        // don't accept the buffer if we already have one and it's big enough
        // if we don't already have one, we don't need one
        Byte *oldMemory = mMemory;
        mMemory = buf.buffer;
        mAllocatedBytes = alignedSize;
        // from Allocate(): nBytes = nStreams * nFrames * format.mBytesPerFrame;
        // thus: nFrames = nBytes / (nStreams * format.mBytesPerFrame)
        mAllocatedFrames = mAllocatedBytes / (format.NumberChannelStreams() * format.mBytesPerFrame);
        mExternalMemory = true;
        free(oldMemory);    // release the allocation we owned
    }
}