This article collects typical usage examples of the C++ method CAStreamBasicDescription::SetAUCanonical. If you have been wondering what exactly CAStreamBasicDescription::SetAUCanonical does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples of its containing class, CAStreamBasicDescription.
The following presents 4 code examples of CAStreamBasicDescription::SetAUCanonical, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
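Before the examples, a brief orientation may help: SetAUCanonical(nChannels, interleaved) fills the description with the canonical AudioUnit sample format (linear PCM, one frame per packet, 8.24 fixed-point samples on fixed-point iOS builds and 32-bit native-endian floats elsewhere) for the requested channel count and interleaving. It leaves mSampleRate untouched, which is why every example below assigns the sample rate separately. A minimal sketch of the common pattern follows; the stereo channel count and 44.1 kHz rate are illustrative choices, not taken from any particular example:

#include "CAStreamBasicDescription.h" // from Apple's Core Audio Utility Classes

// Minimal sketch: build a canonical stereo, non-interleaved AudioUnit format.
CAStreamBasicDescription MakeCanonicalStereoFormat()
{
    CAStreamBasicDescription desc;
    desc.SetAUCanonical(2, false); // 2 channels, non-interleaved
    desc.mSampleRate = 44100.0;    // SetAUCanonical never sets the sample rate
    return desc;
}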
Example 1: AUEffectBase
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// AUPulseDetector::AUPulseDetector
//
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
AUPulseDetector::AUPulseDetector(AudioUnit component)
    : AUEffectBase(component),
      mChildObject(NULL)
{
    CreateElements();

    // Use a mono, non-interleaved canonical AU format at 44.1 kHz on both buses.
    CAStreamBasicDescription monoDesc;
    monoDesc.SetAUCanonical (1, false);
    monoDesc.mSampleRate = 44100.;
    GetOutput(0)->SetStreamFormat(monoDesc);
    GetInput(0)->SetStreamFormat(monoDesc);

    // Declare five indexed parameters and set their defaults.
    Globals()->UseIndexedParameters (5);
    Globals()->SetParameter (kPulseThreshold, kPulseThresholdDefault);
    Globals()->SetParameter (kPulseLength, kPulseLengthDefault);
    Globals()->SetParameter (kPulseRestTime, kPulseRestTimeDefault);
    Globals()->SetParameter (kDoPulseDetection, kDoPulseDetectionDefault);
    Globals()->SetParameter (kWritePulseStats, 0);

    mPulseTimeStats = new PulseTS[kPulseTSSize];

#if AU_DEBUG_DISPATCHER
    mDebugDispatcher = new AUDebugDispatcher (this);
#endif
}
Example 2:
Minim::AudioFormat::AudioFormat( float sampleRate, int numberOfChannels )
{
    // Note: the sampleRate argument is not used; the rate is hard-coded to 44.1 kHz.
    CAStreamBasicDescription streamDesc;
    streamDesc.mSampleRate = 44100.0f;
    streamDesc.SetAUCanonical( numberOfChannels, true ); // interleaved canonical format

    // Copy the derived fields out of the canonical description.
    mChannels         = streamDesc.mChannelsPerFrame;
    mSampleRate       = streamDesc.mSampleRate;
    mFrameRate        = streamDesc.mSampleRate;
    mFrameSize        = streamDesc.mBytesPerFrame;
    mSampleSizeInBits = streamDesc.mBitsPerChannel;
    mBigEndian        = (streamDesc.mFormatFlags & kLinearPCMFormatFlagIsBigEndian) != 0;
}
Example 3: SetupRemoteIO
int SetupRemoteIO (AudioUnit& inRemoteIOUnit, AURenderCallbackStruct inRenderProc, CAStreamBasicDescription& outFormat)
{
    try {
        // Open the output unit
        AudioComponentDescription desc;
        desc.componentType = kAudioUnitType_Output;
        desc.componentSubType = kAudioUnitSubType_RemoteIO;
        desc.componentManufacturer = kAudioUnitManufacturer_Apple;
        desc.componentFlags = 0;
        desc.componentFlagsMask = 0;

        AudioComponent comp = AudioComponentFindNext(NULL, &desc);
        XThrowIfError(AudioComponentInstanceNew(comp, &inRemoteIOUnit), "couldn't open the remote I/O unit");

        // Enable input on bus 1 (the microphone side of the remote I/O unit).
        UInt32 one = 1;
        XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one)), "couldn't enable input on the remote I/O unit");

        XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &inRenderProc, sizeof(inRenderProc)), "couldn't set remote i/o render callback");

        // NEWL: set the canonical format not of the AudioUnits but of the input/output system:
        // LPCM, non-interleaved, 16-bit signed integer data.
        // outFormat.SetCanonical(2, false);
        // OLDL: set our required format - Canonical AU format: LPCM non-interleaved 8.24 fixed point
        outFormat.SetAUCanonical(2, false);
        outFormat.mSampleRate = SAMPRATE;
        XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &outFormat, sizeof(outFormat)), "couldn't set the remote I/O unit's output client format");
        XThrowIfError(AudioUnitSetProperty(inRemoteIOUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &outFormat, sizeof(outFormat)), "couldn't set the remote I/O unit's input client format");

        XThrowIfError(AudioUnitInitialize(inRemoteIOUnit), "couldn't initialize the remote I/O unit");
    }
    catch (CAXException &e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        return 1;
    }
    catch (...) {
        fprintf(stderr, "An unknown error occurred\n");
        return 1;
    }
    return 0;
}
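For context, a hypothetical call site for SetupRemoteIO might look like the sketch below. MyRenderProc and StartAudio are illustrative names that are not part of the original example, and SAMPRATE is whatever constant the surrounding project defines:

#include <AudioToolbox/AudioToolbox.h>

// Hypothetical render callback: fills ioData with inNumberFrames frames of audio.
static OSStatus MyRenderProc(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
                             const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
                             UInt32 inNumberFrames, AudioBufferList *ioData)
{
    // ... produce or pull audio here ...
    return noErr;
}

void StartAudio()
{
    AudioUnit rioUnit = NULL;
    CAStreamBasicDescription rioFormat;
    AURenderCallbackStruct renderProc = { MyRenderProc, NULL /*inRefCon*/ };
    if (SetupRemoteIO(rioUnit, renderProc, rioFormat) == 0)
        AudioOutputUnitStart(rioUnit); // begin rendering through the callback
}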
Example 4: DoAQOfflineRender
void DoAQOfflineRender(CFURLRef sourceURL, CFURLRef destinationURL)
{
    // main audio queue code
    try {
        AQTestInfo myInfo;
        myInfo.mDone = false;
        myInfo.mFlushed = false;
        myInfo.mCurrentPacket = 0;

        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, 0x01/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile), "AudioFileOpen failed");

        UInt32 size = sizeof(myInfo.mDataFormat);
        XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyDataFormat, &size, &myInfo.mDataFormat), "couldn't get file's data format");

        printf ("File format: "); myInfo.mDataFormat.Print();

        // create a new audio queue output
        XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat,   // The data format of the audio to play. For linear PCM, only interleaved formats are supported.
                                          AQTestBufferCallback,  // A callback function to use with the playback audio queue.
                                          &myInfo,               // A custom data structure for use with the callback function.
                                          CFRunLoopGetCurrent(), // The event loop on which the callback function pointed to by the inCallbackProc parameter is to be called.
                                                                 // If you specify NULL, the callback is invoked on one of the audio queue’s internal threads.
                                          kCFRunLoopCommonModes, // The run loop mode in which to invoke the callback function specified in the inCallbackProc parameter.
                                          0,                     // Reserved for future use. Must be 0.
                                          &myInfo.mQueue),       // On output, the newly created playback audio queue object.
                      "AudioQueueNew failed");

        UInt32 bufferByteSize;

        // we need to calculate how many packets we read at a time and how big a buffer we need
        // we base this on the size of the packets in the file and an approximate duration for each buffer
        {
            bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0);

            // first check to see what the max size of a packet is - if it is bigger
            // than our allocation default size, that needs to become larger
            UInt32 maxPacketSize;
            size = sizeof(maxPacketSize);
            XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");

            // adjust buffer size to represent about a second of audio based on this format
            CalculateBytesForTime(myInfo.mDataFormat, maxPacketSize, 1.0/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead);

            if (isFormatVBR) {
                myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead];
            } else {
                myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
            }

            printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead);
        }

        // if the file has a magic cookie, we should get it and set it on the AQ
        size = sizeof(UInt32);
        OSStatus result = AudioFileGetPropertyInfo (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

        if (!result && size) {
            char* cookie = new char [size];
            XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
            XThrowIfError (AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
            delete [] cookie;
        }

        // channel layout?
        OSStatus err = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &size, NULL);
        AudioChannelLayout *acl = NULL;
        if (err == noErr && size > 0) {
            acl = (AudioChannelLayout *)malloc(size);
            XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &size, acl), "get audio file's channel layout");
            XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, acl, size), "set channel layout on queue");
        }

        // allocate the input read buffer
        XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffer), "AudioQueueAllocateBuffer");

        // prepare a canonical interleaved capture format
        CAStreamBasicDescription captureFormat;
        captureFormat.mSampleRate = myInfo.mDataFormat.mSampleRate;
        captureFormat.SetAUCanonical(myInfo.mDataFormat.mChannelsPerFrame, true); // interleaved
        XThrowIfError(AudioQueueSetOfflineRenderFormat(myInfo.mQueue, &captureFormat, acl), "set offline render format");

        ExtAudioFileRef captureFile;

        // prepare a 16-bit int file format, sample channel count and sample rate
        CAStreamBasicDescription dstFormat;
        dstFormat.mSampleRate = myInfo.mDataFormat.mSampleRate;
        dstFormat.mChannelsPerFrame = myInfo.mDataFormat.mChannelsPerFrame;
        dstFormat.mFormatID = kAudioFormatLinearPCM;
        dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
        dstFormat.mBitsPerChannel = 16;
        dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
        dstFormat.mFramesPerPacket = 1;

        // create the capture file
        XThrowIfError(ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, acl, kAudioFileFlags_EraseFile, &captureFile), "ExtAudioFileCreateWithURL");

        // set the capture file's client format to be the canonical format from the queue
        XThrowIfError(ExtAudioFileSetProperty(captureFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &captureFormat), "set ExtAudioFile client format");
//......... the rest of the code is omitted here .........