This article collects typical usage examples of the C++ AudioUnitInitialize function. If you have been wondering what exactly AudioUnitInitialize does and how to use it in C++, the hand-picked code examples below may help.
The following shows 15 code examples of the AudioUnitInitialize function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
Example 1: gst_core_audio_open
gboolean
gst_core_audio_open (GstCoreAudio * core_audio)
{
OSStatus status;
/* core_audio->osxbuf is already locked at this point */
core_audio->cached_caps_valid = FALSE;
gst_caps_replace (&core_audio->cached_caps, NULL);
if (!gst_core_audio_open_impl (core_audio))
return FALSE;
/* Add property listener */
status = AudioUnitAddPropertyListener (core_audio->audiounit,
kAudioUnitProperty_AudioChannelLayout, _audio_unit_property_listener,
core_audio);
if (status != noErr) {
GST_ERROR_OBJECT (core_audio, "Failed to add audio channel layout property "
"listener for AudioUnit: %d", (int) status);
}
status = AudioUnitAddPropertyListener (core_audio->audiounit,
kAudioUnitProperty_StreamFormat, _audio_unit_property_listener,
core_audio);
if (status != noErr) {
GST_ERROR_OBJECT (core_audio, "Failed to add stream format property "
"listener for AudioUnit: %d", (int) status);
}
/* Initialize the AudioUnit. We keep the audio unit initialized early so that
* we can probe the underlying device. */
status = AudioUnitInitialize (core_audio->audiounit);
if (status) {
GST_ERROR_OBJECT (core_audio, "Failed to initialize AudioUnit: %d",
(int) status);
return FALSE;
}
return TRUE;
}
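The listeners registered above need a matching removal when the unit is torn down; below is a minimal teardown sketch (a hypothetical counterpart to this function, not the actual GStreamer close path; the proc and user data must match the registration exactly):
static void
core_audio_teardown (GstCoreAudio * core_audio)
{
/* Remove the listeners with the same proc/user-data pair used above */
AudioUnitRemovePropertyListenerWithUserData (core_audio->audiounit,
kAudioUnitProperty_AudioChannelLayout, _audio_unit_property_listener,
core_audio);
AudioUnitRemovePropertyListenerWithUserData (core_audio->audiounit,
kAudioUnitProperty_StreamFormat, _audio_unit_property_listener,
core_audio);
/* Undo the AudioUnitInitialize () from gst_core_audio_open () */
AudioUnitUninitialize (core_audio->audiounit);
}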
Example 2: gst_core_audio_initialize
gboolean
gst_core_audio_initialize (GstCoreAudio * core_audio,
AudioStreamBasicDescription format, GstCaps * caps, gboolean is_passthrough)
{
guint32 frame_size;
OSStatus status;
GST_DEBUG_OBJECT (core_audio,
"Initializing: passthrough:%d caps:%" GST_PTR_FORMAT, is_passthrough,
caps);
if (!gst_core_audio_initialize_impl (core_audio, format, caps,
is_passthrough, &frame_size)) {
goto error;
}
if (core_audio->is_src) {
/* create AudioBufferList needed for recording */
core_audio->recBufferList =
buffer_list_alloc (format.mChannelsPerFrame,
frame_size * format.mBytesPerFrame);
}
/* Initialize the AudioUnit */
status = AudioUnitInitialize (core_audio->audiounit);
if (status) {
GST_ERROR_OBJECT (core_audio, "Failed to initialise AudioUnit: %d",
(int) status);
goto error;
}
return TRUE;
error:
if (core_audio->is_src && core_audio->recBufferList) {
buffer_list_free (core_audio->recBufferList);
core_audio->recBufferList = NULL;
}
return FALSE;
}
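buffer_list_alloc is a project helper whose definition is not shown in this listing; a plausible single-buffer, interleaved implementation could look like the sketch below (an illustrative assumption, not the actual GStreamer helper):
static AudioBufferList *
buffer_list_alloc (UInt32 channels, UInt32 size)
{
/* One interleaved buffer that holds all channels */
AudioBufferList *list = (AudioBufferList *) malloc (sizeof (AudioBufferList));
list->mNumberBuffers = 1;
list->mBuffers[0].mNumberChannels = channels;
list->mBuffers[0].mDataByteSize = size;
list->mBuffers[0].mData = malloc (size);
return list;
}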
Example 3: AudioUnitInitialize
bool auLoader::initialize()
{
/** Algorithm: **/
/** Call the AU's Initialize method **/
OSStatus err = AudioUnitInitialize(m_plugin);
if(err != noErr)
{
debug(LOG_ERROR, "Could not initialize plugin");
return false;
}
/** Set up output buffers **/
m_buffer_list = (AudioBufferList *)malloc(offsetof(AudioBufferList, mBuffers[MAX_CHANNELS]));
m_buffer_list->mNumberBuffers = MAX_CHANNELS;
/** Connect input properties **/
AURenderCallbackStruct callback;
callback.inputProc = this->inputCallback;
callback.inputProcRefCon = this;
/** Set up render notifications **/
err = AudioUnitSetProperty(m_plugin, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input,
0, &callback, sizeof(callback));
if(err != noErr)
{
debug(LOG_WARN, "Could not configure inputs");
}
err = AudioUnitAddRenderNotify(m_plugin, this->renderNotify, NULL);
if(err != noErr)
{
debug(LOG_ERROR, "Could not set up render notification");
}
debug(LOG_INFO, "AU initialized");
return true;
}
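Note that the inputProc assigned above must be a plain function (or static member) matching the AURenderCallback signature from AudioUnit/AUComponent.h; a minimal skeleton (names are illustrative):
static OSStatus inputCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
// Fill ioData->mBuffers[] with inNumberFrames frames of audio here
return noErr;
}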
Example 4: iOSCoreAudioInit
void iOSCoreAudioInit()
{
if (!audioInstance) {
OSStatus err;
// first, grab the default output
AudioComponentDescription defaultOutputDescription;
defaultOutputDescription.componentType = kAudioUnitType_Output;
defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
defaultOutputDescription.componentFlags = 0;
defaultOutputDescription.componentFlagsMask = 0;
AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
// create our instance
err = AudioComponentInstanceNew(defaultOutput, &audioInstance);
if (err != noErr) {
audioInstance = nil;
return;
}
// create our callback so we can give it the audio data
AURenderCallbackStruct input;
input.inputProc = iOSCoreAudioCallback;
input.inputProcRefCon = NULL;
err = AudioUnitSetProperty(audioInstance,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&input,
sizeof(input));
if (err != noErr) {
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
return;
}
// setup the audio format we'll be using (stereo pcm)
AudioStreamBasicDescription streamFormat;
memset(&streamFormat, 0, sizeof(streamFormat));
streamFormat.mSampleRate = SAMPLE_RATE;
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
streamFormat.mBitsPerChannel = sizeof(AudioSampleType) * 8;
streamFormat.mChannelsPerFrame = 2;
streamFormat.mFramesPerPacket = 1;
streamFormat.mBytesPerFrame = (streamFormat.mBitsPerChannel / 8) * streamFormat.mChannelsPerFrame;
streamFormat.mBytesPerPacket = streamFormat.mBytesPerFrame * streamFormat.mFramesPerPacket;
err = AudioUnitSetProperty(audioInstance,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&streamFormat,
sizeof(AudioStreamBasicDescription));
if (err != noErr) {
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
return;
}
// k, all setup, so init
err = AudioUnitInitialize(audioInstance);
if (err != noErr) {
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
return;
}
// finally start playback
err = AudioOutputUnitStart(audioInstance);
if (err != noErr) {
AudioUnitUninitialize(audioInstance);
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
return;
}
// we're good to go
}
}
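A matching shutdown would reverse these steps in order; here is a sketch of a hypothetical iOSCoreAudioShutdown (not part of the original listing):
void iOSCoreAudioShutdown()
{
if (audioInstance) {
AudioOutputUnitStop(audioInstance); // stop rendering first
AudioUnitUninitialize(audioInstance); // undo AudioUnitInitialize
AudioComponentInstanceDispose(audioInstance);
audioInstance = nil;
}
}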
Example 5: AuHAL_open
//......... some code omitted here .........
psize = sizeof(AudioDeviceID);
/* for input, select device AFTER enabling IO */
AudioUnitSetProperty(cdata->inunit,kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitScope_Global, isInput, &dev, psize);
aunit = &(cdata->inunit);
}
else {
AudioComponentInstanceNew(HALOutput, &(cdata->outunit));
psize = sizeof(AudioDeviceID);
/* for output, select device BEFORE enabling IO */
AudioUnitSetProperty(cdata->outunit, kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitScope_Global, isInput, &dev, psize);
enableIO = 1;
AudioUnitSetProperty(cdata->outunit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output, 0, &enableIO, sizeof(enableIO));
enableIO = 0;
AudioUnitSetProperty(cdata->outunit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input, 1, &enableIO, sizeof(enableIO));
aunit = &(cdata->outunit);
}
/* now set the buffer size */
psize = sizeof(AudioDeviceID);
AudioUnitGetProperty(*aunit, kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitScope_Global, isInput, &dev, &psize);
prop.mSelector = kAudioDevicePropertyBufferFrameSize;
psize = 4;
AudioObjectSetPropertyData(dev, &prop, 0, NULL, psize, &bufframes);
psize = sizeof(maxFPS);
AudioUnitGetProperty(*aunit, kAudioUnitProperty_MaximumFramesPerSlice,
kAudioUnitScope_Global, isInput, &maxFPS, &psize);
AudioUnitSetProperty(*aunit, kAudioUnitProperty_MaximumFramesPerSlice,
kAudioUnitScope_Global, isInput, &bufframes,
sizeof(UInt32));
/* set the stream properties */
psize = sizeof(AudioStreamBasicDescription);
AudioUnitGetProperty(*aunit, kAudioUnitProperty_StreamFormat,
(isInput ? kAudioUnitScope_Output : kAudioUnitScope_Input),
isInput, &format, &psize);
format.mSampleRate = srate;
format.mFormatID = kAudioFormatLinearPCM;
format.mFormatFlags = kAudioFormatFlagsCanonical |
kLinearPCMFormatFlagIsNonInterleaved;
format.mBytesPerPacket = sizeof(AudioUnitSampleType);
format.mFramesPerPacket = 1;
format.mBytesPerFrame = sizeof(AudioUnitSampleType);
format.mChannelsPerFrame = nchnls;
format.mBitsPerChannel = sizeof(AudioUnitSampleType)*8;
AudioUnitSetProperty(*aunit, kAudioUnitProperty_StreamFormat,
(isInput ? kAudioUnitScope_Output : kAudioUnitScope_Input),
isInput, &format,
sizeof(AudioStreamBasicDescription));
/* set the callbacks and open the device */
if(!isInput) {
AURenderCallbackStruct output;
output.inputProc = Csound_Render;
output.inputProcRefCon = cdata;
AudioUnitSetProperty(*aunit, kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input, isInput, &output, sizeof(output));
AudioUnitInitialize(*aunit);
AudioOutputUnitStart(*aunit);
csound->Message(csound,
Str("***** AuHAL module: output device open with %d "
"buffer frames\n"),
bufframes);
cdata->disp = 0;
}
else {
AURenderCallbackStruct input;
AudioBufferList *CAInputData =
(AudioBufferList*)malloc(sizeof(UInt32)
+ cdata->inchnls * sizeof(AudioBuffer));
CAInputData->mNumberBuffers = cdata->inchnls;
for (i = 0; i < cdata->inchnls; i++) {
CAInputData->mBuffers[i].mNumberChannels = 1;
CAInputData->mBuffers[i].mDataByteSize =
bufframes * sizeof(AudioUnitSampleType);
CAInputData->mBuffers[i].mData =
calloc(bufframes, sizeof(AudioUnitSampleType));
}
cdata->inputdata = CAInputData;
input.inputProc = Csound_Input;
input.inputProcRefCon = cdata;
AudioUnitSetProperty(*aunit, kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Input, isInput, &input, sizeof(input));
AudioUnitInitialize(*aunit);
AudioOutputUnitStart(*aunit);
csound->Message(csound,
"***** AuHAL module: input device open with %d buffer frames\n",
bufframes);
}
if(!cdata->disp)
csound->Message(csound,
"==========================================================\n");
cdata->disp = 0;
return 0;
}
Example 6: memset
int CoreAudioDriver::init( unsigned bufferSize )
{
OSStatus err = noErr;
m_pOut_L = new float[ m_nBufferSize ];
m_pOut_R = new float[ m_nBufferSize ];
memset ( m_pOut_L, 0, m_nBufferSize * sizeof( float ) );
memset ( m_pOut_R, 0, m_nBufferSize * sizeof( float ) );
// Get Component
AudioComponent compOutput;
AudioComponentDescription descAUHAL;
descAUHAL.componentType = kAudioUnitType_Output;
descAUHAL.componentSubType = kAudioUnitSubType_HALOutput;
descAUHAL.componentManufacturer = kAudioUnitManufacturer_Apple;
descAUHAL.componentFlags = 0;
descAUHAL.componentFlagsMask = 0;
compOutput = AudioComponentFindNext( NULL, &descAUHAL );
if ( compOutput == NULL ) {
ERRORLOG( "Error in FindNextComponent" );
//exit (-1);
}
err = AudioComponentInstanceNew( compOutput, &m_outputUnit );
if ( err != noErr ) {
ERRORLOG( "Error Opening Component" );
}
// Get Current Output Device
retrieveDefaultDevice();
// Set AUHAL to Current Device
err = AudioUnitSetProperty(
m_outputUnit,
kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitScope_Global,
0,
&m_outputDevice,
sizeof( m_outputDevice )
);
if ( err != noErr ) {
ERRORLOG( "Could not set Current Device" );
}
AudioStreamBasicDescription asbdesc;
asbdesc.mSampleRate = ( Float64 )m_nSampleRate;
asbdesc.mFormatID = kAudioFormatLinearPCM;
asbdesc.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
asbdesc.mBytesPerPacket = sizeof( Float32 );
asbdesc.mFramesPerPacket = 1;
asbdesc.mBytesPerFrame = sizeof( Float32 );
asbdesc.mChannelsPerFrame = 2; // comix: was set to 1
asbdesc.mBitsPerChannel = 32;
err = AudioUnitSetProperty(
m_outputUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&asbdesc,
sizeof( AudioStreamBasicDescription )
);
// Set Render Callback
AURenderCallbackStruct out;
out.inputProc = renderProc;
out.inputProcRefCon = ( void * )this;
err = AudioUnitSetProperty(
m_outputUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
0,
&out,
sizeof( out )
);
if ( err != noErr ) {
ERRORLOG( "Could not Set Render Callback" );
}
//Initialize AUHAL
err = AudioUnitInitialize( m_outputUnit );
if ( err != noErr ) {
ERRORLOG( "Could not Initialize AudioUnit" );
}
return 0;
}
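Note that kAudioFormatFlagIsNonInterleaved changes the meaning of the per-frame fields: each channel travels in its own AudioBuffer, so mBytesPerFrame and mBytesPerPacket describe a single channel, which is why they are sizeof( Float32 ) here even though mChannelsPerFrame is 2. A quick sanity check (a sketch reusing asbdesc from above):
#include <assert.h>
/* Per-channel sizes for non-interleaved 32-bit float: 32 / 8 = 4 bytes */
assert( asbdesc.mBytesPerFrame == asbdesc.mBitsPerChannel / 8 );
assert( asbdesc.mBytesPerPacket == asbdesc.mBytesPerFrame * asbdesc.mFramesPerPacket );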
Example 7: RingBuffer
//......... some code omitted here .........
if (audioComponent == NULL)
{
return;
}
error = AudioComponentInstanceNew(audioComponent, &_au);
if (error != noErr)
{
return;
}
}
else
{
ComponentDescription audioDesc;
audioDesc.componentType = kAudioUnitType_Output;
audioDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
audioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
audioDesc.componentFlags = 0;
audioDesc.componentFlagsMask = 0;
Component audioComponent = FindNextComponent(NULL, &audioDesc);
if (audioComponent == NULL)
{
return;
}
error = OpenAComponent(audioComponent, &_au);
if (error != noErr)
{
return;
}
}
#else
ComponentDescription audioDesc;
audioDesc.componentType = kAudioUnitType_Output;
audioDesc.componentSubType = kAudioUnitSubType_DefaultOutput;
audioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
audioDesc.componentFlags = 0;
audioDesc.componentFlagsMask = 0;
Component audioComponent = FindNextComponent(NULL, &audioDesc);
if (audioComponent == NULL)
{
return;
}
error = OpenAComponent(audioComponent, &_au);
if (error != noErr)
{
return;
}
#endif
// Set the render callback
AURenderCallbackStruct callback;
callback.inputProc = &CoreAudioOutputRenderCallback;
callback.inputProcRefCon = _buffer;
error = AudioUnitSetProperty(_au,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&callback,
sizeof(callback) );
if(error != noErr)
{
return;
}
// Set up the audio unit for audio streaming
AudioStreamBasicDescription outputFormat;
outputFormat.mSampleRate = SPU_SAMPLE_RATE;
outputFormat.mFormatID = kAudioFormatLinearPCM;
outputFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kLinearPCMFormatFlagIsPacked;
outputFormat.mBytesPerPacket = SPU_SAMPLE_SIZE;
outputFormat.mFramesPerPacket = 1;
outputFormat.mBytesPerFrame = SPU_SAMPLE_SIZE;
outputFormat.mChannelsPerFrame = SPU_NUMBER_CHANNELS;
outputFormat.mBitsPerChannel = SPU_SAMPLE_RESOLUTION;
error = AudioUnitSetProperty(_au,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&outputFormat,
sizeof(outputFormat) );
if(error != noErr)
{
return;
}
// Initialize our new audio unit
error = AudioUnitInitialize(_au);
if(error != noErr)
{
return;
}
}
Example 8: sa_stream_open
int
sa_stream_open(sa_stream_t *s) {
ComponentDescription desc;
Component comp;
AURenderCallbackStruct input;
AudioStreamBasicDescription fmt;
if (s == NULL) {
return SA_ERROR_NO_INIT;
}
if (s->output_unit != NULL) {
return SA_ERROR_INVALID;
}
/*
* Open the default audio output unit.
*/
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_DefaultOutput;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
comp = FindNextComponent(NULL, &desc);
if (comp == NULL) {
return SA_ERROR_NO_DEVICE;
}
if (OpenAComponent(comp, &s->output_unit) != noErr) {
return SA_ERROR_NO_DEVICE;
}
/*
* Set up the render callback used to feed audio data into the output unit.
*/
input.inputProc = audio_callback;
input.inputProcRefCon = s;
if (AudioUnitSetProperty(s->output_unit, kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input, 0, &input, sizeof(input)) != 0) {
return SA_ERROR_SYSTEM;
}
/*
* Set up the format description for our audio data. Apple uses the
* following terminology:
*
* sample = a single data value for one channel
* frame = a set of samples that includes one sample for each channel
* packet = the smallest indivisible block of audio data; for uncompressed
* audio (which is what we have), this is one frame
* rate = the number of complete frames per second
*
* Note that this definition of frame differs from, well, pretty much everyone
* else's. See this really long link for more info:
*
* http://developer.apple.com/documentation/MusicAudio/Reference/CoreAudioDataTypesRef/Reference/reference.html#//apple_ref/c/tdef/AudioStreamBasicDescription
*/
fmt.mFormatID = kAudioFormatLinearPCM;
fmt.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
#ifdef __BIG_ENDIAN__
kLinearPCMFormatFlagIsBigEndian |
#endif
kLinearPCMFormatFlagIsPacked;
fmt.mSampleRate = s->rate;
fmt.mChannelsPerFrame = s->n_channels;
fmt.mBitsPerChannel = s->bytes_per_ch * 8;
fmt.mFramesPerPacket = 1; /* uncompressed audio */
fmt.mBytesPerFrame = fmt.mChannelsPerFrame * fmt.mBitsPerChannel / 8;
fmt.mBytesPerPacket = fmt.mBytesPerFrame * fmt.mFramesPerPacket;
/*
* We're feeding data in to the output bus of the audio system, so we set
* the format description on the input scope of the device, using the very
* obvious element value of 0 to indicate the output bus.
*
* http://developer.apple.com/technotes/tn2002/tn2091.html
*/
if (AudioUnitSetProperty(s->output_unit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input, 0, &fmt, sizeof(AudioStreamBasicDescription)) != 0) {
return SA_ERROR_NOT_SUPPORTED;
}
if (AudioUnitInitialize(s->output_unit) != 0) {
return SA_ERROR_SYSTEM;
}
return SA_SUCCESS;
}
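To make the terminology above concrete, here is the arithmetic for a typical packed, interleaved stream (illustrative values, not from the original):
/* 44.1 kHz stereo with 2 bytes per channel (s->rate = 44100,
s->n_channels = 2, s->bytes_per_ch = 2): */
UInt32 bits_per_channel = 2 * 8; /* 16 */
UInt32 bytes_per_frame = 2 * bits_per_channel / 8; /* 4: one sample per channel */
UInt32 bytes_per_packet = bytes_per_frame * 1; /* 4: uncompressed, 1 frame per packet */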
Example 9: audiounit_stream_init
//......... some code omitted here .........
ss.mBitsPerChannel = 16;
ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger;
break;
case CUBEB_SAMPLE_S16BE:
ss.mBitsPerChannel = 16;
ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger |
kAudioFormatFlagIsBigEndian;
break;
case CUBEB_SAMPLE_FLOAT32LE:
ss.mBitsPerChannel = 32;
ss.mFormatFlags |= kAudioFormatFlagIsFloat;
break;
case CUBEB_SAMPLE_FLOAT32BE:
ss.mBitsPerChannel = 32;
ss.mFormatFlags |= kAudioFormatFlagIsFloat |
kAudioFormatFlagIsBigEndian;
break;
default:
return CUBEB_ERROR_INVALID_FORMAT;
}
ss.mFormatID = kAudioFormatLinearPCM;
ss.mFormatFlags |= kLinearPCMFormatFlagIsPacked;
ss.mSampleRate = stream_params.rate;
ss.mChannelsPerFrame = stream_params.channels;
ss.mBytesPerFrame = (ss.mBitsPerChannel / 8) * ss.mChannelsPerFrame;
ss.mFramesPerPacket = 1;
ss.mBytesPerPacket = ss.mBytesPerFrame * ss.mFramesPerPacket;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_DefaultOutput;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
comp = FindNextComponent(NULL, &desc);
#else
comp = AudioComponentFindNext(NULL, &desc);
#endif
assert(comp);
stm = calloc(1, sizeof(*stm));
assert(stm);
stm->context = context;
stm->data_callback = data_callback;
stm->state_callback = state_callback;
stm->user_ptr = user_ptr;
stm->sample_spec = ss;
r = pthread_mutex_init(&stm->mutex, NULL);
assert(r == 0);
stm->frames_played = 0;
stm->frames_queued = 0;
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
r = OpenAComponent(comp, &stm->unit);
#else
r = AudioComponentInstanceNew(comp, &stm->unit);
#endif
if (r != 0) {
audiounit_stream_destroy(stm);
return CUBEB_ERROR;
}
input.inputProc = audiounit_output_callback;
input.inputProcRefCon = stm;
r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global, 0, &input, sizeof(input));
if (r != 0) {
audiounit_stream_destroy(stm);
return CUBEB_ERROR;
}
r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
0, &ss, sizeof(ss));
if (r != 0) {
audiounit_stream_destroy(stm);
return CUBEB_ERROR;
}
buffer_size = ss.mSampleRate / 1000.0 * latency * ss.mBytesPerFrame / NBUFS;
if (buffer_size % ss.mBytesPerFrame != 0) {
buffer_size += ss.mBytesPerFrame - (buffer_size % ss.mBytesPerFrame);
}
assert(buffer_size % ss.mBytesPerFrame == 0);
r = AudioUnitInitialize(stm->unit);
if (r != 0) {
audiounit_stream_destroy(stm);
return CUBEB_ERROR;
}
*stream = stm;
return CUBEB_OK;
}
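The buffer_size computation above converts the requested latency into bytes and then pads up to a whole number of frames; the same rounding logic as a stand-alone sketch:
/* Round a byte count up to a whole number of frames (illustrative helper). */
static unsigned int round_up_to_frames(unsigned int bytes, unsigned int bytes_per_frame)
{
if (bytes % bytes_per_frame != 0)
bytes += bytes_per_frame - (bytes % bytes_per_frame);
return bytes;
}
/* e.g. round_up_to_frames(4411, 4) == 4412 */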
Example 10: zeromem
//......... some code omitted here .........
#endif
AudioStreamBasicDescription strdesc;
zeromem(&strdesc, sizeof(strdesc));
UInt32 size = sizeof(strdesc);
result = AudioUnitGetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kOutputBus, &strdesc, &size);
ERR_FAIL_COND_V(result != noErr, FAILED);
switch (strdesc.mChannelsPerFrame) {
case 2: // Stereo
case 4: // Surround 3.1
case 6: // Surround 5.1
case 8: // Surround 7.1
channels = strdesc.mChannelsPerFrame;
break;
default:
// Unknown number of channels, default to stereo
channels = 2;
break;
}
zeromem(&strdesc, sizeof(strdesc));
size = sizeof(strdesc);
result = AudioUnitGetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, &size);
ERR_FAIL_COND_V(result != noErr, FAILED);
switch (strdesc.mChannelsPerFrame) {
case 1: // Mono
capture_channels = 1;
break;
case 2: // Stereo
capture_channels = 2;
break;
default:
// Unknown number of channels, default to stereo
capture_channels = 2;
break;
}
mix_rate = GLOBAL_DEF_RST("audio/mix_rate", DEFAULT_MIX_RATE);
zeromem(&strdesc, sizeof(strdesc));
strdesc.mFormatID = kAudioFormatLinearPCM;
strdesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
strdesc.mChannelsPerFrame = channels;
strdesc.mSampleRate = mix_rate;
strdesc.mFramesPerPacket = 1;
strdesc.mBitsPerChannel = 16;
strdesc.mBytesPerFrame = strdesc.mBitsPerChannel * strdesc.mChannelsPerFrame / 8;
strdesc.mBytesPerPacket = strdesc.mBytesPerFrame * strdesc.mFramesPerPacket;
result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &strdesc, sizeof(strdesc));
ERR_FAIL_COND_V(result != noErr, FAILED);
strdesc.mChannelsPerFrame = capture_channels;
result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, sizeof(strdesc));
ERR_FAIL_COND_V(result != noErr, FAILED);
int latency = GLOBAL_DEF_RST("audio/output_latency", DEFAULT_OUTPUT_LATENCY);
// Sample rate is independent of channels (ref: https://stackoverflow.com/questions/11048825/audio-sample-frequency-rely-on-channels)
buffer_frames = closest_power_of_2(latency * mix_rate / 1000);
#ifdef OSX_ENABLED
result = AudioUnitSetProperty(audio_unit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, kOutputBus, &buffer_frames, sizeof(UInt32));
ERR_FAIL_COND_V(result != noErr, FAILED);
#endif
unsigned int buffer_size = buffer_frames * channels;
samples_in.resize(buffer_size);
input_buf.resize(buffer_size);
input_buffer.resize(buffer_size * 8);
input_position = 0;
input_size = 0;
print_verbose("CoreAudio: detected " + itos(channels) + " channels");
print_verbose("CoreAudio: audio buffer frames: " + itos(buffer_frames) + " calculated latency: " + itos(buffer_frames * 1000 / mix_rate) + "ms");
AURenderCallbackStruct callback;
zeromem(&callback, sizeof(AURenderCallbackStruct));
callback.inputProc = &AudioDriverCoreAudio::output_callback;
callback.inputProcRefCon = this;
result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, kOutputBus, &callback, sizeof(callback));
ERR_FAIL_COND_V(result != noErr, FAILED);
zeromem(&callback, sizeof(AURenderCallbackStruct));
callback.inputProc = &AudioDriverCoreAudio::input_callback;
callback.inputProcRefCon = this;
result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(callback));
ERR_FAIL_COND_V(result != noErr, FAILED);
result = AudioUnitInitialize(audio_unit);
ERR_FAIL_COND_V(result != noErr, FAILED);
return OK;
}
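closest_power_of_2 is a Godot helper that is not shown here; a minimal round-up variant is sketched below (the real helper may round to the nearest power of two instead):
// Round up to the next power of two (illustrative, not Godot's implementation).
static unsigned int next_power_of_2(unsigned int x) {
unsigned int p = 1;
while (p < x)
p <<= 1;
return p;
}
// e.g. 15 ms at 44100 Hz: 15 * 44100 / 1000 = 661 frames, rounded up to 1024.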
Example 11: FindNextComponent
void *runPluginLoop(void *plug) {
AudioUnit outputUnit;
OSStatus err = noErr;
// Open the default output unit
ComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_DefaultOutput;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
Component comp = FindNextComponent(NULL, &desc);
if(comp == NULL) {
debug(LOG_ERROR, "FindNextComponent failed");
return NULL;
}
err = OpenAComponent(comp, &outputUnit);
if(err != noErr) {
debug(LOG_ERROR, "OpenAComponent failed with error code %ld\n", (long)err);
return NULL;
}
// Set up a callback function to generate output to the output unit
AURenderCallbackStruct input;
input.inputProc = processData;
input.inputProcRefCon = plug;
err = AudioUnitSetProperty(outputUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input,
0, &input, sizeof(input));
AudioStreamBasicDescription streamFormat;
streamFormat.mSampleRate = DEF_SAMPLE_RATE;
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
| kLinearPCMFormatFlagIsBigEndian
| kLinearPCMFormatFlagIsPacked
| kAudioFormatFlagIsNonInterleaved;
streamFormat.mBytesPerPacket = 2;
streamFormat.mFramesPerPacket = 1;
streamFormat.mBytesPerFrame = 2;
streamFormat.mChannelsPerFrame = 2;
streamFormat.mBitsPerChannel = 16;
err = AudioUnitSetProperty(outputUnit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input, 0, &streamFormat,
sizeof(AudioStreamBasicDescription));
if(err) {
debug(LOG_ERROR, "AudioUnitSetProperty-SF failed with code %4.4s, %ld\n", (char*)&err, err);
return NULL;
}
// Initialize unit
err = AudioUnitInitialize(outputUnit);
if(err) {
debug(LOG_ERROR, "AudioUnitInitialize failed with code %ld\n", err);
return NULL;
}
Float64 outSampleRate;
UInt32 size = sizeof(Float64);
err = AudioUnitGetProperty(outputUnit, kAudioUnitProperty_SampleRate,
kAudioUnitScope_Output, 0, &outSampleRate, &size);
if(err) {
debug(LOG_ERROR, "AudioUnitSetProperty-GF failed with code %4.4s, %ld\n", (char*)&err, err);
return NULL;
}
// Start the rendering
// The DefaultOutputUnit will do any format conversions to the format of the default device
err = AudioOutputUnitStart(outputUnit);
if(err) {
debug(LOG_ERROR, "AudioOutputUnitStart failed with code %ld\n", err);
return NULL;
}
// Loop until this thread is killed
CFRunLoopRun();
// REALLY after you're finished playing STOP THE AUDIO OUTPUT UNIT!!!!!!
// but we never get here because we're running until the process is nuked...
AudioOutputUnitStop(outputUnit);
err = AudioUnitUninitialize(outputUnit);
if(err) {
debug(LOG_ERROR, "AudioUnitUninitialize failed with code %ld\n", err);
return NULL;
}
return NULL;
}
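Because runPluginLoop blocks inside CFRunLoopRun, it is meant to run on a dedicated thread; a typical launch might look like this (illustrative, not from the original source):
#include <pthread.h>
pthread_t audioThread;
pthread_create(&audioThread, NULL, runPluginLoop, plug); /* 'plug' is the plugin pointer passed as refCon */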
Example 12: defined
void InputImplAudioUnit::setup()
{
if( mIsSetup ) return;
OSStatus err = noErr;
//get default input device
if( ! mDevice ) {
mDevice = InputImplAudioUnit::getDefaultDevice();
}
//create AudioOutputUnit
AudioComponent component;
AudioComponentDescription description;
description.componentType = kAudioUnitType_Output;
#if defined( CINDER_MAC )
description.componentSubType = kAudioUnitSubType_HALOutput;
#elif defined( CINDER_COCOA_TOUCH )
description.componentSubType = kAudioUnitSubType_RemoteIO;
#endif
description.componentManufacturer = kAudioUnitManufacturer_Apple;
description.componentFlags = 0;
description.componentFlagsMask = 0;
component = AudioComponentFindNext( NULL, &description );
if( ! component ) {
std::cout << "Error finding next component" << std::endl;
throw;
}
err = AudioComponentInstanceNew( component, &mInputUnit );
if( err != noErr ) {
mInputUnit = NULL;
std::cout << "Error getting output unit" << std::endl;
throw;
}
// Initialize the AU
/*err = AudioUnitInitialize( mInputUnit );
if(err != noErr)
{
std::cout << "failed to initialize HAL Output AU" << std::endl;
throw;
}*/
UInt32 param;
//enable IO on AudioUnit's input scope
param = 1;
err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &param, sizeof( UInt32 ) );
if( err != noErr ) {
std::cout << "Error enable IO on Output unit input" << std::endl;
throw;
}
//disable IO on AudioUnit's output scope
param = 0;
err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &param, sizeof( UInt32 ) );
if( err != noErr ) {
std::cout << "Error disabling IO on Output unit output" << std::endl;
throw;
}
#if defined( CINDER_MAC )
AudioDeviceID nativeDeviceId = static_cast<AudioDeviceID>( mDevice->getDeviceId() );
// Set the current device to the default input unit.
err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &nativeDeviceId, sizeof(AudioDeviceID) );
if( err != noErr ) {
std::cout << "failed to set AU input device" << std::endl;
throw;
}
#endif
AURenderCallbackStruct callback;
callback.inputProc = InputImplAudioUnit::inputCallback;
callback.inputProcRefCon = this;
err = AudioUnitSetProperty( mInputUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(AURenderCallbackStruct) );
//Don't setup buffers until you know what the
//input and output device audio streams look like.
// Initialize the AudioUnit
err = AudioUnitInitialize( mInputUnit );
if(err != noErr) {
std::cout << "failed to initialize HAL Output AU" << std::endl;
throw;
}
//Get Size of IO Buffers
uint32_t sampleCount;
param = sizeof(UInt32);
#if defined( CINDER_MAC )
err = AudioUnitGetProperty( mInputUnit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, 0, &sampleCount, &param );
if( err != noErr ) {
std::cout << "Error getting buffer frame size" << std::endl;
throw;
}
#elif defined( CINDER_COCOA_TOUCH )
AudioUnitGetProperty( mInputUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &sampleCount, &param );
//......... some code omitted here .........
Example 13: input_init
//......... some code omitted here .........
size = sizeof(AudioDeviceID);
AudioDeviceID inputDevice;
err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice,
&size,
&inputDevice);
if (err)
exit(err);
err = AudioUnitSetProperty(auHAL,
kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitScope_Global,
0,
&inputDevice,
sizeof(inputDevice));
if (err)
exit(err);
AudioStreamBasicDescription DeviceFormat;
AudioStreamBasicDescription DesiredFormat;
//Use CAStreamBasicDescriptions instead of 'naked'
//AudioStreamBasicDescriptions to minimize errors.
//CAStreamBasicDescription.h can be found in the CoreAudio SDK.
size = sizeof(AudioStreamBasicDescription);
//Get the input device format
AudioUnitGetProperty (auHAL,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
1,
&DeviceFormat,
&size);
//set the desired format to the device's sample rate
memcpy(&DesiredFormat, &DeviceFormat, sizeof(AudioStreamBasicDescription));
sampling_rate = DeviceFormat.mSampleRate; // for laser-emulating filters
DesiredFormat.mSampleRate = DeviceFormat.mSampleRate;
DesiredFormat.mChannelsPerFrame = 4;
DesiredFormat.mBitsPerChannel = 16;
DesiredFormat.mBytesPerPacket = DesiredFormat.mBytesPerFrame =
DesiredFormat.mChannelsPerFrame * 2;
DesiredFormat.mFramesPerPacket = 1;
DesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
//set format to output scope
err = AudioUnitSetProperty(
auHAL,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
1,
&DesiredFormat,
sizeof(AudioStreamBasicDescription));
if (err)
exit(err);
SInt32 *channelMap = NULL;
UInt32 numOfChannels = DesiredFormat.mChannelsPerFrame; //4 channels
UInt32 mapSize = numOfChannels * sizeof(SInt32);
channelMap = (SInt32 *)malloc(mapSize);
//for each channel of desired input, map the channel from
//the device's output channel.
for (i = 0; i < numOfChannels; i++)
{
channelMap[i] = i;
}
err = AudioUnitSetProperty(auHAL,
kAudioOutputUnitProperty_ChannelMap,
kAudioUnitScope_Output,
1,
channelMap,
mapSize);
if (err)
exit(err);
free(channelMap);
AURenderCallbackStruct input;
input.inputProc = callback;
input.inputProcRefCon = 0;
err = AudioUnitSetProperty(
auHAL,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
0,
&input,
sizeof(input));
if (err)
exit(err);
err = AudioUnitInitialize(auHAL);
if (err)
exit(err);
err = AudioOutputUnitStart(auHAL);
if (err)
exit(err);
}
Example 14: cubeb_stream_init
//......... some code omitted here .........
latency < 1 || latency > 2000) {
return CUBEB_ERROR_INVALID_FORMAT;
}
memset(&ss, 0, sizeof(ss));
ss.mFormatFlags = 0;
switch (stream_params.format) {
case CUBEB_SAMPLE_S16LE:
ss.mBitsPerChannel = 16;
ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger;
break;
case CUBEB_SAMPLE_S16BE:
ss.mBitsPerChannel = 16;
ss.mFormatFlags |= kAudioFormatFlagIsSignedInteger |
kAudioFormatFlagIsBigEndian;
break;
case CUBEB_SAMPLE_FLOAT32LE:
ss.mBitsPerChannel = 32;
ss.mFormatFlags |= kAudioFormatFlagIsFloat;
break;
case CUBEB_SAMPLE_FLOAT32BE:
ss.mBitsPerChannel = 32;
ss.mFormatFlags |= kAudioFormatFlagIsFloat |
kAudioFormatFlagIsBigEndian;
break;
default:
return CUBEB_ERROR_INVALID_FORMAT;
}
ss.mFormatID = kAudioFormatLinearPCM;
ss.mFormatFlags |= kLinearPCMFormatFlagIsPacked;
ss.mSampleRate = stream_params.rate;
ss.mChannelsPerFrame = stream_params.channels;
ss.mBytesPerFrame = (ss.mBitsPerChannel / 8) * ss.mChannelsPerFrame;
ss.mFramesPerPacket = 1;
ss.mBytesPerPacket = ss.mBytesPerFrame * ss.mFramesPerPacket;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_DefaultOutput;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
comp = FindNextComponent(NULL, &desc);
assert(comp);
stm = calloc(1, sizeof(*stm));
assert(stm);
stm->data_callback = data_callback;
stm->state_callback = state_callback;
stm->user_ptr = user_ptr;
stm->sample_spec = ss;
r = pthread_mutex_init(&stm->mutex, NULL);
assert(r == 0);
stm->frames_played = 0;
stm->frames_queued = 0;
r = OpenAComponent(comp, &stm->unit);
if (r != 0) {
fprintf(stderr, "cubeb_audiounit: FATAL: OpenAComponent returned %ld\n", (long) r);
}
assert(r == 0);
input.inputProc = audio_unit_output_callback;
input.inputProcRefCon = stm;
r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global, 0, &input, sizeof(input));
if (r != 0) {
fprintf(stderr, "cubeb_audiounit: FATAL: AudioUnitSetProperty(SetRenderCallback) returned %ld\n", (long) r);
}
assert(r == 0);
r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
0, &ss, sizeof(ss));
if (r != 0) {
fprintf(stderr, "cubeb_audiounit: FATAL: AudioUnitSetProperty(StreamFormat) returned %ld\n", (long) r);
}
assert(r == 0);
buffer_size = ss.mSampleRate / 1000.0 * latency * ss.mBytesPerFrame / NBUFS;
if (buffer_size % ss.mBytesPerFrame != 0) {
buffer_size += ss.mBytesPerFrame - (buffer_size % ss.mBytesPerFrame);
}
assert(buffer_size % ss.mBytesPerFrame == 0);
r = AudioUnitInitialize(stm->unit);
if (r != 0) {
fprintf(stderr, "cubeb_audiounit: FATAL: AudioUnitInitialize returned %ld\n", (long) r);
}
assert(r == 0);
*stream = stm;
return CUBEB_OK;
}
Example 15: audiounit_stream_init
//......... some code omitted here .........
#endif
assert(comp);
stm = calloc(1, sizeof(*stm));
assert(stm);
stm->context = context;
stm->data_callback = data_callback;
stm->state_callback = state_callback;
stm->user_ptr = user_ptr;
stm->sample_spec = ss;
r = pthread_mutex_init(&stm->mutex, NULL);
assert(r == 0);
stm->frames_played = 0;
stm->frames_queued = 0;
stm->current_latency_frames = 0;
stm->hw_latency_frames = UINT64_MAX;
#if MAC_OS_X_VERSION_MIN_REQUIRED < 1060
r = OpenAComponent(comp, &stm->unit);
#else
r = AudioComponentInstanceNew(comp, &stm->unit);
#endif
if (r != 0) {
audiounit_stream_destroy(stm);
return CUBEB_ERROR;
}
input.inputProc = audiounit_output_callback;
input.inputProcRefCon = stm;
r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global, 0, &input, sizeof(input));
if (r != 0) {
audiounit_stream_destroy(stm);
return CUBEB_ERROR;
}
buffer_size = latency / 1000.0 * ss.mSampleRate;
/* Get the range of latency this particular device can work with, and clamp
* the requested latency to this acceptable range. */
if (audiounit_get_acceptable_latency_range(&latency_range) != CUBEB_OK) {
audiounit_stream_destroy(stm);
return CUBEB_ERROR;
}
if (buffer_size < (unsigned int) latency_range.mMinimum) {
buffer_size = (unsigned int) latency_range.mMinimum;
} else if (buffer_size > (unsigned int) latency_range.mMaximum) {
buffer_size = (unsigned int) latency_range.mMaximum;
}
/**
* Get the default buffer size. If our latency request is below the default,
* set it. Otherwise, use the default latency.
**/
size = sizeof(default_buffer_size);
r = AudioUnitGetProperty(stm->unit, kAudioDevicePropertyBufferFrameSize,
kAudioUnitScope_Output, 0, &default_buffer_size, &size);
if (r != 0) {
audiounit_stream_destroy(stm);
return CUBEB_ERROR;
}
// Setting the latency doesn't work well for USB headsets (eg. plantronics).
// Keep the default latency for now.
#if 0
if (buffer_size < default_buffer_size) {
/* Set the maximum number of frame that the render callback will ask for,
* effectively setting the latency of the stream. This is process-wide. */
r = AudioUnitSetProperty(stm->unit, kAudioDevicePropertyBufferFrameSize,
kAudioUnitScope_Output, 0, &buffer_size, sizeof(buffer_size));
if (r != 0) {
audiounit_stream_destroy(stm);
return CUBEB_ERROR;
}
}
#endif
r = AudioUnitSetProperty(stm->unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
0, &ss, sizeof(ss));
if (r != 0) {
audiounit_stream_destroy(stm);
return CUBEB_ERROR;
}
r = AudioUnitInitialize(stm->unit);
if (r != 0) {
audiounit_stream_destroy(stm);
return CUBEB_ERROR;
}
*stream = stm;
return CUBEB_OK;
}
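The clamping above keeps the requested latency inside the range the device reports; the same logic as a stand-alone sketch:
/* Clamp a frame count to a device-reported AudioValueRange (illustrative). */
static UInt32 clamp_frames(UInt32 frames, AudioValueRange range)
{
if (frames < (UInt32) range.mMinimum) return (UInt32) range.mMinimum;
if (frames > (UInt32) range.mMaximum) return (UInt32) range.mMaximum;
return frames;
}
/* e.g. asking for 5 ms at 44.1 kHz is about 220 frames; a device minimum of
512 frames clamps it up to 512. */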