This page collects typical usage examples of the C++ AudioUnitGetProperty function. If you are unsure what AudioUnitGetProperty does, how to call it, or want to see it used in context, the hand-picked code samples below should help.
Below are 15 code examples of AudioUnitGetProperty, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
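All of the examples follow the same calling pattern: pass the AudioUnit, a property ID, a scope, an element (bus number), a buffer for the property data, and a size that is read on input and updated on output. As a minimal sketch (the variable `unit` is a placeholder for an AudioUnit you have already created and initialized; it does not come from any example below):

    // Requires <AudioToolbox/AudioToolbox.h> (older SDKs: <AudioUnit/AudioUnit.h>).
    // `unit` is a hypothetical, already-initialized AudioUnit.
    AudioStreamBasicDescription asbd = {0};
    UInt32 size = sizeof(asbd);
    OSStatus err = AudioUnitGetProperty(unit,
                                        kAudioUnitProperty_StreamFormat, // property ID
                                        kAudioUnitScope_Output,          // scope
                                        0,                               // element (bus)
                                        &asbd,                           // out: property data
                                        &size);                          // in/out: size in bytes
    if (err != noErr) {
        // asbd and size are only meaningful when err == noErr
    }

On success, `size` is updated to the number of bytes actually written. For variable-sized properties, call AudioUnitGetPropertyInfo first to discover the required size and allocate a buffer, as Example 7 below does.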
Example 1: sizeof
std::vector<AudioStreamBasicDescription> ofxAudioUnitMatrixMixer::getASBDs() const
{
    std::vector<AudioStreamBasicDescription> ASBDs;
    AudioStreamBasicDescription inputASBD, outputASBD;
    UInt32 inputSize, outputSize;
    // the in/out size must be the size of the property data being requested
    inputSize = outputSize = sizeof(AudioStreamBasicDescription);

    OFXAU_PRINT(AudioUnitGetProperty(*_unit,
                                     kAudioUnitProperty_StreamFormat,
                                     kAudioUnitScope_Input,
                                     0,
                                     &inputASBD,
                                     &inputSize),
                "getting matrix mixer input ASBD");

    OFXAU_PRINT(AudioUnitGetProperty(*_unit,
                                     kAudioUnitProperty_StreamFormat,
                                     kAudioUnitScope_Output,
                                     0,
                                     &outputASBD,
                                     &outputSize),
                "getting matrix mixer output ASBD");

    ASBDs.push_back(inputASBD);
    ASBDs.push_back(outputASBD);
    return ASBDs;
}
Example 2: Init
// get the fast dispatch pointers
void Init()
{
    UInt32 size = sizeof(AudioUnitRenderProc);
    if (AudioUnitGetProperty(mUnit, kAudioUnitProperty_FastDispatch,
                             kAudioUnitScope_Global, kAudioUnitRenderSelect,
                             &mRenderProc, &size) != noErr)
        mRenderProc = NULL;
    if (AudioUnitGetProperty(mUnit, kAudioUnitProperty_FastDispatch,
                             kAudioUnitScope_Global, kAudioUnitGetParameterSelect,
                             &mGetParamProc, &size) != noErr)
        mGetParamProc = NULL;
    if (AudioUnitGetProperty(mUnit, kAudioUnitProperty_FastDispatch,
                             kAudioUnitScope_Global, kAudioUnitSetParameterSelect,
                             &mSetParamProc, &size) != noErr)
        mSetParamProc = NULL;
    if (AudioUnitGetProperty(mUnit, kAudioUnitProperty_FastDispatch,
                             kAudioUnitScope_Global, kMusicDeviceMIDIEventSelect,
                             &mMIDIEventProc, &size) != noErr)
        mMIDIEventProc = NULL;

    if (mRenderProc || mGetParamProc || mSetParamProc || mMIDIEventProc)
        mConnInstanceStorage = GetComponentInstanceStorage(mUnit);
    else
        mConnInstanceStorage = NULL;
}
Example 3: sizeof
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//	AUMixer3DView::SetRenderingFlagsCheckboxes
//
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
void AUMixer3DView::SetRenderingFlagsCheckboxes()
{
    // set the check boxes according to the current rendering flags
    UInt32 flags;
    UInt32 size = sizeof(flags);

    AudioUnitGetProperty(GetEditAudioUnit(),
                         kAudioUnitProperty_3DMixerRenderingFlags,
                         kAudioUnitScope_Input,
                         0,
                         &flags,
                         &size);

    UInt32 usesReverb = 0;
    AudioUnitGetProperty(GetEditAudioUnit(),
                         kAudioUnitProperty_UsesInternalReverb,
                         kAudioUnitScope_Input,
                         0,
                         &usesReverb,
                         &size);

    SetCheckbox('atr0', 1, flags & k3DMixerRenderingFlags_InterAuralDelay);
    SetCheckbox('atr1', 2, flags & k3DMixerRenderingFlags_DopplerShift);
    SetCheckbox('atr2', 3, flags & k3DMixerRenderingFlags_DistanceAttenuation);
    SetCheckbox('atr3', 4, flags & k3DMixerRenderingFlags_DistanceFilter);
    SetCheckbox('atr4', 5, flags & k3DMixerRenderingFlags_DistanceDiffusion);
    SetCheckbox('rvrb', 6, usesReverb);

    ::UpdateControls(GetCarbonWindow(), NULL);
}
Example 4: PrintMatrixMixerVolumes
void PrintMatrixMixerVolumes (FILE* file, AudioUnit au)
{
    UInt32 dims[2];
    UInt32 theSize = sizeof(UInt32) * 2;
    Float32 *theVols = NULL;
    OSStatus result;

    // this call will fail if the unit is NOT initialized as it would present an incomplete state
    ca_require_noerr (result = AudioUnitGetProperty (au, kAudioUnitProperty_MatrixDimensions,
                                  kAudioUnitScope_Global, 0, dims, &theSize), home);

    theSize = ((dims[0] + 1) * (dims[1] + 1)) * sizeof(Float32);
    theVols = static_cast<Float32*> (malloc (theSize));

    ca_require_noerr (result = AudioUnitGetProperty (au, kAudioUnitProperty_MatrixLevels,
                                  kAudioUnitScope_Global, 0, theVols, &theSize), home);

home:
    if (result) {
        if (theVols)
            free(theVols);
        return;
    }

    theSize /= sizeof(Float32);
    unsigned int inputs = dims[0];
    unsigned int outputs = dims[1];
    fprintf (file, "\tInput Channels = %d, Output Channels = %d\n", (int)dims[0], (int)dims[1]);
    PrintBuses (file, "Input", au, kAudioUnitScope_Input);
    PrintBuses (file, "Output", au, kAudioUnitScope_Output);
    fprintf (file, "\tGlobal Volume: %.3f\n", theVols [theSize - 1]);

    for (unsigned int i = 0; i < (inputs + 1); ++i) {
        if (i < inputs) {
            fprintf (file, "\t%.3f ", theVols[(i + 1) * (outputs + 1) - 1]);
            for (unsigned int j = 0; j < outputs; ++j)
                fprintf (file, "(%.3f) ", theVols[(i * (outputs + 1)) + j]);
        } else {
            fprintf (file, "\t ");
            for (unsigned int j = 0; j < outputs; ++j)
                fprintf (file, " %.3f ", theVols[(i * (outputs + 1)) + j]);
        }
        fprintf (file, "\n");
    }

#if 0
    for (unsigned int i = 0; i < theSize; ++i)
        printf ("%f, ", theVols[i]);
#endif

    free(theVols);
}
Example 5: notification
static OSStatus notification(AudioDeviceID inDevice,
                             UInt32 inChannel,
                             Boolean isInput,
                             AudioDevicePropertyID inPropertyID,
                             void* inClientData)
{
    coreaudio_driver_t* driver = (coreaudio_driver_t*)inClientData;

    switch (inPropertyID) {

        case kAudioDeviceProcessorOverload:
            driver->xrun_detected = 1;
            break;

        case kAudioDevicePropertyNominalSampleRate: {
            UInt32 outSize = sizeof(Float64);
            Float64 sampleRate;
            AudioStreamBasicDescription srcFormat, dstFormat;
            OSStatus err = AudioDeviceGetProperty(driver->device_id, 0, kAudioDeviceSectionGlobal,
                                                  kAudioDevicePropertyNominalSampleRate, &outSize, &sampleRate);
            if (err != noErr) {
                jack_error("Cannot get current sample rate");
                return kAudioHardwareUnsupportedOperationError;
            }
            JCALog("JackCoreAudioDriver::NotificationCallback kAudioDevicePropertyNominalSampleRate %ld\n", (long)sampleRate);

            outSize = sizeof(AudioStreamBasicDescription);

            // Update SR for input
            err = AudioUnitGetProperty(driver->au_hal, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &srcFormat, &outSize);
            if (err != noErr) {
                jack_error("Error calling AudioUnitGetProperty - kAudioUnitProperty_StreamFormat kAudioUnitScope_Input");
            }
            srcFormat.mSampleRate = sampleRate;
            err = AudioUnitSetProperty(driver->au_hal, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &srcFormat, outSize);
            if (err != noErr) {
                jack_error("Error calling AudioUnitSetProperty - kAudioUnitProperty_StreamFormat kAudioUnitScope_Input");
            }

            // Update SR for output
            err = AudioUnitGetProperty(driver->au_hal, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &dstFormat, &outSize);
            if (err != noErr) {
                jack_error("Error calling AudioUnitGetProperty - kAudioUnitProperty_StreamFormat kAudioUnitScope_Output");
            }
            dstFormat.mSampleRate = sampleRate;
            err = AudioUnitSetProperty(driver->au_hal, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &dstFormat, outSize);
            if (err != noErr) {
                jack_error("Error calling AudioUnitSetProperty - kAudioUnitProperty_StreamFormat kAudioUnitScope_Output");
            }
            break;
        }
    }

    return noErr;
}
Example 6: sizeof
OSStatus CAAudioUnit::GetAUPreset (CFPropertyListRef &outData) const
{
    UInt32 dataSize = sizeof(outData);
    return AudioUnitGetProperty (AU(), kAudioUnitProperty_ClassInfo,
                                 kAudioUnitScope_Global, 0,
                                 &outData, &dataSize);
}
Example 7: AudioUnitGetPropertyInfo
bool CCoreAudioUnit::GetSupportedChannelLayouts(AudioChannelLayoutList* pLayouts)
{
    if (!m_audioUnit || !pLayouts)
        return false;

    UInt32 propSize = 0;
    Boolean writable = false;
    OSStatus ret = AudioUnitGetPropertyInfo(m_audioUnit,
        kAudioUnitProperty_SupportedChannelLayoutTags, kAudioUnitScope_Input, 0, &propSize, &writable);
    if (ret)
    {
        CLog::Log(LOGERROR, "CCoreAudioUnit::GetSupportedChannelLayouts: "
            "Unable to retrieve supported channel layout property info. Error = %s", GetError(ret).c_str());
        return false;
    }

    UInt32 layoutCount = propSize / sizeof(AudioChannelLayoutTag);
    AudioChannelLayoutTag* pSuppLayouts = new AudioChannelLayoutTag[layoutCount];
    ret = AudioUnitGetProperty(m_audioUnit,
        kAudioUnitProperty_SupportedChannelLayoutTags, kAudioUnitScope_Output, 0, pSuppLayouts, &propSize);
    if (ret)
    {
        CLog::Log(LOGERROR, "CCoreAudioUnit::GetSupportedChannelLayouts: "
            "Unable to retrieve supported channel layouts. Error = %s", GetError(ret).c_str());
        delete[] pSuppLayouts; // avoid leaking the buffer on failure
        return false;
    }

    for (UInt32 layout = 0; layout < layoutCount; layout++)
        pLayouts->push_back(pSuppLayouts[layout]);
    delete[] pSuppLayouts;
    return true;
}
Example 8: sizeof
Float32 CAAUParameter::GetValueFromString(CFStringRef str) const
{
    if (ValuesHaveStrings())
    {
        AudioUnitParameterValueFromString valueString;
        valueString.inParamID = mParameterID;
        valueString.inString = str;
        UInt32 propertySize = sizeof(valueString);

        OSStatus err = AudioUnitGetProperty (mAudioUnit,
                                             kAudioUnitProperty_ParameterValueFromString,
                                             mScope,
                                             mParameterID,
                                             &valueString,
                                             &propertySize);
        if (err == noErr) {
            return valueString.outValue;
        }
    }

    Float32 paramValue = mParamInfo.defaultValue;
    char valstr[32];
    CFStringGetCString(str, valstr, sizeof(valstr), kCFStringEncodingUTF8);
    sscanf(valstr, "%f", &paramValue);
    return paramValue;
}
Example 9: sizeof
// ----------------------------------------------------------
void ofxAudioUnitInput::connectTo(ofxAudioUnit &otherUnit, int destinationBus, int sourceBus)
// ----------------------------------------------------------
{
    AURenderCallbackStruct callback;
    callback.inputProc = pullCallback;
    callback.inputProcRefCon = &_renderContext;

    AudioStreamBasicDescription ASBD;
    UInt32 ASBDSize = sizeof(ASBD);

    OFXAU_RETURN(AudioUnitGetProperty(*otherUnit.getUnit(),
                                      kAudioUnitProperty_StreamFormat,
                                      kAudioUnitScope_Input,
                                      destinationBus,
                                      &ASBD,
                                      &ASBDSize),
                 "getting hardware input destination's format");

    OFXAU_RETURN(AudioUnitSetProperty(*_unit,
                                      kAudioUnitProperty_StreamFormat,
                                      kAudioUnitScope_Output,
                                      1,
                                      &ASBD,
                                      sizeof(ASBD)),
                 "setting hardware input's output format");

    otherUnit.setRenderCallback(callback, destinationBus);
}
Example 10: sizeof
// ----------------------------------------------------------
ofxAudioUnit& ofxAudioUnitInput::connectTo(ofxAudioUnit &otherUnit, int destinationBus, int sourceBus)
// ----------------------------------------------------------
{
    AudioStreamBasicDescription ASBD;
    UInt32 ASBDSize = sizeof(ASBD);

    OFXAU_PRINT(AudioUnitGetProperty(otherUnit,
                                     kAudioUnitProperty_StreamFormat,
                                     kAudioUnitScope_Input,
                                     destinationBus,
                                     &ASBD,
                                     &ASBDSize),
                "getting hardware input destination's format");

    OFXAU_PRINT(AudioUnitSetProperty(*_unit,
                                     kAudioUnitProperty_StreamFormat,
                                     kAudioUnitScope_Output,
                                     1,
                                     &ASBD,
                                     sizeof(ASBD)),
                "setting hardware input's output format");

    AURenderCallbackStruct callback = {PullCallback, &_impl->ctx};
    otherUnit.setRenderCallback(callback, destinationBus);
    return otherUnit;
}
Example 11: AudioUnitSetProperty
void AudioDestinationIOS::configure()
{
    // Set render callback
    AURenderCallbackStruct input;
    input.inputProc = inputProc;
    input.inputProcRefCon = this;
    OSStatus result = AudioUnitSetProperty(m_outputUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &input, sizeof(input));
    ASSERT(!result);

    // Set stream format
    AudioStreamBasicDescription streamFormat;
    UInt32 size = sizeof(AudioStreamBasicDescription);
    result = AudioUnitGetProperty(m_outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, (void*)&streamFormat, &size);
    ASSERT(!result);

    const int bytesPerFloat = sizeof(Float32);
    const int bitsPerByte = 8;
    streamFormat.mSampleRate = m_sampleRate;
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
    streamFormat.mBytesPerPacket = bytesPerFloat;
    streamFormat.mFramesPerPacket = 1;
    streamFormat.mBytesPerFrame = bytesPerFloat;
    streamFormat.mChannelsPerFrame = 2;
    streamFormat.mBitsPerChannel = bitsPerByte * bytesPerFloat;
    result = AudioUnitSetProperty(m_outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, (void*)&streamFormat, sizeof(AudioStreamBasicDescription));
    ASSERT(!result);

    AudioSession::sharedSession().setPreferredBufferSize(kPreferredBufferSize);
}
Example 12: sizeof
const Boolean AudioUnitNode::doBypass() const
{
    UInt32 doB;
    UInt32 size = sizeof(UInt32);
    AudioUnitGetProperty(mUnit, kAudioUnitProperty_BypassEffect, kAudioUnitScope_Global, 0, &doB, &size);
    return doB == 1;
}
Example 13: _impl
// ----------------------------------------------------------
ofxAudioUnitInput::ofxAudioUnitInput(unsigned int samplesToBuffer)
: _impl(new InputImpl)
// ----------------------------------------------------------
{
    _desc = inputDesc;
    initUnit();

    AudioStreamBasicDescription ASBD = {0};
    UInt32 ASBD_size = sizeof(ASBD);
    OFXAU_PRINT(AudioUnitGetProperty(*_unit,
                                     kAudioUnitProperty_StreamFormat,
                                     kAudioUnitScope_Output,
                                     1,
                                     &ASBD,
                                     &ASBD_size),
                "getting input ASBD");

    _impl->ctx.inputUnit = _unit;
    _impl->ctx.bufferList = AudioBufferListRef(AudioBufferListAlloc(ASBD.mChannelsPerFrame, 1024), AudioBufferListRelease);
    _impl->ctx.circularBuffers.resize(ASBD.mChannelsPerFrame);
    _impl->isReady = false;

#if !TARGET_OS_IPHONE
    _impl->inputDeviceID = DefaultAudioInputDevice();
#endif

    for(int i = 0; i < ASBD.mChannelsPerFrame; i++) {
        TPCircularBufferInit(&_impl->ctx.circularBuffers[i], samplesToBuffer * sizeof(Float32));
    }
}
Example 14: gst_core_audio_get_samples_and_latency_impl
static gboolean
gst_core_audio_get_samples_and_latency_impl (GstCoreAudio * core_audio,
    gdouble rate, guint * samples, gdouble * latency)
{
  OSStatus status;
  UInt32 size = sizeof (double);

  if (core_audio->is_passthrough) {
    *samples = _audio_device_get_latency (core_audio->device_id);
    *samples += _audio_stream_get_latency (core_audio->stream_id);
    *latency = (double) *samples / rate;
  } else {
    status = AudioUnitGetProperty (core_audio->audiounit,
        kAudioUnitProperty_Latency, kAudioUnitScope_Global, 0, /* N/A for global */
        latency, &size);
    if (status) {
      GST_WARNING_OBJECT (core_audio->osxbuf, "Failed to get latency: %d",
          (int) status);
      *samples = 0;
      return FALSE;
    }

    *samples = *latency * rate;
  }
  return TRUE;
}
Example 15: ca_get_hardware_latency
static int64_t ca_get_hardware_latency(struct ao *ao) {
    struct priv *p = ao->priv;

    double audiounit_latency_sec = 0.0;
    uint32_t size = sizeof(audiounit_latency_sec);
    OSStatus err = AudioUnitGetProperty(
            p->audio_unit,
            kAudioUnitProperty_Latency,
            kAudioUnitScope_Global,
            0,
            &audiounit_latency_sec,
            &size);
    CHECK_CA_ERROR("cannot get audio unit latency");

    uint32_t frames = 0;
    err = CA_GET_O(p->device, kAudioDevicePropertyLatency, &frames);
    CHECK_CA_ERROR("cannot get device latency");

    uint64_t audiounit_latency_us = audiounit_latency_sec * 1e6;
    uint64_t device_latency_us = ca_frames_to_us(ao, frames);

    MP_VERBOSE(ao, "audiounit latency [us]: %lld\n", audiounit_latency_us);
    MP_VERBOSE(ao, "device latency [us]: %lld\n", device_latency_us);

    return audiounit_latency_us + device_latency_us;

coreaudio_error:
    return 0;
}