This article collects typical usage examples of the C++ method AudioParameter::add. If you are unsure what AudioParameter::add does, how to call it, or where to find working examples, the curated code samples below should help. You can also read more about the containing class, AudioParameter.
Six code examples of AudioParameter::add are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code samples.
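Before the examples, here is a minimal, self-contained sketch of the usual add/toString/get round trip. It is illustrative only and not taken from any of the samples below; the include path for AudioParameter has moved between Android releases, so treat the header and names here as assumptions.
#include <media/AudioParameter.h>   // assumed path; older releases keep the class elsewhere
#include <utils/Errors.h>
#include <utils/String8.h>
using namespace android;
// Build a key/value reply string the way the HAL examples below do.
String8 buildReply()
{
    AudioParameter reply;                               // empty key/value set
    reply.add(String8("LowLatency"), String8("1024"));  // string value
    reply.addInt(String8("routing"), 2);                // integer value
    return reply.toString();                            // roughly "LowLatency=1024;routing=2"
}
// Parse such a string back on the receiving side.
void parseReply(const String8 &keyValuePairs)
{
    AudioParameter param(keyValuePairs);
    String8 value;
    int routing = 0;
    if (param.get(String8("LowLatency"), value) == NO_ERROR) {
        // value now holds "1024"
    }
    if (param.getInt(String8("routing"), routing) == NO_ERROR) {
        // routing now holds 2
    }
}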
Example 1: getParameters
String8 AudioALSAStreamOut::getParameters(const String8 &keys)
{
#ifdef MTK_DYNAMIC_CHANGE_HAL_BUFFER_SIZE
    ALOGD("%s, keyvalue %s", __FUNCTION__, keys.string());
    String8 value;
    String8 keyLowLatency = String8("LowLatency");
    AudioParameter param = AudioParameter(keys);
    AudioParameter returnParam = AudioParameter();

    if (param.get(keyLowLatency, value) == NO_ERROR)
    {
        param.remove(keyLowLatency);
        char buf[10];
        sprintf(buf, "%d", LOW_LATENCY_HAL_BUFFER_SIZE);
        returnParam.add(keyLowLatency, String8(buf));
    }

    const String8 keyValuePairs = returnParam.toString();
    ALOGD("-%s(), return \"%s\"", __FUNCTION__, keyValuePairs.string());
    return keyValuePairs;
#else
    ALOGD("%s()", __FUNCTION__);
    AudioParameter param = AudioParameter(keys);
    return param.toString();
#endif
}
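As a rough caller-side illustration (not part of the sample above; `out` stands for a hypothetical AudioALSAStreamOut pointer), the LowLatency value returned by this getParameters() could be read back like this:
String8 reply = out->getParameters(String8("LowLatency"));
AudioParameter replyParam(reply);
int lowLatencyBufferSize = 0;
if (replyParam.getInt(String8("LowLatency"), lowLatencyBufferSize) == NO_ERROR) {
    // lowLatencyBufferSize holds LOW_LATENCY_HAL_BUFFER_SIZE as reported by the HAL
}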
Example 2: startInput
status_t AudioPolicyManager::startInput(audio_io_handle_t input)
{
    status_t status = AudioPolicyManagerBase::startInput(input);

    if (status == NO_ERROR) {
        AudioInputDescriptor *inputDesc = mInputs.valueFor(input);
        String8 key = String8("Input Source");
        String8 value;

        switch (inputDesc->mInputSource) {
        case AUDIO_SOURCE_VOICE_RECOGNITION:
            value = String8("Voice Recognition");
            break;
        case AUDIO_SOURCE_CAMCORDER:
            value = String8("Camcorder");
            break;
        case AUDIO_SOURCE_DEFAULT:
        case AUDIO_SOURCE_MIC:
            value = String8("Default");
        default:
            break;
        }

        AudioParameter param = AudioParameter();
        param.add(key, value);
        mpClientInterface->setParameters(input, param.toString());
    }

    return status;
}
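For context, a hypothetical HAL-side counterpart (not from the original source; the class name is invented) would read the "Input Source" value out of the same key/value string in its setParameters():
// Hypothetical input-stream implementation; only the AudioParameter handling is shown.
status_t AudioStreamInExample::setParameters(const String8 &keyValuePairs)
{
    AudioParameter param = AudioParameter(keyValuePairs);
    String8 value;
    if (param.get(String8("Input Source"), value) == NO_ERROR) {
        // value is "Voice Recognition", "Camcorder" or "Default";
        // select the matching tuning/preprocessing profile here
        param.remove(String8("Input Source"));
    }
    return NO_ERROR;
}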
Example 3: releaseOutput
void AudioPolicyManagerSPRD::releaseOutput(audio_io_handle_t output)
{
#ifdef DUMP_DEBUG
    ALOGD("releaseOutput() %d", output);
#endif
    ssize_t index = mOutputs.indexOfKey(output);
    if (index < 0) {
        ALOGW("releaseOutput() releasing unknown output %d", output);
        return;
    }

    if (is_voip_set) {
        AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
        if (outputDesc->mRefCount[AudioSystem::VOICE_CALL] == 0) {
            AudioParameter param;
            param.add(String8("sprd_voip_start"), String8("false"));
            mpClientInterface->setParameters(0, param.toString());
            is_voip_set = false;
        }
    }

#ifdef AUDIO_POLICY_TEST
    int testIndex = testOutputIndex(output);
    if (testIndex != 0) {
        AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
        if (outputDesc->isActive()) {
            mpClientInterface->closeOutput(output);
            delete mOutputs.valueAt(index);
            mOutputs.removeItem(output);
            mTestOutputs[testIndex] = 0;
        }
        return;
    }
#endif //AUDIO_POLICY_TEST

    AudioOutputDescriptor *desc = mOutputs.valueAt(index);
    if (desc->mFlags & AudioSystem::OUTPUT_FLAG_DIRECT) {
        if (desc->mDirectOpenCount <= 0) {
            ALOGW("releaseOutput() invalid open count %d for output %d",
                  desc->mDirectOpenCount, output);
            return;
        }
        if (--desc->mDirectOpenCount == 0) {
            closeOutput(output);
            // If effects were present on the output, audioflinger moved them to the primary
            // output by default: move them back to the appropriate output.
            audio_io_handle_t dstOutput = getOutputForEffect();
            if (dstOutput != mPrimaryOutput) {
                mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, mPrimaryOutput, dstOutput);
            }
        }
    }
}
Example 4: AudioParameter
String8 A2dpAudioInterface::A2dpAudioStreamOut::getParameters(const String8& keys)
{
    AudioParameter param = AudioParameter(keys);
    String8 value;
    String8 key = String8("a2dp_sink_address");

    if (param.get(key, value) == NO_ERROR) {
        value = mA2dpAddress;
        param.add(key, value);
    }
    key = AudioParameter::keyRouting;
    if (param.get(key, value) == NO_ERROR) {
        param.addInt(key, (int)mDevice);
    }

    LOGV("A2dpAudioStreamOut::getParameters() %s", param.toString().string());
    return param.toString();
}
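A brief follow-up, assuming the standard AudioParameter API (the reader below is not part of the original source): because the routing value is stored with addInt(), the framework side would read it back with getInt() from the returned string (`reply` here stands for the value returned by getParameters):
AudioParameter replyParam(reply);
int device = 0;
if (replyParam.getInt(String8(AudioParameter::keyRouting), device) == NO_ERROR) {
    // device holds the audio_devices_t value added with addInt() above
}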
Example 5: startOutput
status_t AudioPolicyManagerSPRD::startOutput(audio_io_handle_t output,
                                             AudioSystem::stream_type stream,
                                             int session)
{
#ifdef DUMP_DEBUG
    ALOGD("startOutput() output %d, stream %d, session %d", output, stream, session);
#endif
    ssize_t index = mOutputs.indexOfKey(output);
    if (index < 0) {
        ALOGW("startOutput() unknown output %d", output);
        return BAD_VALUE;
    }

    AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);

    // increment usage count for this stream on the requested output:
    // NOTE that the usage count is the same for duplicated output and hardware output which is
    // necessary for a correct control of hardware output routing by startOutput() and stopOutput()
    outputDesc->changeRefCount(stream, 1);

#ifdef DUMP_DEBUG
    ALOGD("startOutput() is_voip_set %d, stream %d", is_voip_set, stream);
#endif
    if ((!is_voip_set) && (stream == AudioSystem::VOICE_CALL)) {
        for (size_t i = 0; i < mOutputs.size(); i++) {
            AudioOutputDescriptor *outputDesc = mOutputs.valueAt(i);
#ifdef DUMP_DEBUG
            ALOGD("startOutput() outputDesc->mRefCount[AudioSystem::VOICE_CALL] %d",
                  outputDesc->mRefCount[AudioSystem::VOICE_CALL]);
#endif
            if (outputDesc->mRefCount[AudioSystem::VOICE_CALL] == 1) {
                AudioParameter param;
                param.add(String8("sprd_voip_start"), String8("true"));
                mpClientInterface->setParameters(0, param.toString());
                is_voip_set = true;
            }
        }
    }

    if (outputDesc->mRefCount[stream] == 1) {
        audio_devices_t newDevice = getNewDevice(output, false /*fromCache*/);
        routing_strategy strategy = getStrategy(stream);
        bool shouldWait = (strategy == STRATEGY_SONIFICATION) ||
                          (strategy == STRATEGY_SONIFICATION_RESPECTFUL);
        uint32_t waitMs = 0;
        bool force = false;
        for (size_t i = 0; i < mOutputs.size(); i++) {
            AudioOutputDescriptor *desc = mOutputs.valueAt(i);
            if (desc != outputDesc) {
                // force a device change if any other output is managed by the same hw
                // module and has a current device selection that differs from selected device.
                // In this case, the audio HAL must receive the new device selection so that it can
                // change the device currently selected by the other active output.
                if (outputDesc->sharesHwModuleWith(desc) &&
                    desc->device() != newDevice) {
                    force = true;
                }
                // wait for audio on other active outputs to be presented when starting
                // a notification so that audio focus effect can propagate.
                uint32_t latency = desc->latency();
                if (shouldWait && desc->isActive(latency * 2) && (waitMs < latency)) {
                    waitMs = latency;
                }
            }
        }
        uint32_t muteWaitMs = setOutputDevice(output, newDevice, force);

        // handle special case for sonification while in call
        if (isInCall()) {
            handleIncallSonification(stream, true, false);
        }

        // apply volume rules for current stream and device if necessary
        // filter devices according to output selected
        if (!outputDesc->isDuplicated())
            newDevice = (audio_devices_t)(newDevice & outputDesc->mProfile->mSupportedDevices);
        ALOGW("startOutput() select newDevice %d", newDevice);
        checkAndSetVolume(stream,
                          mStreams[stream].getVolumeIndex(newDevice),
                          output,
                          newDevice);

        // update the outputs if starting an output with a stream that can affect notification
        // routing
        handleNotificationRoutingForStream(stream);
        if (waitMs > muteWaitMs) {
            usleep((waitMs - muteWaitMs) * 2 * 1000);
        }
    }
    return NO_ERROR;
}
Example 6: stopOutput
status_t AudioPolicyManagerSPRD::stopOutput(audio_io_handle_t output,
                                            AudioSystem::stream_type stream,
                                            int session)
{
#ifdef DUMP_DEBUG
    ALOGD("stopOutput() output %d, stream %d, session %d", output, stream, session);
#endif
    ssize_t index = mOutputs.indexOfKey(output);
    if (index < 0) {
        ALOGW("stopOutput() unknown output %d", output);
        return BAD_VALUE;
    }

    AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);

    // handle special case for sonification while in call
    if (isInCall()) {
        handleIncallSonification(stream, false, false);
    }

#ifdef DUMP_DEBUG
    ALOGD("stopOutput() is_voip_set %d, stream %d, output size %d", is_voip_set, stream, mOutputs.size());
#endif
    if (is_voip_set && (stream == AudioSystem::VOICE_CALL)) {
        for (size_t i = 0; i < mOutputs.size(); i++) {
            AudioOutputDescriptor *outputDesc = mOutputs.valueAt(i);
#ifdef DUMP_DEBUG
            ALOGD("stopOutput() outputDesc->mRefCount[AudioSystem::VOICE_CALL] %d",
                  outputDesc->mRefCount[AudioSystem::VOICE_CALL]);
#endif
            if (outputDesc->mRefCount[AudioSystem::VOICE_CALL] == 1) {
                AudioParameter param;
                param.add(String8("sprd_voip_start"), String8("false"));
                mpClientInterface->setParameters(0, param.toString());
                is_voip_set = false;
            }
        }
    }

    if (outputDesc->mRefCount[stream] > 0) {
        // decrement usage count of this stream on the output
        outputDesc->changeRefCount(stream, -1);
        // store time at which the stream was stopped - see isStreamActive()
        if (outputDesc->mRefCount[stream] == 0) {
            outputDesc->mStopTime[stream] = systemTime();
            audio_devices_t newDevice = getNewDevice(output, false /*fromCache*/);
            // delay the device switch by twice the latency because stopOutput() is executed when
            // the track stop() command is received and at that time the audio track buffer can
            // still contain data that needs to be drained. The latency only covers the audio HAL
            // and kernel buffers. Also the latency does not always include additional delay in the
            // audio path (audio DSP, CODEC ...)
            setOutputDevice(output, newDevice, false, outputDesc->mLatency * 2);

            // force restoring the device selection on other active outputs if it differs from the
            // one being selected for this output
            for (size_t i = 0; i < mOutputs.size(); i++) {
                audio_io_handle_t curOutput = mOutputs.keyAt(i);
                AudioOutputDescriptor *desc = mOutputs.valueAt(i);
                if (curOutput != output &&
                    desc->isActive() &&
                    outputDesc->sharesHwModuleWith(desc) &&
                    (newDevice != desc->device())) {
                    setOutputDevice(curOutput,
                                    getNewDevice(curOutput, false /*fromCache*/),
                                    true,
                                    outputDesc->mLatency * 2);
                }
            }
            // update the outputs if stopping one with a stream that can affect notification routing
            handleNotificationRoutingForStream(stream);
        }
        return NO_ERROR;
    } else {
        ALOGW("stopOutput() refcount is already 0 for output %d", output);
        return INVALID_OPERATION;
    }
}