This article collects typical usage examples of the C++ method AudioMixerClientData::getMasterAvatarGain. If you are wondering what AudioMixerClientData::getMasterAvatarGain does, or how and when to call it, the curated examples below may help. You can also explore further usage examples of the enclosing class, AudioMixerClientData.
The following presents 2 code examples of AudioMixerClientData::getMasterAvatarGain, sorted by popularity by default.
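For orientation, here is a minimal sketch of what the accessor presumably looks like, inferred purely from the call sites in the two examples below; the setter, the member name, and the default value are assumptions, not the project's actual header.

// Hypothetical sketch of the accessor pair -- inferred from the call sites
// below, not copied from the actual AudioMixerClientData header.
class AudioMixerClientData /* : public NodeData */ {
public:
    // Master gain applied to every avatar (microphone) stream mixed for this listener.
    float getMasterAvatarGain() const { return _masterAvatarGain; }
    void setMasterAvatarGain(float gain) { _masterAvatarGain = gain; } // assumed setter
private:
    float _masterAvatarGain { 1.0f }; // assumed default of unity gain
};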
Example 1: computeGain
float computeGain(const AudioMixerClientData& listenerNodeData, const AvatarAudioStream& listeningNodeStream,
                  const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition, bool isEcho) {
    float gain = 1.0f;

    // injector: apply attenuation
    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();

    // avatar: apply fixed off-axis attenuation to make them quieter as they turn away
    } else if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        // source directivity is based on angle of emission, in local coordinates
        glm::vec3 direction = glm::normalize(rotatedListenerPosition);
        float angleOfDelivery = fastAcosf(glm::clamp(-direction.z, -1.0f, 1.0f)); // UNIT_NEG_Z is "forward"

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
                                   (angleOfDelivery * (OFF_AXIS_ATTENUATION_STEP / PI_OVER_TWO));
        gain *= offAxisCoefficient;

        // apply master gain, only to avatars
        gain *= listenerNodeData.getMasterAvatarGain();
    }

    auto& audioZones = AudioMixer::getAudioZones();
    auto& zoneSettings = AudioMixer::getZoneSettings();

    // find distance attenuation coefficient
    float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
    for (int i = 0; i < zoneSettings.length(); ++i) {
        if (audioZones[zoneSettings[i].source].contains(streamToAdd.getPosition()) &&
            audioZones[zoneSettings[i].listener].contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = zoneSettings[i].coefficient;
            break;
        }
    }

    // distance attenuation
    const float ATTENUATION_START_DISTANCE = 1.0f;
    float distance = glm::length(relativePosition);
    assert(ATTENUATION_START_DISTANCE > EPSILON);
    if (distance >= ATTENUATION_START_DISTANCE) {
        // translate the zone setting to gain per log2(distance)
        float g = 1.0f - attenuationPerDoublingInDistance;
        g = glm::clamp(g, EPSILON, 1.0f);

        // calculate the distance coefficient using the distance to this node
        float distanceCoefficient = fastExp2f(fastLog2f(g) * fastLog2f(distance / ATTENUATION_START_DISTANCE));

        // multiply the current attenuation coefficient by the distance coefficient
        gain *= distanceCoefficient;
    }

    return gain;
}
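A note on the distance-attenuation step above: because exp2(log2(g) * log2(d)) equals g raised to the power log2(d), each doubling of the distance d multiplies the gain by g. The standalone snippet below verifies this identity with the standard <cmath> functions (the fast* helpers in the example are presumably fast approximations of these):

#include <cassert>
#include <cmath>

// Demonstrates the identity used in computeGain:
//   exp2(log2(g) * log2(d)) == pow(g, log2(d)),
// so each doubling of distance scales the gain by g.
int main() {
    float g = 0.5f;        // gain retained per doubling of distance
    float distance = 8.0f; // 3 doublings past the 1 m start distance

    float coefficient = std::exp2(std::log2(g) * std::log2(distance));
    assert(std::fabs(coefficient - std::pow(g, std::log2(distance))) < 1e-6f);

    // 0.5^3 == 0.125: the source is attenuated to 1/8 of full gain at 8 m
    assert(std::fabs(coefficient - 0.125f) < 1e-6f);
    return 0;
}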
Example 2: prepareMix
bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
    AudioMixerClientData* listenerData = static_cast<AudioMixerClientData*>(listener->getLinkedData());
    AvatarAudioStream* listenerAudioStream = listenerData->getAvatarAudioStream();

    // zero out the mix for this listener
    memset(_mixSamples, 0, sizeof(_mixSamples));

    bool isThrottling = _numToRetain != -1;
    bool isSoloing = !listenerData->getSoloedNodes().empty();

    auto& streams = listenerData->getStreams();

    addStreams(*listener, *listenerData);

    // Process skipped streams
    erase_if(streams.skipped, [&](MixableStream& stream) {
        if (shouldBeRemoved(stream, _sharedData)) {
            return true;
        }

        if (!shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
            if (shouldBeInactive(stream)) {
                streams.inactive.push_back(move(stream));
                ++stats.skippedToInactive;
            } else {
                streams.active.push_back(move(stream));
                ++stats.skippedToActive;
            }
            return true;
        }

        if (!isThrottling) {
            updateHRTFParameters(stream, *listenerAudioStream,
                                 listenerData->getMasterAvatarGain());
        }
        return false;
    });

    // Process inactive streams
    erase_if(streams.inactive, [&](MixableStream& stream) {
        if (shouldBeRemoved(stream, _sharedData)) {
            return true;
        }

        if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
            streams.skipped.push_back(move(stream));
            ++stats.inactiveToSkipped;
            return true;
        }

        if (!shouldBeInactive(stream)) {
            streams.active.push_back(move(stream));
            ++stats.inactiveToActive;
            return true;
        }

        if (!isThrottling) {
            updateHRTFParameters(stream, *listenerAudioStream,
                                 listenerData->getMasterAvatarGain());
        }
        return false;
    });

    // Process active streams
    erase_if(streams.active, [&](MixableStream& stream) {
        if (shouldBeRemoved(stream, _sharedData)) {
            return true;
        }

        if (isThrottling) {
            // we're throttling, so we need to update the approximate volume for any un-skipped streams,
            // unless this is simply for an echo (in which case the approx volume is 1.0)
            stream.approximateVolume = approximateVolume(stream, listenerAudioStream);
        } else {
            if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
                addStream(stream, *listenerAudioStream, 0.0f, isSoloing);
                streams.skipped.push_back(move(stream));
                ++stats.activeToSkipped;
                return true;
            }

            addStream(stream, *listenerAudioStream, listenerData->getMasterAvatarGain(),
                      isSoloing);

            if (shouldBeInactive(stream)) {
                // To reduce artifacts, we still call render to flush the HRTF for every silent
                // source on the first frame where the source becomes silent;
                // this ensures the correct tail from the last mixed block.
                streams.inactive.push_back(move(stream));
                ++stats.activeToInactive;
                return true;
            }
        }
        return false;
    });

    if (isThrottling) {
        // since we're throttling, we need to partition the mixable streams into throttled and unthrottled sets
        int numToRetain = min(_numToRetain, (int)streams.active.size()); // make sure we don't overflow
        //......... the rest of this example is omitted .........
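The omitted tail handles the throttled branch. As a sketch of one plausible way to retain only the numToRetain loudest streams — assuming nothing beyond the approximateVolume field set above, and not taken from the project's actual code — a partial ordering with std::nth_element would look like this:

// Hypothetical sketch: keep the numToRetain loudest active streams and treat
// the rest as throttled. std::nth_element orders only around the boundary,
// which is cheaper than a full sort.
#include <algorithm>
#include <vector>

struct MixableStream { float approximateVolume { 0.0f }; /* ... */ };

void partitionByVolume(std::vector<MixableStream>& active, int numToRetain) {
    if (numToRetain < (int)active.size()) {
        std::nth_element(active.begin(), active.begin() + numToRetain, active.end(),
                         [](const MixableStream& a, const MixableStream& b) {
                             return a.approximateVolume > b.approximateVolume; // loudest first
                         });
        // streams in [begin, begin + numToRetain) would be mixed normally;
        // streams in [begin + numToRetain, end) would be throttled this frame
    }
}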