This article collects typical usage examples of the C++ method PositionalAudioStream::getType. If you are wondering how exactly PositionalAudioStream::getType is used, or how to call it in your own code, the curated examples below may help. You can also explore further usage examples of the class this method belongs to, PositionalAudioStream.
In total, 7 code examples of PositionalAudioStream::getType are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: computeGain
float computeGain(const AudioMixerClientData& listenerNodeData, const AvatarAudioStream& listeningNodeStream,
        const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition, bool isEcho) {
    float gain = 1.0f;

    // injector: apply attenuation
    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();

    // avatar: apply fixed off-axis attenuation to make them quieter as they turn away
    } else if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        // source directivity is based on angle of emission, in local coordinates
        glm::vec3 direction = glm::normalize(rotatedListenerPosition);
        float angleOfDelivery = fastAcosf(glm::clamp(-direction.z, -1.0f, 1.0f));  // UNIT_NEG_Z is "forward"

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION + (angleOfDelivery * (OFF_AXIS_ATTENUATION_STEP / PI_OVER_TWO));

        gain *= offAxisCoefficient;

        // apply master gain, only to avatars
        gain *= listenerNodeData.getMasterAvatarGain();
    }

    auto& audioZones = AudioMixer::getAudioZones();
    auto& zoneSettings = AudioMixer::getZoneSettings();

    // find distance attenuation coefficient
    float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
    for (int i = 0; i < zoneSettings.length(); ++i) {
        if (audioZones[zoneSettings[i].source].contains(streamToAdd.getPosition()) &&
            audioZones[zoneSettings[i].listener].contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = zoneSettings[i].coefficient;
            break;
        }
    }

    // distance attenuation
    const float ATTENUATION_START_DISTANCE = 1.0f;
    float distance = glm::length(relativePosition);
    assert(ATTENUATION_START_DISTANCE > EPSILON);
    if (distance >= ATTENUATION_START_DISTANCE) {

        // translate the zone setting to gain per log2(distance)
        float g = 1.0f - attenuationPerDoublingInDistance;
        g = glm::clamp(g, EPSILON, 1.0f);

        // calculate the distance coefficient using the distance to this node
        float distanceCoefficient = fastExp2f(fastLog2f(g) * fastLog2f(distance / ATTENUATION_START_DISTANCE));

        // multiply the current attenuation coefficient by the distance coefficient
        gain *= distanceCoefficient;
    }

    return gain;
}
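A quick numeric check of the two formulas in example 1. This is a standalone sketch, not mixer code: PI_OVER_TWO is assumed to be π/2, and std::exp2 / std::log2 stand in for the fastExp2f / fastLog2f helpers. The off-axis coefficient ramps linearly from MAX_OFF_AXIS_ATTENUATION (0.2) at angleOfDelivery = 0 up to 1.0 at π, and the distance coefficient multiplies the gain by g once per doubling of distance beyond ATTENUATION_START_DISTANCE:

#include <cmath>
#include <cstdio>
#include <initializer_list>

int main() {
    const float PI_OVER_TWO = std::acos(0.0f);  // assumed value of the engine constant (pi/2)

    // off-axis coefficient: 0.2 at angleOfDelivery == 0, 0.6 at pi/2, 1.0 at pi
    const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
    const float OFF_AXIS_ATTENUATION_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
    for (float angleOfDelivery : { 0.0f, PI_OVER_TWO, 2.0f * PI_OVER_TWO }) {
        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION + (angleOfDelivery * (OFF_AXIS_ATTENUATION_STEP / PI_OVER_TWO));
        std::printf("angle %.2f -> offAxisCoefficient %.2f\n", angleOfDelivery, offAxisCoefficient);
    }

    // distance coefficient with an assumed attenuationPerDoublingInDistance of 0.5:
    // the gain is multiplied by g = 0.5 for every doubling of distance
    const float ATTENUATION_START_DISTANCE = 1.0f;
    const float g = 1.0f - 0.5f;
    for (float distance : { 1.0f, 2.0f, 4.0f, 8.0f }) {
        float distanceCoefficient = std::exp2(std::log2(g) * std::log2(distance / ATTENUATION_START_DISTANCE));
        std::printf("distance %.0f m -> distanceCoefficient %.3f\n", distance, distanceCoefficient);
    }
    return 0;
}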
Example 2: gainForSource
float AudioMixer::gainForSource(const PositionalAudioStream& streamToAdd,
                                const AvatarAudioStream& listeningNodeStream, const glm::vec3& relativePosition, bool isEcho) {
    float gain = 1.0f;

    float distanceBetween = glm::length(relativePosition);
    if (distanceBetween < EPSILON) {
        distanceBetween = EPSILON;
    }

    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
    }

    if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        // source is another avatar, apply fixed off-axis attenuation to make them quieter as they turn away from listener
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
                                           glm::normalize(rotatedListenerPosition));

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;

        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
                                   (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));

        // multiply the current attenuation coefficient by the calculated off axis coefficient
        gain *= offAxisCoefficient;
    }

    float attenuationPerDoublingInDistance = _attenuationPerDoublingInDistance;
    for (int i = 0; i < _zonesSettings.length(); ++i) {
        if (_audioZones[_zonesSettings[i].source].contains(streamToAdd.getPosition()) &&
            _audioZones[_zonesSettings[i].listener].contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = _zonesSettings[i].coefficient;
            break;
        }
    }

    if (distanceBetween >= ATTENUATION_BEGINS_AT_DISTANCE) {

        // translate the zone setting to gain per log2(distance)
        float g = 1.0f - attenuationPerDoublingInDistance;
        g = (g < EPSILON) ? EPSILON : g;
        g = (g > 1.0f) ? 1.0f : g;

        // calculate the distance coefficient using the distance to this node
        float distanceCoefficient = exp2f(log2f(g) * log2f(distanceBetween / ATTENUATION_BEGINS_AT_DISTANCE));

        // multiply the current attenuation coefficient by the distance coefficient
        gain *= distanceCoefficient;
    }

    return gain;
}
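Examples 1 and 2 compute the angle of delivery in two different ways. The sketch below (an illustration only, assuming normalized inputs and GLM's gtx/vector_angle extension) shows that glm::angle against the forward axis (0, 0, -1) reduces to the acos of the negated z component used in example 1:

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/vector_angle.hpp>
#include <cmath>
#include <cstdio>

int main() {
    glm::vec3 direction = glm::normalize(glm::vec3(1.0f, 0.0f, -1.0f));

    // glm::angle(a, b) is the acos of the clamped dot product of two unit vectors,
    // so against (0, 0, -1) it reduces to acos(-direction.z), as used in example 1
    float viaAngle = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f), direction);
    float viaAcos = std::acos(glm::clamp(-direction.z, -1.0f, 1.0f));

    std::printf("%.4f %.4f\n", viaAngle, viaAcos);  // both print ~0.7854 (pi/4)
    return 0;
}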
Example 3: approximateGain
float approximateGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd) {
    float gain = 1.0f;

    // injector: apply attenuation
    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
    }

    // avatar: skip attenuation - it is too costly to approximate

    // distance attenuation: approximate, ignore zone-specific attenuations
    glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();
    float distance = glm::length(relativePosition);
    return gain / distance;

    // avatar: skip master gain - it is constant for all streams
}
Example 4: approximateGain
float approximateGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
                      const glm::vec3& relativePosition) {
    float gain = 1.0f;

    // injector: apply attenuation
    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
    }

    // avatar: skip attenuation - it is too costly to approximate

    // distance attenuation: approximate, ignore zone-specific attenuations
    // this is a good approximation for streams further than ATTENUATION_START_DISTANCE
    // those streams closer will be amplified; amplifying close streams is acceptable
    // when throttling, as close streams are expected to be heard by a user
    float distance = glm::length(relativePosition);
    return gain / distance;

    // avatar: skip master gain - it is constant for all streams
}
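The comment above is easy to verify with a standalone sketch (illustrative values only: ATTENUATION_START_DISTANCE = 1.0f and attenuationPerDoublingInDistance = 0.5f are assumptions, not engine defaults). For that coefficient, the 1/distance approximation matches the exact log2-based coefficient beyond the start distance, while closer streams come out louder than the exact gain, which is acceptable when throttling:

#include <cmath>
#include <cstdio>
#include <initializer_list>

int main() {
    const float ATTENUATION_START_DISTANCE = 1.0f;  // assumed, matches example 1
    const float g = 1.0f - 0.5f;                    // assumed attenuationPerDoublingInDistance of 0.5

    for (float distance : { 0.5f, 1.0f, 2.0f, 8.0f }) {
        float approximate = 1.0f / distance;  // what approximateGain applies
        float exact = (distance >= ATTENUATION_START_DISTANCE)
                ? std::exp2(std::log2(g) * std::log2(distance / ATTENUATION_START_DISTANCE))
                : 1.0f;  // computeGain leaves streams closer than the start distance unattenuated
        std::printf("distance %.1f m: approximate %.3f, exact %.3f\n", distance, approximate, exact);
    }
    return 0;
}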
Example 5: removeDeadInjectedStreams
void AudioMixerClientData::removeDeadInjectedStreams() {

    const int INJECTOR_CONSECUTIVE_NOT_MIXED_AFTER_STARTED_THRESHOLD = 100;

    // we have this second threshold in case the injected audio is so short that the injected stream
    // never even reaches its desired size, which means it will never start.
    const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 1000;

    QHash<QUuid, PositionalAudioStream*>::Iterator i = _audioStreams.begin(), end = _audioStreams.end();
    while (i != end) {
        PositionalAudioStream* audioStream = i.value();
        if (audioStream->getType() == PositionalAudioStream::Injector && audioStream->isStarved()) {
            int notMixedThreshold = audioStream->hasStarted() ? INJECTOR_CONSECUTIVE_NOT_MIXED_AFTER_STARTED_THRESHOLD
                                                              : INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD;
            if (audioStream->getConsecutiveNotMixedCount() >= notMixedThreshold) {
                delete audioStream;
                i = _audioStreams.erase(i);
                continue;
            }
        }
        ++i;
    }
}
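The loop above deletes entries from the hash while iterating over it. A minimal sketch of that pattern follows; Stream is a placeholder for PositionalAudioStream, and shouldRemove stands in for the starved / consecutive-not-mixed checks:

#include <QHash>
#include <QUuid>

// Erase-while-iterating over a QHash of owning raw pointers.
template <typename Stream, typename Predicate>
void removeMatchingStreams(QHash<QUuid, Stream*>& streams, Predicate shouldRemove) {
    auto i = streams.begin();
    while (i != streams.end()) {
        if (shouldRemove(i.value())) {
            delete i.value();       // the hash stores owning raw pointers, so free the stream first
            i = streams.erase(i);   // QHash::erase returns an iterator to the next entry
        } else {
            ++i;
        }
    }
}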
Example 6: prepareMixForListeningNode
int AudioMixer::prepareMixForListeningNode(Node* node) {
    AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
    AudioMixerClientData* listenerNodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());

    // zero out the client mix for this node
    memset(_preMixSamples, 0, sizeof(_preMixSamples));
    memset(_mixSamples, 0, sizeof(_mixSamples));

    // loop through all other nodes that have sufficient audio to mix
    int streamsMixed = 0;

    DependencyManager::get<NodeList>()->eachNode([&](const SharedNodePointer& otherNode) {
        if (otherNode->getLinkedData()) {
            AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();

            // enumerate the ARBs attached to the otherNode and add all that should be added to mix
            const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
            QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
            for (i = otherNodeAudioStreams.constBegin(); i != otherNodeAudioStreams.constEnd(); i++) {
                PositionalAudioStream* otherNodeStream = i.value();
                QUuid streamUUID = i.key();

                if (otherNodeStream->getType() == PositionalAudioStream::Microphone) {
                    streamUUID = otherNode->getUUID();
                }

                if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
                    streamsMixed += addStreamToMixForListeningNodeWithStream(listenerNodeData, streamUUID,
                                                                             otherNodeStream, nodeAudioStream);
                }
            }
        }
    });

    return streamsMixed;
}
Example 7: computeGain
float computeGain(float masterListenerGain, const AvatarAudioStream& listeningNodeStream,
        const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition, float distance, bool isEcho) {
    float gain = 1.0f;

    // injector: apply attenuation
    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();

    // avatar: apply fixed off-axis attenuation to make them quieter as they turn away
    } else if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        // source directivity is based on angle of emission, in local coordinates
        glm::vec3 direction = glm::normalize(rotatedListenerPosition);
        float angleOfDelivery = fastAcosf(glm::clamp(-direction.z, -1.0f, 1.0f));  // UNIT_NEG_Z is "forward"

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION + (angleOfDelivery * (OFF_AXIS_ATTENUATION_STEP / PI_OVER_TWO));

        gain *= offAxisCoefficient;

        // apply master gain, only to avatars
        gain *= masterListenerGain;
    }

    auto& audioZones = AudioMixer::getAudioZones();
    auto& zoneSettings = AudioMixer::getZoneSettings();

    // find distance attenuation coefficient
    float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
    for (const auto& settings : zoneSettings) {
        if (audioZones[settings.source].area.contains(streamToAdd.getPosition()) &&
            audioZones[settings.listener].area.contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = settings.coefficient;
            break;
        }
    }

    if (attenuationPerDoublingInDistance < 0.0f) {
        // translate a negative zone setting to distance limit
        const float MIN_DISTANCE_LIMIT = ATTN_DISTANCE_REF + 1.0f;  // silent after 1m
        float distanceLimit = std::max(-attenuationPerDoublingInDistance, MIN_DISTANCE_LIMIT);

        // calculate the LINEAR attenuation using the distance to this node
        // reference attenuation of 0dB at distance = ATTN_DISTANCE_REF
        float d = distance - ATTN_DISTANCE_REF;
        gain *= std::max(1.0f - d / (distanceLimit - ATTN_DISTANCE_REF), 0.0f);
        gain = std::min(gain, ATTN_GAIN_MAX);

    } else {
        // translate a positive zone setting to gain per log2(distance)
        const float MIN_ATTENUATION_COEFFICIENT = 0.001f;  // -60dB per log2(distance)
        float g = glm::clamp(1.0f - attenuationPerDoublingInDistance, MIN_ATTENUATION_COEFFICIENT, 1.0f);

        // calculate the LOGARITHMIC attenuation using the distance to this node
        // reference attenuation of 0dB at distance = ATTN_DISTANCE_REF
        float d = (1.0f / ATTN_DISTANCE_REF) * std::max(distance, HRTF_NEARFIELD_MIN);
        gain *= fastExp2f(fastLog2f(g) * fastLog2f(d));
        gain = std::min(gain, ATTN_GAIN_MAX);
    }

    return gain;
}
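A standalone sketch of the two distance branches in example 7, using assumed values for ATTN_DISTANCE_REF, HRTF_NEARFIELD_MIN and ATTN_GAIN_MAX (the real constants are defined elsewhere in the engine): a negative zone coefficient is read as a hard distance limit with linear fall-off, while a positive one keeps the per-doubling logarithmic roll-off:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <initializer_list>

int main() {
    // Assumed illustrative constants, not engine values.
    const float ATTN_DISTANCE_REF = 2.0f;   // reference distance for 0 dB
    const float HRTF_NEARFIELD_MIN = 1.0f;  // near-field clamp on distance
    const float ATTN_GAIN_MAX = 16.0f;      // upper bound on the final gain

    for (float distance : { 2.0f, 4.0f, 8.0f, 16.0f }) {
        // negative zone setting, e.g. -10.0f: LINEAR fall-off, silent at a 10 m limit
        float distanceLimit = 10.0f;
        float linear = std::max(1.0f - (distance - ATTN_DISTANCE_REF) / (distanceLimit - ATTN_DISTANCE_REF), 0.0f);
        linear = std::min(linear, ATTN_GAIN_MAX);

        // positive zone setting, e.g. 0.5f: LOGARITHMIC roll-off, gain halves per doubling of distance
        float g = 1.0f - 0.5f;
        float d = (1.0f / ATTN_DISTANCE_REF) * std::max(distance, HRTF_NEARFIELD_MIN);
        float logarithmic = std::min(std::exp2(std::log2(g) * std::log2(d)), ATTN_GAIN_MAX);

        std::printf("distance %5.1f m: linear %.3f, logarithmic %.3f\n", distance, linear, logarithmic);
    }
    return 0;
}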