This article collects typical usage examples of the C++ class AudioMixerClientData. If you have been wondering what AudioMixerClientData is for and how to use it in C++, the curated examples below should help.
The following presents 15 code examples of the AudioMixerClientData class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ examples.
Example 1: processPackets
void AudioMixerSlave::processPackets(const SharedNodePointer& node) {
    AudioMixerClientData* data = (AudioMixerClientData*)node->getLinkedData();
    if (data) {
        // process packets and collect the number of streams available for this frame
        stats.sumStreams += data->processPackets(_sharedData.addedStreams);
    }
}
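A pattern that recurs throughout these examples: the mixer keeps its per-client state on the Node as linked data, retrieves it with getLinkedData(), and casts the result to AudioMixerClientData. As a minimal sketch, that retrieval could be wrapped in a helper like the one below; mixerData is an invented name for illustration, not part of the original code.

// Hypothetical convenience helper, assuming the Node/NodeData API shown in these examples.
// Returns the mixer-specific state attached to a node, or nullptr if none has been set.
static AudioMixerClientData* mixerData(const SharedNodePointer& node) {
    return static_cast<AudioMixerClientData*>(node->getLinkedData());
}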
Example 2: memset
int AudioMixer::prepareMixForListeningNode(Node* node) {
    AvatarAudioStream* nodeAudioStream = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioStream();

    // zero out the client mix for this node
    memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO);

    // loop through all other nodes that have sufficient audio to mix
    int streamsMixed = 0;
    foreach (const SharedNodePointer& otherNode, NodeList::getInstance()->getNodeHash()) {
        if (otherNode->getLinkedData()) {
            AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();

            // enumerate the ARBs attached to the otherNode and add all that should be added to mix
            const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
            QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
            for (i = otherNodeAudioStreams.constBegin(); i != otherNodeAudioStreams.constEnd(); i++) {
                PositionalAudioStream* otherNodeStream = i.value();

                if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
                    streamsMixed += addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
                }
            }
        }
    }
    return streamsMixed;
}
Example 3: memset
void AudioMixer::prepareMixForListeningNode(Node* node) {
    AvatarAudioRingBuffer* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer();

    // zero out the client mix for this node
    memset(_clientSamples, 0, sizeof(_clientSamples));

    // loop through all other nodes that have sufficient audio to mix
    foreach (const SharedNodePointer& otherNode, NodeList::getInstance()->getNodeHash()) {
        if (otherNode->getLinkedData()) {
            AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();

            // enumerate the ARBs attached to the otherNode and add all that should be added to mix
            for (unsigned int i = 0; i < otherNodeClientData->getRingBuffers().size(); i++) {
                PositionalAudioRingBuffer* otherNodeBuffer = otherNodeClientData->getRingBuffers()[i];

                if ((*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode())
                        && otherNodeBuffer->willBeAddedToMix()) {
                    addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer);
                }
            }
        }
    }
}
Example 4: printf
void AudioMixer::perSecondActions() {
    _sendAudioStreamStats = true;

    int callsLastSecond = _datagramsReadPerCallStats.getCurrentIntervalSamples();
    _readPendingCallsPerSecondStats.update(callsLastSecond);

    if (_printStreamStats) {
        printf("\n================================================================================\n\n");

        printf(" readPendingDatagram() calls per second | avg: %.2f, avg_30s: %.2f, last_second: %d\n",
               _readPendingCallsPerSecondStats.getAverage(),
               _readPendingCallsPerSecondStats.getWindowAverage(),
               callsLastSecond);

        printf(" Datagrams read per call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
               _datagramsReadPerCallStats.getAverage(),
               _datagramsReadPerCallStats.getWindowAverage(),
               _datagramsReadPerCallStats.getCurrentIntervalAverage());

        printf(" Usecs spent per readPendingDatagram() call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
               _timeSpentPerCallStats.getAverage(),
               _timeSpentPerCallStats.getWindowAverage(),
               _timeSpentPerCallStats.getCurrentIntervalAverage());

        printf(" Usecs spent per packetVersionAndHashMatch() call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
               _timeSpentPerHashMatchCallStats.getAverage(),
               _timeSpentPerHashMatchCallStats.getWindowAverage(),
               _timeSpentPerHashMatchCallStats.getCurrentIntervalAverage());

        double WINDOW_LENGTH_USECS = READ_DATAGRAMS_STATS_WINDOW_SECONDS * USECS_PER_SECOND;

        printf(" %% time spent in readPendingDatagram() calls | avg_30s: %.6f%%, last_second: %.6f%%\n",
               _timeSpentPerCallStats.getWindowSum() / WINDOW_LENGTH_USECS * 100.0,
               _timeSpentPerCallStats.getCurrentIntervalSum() / USECS_PER_SECOND * 100.0);

        printf("%% time spent in packetVersionAndHashMatch() calls: | avg_30s: %.6f%%, last_second: %.6f%%\n",
               _timeSpentPerHashMatchCallStats.getWindowSum() / WINDOW_LENGTH_USECS * 100.0,
               _timeSpentPerHashMatchCallStats.getCurrentIntervalSum() / USECS_PER_SECOND * 100.0);

        DependencyManager::get<NodeList>()->eachNode([](const SharedNodePointer& node) {
            if (node->getLinkedData()) {
                AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();
                if (node->getType() == NodeType::Agent && node->getActiveSocket()) {
                    printf("\nStats for agent %s --------------------------------\n",
                           node->getUUID().toString().toLatin1().data());
                    nodeData->printUpstreamDownstreamStats();
                }
            }
        });
    }

    _datagramsReadPerCallStats.currentIntervalComplete();
    _timeSpentPerCallStats.currentIntervalComplete();
    _timeSpentPerHashMatchCallStats.currentIntervalComplete();
}
Example 5: percentageForMixStats
void AudioMixer::sendStatsPacket() {
    static QJsonObject statsObject;

    statsObject["useDynamicJitterBuffers"] = _streamSettings._dynamicJitterBuffers;
    statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
    statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;
    statsObject["avg_listeners_per_frame"] = (float) _sumListeners / (float) _numStatFrames;

    QJsonObject mixStats;
    mixStats["%_hrtf_mixes"] = percentageForMixStats(_hrtfRenders);
    mixStats["%_hrtf_silent_mixes"] = percentageForMixStats(_hrtfSilentRenders);
    mixStats["%_hrtf_struggle_mixes"] = percentageForMixStats(_hrtfStruggleRenders);
    mixStats["%_manual_stereo_mixes"] = percentageForMixStats(_manualStereoMixes);
    mixStats["%_manual_echo_mixes"] = percentageForMixStats(_manualEchoMixes);
    mixStats["total_mixes"] = _totalMixes;
    mixStats["avg_mixes_per_block"] = _totalMixes / _numStatFrames;
    statsObject["mix_stats"] = mixStats;

    _sumListeners = 0;
    _hrtfRenders = 0;
    _hrtfSilentRenders = 0;
    _hrtfStruggleRenders = 0;
    _manualStereoMixes = 0;
    _manualEchoMixes = 0;
    _totalMixes = 0;
    _numStatFrames = 0;

    // add stats for each listener
    auto nodeList = DependencyManager::get<NodeList>();
    QJsonObject listenerStats;

    nodeList->eachNode([&](const SharedNodePointer& node) {
        AudioMixerClientData* clientData = static_cast<AudioMixerClientData*>(node->getLinkedData());
        if (clientData) {
            QJsonObject nodeStats;
            QString uuidString = uuidStringWithoutCurlyBraces(node->getUUID());

            nodeStats["outbound_kbps"] = node->getOutboundBandwidth();
            nodeStats[USERNAME_UUID_REPLACEMENT_STATS_KEY] = uuidString;
            nodeStats["jitter"] = clientData->getAudioStreamStats();

            listenerStats[uuidString] = nodeStats;
        }
    });

    // add the listeners object to the root object
    statsObject["z_listeners"] = listenerStats;

    // send off the stats packets
    ThreadedAssignment::addPacketStatsAndSendStatsPacket(statsObject);
}
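The percentageForMixStats() helper that names this example is not shown above. A minimal sketch consistent with its call sites, converting one of the per-category mix counters into a percentage of _totalMixes, might look like this; the zero guard is an assumption, not confirmed original behavior.

// Hypothetical sketch of percentageForMixStats(), inferred from its usage above.
float AudioMixer::percentageForMixStats(int counter) {
    if (_totalMixes > 0) {
        return (counter / (float)_totalMixes) * 100.0f;
    }
    return 0.0f; // assumed guard for a stats interval with no mixes
}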
Example 6: sendSilentPacket
void sendSilentPacket(const SharedNodePointer& node, AudioMixerClientData& data) {
    const int SILENT_PACKET_SIZE =
        sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE + sizeof(quint16);
    quint16 sequence = data.getOutgoingSequenceNumber();
    QString codec = data.getCodecName();
    auto mixPacket = createAudioPacket(PacketType::SilentAudioFrame, SILENT_PACKET_SIZE, sequence, codec);

    // pack number of samples
    mixPacket->writePrimitive(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO);

    // send packet
    DependencyManager::get<NodeList>()->sendPacket(std::move(mixPacket), *node);
    data.incrementOutgoingMixedAudioSequenceNumber();
}
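This example and the next both rely on a createAudioPacket() helper that is not shown. Inferred from the payload layout above (a quint16 sequence number followed by the codec name), a plausible sketch is below; the exact signature and the writeString() call are assumptions.

// Hypothetical sketch of createAudioPacket(), inferred from its two call sites.
std::unique_ptr<NLPacket> createAudioPacket(PacketType type, int size, quint16 sequence, const QString& codec) {
    auto audioPacket = NLPacket::create(type, size);
    audioPacket->writePrimitive(sequence); // quint16 sequence number
    audioPacket->writeString(codec);       // codec name, up to MAX_CODEC_NAME_LENGTH_ON_WIRE
    return audioPacket;
}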
Example 7: sendMixPacket
void sendMixPacket(const SharedNodePointer& node, AudioMixerClientData& data, QByteArray& buffer) {
    const int MIX_PACKET_SIZE =
        sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
    quint16 sequence = data.getOutgoingSequenceNumber();
    QString codec = data.getCodecName();
    auto mixPacket = createAudioPacket(PacketType::MixedAudio, MIX_PACKET_SIZE, sequence, codec);

    // pack samples
    mixPacket->write(buffer.constData(), buffer.size());

    // send packet
    DependencyManager::get<NodeList>()->sendPacket(std::move(mixPacket), *node);
    data.incrementOutgoingMixedAudioSequenceNumber();
}
Example 8: sendMutePacket
void sendMutePacket(const SharedNodePointer& node, AudioMixerClientData& data) {
    auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
    DependencyManager::get<NodeList>()->sendPacket(std::move(mutePacket), *node);

    // probably now we just reset the flag, once should do it (?)
    data.setShouldMuteClient(false);
}
Example 9: sendEnvironmentPacket
void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData& data) {
    bool hasReverb = false;
    float reverbTime, wetLevel;

    auto& reverbSettings = AudioMixer::getReverbSettings();
    auto& audioZones = AudioMixer::getAudioZones();

    AvatarAudioStream* stream = data.getAvatarAudioStream();
    glm::vec3 streamPosition = stream->getPosition();

    // find reverb properties
    for (int i = 0; i < reverbSettings.size(); ++i) {
        AABox box = audioZones[reverbSettings[i].zone];
        if (box.contains(streamPosition)) {
            hasReverb = true;
            reverbTime = reverbSettings[i].reverbTime;
            wetLevel = reverbSettings[i].wetLevel;
            break;
        }
    }

    // check if data changed
    bool dataChanged = (stream->hasReverb() != hasReverb) ||
        (stream->hasReverb() && (stream->getRevebTime() != reverbTime || stream->getWetLevel() != wetLevel));
    if (dataChanged) {
        // update stream
        if (hasReverb) {
            stream->setReverb(reverbTime, wetLevel);
        } else {
            stream->clearReverb();
        }
    }

    // send packet at change or every so often
    float CHANCE_OF_SEND = 0.01f;
    bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);

    if (sendData) {
        // size the packet
        unsigned char bitset = 0;
        int packetSize = sizeof(bitset);
        if (hasReverb) {
            packetSize += sizeof(reverbTime) + sizeof(wetLevel);
        }

        // write the packet
        auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);
        if (hasReverb) {
            setAtBit(bitset, HAS_REVERB_BIT);
        }
        envPacket->writePrimitive(bitset);
        if (hasReverb) {
            envPacket->writePrimitive(reverbTime);
            envPacket->writePrimitive(wetLevel);
        }

        // send the packet
        DependencyManager::get<NodeList>()->sendPacket(std::move(envPacket), *node);
    }
}
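For reference, the AudioEnvironment payload written above has the following layout (widths follow the writePrimitive() calls, assuming 32-bit floats):

// byte 0     : bitset, with HAS_REVERB_BIT set when reverb applies
// bytes 1..4 : reverbTime (float), present only when HAS_REVERB_BIT is set
// bytes 5..8 : wetLevel (float), present only when HAS_REVERB_BIT is set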
Example 10: mix
void AudioMixerSlave::mix(const SharedNodePointer& node) {
    // check that the node is valid
    AudioMixerClientData* data = (AudioMixerClientData*)node->getLinkedData();
    if (data == nullptr) {
        return;
    }

    if (node->isUpstream()) {
        return;
    }

    // check that the stream is valid
    auto avatarStream = data->getAvatarAudioStream();
    if (avatarStream == nullptr) {
        return;
    }

    // send mute packet, if necessary
    if (AudioMixer::shouldMute(avatarStream->getQuietestFrameLoudness()) || data->shouldMuteClient()) {
        sendMutePacket(node, *data);
    }

    // send audio packets, if necessary
    if (node->getType() == NodeType::Agent && node->getActiveSocket()) {
        ++stats.sumListeners;

        // mix the audio
        bool mixHasAudio = prepareMix(node);

        // send audio packet
        if (mixHasAudio || data->shouldFlushEncoder()) {
            QByteArray encodedBuffer;
            if (mixHasAudio) {
                // encode the audio
                QByteArray decodedBuffer(reinterpret_cast<char*>(_bufferSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO);
                data->encode(decodedBuffer, encodedBuffer);
            } else {
                // time to flush (resets shouldFlush until the next encode)
                data->encodeFrameOfZeros(encodedBuffer);
            }
            sendMixPacket(node, *data, encodedBuffer);
        } else {
            ++stats.sumListenersSilent;
            sendSilentPacket(node, *data);
        }

        // send environment packet
        sendEnvironmentPacket(node, *data);

        // send stats packet (about every second)
        const unsigned int NUM_FRAMES_PER_SEC = (int)ceil(AudioConstants::NETWORK_FRAMES_PER_SEC);
        if (data->shouldSendStats(_frame % NUM_FRAMES_PER_SEC)) {
            data->sendAudioStreamStatsPackets(node);
        }
    }
}
Example 11: computeGain
float computeGain(const AudioMixerClientData& listenerNodeData, const AvatarAudioStream& listeningNodeStream,
                  const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition, bool isEcho) {
    float gain = 1.0f;

    // injector: apply attenuation
    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();

    // avatar: apply fixed off-axis attenuation to make them quieter as they turn away
    } else if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        // source directivity is based on angle of emission, in local coordinates
        glm::vec3 direction = glm::normalize(rotatedListenerPosition);
        float angleOfDelivery = fastAcosf(glm::clamp(-direction.z, -1.0f, 1.0f)); // UNIT_NEG_Z is "forward"

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION + (angleOfDelivery * (OFF_AXIS_ATTENUATION_STEP / PI_OVER_TWO));
        gain *= offAxisCoefficient;

        // apply master gain, only to avatars
        gain *= listenerNodeData.getMasterAvatarGain();
    }

    auto& audioZones = AudioMixer::getAudioZones();
    auto& zoneSettings = AudioMixer::getZoneSettings();

    // find distance attenuation coefficient
    float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
    for (int i = 0; i < zoneSettings.length(); ++i) {
        if (audioZones[zoneSettings[i].source].contains(streamToAdd.getPosition()) &&
            audioZones[zoneSettings[i].listener].contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = zoneSettings[i].coefficient;
            break;
        }
    }

    // distance attenuation
    const float ATTENUATION_START_DISTANCE = 1.0f;
    float distance = glm::length(relativePosition);
    assert(ATTENUATION_START_DISTANCE > EPSILON);
    if (distance >= ATTENUATION_START_DISTANCE) {
        // translate the zone setting to gain per log2(distance)
        float g = 1.0f - attenuationPerDoublingInDistance;
        g = glm::clamp(g, EPSILON, 1.0f);

        // calculate the distance coefficient using the distance to this node
        float distanceCoefficient = fastExp2f(fastLog2f(g) * fastLog2f(distance / ATTENUATION_START_DISTANCE));

        // multiply the current attenuation coefficient by the distance coefficient
        gain *= distanceCoefficient;
    }

    return gain;
}
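The distance rolloff at the end is worth unpacking. With g = 1 - attenuationPerDoublingInDistance and d0 = ATTENUATION_START_DISTANCE, the coefficient computed through fastExp2f()/fastLog2f() is 2^(log2(g) * log2(d / d0)) = g^(log2(d / d0)), so each doubling of distance beyond d0 multiplies the gain by g. For example, with attenuationPerDoublingInDistance = 0.5, a source at 4 m contributes 0.5^2 = 0.25 of its 1 m level.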
Example 12: memset
bool AudioMixer::prepareMixForListeningNode(Node* node) {
    AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
    AudioMixerClientData* listenerNodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());

    // zero out the client mix for this node
    memset(_mixedSamples, 0, sizeof(_mixedSamples));

    // loop through all other nodes that have sufficient audio to mix
    DependencyManager::get<NodeList>()->eachNode([&](const SharedNodePointer& otherNode){
        // make sure that we have audio data for this other node and that it isn't being ignored by our listening node
        if (otherNode->getLinkedData() && !node->isIgnoringNodeWithID(otherNode->getUUID())) {
            AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();

            // enumerate the ARBs attached to the otherNode and add all that should be added to mix
            auto streamsCopy = otherNodeClientData->getAudioStreams();
            for (auto& streamPair : streamsCopy) {
                auto otherNodeStream = streamPair.second;
                if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
                    addStreamToMixForListeningNodeWithStream(*listenerNodeData, *otherNodeStream, otherNode->getUUID(),
                                                             *nodeAudioStream);
                }
            }
        }
    });

    // use the per-listener AudioLimiter to render the mixed data...
    listenerNodeData->audioLimiter.render(_mixedSamples, _clampedSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    // check for silent audio after the peak limiter has converted the samples
    bool hasAudio = false;
    for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
        if (_clampedSamples[i] != 0) {
            hasAudio = true;
            break;
        }
    }
    return hasAudio;
}
Example 13: memset
int AudioMixer::prepareMixForListeningNode(Node* node) {
    AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
    AudioMixerClientData* listenerNodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());

    // zero out the client mix for this node
    memset(_preMixSamples, 0, sizeof(_preMixSamples));
    memset(_mixSamples, 0, sizeof(_mixSamples));

    // loop through all other nodes that have sufficient audio to mix
    int streamsMixed = 0;
    DependencyManager::get<NodeList>()->eachNode([&](const SharedNodePointer& otherNode){
        if (otherNode->getLinkedData()) {
            AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();

            // enumerate the ARBs attached to the otherNode and add all that should be added to mix
            const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
            QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
            for (i = otherNodeAudioStreams.constBegin(); i != otherNodeAudioStreams.constEnd(); i++) {
                PositionalAudioStream* otherNodeStream = i.value();
                QUuid streamUUID = i.key();

                if (otherNodeStream->getType() == PositionalAudioStream::Microphone) {
                    streamUUID = otherNode->getUUID();
                }

                if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
                    streamsMixed += addStreamToMixForListeningNodeWithStream(listenerNodeData, streamUUID,
                                                                             otherNodeStream, nodeAudioStream);
                }
            }
        }
    });
    return streamsMixed;
}
Example 14: shouldIgnore
bool AudioMixerClientData::shouldIgnore(const SharedNodePointer self, const SharedNodePointer node, unsigned int frame) {
    // this is symmetric over self / node; if computed, it is cached in the other

    // check the cache to avoid computation
    auto& cache = _nodeSourcesIgnoreMap[node->getUUID()];
    if (cache.isCached()) {
        return cache.shouldIgnore();
    }

    AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
    if (!nodeData) {
        return false;
    }

    // compute shouldIgnore
    bool shouldIgnore = true;
    if ( // the nodes are not ignoring each other explicitly (or are but get data regardless)
        (!self->isIgnoringNodeWithID(node->getUUID()) ||
         (nodeData->getRequestsDomainListData() && node->getCanKick())) &&
        (!node->isIgnoringNodeWithID(self->getUUID()) ||
         (getRequestsDomainListData() && self->getCanKick()))) {

        // if either node is enabling an ignore radius, check their proximity
        if ((self->isIgnoreRadiusEnabled() || node->isIgnoreRadiusEnabled())) {
            auto& zone = _ignoreZone.get(frame);
            auto& nodeZone = nodeData->_ignoreZone.get(frame);
            shouldIgnore = zone.touches(nodeZone);
        } else {
            shouldIgnore = false;
        }
    }

    // cache in node
    nodeData->_nodeSourcesIgnoreMap[self->getUUID()].cache(shouldIgnore);
    return shouldIgnore;
}
Example 15: shouldBeSkipped
bool shouldBeSkipped(MixableStream& stream, const Node& listener,
                     const AvatarAudioStream& listenerAudioStream,
                     const AudioMixerClientData& listenerData) {
    if (stream.nodeStreamID.nodeLocalID == listener.getLocalID()) {
        return !stream.positionalStream->shouldLoopbackForNode();
    }

    // grab the unprocessed ignores and unignores from and for this listener
    const auto& nodesIgnoredByListener = listenerData.getNewIgnoredNodeIDs();
    const auto& nodesUnignoredByListener = listenerData.getNewUnignoredNodeIDs();
    const auto& nodesIgnoringListener = listenerData.getNewIgnoringNodeIDs();
    const auto& nodesUnignoringListener = listenerData.getNewUnignoringNodeIDs();

    // this stream was previously not ignored by the listener and we have some newly ignored streams
    // check now if it is one of the ignored streams and flag it as such
    if (stream.ignoredByListener) {
        stream.ignoredByListener = !contains(nodesUnignoredByListener, stream.nodeStreamID.nodeID);
    } else {
        stream.ignoredByListener = contains(nodesIgnoredByListener, stream.nodeStreamID.nodeID);
    }

    if (stream.ignoringListener) {
        stream.ignoringListener = !contains(nodesUnignoringListener, stream.nodeStreamID.nodeID);
    } else {
        stream.ignoringListener = contains(nodesIgnoringListener, stream.nodeStreamID.nodeID);
    }

    bool listenerIsAdmin = listenerData.getRequestsDomainListData() && listener.getCanKick();
    if (stream.ignoredByListener || (stream.ignoringListener && !listenerIsAdmin)) {
        return true;
    }

    if (!listenerData.getSoloedNodes().empty()) {
        return !contains(listenerData.getSoloedNodes(), stream.nodeStreamID.nodeID);
    }

    bool shouldCheckIgnoreBox = (listenerAudioStream.isIgnoreBoxEnabled() ||
                                 stream.positionalStream->isIgnoreBoxEnabled());
    if (shouldCheckIgnoreBox &&
        listenerAudioStream.getIgnoreBox().touches(stream.positionalStream->getIgnoreBox())) {
        return true;
    }

    return false;
}
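For context, here is a hedged sketch of how a mixing loop might consume shouldBeSkipped(); the loop structure and the mixableStreams, listener, listenerAudioStream, and listenerData names are assumptions for illustration, not the original mixer code.

// Hypothetical calling pattern, assuming a per-listener list of MixableStream.
for (auto& stream : mixableStreams) {
    if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
        continue; // ignored, soloed out, or inside a mutual ignore box
    }
    // ... render this stream into the listener's mix ...
}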