This article collects typical usage examples of the C++ method AudioBus::channel. If you are unsure what AudioBus::channel does, how it is used, or what real code calling it looks like, the curated examples below should help; you can also explore further uses of its containing class, AudioBus.
Nine code examples of AudioBus::channel are shown below, ordered by popularity by default.
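Before the full examples, a minimal sketch of the typical call pattern may help. It assumes only the WebAudio-style API visible in the examples below: AudioBus::numberOfChannels(), AudioBus::channel(i) returning a per-channel object with data(), mutableData(), and length(). The helper name applyGainInPlace is purely illustrative.

// Minimal sketch (not taken from any example below): walk every channel of a
// bus and scale its samples in place. AudioChannel is assumed to be the type
// returned by AudioBus::channel(), as in the WebKit/Blink sources quoted below.
static void applyGainInPlace(AudioBus* bus, float gain)
{
    for (unsigned i = 0; i < bus->numberOfChannels(); ++i) {
        AudioChannel* channel = bus->channel(i);   // per-channel view into the bus
        float* samples = channel->mutableData();   // writable sample pointer
        for (size_t frame = 0; frame < channel->length(); ++frame)
            samples[frame] *= gain;
    }
}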
Example 1: copyFrom
void AudioBus::copyFrom(const AudioBus& sourceBus, ChannelInterpretation channelInterpretation)
{
    if (&sourceBus == this)
        return;

    unsigned numberOfSourceChannels = sourceBus.numberOfChannels();
    unsigned numberOfDestinationChannels = numberOfChannels();

    if (numberOfDestinationChannels == numberOfSourceChannels) {
        for (unsigned i = 0; i < numberOfSourceChannels; ++i)
            channel(i)->copyFrom(sourceBus.channel(i));
    } else {
        switch (channelInterpretation) {
        case Speakers:
            speakersCopyFrom(sourceBus);
            break;
        case Discrete:
            discreteCopyFrom(sourceBus);
            break;
        default:
            ASSERT_NOT_REACHED();
        }
    }
}
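Example 1 only shows the equal-channel-count path, where each destination channel is copied from the matching source channel. When the counts differ it falls back to speakersCopyFrom() or discreteCopyFrom(), whose bodies are not shown. As a rough sketch of the discrete case (assuming <algorithm> and an AudioChannel::zero(), both assumptions rather than quoted code):

#include <algorithm>

// Sketch only: what a discrete (no up/down-mix) copy between busses of
// different channel counts could look like. Not the actual discreteCopyFrom().
void discreteCopySketch(AudioBus& destination, const AudioBus& source)
{
    unsigned common = std::min(destination.numberOfChannels(), source.numberOfChannels());

    // Copy channel-for-channel where both busses have a channel...
    for (unsigned i = 0; i < common; ++i)
        destination.channel(i)->copyFrom(source.channel(i));

    // ...and silence any destination channels with no source counterpart.
    for (unsigned i = common; i < destination.numberOfChannels(); ++i)
        destination.channel(i)->zero();
}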
Example 2: process
void JavaScriptAudioNode::process(size_t framesToProcess)
{
    // Discussion about inputs and outputs:
    // As in other AudioNodes, JavaScriptAudioNode uses an AudioBus for its input and output (see inputBus and outputBus below).
    // Additionally, there is a double-buffering for input and output which is exposed directly to JavaScript (see inputBuffer and outputBuffer below).
    // This node is the producer for inputBuffer and the consumer for outputBuffer.
    // The JavaScript code is the consumer of inputBuffer and the producer for outputBuffer.

    // Get input and output busses.
    AudioBus* inputBus = this->input(0)->bus();
    AudioBus* outputBus = this->output(0)->bus();

    // Get input and output buffers. We double-buffer both the input and output sides.
    unsigned doubleBufferIndex = this->doubleBufferIndex();
    bool isDoubleBufferIndexGood = doubleBufferIndex < 2 && doubleBufferIndex < m_inputBuffers.size() && doubleBufferIndex < m_outputBuffers.size();
    ASSERT(isDoubleBufferIndexGood);
    if (!isDoubleBufferIndexGood)
        return;

    AudioBuffer* inputBuffer = m_inputBuffers[doubleBufferIndex].get();
    AudioBuffer* outputBuffer = m_outputBuffers[doubleBufferIndex].get();

    // Check the consistency of input and output buffers.
    unsigned numberOfInputChannels = m_internalInputBus.numberOfChannels();
    bool buffersAreGood = outputBuffer && bufferSize() == outputBuffer->length() && m_bufferReadWriteIndex + framesToProcess <= bufferSize();

    // If the number of input channels is zero, it's ok to have inputBuffer = 0.
    if (m_internalInputBus.numberOfChannels())
        buffersAreGood = buffersAreGood && inputBuffer && bufferSize() == inputBuffer->length();

    ASSERT(buffersAreGood);
    if (!buffersAreGood)
        return;

    // We assume that bufferSize() is evenly divisible by framesToProcess - should always be true, but we should still check.
    bool isFramesToProcessGood = framesToProcess && bufferSize() >= framesToProcess && !(bufferSize() % framesToProcess);
    ASSERT(isFramesToProcessGood);
    if (!isFramesToProcessGood)
        return;

    unsigned numberOfOutputChannels = outputBus->numberOfChannels();

    bool channelsAreGood = (numberOfInputChannels == m_numberOfInputChannels) && (numberOfOutputChannels == m_numberOfOutputChannels);
    ASSERT(channelsAreGood);
    if (!channelsAreGood)
        return;

    for (unsigned i = 0; i < numberOfInputChannels; i++)
        m_internalInputBus.setChannelMemory(i, inputBuffer->getChannelData(i)->data() + m_bufferReadWriteIndex, framesToProcess);

    if (numberOfInputChannels)
        m_internalInputBus.copyFrom(*inputBus);

    // Copy from the output buffer to the output.
    for (unsigned i = 0; i < numberOfOutputChannels; ++i)
        memcpy(outputBus->channel(i)->mutableData(), outputBuffer->getChannelData(i)->data() + m_bufferReadWriteIndex, sizeof(float) * framesToProcess);

    // Update the buffering index.
    m_bufferReadWriteIndex = (m_bufferReadWriteIndex + framesToProcess) % bufferSize();

    // m_bufferReadWriteIndex will wrap back around to 0 when the current input and output buffers are full.
    // When this happens, fire an event and swap buffers.
    if (!m_bufferReadWriteIndex) {
        // Avoid building up requests on the main thread to fire process events when they're not being handled.
        // This could be a problem if the main thread is very busy doing other things and is being held up handling previous requests.
        if (m_isRequestOutstanding) {
            // We're late in handling the previous request. The main thread must be very busy.
            // The best we can do is clear out the buffer ourself here.
            outputBuffer->zero();
        } else {
            // Reference ourself so we don't accidentally get deleted before fireProcessEvent() gets called.
            ref();

            // Fire the event on the main thread, not this one (which is the realtime audio thread).
            m_doubleBufferIndexForEvent = m_doubleBufferIndex;
            m_isRequestOutstanding = true;
            callOnMainThread(fireProcessEventDispatch, this);
        }
        swapBuffers();
    }
}
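Much of Example 2 is bookkeeping around the double buffer: each render quantum advances m_bufferReadWriteIndex by framesToProcess, and only when the index wraps back to zero is the main thread asked to run the JavaScript handler. That cursor logic, pulled out into a standalone sketch (the struct and names are inventions for illustration):

#include <cstddef>

// Sketch of the quantum bookkeeping in Example 2: bufferSize is a multiple of
// the render quantum; the index advances one quantum at a time and an event is
// requested whenever it wraps back to zero.
struct DoubleBufferCursor {
    size_t bufferSize;           // e.g. 1024 frames
    size_t readWriteIndex = 0;

    // Returns true when a full buffer has just been completed, i.e. when the
    // main thread should be asked to run the script's audio-processing handler.
    bool advance(size_t framesToProcess)
    {
        readWriteIndex = (readWriteIndex + framesToProcess) % bufferSize;
        return readWriteIndex == 0;
    }
};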
Example 3: process
void ScriptProcessorHandler::process(size_t framesToProcess)
{
    // Discussion about inputs and outputs:
    // As in other AudioNodes, ScriptProcessorNode uses an AudioBus for its input and output (see inputBus and outputBus below).
    // Additionally, there is a double-buffering for input and output which is exposed directly to JavaScript (see inputBuffer and outputBuffer below).
    // This node is the producer for inputBuffer and the consumer for outputBuffer.
    // The JavaScript code is the consumer of inputBuffer and the producer for outputBuffer.

    // Get input and output busses.
    AudioBus* inputBus = input(0).bus();
    AudioBus* outputBus = output(0).bus();

    // Get input and output buffers. We double-buffer both the input and output sides.
    unsigned doubleBufferIndex = this->doubleBufferIndex();
    bool isDoubleBufferIndexGood = doubleBufferIndex < 2 && doubleBufferIndex < m_inputBuffers.size() && doubleBufferIndex < m_outputBuffers.size();
    ASSERT(isDoubleBufferIndexGood);
    if (!isDoubleBufferIndexGood)
        return;

    AudioBuffer* inputBuffer = m_inputBuffers[doubleBufferIndex].get();
    AudioBuffer* outputBuffer = m_outputBuffers[doubleBufferIndex].get();

    // Check the consistency of input and output buffers.
    unsigned numberOfInputChannels = m_internalInputBus->numberOfChannels();
    bool buffersAreGood = outputBuffer && bufferSize() == outputBuffer->length() && m_bufferReadWriteIndex + framesToProcess <= bufferSize();

    // If the number of input channels is zero, it's ok to have inputBuffer = 0.
    if (m_internalInputBus->numberOfChannels())
        buffersAreGood = buffersAreGood && inputBuffer && bufferSize() == inputBuffer->length();

    ASSERT(buffersAreGood);
    if (!buffersAreGood)
        return;

    // We assume that bufferSize() is evenly divisible by framesToProcess - should always be true, but we should still check.
    bool isFramesToProcessGood = framesToProcess && bufferSize() >= framesToProcess && !(bufferSize() % framesToProcess);
    ASSERT(isFramesToProcessGood);
    if (!isFramesToProcessGood)
        return;

    unsigned numberOfOutputChannels = outputBus->numberOfChannels();

    bool channelsAreGood = (numberOfInputChannels == m_numberOfInputChannels) && (numberOfOutputChannels == m_numberOfOutputChannels);
    ASSERT(channelsAreGood);
    if (!channelsAreGood)
        return;

    for (unsigned i = 0; i < numberOfInputChannels; ++i)
        m_internalInputBus->setChannelMemory(i, inputBuffer->getChannelData(i)->data() + m_bufferReadWriteIndex, framesToProcess);

    if (numberOfInputChannels)
        m_internalInputBus->copyFrom(*inputBus);

    // Copy from the output buffer to the output.
    for (unsigned i = 0; i < numberOfOutputChannels; ++i)
        memcpy(outputBus->channel(i)->mutableData(), outputBuffer->getChannelData(i)->data() + m_bufferReadWriteIndex, sizeof(float) * framesToProcess);

    // Update the buffering index.
    m_bufferReadWriteIndex = (m_bufferReadWriteIndex + framesToProcess) % bufferSize();

    // m_bufferReadWriteIndex will wrap back around to 0 when the current input and output buffers are full.
    // When this happens, fire an event and swap buffers.
    if (!m_bufferReadWriteIndex) {
        // Avoid building up requests on the main thread to fire process events when they're not being handled.
        // This could be a problem if the main thread is very busy doing other things and is being held up handling previous requests.
        // The audio thread can't block on this lock, so we call tryLock() instead.
        MutexTryLocker tryLocker(m_processEventLock);
        if (!tryLocker.locked()) {
            // We're late in handling the previous request. The main thread must be very busy.
            // The best we can do is clear out the buffer ourself here.
            outputBuffer->zero();
        } else if (context()->executionContext()) {
            // Fire the event on the main thread, not this one (which is the realtime audio thread).
            m_doubleBufferIndexForEvent = m_doubleBufferIndex;
            context()->executionContext()->postTask(BLINK_FROM_HERE, createCrossThreadTask(&ScriptProcessorHandler::fireProcessEvent, PassRefPtr<ScriptProcessorHandler>(this)));
        }
        swapBuffers();
    }
}
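Example 3 is the same algorithm as Example 2 ported to ScriptProcessorHandler, differing mainly in how it notifies the main thread (a MutexTryLocker plus postTask instead of ref()/callOnMainThread). The swapBuffers() call itself is not shown in either example; in a two-buffer scheme it usually reduces to flipping the 0/1 index, roughly as sketched here (a guess at the shape, not the quoted implementation):

// Sketch: flip between the two buffer pairs. The real swapBuffers() may also
// publish the completed index for the main-thread event (m_doubleBufferIndexForEvent).
void swapBuffersSketch(unsigned& doubleBufferIndex)
{
    doubleBufferIndex = 1u - doubleBufferIndex;   // 0 -> 1, 1 -> 0
}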
Example 4: process
void OscillatorHandler::process(size_t framesToProcess)
{
    AudioBus* outputBus = output(0).bus();

    if (!isInitialized() || !outputBus->numberOfChannels()) {
        outputBus->zero();
        return;
    }

    ASSERT(framesToProcess <= m_phaseIncrements.size());
    if (framesToProcess > m_phaseIncrements.size())
        return;

    // The audio thread can't block on this lock, so we call tryLock() instead.
    MutexTryLocker tryLocker(m_processLock);
    if (!tryLocker.locked()) {
        // Too bad - the tryLock() failed. We must be in the middle of changing wave-tables.
        outputBus->zero();
        return;
    }

    // We must access m_periodicWave only inside the lock.
    if (!m_periodicWave.get()) {
        outputBus->zero();
        return;
    }

    size_t quantumFrameOffset;
    size_t nonSilentFramesToProcess;
    updateSchedulingInfo(framesToProcess, outputBus, quantumFrameOffset, nonSilentFramesToProcess);

    if (!nonSilentFramesToProcess) {
        outputBus->zero();
        return;
    }

    unsigned periodicWaveSize = m_periodicWave->periodicWaveSize();
    double invPeriodicWaveSize = 1.0 / periodicWaveSize;

    float* destP = outputBus->channel(0)->mutableData();

    ASSERT(quantumFrameOffset <= framesToProcess);

    // We keep virtualReadIndex double-precision since we're accumulating values.
    double virtualReadIndex = m_virtualReadIndex;

    float rateScale = m_periodicWave->rateScale();
    float invRateScale = 1 / rateScale;
    bool hasSampleAccurateValues = calculateSampleAccuratePhaseIncrements(framesToProcess);

    float frequency = 0;
    float* higherWaveData = 0;
    float* lowerWaveData = 0;
    float tableInterpolationFactor = 0;

    if (!hasSampleAccurateValues) {
        frequency = m_frequency->smoothedValue();
        float detune = m_detune->smoothedValue();
        float detuneScale = powf(2, detune / 1200);
        frequency *= detuneScale;
        m_periodicWave->waveDataForFundamentalFrequency(frequency, lowerWaveData, higherWaveData, tableInterpolationFactor);
    }

    float incr = frequency * rateScale;
    float* phaseIncrements = m_phaseIncrements.data();

    unsigned readIndexMask = periodicWaveSize - 1;

    // Start rendering at the correct offset.
    destP += quantumFrameOffset;
    int n = nonSilentFramesToProcess;

    while (n--) {
        unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
        unsigned readIndex2 = readIndex + 1;

        // Contain within valid range.
        readIndex = readIndex & readIndexMask;
        readIndex2 = readIndex2 & readIndexMask;

        if (hasSampleAccurateValues) {
            incr = *phaseIncrements++;

            frequency = invRateScale * incr;
            m_periodicWave->waveDataForFundamentalFrequency(frequency, lowerWaveData, higherWaveData, tableInterpolationFactor);
        }

        float sample1Lower = lowerWaveData[readIndex];
        float sample2Lower = lowerWaveData[readIndex2];
        float sample1Higher = higherWaveData[readIndex];
        float sample2Higher = higherWaveData[readIndex2];

        // Linearly interpolate within each table (lower and higher).
        float interpolationFactor = static_cast<float>(virtualReadIndex) - readIndex;
        float sampleHigher = (1 - interpolationFactor) * sample1Higher + interpolationFactor * sample2Higher;
        float sampleLower = (1 - interpolationFactor) * sample1Lower + interpolationFactor * sample2Lower;

        // Then interpolate between the two tables.
        float sample = (1 - tableInterpolationFactor) * sampleHigher + tableInterpolationFactor * sampleLower;
//......... (the rest of this example is omitted here) .........
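The inner loop of Example 4 (cut off above) does two levels of linear interpolation: first within each band-limited wave table at readIndex/readIndex2, then between the lower and higher tables using tableInterpolationFactor. The same arithmetic as a self-contained sketch, assuming the table size is a power of two as it is in the example:

#include <cmath>

// Sketch of the two-level interpolation used by Example 4. lowerTable and
// higherTable are two band-limited wave tables of size tableSize (a power of
// two); virtualReadIndex is the fractional read position.
float sampleWavetable(const float* lowerTable, const float* higherTable,
                      unsigned tableSize, double virtualReadIndex,
                      float tableInterpolationFactor)
{
    unsigned mask = tableSize - 1;                 // valid because tableSize is a power of two
    unsigned readIndex = static_cast<unsigned>(virtualReadIndex) & mask;
    unsigned readIndex2 = (readIndex + 1) & mask;
    float t = static_cast<float>(virtualReadIndex - std::floor(virtualReadIndex));

    // Interpolate within each table...
    float lower = (1 - t) * lowerTable[readIndex] + t * lowerTable[readIndex2];
    float higher = (1 - t) * higherTable[readIndex] + t * higherTable[readIndex2];

    // ...then between the two tables (a factor of 0 selects the higher table, as in Example 4).
    return (1 - tableInterpolationFactor) * higher + tableInterpolationFactor * lower;
}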
Example 5: process
void RecorderNode::process(ContextRenderLock& r, size_t framesToProcess)
{
    AudioBus* outputBus = output(0)->bus(r);

    if (!isInitialized() || !input(0)->isConnected())
    {
        if (outputBus)
            outputBus->zero();
        return;
    }

    // =====> should this follow the WebAudio pattern and have a writer object to call here?
    AudioBus* bus = input(0)->bus(r);
    bool isBusGood = bus && (bus->numberOfChannels() > 0) && (bus->channel(0)->length() >= framesToProcess);
    if (!isBusGood)
    {
        outputBus->zero();
        return;
    }

    if (m_recording)
    {
        std::vector<const float*> channels;
        unsigned numberOfChannels = bus->numberOfChannels();
        for (unsigned int i = 0; i < numberOfChannels; ++i)
        {
            channels.push_back(bus->channel(i)->data());
        }

        // mix down the output, or interleave the output
        // use the tightest loop possible since this is part of the processing step
        std::lock_guard<std::recursive_mutex> lock(m_mutex);
        m_data.reserve(framesToProcess * (m_mixToMono ? 1 : 2));

        if (m_mixToMono)
        {
            if (numberOfChannels == 1)
            {
                for (size_t i = 0; i < framesToProcess; ++i)
                {
                    m_data.push_back(channels[0][i]);
                }
            }
            else
            {
                for (size_t i = 0; i < framesToProcess; ++i)
                {
                    float val = 0;
                    for (unsigned int c = 0; c < numberOfChannels; ++c)
                    {
                        val += channels[c][i];
                    }
                    val *= 1.0f / float(numberOfChannels);
                    m_data.push_back(val);
                }
            }
        }
        else
        {
            for (size_t i = 0; i < framesToProcess; ++i)
            {
                for (unsigned int c = 0; c < numberOfChannels; ++c)
                {
                    m_data.push_back(channels[c][i]);
                }
            }
        }
    }
    // <====== to here

    // For in-place processing, our override of pullInputs() will just pass the audio data
    // through unchanged if the channel count matches from input to output
    // (resulting in inputBus == outputBus). Otherwise, do an up-mix to stereo.
    if (bus != outputBus)
    {
        outputBus->copyFrom(*bus);
    }
}
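Example 5 records either a mono mixdown (averaging all channels) or an interleaved stream (channel-major within each frame). The two layouts, reduced to a standalone sketch over the planar channel pointers obtained from bus->channel(c)->data():

#include <vector>

// Sketch of the two recording layouts in Example 5. 'channels' holds one
// planar pointer per channel (bus->channel(c)->data()); 'out' receives either
// mono samples or interleaved frames (L R L R ... for stereo).
void appendFrames(std::vector<float>& out,
                  const std::vector<const float*>& channels,
                  size_t framesToProcess, bool mixToMono)
{
    size_t numberOfChannels = channels.size();
    if (mixToMono) {
        for (size_t i = 0; i < framesToProcess; ++i) {
            float val = 0;
            for (size_t c = 0; c < numberOfChannels; ++c)
                val += channels[c][i];
            out.push_back(val / static_cast<float>(numberOfChannels));   // average, as in Example 5
        }
    } else {
        for (size_t i = 0; i < framesToProcess; ++i)
            for (size_t c = 0; c < numberOfChannels; ++c)
                out.push_back(channels[c][i]);                           // interleave frame by frame
    }
}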
Example 6: process
void SpectralMonitorNode::process(ContextRenderLock& r, size_t framesToProcess)
{
    // Deal with the output in case the power monitor node is embedded in a signal chain for some reason.
    // It's merely a pass-through, though.
    AudioBus* outputBus = output(0)->bus(r);

    if (!isInitialized() || !input(0)->isConnected()) {
        if (outputBus)
            outputBus->zero();
        return;
    }

    AudioBus* bus = input(0)->bus(r);
    bool isBusGood = bus && bus->numberOfChannels() > 0 && bus->channel(0)->length() >= framesToProcess;
    if (!isBusGood) {
        outputBus->zero();
        return;
    }

    // specific to this node
    {
        std::vector<const float*> channels;
        unsigned numberOfChannels = bus->numberOfChannels();
        for (unsigned c = 0; c < numberOfChannels; ++c)
            channels.push_back(bus->channel(c)->data());

        // if the fft is smaller than the quantum, just grab a chunk
        if (internalNode->windowSize < framesToProcess)
        {
            internalNode->cursor = 0;
            framesToProcess = internalNode->windowSize;
        }

        // if the quantum overlaps the end of the window, just fill up the buffer
        if (internalNode->cursor + framesToProcess > internalNode->windowSize)
            framesToProcess = internalNode->windowSize - internalNode->cursor;

        {
            std::lock_guard<std::recursive_mutex> lock(internalNode->magMutex);

            internalNode->buffer.resize(internalNode->windowSize);

            for (size_t i = 0; i < framesToProcess; ++i)
            {
                internalNode->buffer[i + internalNode->cursor] = 0;
            }

            for (unsigned c = 0; c < numberOfChannels; ++c)
            {
                for (size_t i = 0; i < framesToProcess; ++i)
                {
                    float p = channels[c][i];
                    internalNode->buffer[i + internalNode->cursor] += p;
                }
            }
        }

        // advance the cursor
        internalNode->cursor += framesToProcess;
        if (internalNode->cursor >= internalNode->windowSize)
            internalNode->cursor = 0;
    }
    // to here

    // For in-place processing, our override of pullInputs() will just pass the audio data
    // through unchanged if the channel count matches from input to output
    // (resulting in inputBus == outputBus). Otherwise, do an up-mix to stereo.
    if (bus != outputBus)
        outputBus->copyFrom(*bus);
}
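Example 6 sums every input channel into one analysis window, clearing each incoming slot first and advancing a cursor until the window is full (at which point other code, not shown here, presumably runs the FFT). The accumulation step in isolation, as a sketch with the window, cursor, and channel pointers passed in explicitly:

#include <vector>

// Sketch of the window fill in Example 6: clear the incoming slots, then add
// every channel into the same mono window starting at 'cursor'.
void accumulateIntoWindow(std::vector<float>& window, size_t& cursor,
                          const std::vector<const float*>& channels,
                          size_t framesToProcess)
{
    size_t windowSize = window.size();
    if (cursor + framesToProcess > windowSize)          // clip the quantum to the window end
        framesToProcess = windowSize - cursor;

    for (size_t i = 0; i < framesToProcess; ++i)
        window[cursor + i] = 0;

    for (const float* channel : channels)
        for (size_t i = 0; i < framesToProcess; ++i)
            window[cursor + i] += channel[i];

    cursor += framesToProcess;
    if (cursor >= windowSize)                           // wrap, ready for the next window
        cursor = 0;
}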
Example 7: process
void AudioBufferSourceNode::process(size_t framesToProcess)
{
    AudioBus* outputBus = output(0)->bus();

    if (!isInitialized()) {
        outputBus->zero();
        return;
    }

    // The audio thread can't block on this lock, so we call tryLock() instead.
    // Careful - this is a tryLock() and not an autolocker, so we must unlock() before every return.
    if (m_processLock.tryLock()) {
        // Check if it's time to start playing.
        float sampleRate = this->sampleRate();
        double quantumStartTime = context()->currentTime();
        double quantumEndTime = quantumStartTime + framesToProcess / sampleRate;

        // If we know the end time and it's already passed, then don't bother doing any more rendering this cycle.
        if (m_endTime != UnknownTime && m_endTime <= quantumStartTime) {
            m_isPlaying = false;
            m_virtualReadIndex = 0;
            finish();
        }

        if (!m_isPlaying || m_hasFinished || !buffer() || m_startTime >= quantumEndTime) {
            // FIXME: can optimize here by propagating silent hint instead of forcing the whole chain to process silence.
            outputBus->zero();
            m_processLock.unlock();
            return;
        }

        double quantumTimeOffset = m_startTime > quantumStartTime ? m_startTime - quantumStartTime : 0;
        size_t quantumFrameOffset = static_cast<unsigned>(quantumTimeOffset * sampleRate);
        quantumFrameOffset = min(quantumFrameOffset, framesToProcess); // clamp to valid range
        size_t bufferFramesToProcess = framesToProcess - quantumFrameOffset;

        // Render by reading directly from the buffer.
        renderFromBuffer(outputBus, quantumFrameOffset, bufferFramesToProcess);

        // Apply the gain (in-place) to the output bus.
        double totalGain = gain()->value() * m_buffer->gain();
        outputBus->copyWithGainFrom(*outputBus, &m_lastGain, totalGain);

        // If the end time is somewhere in the middle of this time quantum, then simply zero out the
        // frames starting at the end time.
        if (m_endTime != UnknownTime && m_endTime >= quantumStartTime && m_endTime < quantumEndTime) {
            size_t zeroStartFrame = narrowPrecisionToFloat((m_endTime - quantumStartTime) * sampleRate);
            size_t framesToZero = framesToProcess - zeroStartFrame;

            bool isSafe = zeroStartFrame < framesToProcess && framesToZero <= framesToProcess && zeroStartFrame + framesToZero <= framesToProcess;
            ASSERT(isSafe);

            if (isSafe) {
                for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
                    memset(outputBus->channel(i)->data() + zeroStartFrame, 0, sizeof(float) * framesToZero);
            }

            m_isPlaying = false;
            m_virtualReadIndex = 0;
            finish();
        }

        m_processLock.unlock();
    } else {
        // Too bad - the tryLock() failed. We must be in the middle of changing buffers and were already outputting silence anyway.
        outputBus->zero();
    }
}
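When the scheduled end time lands inside the current quantum, Example 7 converts it to a frame offset and zeroes every channel from that frame on. Just that arithmetic, sketched without the narrowPrecisionToFloat() helper used above:

#include <cstddef>
#include <cstring>

// Sketch of Example 7's tail-zeroing: given the quantum start time and an end
// time that falls inside this quantum, silence the frames at and after endTime.
void zeroAfterEndTime(float* channelData, size_t framesToProcess,
                      double quantumStartTime, double endTime, float sampleRate)
{
    size_t zeroStartFrame = static_cast<size_t>((endTime - quantumStartTime) * sampleRate);
    if (zeroStartFrame >= framesToProcess)
        return;                                           // end time is past this quantum
    size_t framesToZero = framesToProcess - zeroStartFrame;
    std::memset(channelData + zeroStartFrame, 0, sizeof(float) * framesToZero);
}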
Example 8: process
void ScriptProcessorHandler::process(size_t framesToProcess) {
  // Discussion about inputs and outputs:
  // As in other AudioNodes, ScriptProcessorNode uses an AudioBus for its input
  // and output (see inputBus and outputBus below). Additionally, there is a
  // double-buffering for input and output which is exposed directly to
  // JavaScript (see inputBuffer and outputBuffer below). This node is the
  // producer for inputBuffer and the consumer for outputBuffer. The JavaScript
  // code is the consumer of inputBuffer and the producer for outputBuffer.

  // Get input and output busses.
  AudioBus* inputBus = input(0).bus();
  AudioBus* outputBus = output(0).bus();

  // Get input and output buffers. We double-buffer both the input and output
  // sides.
  unsigned doubleBufferIndex = this->doubleBufferIndex();
  bool isDoubleBufferIndexGood = doubleBufferIndex < 2 &&
                                 doubleBufferIndex < m_inputBuffers.size() &&
                                 doubleBufferIndex < m_outputBuffers.size();
  DCHECK(isDoubleBufferIndexGood);
  if (!isDoubleBufferIndexGood)
    return;

  AudioBuffer* inputBuffer = m_inputBuffers[doubleBufferIndex].get();
  AudioBuffer* outputBuffer = m_outputBuffers[doubleBufferIndex].get();

  // Check the consistency of input and output buffers.
  unsigned numberOfInputChannels = m_internalInputBus->numberOfChannels();
  bool buffersAreGood =
      outputBuffer && bufferSize() == outputBuffer->length() &&
      m_bufferReadWriteIndex + framesToProcess <= bufferSize();

  // If the number of input channels is zero, it's ok to have inputBuffer = 0.
  if (m_internalInputBus->numberOfChannels())
    buffersAreGood =
        buffersAreGood && inputBuffer && bufferSize() == inputBuffer->length();

  DCHECK(buffersAreGood);
  if (!buffersAreGood)
    return;

  // We assume that bufferSize() is evenly divisible by framesToProcess - should
  // always be true, but we should still check.
  bool isFramesToProcessGood = framesToProcess &&
                               bufferSize() >= framesToProcess &&
                               !(bufferSize() % framesToProcess);
  DCHECK(isFramesToProcessGood);
  if (!isFramesToProcessGood)
    return;

  unsigned numberOfOutputChannels = outputBus->numberOfChannels();

  bool channelsAreGood = (numberOfInputChannels == m_numberOfInputChannels) &&
                         (numberOfOutputChannels == m_numberOfOutputChannels);
  DCHECK(channelsAreGood);
  if (!channelsAreGood)
    return;

  for (unsigned i = 0; i < numberOfInputChannels; ++i)
    m_internalInputBus->setChannelMemory(
        i, inputBuffer->getChannelData(i)->data() + m_bufferReadWriteIndex,
        framesToProcess);

  if (numberOfInputChannels)
    m_internalInputBus->copyFrom(*inputBus);

  // Copy from the output buffer to the output.
  for (unsigned i = 0; i < numberOfOutputChannels; ++i)
    memcpy(outputBus->channel(i)->mutableData(),
           outputBuffer->getChannelData(i)->data() + m_bufferReadWriteIndex,
           sizeof(float) * framesToProcess);

  // Update the buffering index.
  m_bufferReadWriteIndex =
      (m_bufferReadWriteIndex + framesToProcess) % bufferSize();

  // m_bufferReadWriteIndex will wrap back around to 0 when the current input
  // and output buffers are full.
  // When this happens, fire an event and swap buffers.
  if (!m_bufferReadWriteIndex) {
    // Avoid building up requests on the main thread to fire process events when
    // they're not being handled. This could be a problem if the main thread is
    // very busy doing other things and is being held up handling previous
    // requests. The audio thread can't block on this lock, so we call
    // tryLock() instead.
    MutexTryLocker tryLocker(m_processEventLock);
    if (!tryLocker.locked()) {
      // We're late in handling the previous request. The main thread must be
      // very busy. The best we can do is clear out the buffer ourself here.
      outputBuffer->zero();
    } else if (context()->getExecutionContext()) {
      // With the realtime context, execute the script code asynchronously
      // and do not wait.
      if (context()->hasRealtimeConstraint()) {
        // Fire the event on the main thread with the appropriate buffer
        // index.
        context()->getExecutionContext()->postTask(
            BLINK_FROM_HERE,
            createCrossThreadTask(&ScriptProcessorHandler::fireProcessEvent,
                                  crossThreadUnretained(this),
//......... (the rest of this example is omitted here) .........
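Example 8 is a later Chromium revision of Example 3 (DCHECK instead of ASSERT, getExecutionContext(), and a truncated realtime/offline branch at the end), but the threading rule is unchanged: the realtime audio thread must never block, so it only ever try-locks the event lock and degrades to silence when it loses the race. The same hand-off pattern expressed with the standard library rather than Blink's WTF types, as a sketch (postProcessEventToMainThread is a hypothetical stand-in for the cross-thread task post):

#include <mutex>

// Hypothetical stand-in for posting the process event to the main thread.
void postProcessEventToMainThread();

// Sketch of the non-blocking hand-off in Example 8, using std::mutex in place
// of Blink's MutexTryLocker. If the main thread still holds the lock, the
// audio thread gives up immediately and just silences this buffer.
void onBufferFullSketch(std::mutex& processEventLock, AudioBuffer* outputBuffer)
{
    std::unique_lock<std::mutex> lock(processEventLock, std::try_to_lock);
    if (!lock.owns_lock()) {
        // Late: the previous request is still being handled. Output silence.
        outputBuffer->zero();
        return;
    }
    postProcessEventToMainThread();
}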
Example 9: process
void AudioBufferSourceNode::process(ContextRenderLock& r, size_t framesToProcess)
{
    AudioBus* outputBus = output(0)->bus(r);

    if (!buffer() || !isInitialized() || !r.context()) {
        outputBus->zero();
        return;
    }

    // After calling setBuffer() with a buffer having a different number of channels, there can in rare cases be a slight delay
    // before the output bus is updated to the new number of channels because of use of tryLocks() in the context's updating system.
    // In this case, if the buffer has just been changed and we're not quite ready yet, then just output silence.
    if (numberOfChannels(r) != buffer()->numberOfChannels()) {
        outputBus->zero();
        return;
    }

    if (m_startRequested) {
        // Do sanity checking of grain parameters versus buffer size.
        double bufferDuration = buffer()->duration();

        double grainOffset = std::max(0.0, m_requestGrainOffset);
        grainOffset = std::min(bufferDuration, grainOffset);
        m_grainOffset = grainOffset;

        // Handle default/unspecified duration.
        double maxDuration = bufferDuration - grainOffset;
        double grainDuration = m_requestGrainDuration;
        if (!grainDuration)
            grainDuration = maxDuration;

        grainDuration = std::max(0.0, grainDuration);
        grainDuration = std::min(maxDuration, grainDuration);
        m_grainDuration = grainDuration;

        m_isGrain = true;
        m_startTime = m_requestWhen;

        // We call timeToSampleFrame here since at playbackRate == 1 we don't want to go through linear interpolation
        // at a sub-sample position since it will degrade the quality.
        // When aligned to the sample-frame the playback will be identical to the PCM data stored in the buffer.
        // Since playbackRate == 1 is very common, it's worth considering quality.
        m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate());

        m_startRequested = false;
    }

    size_t quantumFrameOffset;
    size_t bufferFramesToProcess;
    updateSchedulingInfo(r, framesToProcess, outputBus, quantumFrameOffset, bufferFramesToProcess);

    if (!bufferFramesToProcess)
    {
        outputBus->zero();
        return;
    }

    for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
    {
        m_destinationChannels[i] = outputBus->channel(i)->mutableData();
    }

    // Render by reading directly from the buffer.
    if (!renderFromBuffer(r, outputBus, quantumFrameOffset, bufferFramesToProcess))
    {
        outputBus->zero();
        return;
    }

    // Apply the gain (in-place) to the output bus.
    float totalGain = gain()->value(r) * m_buffer->gain();
    outputBus->copyWithGainFrom(*outputBus, &m_lastGain, totalGain);
    outputBus->clearSilentFlag();
}
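The grain setup at the top of Example 9 clamps the requested offset and duration to what the buffer can actually supply, then snaps the start position to a whole sample frame so that playback at playbackRate == 1 reads the PCM exactly. A sketch of that sanitizing step (the rounding in the real AudioUtilities::timeToSampleFrame may differ in detail):

#include <algorithm>
#include <cmath>
#include <cstddef>

// Sketch of the grain parameter sanitizing in Example 9.
struct Grain {
    double offset;    // seconds into the buffer
    double duration;  // seconds to play
    size_t startFrame;
};

Grain makeGrain(double requestedOffset, double requestedDuration,
                double bufferDuration, double sampleRate)
{
    Grain g;
    g.offset = std::min(bufferDuration, std::max(0.0, requestedOffset));

    double maxDuration = bufferDuration - g.offset;
    double duration = requestedDuration ? requestedDuration : maxDuration;   // 0 means "to the end"
    g.duration = std::min(maxDuration, std::max(0.0, duration));

    // Align the start to a whole sample frame so playbackRate == 1 reads the PCM exactly.
    g.startFrame = static_cast<size_t>(std::round(g.offset * sampleRate));
    return g;
}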