本文整理汇总了C++中AudioSampleBuffer::getSample方法的典型用法代码示例。如果您正苦于以下问题:C++ AudioSampleBuffer::getSample方法的具体用法?C++ AudioSampleBuffer::getSample怎么用?C++ AudioSampleBuffer::getSample使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 AudioSampleBuffer 的用法示例。
在下文中一共展示了AudioSampleBuffer::getSample方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: processBlock
// Bitcrusher: quantizes the signal to a reduced bit depth and averages groups
// of consecutive samples (downsampling), then crossfades with the dry signal.
void BitcrushAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    this->initializing(buffer);
    // Silence any output channels that have no matching input channel.
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i) {
        buffer.clear(i, 0, buffer.getNumSamples());
    }
    const float crush = bitcrush->getValue();
    const float wet_ = wet->getValue();
    // Number of consecutive samples replaced by their average (downsampling factor, >= 1).
    const int groupedSamples = std::max(1.f, downsample->getValue() * 100);
    // Bit depth interpolates from 12 bits (crush == 0) down to 1 bit (crush == 1);
    // steps is the resulting number of quantization levels.
    const float bitdepth = 12. * (1. - crush) + 1. * crush;
    const int steps = exp2(bitdepth);
    const int numSamples = buffer.getNumSamples();

    for (int channel = 0; channel < getNumInputChannels(); channel++)
    {
        // Process every complete group. The original bound
        // (sample < numSamples - groupedSamples) skipped the final full group
        // when numSamples is an exact multiple of groupedSamples;
        // (sample + groupedSamples <= numSamples) processes it as well.
        int sample = 0;
        for (; sample + groupedSamples <= numSamples; sample += groupedSamples) {
            // Average the group and quantize the average to `steps` levels ...
            float averagedSample = 0.;
            for (int i = 0; i < groupedSamples; i++) {
                averagedSample += buffer.getSample(channel, i + sample) / groupedSamples;
            }
            const int discretizedSample = averagedSample * steps;
            const float crushed = float(discretizedSample) / steps;
            // ... then crossfade each sample of the group with the crushed value.
            for (int i = 0; i < groupedSamples; i++) {
                const float sampleValue = buffer.getSample(channel, i + sample);
                buffer.setSample(channel, i + sample, sampleValue * (1. - wet_) + crushed * wet_);
            }
        }
        // Handle the trailing partial group (fewer than groupedSamples samples), if any.
        // Dividing by `remaining` (instead of numSamples % groupedSamples) makes the
        // intent explicit and is guarded against a zero divisor.
        const int remaining = numSamples - sample;
        if (remaining > 0) {
            float averagedSample = 0.;
            for (int i = sample; i < numSamples; i++) {
                averagedSample += buffer.getSample(channel, i) / remaining;
            }
            const int discretizedSample = averagedSample * steps;
            const float crushed = float(discretizedSample) / steps;
            for (int i = sample; i < numSamples; i++) {
                const float sampleValue = buffer.getSample(channel, i);
                buffer.setSample(channel, i, sampleValue * (1. - wet_) + crushed * wet_);
            }
        }
    }
    this->meteringBuffer(buffer);
    this->finalizing(buffer);
}
示例2: processBlock
// Applies a direct-form I biquad filter to every channel:
//   y[n] = (b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]) / a0
void EQNode::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    for (int channel = 0; channel < buffer.getNumChannels(); ++channel)
    {
        // Filter delay-line state (x[n-1], x[n-2], y[n-1], y[n-2]).
        // The original read these uninitialized (UB on the first samples) and
        // shared one set of state across channels, bleeding channel 0's tail
        // into channel 1. Zero-initialize per channel instead.
        // NOTE(review): state is still not persisted across blocks, which can
        // cause discontinuities at block boundaries — consider member state.
        float in_samp_m1 = 0.f, in_samp_m2 = 0.f;
        float out_samp_m1 = 0.f, out_samp_m2 = 0.f;
        for (int sample = 0; sample < buffer.getNumSamples(); ++sample)
        {
            const float in_samp = buffer.getSample(channel, sample);
            const float out_samp = (_b0 * in_samp + _b1 * in_samp_m1 + _b2 * in_samp_m2 - _a1 * out_samp_m1 - _a2 * out_samp_m2) / _a0;
            // Shift the delay line.
            in_samp_m2 = in_samp_m1;
            in_samp_m1 = in_samp;
            out_samp_m2 = out_samp_m1;
            out_samp_m1 = out_samp;
            buffer.setSample(channel, sample, out_samp);
        }
    }
}
示例3: meteringBuffer
// Updates the metering state from one buffer:
//   meterValues[0], meterValues[1]: running peak of channels 0 and 1
//   meterValues[2], meterValues[3]: accumulated squared samples (RMS basis)
void BlankenhainAudioProcessor::meteringBuffer(AudioSampleBuffer& buffer)
{
	/* METERING CODE */
	// int index avoids the signed/unsigned comparison against getNumSamples().
	for (int iteration = 0; iteration < buffer.getNumSamples(); iteration++)
	{
		// Read each channel once per sample instead of up to four times.
		const float left = buffer.getSample(/*channel*/ 0, iteration);
		const float right = buffer.getSample(/*channel*/ 1, iteration);
		// std::abs selects the float overload; unqualified abs can resolve to
		// the int version and silently truncate the sample value.
		if (std::abs(left) > meterValues[0])
		{
			meterValues[0] = std::abs(left);
		}
		if (std::abs(right) > meterValues[1])
		{
			meterValues[1] = std::abs(right);
		}
		meterValues[2] += left * left;
		meterValues[3] += right * right;
	}
	/* END METERING CODE*/
}
示例4: processBlock
// Applies a volume gain to both channels. To avoid zipper noise the gain is
// ramped from the previous block's value to the current one over the first
// maxInterpolation samples, then held constant. With stereo coupling enabled
// both channels follow the left volume parameter.
void VolumeAudioProcessor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
	this->initializing(buffer);

	const int numSamples = buffer.getNumSamples();
	// Length of the old->new gain ramp, in samples.
	const int maxInterpolation = int(numSamples * volumeL->getBufferScalingValue());

	// Applies the ramp followed by the constant new gain to one channel.
	// Replaces four near-identical copies of these two loops in the original.
	auto applyGain = [&buffer, numSamples, maxInterpolation](int channel, float oldGain, float newGain)
	{
		for (int i = 0; i < maxInterpolation; i++)
		{
			const float ramped = oldGain + ((i + 1) * (newGain - oldGain) / maxInterpolation);
			buffer.setSample(channel, i, buffer.getSample(channel, i) * ramped);
		}
		for (int i = maxInterpolation; i < numSamples; i++)
		{
			buffer.setSample(channel, i, buffer.getSample(channel, i) * newGain);
		}
	};

	// dB -> linear gain. NOTE(review): divides by 10 (power dB); amplitude
	// gain conversion is conventionally /20 — confirm this is intentional.
	const float currentVolumeL = pow(10.f, volumeL->getValue() / 10.f);
	const float oldVolumeL = pow(10.f, volumeL->getOldValue() / 10.f);

	if (stereoCoupling->getBoolValue())
	{
		// Coupled: the right channel mirrors the left volume parameter.
		applyGain(/*channel*/ 0, oldVolumeL, currentVolumeL);
		applyGain(/*channel*/ 1, oldVolumeL, currentVolumeL);
	}
	else
	{
		const float currentVolumeR = pow(10.f, volumeR->getValue() / 10.f);
		const float oldVolumeR = pow(10.f, volumeR->getOldValue() / 10.f);
		applyGain(/*channel*/ 0, oldVolumeL, currentVolumeL);
		applyGain(/*channel*/ 1, oldVolumeR, currentVolumeR);
	}

	//Set current values as old values for interpolation in next buffer iteration
	volumeL->setOldValue(volumeL->getValue());
	volumeR->setOldValue(volumeR->getValue());
	this->meteringBuffer(buffer);
	this->finalizing(buffer);
}
示例5: processBlock
// Pans the signal between channels 0 and 1. panning is normalized to [0, 1]
// (0 = hard left, 0.5 = center, 1 = hard right); each side keeps unity gain
// until the pan position moves past center toward the opposite side. The pan
// position is ramped from the previous block's value over the first
// maxInterpolation samples to avoid zipper noise. A mono input feeds channel 0
// into both outputs.
void PanAudioProcessor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
	this->initializing(buffer);
	for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i) {
		buffer.clear(i, 0, buffer.getNumSamples());
	}

	// Pan law, factored out of the four duplicated loop bodies in the original.
	auto leftGain = [](float pan) { return 1.f - 2 * (std::max(0.5f, pan) - 0.5f); };
	auto rightGain = [](float pan) { return 2 * (std::min(0.5f, pan)); };

	// currentPanning: set by the editor before this buffer iteration.
	// oldPanning: set after the previous buffer iteration; ramp start value.
	const float currentPanning = panning->getNormalizedValue();
	const float oldPanning = panning->getNormalizedOldValue();
	const int numSamples = buffer.getNumSamples();
	const int maxInterpolation = int(numSamples * panning->getBufferScalingValue());
	const bool monoInput = (getNumInputChannels() == 1);

	// Ramp section: interpolate the pan position per sample.
	for (int i = 0; i < maxInterpolation; i++)
	{
		const float momentaryPanning = oldPanning + ((i + 1) * (currentPanning - oldPanning) / maxInterpolation);
		// Read before writing so channel 0's write never feeds channel 1.
		const float left = buffer.getSample(/*channel*/ 0, i);
		const float right = monoInput ? left : buffer.getSample(/*channel*/ 1, i);
		buffer.setSample(/*channel*/ 0, i, left * leftGain(momentaryPanning));
		buffer.setSample(/*channel*/ 1, i, right * rightGain(momentaryPanning));
	}
	// Steady section: hold the current pan position.
	for (int i = maxInterpolation; i < numSamples; i++)
	{
		const float left = buffer.getSample(/*channel*/ 0, i);
		const float right = monoInput ? left : buffer.getSample(/*channel*/ 1, i);
		buffer.setSample(/*channel*/ 0, i, left * leftGain(currentPanning));
		buffer.setSample(/*channel*/ 1, i, right * rightGain(currentPanning));
	}

	panning->setOldValue();
	this->meteringBuffer(buffer);
	this->finalizing(buffer);
}
示例6: renderNextBlock
// Renders numSamples resampled frames of the currently loaded sound into
// outputBuffer (channels 0 and 1) starting at startSample. Resets the voice
// when no sound is loaded or when the streaming loader cannot advance its
// read index (clearing the just-rendered region in that case).
void StreamingSamplerVoice::renderNextBlock(AudioSampleBuffer &outputBuffer, int startSample, int numSamples)
{
const StreamingSamplerSound *sound = loader.getLoadedSound();
#if USE_SAMPLE_DEBUG_COUNTER
// Keep the untouched arguments for the debug-verification loop below.
const int startDebug = startSample;
const int numDebug = numSamples;
#endif
if (sound != nullptr)
{
// Fractional part of the playback position within the source sample.
const double startAlpha = fmod(voiceUptime, 1.0);
jassert(pitchCounter != 0);
auto tempVoiceBuffer = getTemporaryVoiceBuffer();
jassert(tempVoiceBuffer != nullptr);
tempVoiceBuffer->clear();
// Copy the not resampled values into the voice buffer.
StereoChannelData data = loader.fillVoiceBuffer(*tempVoiceBuffer, pitchCounter + startAlpha);
float* outL = outputBuffer.getWritePointer(0, startSample);
float* outR = outputBuffer.getWritePointer(1, startSample);
// Remember the render region so it can be cleared on a streaming failure.
const int startFixed = startSample;
const int numSamplesFixed = numSamples;
#if USE_SAMPLE_DEBUG_COUNTER
jassert((int)voiceUptime == data.leftChannel[0]);
#endif
double indexInBuffer = startAlpha;
// The streamed source data is either 32-bit float or 16-bit integer PCM;
// dispatch to the matching interpolation overload.
if (data.isFloatingPoint)
{
const float* const inL = static_cast<const float*>(data.leftChannel);
const float* const inR = static_cast<const float*>(data.rightChannel);
interpolateStereoSamples(inL, inR, pitchData, outL, outR, startSample, indexInBuffer, uptimeDelta, numSamples, true);
}
else
{
const int16* const inL = static_cast<const int16*>(data.leftChannel);
const int16* const inR = static_cast<const int16*>(data.rightChannel);
interpolateStereoSamples(inL, inR, pitchData, outL, outR, startSample, indexInBuffer, uptimeDelta, numSamples, false);
}
#if USE_SAMPLE_DEBUG_COUNTER
// Debug build check: the sample-counter test signal must be identical on
// both channels and match the voice uptime; the buffer is then discarded.
for (int i = startDebug; i < numDebug; i++)
{
const float l = outputBuffer.getSample(0, i);
const float r = outputBuffer.getSample(1, i);
jassert(l == r);
jassert((abs(l - voiceUptime) < 0.000001) || l == 0.0f);
voiceUptime += uptimeDelta;
}
outputBuffer.clear();
#else
// Advance the playback position by the number of source samples consumed.
voiceUptime += pitchCounter;
#endif
// If the background loader could not keep up, clear what was rendered and
// stop this voice.
if (!loader.advanceReadIndex(voiceUptime))
{
#if LOG_SAMPLE_RENDERING
logger->addStreamingFailure(voiceUptime);
#endif
outputBuffer.clear(startFixed, numSamplesFixed);
resetVoice();
return;
}
const bool enoughSamples = sound->hasEnoughSamplesForBlock((int)(voiceUptime));// +numSamples * MAX_SAMPLER_PITCH));
#if LOG_SAMPLE_RENDERING
logger->checkSampleData(nullptr, DebugLogger::Location::SampleVoiceBufferFillPost, true, outputBuffer.getReadPointer(0, startFixed), numSamplesFixed);
logger->checkSampleData(nullptr, DebugLogger::Location::SampleVoiceBufferFillPost, false, outputBuffer.getReadPointer(1, startFixed), numSamplesFixed);
#endif
if (!enoughSamples) resetVoice();
}
else
{
resetVoice();
}
};