本文整理汇总了C++中AudioSampleBuffer::applyGain方法的典型用法代码示例。如果您正苦于以下问题:C++ AudioSampleBuffer::applyGain方法的具体用法?C++ AudioSampleBuffer::applyGain怎么用?C++ AudioSampleBuffer::applyGain使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类AudioSampleBuffer的用法示例。
在下文中一共展示了AudioSampleBuffer::applyGain方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: processBlock
// Spreads the input across 12 pitch-shifted copies (6 ratios, applied both up
// and down), mixes them with a delayed dry path according to the "Mix"
// parameter, and normalises the overall level.  Mix appears to span 0..200
// with 100 = fully wet (inferred from the (200 - mix)/100 maths below —
// TODO confirm against the parameter range).
void SuperSpreadAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& /*midiMessages*/)
{
    // Set the FTZ/DAZ bits (0x8040) in the SSE control/status register so
    // denormal floats are flushed to zero (avoids CPU spikes in quiet tails);
    // the caller's CSR is restored at the end of the block.
    unsigned int csr = _mm_getcsr();
    _mm_setcsr(csr | 0x8040);

    AudioProcessorParameter* mixParam = parameterState->getParameter("Mix");
    const NormalisableRange<float> mixRange(parameterState->getParameterRange("Mix"));
    const float spread0 = parameterState->getParameter("Spread")->getValue();
    // Convert the normalised 0..1 parameter value back to its real range.
    const float mix = mixRange.convertFrom0to1(mixParam->getValue());

    // Fade the detuned voices in over the first 10% of the Spread range so a
    // tiny spread does not cause an abrupt level jump.
    const float detuneFade = jmin(spread0/0.1f, 1.f);
    const float detunedGain = mix >= 100.f ? 1.f : mix / 100.f;
    // Dry gain: unity up to the midpoint, then crossfaded out; while the
    // detuned voices are still fading in, keep at least half the dry level.
    const float dryGain = mix <= 100.f ? 1.f : detuneFade < 1.f ? jmax(0.5f * (1.f - detuneFade), (200.f - mix) / 100.f) : (200.f - mix) / 100.f;
    const float spreadGain = detunedGain * detuneFade;
    // Square-law curve gives finer control at small spread settings.
    const float spread = 0.5f * spread0*spread0;

    const int numChannels = buffer.getNumChannels();
    const int numSamples = buffer.getNumSamples();
    float* chL = buffer.getWritePointer(0);
    float* chR = numChannels == 2 ? buffer.getWritePointer(1) : nullptr;

    // Copy the dry input into 12 scratch channels: 0..5 from the left,
    // 6..11 from the right (stereo only).
    for (int i=0; i<12 / 2; ++i)
    {
        pitchBuffer.copyFrom(i, 0, chL, numSamples);
        if (chR != nullptr)
            pitchBuffer.copyFrom(6 + i, 0, chR, numSamples);
    }

    // Delay the dry path in place (presumably to align it with the pitch
    // shifters' latency — TODO confirm), then scale it.
    mainDelay.processBlock(chL, chR, numSamples);
    buffer.applyGain(dryGain);

    // Per-voice pitch ratios at maximum spread; raising them to 'spread'
    // makes every voice converge to 1.0 (no shift) as spread approaches 0.
    const float maxPitches[6] = {0.893f, 0.939f, 0.98f, 1.02f, 1.064f, 1.11f};
    for (int i=0; i<6; ++i)
    {
        // Voices 0..5 shift by the ratio, voices 6..11 by its reciprocal.
        shifter[i]->setPitch(std::pow(maxPitches[i], spread));
        shifter[i+6]->setPitch(std::pow(1.f / maxPitches[i], spread));
        float* procL = pitchBuffer.getWritePointer(i);
        float* procR = pitchBuffer.getWritePointer(i+6);
        shifter[i]->processBlock(procL, numSamples);
        buffer.addFrom(0, 0, procL, numSamples, spreadGain/* * gain*/);
        if (numChannels == 2)
        {
            shifter[i+6]->processBlock(procR, numSamples);
            buffer.addFrom(1, 0, procR, numSamples, spreadGain/* * gain*/);
        }
    }

    // Loudness compensation: six roughly uncorrelated voices sum with about
    // sqrt(6), so scale the mix back down to keep the level consistent.
    const float totalGain = spreadGain == 0.f ? 1.f : 1.41f / (1.f + std::sqrt(6.f) * spreadGain);
    buffer.applyGain(totalGain);

    // Restore the caller's SSE control/status register.
    _mm_setcsr(csr);
}
示例2: processBlock
// Analyses the incoming audio level and continuously adjusts the trim gain so
// the measured level (peak / max RMS / average RMS, selected by the
// target-type parameter) matches the user's target, then applies that gain.
void ZenAutoTrimAudioProcessor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    if (isEnabled())
    {
        // getPlayHead() may legitimately return nullptr (standalone/offline
        // hosts), and getCurrentPosition() may fail.  The original
        // dereferenced the playhead unconditionally (possible crash) and
        // could pass an uninitialised posInfo on failure — guard both.
        aPlayHead = getPlayHead();
        AudioPlayHead::CurrentPositionInfo posInfo;
        posInfo.resetToDefault();
        if (aPlayHead != nullptr)
            aPlayHead->getCurrentPosition(posInfo);

        // Keep the analyser in sync with the host's sample rate.
        if (prevSampleRate != this->getSampleRate())
        {
            prevSampleRate = this->getSampleRate();
            levelAnalysisManager.sampleRateChanged(prevSampleRate);
        }

        // Don't analyse pure silence, and only analyse while auto-gain is on.
        if (buffer.getMagnitude(0, buffer.getNumSamples()) > 0.0f && autoGainEnableParam->isOn())
            levelAnalysisManager.processSamples(&buffer, posInfo);

        // Pick the measured level corresponding to the selected target type.
        double peakToHit;
        int targetType = targetTypeParam->getValueAsInt();
        if (targetType == Peak)
        {
            peakToHit = levelAnalysisManager.getMaxChannelPeak();
        }
        else if (targetType == MaxRMS)
        {
            peakToHit = levelAnalysisManager.getMaxChannelRMS();
        }
        else if (targetType == AverageRMS)
        {
            peakToHit = levelAnalysisManager.getMaxCurrentRunningRMS();
        }
        else
        {
            peakToHit = levelAnalysisManager.getMaxChannelPeak();
            jassertfalse; // unknown target type — fall back to channel peak
        }

        // Guard against division by zero before any level has been measured
        // (the original could set the gain parameter to +inf here).
        if (peakToHit > 0.0)
        {
            // Division in linear gain == subtraction in dB: the gain needed
            // is target level / measured level.
            double gainValueToAdd = targetGainParam->getValueInGain() / peakToHit;
            if (!almostEqual(gainValueToAdd, gainParam->getValueInGain())) // gain value changed
            {
                // setValueFromGain also flags the UI for update.
                gainParam->setValueFromGain(gainValueToAdd);
            }
        }

        // Apply the (possibly just updated) trim gain to the whole buffer.
        buffer.applyGain(gainParam->getValueInGain());
    }
}
示例3: processBlock
// Applies one of three stereo pan laws to the buffer.
// panMode 0: linear; 1: square-root; 2: sine/cosine (equal power).
// C is the pan position in [0, 1].
// NOTE(review): channel 0 is scaled by R and channel 1 by L — the
// left/right naming looks swapped relative to the channel indices; confirm
// against the editor's pan-slider convention before "fixing" anything.
void GainLawsAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();
    const int numTracks = getNumInputChannels();
    if (pluginON)
    {
        if (panMode == 0)
        {
            // Linear pan law (perceived level dip at centre).
            L = C;
            R = 1-C;
        }
        else if (panMode == 1)
        {
            // Square-root pan law (approximately constant power).
            L = sqrt(C);
            R = sqrt(1-C);
        }
        else if (panMode == 2)
        {
            // Sine/cosine pan law (constant power).
            L = sin(C*(M_PI/2));
            R = cos(C*(M_PI/2));
        }
        // Apply gain
        // Mono input: duplicate channel 0 into channel 1 so both sides can be
        // panned.  NOTE(review): this assumes the buffer has at least two
        // channels even with one input — verify the bus layout, otherwise the
        // channel-1 accesses below are out of range.  The clear() is
        // redundant, as copyFrom() overwrites the same region.
        if (numTracks == 1)
        {
            buffer.clear(1,0,numSamples);
            buffer.copyFrom(1,0,buffer.getSampleData(0),numSamples);
        }
        buffer.applyGain(0,0,numSamples,(float)R);
        buffer.applyGain(1,0,numSamples,(float)L);
    }
    // When pluginON is false the audio passes through untouched.
}
示例4: processBlock
// Clears surplus output channels, runs a forward FFT on each input channel
// (applying the output gain as it goes), then estimates the fundamental
// pitch from the magnitude spectrum via harmonic summation.
void PitchestimatorpluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // In case we have more outputs than inputs, clear any output channels
    // that did not contain input data.
    for (int ch = getNumInputChannels(); ch < getNumOutputChannels(); ++ch)
        buffer.clear (ch, 0, buffer.getNumSamples());

    const int numSamples = buffer.getNumSamples();

    // Forward-transform each input channel.  fftData is reused per channel,
    // so the spectrum consumed below belongs to the last channel processed.
    for (int ch = 0; ch < getNumInputChannels(); ++ch)
    {
        float* samples = buffer.getWritePointer (ch);
        fft->processForward (samples, fftData, numSamples, nFFT);
        buffer.applyGain (ch, 0, numSamples, gain);
    }

    // Magnitude spectrum (radius of each complex bin).
    for (int bin = 0; bin < numSamples; ++bin)
        X[bin] = fft->cartopolRadius (fftData[bin][0], fftData[bin][1]);

    // Harmonic-summation cost over the candidate f0 range, then pick the
    // best candidate and format it with one decimal place for the UI.
    HS->generateCost (X, f0Area, numberOfHarmonics, numSamples, f0AreaSize, getSampleRate(), nFFT);
    pitchEstimate = HS->estimatePitch (f0Area, f0AreaSize);
    pitchText = String (pitchEstimate, 1);
}
示例5: processBlock
// Master processing chain: the gate trigger inspects the live input, the
// buffer is then silenced and re-filled by the sequencer / synth / metronome
// sub-processors, and finally scaled by the master gain.
void MainAudioProcessor::processBlock (AudioSampleBuffer& buffer,
MidiBuffer& midiMessages)
{
    // Let the gate trigger analyse the incoming audio first.
    gateTrigger->processBlock (buffer, midiMessages);
    // (startSample, numSamples, gain) overload with gain 0: silences every
    // channel so the generators below start from a clean slate.
    buffer.applyGain (0, buffer.getNumSamples(), 0);
    // Track the host transport state as cached by the owning processor.
    AudioPlayHead::CurrentPositionInfo newTime = pluginAudioProcessor->getLastPosInfo();
    playing = newTime.isPlaying;
    resetTimer->processBlock (buffer, midiMessages);
    sequencer->processBlock (buffer, midiMessages);
    if (synthEnabled) {
        synthAudioProcessor->processBlock (buffer, midiMessages);
    }
    if (metronomeEnabled) {
        metronomeAudioProcessor->processBlock (buffer, midiMessages);
    }
    // Master output gain over the freshly generated audio.
    buffer.applyGain (0, buffer.getNumSamples(), gain);
}
示例6: processBlock
// Per-block processing: applies input gain, feeds host + on-screen-keyboard
// MIDI into the synth, renders the synth voices, runs a simple feedback
// delay, clears surplus outputs, and records the host transport position.
void AudioPluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // Rebuild the synth's single sound each block so changes to the sound
    // take effect immediately.  NOTE(review): this allocates/frees on the
    // audio thread every block — confirm that is intended.
    synth.clearSounds();
    synth.addSound(getSound());

    const int numSamples = buffer.getNumSamples();
    int channel, dp = 0;

    // Go through the incoming data, and apply our gain to it...
    for (channel = 0; channel < getNumInputChannels(); ++channel)
        buffer.applyGain (channel, 0, buffer.getNumSamples(), gain);

    // Now pass any incoming midi messages to our keyboard state object, and let it
    // add messages to the buffer if the user is clicking on the on-screen keys
    keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);

    // and now get the synth to process these midi events and generate its output.
    synth.renderNextBlock (buffer, midiMessages, 0, numSamples);

    // Apply our delay effect to the new output..
    // All channels share one write position; 'delay' is the feedback amount.
    for (channel = 0; channel < getNumInputChannels(); ++channel)
    {
        float* channelData = buffer.getSampleData (channel);
        // jmin reuses the last delay channel when the buffer has fewer
        // channels than the audio buffer.
        float* delayData = delayBuffer.getSampleData (jmin (channel, delayBuffer.getNumChannels() - 1));
        dp = delayPosition;

        for (int i = 0; i < numSamples; ++i)
        {
            const float in = channelData[i];
            // Mix the delayed signal into the output, then write the new
            // (input + feedback) sample back into the circular buffer.
            channelData[i] += delayData[dp];
            delayData[dp] = (delayData[dp] + in) * delay;
            if (++dp >= delayBuffer.getNumSamples())
                dp = 0; // wrap the circular delay buffer
        }
    }
    // Persist the write position for the next block.
    delayPosition = dp;

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    // ask the host for the current time so we can display it...
    AudioPlayHead::CurrentPositionInfo newTime;
    if (getPlayHead() != nullptr && getPlayHead()->getCurrentPosition (newTime))
    {
        // Successfully got the current time from the host..
        lastPosInfo = newTime;
    }
    else
    {
        // If the host fails to fill-in the current time, we'll just clear it to a default..
        lastPosInfo.resetToDefault();
    }
}
示例7: processBlock
// Looper track processing: records the input into a mono loop buffer (up to
// MAX_LOOP_LENGTH_S), or plays the recorded loop back circularly with a
// volume ramp; tracks an RMS envelope either way.
void OOTrack::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    updatePendingLooperState (OOServer::getInstance()->looperState.timeInSamples);
    OOServer::getInstance()->updateGlobalTime (this, buffer.getNumSamples());

    // RECORDING
    if (*isRecording)
    {
        // BUG FIX: the original detected the overflow but still copied the
        // full block, writing past the end of monoLoopSample.  Clamp the
        // copy to the remaining capacity instead.
        int samplesToRecord = buffer.getNumSamples();
        const int capacity = (int) (getSampleRate() * MAX_LOOP_LENGTH_S);
        if (recordNeedle.get() + samplesToRecord > capacity)
        {
            *shouldRecord = false;
            *isRecording = false;
            samplesToRecord = capacity - recordNeedle.get();
            if (samplesToRecord < 0)
                samplesToRecord = 0;
        }
        if (samplesToRecord > 0)
        {
            monoLoopSample.copyFrom (0, recordNeedle.get(), buffer, 0, 0, samplesToRecord);
            recordNeedle += samplesToRecord;
        }
        // One-pole RMS envelope follower.
        rmsOut = (1-rmsAlpha)*rmsOut + rmsAlpha*buffer.getRMSLevel (0, 0, buffer.getNumSamples());
    }
    else
    {
        streamBipBuffer.writeBlock (buffer);
    }

    // PLAYING
    // allow circular reading, although not sure that overflow needs to be
    // handled as it's written with the same block sizes as read; we may need
    // it if we start to use a different clock than looperState in OOServer
    // (granularity of blockSize) or if we dynamically change blockSize.
    if (*isPlaying && recordNeedle.get() > 0 && monoLoopSample.getNumSamples())
    {
        if ((playNeedle + buffer.getNumSamples()) > recordNeedle.get())
        {
            // Wrap-around: play the tail of the loop, then the head.
            int firstSegmentLength = recordNeedle.get() - playNeedle;
            int secondSegmentLength = buffer.getNumSamples() - firstSegmentLength;
            buffer.copyFrom (0, 0, monoLoopSample, 0, playNeedle, firstSegmentLength);
            // BUG FIX: the wrapped segment must be written after the first
            // one (destination offset firstSegmentLength), not at sample 0
            // where it overwrote the first segment and left the tail stale.
            buffer.copyFrom (0, firstSegmentLength, monoLoopSample, 0, 0, secondSegmentLength);
            playNeedle = secondSegmentLength;
        }
        else
        {
            buffer.copyFrom (0, 0, monoLoopSample, 0, playNeedle, buffer.getNumSamples());
            playNeedle += buffer.getNumSamples();
            playNeedle %= recordNeedle.get();
        }
        // Ramp between the last applied volume and the current one to avoid
        // zipper noise, then update the RMS envelope.
        buffer.applyGainRamp (0, 0, buffer.getNumSamples(), lastVolume, *volume);
        lastVolume = *volume;
        rmsOut = (1-rmsAlpha)*rmsOut + rmsAlpha*buffer.getRMSLevel (0, 0, buffer.getNumSamples());
    }
    else
    {
        // silence output buffer
        buffer.applyGain (0, 0, buffer.getNumSamples(), 0);
    }
}
示例8: pluginProcessing
// Equivalent to a standard JUCE processBlock(): boosts the signal by the
// drive amount, hard-clips every sample to [negativeThreshold, threshold],
// then applies the output gain.  Parameter values arrive via the member
// variables bound by reference in the constructor.
void SimpleDistortionAudioProcessor::pluginProcessing (AudioSampleBuffer &buffer, MidiBuffer &midiMessages)
{
    // Drive stage: 'drive' is already a linear gain (added as a dB
    // parameter, so no conversion is needed here).
    buffer.applyGain (drive);

    const int channelCount = buffer.getNumChannels();
    const int sampleCount  = buffer.getNumSamples();

    // Hard-clip each channel in place.
    for (int ch = 0; ch < channelCount; ++ch)
    {
        float *samples = buffer.getWritePointer (ch);

        for (int n = 0; n < sampleCount; ++n)
        {
            const float s = samples[n];
            // Constrain the sample to the clipping window.
            samples[n] = s > threshold         ? threshold
                       : s < negativeThreshold ? negativeThreshold
                                               : s;
        }
    }

    // Output level stage.
    buffer.applyGain (gain);
}
示例9: processBlock
/*
 Applies the user-chosen gain and panning to the audio buffer.
 If the track is not muted, gain is applied to the left and right channels
 weighted by the panning value (linear pan law); if it is muted, the whole
 buffer is scaled by the mute gain.
 @param &buffer the buffer to be processed
*/
void ChannelStripProcessor::processBlock(AudioSampleBuffer& buffer, MidiBuffer&)
{
    if (!_muted)
    {
        // Linear pan law: left channel gets _gain * (1 - pan), right channel
        // gets _gain * pan.  (Removed two unused locals from the original;
        // note the old 'rightGain_' was computed with the LEFT factor and
        // never used — misleading dead code.)
        buffer.applyGain(0, 0, buffer.getNumSamples(), _gain * (1.0f - _panning));
        buffer.applyGain(1, 0, buffer.getNumSamples(), _gain * _panning);
    }
    else
    {
        // Muted: scale the whole buffer by the mute gain (expected to be 0).
        buffer.applyGain(_muteGain);
    }

    // Clear any output channels beyond the available inputs.
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear(i, 0, buffer.getNumSamples());
}
示例10: processBlock
// Attenuates each input channel by the gain parameter, then runs a simple
// feedback delay over the left channel (channel 0) using a circular
// accumulator buffer, and clears any surplus output channels.
void RedverbEngine::processBlock (AudioSampleBuffer& buffer,
                                  MidiBuffer& midiMessages)
{
    // for each of our input channels, we'll attenuate its level by the
    // amount that our volume parameter is set to.
    for (int channel = 0; channel < getNumInputChannels(); ++channel)
    {
        buffer.applyGain (channel, 0, buffer.getNumSamples(), gain);
    }

    // Feedback delay on channel 0 only: output the delayed sample and write
    // input + feedback back into the circular buffer.  (Removed the unused
    // 'channels' local and the redundant duplicate read of accBuffer[cursor].)
    int samples = buffer.getNumSamples();
    float* in1 = buffer.getSampleData(0);

    while (--samples >= 0)
    {
        const float input   = *in1;
        const float delayed = accBuffer[cursor];
        *in1++ = delayed;
        accBuffer[cursor++] = input + delayed * feedback;
        if (cursor >= bufferSize)
            cursor = 0; // wrap the circular buffer
    }

    // in case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
示例11: processBlock
// Scales the whole buffer by the current gain value.
// NOTE(review): 'gain' is dereferenced here — presumably a pointer to a
// float parameter owned by the processor; confirm it can never be null.
void processBlock (AudioSampleBuffer& buffer, MidiBuffer&) override
{
    buffer.applyGain (*gain);
}
示例12: processBlock
//.........这里部分代码省略.........
wetBuffer.copyFrom(1, 0, buffer, 1, 0, noSamples);
// mono mix wet buffer (used for stereo spread later)
float *pfWetL = wetBuffer.getSampleData(0);
float *pfWetR = wetBuffer.getSampleData(1);
while (--numSamples >= 0)
{
*pfWetL = *pfWetR = (0.5f * (*pfWetL + *pfWetR));
pfWetL++;
pfWetR++;
}
numSamples = buffer.getNumSamples();
// apply the pre-delay to the wet buffer
preDelayFilterL.processSamples(wetBuffer.getSampleData(0), noSamples);
preDelayFilterR.processSamples(wetBuffer.getSampleData(1), noSamples);
// create a buffer to hold the early reflections
AudioSampleBuffer earlyReflections(noChannels, noSamples);
earlyReflections.copyFrom(0, 0, wetBuffer, 0, 0, noSamples);
earlyReflections.copyFrom(1, 0, wetBuffer, 1, 0, noSamples);
// and process the early reflections
delayLineL.processSamples(earlyReflections.getSampleData(0), noSamples);
delayLineR.processSamples(earlyReflections.getSampleData(1), noSamples);
// create a buffer to hold the late reverb
AudioSampleBuffer lateReverb(noChannels, noSamples);
lateReverb.clear();
float *pfLateL = lateReverb.getSampleData(0);
float *pfLateR = lateReverb.getSampleData(1);
pfWetL = wetBuffer.getSampleData(0);
pfWetR = wetBuffer.getSampleData(1);
// comb filter section
for (int i = 0; i < 8; ++i)
{
combFilterL[i].processSamplesAdding(pfWetL, pfLateL, noSamples);
combFilterR[i].processSamplesAdding(pfWetR, pfLateR, noSamples);
}
// allpass filter section
for (int i = 0; i < 4; ++i)
{
allpassFilterL[i].processSamples(lateReverb.getSampleData(0), noSamples);
allpassFilterR[i].processSamples(lateReverb.getSampleData(1), noSamples);
}
// clear wet buffer
wetBuffer.clear();
// add early reflections to wet buffer
wetBuffer.addFrom(0, 0, earlyReflections, 0, 0, noSamples, early);
wetBuffer.addFrom(1, 0, earlyReflections, 1, 0, noSamples, early);
// add late reverb to wet buffer
lateReverb.applyGain(0, noSamples, 0.1f);
wetBuffer.addFrom(0, 0, lateReverb, 0, 0, noSamples, late);
wetBuffer.addFrom(1, 0, lateReverb, 1, 0, noSamples, late);
// final EQ
lowEQL.processSamples(pfWetL, noSamples);
lowEQR.processSamples(pfWetR, noSamples);
highEQL.processSamples(pfWetL, noSamples);
highEQR.processSamples(pfWetR, noSamples);
// create stereo spread
while (--numSamples >= 0)
{
float fLeft = *pfWetL;
float fRight = *pfWetR;
*pfWetL = (fLeft * spread1) + (fRight * spread2);
*pfWetR = (fRight * spread1) + (fLeft * spread2);
pfWetL++;
pfWetR++;
}
numSamples = buffer.getNumSamples();
// apply wet/dry mix gains
wetBuffer.applyGain(0, noSamples, wet);
buffer.applyGain(0, noSamples, dry);
// add wet buffer to output buffer
buffer.addFrom(0, 0, wetBuffer, 0, 0, noSamples);
buffer.addFrom(1, 0, wetBuffer, 1, 0, noSamples);
}
//========================================================================
// in case we have more outputs than inputs, we'll clear any output
// channels that didn't contain input data, (because these aren't
// guaranteed to be empty - they may contain garbage).
for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
{
buffer.clear (i, 0, buffer.getNumSamples());
}
}
示例13: processBlock
// Stereo utility processing: per-channel phase invert, per-channel pan with
// crossfeed between the two channels, per-channel gain, then a master gain.
// NOTE(review): assumes the buffer has at least two channels and tmpBuffer
// is at least as large as the block — confirm in prepareToPlay.
void TheFunctionAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    int numberOfSamples = buffer.getNumSamples();
    float* channelDataL = buffer.getWritePointer (0);
    float* channelDataR = buffer.getWritePointer (1);

    // Keep an unprocessed copy of the input so the pan stage can mix the
    // original L/R signals after the outputs are overwritten in place.
    tmpBuffer.copyFrom(0, 0, channelDataL, numberOfSamples);
    tmpBuffer.copyFrom(1, 0, channelDataR, numberOfSamples);
    float* inputDataL = tmpBuffer.getWritePointer (0);
    float* inputDataR = tmpBuffer.getWritePointer (1);

    float LinLout; // Left IN Left OUT - Gain
    float LinRout; // Left IN Right OUT - Gain
    float RinLout; // Right IN Left OUT - Gain
    float RinRout; // Right IN Right OUT - Gain

    // Work out L+R channel pan positions (pan in [0,1], 0.5 = centre):
    // below centre the own-side gain is 1 and the cross gain ramps up;
    // above centre the own-side gain ramps down and the cross gain is 1.
    if (panL < 0.5)
    {
        LinLout = 1;
        LinRout = panL * 2;
    }
    else
    {
        LinLout = ((panL *2) -2) *-1;
        LinRout = 1;
    }

    if (panR < 0.5)
    {
        RinLout = 1;
        RinRout = panR * 2;
    }
    else
    {
        RinLout = ((panR *2) -2) *-1;
        RinRout = 1;
    }

    //******************
    // Apply individual channel phase, pan and gain.
    // Hoisted the loop-invariant phase tests out of the sample loop, and
    // removed the dead metering code (peakLevelL/R and RMSLevelL/R were
    // accumulated here but never stored or read anywhere in this function).
    const bool invertL = (phaseL >= 0.5);
    const bool invertR = (phaseR >= 0.5);

    for (int i = 0; i < numberOfSamples; ++i)
    {
        // Phase
        if (invertL)
            inputDataL[i] *= -1;
        if (invertR)
            inputDataR[i] *= -1;

        // Pan: each output is a weighted mix of both inputs
        channelDataR[i] = (inputDataR[i] * RinRout) + (inputDataL[i] * LinRout);
        channelDataL[i] = (inputDataL[i] * LinLout) + (inputDataR[i] * RinLout);

        // Gain
        channelDataL[i] *= gainL;
        channelDataR[i] *= gainR;
    }

    //******************
    // Master Gain
    buffer.applyGain (0, numberOfSamples, gain);

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
示例14: processBlock
// Renders the synth into channel 0, then applies (per sample) optional
// tremolo, tanh overdrive, and a feedback delay; finally filters the
// channel, duplicates it to channel 1, and applies the output gain.
void vstSynthAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // number of samples in current buffer
    const int numSamples = buffer.getNumSamples();

    /*
     Checks to see if delay size has changed since the last block. If it has,
     the delay buffer is resized and cleared (to prevent garbage in the output)
     The read and write pointers are also reset to their starting positions and
     the saved filter states are removed to reduce transients.
    */
    // delayTimeParam controlled by vstSynthEditor::delayTimeSlider
    if (delayBuffer.getNumSamples() != getParameter(delayTimeParam) + numSamples)
    {
        delayBuffer.setSize(1, getParameter(delayTimeParam) + numSamples);
        delayBuffer.clear();
        // Write pointer leads the read pointer by the delay time.
        delayWritePtr = delayBuffer.getSampleData(0) + (int) getParameter(delayTimeParam);
        delayReadPtr = delayBuffer.getSampleData(0);
        //hpeqFilter.reset();
    }

    // Receives MIDI data from host
    keyboardState.processNextMidiBuffer(midiMessages, 0, numSamples, true);

    // Call to vstSynthVoice::renderNextBlock where buffer is filled with raw oscillator data
    vstSynth.renderNextBlock(buffer, midiMessages, 0, numSamples);

    // Pointer to beginning of buffer
    float* bufferPtr = buffer.getSampleData(0, 0);

    // Performs tremolo (AM) if enabled, overdrive and delay operation
    for (int currentSample = 0; currentSample < numSamples; currentSample++)
    {
        // Apply tremolo if enabled
        if (getParameter(lfoDestParam) == 2) // Controlled by vstSynthEditor::lfoDestComboBox
        {
            tremolo.setVibratoRate(getParameter(lfoFreqParam));  // Controlled by vstSynthEditor::lfoFreqSlider
            tremolo.setVibratoGain(getParameter(lfoDevParam)/10); // Controlled by vstSynthEditor::lfoDevSlider
            *bufferPtr *= (float) (1+tremolo.tick()); // Modulate amplitude with tremolo output
        }

        // Push signal through tanh to introduce nonlinear distortion
        *bufferPtr = tanhf(getParameter(driveParam) * *bufferPtr); // Controlled by vstSynthEditor::driveSlider

        // Process delay if enabled
        if (getParameter(delayTimeParam) > 0) // Controlled by vstSynthEditor::delayTimeSlider
        {
            // Add existing delay data into buffer
            *bufferPtr += getParameter(delayFeedbackParam) * *delayReadPtr; // Controlled by vstSynthEditor::delayFBSlider

            // Save current output data into delay buffer
            *delayWritePtr = *bufferPtr;

            // Increment pointers
            delayWritePtr++;
            delayReadPtr++;

            // Circular buffer: wrap the pointers when the end is reached.
            // BUG FIX: the original compared with '>', which let a pointer
            // sit exactly one element past the end for an iteration and
            // read/write out of bounds before wrapping; '>=' wraps exactly
            // at the one-past-end position.
            if (delayReadPtr >= delayBuffer.getSampleData(0) + delayBuffer.getNumSamples())
            {
                delayReadPtr = delayBuffer.getSampleData(0);
            }
            if (delayWritePtr >= delayBuffer.getSampleData(0) + delayBuffer.getNumSamples())
            {
                delayWritePtr = delayBuffer.getSampleData(0);
            }
        }

        // Increment pointer
        bufferPtr++;
    }

    // Send buffer to vstSynthFilter where it is replaced with filtered data
    hpeqFilter.processSamples(buffer.getSampleData(0, 0), numSamples);

    // All processing happens in only one channel for speed; the other channel is filled here.
    buffer.addFrom(1, 0, buffer, 0, 0, numSamples);

    // Apply overall output gain to buffer before playback
    buffer.applyGain(0, numSamples, 10 * getParameter(outputGainParam)); // Controlled by vstSynthEditor::outputGainSlider

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
示例15: processBlock
void DRowAudioFilter::processBlock (AudioSampleBuffer& buffer,
MidiBuffer& midiMessages)
{
smoothParameters();
const int numInputChannels = getNumInputChannels();
int numSamples = buffer.getNumSamples();
// set up the parameters to be used
float inGain = decibelsToAbsolute(params[INGAIN].getSmoothedValue());
float outGain = decibelsToAbsolute(params[OUTGAIN].getSmoothedValue());
buffer.applyGain(0, buffer.getNumSamples(), inGain);
if (numInputChannels == 2)
{
// get sample pointers
float* channelL = buffer.getSampleData(0);
float* channelR = buffer.getSampleData(1);
// pre-filter
inFilterL->processSamples(buffer.getSampleData(0), numSamples);
inFilterR->processSamples(buffer.getSampleData(1), numSamples);
while (--numSamples >= 0)
{
float sampleL = *channelL;
float sampleR = *channelR;
// clip samples
sampleL = jlimit(-1.0f, 1.0f, sampleL);
sampleR = jlimit(-1.0f, 1.0f, sampleR);
if (sampleL < 0.0f) {
sampleL *= -1.0f;
sampleL = linearInterpolate(distortionBuffer, distortionBufferSize, sampleL*distortionBufferMax);
sampleL *= -1.0f;
}
else {
sampleL = linearInterpolate(distortionBuffer, distortionBufferSize, sampleL*distortionBufferMax);
}
if (sampleR < 0.0f) {
sampleR *= -1.0f;
sampleR = linearInterpolate(distortionBuffer, distortionBufferSize, sampleR*distortionBufferMax);
sampleR *= -1.0f;
}
else {
sampleR = linearInterpolate(distortionBuffer, distortionBufferSize, sampleR*distortionBufferMax);
}
*channelL++ = sampleL;
*channelR++ = sampleR;
}
// post-filter
outFilterL->processSamples(buffer.getSampleData(0), buffer.getNumSamples());
outFilterR->processSamples(buffer.getSampleData(1), buffer.getNumSamples());
buffer.applyGain(0, buffer.getNumSamples(), outGain);
}
else if (numInputChannels == 1)
{
// get sample pointers
float* channelL = buffer.getSampleData(0);
// pre-filter
inFilterL->processSamples(buffer.getSampleData(0), numSamples);
while (--numSamples >= 0)
{
float sampleL = *channelL;
// clip samples
sampleL = jlimit(-1.0f, 1.0f, sampleL);
if (sampleL < 0.0f) {
sampleL *= -1.0f;
sampleL = linearInterpolate(distortionBuffer, distortionBufferSize, sampleL*distortionBufferMax);
sampleL *= -1.0f;
}
else {
sampleL = linearInterpolate(distortionBuffer, distortionBufferSize, sampleL*distortionBufferMax);
}
*channelL++ = sampleL;
}
// post-filter
outFilterL->processSamples(buffer.getSampleData(0), buffer.getNumSamples());
buffer.applyGain(0, buffer.getNumSamples(), outGain);
}
//========================================================================
// in case we have more outputs than inputs, we'll clear any output
// channels that didn't contain input data, (because these aren't
// guaranteed to be empty - they may contain garbage).
for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
{
//.........这里部分代码省略.........