本文整理汇总了C++中AudioSampleBuffer::getSampleData方法的典型用法代码示例。如果您正苦于以下问题:C++ AudioSampleBuffer::getSampleData方法的具体用法?C++ AudioSampleBuffer::getSampleData怎么用?C++ AudioSampleBuffer::getSampleData使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类AudioSampleBuffer的用法示例。
在下文中一共展示了AudioSampleBuffer::getSampleData方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: renderNextBlock
// Renders a plain sine-wave voice additively into outputBuffer, honouring an
// exponential tail-off once the note has been released.
void renderNextBlock (AudioSampleBuffer& outputBuffer, int startSample, int numSamples)
{
    if (angleDelta == 0.0)
        return; // voice is inactive — nothing to render

    if (tailOff > 0)
    {
        // Release phase: same sine, scaled by a decaying tail factor.
        for (; numSamples > 0; --numSamples)
        {
            const float sampleValue = (float) (sin (currentAngle) * level * tailOff);

            for (int chan = outputBuffer.getNumChannels(); --chan >= 0;)
                *outputBuffer.getSampleData (chan, startSample) += sampleValue;

            currentAngle += angleDelta;
            ++startSample;

            tailOff *= 0.9999; // exponential fade-out
            if (tailOff <= 0.005)
            {
                // Faded to (near) silence — free the voice and stop oscillating.
                clearCurrentNote();
                angleDelta = 0.0;
                break;
            }
        }
    }
    else
    {
        // Sustain phase: constant-level sine.
        for (; numSamples > 0; --numSamples)
        {
            const float sampleValue = (float) (sin (currentAngle) * level);

            for (int chan = outputBuffer.getNumChannels(); --chan >= 0;)
                *outputBuffer.getSampleData (chan, startSample) += sampleValue;

            currentAngle += angleDelta;
            ++startSample;
        }
    }
}
示例2: processBlock
// Runs the EQ over every sample of the block, mono or stereo.
//
// BUG FIX: the original called EQ.processL()/processR() exactly once per block
// with only the first sample (*in), and left the sample count `i` unused — so
// all but the first sample of every block bypassed the EQ.  Iterate over the
// whole block instead.
// NOTE(review): this assumes processL/processR take a float& and filter the
// sample in place, as the original's pass-by-dereference suggests — confirm
// against the EQ class's declaration.
void SyEqualizerAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();

    if(getNumInputChannels()<2)
    {
        // Mono: run the left-channel EQ over the single channel.
        float* in = buffer.getSampleData(0);
        for (int i = 0; i < numSamples; ++i)
            EQ.processL(in[i]);
    }
    else
    {
        // Stereo: independent left/right EQ passes.
        float* inL = buffer.getSampleData(0);
        float* inR = buffer.getSampleData(1);
        for (int i = 0; i < numSamples; ++i)
        {
            EQ.processL(inL[i]);
            EQ.processR(inR[i]);
        }
    }
}
示例3: processBlock
// Runs one audio block through a dynamically-loaded (LADSPA-style) plugin:
// wires the host buffer's channels to the plugin's ports, then invokes
// whichever render callback the plugin exposes (run, or run_adding via a
// scratch buffer).
void processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
const int numSamples = buffer.getNumSamples();
// Only talk to the plugin once it is fully initialised and instantiated.
if (initialised && plugin != nullptr && handle != nullptr)
{
// Connect each declared input port to the matching buffer channel
// (nullptr for ports beyond the buffer's channel count).
for (int i = 0; i < inputs.size(); ++i)
plugin->connect_port (handle, inputs[i],
i < buffer.getNumChannels() ? buffer.getSampleData (i) : nullptr);
if (plugin->run != nullptr)
{
// In-place path: output ports share the host buffer's channels.
for (int i = 0; i < outputs.size(); ++i)
plugin->connect_port (handle, outputs.getUnchecked(i),
i < buffer.getNumChannels() ? buffer.getSampleData (i) : nullptr);
plugin->run (handle, numSamples);
return;
}
if (plugin->run_adding != nullptr)
{
// Additive path: render into a cleared scratch buffer, then copy the
// result back over the corresponding host channels.
tempBuffer.setSize (outputs.size(), numSamples);
tempBuffer.clear();
for (int i = 0; i < outputs.size(); ++i)
plugin->connect_port (handle, outputs.getUnchecked(i), tempBuffer.getSampleData (i));
plugin->run_adding (handle, numSamples);
for (int i = 0; i < outputs.size(); ++i)
if (i < buffer.getNumChannels())
buffer.copyFrom (i, 0, tempBuffer, i, 0, numSamples);
return;
}
jassertfalse; // no callback to use?
}
// Output channels with no corresponding input may contain garbage — clear
// them (this is also the fallthrough when the plugin couldn't be used).
for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
buffer.clear (i, 0, numSamples);
}
示例4: process
// Runs the convolver in place over every channel of `buffer`.
//
// Improvement: the original heap-allocated a temporary float*[] with
// new[]/delete[] on the audio thread just to gather the channel pointers
// (and leaked it if process() threw).  AudioSampleBuffer already exposes
// exactly that array via getArrayOfChannels(), so use it directly — no
// allocation, no leak, same pointers.
void HybridConvolver::process(AudioSampleBuffer& buffer)
{
    int numSamples = buffer.getNumSamples();
    int numChannels = buffer.getNumChannels();
    // In-place processing: the same channel array is both source and dest.
    float** data = buffer.getArrayOfChannels();
    process(data, data, numChannels, numSamples);
}
示例5: processBlock
// Per-block processing pipeline: apply gain, merge on-screen-keyboard MIDI,
// render the synth, run a simple feedback delay, clear surplus outputs, and
// finally snapshot the host transport position for the editor to display.
void JuceDemoPluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int blockLen = buffer.getNumSamples();

    // Scale the incoming audio by the user's gain parameter.
    for (int ch = 0; ch < getNumInputChannels(); ++ch)
        buffer.applyGain (ch, 0, buffer.getNumSamples(), gain);

    // Let clicks on the on-screen keys inject MIDI messages into this block...
    keyboardState.processNextMidiBuffer (midiMessages, 0, blockLen, true);

    // ...and have the synth render those events on top of the scaled input.
    synth.renderNextBlock (buffer, midiMessages, 0, blockLen);

    // Feedback delay. Every channel restarts from the shared delayPosition so
    // the channels stay in step; the position after the loop is saved back.
    int writePos = 0;
    for (int ch = 0; ch < getNumInputChannels(); ++ch)
    {
        float* audio = buffer.getSampleData (ch);
        float* delayLine = delayBuffer.getSampleData (jmin (ch, delayBuffer.getNumChannels() - 1));
        writePos = delayPosition;

        for (int i = 0; i < blockLen; ++i)
        {
            const float dry = audio[i];
            audio[i] += delayLine[writePos];                       // mix in the echo
            delayLine[writePos] = (delayLine[writePos] + dry) * delay; // feed back
            if (++writePos >= delayBuffer.getNumSamples())
                writePos = 0;                                      // wrap the ring
        }
    }
    delayPosition = writePos;

    // Outputs beyond the input count aren't guaranteed to be silent — the
    // host may hand us garbage — so clear them explicitly.
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    // Ask the host where the transport is, so the editor can show it.
    AudioPlayHead::CurrentPositionInfo newTime;

    if (getPlayHead() != nullptr && getPlayHead()->getCurrentPosition (newTime))
        lastPosInfo = newTime;          // got a valid position from the host
    else
        lastPosInfo.resetToDefault();   // host couldn't fill it in — use defaults
}
示例6: processBlock
// Bridges JUCE's processBlock to the UGen graph: copies the host input into
// inputBuffer, points the graph's output at the host buffer, processes the
// graph under a lock, then accumulates a rough RMS meter reading that is
// pushed to the UI every 4 blocks.
void UGenPlugin::processBlock(AudioSampleBuffer& buffer,
MidiBuffer& /*midiMessages*/)
{
clearExtraChannels(buffer); // see below
int numSamples = buffer.getNumSamples();
// Lazily grow the intermediate buffer if the host delivers a larger block.
if(inputBuffer.size() < numSamples)
inputBuffer = Buffer::newClear(numSamples, getNumInputChannels(), true);
// Copy each input channel out of the host buffer and hand it to the graph.
for(int i = 0; i < getNumInputChannels(); i++)
{
float *inputData = inputBuffer.getData(i);
memcpy(inputData, buffer.getSampleData(i), numSamples * sizeof(float));
inputUGen.setInput(inputData, numSamples, i);
}
// The graph renders directly into the host buffer's channel data.
const int numOutputChannels = jmin(getNumOutputChannels(), outputUGen.getNumChannels());
for(int i = 0; i < numOutputChannels; i++)
{
outputUGen.setOutput(buffer.getSampleData(i), numSamples, i);
}
// NOTE(review): manual enter()/exit() is not exception-safe — if
// prepareAndProcessBlock threw, the lock would never be released. A scoped
// lock would be safer; confirm the lock type before changing.
outputLock.enter();
outputUGen.prepareAndProcessBlock(numSamples, blockID, -1);
outputLock.exit();
// quick and dirty metering...
channelLevel0 += buffer.getRMSLevel(UGenAudio::Output0, 0, buffer.getNumSamples());
channelLevel1 += buffer.getRMSLevel(UGenAudio::Output1, 0, buffer.getNumSamples());
numMeasurements++;
// Every 4 blocks, push the accumulated levels to the meters and reset.
// (The sum is sent un-averaged — presumably compensated for in
// setMeterLevel; verify.)
if(numMeasurements >= 4)
{
setMeterLevel(UGenInterface::Meters::OutL, channelLevel0);
setMeterLevel(UGenInterface::Meters::OutR, channelLevel1);
channelLevel0 = channelLevel1 = 0.f;
numMeasurements = 0;
}
// Running sample counter handed to the graph for timing.
blockID += numSamples;
}
示例7: processBlock
// Feedback delay with an optional hard-clipped "distortion" stage applied to
// the wet+dry mix.
void JbcfilterAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
const int samples = buffer.getNumSamples();
const int delayBufferSamples = delayBuffer.getNumSamples();
int dp = delayPosition;
// This is the place where you'd normally do the guts of your plugin's
// audio processing...
// NOTE(review): delayBuffer.getSampleData(channel) is unchecked — assumes
// the delay buffer has at least as many channels as inputs; confirm the
// allocation in prepareToPlay.
for (int channel = 0; channel < getNumInputChannels(); ++channel)
{
float* channelData = buffer.getSampleData (channel);
float* delayData = delayBuffer.getSampleData (channel);
// Every channel restarts from the same delay position so they stay in step.
dp = delayPosition;
for (int i = 0; i < samples; ++i)
{
const float in = channelData[i];
// Mix the delayed signal into the dry signal.
channelData[i] += delayData[dp];
if(distortionEnabledFlag) {
// NOTE(review): only the positive half-wave is clipped (at 0.03);
// negative excursions pass untouched. Asymmetric by construction —
// confirm this is the intended distortion character.
channelData[i] = channelData[i] * distortion;
if(channelData[i] > 0.03) {
channelData[i] = 0.03;
}
}
/*
channelData[i] -= (delayData[dp] +
(std::real(cB) * delayData[dp - 1]) +
(std::real(cC) * delayData[dp - 2]) +
(std::real(cD) * delayData[dp - 3]) +
(std::real(cE) * delayData[dp - 4])) * .0001;
*/
// Write the feedback sample, then advance the circular write position.
delayData[dp] = (delayData[dp] + in) * delay;
dp += 1;
if (dp >= delayBufferSamples)
dp = 0;
}
}
delayPosition = dp;
// In case we have more outputs than inputs, we'll clear any output
// channels that didn't contain input data, (because these aren't
// guaranteed to be empty - they may contain garbage).
for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
{
buffer.clear (i, 0, buffer.getNumSamples());
}
}
示例8: processBlock
// Forwards playback audio to the sound editor component, after triggering
// analyser notifications and (if requested) file analysis on the message
// thread.
//
// BUG FIX: the original dereferenced getPlayHead() unconditionally — it may
// legitimately return nullptr (e.g. when no host transport exists) — and then
// read `info` even when getCurrentPosition() failed, leaving it uninitialised.
// The info struct is now default-initialised and the playhead null-checked.
// Also removed: an empty per-channel loop and an unused channelData pointer.
void NewProjectAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    if(editorIsReady)
        newNotesFromAnalyser->sendActionMessage("N");

    // A newly-dropped file must be loaded on the message thread, not here.
    if(analyseNewFile){
        analyseNewFile = false;
        MessageManager* mm = MessageManager::getInstance();
        void* dummy = this;
        mm->callFunctionOnMessageThread(loadNewWaveFile, dummy);
    }

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }

    // Get the transport position from the host, falling back to defaults when
    // no playhead is available or the query fails.
    AudioPlayHead::CurrentPositionInfo info;
    info.resetToDefault();
    if (AudioPlayHead* playHead = getPlayHead())
        playHead->getCurrentPosition(info);

    if(soundEditor != 0 && !loadingNewComponent)
        soundEditor->getAudioSamplesToPlay(buffer, info.ppqPositionOfLastBarStart, getSampleRate(), currentSamplesPerBlock);
}
示例9: processBlock
// Runs the per-channel delay processing, then advances the shared circular
// delay-buffer index by one block length.
void BiasedDelay::processBlock(AudioSampleBuffer& buffer, int numInputChannels,
                               int numOutputChannels, MidiBuffer& midiMessages){
    // Atm we're assuming matching input/output channel counts
    jassert(numInputChannels==numOutputChannels);

    const int numSamples = buffer.getNumSamples();
    for (int channel=0; channel<numInputChannels; channel++)
    {
        processChannelBlock(numSamples,
                            buffer.getSampleData(channel),
                            delayBuffer.getSampleData(channel),
                            delayBufferIdx);
    }

    // BUG FIX: the original computed `% getSampleDelay(...)` unconditionally;
    // a zero-length delay (TIME parameter at/near its minimum) would be a
    // modulo by zero — undefined behaviour. Guard it, keeping the previous
    // index when the delay length is not positive.
    const int delayLength = getSampleDelay(getParameterValue(PARAMETER_TIME));
    jassert(delayLength > 0);
    if (delayLength > 0)
        delayBufferIdx = (delayBufferIdx + numSamples) % delayLength;
}
示例10: processBlock
// Ring modulator: applies drive, modulates each sample by a sine carrier whose
// frequency can be wobbled by an LFO, then dry/wet mixes the result.
//
// BUG FIX: the original read buffer.getSampleData(1) unconditionally — with a
// mono buffer that is out-of-range access. The block now bails out (leaving
// audio untouched) unless at least two channels are present.
void RingmodAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // Keep STK's global sample rate in sync with the host's.
    if (stk::Stk::sampleRate() != RingmodAudioProcessor::getSampleRate()) {
        stk::Stk::setSampleRate(RingmodAudioProcessor::getSampleRate());
    }

    if (UserParams[MasterBypass] == 1)
        return; // bypassed — pass audio through untouched

    if (buffer.getNumChannels() < 2)
        return; // stereo-only effect; avoid reading a non-existent channel 1

    float* leftData = buffer.getSampleData(0);
    float* rightData = buffer.getSampleData(1);
    for (int i = 0; i < buffer.getNumSamples(); i++) {
        // Input drive (Drive in [0..], unity at 0).
        leftData[i]  = leftData[i]  * (UserParams[Drive] + 1.0f);
        rightData[i] = rightData[i] * (UserParams[Drive] + 1.0f);

        // Carrier frequency: centre value from the log-mapped Frequency knob,
        // optionally modulated by the LFO (getTick() only advances the LFO
        // when LFOAmount is active, as in the original).
        // NOTE(review): with the LFO off the carrier is centre + centre, i.e.
        // DOUBLE the knob frequency — looks suspicious but is preserved;
        // confirm intent.
        const float centre = lin2Log(lowFreq, highFreq, UserParams[Frequency]);
        if (UserParams[LFOAmount] > 0) {
            mySine.setFrequency(centre * myLFO.getTick() + centre);
        } else {
            mySine.setFrequency(centre + centre);
        }

        // Ring-modulate and dry/wet mix.
        float tick = mySine.tick();
        float leftEffect = leftData[i] * tick;
        float rightEffect = rightData[i] * tick;
        leftData[i]  = (leftData[i]  * (1.0f - UserParams[Mix])) + (UserParams[Mix] * leftEffect);
        rightData[i] = (rightData[i] * (1.0f - UserParams[Mix])) + (UserParams[Mix] * rightEffect);
    }
}
示例11: renderNextBlock
// Renders a 3-oscillator voice additively into outputBuffer: a base sine plus
// two further sines crossfaded against each other by env1, the whole mix
// scaled by env, with an LFO adding vibrato to the base oscillator.
void renderNextBlock (AudioSampleBuffer& outputBuffer, int startSample, int numSamples)
{
if (angleDelta != 0.0)
{
while (--numSamples >= 0)
{
// Sum 3 oscillator outputs
// osc1 fades in and osc2 fades out as env1 rises; env scales the total.
const float currentSample =
(float) (sin (currentAngle) +
sin (currentAngle1) * env1->getOutput() +
sin (currentAngle2) * (1 - env1->getOutput())
) * env->getOutput();
for (int i = outputBuffer.getNumChannels(); --i >= 0;)
*outputBuffer.getSampleData(i, startSample) += currentSample;
// Increment oscillators
// Base oscillator gets vibrato whose depth follows env1.
lfoAngle += lfoDelta;
currentAngle += angleDelta +
(1 + sin(lfoAngle)) * .005 * env1->getOutput();
currentAngle1 += angleDelta1 * env1->getOutput();
currentAngle2 += angleDelta2 * env1->getOutput() / 2;
// NOTE(review): osc2's phase wraps at +/- level/4 rather than 2*pi —
// unusual, but preserved; presumably a deliberate timbre choice.
if (currentAngle2 >= level / 4) {
currentAngle2 = level / 4 * -1;
}
// Push the envelope
env->stepEnvelope();
env1->stepEnvelope();
// And mess with the lfo
modifyLfo((1 - env1->getOutput()) * lfoPerSecond, &lfoDelta);
++startSample;
}
}
// Note is done playing when output is 0 and envelope is in release
if (env->getState() == env->ENV_RELEASE && env->getOutput() <= 0) {
clearCurrentNote();
angleDelta = 0.0;
}
}
示例12: writeFromAudioReader
// Streams audio from `reader` into this writer in 16k-sample chunks,
// converting in place between fixed-point and float data when the reader's
// sample format differs from the writer's. Returns false on any read/write
// failure.
bool AudioFormatWriter::writeFromAudioReader (AudioFormatReader& reader,
int64 startSample,
int64 numSamplesToRead)
{
const int bufferSize = 16384;
AudioSampleBuffer tempBuffer ((int) numChannels, bufferSize);
// Null-terminated array of per-channel pointers aliasing tempBuffer's float
// data as int*, since the reader writes raw sample words either way.
// NOTE(review): assumes numChannels < 128 — no explicit check here.
int* buffers [128] = { 0 };
for (int i = tempBuffer.getNumChannels(); --i >= 0;)
buffers[i] = reinterpret_cast<int*> (tempBuffer.getSampleData (i, 0));
// A negative count means "everything the reader has".
if (numSamplesToRead < 0)
numSamplesToRead = reader.lengthInSamples;
while (numSamplesToRead > 0)
{
const int numToDo = (int) jmin (numSamplesToRead, (int64) bufferSize);
if (! reader.read (buffers, (int) numChannels, startSample, numToDo, false))
return false;
// Formats disagree: convert every (non-null) channel in place.
if (reader.usesFloatingPointData != isFloatingPoint())
{
int** bufferChan = buffers;
while (*bufferChan != nullptr)
{
void* const b = *bufferChan++;
if (isFloatingPoint())
FloatVectorOperations::convertFixedToFloat ((float*) b, (int*) b, 1.0f / 0x7fffffff, numToDo);
else
convertFloatsToInts ((int*) b, (float*) b, numToDo);
}
}
if (! write (const_cast <const int**> (buffers), numToDo))
return false;
numSamplesToRead -= numToDo;
startSample += numToDo;
}
return true;
}
示例13: processNextBlock
//==============================================================================
// Copies incoming audio into the analyser's circular capture buffers
// (_dataA/_dataB). Non-blocking: if the analyser thread currently holds the
// data lock, this block is simply dropped.
void SpectrumAnalyzerState::processNextBlock (AudioSampleBuffer& buffer)
{
const ScopedTryLock scopedTryLock (dataLock);
if (scopedTryLock.isLocked() && _dataA)
{
float *pA, *pB;
unsigned int m, n, nframes = buffer.getNumSamples();
// A channel is "active" when its input index is >= 0; inactive channels
// are zero-filled into the ring instead.
// NOTE(review): reads buffer channels 0 and 1 unchecked — confirm the
// caller guarantees at least a stereo buffer.
pA = (_inputA >= 0) ? buffer.getSampleData (0) : 0;
pB = (_inputB >= 0) ? buffer.getSampleData (1) : 0;
m = nframes;
n = _size - _dind; // space remaining before the ring buffer wraps
// First span: fill up to the end of the ring, then wrap to the start.
// (Presumably nframes <= _size; a block larger than the ring would
// overflow the second copy — verify against the buffer sizing.)
if (m >= n)
{
if (pA) memcpy (_dataA + _dind, pA, sizeof(float) * n);
else memset (_dataA + _dind, 0, sizeof(float) * n);
if (pB) memcpy (_dataB + _dind, pB, sizeof(float) * n);
else memset (_dataB + _dind, 0, sizeof(float) * n);
m -= n;
pA += n;
pB += n;
_dind = 0;
}
// Second span: whatever remains after the wrap.
if (m)
{
if (pA) memcpy (_dataA + _dind, pA, sizeof(float) * m);
else memset (_dataA + _dind, 0, sizeof(float) * m);
if (pB) memcpy (_dataB + _dind, pB, sizeof(float) * m);
else memset (_dataB + _dind, 0, sizeof(float) * m);
_dind += m;
}
// Count samples since the last analysis step; keep only the remainder so
// _scnt stays below _step.
_scnt += nframes;
int k = _scnt / _step;
if (k) _scnt -= k * _step;
}
}
示例14: writeFromAudioSampleBuffer
// Writes a sub-range of `source` through the writer. When the range starts at
// sample 0 the buffer's own channel array can be passed straight through;
// otherwise per-channel offset pointers are gathered into a local,
// null-terminated array first.
bool AudioFormatWriter::writeFromAudioSampleBuffer (const AudioSampleBuffer& source, int startSample, int numSamples)
{
    const int numSourceChannels = source.getNumChannels();
    jassert (startSample >= 0 && startSample + numSamples <= source.getNumSamples() && numSourceChannels > 0);

    if (startSample == 0)
        return writeFromFloatArrays ((const float**) source.getArrayOfChannels(), numSourceChannels, numSamples);

    const float* chans [256];

    // BUG FIX: the bounds assertion checked the writer's channel count
    // (numChannels) while the loop below indexes chans[] with
    // numSourceChannels — and also stores a null terminator at
    // chans[numSourceChannels]. Assert on the value actually used.
    jassert (numSourceChannels < numElementsInArray (chans));

    for (int i = 0; i < numSourceChannels; ++i)
        chans[i] = source.getSampleData (i, startSample);

    chans[numSourceChannels] = nullptr; // terminator consumed by the writer

    return writeFromFloatArrays (chans, numSourceChannels, numSamples);
}
示例15: processBlock
// Template processBlock stub: exposes each input channel's sample pointer so
// real DSP can be dropped in, then silences any surplus output channels.
void FILTERCLASSNAME::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numInputs = getNumInputChannels();
    const int numOutputs = getNumOutputChannels();

    // Per-channel processing goes here.
    for (int channel = 0; channel < numInputs; ++channel)
    {
        float* channelData = buffer.getSampleData (channel);
        // ..do something to the data...
    }

    // Output channels beyond the input count aren't guaranteed to be empty —
    // the host may hand over garbage — so clear them explicitly.
    for (int i = numInputs; i < numOutputs; ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}