本文整理汇总了C++中AudioSampleBuffer类的典型用法代码示例。如果您正苦于以下问题:C++ AudioSampleBuffer类的具体用法?C++ AudioSampleBuffer怎么用?C++ AudioSampleBuffer使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了AudioSampleBuffer类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: processBlock
void SecondOrderIIRFilter::processBlock (AudioSampleBuffer& buffer)
{
    // Direct-form-II biquad filter, with an independent state pair
    // (z1, z2) maintained per channel.
    const int numChannels = buffer.getNumChannels();
    const int numSamples  = buffer.getNumSamples();

    for (int ch = 0; ch < numChannels; ++ch)
    {
        float* data = buffer.getSampleData (ch);

        for (int n = 0; n < numSamples; ++n)
        {
            const float input = data[n];

            // w[n] = x[n] - a1*w[n-1] - a2*w[n-2]
            const double w = input - a1 * z1[ch] - a2 * z2[ch];

            // y[n] = b0*w[n] + b1*w[n-1] + b2*w[n-2]
            double out = b0 * w
                       + b1 * z1[ch]
                       + b2 * z2[ch];

            // This is copied from juce_IIRFilter.cpp, processSamples(),
            // line 101: flush denormal-range values to zero on Intel.
#if JUCE_INTEL
            if (! (out < -1.0e-8 || out > 1.0e-8))
                out = 0.0;
#endif

            // Shift the filter state (z2 must be updated before z1).
            z2.set (ch, z1[ch]);
            z1.set (ch, w);

            data[n] = float (out);
        }
    }
}
示例2: addBlock
void CtrlrWaveform::addBlock (double sampleNumberInsource, const AudioSampleBuffer &newData, int startOffsetInBuffer, int numSamples)
{
    // Appends a block of incoming audio to the local waveform copy and
    // forwards the same block to the thumbnail for display, then repaints.
    int sampleToAddAt = 0;

    if (audioBufferCopy.getNumSamples() == 0)
    {
        /* Initialize the buffer */
        audioBufferCopy.setSize (newData.getNumChannels(), newData.getNumSamples());
    }
    else
    {
        /* it's already filled, just extend it */
        sampleToAddAt = audioBufferCopy.getNumSamples();

        // Also grow the channel count when the incoming data carries more
        // channels; setSize(..., true) preserves the existing contents.
        if (newData.getNumChannels() > audioBufferCopy.getNumChannels())
        {
            audioBufferCopy.setSize (newData.getNumChannels(), audioBufferCopy.getNumSamples() + newData.getNumSamples(), true);
        }
        else
        {
            audioBufferCopy.setSize (audioBufferCopy.getNumChannels(), audioBufferCopy.getNumSamples() + newData.getNumSamples(), true);
        }
    }

    // NOTE(review): the buffer is extended by newData.getNumSamples() but only
    // numSamples samples are copied below -- if callers pass numSamples smaller
    // than the buffer length, the tail stays uninitialised. Confirm callers
    // always pass the full block length.
    for (int i=0; i<newData.getNumChannels(); i++)
    {
        audioBufferCopy.copyFrom (i, sampleToAddAt, newData, i, startOffsetInBuffer, numSamples);
    }

    audioThumbnail->addBlock (sampleNumberInsource, newData, startOffsetInBuffer, numSamples);
    repaint();
}
示例3: processBlock
void PitchestimatorpluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    int bufsize = buffer.getNumSamples();

    //main process loop
    // Forward-FFT each input channel (fftData ends up holding the spectrum of
    // the last channel processed) and apply the output gain in place.
    for (int channel = 0; channel < getNumInputChannels(); ++channel)
    {
        float* channelData = buffer.getWritePointer (channel);
        fft->processForward(channelData, fftData, bufsize, nFFT);
        buffer.applyGain (channel, 0, bufsize, gain);
    }

    // Magnitude spectrum of the last processed channel.
    // NOTE(review): this reads bufsize complex bins from fftData -- assumes
    // bufsize never exceeds the number of FFT bins (nFFT); confirm.
    for (int i=0; i<bufsize; i++) {
        X[i] = fft->cartopolRadius(fftData[i][0], fftData[i][1]);
    }

    // Harmonic-summation pitch estimation over the candidate f0 range; the
    // result is formatted with one decimal place for the UI.
    HS->generateCost(X, f0Area, numberOfHarmonics, bufsize, f0AreaSize, getSampleRate(), nFFT);
    pitchEstimate = HS->estimatePitch(f0Area, f0AreaSize);
    pitchText = String (pitchEstimate, 1);
}
示例4: processBlock
//this plugin can only handle one channel of input and has only one channel of output
void SpectralDelayPluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();

    // BUGFIX: the original stored `new double[numSamples]` in a ScopedPointer,
    // which releases its object with `delete`, not `delete[]` -- undefined
    // behaviour for array allocations. HeapBlock is the JUCE idiom for plain
    // numeric scratch storage (malloc/free, so no delete mismatch is possible).
    HeapBlock<double> doubleInput (numSamples);
    HeapBlock<double> doubleOutput (numSamples);
    HeapBlock<float>  functionOutput (numSamples);

    // Mono processor: only channel 0 is read and written.
    float* channelData = buffer.getSampleData (0);

    // Widen the input to double for the filters and zero the accumulator.
    for (int i = 0; i < numSamples; ++i)
    {
        doubleInput[i] = channelData[i];
        functionOutput[i] = 0.0f;
    }

    // Run each band filter, push its output through that band's delay line,
    // and sum the delayed signals into functionOutput.
    for (int i = 0; i < numFilters; ++i)
    {
        FFTfilter& currentFilter = *(filterVector[i]);
        CircularBuffer<double>& currentDelayLine = *(delayLineVector[i]);

        currentFilter.filter(doubleInput, doubleOutput, numSamples);

        //copy filter output to the correct delay line and copy to the function's output at the same time
        for (int j = 0; j < numSamples; ++j)
        {
            currentDelayLine.addData(doubleOutput[j]);
            functionOutput[j] += float(currentDelayLine[delayAmounts[i]]);
        }
    }

    //clear the buffer and copy the output data to it
    buffer.clear();
    buffer.copyFrom(0, 0, functionOutput, numSamples);
}
示例5: getNextAudioBlock
virtual void getNextAudioBlock(const AudioSourceChannelInfo& bufferToFill)
{
    // Streams from the raw sample array _buffer into channel 0 of the
    // destination, silencing every other channel, and pads the remainder of
    // the block with silence once the source is exhausted.
    AudioSampleBuffer* destBuffer = bufferToFill.buffer;

    // Never read past the end of the source data.
    const int len = std::min(bufferToFill.numSamples, static_cast<int>(_len-_pos));

    if (destBuffer)
    {
        for (int channel=0; channel<destBuffer->getNumChannels(); ++channel)
        {
            if (channel == 0 && _buffer)
            {
                destBuffer->copyFrom(channel, bufferToFill.startSample, _buffer+_pos, len);

                if (len < bufferToFill.numSamples)
                {
                    // Source ran out mid-block: silence the tail. Note this
                    // clear() overload clears the region on *all* channels.
                    const int startClear = bufferToFill.startSample + len;
                    const int lenClear = bufferToFill.numSamples - len;
                    destBuffer->clear(startClear, lenClear);
                }
            }
            else
            {
                // Non-primary channels (or no source data at all): silence.
                destBuffer->clear(channel, bufferToFill.startSample, len);
            }
        }
    }

    // Advance the read position even when there was no destination buffer.
    _pos += len;
}
示例6: process
void FileReader::process (AudioSampleBuffer& buffer)
{
    // Number of source samples required to fill this output buffer after
    // rescaling from the file's default rate to the system sample rate.
    const int samplesNeededPerBuffer = int (float (buffer.getNumSamples()) * (getDefaultSampleRate() / m_sysSampleRate));
    m_samplesPerBuffer.set(samplesNeededPerBuffer);
    // FIXME: needs to account for the fact that the ratio might not be an exact
    // integer value

    // if cache window id == 0, we need to read and cache BUFFER_WINDOW_CACHE_SIZE more buffer windows
    if (bufferCacheWindow == 0)
    {
        switchBuffer();
    }

    for (int i = 0; i < currentNumChannels; ++i)
    {
        // offset readBuffer index by current cache window count * buffer window size * num channels
        input->processChannelData (*readBuffer + (samplesNeededPerBuffer * currentNumChannels * bufferCacheWindow),
                                   buffer.getWritePointer (i, 0),
                                   i,
                                   samplesNeededPerBuffer);
    }

    setTimestampAndSamples(timestamp, samplesNeededPerBuffer);
    timestamp += samplesNeededPerBuffer;

    // Wrap the displayed playback time between startSample and stopSample
    // for the editor's current-time readout.
    static_cast<FileReaderEditor*> (getEditor())->setCurrentTime(samplesToMilliseconds(startSample + timestamp % (stopSample - startSample)));

    // Advance (and wrap) the cache-window index.
    bufferCacheWindow += 1;
    bufferCacheWindow %= BUFFER_WINDOW_CACHE_SIZE;
}
示例7: writeBuffer
void writeBuffer (const AudioSampleBuffer& buffer, Thread& thread)
{
    // Converts non-interleaved float audio into interleaved 16-bit
    // little-endian device buffers and enqueues them on the player queue,
    // blocking in waitForFreeBuffer() until a device buffer is available.
    jassert (buffer.getNumChannels() == bufferList.numChannels);
    jassert (buffer.getNumSamples() < bufferList.numSamples * bufferList.numBuffers);

    int offset = 0;
    int numSamples = buffer.getNumSamples();

    while (numSamples > 0)
    {
        int16* const destBuffer = bufferList.waitForFreeBuffer (thread);

        // A null buffer means the thread has been asked to stop.
        if (destBuffer == nullptr)
            break;

        for (int i = 0; i < bufferList.numChannels; ++i)
        {
            // Destination: interleaved int16; source: one float channel,
            // starting at the current offset.
            typedef AudioData::Pointer <AudioData::Int16, AudioData::LittleEndian, AudioData::Interleaved, AudioData::NonConst> DstSampleType;
            typedef AudioData::Pointer <AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::Const> SrcSampleType;

            DstSampleType dstData (destBuffer + i, bufferList.numChannels);
            SrcSampleType srcData (buffer.getSampleData (i, offset));
            dstData.convertSamples (srcData, bufferList.numSamples);
        }

        check ((*playerBufferQueue)->Enqueue (playerBufferQueue, destBuffer, bufferList.getBufferSizeBytes()));
        bufferList.bufferSent();

        // Each device buffer holds exactly bufferList.numSamples frames.
        numSamples -= bufferList.numSamples;
        offset += bufferList.numSamples;
    }
}
示例8: processBlock
void IAAEffectProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer&)
{
    // Current target gain from the parameter tree.
    const float targetGain = *parameters.getRawParameterValue ("gain");

    const int numInputChannels  = getTotalNumInputChannels();
    const int numOutputChannels = getTotalNumOutputChannels();
    const int blockLength       = buffer.getNumSamples();

    // Output channels beyond the inputs may contain garbage -- silence them.
    for (int ch = numInputChannels; ch < numOutputChannels; ++ch)
        buffer.clear (ch, 0, buffer.getNumSamples());

    // Ramp from last block's gain to the new value so there is no audible
    // discontinuity at the block boundary, then report each channel's peak
    // level to any registered meter listeners.
    for (int ch = 0; ch < numInputChannels; ++ch)
    {
        buffer.applyGainRamp (ch, 0, blockLength, previousGain, targetGain);

        meterListeners.call (&IAAEffectProcessor::MeterListener::handleNewMeterValue,
                             ch,
                             buffer.getMagnitude (ch, 0, blockLength));
    }

    previousGain = targetGain;

    // Now ask the host for the current time so we can store it to be displayed later.
    updateCurrentTimeInfoFromHost (lastPosInfo);
}
示例9: processBlock
void JuceVibAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // When bypassed, delegate to the stock pass-through handler and stop.
    if (bypassed)
    {
        processBlockBypassed (buffer, midiMessages);
        return;
    }

    const int numInputChannels  = getTotalNumInputChannels();
    const int numOutputChannels = getTotalNumOutputChannels();

    // Pull the current LFO settings from the host-facing parameters and
    // push them into the vibrato engine.
    lfoFreq = freqParam->get();
    lfoAmp  = depthParam->get();
    Vib->setFreq (lfoFreq * maxFreq);
    Vib->setDepth (lfoAmp);

    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    // This is here to avoid people getting screaming feedback
    // when they first compile a plugin, but obviously you don't need to keep
    // this code if your algorithm always overwrites all the output channels.
    for (int ch = numInputChannels; ch < numOutputChannels; ++ch)
        buffer.clear (ch, 0, buffer.getNumSamples());

    // The vibrato processes in place: source and destination are the same.
    float** channels = buffer.getArrayOfWritePointers();
    Vib->process (channels, channels, buffer.getNumSamples());
}
示例10: handleIncomingMidiBuffer
void Pfm2AudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // MIDI-oriented processor: handles incoming MIDI, outputs silence, and
    // forwards queued parameter changes to the editor.
    handleIncomingMidiBuffer(midiMessages, buffer.getNumSamples());

    // Clear sound
    for (int i = 0; i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    // dispatch realtime events to non realtime observer
    parameterSet.processRealtimeEvents();

    midiMessageCollector.removeNextBlockOfMessages(midiMessages, buffer.getNumSamples());

    /*
    if (midiMessages.getNumEvents() > 0) {
        printf("processBlock : %d midi messages \n", midiMessages.getNumEvents());
    }
    */

    // Hand pending parameter updates to the editor. try_lock keeps the audio
    // thread non-blocking: if another thread holds the mutex we simply skip
    // and retry on the next block.
    if (parametersToUpdate.size() > 0 ) {
        if (parametersToUpdateMutex.try_lock()) {
            std::unordered_set<const char*> newSet;
            // swap() empties the shared set while the lock is held only
            // briefly; the local copy is processed outside the critical section.
            newSet.swap(parametersToUpdate);
            parametersToUpdateMutex.unlock();

            if (pfm2Editor) {
                pfm2Editor->updateUIWith(newSet);
            }
        }
    }
}
示例11: readNextBlock
void readNextBlock (AudioSampleBuffer& buffer, Thread& thread)
{
    // Mirror of writeBuffer(): pulls interleaved 16-bit little-endian data
    // from the recorder's buffer queue and de-interleaves it into the float
    // destination buffer, one queue buffer at a time.
    jassert (buffer.getNumChannels() == bufferList.numChannels);
    jassert (buffer.getNumSamples() < bufferList.numSamples * bufferList.numBuffers);
    // The destination must be a whole number of queue buffers long.
    jassert ((buffer.getNumSamples() % bufferList.numSamples) == 0);

    int offset = 0;
    int numSamples = buffer.getNumSamples();

    while (numSamples > 0)
    {
        int16* const srcBuffer = bufferList.waitForFreeBuffer (thread);

        // A null buffer means the thread has been asked to stop.
        if (srcBuffer == nullptr)
            break;

        for (int i = 0; i < bufferList.numChannels; ++i)
        {
            // Destination: one float channel at the current offset;
            // source: interleaved int16 device data.
            typedef AudioData::Pointer <AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::NonConst> DstSampleType;
            typedef AudioData::Pointer <AudioData::Int16, AudioData::LittleEndian, AudioData::Interleaved, AudioData::Const> SrcSampleType;

            DstSampleType dstData (buffer.getSampleData (i, offset));
            SrcSampleType srcData (srcBuffer + i, bufferList.numChannels);
            dstData.convertSamples (srcData, bufferList.numSamples);
        }

        // Hand the consumed buffer back to the recorder's queue.
        enqueueBuffer (srcBuffer);

        numSamples -= bufferList.numSamples;
        offset += bufferList.numSamples;
    }
}
示例12:
void Csc344filterAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();
    int channel, dp = 0;

    // Feedback delay: each input channel is mixed with the delayed signal,
    // which is fed back into the (possibly shared mono) delay buffer scaled
    // by the `delay` feedback amount.
    for (channel = 0; channel < getNumInputChannels(); ++channel)
    {
        float* channelData = buffer.getSampleData (channel);
        // Channels beyond the delay buffer's count share its last channel.
        float* delayData = delayBuffer.getSampleData (jmin (channel, delayBuffer.getNumChannels() - 1));
        dp = delayPosition;

        for (int i = 0; i < numSamples; ++i)
        {
            const float in = channelData[i];
            channelData[i] += delayData[dp];
            delayData[dp] = (delayData[dp] + in) * delay;

            // Circular-buffer wrap-around.
            if (++dp >= delayBuffer.getNumSamples())
                dp = 0;
        }
    }

    // BUGFIX: persist the write position so the next block continues where
    // this one stopped. Without this, every block rewrote the delay line from
    // the same offset (the JUCE demo plugin this mirrors assigns
    // delayPosition after the channel loop).
    delayPosition = dp;

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
示例13: source
bool AudioCDBurner::addAudioTrack (AudioSource* audioSource, int numSamples)
{
    // Pulls audio from the source in CD-block-sized chunks, converts it to
    // interleaved 16-bit stereo and appends it to the disc as a single track.
    if (audioSource == 0)
        return false;

    // Take ownership: the source is deleted when this function returns.
    ScopedPointer<AudioSource> source (audioSource);

    long bytesPerBlock;
    HRESULT hr = pimpl->redbook->GetAudioBlockSize (&bytesPerBlock);

    // 2 channels * 2 bytes per sample = 4 bytes per stereo frame.
    const int samplesPerBlock = bytesPerBlock / 4;
    bool ok = true;

    // NOTE(review): dividing by (bytesPerBlock * 4) rather than
    // (bytesPerBlock / 4) looks inconsistent with samplesPerBlock above --
    // verify the unit CreateAudioTrack expects before changing anything.
    hr = pimpl->redbook->CreateAudioTrack ((long) numSamples / (bytesPerBlock * 4));

    HeapBlock <byte> buffer (bytesPerBlock);
    AudioSampleBuffer sourceBuffer (2, samplesPerBlock);
    int samplesDone = 0;

    source->prepareToPlay (samplesPerBlock, 44100.0);

    while (ok)
    {
        {
            // Fill sourceBuffer with the next chunk; clearing first means a
            // short final read leaves silence rather than stale data.
            AudioSourceChannelInfo info;
            info.buffer = &sourceBuffer;
            info.numSamples = samplesPerBlock;
            info.startSample = 0;
            sourceBuffer.clear();
            source->getNextAudioBlock (info);
        }

        buffer.clear (bytesPerBlock);

        // Interleave the two float channels into 16-bit little-endian CD data
        // (left samples at even frames, right at odd, stride of 2).
        typedef AudioData::Pointer <AudioData::Int16, AudioData::LittleEndian,
                                    AudioData::Interleaved, AudioData::NonConst> CDSampleFormat;

        typedef AudioData::Pointer <AudioData::Float32, AudioData::NativeEndian,
                                    AudioData::NonInterleaved, AudioData::Const> SourceSampleFormat;
        CDSampleFormat left (buffer, 2);
        left.convertSamples (SourceSampleFormat (sourceBuffer.getSampleData (0)), samplesPerBlock);
        CDSampleFormat right (buffer + 2, 2);
        right.convertSamples (SourceSampleFormat (sourceBuffer.getSampleData (1)), samplesPerBlock);

        hr = pimpl->redbook->AddAudioTrackBlocks (buffer, bytesPerBlock);

        if (FAILED (hr))
            ok = false;

        samplesDone += samplesPerBlock;

        if (samplesDone >= numSamples)
            break;
    }

    hr = pimpl->redbook->CloseAudioTrack();

    return ok && hr == S_OK;
}
示例14: addBlock
void AudioThumbnail::addBlock (const int64 startSample, const AudioSampleBuffer& incoming,
                               int startOffsetInBuffer, int numSamples)
{
    // Reduces the incoming audio to min/max pairs at the thumbnail's
    // resolution (one pair per samplesPerThumbSample source samples) and
    // merges them into the stored levels.
    jassert (startSample >= 0);

    // Range of thumbnail cells touched by this block (last index rounds up).
    const int firstThumbIndex = (int) (startSample / samplesPerThumbSample);
    const int lastThumbIndex = (int) ((startSample + numSamples + (samplesPerThumbSample - 1)) / samplesPerThumbSample);
    const int numToDo = lastThumbIndex - firstThumbIndex;

    if (numToDo > 0)
    {
        const int numChans = jmin (channels.size(), incoming.getNumChannels());

        // Scratch storage: a flat array of min/max cells laid out per channel,
        // plus a channel-pointer table handed to setLevels().
        const HeapBlock<MinMaxValue> thumbData ((size_t) (numToDo * numChans));
        const HeapBlock<MinMaxValue*> thumbChannels ((size_t) numChans);

        for (int chan = 0; chan < numChans; ++chan)
        {
            const float* const sourceData = incoming.getSampleData (chan, startOffsetInBuffer);
            MinMaxValue* const dest = thumbData + numToDo * chan;
            thumbChannels [chan] = dest;

            for (int i = 0; i < numToDo; ++i)
            {
                float low, high;
                const int start = i * samplesPerThumbSample;
                // The final cell may span fewer than samplesPerThumbSample samples.
                FloatVectorOperations::findMinAndMax (sourceData + start, jmin (samplesPerThumbSample, numSamples - start), low, high);
                dest[i].setFloat (low, high);
            }
        }

        setLevels (thumbChannels, firstThumbIndex, numChans, numToDo);
    }
}
示例15: processBlock
void Plugin::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // NOTE(review): this only rejects the case where *both* channel counts
    // differ from 2 -- presumably '||' was intended; confirm before changing.
    if (getNumInputChannels() != 2 && getNumOutputChannels() != 2)
        return;

    float* left  = buffer.getWritePointer(0);
    float* right = buffer.getWritePointer(1);

    const int totalSamples = buffer.getNumSamples();
    const int fullBlocks   = totalSamples / kInternalBlocksize;

    // Sync the quantizer with the host transport; with no playhead (or a
    // reported bpm of 0) quantization is disabled instead.
    if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition(pos))
    {
        if (pos.bpm == 0.0f)
        {
            parameters->setQuantizationDisabled();
            parameters->setParameter(kDelayQuant, 0.0f, false);
            parameters->setParameter(kIotQuant, 0.0f, false);
            parameters->setParameter(kDurQuant, 0.0f, false);
        }
        else
        {
            parameters->time_quantizer->setPositionInfo(&pos);
        }
    }
    else
    {
        parameters->setQuantizationDisabled();
    }

    block_sample_pos = 0;

    // Granulate the buffer in fixed-size internal blocks...
    for (int b = 0; b < fullBlocks; ++b)
    {
        granulator->processInternalBlock(left, right, kInternalBlocksize);
        left  += kInternalBlocksize;
        right += kInternalBlocksize;
        parameters->time_quantizer->incrementPositionInfo();
    }

    // ...then handle whatever partial block remains at the end.
    const int remainder = totalSamples % kInternalBlocksize;
    if (remainder)
        granulator->processInternalBlock(left, right, remainder);
}