This article collects typical usage examples of the C++ method AudioSampleBuffer::copyFrom. If you are wondering what AudioSampleBuffer::copyFrom does, or how to use it in your own code, the curated examples below may help. You can also explore further usage examples of the containing class, AudioSampleBuffer.
The following shows 13 code examples of AudioSampleBuffer::copyFrom, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
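For orientation before the examples: AudioSampleBuffer is JUCE's buffer of float samples (a typedef of AudioBuffer<float> in current JUCE versions), and copyFrom is used in two common forms, copying a channel range from another buffer or copying from a raw float array, optionally with a gain. The following is a minimal sketch of both forms, not taken from any project below; the buffer sizes, channel indices, and the <JuceHeader.h> umbrella include are illustrative assumptions.

#include <JuceHeader.h>   // assumed umbrella header for a JUCE project
#include <vector>

void copyFromSketch()
{
    juce::AudioSampleBuffer source (2, 512);   // 2 channels, 512 samples (arbitrary sizes)
    juce::AudioSampleBuffer dest   (2, 512);
    source.clear();
    dest.clear();

    // 1) Buffer-to-buffer copy:
    //    copyFrom (destChannel, destStartSample, sourceBuffer, sourceChannel, sourceStartSample, numSamples)
    dest.copyFrom (0, 0, source, 0, 0, source.getNumSamples());

    // 2) Copy from a raw float array:
    //    copyFrom (destChannel, destStartSample, source pointer, numSamples)
    std::vector<float> raw (512, 0.25f);
    dest.copyFrom (1, 0, raw.data(), (int) raw.size());

    // 3) As (2), but applying a gain to the source while copying:
    dest.copyFrom (1, 0, raw.data(), (int) raw.size(), 0.5f);
}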
Example 1: readAllFromBuffer
int DataBuffer::readAllFromBuffer(AudioSampleBuffer& data, uint64* timestamp, int16* eventCodes, int maxSize)
{
    // check to see if the maximum size is smaller than the total number of available ints
    // Better version (1/27/14)?
    int numReady = abstractFifo.getNumReady();
    int numItems = (maxSize < numReady) ? maxSize : numReady;

    // Original version:
    //int numItems = (maxSize < abstractFifo.getNumReady()) ?
    //               maxSize : abstractFifo.getNumReady();

    int startIndex1, blockSize1, startIndex2, blockSize2;
    abstractFifo.prepareToRead(numItems, startIndex1, blockSize1, startIndex2, blockSize2);

    if (blockSize1 > 0)
    {
        for (int chan = 0; chan < data.getNumChannels(); chan++)
        {
            data.copyFrom(chan,         // destChan
                          0,            // destStartSample
                          buffer,       // source
                          chan,         // sourceChannel
                          startIndex1,  // sourceStartSample
                          blockSize1);  // numSamples
        }

        memcpy(timestamp, timestampBuffer + startIndex1, 8);
        memcpy(eventCodes, eventCodeBuffer + startIndex1, blockSize1 * 2);
    }
    else
    {
        memcpy(timestamp, timestampBuffer + startIndex2, 8);
    }

    if (blockSize2 > 0)
    {
        for (int chan = 0; chan < data.getNumChannels(); chan++)
        {
            data.copyFrom(chan,         // destChan
                          blockSize1,   // destStartSample
                          buffer,       // source
                          chan,         // sourceChannel
                          startIndex2,  // sourceStartSample
                          blockSize2);  // numSamples
        }

        memcpy(eventCodes + blockSize1, eventCodeBuffer + startIndex2, blockSize2 * 2);
    }

    abstractFifo.finishedRead(numItems);

    return numItems;
}
Example 2: processBlock
void OOTrack::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    updatePendingLooperState(OOServer::getInstance()->looperState.timeInSamples);
    OOServer::getInstance()->updateGlobalTime(this, buffer.getNumSamples());

    // RECORDING
    if (*isRecording)
    {
        if (recordNeedle.get() + buffer.getNumSamples() > getSampleRate() * MAX_LOOP_LENGTH_S)
        {
            *shouldRecord = false;
            *isRecording = false;
        }

        monoLoopSample.copyFrom(0, recordNeedle.get(), buffer, 0, 0, buffer.getNumSamples());
        recordNeedle += buffer.getNumSamples();
        rmsOut = (1 - rmsAlpha) * rmsOut + rmsAlpha * buffer.getRMSLevel(0, 0, buffer.getNumSamples());
    }
    else
    {
        streamBipBuffer.writeBlock(buffer);
    }

    // PLAYING
    // Allow circular reading, although it is not clear that the overflow case needs handling,
    // since the loop is written with the same block size as it is read.
    // We may need it if we start to use a different clock than looperState in OOServer
    // (which has a granularity of blockSize), or if we dynamically change blockSize.
    if (*isPlaying && recordNeedle.get() > 0 && monoLoopSample.getNumSamples())
    {
        if ((playNeedle + buffer.getNumSamples()) > recordNeedle.get())
        {
            // assert false for now, see above
            // jassert(false);
            int firstSegmentLength = recordNeedle.get() - playNeedle;
            int secondSegmentLength = buffer.getNumSamples() - firstSegmentLength;
            buffer.copyFrom(0, 0, monoLoopSample, 0, playNeedle, firstSegmentLength);
            buffer.copyFrom(0, firstSegmentLength, monoLoopSample, 0, 0, secondSegmentLength); // write the wrapped segment after the first one
            playNeedle = secondSegmentLength;
        }
        else
        {
            buffer.copyFrom(0, 0, monoLoopSample, 0, playNeedle, buffer.getNumSamples());
            playNeedle += buffer.getNumSamples();
            playNeedle %= recordNeedle.get();
        }

        buffer.applyGainRamp(0, 0, buffer.getNumSamples(), lastVolume, *volume);
        lastVolume = *volume;
        rmsOut = (1 - rmsAlpha) * rmsOut + rmsAlpha * buffer.getRMSLevel(0, 0, buffer.getNumSamples());
    }
    else
    {
        // silence the output buffer
        buffer.applyGain(0, 0, buffer.getNumSamples(), 0);
    }
}
Example 3: processBlock
void GainLawsAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();
    const int numTracks = getNumInputChannels();

    if (pluginON)
    {
        if (panMode == 0)
        {
            L = C;
            R = 1 - C;
        }
        else if (panMode == 1)
        {
            L = sqrt(C);
            R = sqrt(1 - C);
        }
        else if (panMode == 2)
        {
            L = sin(C * (M_PI / 2));
            R = cos(C * (M_PI / 2));
        }

        // Apply gain
        if (numTracks == 1)
        {
            buffer.clear(1, 0, numSamples);
            buffer.copyFrom(1, 0, buffer.getSampleData(0), numSamples);
        }

        buffer.applyGain(0, 0, numSamples, (float) R);
        buffer.applyGain(1, 0, numSamples, (float) L);
    }
}
Example 4: getNextAudioBlock
virtual void getNextAudioBlock(const AudioSourceChannelInfo& bufferToFill)
{
    AudioSampleBuffer* destBuffer = bufferToFill.buffer;
    const int len = std::min(bufferToFill.numSamples, static_cast<int>(_len - _pos));

    if (destBuffer)
    {
        for (int channel = 0; channel < destBuffer->getNumChannels(); ++channel)
        {
            if (channel == 0 && _buffer)
            {
                destBuffer->copyFrom(channel, bufferToFill.startSample, _buffer + _pos, len);

                if (len < bufferToFill.numSamples)
                {
                    const int startClear = bufferToFill.startSample + len;
                    const int lenClear   = bufferToFill.numSamples - len;
                    destBuffer->clear(startClear, lenClear);
                }
            }
            else
            {
                destBuffer->clear(channel, bufferToFill.startSample, len);
            }
        }
    }

    _pos += len;
}
Example 5: processBlock
// this plugin can only handle one channel of input and has only one channel of output
void SpectralDelayPluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // This is the place where you'd normally do the guts of your plugin's
    // audio processing...
    int numSamples = buffer.getNumSamples();

    // HeapBlock frees array allocations correctly (ScopedPointer would use the wrong form of delete here)
    HeapBlock<double> doubleInput (numSamples);
    HeapBlock<double> doubleOutput (numSamples);
    HeapBlock<float> functionOutput (numSamples);

    // ..do something to the data...
    float* channelData = buffer.getSampleData (0);

    // Convert the input data to doubles for now. Ask on the forums whether it's possible to use doubles
    // directly with AudioSampleBuffer; otherwise rewrite the filters to use floats instead
    // (maybe make a template for different numeric types).
    for (int i = 0; i < numSamples; ++i)
    {
        doubleInput[i] = channelData[i];
        functionOutput[i] = 0.0;
    }

    for (int i = 0; i < numFilters; ++i)
    {
        FFTfilter& currentFilter = *(filterVector[i]);
        CircularBuffer<double>& currentDelayLine = *(delayLineVector[i]);
        currentFilter.filter(doubleInput, doubleOutput, numSamples);

        // copy the filter output to the correct delay line and accumulate into the function's output at the same time
        for (int j = 0; j < numSamples; ++j)
        {
            currentDelayLine.addData(doubleOutput[j]);
            functionOutput[j] += float(currentDelayLine[delayAmounts[i]]);
        }
    }

    // clear the buffer and copy the output data to it
    buffer.clear();
    buffer.copyFrom(0, 0, functionOutput, numSamples);
}
Example 6: audioDeviceIOCallback
void AudioFilterStreamer::audioDeviceIOCallback (const float** inputChannelData,
                                                 int totalNumInputChannels,
                                                 float** outputChannelData,
                                                 int totalNumOutputChannels,
                                                 int numSamples)
{
    MidiBuffer midiBuffer;
    midiCollector.removeNextBlockOfMessages (midiBuffer, numSamples);

    int i, numActiveInChans = 0, numActiveOutChans = 0;

    int numOutsWanted = filter.getNumOutputChannels();
    const int numInsWanted = filter.getNumInputChannels();

    for (i = 0; i < totalNumInputChannels; ++i)
        if (inputChannelData[i] != 0)
            inChans [numActiveInChans++] = (float*) inputChannelData[i];

    while (numActiveInChans < numInsWanted)
        inChans [numActiveInChans++] = emptyBuffer.getSampleData (0, 0);

    for (i = 0; i < totalNumOutputChannels; ++i)
        if (outputChannelData[i] != 0)
            outChans [numActiveOutChans++] = outputChannelData[i];

    i = 0;
    while (numActiveOutChans < numOutsWanted)
        outChans [numActiveOutChans++] = emptyBuffer.getSampleData (++i, 0);

    AudioSampleBuffer input (inChans, jmin (numInsWanted, numActiveInChans), numSamples);
    AudioSampleBuffer output (outChans, jmin (numOutsWanted, numActiveOutChans), numSamples);

    {
        const ScopedLock sl (filter.getCallbackLock());

        if (filter.isSuspended())
        {
            output.clear();
        }
        else
        {
            for (int i = jmin (output.getNumChannels(), input.getNumChannels()); --i >= 0;)
                output.copyFrom (i, 0, input, i, 0, numSamples);

            filter.processBlock (output, midiBuffer);
        }
    }

    while (numOutsWanted < numActiveOutChans)
        zeromem (outChans[numOutsWanted++], sizeof (float) * numSamples);
}
Example 7: processBlock
//==============================================================================
void WrappedJucePlugin::processBlock (AudioSampleBuffer& buffer,
                                      MidiBuffer& midiMessages)
{
    const int blockSize = buffer.getNumSamples ();

    if (instance)
    {
        // Juce plugins take their input from the (passed-in) buffer, so we need to copy it in from the Jost inputBuffer
        for (int i = 0; i < getNumInputs(); i++)
            buffer.copyFrom(i, 0, *inputBuffer, i, 0, buffer.getNumSamples());

        // Similar for midi input
        MidiBuffer dud;
        MidiBuffer* midiBuffer = &dud;
        if (midiBuffers.size() > 0)
        {
            midiBuffer = midiBuffers.getUnchecked (0);

            // add events from keyboards
            keyboardState.processNextMidiBuffer(*midiBuffer, 0, blockSize, true);

            // process midi automation
            midiAutomatorManager.handleMidiMessageBuffer(*midiBuffer);
        }

        // apply a midi filter on the input to the synth if one is set
        MidiFilter* synthInputFilter = getSynthInputChannelFilter();
        if (synthInputFilter)
        {
            MidiManipulator manip;
            manip.setMidiFilter(synthInputFilter);
            manip.processEvents(*midiBuffer, blockSize);
        }

        // Call through to the Juce plugin instance to get the VST to actually do its thing!
        instance->processBlock(buffer, *midiBuffer);

        // haven't worked out what jiggering needs to be done to the midi output yet

        // Juce plugins put their output into the (passed-in) buffer, so we need to copy this out into the Jost outputBuffer
        for (int i = 0; i < getNumOutputs(); i++)
            outputBuffer->copyFrom(i, 0, buffer, i, 0, buffer.getNumSamples());
    }
}
Example 8: process
void ChannelMappingNode::process(AudioSampleBuffer& buffer,
                                 MidiBuffer& midiMessages)
{
    int j = 0;
    int i = 0;
    int realChan;

    // use the copy constructor to set the data to refer to
    channelBuffer = buffer;
    // buffer.clear();

    while (j < settings.numOutputs)
    {
        realChan = channelArray[i];

        if ((realChan < channelBuffer.getNumChannels()) && (enabledChannelArray[realChan]))
        {
            // copy it back into the buffer according to the channel mapping
            buffer.copyFrom(j,                                       // destChannel
                            0,                                       // destStartSample
                            channelBuffer.getReadPointer(realChan),  // source
                            getNumSamples(j),                        // numSamples
                            1.0f                                     // gain to apply to source (positive for original signal)
                           );

            // now do the referencing
            if ((referenceArray[realChan] > -1)
                && (referenceChannels[referenceArray[realChan]] > -1)
                && (referenceChannels[referenceArray[realChan]] < channelBuffer.getNumChannels()))
            {
                buffer.addFrom(j,                                                               // destChannel
                               0,                                                               // destStartSample
                               channelBuffer,                                                   // source
                               channels[referenceChannels[referenceArray[realChan]]]->index - 1, // sourceChannel
                               0,                                                               // sourceStartSample
                               getNumSamples(j),                                                // numSamples
                               -1.0f                                                            // gain to apply to source (negative for reference)
                              );
            }

            j++;
        }

        i++;
    }
}
Example 9: processBlock
void processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();

    if (initialised && plugin != nullptr && handle != nullptr)
    {
        for (int i = 0; i < inputs.size(); ++i)
            plugin->connect_port (handle, inputs[i],
                                  i < buffer.getNumChannels() ? buffer.getWritePointer (i) : nullptr);

        if (plugin->run != nullptr)
        {
            for (int i = 0; i < outputs.size(); ++i)
                plugin->connect_port (handle, outputs.getUnchecked(i),
                                      i < buffer.getNumChannels() ? buffer.getWritePointer (i) : nullptr);

            plugin->run (handle, numSamples);
            return;
        }

        if (plugin->run_adding != nullptr)
        {
            tempBuffer.setSize (outputs.size(), numSamples);
            tempBuffer.clear();

            for (int i = 0; i < outputs.size(); ++i)
                plugin->connect_port (handle, outputs.getUnchecked(i), tempBuffer.getWritePointer (i));

            plugin->run_adding (handle, numSamples);

            for (int i = 0; i < outputs.size(); ++i)
                if (i < buffer.getNumChannels())
                    buffer.copyFrom (i, 0, tempBuffer, i, 0, numSamples);

            return;
        }

        jassertfalse; // no callback to use?
    }

    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, numSamples);
}
Example 10: processBlock
void processBlock (AudioSampleBuffer& buffer, MidiBuffer&) override
{
    // copy the input into a scratch buffer
    AudioSampleBuffer scratch (scratchBuffer.getArrayOfWritePointers(), 1, buffer.getNumSamples());
    scratch.copyFrom(0, 0, buffer, 0, 0, buffer.getNumSamples());

    const Array<SpeakerPosition>& positions = speakerPositions.getReference (currentSpeakerLayout).positions;
    const float* inputBuffer = scratch.getReadPointer (0);

    const float kMaxDistanceGain = -20.0f;

    for (int speakerIdx = 0; speakerIdx < positions.size(); ++speakerIdx)
    {
        const SpeakerPosition& speakerPos = positions.getReference (speakerIdx);

        float fltDistance = distance (polarToCartesian (speakerPos.radius, speakerPos.phi),
                                      polarToCartesian (*radius, (*phi) * 2.0f * float_Pi));
        float gainInDb = kMaxDistanceGain * (fltDistance / 2.0f);
        float gain = std::pow (10.0f, (gainInDb / 20.0f));

        busArrangement.getBusBuffer(buffer, false, 0).copyFrom(speakerIdx, 0, inputBuffer, buffer.getNumSamples(), gain);
    }
}
Example 11: processBlock
void StereoChorusAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    if (getNumInputChannels() == 1)
        buffer.copyFrom(1, 0, buffer, 0, 0, buffer.getNumSamples());

    //float* channelData = buffer.getSampleData (0);

    for (int i = 0; i < buffer.getNumSamples(); i++)
    {
        float leftMod  = (leftOsc.nextSample() + 1.01) * getParameter(modParam) * 100;
        float rightMod = (leftOsc.nextSample() + 1.01) * getParameter(modParam) * 100;

        leftDelayTime  = (getParameter(delayParam) * 200) + leftMod + .002;
        rightDelayTime = (getParameter(delayParam) * 220) + rightMod + .0015;

        float l_xn = buffer.getReadPointer(0)[i];
        float r_xn = buffer.getReadPointer(1)[i];

        float l_combined;
        float r_combined;
        float l_yn;
        float r_yn;

        l_yn = leftBuffer.getSample(leftDelayTime);
        r_yn = rightBuffer.getSample(rightDelayTime);

        l_combined = l_xn + r_yn * getParameter(feedbackParam);
        r_combined = r_xn + l_yn * getParameter(feedbackParam);

        leftBuffer.addSample(l_combined);
        rightBuffer.addSample(r_combined);

        buffer.getWritePointer(0)[i] = (l_xn * (1 - getParameter(mixParam)) + l_yn * getParameter(mixParam));
        buffer.getWritePointer(1)[i] = (r_xn * (1 - getParameter(mixParam)) + r_yn * getParameter(mixParam));
    }
}
Example 12: processBlock
void Mcfx_delayAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // compute read position
    _buf_read_pos = _buf_write_pos - _delay_smpls;

    if (_buf_read_pos < 0)
        _buf_read_pos = _buf_size + _buf_read_pos - 1;

    // std::cout << "size : " << _buf_size << " read pos: " << _buf_read_pos << std::endl;

    // resize buffer if necessary
    if (_delay_buffer.getNumChannels() < buffer.getNumChannels() || _delay_buffer.getNumSamples() < _buf_size)
    {
        // resize buffer
        _delay_buffer.setSize(buffer.getNumChannels(), _buf_size, true, true, false);
    }

    // write to the buffer
    if (_buf_write_pos + buffer.getNumSamples() < _buf_size)
    {
        for (int ch = 0; ch < buffer.getNumChannels(); ch++)
        {
            // copy straight into the buffer
            _delay_buffer.copyFrom(ch, _buf_write_pos, buffer, ch, 0, buffer.getNumSamples());
        }

        // update write position
        _buf_write_pos += buffer.getNumSamples();
    }
    else // the write would run past the end of the buffer
    {
        int samples_to_write1 = _buf_size - _buf_write_pos;
        int samples_to_write2 = buffer.getNumSamples() - samples_to_write1;

        // std::cout << "spl_write1: " << samples_to_write1 << " spl_write2: " << samples_to_write2 << std::endl;

        for (int ch = 0; ch < buffer.getNumChannels(); ch++)
        {
            // copy until the end
            _delay_buffer.copyFrom(ch, _buf_write_pos, buffer, ch, 0, samples_to_write1);

            // then continue copying at the front
            _delay_buffer.copyFrom(ch, 0, buffer, ch, samples_to_write1, samples_to_write2);
        }

        // update write position
        _buf_write_pos = samples_to_write2;
    }

    // read from buffer
    if (_buf_read_pos + buffer.getNumSamples() < _buf_size)
    {
        for (int ch = 0; ch < buffer.getNumChannels(); ch++)
        {
            buffer.copyFrom(ch, 0, _delay_buffer, ch, _buf_read_pos, buffer.getNumSamples());
        }

        // update read position
        _buf_read_pos += buffer.getNumSamples();
    }
    else
    {
        int samples_to_read1 = _buf_size - _buf_read_pos;
        int samples_to_read2 = buffer.getNumSamples() - samples_to_read1;

        for (int ch = 0; ch < buffer.getNumChannels(); ch++)
        {
            // copy until the end
            buffer.copyFrom(ch, 0, _delay_buffer, ch, _buf_read_pos, samples_to_read1);

            // then continue copying from the front
            buffer.copyFrom(ch, samples_to_read1, _delay_buffer, ch, 0, samples_to_read2);
        }

        // update read position
        _buf_read_pos = samples_to_read2;
    }

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getTotalNumInputChannels(); i < getTotalNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
Example 13: processBlock
void DemoJuceFilter::processBlock (AudioSampleBuffer& buffer,
                                   MidiBuffer& midiMessages)
{
    /*
    // for each of our input channels, we'll attenuate its level by the
    // amount that our volume parameter is set to.
    for (int channel = 0; channel < getNumInputChannels(); ++channel)
    {
        buffer.applyGain (channel, 0, buffer.getNumSamples(), gain);

        // mix in the opposite ratio of noise (i.e. generator)
        float* sampleData = buffer.getSampleData(channel);
        for (int sample = 0; sample < buffer.getNumSamples(); sample++)
            sampleData[sample] = (rand() / static_cast<float>(RAND_MAX)) * (1.0 - gain) + sampleData[sample];
    }

    // in case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
    */

    // if any midi messages come in, use them to update the keyboard state object. This
    // object sends notifications to the UI component about key up/down changes
    keyboardState.processNextMidiBuffer (midiMessages,
                                         0, buffer.getNumSamples(),
                                         true);

    // have a go at getting the current time from the host, and if it's changed, tell
    // our UI to update itself.
    AudioPlayHead::CurrentPositionInfo pos;

    if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (pos))
    {
        if (memcmp (&pos, &lastPosInfo, sizeof (pos)) != 0)
        {
            lastPosInfo = pos;
            sendChangeMessage (this);
        }
    }
    else
    {
        zeromem (&lastPosInfo, sizeof (lastPosInfo));
        lastPosInfo.timeSigNumerator = 4;
        lastPosInfo.timeSigDenominator = 4;
        lastPosInfo.bpm = 120;
    }

    if (ptrPlug && ladspa)
    {
        int blockSize = buffer.getNumSamples();

        // convert midi messages internally
        midiManager.convertMidiMessages (midiMessages, blockSize);

        // connect ports
        // for (int i = 0; i < ins.size (); i++)
        //     ladspa->connect_port (plugin, ins [i], inputBuffer->getSampleData (i));

        for (int i = 0; i < outs.size (); i++)
        {
            ladspa->connect_port (plugin, outs [i], buffer.getSampleData (i));
            // std::cerr << " connecting output " << i << std::endl;
        }

        if (ptrPlug->run_synth)
        {
            ptrPlug->run_synth (plugin,
                                blockSize,
                                midiManager.getMidiEvents (),
                                midiManager.getMidiEventsCount ());

            // now paste the data into the right channel
            // not generic: this assumes we are a mono plugin and we've claimed 2 output channels
            buffer.copyFrom(1, 0, buffer, 0, 0, blockSize);
            return;
        }
        else if (ptrPlug->run_synth_adding)
        {
            buffer.clear ();

            ptrPlug->run_synth_adding (plugin,
                                       blockSize,
                                       midiManager.getMidiEvents (),
                                       midiManager.getMidiEventsCount ());

            // now paste the data into the right channel
            // not generic: this assumes we are a mono plugin and we've claimed 2 output channels
            buffer.copyFrom(1, 0, buffer, 0, 0, blockSize);
            return;
        }

        // run ladspa if present
        if (ladspa->run)
        {
            ladspa->run (plugin, blockSize);
        }
        else if (ladspa->run_adding)
//......... the remainder of this example has been omitted .........