This article collects typical usage examples of the C++ AudioSourceChannelInfo class (from the JUCE framework). If you are wondering what AudioSourceChannelInfo is for, or how it is used in practice, the curated examples below should help.
The following presents 15 code examples of the AudioSourceChannelInfo class, sorted by popularity by default.
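Before the examples, here is a minimal sketch of the pattern nearly all of them share: an AudioSourceChannelInfo simply bundles a target buffer with a start offset and a sample count, and an AudioSource either fills that region inside getNextAudioBlock() or silences it with clearActiveBufferRegion(). The SilentSource class and pullOneBlock() helper below are hypothetical names used purely for illustration, assuming a standard JUCE project where JuceHeader.h is available; they are not taken from the examples that follow.

// Minimal sketch, assuming a JUCE project (JuceHeader.h on the include path).
// SilentSource and pullOneBlock are hypothetical, for illustration only.
#include <JuceHeader.h>

struct SilentSource : public juce::AudioSource
{
    void prepareToPlay (int /*samplesPerBlockExpected*/, double /*sampleRate*/) override {}
    void releaseResources() override {}

    void getNextAudioBlock (const juce::AudioSourceChannelInfo& info) override
    {
        // info.buffer      -> the AudioSampleBuffer to write into
        // info.startSample -> first sample of the region this call owns
        // info.numSamples  -> number of samples to produce
        info.clearActiveBufferRegion(); // write silence into exactly that region
    }
};

// Caller side: wrap a buffer in an AudioSourceChannelInfo and pull one block,
// much like the writeFromAudioSource() examples below do.
static void pullOneBlock (juce::AudioSource& source, juce::AudioSampleBuffer& buffer)
{
    juce::AudioSourceChannelInfo info (&buffer, 0, buffer.getNumSamples());
    source.getNextAudioBlock (info);
}

Every example below is a variation on this contract: decide which part of the active region to fill, and clear whatever is left over.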
Example 1: throw
void DiskInUGenInternal::processBlock(bool& shouldDelete, const unsigned int /*blockID*/, const int /*channel*/) throw()
{
int numChannels = getNumChannels();
int blockSize = uGenOutput.getBlockSize();
for(int i = 0; i < numChannels; i++)
{
bufferData[i] = proxies[i]->getSampleData();
}
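// Wrap the UGen's per-channel output pointers in a JUCE AudioSampleBuffer so the file player can render directly into them.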
AudioSampleBuffer audioSampleBuffer(bufferData, numChannels, blockSize);
AudioSourceChannelInfo info;
info.buffer = &audioSampleBuffer;
info.numSamples = blockSize;
info.startSample = 0;
if(filePlayer.isPlaying())
filePlayer.getNextAudioBlock(info);
else
info.clearActiveBufferRegion();
if(isDone())
{
shouldDelete = shouldDelete ? true : shouldDeleteValue;
}
}
Example 2: l
void SampleAudioSource::getNextAudioBlock(const AudioSourceChannelInfo& buf) {
ScopedLock l(lock_);
if (not (source_ and isRunning_)) {
buf.clearActiveBufferRegion();
return;
}
currentTime_ += buf.numSamples;
SampleTime overrun(currentTime_ - length_);
if (overrun < 0) {
source_->getNextAudioBlock(buf);
panGainPlayer_->apply(buf);
return;
}
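// The block extends past the end of the sample: render what remains, clear the tail, then report completion.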
AudioSourceChannelInfo b = buf;
b.numSamples -= overrun;
source_->getNextAudioBlock(b);
panGainPlayer_->apply(b);
b.startSample += b.numSamples;
b.numSamples = overrun;
b.clearActiveBufferRegion();
isRunning_ = false;
callback_(callbackData_);
// Might block - perhaps we should do this in another thread?
}
Example 3: getNextAudioBlock
void LAudioAppComponent::getNextAudioBlock( const AudioSourceChannelInfo& bufferToFill ) {
if(stopped
|| !audioOpen
|| !hasCallback("getNextAudioBlock")
|| (audioOpen && audioSourcePlayer.getCurrentSource()==nullptr) )
{
bufferToFill.clearActiveBufferRegion();
return;
}
if(hasCallback("getNextAudioBlock")) {
MessageManagerLock mml (Thread::getCurrentThread());
if (! mml.lockWasGained()) {
DBG("CAN'T GET LOCK");
return; // another thread is trying to kill us!
}
LAudioSampleBuffer audioBuffer(Ls, *bufferToFill.buffer);
callback("getNextAudioBlock", 0, {
bufferToFill.startSample,
bufferToFill.numSamples,
bufferToFill.buffer->getNumChannels(),
new LRefBase("AudioSampleBuffer", &audioBuffer)
});
if(volume>-1) {
if(volume) bufferToFill.buffer->applyGain(volume);
else bufferToFill.clearActiveBufferRegion();
}
}
}
Example 4: sl
void SequenceAudioSource::getNextAudioBlock (const AudioSourceChannelInfo& info)
{
const ScopedLock sl (lock);
updatePlayingEvent (currentPlayingPosition);
int nextSamples = samplesToNextEvent (currentPlayingPosition);
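// Render up to the next event boundary; if the block straddles the boundary, advance to the following event and render (or clear) the remainder.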
if (currentPlayingPart >= 0
&& currentPlayingPart < list.size ())
{
int sampleToDo = jmax (-1, jmin (nextSamples, info.numSamples));
if (sampleToDo >= 0)
{
AudioSourceChannelInfo subInfo;
subInfo.buffer = info.buffer;
subInfo.startSample = info.startSample;
subInfo.numSamples = sampleToDo;
AudioEventHolder* event = list.getUnchecked (currentPlayingPart);
event->resampleSource->getNextAudioBlock (subInfo);
if (sampleToDo < info.numSamples)
{
DBG ("played last: " + String (sampleToDo) + " of " + String (info.numSamples));
subInfo.startSample = info.startSample + sampleToDo;
subInfo.numSamples = info.numSamples - sampleToDo;
currentPlayingPosition += subInfo.numSamples;
updatePlayingEvent (currentPlayingPosition);
if (currentPlayingPart >= 0
&& currentPlayingPart < list.size ())
{
AudioEventHolder* event = list.getUnchecked (currentPlayingPart);
event->resampleSource->getNextAudioBlock (subInfo);
}
else
{
subInfo.clearActiveBufferRegion();
}
DBG ("played last: " + String (subInfo.numSamples) + " of " + String (info.numSamples));
}
currentPlayingPosition += subInfo.numSamples;
}
else
{
info.clearActiveBufferRegion();
currentPlayingPosition += info.numSamples;
}
}
else
{
info.clearActiveBufferRegion();
currentPlayingPosition += info.numSamples;
}
}
Example 5: tempBuffer
bool AudioFormatWriter::writeFromAudioSource (AudioSource& source,
int numSamplesToRead,
const int samplesPerBlock)
{
const int maxChans = 128;
AudioSampleBuffer tempBuffer (getNumChannels(), samplesPerBlock);
int* buffers [maxChans];
while (numSamplesToRead > 0)
{
const int numToDo = jmin (numSamplesToRead, samplesPerBlock);
AudioSourceChannelInfo info;
info.buffer = &tempBuffer;
info.startSample = 0;
info.numSamples = numToDo;
info.clearActiveBufferRegion();
source.getNextAudioBlock (info);
int i;
for (i = maxChans; --i >= 0;)
buffers[i] = 0;
for (i = tempBuffer.getNumChannels(); --i >= 0;)
buffers[i] = (int*) tempBuffer.getSampleData (i, 0);
if (! isFloatingPoint())
{
int** bufferChan = buffers;
while (*bufferChan != 0)
{
int* b = *bufferChan++;
// float -> int
for (int j = numToDo; --j >= 0;)
{
const double samp = *(const float*) b;
if (samp <= -1.0)
*b++ = INT_MIN;
else if (samp >= 1.0)
*b++ = INT_MAX;
else
*b++ = roundToInt (INT_MAX * samp);
}
}
}
if (! write ((const int**) buffers, numToDo))
return false;
numSamplesToRead -= numToDo;
}
return true;
}
Example 6: getNextAudioBlock
// Audio Processing (split in "processAmbisonicBuffer" and "fillNextAudioBlock" to enable
// IR recording: using the same methods as the main thread)
void MainContentComponent::getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill)
{
// check if update required
if( updateNumFreqBandrequired ){
sourceImagesHandler.setFilterBankSize(numFreqBands);
updateNumFreqBandrequired = false;
// trigger a general update: re-dimension the absorption coefficients and trigger the future->current update (see the function for details)
sourceImagesHandler.updateFromOscHandler(oscHandler);
}
// fill buffer with audiofile data
audioIOComponent.getNextAudioBlock(bufferToFill);
// execute main audio processing
if( !isRecordingIr )
{
processAmbisonicBuffer( bufferToFill.buffer );
if( audioRecorder.isRecording() ){ recordAmbisonicBuffer(); }
fillNextAudioBlock( bufferToFill.buffer );
}
// simply clear output buffer
else
{
bufferToFill.clearActiveBufferRegion();
}
// check whether the source images need an update (i.e. the OSC handler requested one
// while the source images were in the middle of a crossfade)
if( sourceImageHandlerNeedsUpdate && sourceImagesHandler.crossfadeOver )
{
sourceImagesHandler.updateFromOscHandler(oscHandler);
requireDelayLineSizeUpdate = true;
sourceImageHandlerNeedsUpdate = false;
}
}
Example 7: getNextAudioBlock
void getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill) override
{
bufferToFill.clearActiveBufferRegion();
const int bufferSize = buffer->getNumSamples();
const int samplesNeeded = bufferToFill.numSamples;
const int samplesToCopy = jmin (bufferSize - position, samplesNeeded);
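// Copy as much of the in-memory buffer as remains; when playing across all output channels, source channels are repeated via (i % maxInChannels).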
if (samplesToCopy > 0)
{
int maxInChannels = buffer->getNumChannels();
int maxOutChannels = bufferToFill.buffer->getNumChannels();
if (! playAcrossAllChannels)
maxOutChannels = jmin (maxOutChannels, maxInChannels);
for (int i = 0; i < maxOutChannels; ++i)
bufferToFill.buffer->copyFrom (i, bufferToFill.startSample, *buffer,
i % maxInChannels, position, samplesToCopy);
}
position += samplesNeeded;
if (looping)
position %= bufferSize;
}
Example 8: sl
void MixerAudioSource::getNextAudioBlock (const AudioSourceChannelInfo& info)
{
const ScopedLock sl (lock);
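// Let the first input render directly into the output buffer, then mix each remaining input in via a temporary buffer.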
if (inputs.size() > 0)
{
inputs.getUnchecked(0)->getNextAudioBlock (info);
if (inputs.size() > 1)
{
tempBuffer.setSize (jmax (1, info.buffer->getNumChannels()),
info.buffer->getNumSamples());
AudioSourceChannelInfo info2 (&tempBuffer, 0, info.numSamples);
for (int i = 1; i < inputs.size(); ++i)
{
inputs.getUnchecked(i)->getNextAudioBlock (info2);
for (int chan = 0; chan < info.buffer->getNumChannels(); ++chan)
info.buffer->addFrom (chan, info.startSample, tempBuffer, chan, 0, info.numSamples);
}
}
}
else
{
info.clearActiveBufferRegion();
}
}
Example 9: getNextAudioBlock
void SampleSynthAudioSource::getNextAudioBlock(const AudioSourceChannelInfo& bufferToFill)
{
bufferToFill.clearActiveBufferRegion();
MidiBuffer incomingMidi;
midiCollector_.removeNextBlockOfMessages (incomingMidi, bufferToFill.numSamples);
process(*bufferToFill.buffer, incomingMidi, bufferToFill.numSamples);
}
Example 10: getNextAudioBlock
void BufferingAudioSource::getNextAudioBlock (const AudioSourceChannelInfo& info)
{
const ScopedLock sl (bufferStartPosLock);
const int validStart = (int) (jlimit (bufferValidStart, bufferValidEnd, nextPlayPos) - nextPlayPos);
const int validEnd = (int) (jlimit (bufferValidStart, bufferValidEnd, nextPlayPos + info.numSamples) - nextPlayPos);
if (validStart == validEnd)
{
// total cache miss
info.clearActiveBufferRegion();
}
else
{
if (validStart > 0)
info.buffer->clear (info.startSample, validStart); // partial cache miss at start
if (validEnd < info.numSamples)
info.buffer->clear (info.startSample + validEnd,
info.numSamples - validEnd); // partial cache miss at end
if (validStart < validEnd)
{
for (int chan = jmin (numberOfChannels, info.buffer->getNumChannels()); --chan >= 0;)
{
jassert (buffer.getNumSamples() > 0);
const int startBufferIndex = (int) ((validStart + nextPlayPos) % buffer.getNumSamples());
const int endBufferIndex = (int) ((validEnd + nextPlayPos) % buffer.getNumSamples());
if (startBufferIndex < endBufferIndex)
{
info.buffer->copyFrom (chan, info.startSample + validStart,
buffer,
chan, startBufferIndex,
validEnd - validStart);
}
else
{
const int initialSize = buffer.getNumSamples() - startBufferIndex;
info.buffer->copyFrom (chan, info.startSample + validStart,
buffer,
chan, startBufferIndex,
initialSize);
info.buffer->copyFrom (chan, info.startSample + validStart + initialSize,
buffer,
chan, 0,
(validEnd - validStart) - initialSize);
}
}
}
nextPlayPos += info.numSamples;
}
}
Example 11: getNextAudioBlock
void getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill) override
{
// Your audio-processing code goes here!
// For more details, see the help for AudioProcessor::getNextAudioBlock()
// Right now we are not producing any data, in which case we need to clear the buffer
// (to prevent the output of random noise)
bufferToFill.clearActiveBufferRegion();
}
Example 12: tempBuffer
bool AudioFormatWriter::writeFromAudioSource (AudioSource& source, int numSamplesToRead, const int samplesPerBlock)
{
AudioSampleBuffer tempBuffer (getNumChannels(), samplesPerBlock);
while (numSamplesToRead > 0)
{
const int numToDo = jmin (numSamplesToRead, samplesPerBlock);
AudioSourceChannelInfo info (&tempBuffer, 0, numToDo);
info.clearActiveBufferRegion();
source.getNextAudioBlock (info);
if (! writeFromAudioSampleBuffer (tempBuffer, 0, numToDo))
return false;
numSamplesToRead -= numToDo;
}
return true;
}
Example 13: getNextAudioBlock
void getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill)
{
// the synth always adds its output to the audio buffer, so we have to clear it
// first..
bufferToFill.clearActiveBufferRegion();
// fill a midi buffer with incoming messages from the midi input.
MidiBuffer incomingMidi;
midiCollector.removeNextBlockOfMessages (incomingMidi, bufferToFill.numSamples);
// and now get the synth to process the midi events and generate its output.
synth.renderNextBlock (*bufferToFill.buffer, incomingMidi, 0, bufferToFill.numSamples);
}
Example 14: getNextAudioBlock
void DrumMachine::getNextAudioBlock(const AudioSourceChannelInfo& bufferToFill) {
// std::cout << "drum machine!!" << std::endl;
auto& transport = audioEngine.getTransport();
if (transport.isPlaying()) {
int frameStartSamples = transport.getFrameStartSamples();
float frameStartTicks = transport.getFrameStartTicks();
float frameEndTicks = transport.getFrameEndTicks();
if ((int) frameStartTicks < (int) frameEndTicks) {
int tick = (int) frameEndTicks;
if (patternLength != 0) {
int ntick = tick % patternLength;
for (int voice = 0; voice < NUM_DRUM_VOICES; voice++) {
if (mute[voice])
continue;
if (pattern[voice][ntick] > 0) {
// we need to queue the appropriate note in the drum machine's synth.
int offset = transport.ticksToSamples(tick) - frameStartSamples;
if (offset > 0) {
MidiMessage msg = MidiMessage::noteOn(1, voice, (float) 1.0);
msg.setTimeStamp(offset);
midiCollector.addMessageToQueue(msg);
}
}
}
}
}
}
// the synth always adds its output to the audio buffer, so we have to clear it
// first..
bufferToFill.clearActiveBufferRegion();
// fill a midi buffer with incoming messages from the midi input.
MidiBuffer incomingMidi;
midiCollector.removeNextBlockOfMessages(incomingMidi, bufferToFill.numSamples);
// pass these messages to the keyboard state so that it can update the component
// to show on-screen which keys are being pressed on the physical midi keyboard.
// This call will also add midi messages to the buffer which were generated by
// the mouse-clicking on the on-screen keyboard.
keyboardState.processNextMidiBuffer(incomingMidi, 0, bufferToFill.numSamples, true);
// and now get the synth to process the midi events and generate its output.
synth.renderNextBlock(*bufferToFill.buffer, incomingMidi, 0, bufferToFill.numSamples);
bufferToFill.buffer->applyGain(0, 0, bufferToFill.numSamples, 0.2);
bufferToFill.buffer->applyGain(1, 0, bufferToFill.numSamples, 0.2);
}
Example 15: getNextAudioBlock
void Source::getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill)
{
// calculations
float trackCutoff = parameters->getUnchecked(Controller::getParameterId(Controller::params::cutoff, trackId))->getScaledValue();
float trackDistort = parameters->getUnchecked(Controller::getParameterId(Controller::params::distort, trackId))->getScaledValue();
float trackLevel = parameters->getUnchecked(Controller::getParameterId(Controller::params::level, trackId))->getScaledValue();
float trackMute = parameters->getUnchecked(Controller::getParameterId(Controller::params::mute, trackId))->getScaledValue();
float globalCutoff = parameters->getUnchecked(Controller::getParameterId(Controller::params::cutoff))->getScaledValue();
float globalDistort = parameters->getUnchecked(Controller::getParameterId(Controller::params::distort))->getScaledValue();
float modulationCutoff = sampler.currentModulations != nullptr
? Parameter::scale(Controller::params::cutoff, true, sampler.currentModulations->getUnchecked(Mixer::mods::cutoff))
: 0.0;
float cutoff = fmax(0, fmin(1, trackCutoff + globalCutoff + modulationCutoff));
float distort = 1 - fmax(0, fmin(0.93, trackDistort + globalDistort));
float level = trackLevel * !trackMute;
// setup
bufferToFill.clearActiveBufferRegion();
midiCollector.removeNextBlockOfMessages (incomingMidi, bufferToFill.numSamples);
// render sampler
sampler.renderNextBlock(*bufferToFill.buffer, incomingMidi, 0, bufferToFill.numSamples);
// dsp: distortion
float* outL = bufferToFill.buffer->getWritePointer (0, 0);
float* outR = bufferToFill.buffer->getWritePointer (1, 0);
for (int i = bufferToFill.numSamples - 1; i >= 0; --i) {
outL[i] = foldback(outL[i], distort);
outR[i] = foldback(outR[i], distort);
}
// dsp: filter
filterL.setCoefficients(IIRCoefficients::makeLowPass(sampleRate, MidiMessage::getMidiNoteInHertz(cutoff * 128)));
filterR.setCoefficients(IIRCoefficients::makeLowPass(sampleRate, MidiMessage::getMidiNoteInHertz(cutoff * 128)));
filterL.processSamples(bufferToFill.buffer->getWritePointer(0), bufferToFill.buffer->getNumSamples());
filterR.processSamples(bufferToFill.buffer->getWritePointer(1), bufferToFill.buffer->getNumSamples());
// dsp: level
bufferToFill.buffer->applyGainRamp(0, bufferToFill.numSamples, lastLevel, level);
lastLevel = level;
}