This article collects typical usage examples of the C++ AudioBuffer class. If you have been wondering what the AudioBuffer class is for and how to use it in practice, the hand-picked examples below should help.
The following presents 15 code examples of the AudioBuffer class, sorted by popularity by default.
Example 1: processAudio
void processAudio(AudioBuffer& buf){
    float minf = getParameterValue(PARAMETER_A)*0.1 + 0.001;
    float maxf = min(0.4, minf + getParameterValue(PARAMETER_B)*0.2);
    // range should be exponentially related to minf
    // int tones = getParameterValue(PARAMETER_C)*(TONES-1) + 1;
    int tones = 12;
    float spread = getParameterValue(PARAMETER_C) + 1.0;
    float rate = 1.0 + (getParameterValue(PARAMETER_D) - 0.5)*0.00002;
    int size = buf.getSize();
    FloatArray out = buf.getSamples(LEFT_CHANNEL);
    float amp;
    // space the partials exponentially: inc[t] = inc[0] * spread^t
    for(int t = 1; t < tones; ++t)
        inc[t] = inc[t-1]*spread;
    for(int i = 0; i < size; ++i){
        for(int t = 0; t < tones; ++t){
            amp = getAmplitude((inc[t]-minf)/(maxf-minf));
            out[i] += amp * getWave(acc[t]);
            acc[t] += inc[t];
            // keep the normalised phase accumulator in [0, 1)
            if(acc[t] > 1.0)
                acc[t] -= 1.0;
            else if(acc[t] < 0.0)
                acc[t] += 1.0;
            inc[t] *= rate; // drift every increment slightly each sample
        }
    }
    // when the lowest tone leaves the range, re-enter from the other end
    if(inc[0] > maxf){
        inc[0] = minf;
        // while(inc[0] > minf)
        //     inc[0] *= 0.5;
    } else if(inc[0] < minf){
        inc[0] = maxf;
        // while(inc[0] < maxf)
        //     inc[0] *= 2.0;
    }
}
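Because each increment is derived from its lower neighbour (inc[t] = inc[t-1] * spread), the t-th tone runs at inc[0] * spread^t, and multiplying every increment by rate once per sample drifts the whole cluster exponentially up or down. Re-entering the lowest tone at the opposite end of the [minf, maxf] range when it escapes produces an endless, Shepard-glissando-like sweep.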
Example 2: read
void
JackLayer::read(AudioBuffer &buffer)
{
    for (unsigned i = 0; i < in_ringbuffers_.size(); ++i) {
        const size_t incomingSamples = jack_ringbuffer_read_space(in_ringbuffers_[i]) / sizeof(captureFloatBuffer_[0]);
        if (!incomingSamples)
            continue;

        captureFloatBuffer_.resize(incomingSamples);
        buffer.resize(incomingSamples);

        // Read as many bytes as are actually available, up to the expected amount.
        const size_t from_ringbuffer = jack_ringbuffer_read_space(in_ringbuffers_[i]);
        const size_t expected_bytes = std::min(incomingSamples * sizeof(captureFloatBuffer_[0]), from_ringbuffer);
        // FIXME: while we have samples to write AND while we have space to write them
        const size_t read_bytes = jack_ringbuffer_read(in_ringbuffers_[i],
                (char *) captureFloatBuffer_.data(), expected_bytes);
        if (read_bytes < expected_bytes) {
            RING_WARN("Dropped %zu bytes", expected_bytes - read_bytes);
            break;
        }

        // Convert this channel's float samples into the destination buffer.
        convertFromFloat(captureFloatBuffer_, *buffer.getChannel(i));
    }
}
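convertFromFloat() itself is not shown in this example. A minimal sketch of what such a helper could look like, assuming the destination channel stores 16-bit integer samples; the AudioSample alias and the scaling factor are assumptions for illustration, not taken from the source:

#include <algorithm>
#include <cstdint>
#include <vector>

using AudioSample = int16_t; // assumption: 16-bit integer samples

// Hypothetical sketch: scale normalized floats in [-1, 1] to 16-bit integers.
static void convertFromFloat(const std::vector<float> &in,
                             std::vector<AudioSample> &out)
{
    out.resize(in.size());
    std::transform(in.begin(), in.end(), out.begin(), [](float f) {
        f = std::max(-1.0f, std::min(1.0f, f)); // clamp to avoid overflow
        return static_cast<AudioSample>(f * 32767.0f);
    });
}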
Example 3: convert_channels
void AudioBuffer::convert_channels(AudioBuffer &_dest, unsigned _frames_count)
{
    AudioSpec destspec{m_spec.format, _dest.channels(), m_spec.rate};
    if(_dest.spec() != destspec) {
        throw std::logic_error("unsupported format");
    }
    _frames_count = std::min(frames(), _frames_count);
    if(m_spec.channels == destspec.channels) {
        // Same channel count: frames can be mixed in directly.
        _dest.add_frames(*this, _frames_count);
        return;
    }
    // Dispatch on the sample format to the templated per-sample helper.
    switch(m_spec.format) {
        case AUDIO_FORMAT_U8:
            convert_channels<uint8_t>(*this, _dest, _frames_count);
            break;
        case AUDIO_FORMAT_S16:
            convert_channels<int16_t>(*this, _dest, _frames_count);
            break;
        case AUDIO_FORMAT_F32:
            convert_channels<float>(*this, _dest, _frames_count);
            break;
        default:
            throw std::logic_error("unsupported format");
    }
}
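The templated overload that does the per-sample work is not part of this example. A minimal sketch of the mono-to-stereo case, where the at<T>() accessor is invented for illustration and stands in for whatever per-channel access the class really provides:

// Hypothetical sketch: duplicate a mono source into every destination
// channel. The at<T>() accessor is invented for illustration.
template<typename T>
static void convert_channels(const AudioBuffer &src, AudioBuffer &dest,
                             unsigned frames_count)
{
    for (unsigned f = 0; f < frames_count; ++f) {
        for (unsigned c = 0; c < dest.channels(); ++c) {
            dest.at<T>(f, c) = src.at<T>(f, 0); // replicate channel 0
        }
    }
}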
Example 4: processAudio
void processAudio(AudioBuffer &buffer){
    float y[getBlockSize()];
    setCoeffs(getLpFreq(), 0.8f);
    float delayTime = getParameterValue(PARAMETER_A); // delay time
    float feedback  = getParameterValue(PARAMETER_B); // feedback amount
    float wetDry    = getParameterValue(PARAMETER_D); // wet/dry mix
    // Dead zone: ignore parameter jitter smaller than 0.01.
    if(fabs(time - delayTime) < 0.01)
        delayTime = time;
    else
        time = delayTime;
    float delaySamples = delayTime * (delayBuffer.getSize()-1);
    int size = buffer.getSize();
    float* x = buffer.getSamples(0);
    process(size, x, y); // low-pass filter the input for the delay buffer
    for(int n = 0; n < size; n++){
        // Ramp the delay length linearly across the block to avoid zipper noise.
        dSamples = olddelaySamples + (delaySamples - olddelaySamples) * n / size;
        y[n] = y[n] + feedback * delayBuffer.read(dSamples);
        x[n] = (1.f - wetDry) * x[n] + wetDry * y[n]; // crossfade for wet/dry balance
        delayBuffer.write(x[n]);
    }
    olddelaySamples = delaySamples;
}
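delayBuffer.read() is called with a fractional sample offset, which implies an interpolating read. A minimal sketch of such a read as a free function, assuming a raw float delay line and an integer write position (the names here are invented for illustration, not taken from the patch):

// Hypothetical sketch: read 'index' samples behind the write head of a
// circular delay line, with linear interpolation between neighbours.
static float delayRead(const float *data, int size, int writePos, float index)
{
    float pos = (float)writePos - index;
    while (pos < 0.0f)
        pos += (float)size;          // wrap into [0, size)
    int i0 = (int)pos % size;
    int i1 = (i0 + 1) % size;
    float frac = pos - (int)pos;     // fractional part
    return data[i0] * (1.0f - frac) + data[i1] * frac;
}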
Example 5: DroneBoxPatch
DroneBoxPatch()
    : mRamp(0.1)
    , mPrevCoarsePitch(-1.)
    , mPrevFinePitch(-1.)
    , mPrevDecay(-1.)
{
    registerParameter(PARAMETER_A, "Coarse Pitch", "Coarse Pitch");
    registerParameter(PARAMETER_B, "Fine Pitch", "Fine Pitch");
    registerParameter(PARAMETER_C, "Decay", "Decay");
    registerParameter(PARAMETER_D, "Mix", "Mix");

    mOldValues[0] = 0.f;
    mOldValues[1] = 0.f;
    mOldValues[2] = 0.f;
    mOldValues[3] = 0.f;

    // Give each comb filter a stereo memory buffer of its own.
    for (int c = 0; c < NUM_COMBS; c++)
    {
        AudioBuffer* buffer = createMemoryBuffer(2, BUF_SIZE);
        mCombs[c].setBuffer(buffer->getSamples(0), buffer->getSamples(1));
        mCombs[c].setSampleRate(getSampleRate());
        mCombs[c].clearBuffer();
    }

    mDCBlockerL.setSampleRate(getSampleRate());
    mDCBlockerR.setSampleRate(getSampleRate());
}
Example 6: lua_AudioBuffer_addRef
int lua_AudioBuffer_addRef(lua_State* state)
{
    // Get the number of parameters.
    int paramCount = lua_gettop(state);

    // Attempt to match the parameters to a valid binding.
    switch (paramCount)
    {
        case 1:
        {
            if ((lua_type(state, 1) == LUA_TUSERDATA))
            {
                AudioBuffer* instance = getInstance(state);
                instance->addRef();
                return 0;
            }

            lua_pushstring(state, "lua_AudioBuffer_addRef - Failed to match the given parameters to a valid function signature.");
            lua_error(state);
            break;
        }
        default:
        {
            lua_pushstring(state, "Invalid number of parameters (expected 1).");
            lua_error(state);
            break;
        }
    }
    return 0;
}
Example 7: processAudio
void processAudio(AudioBuffer &buffer){
    setCoeffs(getLpFreq(), 0.8f);
    float delayTime = getParameterValue(PARAMETER_A); // delay time
    float feedback  = getParameterValue(PARAMETER_B); // feedback amount
    float wetDry    = getParameterValue(PARAMETER_D); // wet/dry mix
    float delaySamples = delayTime * (DELAY_BUFFER_LENGTH-1);
    int size = buffer.getSize();
    for (int ch = 0; ch < buffer.getChannels(); ++ch) {
        float* buf = buffer.getSamples(ch);
        process(size, buf, outBuf); // low-pass filter the input for the delay buffer
        for(int i = 0; i < size; i++){
            outBuf[i] = outBuf[i] + feedback * delayBuffer.read(delaySamples);
            buf[i] = (1.f - wetDry) * buf[i] + wetDry * outBuf[i]; // crossfade for wet/dry balance
            delayBuffer.write(buf[i]);
        }
    }
}
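Note that, unlike Example 4, this version iterates over every channel but reads from and writes to a single shared delayBuffer, so with more than one channel the per-channel writes alternate in the same delay memory and the channels' echoes mix together. Whether that is intentional cannot be determined from the snippet alone.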
Example 8: put
// Put data into the ring buffer.
void RingBuffer::put(AudioBuffer& buf)
{
    std::lock_guard<std::mutex> l(lock_);
    const size_t sample_num = buf.frames();
    const size_t buffer_size = buffer_.frames();
    if (buffer_size == 0)
        return;

    // Make room by discarding the oldest samples if the ring would overflow.
    size_t len = putLength();
    if (buffer_size - len < sample_num)
        discard(sample_num);

    // Add more channels if the input buffer holds more channels than the ring.
    if (buffer_.channels() < buf.channels())
        buffer_.setChannelNum(buf.channels());

    size_t toCopy = sample_num;
    size_t in_pos = 0;
    size_t pos = endPos_;
    while (toCopy) {
        size_t block = toCopy;
        if (block > buffer_size - pos) // wrap the block around the ring?
            block = buffer_size - pos; // fill up to the end of the buffer
        buffer_.copy(buf, block, in_pos, pos);
        in_pos += block;
        pos = (pos + block) % buffer_size;
        toCopy -= block;
    }
    endPos_ = pos;
    not_empty_.notify_all();
}
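The copy loop is the standard two-segment ring-buffer write: each iteration copies the largest contiguous block, so a put wraps around the end of the ring at most once. The same pattern on a raw float array, as a standalone sketch (all names hypothetical):

#include <algorithm>
#include <cstddef>

// Hypothetical sketch: write 'count' samples into a ring of 'capacity'
// samples starting at 'writePos', wrapping at most once per pass.
static size_t ringWrite(float *ring, size_t capacity, size_t writePos,
                        const float *src, size_t count)
{
    size_t remaining = count;
    while (remaining) {
        // Largest contiguous block before the end of the ring.
        size_t block = std::min(remaining, capacity - writePos);
        std::copy(src, src + block, ring + writePos);
        src += block;
        writePos = (writePos + block) % capacity;
        remaining -= block;
    }
    return writePos; // new write position
}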
Example 9: append
void SampleCollector::append(const AudioBuffer & buf)
{
    uint32_t count = buf.get_count();
    uint32_t last = (m_first + m_count) % m_length;
    uint32_t firstHalf = std::min(count, m_length - last);
    uint32_t secondHalf = count - firstHalf;

    // Copy the first, contiguous half.
    std::memcpy(m_samples + last, buf.get_buffer(), firstHalf * sizeof(float));

    // Copy the wrapped remainder, if any.
    if (secondHalf)
    {
        std::memcpy(m_samples, buf.get_buffer() + firstHalf, secondHalf * sizeof(float));
    }

    // If full, advance the read position past the overwritten samples.
    uint32_t newLast = (last + count) % m_length;
    if (m_count >= m_length && newLast > m_first)
    {
        m_first = newLast;
    }
    if (m_count < m_length)
    {
        m_count = std::min(m_count + count, m_length);
    }
}
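SampleCollector therefore keeps the newest m_length samples, advancing m_first once the buffer is full. Reading the collected samples back in chronological order could look like this, under the same member-naming assumptions (this method is not in the source):

// Hypothetical sketch: copy the collected samples out, oldest first.
void SampleCollector::read_ordered(float *dest) const
{
    for (uint32_t i = 0; i < m_count; ++i)
        dest[i] = m_samples[(m_first + i) % m_length];
}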
Example 10: process
void JuceDemoPluginAudioProcessor::process (AudioBuffer<FloatType>& buffer,
                                            MidiBuffer& midiMessages,
                                            AudioBuffer<FloatType>& delayBuffer)
{
    const int numSamples = buffer.getNumSamples();

    // Apply our gain-change to the incoming data..
    applyGain (buffer, delayBuffer);

    // Now pass any incoming midi messages to our keyboard state object, and let it
    // add messages to the buffer if the user is clicking on the on-screen keys.
    keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);

    // And now get our synth to process these midi events and generate its output.
    synth.renderNextBlock (buffer, midiMessages, 0, numSamples);

    // Apply our delay effect to the new output..
    applyDelay (buffer, delayBuffer);

    // In case we have more outputs than inputs, clear any output channels that
    // didn't contain input data (these aren't guaranteed to be empty - they
    // may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, numSamples);

    // Now ask the host for the current time so we can store it to be displayed later.
    updateCurrentTimeInfoFromHost();
}
Example 11: processAudio
void processAudio(AudioBuffer &buffer){
    double rate = getSampleRate();
    unsigned int sampleDelay = getSampleDelay(getRampedParameterValue(PARAMETER_A), rate);
    sampleDelay = min(sampleDelay, bufferSize);
    float feedback = getRampedParameterValue(PARAMETER_B);
    float bias = getBiasExponent(1 - getRampedParameterValue(PARAMETER_C));
    float dryWetMix = getRampedParameterValue(PARAMETER_D);
    int size = buffer.getSize();
    for(int ch = 0; ch < buffer.getChannels(); ++ch)
    {
        float* buf = buffer.getSamples(ch);
        for (int i = 0; i < size; ++i)
        {
            float delaySample = circularBuffer[writeIdx]; // read before overwriting
            float v = buf[i] + circularBuffer[writeIdx] * feedback;
            v = applyBias(v, bias);
            circularBuffer[writeIdx] = min(1.0f, max(-1.0f, v)); // guard: hard range limits
            buf[i] = linearBlend(buf[i], delaySample, dryWetMix);
            writeIdx = (writeIdx + 1) % sampleDelay; // increment and wrap the write index
        }
    }
}
Example 12: applyDelay
void JuceDemoPluginAudioProcessor::applyDelay (AudioBuffer<FloatType>& buffer, AudioBuffer<FloatType>& delayBuffer)
{
    const int numSamples = buffer.getNumSamples();
    const float delayLevel = *delayParam;
    int delayPos = 0;

    for (int channel = 0; channel < getNumInputChannels(); ++channel)
    {
        FloatType* const channelData = buffer.getWritePointer (channel);
        FloatType* const delayData = delayBuffer.getWritePointer (jmin (channel, delayBuffer.getNumChannels() - 1));
        delayPos = delayPosition;

        for (int i = 0; i < numSamples; ++i)
        {
            const FloatType in = channelData[i];
            channelData[i] += delayData[delayPos];                          // mix the delayed signal in
            delayData[delayPos] = (delayData[delayPos] + in) * delayLevel;  // write back with feedback
            if (++delayPos >= delayBuffer.getNumSamples())
                delayPos = 0;
        }
    }

    delayPosition = delayPos;
}
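Per sample this implements out[n] = in[n] + d[n] and d[n + N] = (d[n] + in[n]) * delayLevel, where N = delayBuffer.getNumSamples() is the delay length, so delayLevel acts both as the wet level of the first echo and as the loop gain of the feedback path; keeping it below 1 guarantees the echoes decay.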
Example 13: processAudio
void processAudio(AudioBuffer &buffer) {
    double rate = getSampleRate();
    float p1 = getRampedParameterValue(PARAMETER_A);
    float freq1 = p1*p1 * (MAX_FREQ-MIN_FREQ) + MIN_FREQ;
    double step1 = freq1 / rate;
    float amt1 = getRampedParameterValue(PARAMETER_B);
    float p2 = getRampedParameterValue(PARAMETER_C);
    float freq2 = p2*p2 * (MAX_FREQ-MIN_FREQ) + MIN_FREQ;
    float amt2 = getRampedParameterValue(PARAMETER_D);
    double step2 = freq2 / rate;
    int size = buffer.getSize();
    for(int ch = 0; ch < buffer.getChannels(); ++ch)
    {
        float* buf = buffer.getSamples(ch);
        for (int i = 0; i < size; ++i)
        {
            float mod1 = sin(2 * M_PI * phase1) / 2 + .5; // 0..1
            float mod2 = sin(2 * M_PI * phase2) / 2 + .5; // 0..1
            float gain1 = (amt1 * mod1) + (1 - amt1);
            float gain2 = (amt2 * mod2) + (1 - amt2);
            buf[i] = (gain1 * gain2) * buf[i];
            phase1 += step1;
            phase2 += step2;
        }
    }
}
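The gain expression (amt * mod) + (1 - amt) crossfades between unity gain (amt = 0) and the raw LFO value (amt = 1), so the depth controls never push the gain above one. Factored out as a small helper, purely as an illustration (not part of the original patch):

#include <cmath>

// Depth-scaled tremolo gain: returns 1.0 at depth 0 and the raw
// unipolar LFO value at depth 1.
static float tremoloGain(float phase, float depth)
{
    float lfo = std::sin(2.0f * (float)M_PI * phase) * 0.5f + 0.5f; // 0..1
    return depth * lfo + (1.0f - depth);
}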
Example 14: lua_AudioBuffer_getRefCount
int lua_AudioBuffer_getRefCount(lua_State* state)
{
    // Get the number of parameters.
    int paramCount = lua_gettop(state);

    // Attempt to match the parameters to a valid binding.
    switch (paramCount)
    {
        case 1:
        {
            if ((lua_type(state, 1) == LUA_TUSERDATA))
            {
                AudioBuffer* instance = getInstance(state);
                unsigned int result = instance->getRefCount();

                // Push the return value onto the stack.
                lua_pushunsigned(state, result);
                return 1;
            }

            lua_pushstring(state, "lua_AudioBuffer_getRefCount - Failed to match the given parameters to a valid function signature.");
            lua_error(state);
            break;
        }
        default:
        {
            lua_pushstring(state, "Invalid number of parameters (expected 1).");
            lua_error(state);
            break;
        }
    }
    return 0;
}
Example 15: processAudio
void processAudio(AudioBuffer &buffer)
{
    // Reasonably assume we will not have more than 32 channels.
    float* ins[32];
    float* outs[32];
    int n = buffer.getChannels();
    if ( (fDSP.getNumInputs() < 32) && (fDSP.getNumOutputs() < 32) ) {
        // Build the table of input channels; extra DSP inputs wrap
        // around the available hardware channels.
        for(int ch = 0; ch < fDSP.getNumInputs(); ++ch) {
            ins[ch] = buffer.getSamples(ch % n);
        }
        // Build the table of output channels the same way, so the
        // Faust DSP processes the buffer in place.
        for(int ch = 0; ch < fDSP.getNumOutputs(); ++ch) {
            outs[ch] = buffer.getSamples(ch % n);
        }
        // Read OWL parameters and update the corresponding Faust widget zones.
        fUI.update();
        // Process the audio samples.
        fDSP.compute(buffer.getSize(), ins, outs);
    }
}