本文整理汇总了C++中AudioBuffer::getSamples方法的典型用法代码示例。如果您正苦于以下问题:C++ AudioBuffer::getSamples方法的具体用法?C++ AudioBuffer::getSamples怎么用?C++ AudioBuffer::getSamples使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类AudioBuffer
的用法示例。
在下文中一共展示了AudioBuffer::getSamples方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: processAudio
// Mono delay with de-zippered delay-time changes: when the target delay moves,
// the read position slews toward it in 5-sample jumps, reading an averaged
// value to mask the discontinuity.
// PARAMETER_A = delay time, PARAMETER_B = feedback, PARAMETER_D = wet/dry mix.
void processAudio(AudioBuffer &buffer) {
float delayTime, feedback, wetDry;
delayTime = getParameterValue(PARAMETER_A);
feedback = getParameterValue(PARAMETER_B);
wetDry = getParameterValue(PARAMETER_D);
int size = buffer.getSize();
int32_t newDelay;
// Only retarget the delay length when the knob has moved noticeably.
// NOTE(review): relies on a float overload of abs() being in scope —
// C's integer abs() would truncate the difference to 0 — confirm.
if(abs(time - delayTime) > 0.01){
newDelay = delayTime * (delayBuffer.getSize()-1);
time = delayTime;
}else{
newDelay = delay;
}
float* x = buffer.getSamples(0);
float y;
for (int n = 0; n < size; n++){
// y = buf[i] + feedback * delayBuffer.read(delay);
// buf[i] = wetDry * y + (1.f - wetDry) * buf[i];
// delayBuffer.write(buf[i]);
// Slew the member `delay` toward the target in 5-sample steps; while more
// than 4 samples away, read an average over the jump instead of one tap.
if(newDelay - delay > 4){
y = getDelayAverage(delay-5, 5);
delay -= 5;
}else if(delay - newDelay > 4){
y = getDelayAverage(delay+5, 5);
delay += 5;
}else{
y = delayBuffer.read(delay);
}
x[n] = wetDry * y + (1.f - wetDry) * x[n]; // crossfade for wet/dry balance
delayBuffer.write(feedback * x[n]);
}
}
示例2: mixAudioBufferChannelsLogrithmicDRC
// Down-mix the channels of audioBuffer into mixBuffer with logarithmic
// dynamic-range compression, scaling each channel by channelLevels and
// limiting against threshold. Dispatches on the buffer's sample format to
// the matching mixChannelsLogrithmicDRC<T> instantiation (planar and
// interleaved variants share the same element type).
// No-op when the source buffer has no channels or an unrecognized format.
void mixAudioBufferChannelsLogrithmicDRC(AudioBuffer &audioBuffer, std::vector<float> &channelLevels, AudioBuffer &mixBuffer, float threshold)
{
    if(audioBuffer.getChannels() == 0)
        return;
    AudioFormat format = audioBuffer.getFormat();
    unsigned int samples = audioBuffer.getSamples();
    // reinterpret_cast (rather than C-style casts) makes the raw-pointer
    // reinterpretation explicit and greppable.
    switch(format)
    {
    case AudioFormat::UInt8:
    case AudioFormat::UInt8P:
        mixChannelsLogrithmicDRC<uint8_t>(reinterpret_cast<uint8_t *>(audioBuffer.getBuffer()), channelLevels, reinterpret_cast<uint8_t *>(mixBuffer.getBuffer()), samples, threshold);
        break;
    case AudioFormat::Int16:
    case AudioFormat::Int16P:
        mixChannelsLogrithmicDRC<int16_t>(reinterpret_cast<int16_t *>(audioBuffer.getBuffer()), channelLevels, reinterpret_cast<int16_t *>(mixBuffer.getBuffer()), samples, threshold);
        break;
    case AudioFormat::Int32:
    case AudioFormat::Int32P:
        mixChannelsLogrithmicDRC<int32_t>(reinterpret_cast<int32_t *>(audioBuffer.getBuffer()), channelLevels, reinterpret_cast<int32_t *>(mixBuffer.getBuffer()), samples, threshold);
        break;
    case AudioFormat::Float:
    case AudioFormat::FloatP:
        mixChannelsLogrithmicDRC<float>(reinterpret_cast<float *>(audioBuffer.getBuffer()), channelLevels, reinterpret_cast<float *>(mixBuffer.getBuffer()), samples, threshold);
        break;
    case AudioFormat::Double:
    case AudioFormat::DoubleP:
        mixChannelsLogrithmicDRC<double>(reinterpret_cast<double *>(audioBuffer.getBuffer()), channelLevels, reinterpret_cast<double *>(mixBuffer.getBuffer()), samples, threshold);
        break;
    default:
        // Unknown/unsupported format: leave mixBuffer untouched rather than
        // reinterpret the memory with the wrong sample type.
        break;
    }
}
示例3: processAudio
// Additive oscillator bank: 12 partials whose phase increments are spaced
// geometrically above a base increment, each drifting by `rate` every sample.
// When the base increment leaves [minf, maxf] it wraps to the opposite bound.
// PARAMETER_A = base frequency, PARAMETER_B = range, PARAMETER_C = spread,
// PARAMETER_D = drift rate. Output is summed into the left channel.
void processAudio(AudioBuffer& buf){
    float minf = getParameterValue(PARAMETER_A)*0.1 + 0.001;
    float maxf = min(0.4, minf + getParameterValue(PARAMETER_B)*0.2);
    const int tones = 12;
    float spread = getParameterValue(PARAMETER_C) + 1.0;
    float rate = 1.0 + (getParameterValue(PARAMETER_D) - 0.5)*0.00002;
    int blockLen = buf.getSize();
    FloatArray out = buf.getSamples(LEFT_CHANNEL);
    // Geometric spacing of the increments across the bank.
    for(int t = 1; t < tones; ++t)
        inc[t] = inc[t-1]*spread;
    for(int i = 0; i < blockLen; ++i){
        for(int t = 0; t < tones; ++t){
            // Amplitude falls off with normalized position inside the band.
            float amp = getAmplitude((inc[t]-minf)/(maxf-minf));
            out[i] += amp * getWave(acc[t]);
            acc[t] += inc[t];
            // Keep each phase accumulator inside [0, 1).
            if(acc[t] > 1.0){
                acc[t] -= 1.0;
            }else if(acc[t] < 0.0){
                acc[t] += 1.0;
            }
            inc[t] *= rate;
        }
    }
    // Fold the base increment back into range once it drifts out.
    if(inc[0] > maxf)
        inc[0] = minf;
    else if(inc[0] < minf)
        inc[0] = maxf;
}
示例4: processAudio
// Bias-shaped feedback delay. The circular buffer stores the bias-waveshaped,
// hard-clipped sum of input and feedback; the output is a linear blend of the
// dry input and the delayed sample.
// PARAMETER_A = delay time, PARAMETER_B = feedback, PARAMETER_C = bias shape,
// PARAMETER_D = dry/wet mix.
void processAudio(AudioBuffer &buffer){
    double rate = getSampleRate();
    unsigned int sampleDelay = getSampleDelay(getRampedParameterValue(PARAMETER_A), rate);
    sampleDelay = min(sampleDelay, bufferSize);
    if(sampleDelay == 0)
        sampleDelay = 1; // guard: the modulo below would divide by zero
    float feedback = getRampedParameterValue(PARAMETER_B);
    float bias = getBiasExponent(1 - getRampedParameterValue(PARAMETER_C));
    float dryWetMix = getRampedParameterValue(PARAMETER_D);
    int size = buffer.getSize();
    for(int ch = 0; ch < buffer.getChannels(); ++ch)
    {
        float* buf = buffer.getSamples(ch);
        for (int i = 0; i < size; ++i)
        {
            float delaySample = circularBuffer[writeIdx];
            float v = buf[i] + delaySample * feedback;
            v = applyBias(v, bias);
            circularBuffer[writeIdx] = min(1, max(-1, v)); // Guard: hard range limits.
            buf[i] = linearBlend(buf[i], delaySample, dryWetMix);
            // was: writeIdx = (++writeIdx) % sampleDelay; — unsequenced
            // modification and read of writeIdx (UB before C++17). A plain
            // advance is well-defined and equivalent.
            writeIdx = (writeIdx + 1) % sampleDelay;
        }
    }
}
示例5: processAudio
// Morph between two patches: run the green patch on a copy of the input and
// the red patch in place, then crossfade the two results per sample with the
// morph control (0 = all red, 1 = all green).
void processAudio(AudioBuffer &buffer){
    FloatArray dryL = buffer.getSamples(LEFT_CHANNEL);
    FloatArray dryR = buffer.getSamples(RIGHT_CHANNEL);
    FloatArray cpyL = buf->getSamples(LEFT_CHANNEL);
    FloatArray cpyR = buf->getSamples(RIGHT_CHANNEL);
    float blend = getParameterValue(MORPH_PARAMETER);
    // Duplicate the input so each patch processes the same signal.
    cpyL.copyFrom(dryL);
    cpyR.copyFrom(dryR);
    green.processAudio(*buf);    // green renders into the copy
    red.processAudio(buffer);    // red renders in place
    int len = buffer.getSize();
    for(int i = 0; i < len; ++i){
        dryL[i] = dryL[i]*(1-blend) + cpyL[i]*blend;
        dryR[i] = dryR[i]*(1-blend) + cpyR[i]*blend;
    }
}
示例6: processAudio
// Dual-LFO tremolo: two independent sine modulators multiply the signal.
// PARAMETER_A/C set the LFO rates (squared for finer low-frequency control),
// PARAMETER_B/D set the respective modulation depths.
void processAudio(AudioBuffer &buffer) {
    double sr = getSampleRate();
    float a = getRampedParameterValue(PARAMETER_A);
    float lfo1Freq = a*a * (MAX_FREQ-MIN_FREQ) + MIN_FREQ;
    double inc1 = lfo1Freq / sr;             // phase increment per sample
    float depth1 = getRampedParameterValue(PARAMETER_B);
    float c = getRampedParameterValue(PARAMETER_C);
    float lfo2Freq = c*c * (MAX_FREQ-MIN_FREQ) + MIN_FREQ;
    float depth2 = getRampedParameterValue(PARAMETER_D);
    double inc2 = lfo2Freq / sr;
    int numSamples = buffer.getSize();
    for(int ch = 0; ch < buffer.getChannels(); ++ch)
    {
        float* samples = buffer.getSamples(ch);
        for (int i = 0; i < numSamples; ++i)
        {
            // Unipolar sine LFOs in 0..1.
            float lfo1 = sin(2 * M_PI * phase1) / 2 + .5;
            float lfo2 = sin(2 * M_PI * phase2) / 2 + .5;
            // Depth blends between unity gain and full modulation.
            float gainA = (depth1 * lfo1) + (1 - depth1);
            float gainB = (depth2 * lfo2) + (1 - depth2);
            samples[i] = (gainA * gainB) * samples[i];
            phase1 += inc1;
            phase2 += inc2;
        }
    }
}
示例7: processAudio
// Low-pass-filtered delay with a per-block linear ramp of the delay time.
// PARAMETER_A = delay time, PARAMETER_B = feedback, PARAMETER_D = wet/dry.
void processAudio(AudioBuffer &buffer){
// NOTE(review): runtime-sized stack array is a VLA — a compiler extension,
// not standard C++; relies on gcc/clang — confirm this is intended.
float y[getBlockSize()];
setCoeffs(getLpFreq(), 0.8f);
float delayTime = getParameterValue(PARAMETER_A); // get delay time value
float feedback = getParameterValue(PARAMETER_B); // get feedback value
float wetDry = getParameterValue(PARAMETER_D); // get gain value
// Snap tiny knob jitter back to the previous value to avoid retuning.
// NOTE(review): relies on a float overload of abs() being in scope — confirm.
if(abs(time - delayTime) < 0.01)
delayTime = time;
else
time = delayTime;
float delaySamples = delayTime * (delayBuffer.getSize()-1);
int size = buffer.getSize();
float* x = buffer.getSamples(0);
process(size, x, y); // low pass filter for delay buffer
for(int n = 0; n < size; n++){
//linear interpolation for delayBuffer index
// dSamples/olddelaySamples are members: ramp the delay length across the
// block from last block's value toward the new target.
dSamples = olddelaySamples + (delaySamples - olddelaySamples) * n / size;
y[n] = y[n] + feedback * delayBuffer.read(dSamples);
x[n] = (1.f - wetDry) * x[n] + wetDry * y[n]; //crossfade for wet/dry balance
delayBuffer.write(x[n]);
}
olddelaySamples = delaySamples;
}
示例8: processAudio
// Low-pass-filtered delay applied to every channel.
// PARAMETER_A = delay time, PARAMETER_B = feedback, PARAMETER_D = wet/dry.
void processAudio(AudioBuffer &buffer){
setCoeffs(getLpFreq(), 0.8f);
float delayTime = getParameterValue(PARAMETER_A); // get delay time value
float feedback = getParameterValue(PARAMETER_B); // get feedback value
float wetDry = getParameterValue(PARAMETER_D); // get gain value
float delaySamples = delayTime * (DELAY_BUFFER_LENGTH-1);
int size = buffer.getSize();
// NOTE(review): delayBuffer and outBuf are shared across channels, so with
// more than one channel the later channels read/write the delay line the
// earlier ones just advanced — confirm this interleaving is intended.
for (int ch = 0; ch<buffer.getChannels(); ++ch) {
float* buf = buffer.getSamples(ch);
process(size, buf, outBuf); // low pass filter for delay buffer
for(int i = 0; i < size; i++){
outBuf[i] = outBuf[i] + feedback * delayBuffer.read(delaySamples);
buf[i] = (1.f - wetDry) * buf[i] + wetDry * outBuf[i]; //crossfade for wet/dry balance
delayBuffer.write(buf[i]);
}
}
}
示例9: processAudio
// Six-stage phaser: a sine LFO sweeps the all-pass delay once per block,
// feedback comes from the previous output sample (_zm1), and depth mixes the
// phased signal back onto the dry input.
// PARAMETER_A = sweep rate, PARAMETER_B = depth, PARAMETER_C = feedback.
void processAudio(AudioBuffer &buffer) {
int size = buffer.getSize();
float y;
rate = Rate(getParameterValue(PARAMETER_A));
depth = getParameterValue(PARAMETER_B);
feedback = getParameterValue(PARAMETER_C);
//calculate and update phaser sweep lfo...
// Map the LFO sine into the [_dmin, _dmax] delay range.
float d = _dmin + (_dmax-_dmin) * ((sin( _lfoPhase ) + 1.f)/2.f);
_lfoPhase += rate;
if( _lfoPhase >= M_PI * 2.f )
_lfoPhase -= M_PI * 2.f;
//update filter coeffs
// All six all-pass stages share the same swept delay, set once per block.
for( int i=0; i<6; i++ )
_alps[i].Delay( d );
// for (int ch = 0; ch<buffer.getChannels(); ++ch) {
float* buf = buffer.getSamples(0); // only the first channel is processed
for (int i = 0; i < size; i++) {
//calculate output
// Cascade the six all-pass filters with single-sample feedback via _zm1.
y = _alps[0].Update(_alps[1].Update(_alps[2].Update(_alps[3].Update(_alps[4].Update(
_alps[5].Update( buf[i] + _zm1 * feedback ))))));
_zm1 = y;
buf[i] = buf[i] + y * depth;
// }
}
}
示例10: processAudio
// 2x2 stereo gain matrix: each output channel is a weighted sum of both
// input channels. PARAMETER_A..D are the four matrix coefficients
// (left->left, right->left, left->right, right->right).
void processAudio(AudioBuffer &buffer){
    // assert_param(buffer.getChannels() > 1);
    float ll = getParameterValue(PARAMETER_A);
    float lr = getParameterValue(PARAMETER_B);
    float rl = getParameterValue(PARAMETER_C);
    float rr = getParameterValue(PARAMETER_D);
    int len = buffer.getSize();
    float* left = buffer.getSamples(0);
    // Mono-safe: alias the right channel onto the left when only one exists.
    float* right = buffer.getChannels() > 1 ? buffer.getSamples(1) : left;
    for(int i = 0; i < len; ++i){
        // Compute both outputs before writing so each uses the original inputs.
        float outL = ll*left[i] + lr*right[i];
        float outR = rl*left[i] + rr*right[i];
        left[i] = outL;
        right[i] = outR;
    }
}
示例11: FixedDelayPatch
FixedDelayPatch() {
AudioBuffer* buffer = createMemoryBuffer(1, REQUEST_BUFFER_SIZE);
delayBuffer.initialise(buffer->getSamples(0), buffer->getSize());
registerParameter(PARAMETER_A, "Feedback");
registerParameter(PARAMETER_B, "Mix");
registerParameter(PARAMETER_C, "");
registerParameter(PARAMETER_D, "");
}
示例12: processAudio
// Render one block of the oscillator into the left channel.
// PARAMETER_A (0..1) is mapped linearly onto a tune value in [-1, 4),
// converted to a frequency by the `hz` helper.
void processAudio(AudioBuffer &buffer) {
    float tune = getParameterValue(PARAMETER_A)*5.0 - 1.0;
    FloatArray left = buffer.getSamples(LEFT_CHANNEL);
    hz.setTune(tune);
    float frequency = hz.getFrequency(0);
    algo.setFrequency(frequency);
    algo.getSamples(left); // synthesize directly into the output block
}
示例13: process
// Top-level audio dispatch: swaps in a newly selected patch (muting the
// current block while its constructor runs), then routes audio through the
// green/red patch slots according to the current routing mode.
void PatchController::process(AudioBuffer& buffer){
if(activeSlot == GREEN && green.index != settings.patch_green){
// Silence this block: the patch is replaced mid-callback.
// NOTE(review): zeroing channels*size floats from channel 0 assumes the
// channel data is contiguous in memory — confirm.
memset(buffer.getSamples(0), 0, buffer.getChannels()*buffer.getSize()*sizeof(float));
// green must be active slot when patch constructor is called
green.setPatch(settings.patch_green);
codec.softMute(false);
debugClear();
return;
}else if(activeSlot == RED && red.index != settings.patch_red){
memset(buffer.getSamples(0), 0, buffer.getChannels()*buffer.getSize()*sizeof(float));
// red must be active slot when constructor is called
red.setPatch(settings.patch_red);
codec.softMute(false);
debugClear();
return;
}
// Routing: SINGLE/DUAL run one patch, SERIES chains green then red,
// PARALLEL delegates to processParallel. The active slot's parameters are
// updated from the analog inputs before processing.
switch(mode){
case SINGLE_MODE:
case DUAL_GREEN_MODE:
green.setParameterValues(getAnalogValues());
green.patch->processAudio(buffer);
break;
case DUAL_RED_MODE:
red.setParameterValues(getAnalogValues());
red.patch->processAudio(buffer);
break;
case SERIES_GREEN_MODE:
green.setParameterValues(getAnalogValues());
green.patch->processAudio(buffer);
red.patch->processAudio(buffer);
break;
case SERIES_RED_MODE:
red.setParameterValues(getAnalogValues());
green.patch->processAudio(buffer);
red.patch->processAudio(buffer);
break;
case PARALLEL_GREEN_MODE:
green.setParameterValues(getAnalogValues());
processParallel(buffer);
break;
case PARALLEL_RED_MODE:
red.setParameterValues(getAnalogValues());
processParallel(buffer);
break;
}
}
示例14: processAudio
// Minimal fixed-length feedback delay: blend the delay-line tail with the
// dry input, then write the result back, scaled by the feedback amount.
// PARAMETER_A = feedback, PARAMETER_B = wet/dry mix.
void processAudio(AudioBuffer &buffer) {
    float* samples = buffer.getSamples(0);
    float feedback = getParameterValue(PARAMETER_A);
    float mix = getParameterValue(PARAMETER_B);
    int len = buffer.getSize();
    for(int i = 0; i < len; i++){
        samples[i] = delayBuffer.tail()*mix + samples[i]*(1.0f-mix);
        delayBuffer.write(feedback * samples[i]);
    }
}
示例15: FlangerPatch
FlangerPatch(){
AudioBuffer* buffer = createMemoryBuffer(1, FLANGER_BUFFER_SIZE);
delayBuffer.initialise(buffer->getSamples(0), buffer->getSize());
registerParameter(PARAMETER_A, "Rate");
registerParameter(PARAMETER_B, "Depth");
registerParameter(PARAMETER_C, "Feedback");
registerParameter(PARAMETER_D, "");
phase = 0;
}