本文整理汇总了C++中AudioSampleBuffer::setSample方法的典型用法代码示例。如果您正苦于以下问题:C++ AudioSampleBuffer::setSample方法的具体用法?C++ AudioSampleBuffer::setSample怎么用?C++ AudioSampleBuffer::setSample使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类AudioSampleBuffer
的用法示例。
在下文中一共展示了AudioSampleBuffer::setSample方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: createTestSound
// create a test sound which consists of a series of randomly-spaced audio spikes..
void createTestSound()
{
const int length = ((int) sampleRate) / 4;
testSound.setSize (1, length);
testSound.clear();
Random rand;
for (int i = 0; i < length; ++i)
testSound.setSample (0, i, (rand.nextFloat() - rand.nextFloat() + rand.nextFloat() - rand.nextFloat()) * 0.06f);
spikePositions.clear();
int spikePos = 0;
int spikeDelta = 50;
while (spikePos < length - 1)
{
spikePositions.add (spikePos);
testSound.setSample (0, spikePos, 0.99f);
testSound.setSample (0, spikePos + 1, -0.99f);
spikePos += spikeDelta;
spikeDelta += spikeDelta / 6 + rand.nextInt (5);
}
}
示例2: processBlock
// Bitcrusher: averages groups of samples (downsampling), quantizes the
// average to a reduced bit depth, then crossfades dry/crushed per 'wet'.
void BitcrushAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    this->initializing(buffer);

    // Clear output channels with no matching input so they don't emit garbage.
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i) {
        buffer.clear(i, 0, buffer.getNumSamples());
    }

    const float crush = bitcrush->getValue();
    const float wet_ = wet->getValue();
    // Number of consecutive samples averaged together; at least 1.
    const int groupedSamples = std::max(1.f, downsample->getValue() * 100);
    // Interpolate bit depth from 12 bits (crush == 0) down to 1 bit
    // (crush == 1), then derive the number of quantization steps.
    const float bitdepth = 12.f * (1.f - crush) + 1.f * crush;
    const int steps = exp2(bitdepth);

    const int numSamples = buffer.getNumSamples();

    for (int channel = 0; channel < getNumInputChannels(); channel++)
    {
        // Process every complete group.
        // FIX: the original condition (sample < numSamples - groupedSamples)
        // skipped the last full group whenever numSamples was an exact
        // multiple of groupedSamples, leaving those samples unprocessed.
        int sample = 0;
        for (; sample + groupedSamples <= numSamples; sample += groupedSamples) {
            float averagedSample = 0.f;
            for (int i = 0; i < groupedSamples; i++) {
                averagedSample += buffer.getSample(channel, i + sample) / groupedSamples;
            }
            const int discretizedSample = averagedSample * steps;
            const float crushed = float(discretizedSample) / steps;
            for (int i = 0; i < groupedSamples; i++) {
                const float dry = buffer.getSample(channel, i + sample);
                buffer.setSample(channel, i + sample, dry * (1.f - wet_) + crushed * wet_);
            }
        }

        // Handle the trailing partial group, if any.
        // FIX: guarding on remainder > 0 avoids the original's division by
        // (numSamples % groupedSamples), which is 0 for exact multiples.
        const int remainder = numSamples - sample;
        if (remainder > 0) {
            float averagedSample = 0.f;
            for (int i = sample; i < numSamples; i++) {
                averagedSample += buffer.getSample(channel, i) / remainder;
            }
            const int discretizedSample = averagedSample * steps;
            const float crushed = float(discretizedSample) / steps;
            for (int i = sample; i < numSamples; i++) {
                const float dry = buffer.getSample(channel, i);
                buffer.setSample(channel, i, dry * (1.f - wet_) + crushed * wet_);
            }
        }
    }

    this->meteringBuffer(buffer);
    this->finalizing(buffer);
}
示例3: processBlock
// Direct-form-I biquad filter applied to every channel:
//   y[n] = (b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]) / a0
void EQNode::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // FIX: the four history variables were previously read before ever being
    // written (undefined behaviour on the first samples), and one channel's
    // history bled into the next channel. They are now zero-initialised and
    // reset per channel.
    // NOTE(review): for correct filtering across successive blocks the
    // history should be per-channel member state; as written, the filter
    // restarts from silence at every block boundary — confirm intent.
    for(int channel = 0; channel < buffer.getNumChannels(); ++channel)
    {
        float in_samp_m1 = 0.f, in_samp_m2 = 0.f;
        float out_samp_m1 = 0.f, out_samp_m2 = 0.f;
        for(int sample = 0; sample < buffer.getNumSamples(); ++sample)
        {
            float in_samp = buffer.getSample(channel, sample);
            float out_samp = (_b0 * in_samp + _b1 * in_samp_m1 + _b2 * in_samp_m2 - _a1 * out_samp_m1 - _a2 * out_samp_m2) / _a0;
            // Shift the delay line.
            in_samp_m2 = in_samp_m1;
            in_samp_m1 = in_samp;
            out_samp_m2 = out_samp_m1;
            out_samp_m1 = out_samp;
            buffer.setSample(channel, sample, out_samp);
        }
    }
}
示例4: processBlock
// Per-channel gain with click-free parameter smoothing: the gain ramps
// linearly from the previous block's value over the first maxInterpolation
// samples, then stays at the new value for the rest of the block.
void VolumeAudioProcessor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    this->initializing(buffer);

    // Convert the dB-style parameter value to a linear gain factor.
    // NOTE(review): uses /10 (power dB) rather than /20 (amplitude dB) —
    // preserved as-is; confirm intent.
    auto toGain = [](float value) { return pow(10.f, value / 10.f); };

    // Ramp one channel's gain from oldGain to newGain over the first
    // rampLength samples, then apply newGain flat to the remainder.
    // FIX: loop counters are int (not size_t) to avoid signed/unsigned
    // comparison against buffer.getNumSamples().
    auto applyRampedGain = [&buffer](int channel, int rampLength, float oldGain, float newGain)
    {
        for (int i = 0; i < rampLength; ++i)
        {
            const float v = buffer.getSample(channel, i);
            buffer.setSample(channel, i,
                             v * (oldGain + ((i + 1) * (newGain - oldGain) / rampLength)));
        }
        for (int i = rampLength; i < buffer.getNumSamples(); ++i)
        {
            const float v = buffer.getSample(channel, i);
            buffer.setSample(channel, i, v * newGain);
        }
    };

    const int maxInterpolation = int(buffer.getNumSamples() * volumeL->getBufferScalingValue());

    if (stereoCoupling->getBoolValue())
    {
        // Coupled: the left parameter drives both channels.
        const float currentVolumeL = toGain(volumeL->getValue());
        const float oldVolumeL = toGain(volumeL->getOldValue());
        applyRampedGain(/*channel*/ 0, maxInterpolation, oldVolumeL, currentVolumeL);
        applyRampedGain(/*channel*/ 1, maxInterpolation, oldVolumeL, currentVolumeL);
    }
    else
    {
        // Independent left/right gains.
        const float currentVolumeL = toGain(volumeL->getValue());
        const float currentVolumeR = toGain(volumeR->getValue());
        const float oldVolumeL = toGain(volumeL->getOldValue());
        const float oldVolumeR = toGain(volumeR->getOldValue());
        applyRampedGain(/*channel*/ 0, maxInterpolation, oldVolumeL, currentVolumeL);
        applyRampedGain(/*channel*/ 1, maxInterpolation, oldVolumeR, currentVolumeR);
    }

    // Set current values as old values for interpolation in next buffer iteration
    volumeL->setOldValue(volumeL->getValue());
    volumeR->setOldValue(volumeR->getValue());
    this->meteringBuffer(buffer);
    this->finalizing(buffer);
}
示例5: processBlock
// Stereo panner with click-free smoothing: the pan position is interpolated
// from the previous block's value over the first maxInterpolation samples,
// then held at the current value for the rest of the block.
void PanAudioProcessor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    this->initializing(buffer);
    // Clear output channels with no matching input so they don't emit garbage.
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i) {
        buffer.clear(i, 0, buffer.getNumSamples());
    }

    // Panning law for a normalized position p in [0, 1]
    // (0 = hard left, 1 = hard right):
    //   left  = 1 - 2*(max(0.5, p) - 0.5)  -> unity until centre, then fades out
    //   right = 2*min(0.5, p)              -> fades in until centre, then unity
    auto leftGain  = [](float p) { return 1.f - 2 * (std::max(0.5f, p) - 0.5f); };
    auto rightGain = [](float p) { return 2 * (std::min(0.5f, p)); };

    // currentPanning: set by Editor before this buffer iteration.
    // oldPanning:     set by Editor after the last buffer iteration.
    const float currentPanning = panning->getNormalizedValue();
    const float oldPanning = panning->getNormalizedOldValue();
    // FIX: loop counters are int (not size_t) to avoid signed/unsigned
    // comparison against buffer.getNumSamples().
    const int maxInterpolation = int(buffer.getNumSamples() * panning->getBufferScalingValue());

    if (getNumInputChannels() == 1)
    {
        // Mono input: fan channel 0 out into a stereo pair.
        for (int i = 0; i < maxInterpolation; ++i)
        {
            const float in = buffer.getSample(/*channel*/ 0, i);
            const float p = oldPanning + ((i + 1) * (currentPanning - oldPanning) / maxInterpolation);
            buffer.setSample(/*channel*/ 0, i, in * leftGain(p));
            buffer.setSample(/*channel*/ 1, i, in * rightGain(p));
        }
        for (int i = maxInterpolation; i < buffer.getNumSamples(); ++i)
        {
            const float in = buffer.getSample(/*channel*/ 0, i);
            buffer.setSample(/*channel*/ 0, i, in * leftGain(currentPanning));
            buffer.setSample(/*channel*/ 1, i, in * rightGain(currentPanning));
        }
    }
    else
    {
        // Stereo input: attenuate each channel independently.
        for (int i = 0; i < maxInterpolation; ++i)
        {
            const float p = oldPanning + ((i + 1) * (currentPanning - oldPanning) / maxInterpolation);
            buffer.setSample(/*channel*/ 0, i, buffer.getSample(/*channel*/ 0, i) * leftGain(p));
            buffer.setSample(/*channel*/ 1, i, buffer.getSample(/*channel*/ 1, i) * rightGain(p));
        }
        for (int i = maxInterpolation; i < buffer.getNumSamples(); ++i)
        {
            buffer.setSample(/*channel*/ 0, i, buffer.getSample(/*channel*/ 0, i) * leftGain(currentPanning));
            buffer.setSample(/*channel*/ 1, i, buffer.getSample(/*channel*/ 1, i) * rightGain(currentPanning));
        }
    }
    panning->setOldValue();
    this->meteringBuffer(buffer);
    this->finalizing(buffer);
}
示例6: processBlock
// Simple monophonic synth voice: tracks MIDI note-on/off as a gate, then
// renders an oscillator pair through a filter, writing the same signal to
// output channels 0 and 1.
void SynthAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int totalNumInputChannels = getTotalNumInputChannels();
    const int totalNumOutputChannels = getTotalNumOutputChannels();

    // Monophonic note handling: count overlapping note-ons/offs; while any
    // note is held, gate the amp and retune to the most recent note.
    // FIX: removed the unused local `MidiBuffer Midi;`.
    int time;
    MidiMessage m;
    for (MidiBuffer::Iterator i(midiMessages); i.getNextEvent(m, time);)
    {
        if (m.isNoteOn()) {
            noteOn++;
        }
        if (m.isNoteOff()) {
            noteOn--;
        }
        if (noteOn > 0) {
            monoNoteOn = 1.0f;
            env.reset();
            // Handle the pitch of the note.
            noteVal = m.getNoteNumber();
            osc.setF(m.getMidiNoteInHertz(noteVal));
        } else {
            monoNoteOn = 0.0f;
        }
    }

    // In case we have more outputs than inputs, clear output channels that
    // didn't contain input data (they may contain garbage).
    for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    // FIX: the original looped over every output channel but only did work
    // when channel == 0 — replaced with a single guarded pass.
    // NOTE(review): writes to channels 0 and 1 unconditionally — assumes a
    // stereo output bus; confirm against the plugin's bus layout.
    if (totalNumOutputChannels > 0)
    {
        for (int sample = 0; sample < buffer.getNumSamples(); ++sample)
        {
            // Parameter values are re-read every sample (inefficient, but
            // preserved so mid-block parameter changes behave as before).
            freqValScaled = 20000.0f * pow(freqP->get(), 3.0f);
            envValScaled = 10000.0f * pow(envP->get(), 3.0f);
            speedValScaled = pow((1.0f - speedP->get()), 2.0f);
            oscValScaled = (oscP->get() - 0.5f) * 70.0f;
            detValScaled = (detP->get() - 0.5f) * 24.0f;

            filter.setFc(freqSmoothing.process(freqValScaled + (envValScaled * pow(env.process(),3.0f))) / UPSAMPLING);
            env.setSpeed(speedValScaled);
            filter.setQ(qP->get());

            // Pitch in semitone-ish units plus modulation and random drift,
            // converted to normalized frequency via exp(x / 17.31).
            float frequency = noteVal + 24.0f + oscValScaled + modOsc.process(0) + (driftSmoothing.process(random.nextFloat() - 0.5f) * 20.0f);
            float frequency2 = exp((frequency + detValScaled + (driftSmoothing2.process(random.nextFloat() - 0.5f) * 10.0f)) / 17.31f) / UPSAMPLING;
            frequency = exp(frequency / 17.31f) / UPSAMPLING;
            osc.setF(frequency);
            osc2.setF(frequency2);

            float monoNoteOn2 = ampSmoothing.process(monoNoteOn);

            // Run the oscillators/filter UPSAMPLING times per output sample;
            // only the last result is kept (naive decimation).
            // FIX: `data` was uninitialised if the loop body never ran.
            float data = 0.0f;
            for (int i = 0; i < UPSAMPLING; i++) {
                data = 20.0f * filter.process(0.1f * osc.process() + ampP->get() * 0.1f * osc2.process());
            }
            data *= monoNoteOn2;
            buffer.setSample(0, sample, data);
            buffer.setSample(1, sample, data);
        }
    }
}