

C++ AudioSampleBuffer::getWritePointer Method Code Examples

This article collects typical usage examples of the C++ method AudioSampleBuffer::getWritePointer. If you are wondering how AudioSampleBuffer::getWritePointer is called in practice, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the AudioSampleBuffer class itself.


The following presents 15 code examples of the AudioSampleBuffer::getWritePointer method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
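
Before the collected examples, here is a minimal sketch of the typical pattern: inside an AudioProcessor's processBlock callback, getWritePointer returns a writable pointer to one channel's sample data, which you then modify in place. The class name MyGainProcessor and the fixed gain value are illustrative placeholders, not taken from any of the projects below.

void MyGainProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& /*midiMessages*/)
{
    // Minimal illustrative sketch (hypothetical processor, not from the examples below).
    const int numSamples = buffer.getNumSamples();

    for (int channel = 0; channel < buffer.getNumChannels(); ++channel)
    {
        // Writable pointer to the start of this channel's sample data.
        float* channelData = buffer.getWritePointer (channel);

        for (int i = 0; i < numSamples; ++i)
            channelData[i] *= 0.5f;   // e.g. apply a fixed gain of about -6 dB in place
    }
}

Note that getWritePointer also has a two-argument overload, getWritePointer (channel, sampleIndex), which returns a pointer offset to a particular sample; Example 6 below uses it to write one sample at a time.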

Example 1: resizeBuffers

void ChorusAudioProcessor::resizeBuffers(AudioSampleBuffer& depthOSC, AudioSampleBuffer& delayOSC, AudioSampleBuffer& delayBufferL, AudioSampleBuffer& delayBufferR)
{
	// Set the size of all the buffers to the current sample rate (in samples),
	// which is equivalent to one second of audio.
	delayBufferL.setSize(1, (int)getSampleRate(), false, true, true);
	delayBufferR.setSize(1, (int)getSampleRate(), false, true, true);
	depthOSC.setSize(1, (int)getSampleRate(), false, true, true);
	delayOSC.setSize(1, (int)getSampleRate(), false, true, true);

	// Update stored sample rate.
	FS = getSampleRate();

	// Update write pointers
	depthOSCwp = depthOSC.getWritePointer(0);
	delayOSCwp = delayOSC.getWritePointer(0);
	delayBufferLwp = delayBufferL.getWritePointer(0);
	delayBufferRwp = delayBufferR.getWritePointer(0);

	// Update read pointers
	depthOSCrp = depthOSC.getReadPointer(0);
	delayOSCrp = delayOSC.getReadPointer(0);
	delayBufferLrp = delayBufferL.getReadPointer(0);
	delayBufferRrp = delayBufferR.getReadPointer(0);
	LFOBuffer();

	// Update buffer index
	bidx = 0;
}
Author: edStorey, Project: Chorus_Plugin, Lines: 28, Source: PluginProcessor.cpp

Example 2: processBlock

void RadiumCompressorAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
  const bool is_on = is_bypassing==false;

  if(is_on==false && was_on==false) // bypass
    return;

  const int num_frames = buffer.getNumSamples();

#if BUILD_MONO
  float *buf[1] = {buffer.getWritePointer (0)};
#else
  float *buf[2] = {buffer.getWritePointer (0),
                   buffer.getWritePointer (1)};
#endif

  for(int i=0 ; i<num_frames ; i+=MAX_BUF_SIZE){      
    int num_frames_here;
    if(i+MAX_BUF_SIZE>=num_frames)
      num_frames_here = num_frames - i;
    else
      num_frames_here = MAX_BUF_SIZE;
      
    //fprintf(stderr,"i: %d, num_frames_here: %d, MAX_BUF_SIZE: %d\n",i,num_frames_here,MAX_BUF_SIZE);    
    process(this,buf,num_frames_here);
    //fprintf(stderr,"finished\n");
      
    buf[0] += MAX_BUF_SIZE;
#if BUILD_MONO==0
    buf[1] += MAX_BUF_SIZE;
#endif
  }
}
Author: mcanthony, Project: DISTRHO-Ports-Extra, Lines: 33, Source: PluginProcessor.cpp

Example 3: processBlock

void SoftSynthAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    MidiBuffer processedMidi;
    int time;
    MidiMessage m;
    
    for (MidiBuffer::Iterator i(midiMessages); i.getNextEvent(m, time);) {
        if (m.isNoteOn()) {
            m = MidiMessage::noteOn(m.getChannel(), m.getNoteNumber(), m.getVelocity());
            synth.keyPressed(m.getNoteNumber(), m.getVelocity());
        } else if (m.isNoteOff()) {
            m = MidiMessage::noteOff(m.getChannel(), m.getNoteNumber(), m.getVelocity());
            synth.keyReleased(m.getNoteNumber());
        }
        
        processedMidi.addEvent(m, time);
    }
    
    auto synthBuffer = synth.getNextBuffer(buffer.getNumSamples());
    float *leftData = buffer.getWritePointer(0);
    float *rightData = buffer.getWritePointer(1);
    for (int i = 0; i < buffer.getNumSamples(); ++i) {
        leftData[i] = synthBuffer[i];
        rightData[i] = synthBuffer[i];
    }
    
    midiMessages.swapWith(processedMidi);
}
Author: tallen11, Project: polysynth-vst, Lines: 28, Source: PluginProcessor.cpp

Example 4: processBlock

void SuperSpreadAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& /*midiMessages*/)
{
    unsigned int csr = _mm_getcsr();
    _mm_setcsr(csr | 0x8040);
    AudioProcessorParameter* mixParam = parameterState->getParameter("Mix");
    const NormalisableRange<float> mixRange(parameterState->getParameterRange("Mix"));

    const float spread0 = parameterState->getParameter("Spread")->getValue();
    const float mix = mixRange.convertFrom0to1(mixParam->getValue());
    const float detuneFade = jmin(spread0/0.1f, 1.f);

    const float detunedGain = mix >= 100.f ? 1.f : mix / 100.f;
    const float dryGain = mix <= 100.f ? 1.f : detuneFade < 1.f ? jmax(0.5f * (1.f - detuneFade), (200.f - mix) / 100.f) : (200.f - mix) / 100.f;
    const float spreadGain = detunedGain * detuneFade;


    const float spread = 0.5f * spread0*spread0;

    const int numChannels = buffer.getNumChannels();
    const int numSamples = buffer.getNumSamples();
    float* chL = buffer.getWritePointer(0);
    float* chR = numChannels == 2 ? buffer.getWritePointer(1) : nullptr;

    for (int i=0; i<12 / 2; ++i)
    {
        pitchBuffer.copyFrom(i, 0, chL, numSamples);

        if (chR != nullptr)
            pitchBuffer.copyFrom(6 + i, 0, chR, numSamples);
    }

    mainDelay.processBlock(chL, chR, numSamples);
    buffer.applyGain(dryGain);

    const float maxPitches[6] = {0.893f, 0.939f, 0.98f, 1.02f, 1.064f, 1.11f}; 

    for (int i=0; i<6; ++i)
    {
        shifter[i]->setPitch(std::pow(maxPitches[i], spread));
        shifter[i+6]->setPitch(std::pow(1.f / maxPitches[i], spread));

        float* procL = pitchBuffer.getWritePointer(i);
        float* procR = pitchBuffer.getWritePointer(i+6);

        shifter[i]->processBlock(procL, numSamples);
        buffer.addFrom(0, 0, procL, numSamples, spreadGain/* * gain*/);

        if (numChannels == 2)
        {
            shifter[i+6]->processBlock(procR, numSamples);
            buffer.addFrom(1, 0, procR, numSamples, spreadGain/* * gain*/);
        }
    }

    const float totalGain = spreadGain == 0.f ? 1.f : 1.41f / (1.f + std::sqrt(6.f) * spreadGain);

    buffer.applyGain(totalGain);

    _mm_setcsr(csr);
}
Author: lkjbdsp, Project: lkjb-plugins, Lines: 60, Source: PluginProcessor.cpp

Example 5: processBlock

void PitchestimatorpluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());
 
    int bufsize = buffer.getNumSamples();
    
    //main process loop
    for (int channel = 0; channel < getNumInputChannels(); ++channel)
    {
        float* channelData = buffer.getWritePointer (channel);
        fft->processForward(channelData, fftData, bufsize, nFFT);
        buffer.applyGain (channel, 0, bufsize, gain);
    }
    for (int i=0; i<bufsize; i++) {
        X[i] = fft->cartopolRadius(fftData[i][0], fftData[i][1]);
    }
    
    HS->generateCost(X, f0Area, numberOfHarmonics, bufsize, f0AreaSize, getSampleRate(), nFFT);
    pitchEstimate = HS->estimatePitch(f0Area, f0AreaSize);
    
    pitchText = String (pitchEstimate, 1);

}
Author: adamski, Project: Pitch-Estimator-Plugin, Lines: 26, Source: PluginProcessor.cpp

Example 6: processBlock

    void processBlock (AudioSampleBuffer& buffer, MidiBuffer&) override
    {
        AudioSampleBuffer mainInputOutput = getBusBuffer(buffer, true, 0);
        AudioSampleBuffer sideChainInput  = getBusBuffer(buffer, true, 1);

        float alphaCopy = *alpha;
        float thresholdCopy = *threshold;

        for (int j = 0; j < buffer.getNumSamples(); ++j)
        {
            float mixedSamples = 0.0f;
            for (int i = 0; i < sideChainInput.getNumChannels(); ++i)
                mixedSamples += sideChainInput.getReadPointer (i) [j];

            mixedSamples /= static_cast<float> (sideChainInput.getNumChannels());
            lowPassCoeff = (alphaCopy * lowPassCoeff) + ((1.0f - alphaCopy) * mixedSamples);

            if (lowPassCoeff >= thresholdCopy)
                sampleCountDown = (int) getSampleRate();

            // very inefficient way of doing this
            for (int i = 0; i < mainInputOutput.getNumChannels(); ++i)
                *mainInputOutput.getWritePointer (i, j) = sampleCountDown > 0 ? *mainInputOutput.getReadPointer (i, j) : 0.0f;

            if (sampleCountDown > 0)
                --sampleCountDown;
        }
    }
Author: Neknail, Project: JUCE, Lines: 28, Source: NoiseGate.cpp

Example 7: sizeof

void C74GenAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
	assureBufferSize(buffer.getNumSamples());
	
	// fill input buffers
	for (int i = 0; i < C74_GENPLUGIN::num_inputs(); i++) {
		if (i < getNumInputChannels()) {
			for (int j = 0; j < m_CurrentBufferSize; j++) {
				m_InputBuffers[i][j] = buffer.getReadPointer(i)[j];
			}
		} else {
			memset(m_InputBuffers[i], 0, m_CurrentBufferSize *  sizeof(double));
		}
	}
	
	// process audio
	C74_GENPLUGIN::perform(m_C74PluginState,
								  m_InputBuffers,
								  C74_GENPLUGIN::num_inputs(),
								  m_OutputBuffers,
								  C74_GENPLUGIN::num_outputs(),
								  buffer.getNumSamples());

	// fill output buffers
	for (int i = 0; i < getNumOutputChannels(); i++) {
		if (i < C74_GENPLUGIN::num_outputs()) {
			for (int j = 0; j < buffer.getNumSamples(); j++) {
				buffer.getWritePointer(i)[j] = m_OutputBuffers[i][j];
			}
		} else {
			buffer.clear (i, 0, buffer.getNumSamples());
		}
	}
}
Author: drumgod24, Project: gen-plugin-export, Lines: 34, Source: PluginProcessor.cpp

Example 8: process

void FileReader::process (AudioSampleBuffer& buffer)
{
    const int samplesNeededPerBuffer = int (float (buffer.getNumSamples()) * (getDefaultSampleRate() / m_sysSampleRate));
    m_samplesPerBuffer.set(samplesNeededPerBuffer);
    // FIXME: needs to account for the fact that the ratio might not be an exact
    //        integer value
    
    // if cache window id == 0, we need to read and cache BUFFER_WINDOW_CACHE_SIZE more buffer windows
    if (bufferCacheWindow == 0)
    {
        switchBuffer();
    }
    
    for (int i = 0; i < currentNumChannels; ++i)
    {
        // offset readBuffer index by current cache window count * buffer window size * num channels
        input->processChannelData (*readBuffer + (samplesNeededPerBuffer * currentNumChannels * bufferCacheWindow),
                                   buffer.getWritePointer (i, 0),
                                   i,
                                   samplesNeededPerBuffer);
    }
    
    setTimestampAndSamples(timestamp, samplesNeededPerBuffer);
	timestamp += samplesNeededPerBuffer;

	static_cast<FileReaderEditor*> (getEditor())->setCurrentTime(samplesToMilliseconds(startSample + timestamp % (stopSample - startSample)));
    
    bufferCacheWindow += 1;
    bufferCacheWindow %= BUFFER_WINDOW_CACHE_SIZE;
}
Author: eliezyer, Project: plugin-GUI, Lines: 30, Source: FileReader.cpp

Example 9: processBlock

void Plugin::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    if (getNumInputChannels() != 2 && getNumOutputChannels() != 2) {
        return;
    }
    float* chan1 = buffer.getWritePointer(0);
    float* chan2 = buffer.getWritePointer(1);
    int sampleframes = buffer.getNumSamples();
    int blocks = sampleframes / kInternalBlocksize;

    if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition(pos)) {
        if (pos.bpm == 0.0f) {
            parameters->setQuantizationDisabled();
            parameters->setParameter(kDelayQuant, 0.0f, false);
            parameters->setParameter(kIotQuant, 0.0f, false);
            parameters->setParameter(kDurQuant, 0.0f, false);
        }
        else
            parameters->time_quantizer->setPositionInfo(&pos);
    } else {
        parameters->setQuantizationDisabled();
    }

    block_sample_pos = 0;
    for (int i = 0; i < blocks; i++) {
        granulator->processInternalBlock(chan1, chan2, kInternalBlocksize);
        chan1 += kInternalBlocksize;
        chan2 += kInternalBlocksize;
        parameters->time_quantizer->incrementPositionInfo();
    }
    int samples_remaining = sampleframes % kInternalBlocksize;
    if (samples_remaining) {
        granulator->processInternalBlock(chan1, chan2, samples_remaining);
    }
}
Author: mcanthony, Project: DISTRHO-Ports-Extra, Lines: 35, Source: Plugin.cpp

Example 10: processBlock

void ZenGuitestAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int totalNumInputChannels  = getTotalNumInputChannels();
    const int totalNumOutputChannels = getTotalNumOutputChannels();

    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    // This is here to avoid people getting screaming feedback
    // when they first compile a plugin, but obviously you don't need to keep
    // this code if your algorithm always overwrites all the output channels.
    for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    // This is the place where you'd normally do the guts of your plugin's
    // audio processing...
    for (int channel = 0; channel < totalNumInputChannels; ++channel)
    {
        float* channelData = buffer.getWritePointer (channel);

        // ..do something to the data...
    }
	
	/*float* leftData = buffer.getWritePointer(0);  //leftData references left channel now
	float* rightData = buffer.getWritePointer(1); //right data references right channel now
	for (long i = 0; i < buffer.getNumSamples(); i++)
	{
		leftData[i] = 0;
		rightData[i] = 0;
	}*/
}
Author: SonicZentropy, Project: ZenGUITest, Lines: 31, Source: PluginProcessor.cpp

Example 11: processBlock

void FilterGuiDemoAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();
    float currentSampleRate = getSampleRate();
    
    //Handles filter being added onto an already playing audio track where some hosts will not call prepare to play method.
    if (filter1->getSampleRate() != currentSampleRate)
    {
        filter1->initializeFilter(currentSampleRate, defaultMinFilterFrequency, defaultMaxFilterFrequency);
    }
    
    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    // I've added this to avoid people getting screaming feedback
    // when they first compile the plugin, but obviously you don't need to
    // this code if your algorithm already fills all the output channels.
    for (int i = getTotalNumInputChannels(); i < getTotalNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    // MAIN AUDIO PROCESSING BLOCK. PROCESS FILTER TWICE FOR STEREO CHANNELS
    for (int channel = 0; channel < getTotalNumInputChannels(); ++channel)
    {
        const float* input = buffer.getReadPointer(channel);
        float* output = buffer.getWritePointer (channel);
        
        for (int i = 0; i < numSamples; i++)
        {
            output[i] = filter1->processFilter(input[i], channel);
        }
    }
}
Author: JoshMarler, Project: filter-gui-demo, Lines: 32, Source: PluginProcessor.cpp

Example 12: processBlock

void MLPluginProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
 	if (mEngine.isEnabled() && !isSuspended())
	{
		unsigned samples = buffer.getNumSamples();
		
		// get current time from host.
		// should refer to the start of the current block.
		AudioPlayHead::CurrentPositionInfo newTime;
		if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (newTime))
		{
			lastPosInfo = newTime;
		}
		else
		{
			lastPosInfo.resetToDefault();
		}

		// set host phasor 
		double bpm = lastPosInfo.isPlaying ? lastPosInfo.bpm : 0.;
		double ppqPosition = lastPosInfo.ppqPosition;
		double secsPosition = lastPosInfo.timeInSeconds;
		int64 samplesPosition = lastPosInfo.timeInSamples;
		bool isPlaying = lastPosInfo.isPlaying;
		
		// TEST
		if(0)
		if(lastPosInfo.isPlaying)
		{
			debug() << "bpm:" << lastPosInfo.bpm 
			<< " ppq:" << std::setprecision(5) << ppqPosition << std::setprecision(2) 
			<< " secs:" << secsPosition << "\n";
		}
			
		// set Engine I/O.  done here each time because JUCE may change pointers on us.  possibly.
		MLDSPEngine::ClientIOMap ioMap;
		for (int i=0; i<getNumInputChannels(); ++i)
		{
			ioMap.inputs[i] = buffer.getReadPointer(i);
		}		
		for (int i=0; i<getNumOutputChannels(); ++i)
		{
			ioMap.outputs[i] = buffer.getWritePointer(i);
		}
		mEngine.setIOBuffers(ioMap);
        
        if(acceptsMidi())
        {
            convertMIDIToEvents(midiMessages, mControlEvents);
            midiMessages.clear(); // otherwise messages will be passed back to the host
        }
        mEngine.processBlock(samples, mControlEvents, samplesPosition, secsPosition, ppqPosition, bpm, isPlaying);
    }
	else
	{
		buffer.clear();
	}
}
Author: afofo, Project: madronalib, Lines: 58, Source: MLPluginProcessor.cpp

Example 13: processBlock

void JuceDemoPluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();
    keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);
    synth.renderNextBlock (buffer, midiMessages, 0, numSamples);


    for (int channel = 0; channel < getNumInputChannels(); channel++)
    {
        float* channelData = buffer.getWritePointer (channel);
        std::deque<Particle> &particles = m_particles[channel];
        for(int sample = 0; sample < numSamples; sample++) {
            for(Particle& p : particles) {
                p.velocity() += p.acceleration() * 0.1 * 0.5;
                p.position() += p.velocity() * 0.1;
            }

            Particle* pFix = m_fixedParticle[channel];
            Particle* pOut = m_outputParticle[channel];
            Particle* pIn = m_inputParticle[channel];

            for(Particle& p : particles) {
                p.acceleration() = Vector3D(0.0, 0.0, 0.0);
            }

            pFix->position() = Vector3D(0.0, 0.0, 0.0);
            pIn->position() = Vector3D(channelData[sample] + m_particleCount + offset->getValue(), 0.0, 0.0);

            for(Spring& spring : m_springs[channel]) {
                Particle* pa = spring.from;
                Particle* pb = spring.to;

                double diff = pb->position().x - pa->position().x;
                double r = fabs(diff);

                double d = spring.d;
                double k = spring.k*springConstant->getValue();

                Vector3D force = Vector3D(k*(r-d), 0.0, 0.0);
                pa->acceleration() += force;
                pb->acceleration() -= force;
            }

            for(Particle& p : particles) {
                p.velocity() *= velocityFactor->getValue();
                p.velocity() += p.acceleration() * 0.1 * 0.5;
            }

            channelData[sample] = pOut->position().x - 1.0;
        }
    }

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());
}
Author: dragly, Project: spring-all-year, Lines: 58, Source: PluginProcessor.cpp

Example 14: convertFromOpenSL

    static void convertFromOpenSL (const float* srcInterleaved, AudioSampleBuffer& audioBuffer)
    {
        if (audioBuffer.getNumChannels() == 1)
        {
            jassert (srcInterleaved == audioBuffer.getWritePointer (0));
            return;
        }

        for (int i = 0; i < audioBuffer.getNumChannels(); ++i)
        {
            typedef AudioData::Pointer<AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::NonConst> DstSampleType;
            typedef AudioData::Pointer<AudioData::Float32, AudioData::LittleEndian, AudioData::Interleaved,    AudioData::Const>    SrcSampleType;

            DstSampleType dstData (audioBuffer.getWritePointer (i));
            SrcSampleType srcData (srcInterleaved + i, audioBuffer.getNumChannels());
            dstData.convertSamples (srcData, audioBuffer.getNumSamples());
        }
    }
Author: soundradix, Project: JUCE, Lines: 18, Source: juce_android_OpenSL.cpp

Example 15: processBlock

void NetProcess::processBlock(AudioSampleBuffer &buffer, MidiBuffer &midiMessages)
{
    
    socket->write(targetHost, targetPort, buffer.getReadPointer(0), buffer.getNumChannels() * buffer.getNumSamples() * sizeof(float));
    
    buffer.clear();
    socket->read(buffer.getWritePointer(0), buffer.getNumChannels() * buffer.getNumSamples() * sizeof(float), false);
    
}
Author: alexgustafson, Project: DiauproProject, Lines: 9, Source: NetProcess.cpp


Note: The AudioSampleBuffer::getWritePointer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and the source code copyright belongs to the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.