

C++ AudioBuffer::getChannels Method Code Examples

This article collects typical usage examples of the C++ AudioBuffer::getChannels method. If you are wondering what exactly AudioBuffer::getChannels does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples of the AudioBuffer class to which this method belongs.


A total of 15 code examples of the AudioBuffer::getChannels method are shown below, sorted by popularity by default.
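Before diving into the examples, note the pattern nearly all of them share: getChannels() bounds a per-channel loop, and getSamples(ch) then exposes that channel's sample block for in-place processing. The following minimal sketch illustrates only that pattern; the OWL-style Patch API it assumes (processAudio, AudioBuffer with getChannels/getSize/getSamples) is inferred from the examples below, and the gain value is purely illustrative.

  void processAudio(AudioBuffer &buffer){
    const int channels = buffer.getChannels(); // number of channels in this audio block
    const int size = buffer.getSize();         // samples per channel
    for(int ch = 0; ch < channels; ++ch){
      float* buf = buffer.getSamples(ch);      // in-place sample data for this channel
      for(int i = 0; i < size; ++i)
        buf[i] *= 0.5f;                        // illustrative processing: attenuate each sample
    }
  }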

Example 1: processAudio

    void processAudio(AudioBuffer &buffer){
      double rate = getSampleRate();

      unsigned int sampleDelay = getSampleDelay(getRampedParameterValue(PARAMETER_A), rate);
      sampleDelay = min(sampleDelay, bufferSize);
      float feedback = getRampedParameterValue(PARAMETER_B);
      float bias = getBiasExponent(1 - getRampedParameterValue(PARAMETER_C));
      float dryWetMix = getRampedParameterValue(PARAMETER_D);

      int size = buffer.getSize();

      for(int ch = 0; ch < buffer.getChannels(); ++ch){
        float* buf = buffer.getSamples(ch);

        for(int i = 0; i < size; ++i){
          float delaySample = circularBuffer[writeIdx];
          float v = buf[i] + circularBuffer[writeIdx] * feedback;
          v = applyBias(v, bias);
          circularBuffer[writeIdx] = min(1.0f, max(-1.0f, v)); // Guard: hard range limits.
          buf[i] = linearBlend(buf[i], delaySample, dryWetMix);

          writeIdx = (writeIdx + 1) % sampleDelay; // advance circular write index
        }
      }
    }
Author: chrissie-c | Project: OwlPatches | Lines of code: 31 | Source file: BiasedDelayPatch.hpp

Example 2: processAudio

    void processAudio(AudioBuffer &buffer) {
        double rate = getSampleRate();

        float p1 = getRampedParameterValue(PARAMETER_A);
        float freq1 = p1*p1 * (MAX_FREQ-MIN_FREQ) + MIN_FREQ;
        double step1 = freq1 / rate;
        float amt1 = getRampedParameterValue(PARAMETER_B);

        float p2 = getRampedParameterValue(PARAMETER_C);
        float freq2 = p2*p2 * (MAX_FREQ-MIN_FREQ) + MIN_FREQ;
        float amt2 = getRampedParameterValue(PARAMETER_D);
        double step2 = freq2 / rate;

        int size = buffer.getSize();

        for(int ch = 0; ch<buffer.getChannels(); ++ch)
        {
            float* buf = buffer.getSamples(ch);

            for (int i=0; i<size; ++i)
            {
                float mod1 = sin(2 * M_PI * phase1) / 2 + .5; // 0..1
                float mod2 = sin(2 * M_PI * phase2) / 2 + .5; // 0..1
                float gain1 = (amt1 * mod1) + (1 - amt1);
                float gain2 = (amt2 * mod2) + (1 - amt2);
                buf[i] = (gain1 * gain2) * buf[i];
                phase1 += step1;
                phase2 += step2;
            }
        }

    }
Author: rdmontgomery | Project: OwlPatches | Lines of code: 32 | Source file: DualTremoloPatch.hpp

Example 3: processAudio

  void processAudio(AudioBuffer &buffer){
    
    setCoeffs(getLpFreq(), 0.8f);
        
    float delayTime = getParameterValue(PARAMETER_A); // get delay time value    
    float feedback  = getParameterValue(PARAMETER_B); // get feedback value
    float wetDry    = getParameterValue(PARAMETER_D); // get gain value
        
    float delaySamples = delayTime * (DELAY_BUFFER_LENGTH-1);
        
    int size = buffer.getSize();
      
      for (int ch = 0; ch<buffer.getChannels(); ++ch) {
          
          float* buf = buffer.getSamples(ch);
          process(size, buf, outBuf);     // low pass filter for delay buffer
          
          for(int i = 0; i < size; i++){

              outBuf[i] = outBuf[i] + feedback * delayBuffer.read(delaySamples);
              buf[i] = (1.f - wetDry) * buf[i] + wetDry * outBuf[i];  //crossfade for wet/dry balance
              delayBuffer.write(buf[i]);
          }
      }
  }
Author: paulmalyschko | Project: OwlNest | Lines of code: 25 | Source file: LpfDelayPatch.hpp

Example 4: mixAudioBufferChannelsLogrithmicDRC

void mixAudioBufferChannelsLogrithmicDRC(AudioBuffer &audioBuffer, std::vector<float> &channelLevels, AudioBuffer &mixBuffer, float threshold)
{
	if(audioBuffer.getChannels() == 0)
		return;

	AudioFormat format=audioBuffer.getFormat();
	unsigned int samples=audioBuffer.getSamples();

	switch(format)
	{
	case AudioFormat::UInt8:
	case AudioFormat::UInt8P:
		mixChannelsLogrithmicDRC<uint8_t>((uint8_t *)audioBuffer.getBuffer(), channelLevels, (uint8_t *)mixBuffer.getBuffer(), samples, threshold);
		break;
	case AudioFormat::Int16:
	case AudioFormat::Int16P:
		mixChannelsLogrithmicDRC<int16_t>((int16_t *)audioBuffer.getBuffer(), channelLevels, (int16_t *)mixBuffer.getBuffer(), samples, threshold);
		break;
	case AudioFormat::Int32:
	case AudioFormat::Int32P:
		mixChannelsLogrithmicDRC<int32_t>((int32_t *)audioBuffer.getBuffer(), channelLevels, (int32_t *)mixBuffer.getBuffer(), samples, threshold);
		break;
	case AudioFormat::Float:
	case AudioFormat::FloatP:
		mixChannelsLogrithmicDRC<float>((float *)audioBuffer.getBuffer(), channelLevels, (float *)mixBuffer.getBuffer(), samples, threshold);
		break;
	case AudioFormat::Double:
	case AudioFormat::DoubleP:
		mixChannelsLogrithmicDRC<double>((double *)audioBuffer.getBuffer(), channelLevels, (double *)mixBuffer.getBuffer(), samples, threshold);
		break;
	}
}
Author: InfiniteInteractive | Project: LimitlessSDK | Lines of code: 32 | Source file: audioMix.cpp

Example 5: processAudio

 void processAudio(AudioBuffer &buffer)
 {
     // Reasonably assume we will not have more than 32 channels
     float*  ins[32];
     float*  outs[32];
     int     n = buffer.getChannels();
     
     if ( (fDSP.getNumInputs() < 32) && (fDSP.getNumOutputs() < 32) ) {
         
         // create the table of input channels
         for(int ch=0; ch<fDSP.getNumInputs(); ++ch) {
             ins[ch] = buffer.getSamples(ch%n);
         }
         
         // create the table of output channels
         for(int ch=0; ch<fDSP.getNumOutputs(); ++ch) {
             outs[ch] = buffer.getSamples(ch%n);
         }
         
         // read OWL parameters and update the corresponding Faust widget zones
         fUI.update(); 
         
         // Process the audio samples
         fDSP.compute(buffer.getSize(), ins, outs);
     }
 }
Author: rdmontgomery | Project: OwlPatches | Lines of code: 26 | Source file: GuitarixDunwahPatch.hpp

Example 6: process

void PatchController::process(AudioBuffer& buffer){
  if(activeSlot == GREEN && green.index != settings.patch_green){
    memset(buffer.getSamples(0), 0, buffer.getChannels()*buffer.getSize()*sizeof(float));
    // green must be active slot when patch constructor is called
    green.setPatch(settings.patch_green);
    codec.softMute(false);
    debugClear();
    return;
  }else if(activeSlot == RED && red.index != settings.patch_red){
    memset(buffer.getSamples(0), 0, buffer.getChannels()*buffer.getSize()*sizeof(float));
    // red must be active slot when constructor is called
    red.setPatch(settings.patch_red);
    codec.softMute(false);
    debugClear();
    return;
  }
  switch(mode){
  case SINGLE_MODE:
  case DUAL_GREEN_MODE:
    green.setParameterValues(getAnalogValues());
    green.patch->processAudio(buffer);
    break;
  case DUAL_RED_MODE:
    red.setParameterValues(getAnalogValues());
    red.patch->processAudio(buffer);
    break;
  case SERIES_GREEN_MODE:
    green.setParameterValues(getAnalogValues());
    green.patch->processAudio(buffer);
    red.patch->processAudio(buffer);
    break;
  case SERIES_RED_MODE:
    red.setParameterValues(getAnalogValues());
    green.patch->processAudio(buffer);
    red.patch->processAudio(buffer);
    break;
  case PARALLEL_GREEN_MODE:
    green.setParameterValues(getAnalogValues());
    processParallel(buffer);
    break;
  case PARALLEL_RED_MODE:
    red.setParameterValues(getAnalogValues());
    processParallel(buffer);
    break;
  }
}
Author: paulmalyschko | Project: OwlWare | Lines of code: 46 | Source file: PatchController.cpp

Example 7: processAudio

 void processAudio(AudioBuffer &buffer){
   prepare();
   int size = buffer.getSize();
     
   for (int ch = 0; ch<buffer.getChannels(); ++ch) {
       float* buf = buffer.getSamples(ch);
       for(int i = 0; i < size; ++i) buf[i] = processSample(buf[i]);
   }
 }
Author: paulmalyschko | Project: OwlNest | Lines of code: 9 | Source file: SampleBasedPatch.hpp

Example 8: processAudio

  void processAudio(AudioBuffer &buffer){
    float gain = getParameterValue(PARAMETER_A)*2;
    int size = buffer.getSize();
    for(int ch=0; ch<buffer.getChannels(); ++ch){
      float* buf = buffer.getSamples(ch);
      for(int i=0; i<size; ++i)
	buf[i] = gain*buf[i];
    }
  }
Author: DinoPollano | Project: OwlPatches | Lines of code: 9 | Source file: GainPatch.hpp

Example 9: processAudio

    void processAudio(AudioBuffer &owlbuf)
    {
	float *in1;
	float *in2;
	float *out1;
	float *out2;
	float a, b, e, f, g, i;
	float e1=env1, e2=env2, e3=env3, e4=env4, y=dry;
	float a1=att1, a2=att2, r12=rel12, a34=att34, r3=rel3, r4=rel4;
	float fi=fili, fo=filo, fx=filx, fb1=fbuf1, fb2=fbuf2;
	int sampleFrames = owlbuf.getSize();

	if (owlbuf.getChannels() < 2) {  // Mono check
	    in1 = owlbuf.getSamples(0); // L
	    in2 = owlbuf.getSamples(0); // R
	    out1 = owlbuf.getSamples(0); // L
	    out2 = owlbuf.getSamples(0); // R
	} else {
	    in1 = owlbuf.getSamples(0); // L
	    in2 = owlbuf.getSamples(1); // R
	    out1 = owlbuf.getSamples(0); // L
	    out2 = owlbuf.getSamples(1); // R
	}
	setParameters();

	--in1;
	--in2;
	--out1;
	--out2;

	while(--sampleFrames >= 0)
	{
	    a = *++in1;
	    b = *++in2;

	    // Filter processing
	    fb1 = fo*fb1 + fi*a;
	    fb2 = fo*fb2 + fi*b;
	    e = fb1 + fx*a;
	    f = fb2 + fx*b;

	    i = a + b; i = (i>0)? i : -i; // stereo sum ; fabs()
	    e1 = (i>e1)? e1 + a1 * (i-e1) : e1 * r12;
	    e2 = (i>e2)? e2 + a2 * (i-e2) : e2 * r12;
	    e3 = (i>e3)? e3 + a34 * (i-e3) : e3 * r3;
	    e4 = (i>e4)? e4 + a34 * (i-e4) : e4 * r4;
	    g = (e1 - e2 + e3 - e4);

	    *++out1 = y * (a + e * g);
	    *++out2 = y * (b + f * g);
	}
	if(e1<1.0e-10) { env1=0.f; env2=0.f; env3=0.f; env4=0.f; fbuf1=0.f; fbuf2=0.f; }
	else { env1=e1;  env2=e2;  env3=e3;  env4=e4;  fbuf1=fb1; fbuf2=fb2; }
    }
Author: DinoPollano | Project: OwlPatches | Lines of code: 54 | Source file: MdaTransientPatch.cpp

Example 10: processAudio

  void processAudio(AudioBuffer &buffer){
    assert_param(buffer.getChannels() > 1);
    float gainL = getParameterValue(PARAMETER_A)*2;
    float gainR = getParameterValue(PARAMETER_B)*2;
    int size = buffer.getSize();
    float* left = buffer.getSamples(0);
    float* right = buffer.getSamples(1);
    for(int i=0; i<size; ++i){
	left[i] = gainL*left[i];
	right[i] = gainR*right[i];
    }
  }
Author: chrissie-c | Project: OwlPatches | Lines of code: 12 | Source file: StereoGainPatch.hpp

Example 11: processAudio

 void processAudio(AudioBuffer &buffer){
   int size = buffer.getSize();
   unsigned int delaySamples;
     
   rate     = getParameterValue(PARAMETER_A) * 0.000005f; // flanger needs slow rate
   depth    = getParameterValue(PARAMETER_B);
   feedback = getParameterValue(PARAMETER_C)* 0.707; // so we keep a -3dB summation of the delayed signal
     
   for (int ch = 0; ch < buffer.getChannels(); ++ch) {
       float* buf = buffer.getSamples(ch); // fetch the channel's samples once per channel
       for (int i = 0; i < size; i++) {
           delaySamples = (depth * modulate(rate)) * (delayBuffer.getSize()-1); // compute delay according to rate and depth
           buf[i] += feedback * delayBuffer.read(delaySamples); // add scaled delayed signal to dry signal
           delayBuffer.write(buf[i]); // update delay buffer
       }
   }
 }
Author: DinoPollano | Project: OwlPatches | Lines of code: 17 | Source file: FlangerPatch.hpp

Example 12: processAudio

  void processAudio(AudioBuffer &buffer){
//     assert_param(buffer.getChannels() > 1);
    float gainLL = getParameterValue(PARAMETER_A);
    float gainLR = getParameterValue(PARAMETER_B);
    float gainRL = getParameterValue(PARAMETER_C);
    float gainRR = getParameterValue(PARAMETER_D);
    int size = buffer.getSize();
    float* left = buffer.getSamples(0);
    float* right = buffer.getChannels() > 1 ? buffer.getSamples(1) : left;
    float l, r;
    for(int i=0; i<size; ++i){
      l = gainLL*left[i] + gainLR*right[i];
      r = gainRL*left[i] + gainRR*right[i];
      left[i] = l;
      right[i] = r;
    }
  }
Author: DinoPollano | Project: OwlPatches | Lines of code: 17 | Source file: StereoMixerPatch.hpp

Example 13: processAudio

  void processAudio(AudioBuffer &buffer)
  {
    double rate = getSampleRate();

    if (circularBuffer == NULL)
    {
      // lazily allocate and zero the delay line on the first audio block
      bufferSize = MAX_DELAY * rate;
      circularBuffer = new float[bufferSize];
      memset(circularBuffer, 0, bufferSize*sizeof(float));
      writeIdx = 0;
    }

    float p1 = getRampedParameterValue(PARAMETER_A);
    float p2 = getRampedParameterValue(PARAMETER_B);
//    float p3 = getRampedParameterValue(PARAMETER_C);
    float p4 = getRampedParameterValue(PARAMETER_D);

    unsigned int maxSampleDelay = rate * (MIN_DELAY + p1*p1 * (MAX_DELAY-MIN_DELAY));
    float bias = MIN_BIAS + p2*p2 * (MAX_BIAS-MIN_BIAS);
    // float cutoff = p3;
    float dryWetMix = p4;

    int size = buffer.getSize();

    for(int ch = 0; ch < buffer.getChannels(); ++ch)
    {
      float* buf = buffer.getSamples(ch);
      Random r;
      for (int i = 0; i < size; ++i)
      {
        // pick a random, bias-weighted read offset behind the write position
        int offset = floor(maxSampleDelay * pow(r.nextFloat(), bias) + 0.5);
        int readIdx = writeIdx - offset;
        while (readIdx < 0)
          readIdx += bufferSize;

        circularBuffer[writeIdx] = buf[i];
        buf[i] = circularBuffer[readIdx] * dryWetMix + buf[i] * (1 - dryWetMix);

        writeIdx = (writeIdx + 1) % bufferSize; // advance circular write index
      }
    }
  }
Author: DinoPollano | Project: OwlPatches | Lines of code: 46 | Source file: SampleJitterPatch.hpp

Example 14: processAudio

  void processAudio(AudioBuffer &buffer)
  {
    const int size = buffer.getSize();
    const float coarsePitch = getRampedParameterValue(PARAMETER_A);
    const float finePitch = getRampedParameterValue(PARAMETER_B);
    const float decay = getRampedParameterValue(PARAMETER_C);
    const float mix = getRampedParameterValue(PARAMETER_D);

    if (coarsePitch != mPrevCoarsePitch || finePitch != mPrevFinePitch || decay != mPrevDecay)
    {
      const float freq = midi2CPS(MIN_PITCH + floor(mPrevCoarsePitch * PITCH_RANGE) + finePitch);

      for (int c = 0; c < NUM_COMBS; c++)
      {
        mCombs[c].setFreqCPS(freq * FREQ_RATIOS[c]);
        mCombs[c].setDecayTimeMs(MIN_DECAY + (decay * DECAY_RANGE));
      }

      mPrevCoarsePitch = coarsePitch;
      mPrevFinePitch = finePitch;
      mPrevDecay = decay;
    }

    for (int ch = 0; ch < buffer.getChannels(); ++ch)
    {
      float* buf = buffer.getSamples(ch);

      for (int i = 0; i < size; i++)
      {
        float ips = buf[i];
        float ops = 0.;
        const float smoothMix = mMixSmoother.process(mix);

        for (int c = 0; c < NUM_COMBS; c++)
        {
          ops += mCombs[c].process(ips);
        }

        buf[i] = mDCBlocker.process( ((ops * 0.1) * smoothMix) + (ips * (1.-smoothMix)) );
      }
    }
  }
Author: mazbox | Project: OwlPatches | Lines of code: 44 | Source file: DroneBox.hpp

Example 15: processAudio

 void processAudio(AudioBuffer &buffer){
     
   int size = buffer.getSize();
   float w, z;  //implement with less arrays?
   setCoeffs(getLpFreq(), 0.8f);
   rate = 0.01f, depth = 0.3f;
       
   float delayTime = getParameterValue(PARAMETER_A); // get delay time value    
   float feedback  = getParameterValue(PARAMETER_B); // get feedback value
   float wetDry    = getParameterValue(PARAMETER_D); // get gain value
       
   float delaySamples = delayTime * (DELAY_BUFFER_LENGTH-1);
     
     for (int ch = 0; ch<buffer.getChannels(); ++ch) {
         
         float* buf = buffer.getSamples(ch);
         process(size, buf, outBuf);     // low pass filter for delay buffer
         
         float d  = _dmin + (_dmax-_dmin) * ((sin( _lfoPhase ) + 1.f)/2.f);
         
         _lfoPhase += rate;
         if( _lfoPhase >= M_PI * 2.f )
             _lfoPhase -= M_PI * 2.f;
         
         //update filter coeffs
         for( int i=0; i<6; i++ )
             _alps[i].Delay( d );
         
         for (int i = 0; i < size; i++){
             
             outBuf[i] = outBuf[i] + feedback * delayBuffer.read(delaySamples);
             buf[i] = (1.f - wetDry) * buf[i] + wetDry * outBuf[i];  //crossfade for wet/dry balance
             delayBuffer.write(buf[i]);
             
             //calculate output
             z = _alps[0].Update(_alps[1].Update(_alps[2].Update(_alps[3].Update(_alps[4].Update(_alps[5].Update(buf[i] + _zm1 * (feedback*0.1)))))));
             
             _zm1 = z;
             
             buf[i] = buf[i] + z * depth;
         }
     }
       
 }
Author: paulmalyschko | Project: OwlNest | Lines of code: 44 | Source file: LpfDelayPhaserPatch.hpp


Note: The AudioBuffer::getChannels examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to each project's License before distributing or using the code; do not repost without permission.