This page collects typical usage examples of the C++ method AudioBuffer::silenceBuffers. If you are unsure what AudioBuffer::silenceBuffers does, or how and where to use it, the curated code examples below may help. You can also explore further usage examples of its containing class, AudioBuffer.
Two code examples of AudioBuffer::silenceBuffers are shown below, sorted by popularity by default.
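Before the full examples, here is a minimal sketch of the pattern both of them rely on: silenceBuffers() zeroes every channel of an AudioBuffer, so a reusable target buffer can be reset before each mix pass and mixing accumulates into clean memory instead of on top of stale samples from the previous render cycle. The channel count, buffer size, and cycle count below are assumed values for illustration, not part of the library.

// minimal sketch (assumed sizes): clear a reusable buffer before each mix pass
AudioBuffer* mixTarget = new AudioBuffer( 2, 512 ); // 2 channels, 512 samples per channel (hypothetical)
int totalCycles = 4; // hypothetical number of render cycles

for ( int cycle = 0; cycle < totalCycles; ++cycle )
{
    mixTarget->silenceBuffers(); // reset all channel contents to silence before accumulating

    // ...mix this cycle's audio events into mixTarget (e.g. via mixBuffer() on each event)...
}
delete mixTarget;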
Example 1: BaseAudioEvent
TEST( BaseAudioEvent, MixBufferLoopeableEvent )
{
    BaseAudioEvent* audioEvent = new BaseAudioEvent();

    int sourceSize            = 16;
    AudioBuffer* sourceBuffer = new AudioBuffer( 1, sourceSize );

    SAMPLE_TYPE* rawBuffer = sourceBuffer->getBufferForChannel( 0 );
    fillAudioBuffer( sourceBuffer );

    audioEvent->setBuffer( sourceBuffer, false );
    audioEvent->setLoopeable( true );
    audioEvent->setSampleLength( 16 * 4 ); // thus will loop 4 times
    audioEvent->positionEvent( 0, 16, 0 );

    // create an output buffer at a size smaller than the source buffer length
    int outputSize = ( int )(( double ) sourceSize * .4 );
    AudioBuffer* targetBuffer = new AudioBuffer( sourceBuffer->amountOfChannels, outputSize );

    int minBufferPos = audioEvent->getSampleStart();
    int bufferPos    = minBufferPos;
    int maxBufferPos = audioEvent->getSampleEnd();

    // test the seamless mixing over multiple iterations
    for ( ; bufferPos < maxBufferPos; bufferPos += outputSize )
    {
        // mix buffer contents
        targetBuffer->silenceBuffers();

        bool loopStarted = bufferPos + ( outputSize - 1 ) > maxBufferPos;
        int loopOffset   = ( maxBufferPos - bufferPos ) + 1;

        audioEvent->mixBuffer( targetBuffer, bufferPos, minBufferPos, maxBufferPos, loopStarted, loopOffset, false );

        // assert results
        SAMPLE_TYPE* mixedBuffer = targetBuffer->getBufferForChannel( 0 );

        for ( int i = 0; i < outputSize; ++i )
        {
            int compareOffset = ( bufferPos + i ) % sourceSize;

            EXPECT_EQ( rawBuffer[ compareOffset ], mixedBuffer[ i ] )
                << "expected mixed buffer contents to equal the source contents at mixed offset " << i << " for source offset " << compareOffset;
        }
    }
    delete targetBuffer;
    delete sourceBuffer;
    delete audioEvent;
}
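Note how, in Example 1, silenceBuffers() is invoked at the top of every loop iteration: mixBuffer() accumulates into the target rather than overwriting it, so without the reset each pass would mix on top of the previous iteration's samples. The loopStarted flag and loopOffset value follow directly from the arithmetic in the test: with outputSize = ( int )( 16 * .4 ) = 6, a read starting at bufferPos wraps once bufferPos + 5 exceeds maxBufferPos, at which point the remaining samples are read from the event's loop start.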
Example 2: start
/**
 * starts the render thread
 * NOTE: the render thread is always active, even when the
 * sequencer is paused
 */
void start()
{
    OPENSL_STREAM *p;

    p = android_OpenAudioDevice( AudioEngineProps::SAMPLE_RATE, AudioEngineProps::INPUT_CHANNELS,
                                 AudioEngineProps::OUTPUT_CHANNELS, AudioEngineProps::BUFFER_SIZE );

    // hardware unavailable ? halt thread, trigger JNI callback for error handler
    if ( p == NULL )
    {
        Observer::handleHardwareUnavailable();
        return;
    }
    // audio hardware available, start render thread

    int buffer_size, i, c, ci;
    buffer_size        = AudioEngineProps::BUFFER_SIZE;
    int outputChannels = AudioEngineProps::OUTPUT_CHANNELS;
    bool isMono        = outputChannels == 1;

    std::vector<AudioChannel*> channels;
    std::vector<AudioChannel*> channels2; // used when loop starts, for gathering events at the start range

    bool loopStarted = false; // whether the current buffer will exceed the end offset of the loop (read remaining samples from the start)
    int loopOffset   = 0;     // the offset within the current buffer where we start reading from the current loop's start offset
    int loopAmount   = 0;     // amount of samples we must read from the current loop's start offset

    float recbufferIn[ buffer_size ];                  // used for recording from device input
    float outbuffer  [ buffer_size * outputChannels ]; // the output buffer rendered by the hardware

    // generate buffers for temporary channel buffer writes
    AudioBuffer* channelBuffer = new AudioBuffer( outputChannels, buffer_size );
    AudioBuffer* inbuffer      = new AudioBuffer( outputChannels, buffer_size ); // accumulates all channels ("master strip")
    AudioBuffer* recbuffer     = new AudioBuffer( AudioEngineProps::INPUT_CHANNELS, buffer_size );

    thread = 1;

    // signal processors
    Finalizer* limiter = new Finalizer  ( 2, 500, AudioEngineProps::SAMPLE_RATE, outputChannels );
    LPFHPFilter* hpf   = new LPFHPFilter(( float ) AudioEngineProps::SAMPLE_RATE, 55, outputChannels );

    while ( thread )
    {
        // erase previous buffer contents
        inbuffer->silenceBuffers();

        // gather the audio events by the buffer range currently being processed
        int endPosition = bufferPosition + buffer_size;
        channels        = sequencer::getAudioEvents( channels, bufferPosition, endPosition, true );

        // read pointer exceeds maximum allowed offset ? => sequencer has started its loop
        // we must now also gather extra events at the start position of the seq. range
        loopStarted = endPosition > max_buffer_position;
        loopOffset  = ( max_buffer_position + 1 ) - bufferPosition;
        loopAmount  = buffer_size - loopOffset;

        if ( loopStarted )
        {
            // were we bouncing the audio ? save file and stop rendering
            if ( bouncing )
            {
                DiskWriter::writeBufferToFile( AudioEngineProps::SAMPLE_RATE, AudioEngineProps::OUTPUT_CHANNELS, false );
                // broadcast update via JNI, pass buffer identifier name to identify last recording
                Observer::handleBounceComplete( 1 );
                thread = 0; // stop thread, halts rendering
                break;
            }
            else
            {
                endPosition -= max_buffer_position;
                channels2 = sequencer::getAudioEvents( channels2, min_buffer_position, min_buffer_position + buffer_size, false );

                // er? the channels are magically merged by the above invocation..., performing the insert below adds the same events TWICE!?
                //channels.insert( channels.end(), channels2.begin(), channels2.end() ); // merge the channels into one
                channels2.clear(); // would clear on the next "getAudioEvents" query... but why wait ?
            }
        }
        // record audio from Android device ?
        if ( recordFromDevice && AudioEngineProps::INPUT_CHANNELS > 0 )
        {
            int recSamps                  = android_AudioIn( p, recbufferIn, AudioEngineProps::BUFFER_SIZE );
            SAMPLE_TYPE* recBufferChannel = recbuffer->getBufferForChannel( 0 );

            for ( int j = 0; j < recSamps; ++j )
            {
                recBufferChannel[ j ] = recbufferIn[ j ]; //static_cast<float>( recbufferIn[ j ] );

                // merge recording into current input buffer for instant monitoring
                if ( monitorRecording )
                {
                    for ( int k = 0; k < outputChannels; ++k )
                        inbuffer->getBufferForChannel( k )[ j ] = recBufferChannel[ j ];
                }
//......... part of the code is omitted here .........