本文整理汇总了C++中AudioBuffer::getBufferForChannel方法的典型用法代码示例。如果您正苦于以下问题:C++ AudioBuffer::getBufferForChannel方法的具体用法?C++ AudioBuffer::getBufferForChannel怎么用?C++ AudioBuffer::getBufferForChannel使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类AudioBuffer
的用法示例。
在下文中一共展示了AudioBuffer::getBufferForChannel方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: BaseAudioEvent
// Verifies that a loopeable BaseAudioEvent mixes seamlessly across buffer
// iterations: when the write pointer wraps past the event's end, the mixed
// output must continue reading from the start of the source buffer.
TEST( BaseAudioEvent, MixBufferLoopeableEvent )
{
BaseAudioEvent* audioEvent = new BaseAudioEvent();
// source is a 16-sample mono buffer filled with (test fixture) content
int sourceSize = 16;
AudioBuffer* sourceBuffer = new AudioBuffer( 1, sourceSize );
SAMPLE_TYPE* rawBuffer = sourceBuffer->getBufferForChannel( 0 );
fillAudioBuffer( sourceBuffer );
// event does NOT take ownership of the buffer (second argument false),
// hence the explicit delete of sourceBuffer at the end of this test
audioEvent->setBuffer( sourceBuffer, false );
audioEvent->setLoopeable( true );
audioEvent->setSampleLength( 16 * 4 ); // thus will loop 4 times
audioEvent->positionEvent ( 0, 16, 0 );
// create an output buffer at a size smaller than the source buffer length
// (40% of the source) so a single mix pass cannot cover the whole source
int outputSize = ( int )(( double ) sourceSize * .4 );
AudioBuffer* targetBuffer = new AudioBuffer( sourceBuffer->amountOfChannels, outputSize );
int minBufferPos = audioEvent->getSampleStart();
int bufferPos = minBufferPos;
int maxBufferPos = audioEvent->getSampleEnd();
// test the seamless mixing over multiple iterations
for ( ; bufferPos < maxBufferPos; bufferPos += outputSize )
{
// mix buffer contents
targetBuffer->silenceBuffers();
// loopStarted: current output range would run past the event's end;
// loopOffset: index within the output buffer where reading must wrap
// back to the loop start (mirrors the engine's own render-loop math)
bool loopStarted = bufferPos + ( outputSize - 1 ) > maxBufferPos;
int loopOffset = ( maxBufferPos - bufferPos ) + 1;
audioEvent->mixBuffer( targetBuffer, bufferPos, minBufferPos, maxBufferPos, loopStarted, loopOffset, false );
// assert results
SAMPLE_TYPE* mixedBuffer = targetBuffer->getBufferForChannel( 0 );
for ( int i = 0; i < outputSize; ++i )
{
// modulo maps the absolute read position back into the 16-sample
// source, i.e. the expected value for a perfectly looped read
int compareOffset = ( bufferPos + i ) % sourceSize;
EXPECT_EQ( rawBuffer[ compareOffset ], mixedBuffer[ i ] )
<< "expected mixed buffer contents to equal the source contents at mixed offset " << i << " for source offset " << compareOffset;
}
}
delete targetBuffer;
delete sourceBuffer;
delete audioEvent;
}
示例2: createSampleFromBuffer
/**
 * Registers a sample in the SampleManager, built from one (mono) or two
 * (stereo) Java double arrays handed across the JNI bridge.
 *
 * @param aKey            Java string identifying the sample in the SampleManager
 * @param aBufferLength   amount of samples per channel
 * @param aChannelAmount  1 (mono) or 2 (stereo)
 * @param aBuffer         left/mono channel contents
 * @param aOptRightBuffer right channel contents, only read when aChannelAmount == 2
 */
void JavaUtilities::createSampleFromBuffer( jstring aKey, jint aBufferLength, jint aChannelAmount, jdoubleArray aBuffer, jdoubleArray aOptRightBuffer )
{
    AudioBuffer* sampleBuffer = new AudioBuffer( aChannelAmount, aBufferLength );
    int i = 0;

    // get a pointer to the Java array
    jdouble* c_array;
    c_array = JavaBridge::getEnvironment()->GetDoubleArrayElements( aBuffer, 0 );

    // exception checking
    if ( c_array == NULL )
    {
        delete sampleBuffer; // FIX: don't leak the buffer on a pinning failure
        return;
    }

    // copy buffer contents
    SAMPLE_TYPE* channelBuffer = sampleBuffer->getBufferForChannel( 0 );

    for ( i = 0; i < aBufferLength; i++ )
        channelBuffer[ i ] = ( SAMPLE_TYPE ) c_array[ i ];

    // release the memory so Java can have it again
    JavaBridge::getEnvironment()->ReleaseDoubleArrayElements( aBuffer, c_array, 0 );

    // stereo ?
    if ( aChannelAmount == 2 )
    {
        c_array = JavaBridge::getEnvironment()->GetDoubleArrayElements( aOptRightBuffer, 0 );

        // exception checking
        if ( c_array == NULL )
        {
            delete sampleBuffer; // FIX: don't leak the buffer on a pinning failure
            return;
        }

        // copy buffer contents
        channelBuffer = sampleBuffer->getBufferForChannel( 1 );

        for ( i = 0; i < aBufferLength; i++ )
            channelBuffer[ i ] = ( SAMPLE_TYPE ) c_array[ i ];

        // release the memory so Java can have it again
        JavaBridge::getEnvironment()->ReleaseDoubleArrayElements( aOptRightBuffer, c_array, 0 );
    }

    // convert jstring to std::string
    const char* s = JavaBridge::getEnvironment()->GetStringUTFChars( aKey, NULL );
    std::string theKey = s;
    JavaBridge::getEnvironment()->ReleaseStringUTFChars( aKey, s );

    // SampleManager assumes ownership of sampleBuffer from here on
    SampleManager::setSample( theKey, sampleBuffer );
}
示例3: clone
/**
 * Creates a deep copy of this buffer: same channel amount, same buffer
 * size, with every channel's sample contents duplicated. The caller owns
 * the returned instance.
 */
AudioBuffer* AudioBuffer::clone()
{
    AudioBuffer* copy = new AudioBuffer( amountOfChannels, bufferSize );

    // duplicate the raw sample data channel by channel
    for ( int channel = 0; channel < amountOfChannels; ++channel )
        memcpy( copy->getBufferForChannel( channel ),
                getBufferForChannel( channel ),
                bufferSize * sizeof( SAMPLE_TYPE ));

    return copy;
}
示例4: writeBufferToFile
/**
 * write the contents of the write buffer into
 * an output file, this will only write content
 * up until the point it was written to in case
 * the buffer wasn't full yet
 *
 * @param aSampleRate      sample rate to declare in the written WAV file
 * @param aNumChannels     amount of channels in the recording
 * @param broadcastUpdate  when true, notifies observers that the recording state updated
 */
void writeBufferToFile( int aSampleRate, int aNumChannels, bool broadcastUpdate )
{
// quick assertion
if ( cachedBuffer == 0 )
return;
// copy string contents for appending of filename
std::string outputFile = std::string( outputDirectory.c_str());
int bufferSize = outputBufferSize;
// recorded less than maximum available in buffer ? cut silence
// by writing recording into temporary buffers
if ( outputWriterIndex < bufferSize )
{
// only the first outputWriterIndex samples contain actual recording
bufferSize = outputWriterIndex;
AudioBuffer* tempBuffer = new AudioBuffer( aNumChannels, bufferSize );
// copy the recorded portion, sample by sample, for each channel
for ( int i = 0; i < bufferSize; ++i )
{
for ( int c = 0; c < aNumChannels; ++c )
tempBuffer->getBufferForChannel( c )[ i ] = cachedBuffer->getBufferForChannel( c )[ i ];
}
// filename is the output directory with the current recording id appended
WaveWriter::bufferToFile( outputFile.append( SSTR( AudioEngine::recordingFileId )),
tempBuffer, aSampleRate );
// free memory allocated by temporary buffer
delete tempBuffer;
}
else
{
// buffer was filled completely : write the cached buffer as-is
WaveWriter::bufferToFile( outputFile.append( SSTR( AudioEngine::recordingFileId )),
cachedBuffer, aSampleRate );
}
flushOutput(); // free memory
// broadcast update, pass buffer identifier to identify last recording
if ( broadcastUpdate )
Notifier::broadcast( Notifications::RECORDING_STATE_UPDATED, AudioEngine::recordingFileId );
}
示例5: SequencerController
// Integration test: renders a known looped waveform through the full engine
// and lets the mocked OpenSL I/O layer (mock_opensl_io.cpp) assert the
// rendered output matches expectations.
TEST( AudioEngine, Output )
{
AudioEngine::test_program = 2; // help mocked OpenSL IO identify which test is running
AudioEngine::test_successful = false;
// prepare engine environment
SequencerController* controller = new SequencerController();
controller->prepare( 16, 48000, 130.0f, 4, 4 ); // 130 BPM in 4/4 time at 48 kHz sample rate w/buffer size of 16 samples
controller->setTempoNow( 130.0f, 4, 4 );
controller->rewind();
AudioEngine::volume = 1; // QQQ : later on we test mix volume ;)
// create an AudioEvent that holds a simple waveform
// the resulting 16 sample mono buffer contains the following samples:
//
// -1,-1,-1,-1,0,0,0,0,1,1,1,1,0,0,0,0
//
// the event will last for an entire measure in duration
BaseInstrument* instrument = new BaseInstrument();
BaseAudioEvent* event = new BaseAudioEvent( instrument );
AudioBuffer* buffer = new AudioBuffer( 1, 16 );
SAMPLE_TYPE* rawBuffer = buffer->getBufferForChannel( 0 );
// fill the four 4-sample segments of the square-like waveform above
for ( int i = 0; i < 4; ++i )
rawBuffer[ i ] = ( SAMPLE_TYPE ) -MAX_PHASE;
for ( int i = 4; i < 8; ++i )
rawBuffer[ i ] = ( SAMPLE_TYPE ) 0;
for ( int i = 8; i < 12; ++i )
rawBuffer[ i ] = ( SAMPLE_TYPE ) MAX_PHASE;
for ( int i = 12; i < 16; ++i )
rawBuffer[ i ] = ( SAMPLE_TYPE ) 0;
// event keeps looping its 16-sample buffer for a full bar
// (buffer ownership stays with this test: second argument is false)
event->setBuffer( buffer, false );
event->setLoopeable( true );
event->setSampleLength( AudioEngine::samples_per_bar );
event->positionEvent( 0, 16, 0 );
event->addToSequencer();
// start the engine
controller->setPlaying( true );
AudioEngine::start();
// evaluate results (assertions are made in mock_opensl_io.cpp)
ASSERT_TRUE( AudioEngine::test_successful )
<< "expected test to be successful";
EXPECT_EQ( 3, AudioEngine::test_program )
<< "expected test program to have incremented";
// clean up
controller->setPlaying( false );
AudioEngine::render_iterations = 0;
delete controller;
delete instrument;
delete event;
delete buffer;
}
示例6: start
/**
* starts the render thread
* NOTE: the render thread is always active, even when the
* sequencer is paused
*/
void start()
{
OPENSL_STREAM *p;
p = android_OpenAudioDevice( AudioEngineProps::SAMPLE_RATE, AudioEngineProps::INPUT_CHANNELS,
AudioEngineProps::OUTPUT_CHANNELS, AudioEngineProps::BUFFER_SIZE );
// hardware unavailable ? halt thread, trigger JNI callback for error handler
if ( p == NULL )
{
Observer::handleHardwareUnavailable();
return;
}
// audio hardware available, start render thread
int buffer_size, i, c, ci;
buffer_size = AudioEngineProps::BUFFER_SIZE;
int outputChannels = AudioEngineProps::OUTPUT_CHANNELS;
bool isMono = outputChannels == 1;
std::vector<AudioChannel*> channels;
std::vector<AudioChannel*> channels2; // used when loop starts for gathering events at the start range
bool loopStarted = false; // whether the current buffer will exceed the end offset of the loop (read remaining samples from the start)
int loopOffset = 0; // the offset within the current buffer where we start reading from the current loops start offset
int loopAmount = 0; // amount of samples we must read from the current loops start offset
float recbufferIn [ buffer_size ]; // used for recording from device input
float outbuffer [ buffer_size * outputChannels ]; // the output buffer rendered by the hardware
// generate buffers for temporary channel buffer writes
AudioBuffer* channelBuffer = new AudioBuffer( outputChannels, buffer_size );
AudioBuffer* inbuffer = new AudioBuffer( outputChannels, buffer_size ); // accumulates all channels ("master strip")
AudioBuffer* recbuffer = new AudioBuffer( AudioEngineProps::INPUT_CHANNELS, buffer_size );
thread = 1;
// signal processors
Finalizer* limiter = new Finalizer ( 2, 500, AudioEngineProps::SAMPLE_RATE, outputChannels );
LPFHPFilter* hpf = new LPFHPFilter(( float ) AudioEngineProps::SAMPLE_RATE, 55, outputChannels );
while ( thread )
{
// erase previous buffer contents
inbuffer->silenceBuffers();
// gather the audio events by the buffer range currently being processed
int endPosition = bufferPosition + buffer_size;
channels = sequencer::getAudioEvents( channels, bufferPosition, endPosition, true );
// read pointer exceeds maximum allowed offset ? => sequencer has started its loop
// we must now also gather extra events at the start position of the seq. range
loopStarted = endPosition > max_buffer_position;
loopOffset = (( max_buffer_position + 1 ) - bufferPosition );
loopAmount = buffer_size - loopOffset;
if ( loopStarted )
{
// were we bouncing the audio ? save file and stop rendering
if ( bouncing )
{
DiskWriter::writeBufferToFile( AudioEngineProps::SAMPLE_RATE, AudioEngineProps::OUTPUT_CHANNELS, false );
// broadcast update via JNI, pass buffer identifier name to identify last recording
Observer::handleBounceComplete( 1 );
thread = 0; // stop thread, halts rendering
break;
}
else
{
endPosition -= max_buffer_position;
channels2 = sequencer::getAudioEvents( channels2, min_buffer_position, min_buffer_position + buffer_size, false );
// er? the channels are magically merged by above invocation..., performing the insert below adds the same events TWICE*POP*!?!?
//channels.insert( channels.end(), channels2.begin(), channels2.end() ); // merge the channels into one
channels2.clear(); // would clear on next "getAudioEvents"-query... but why wait ?
}
}
// record audio from Android device ?
if ( recordFromDevice && AudioEngineProps::INPUT_CHANNELS > 0 )
{
int recSamps = android_AudioIn( p, recbufferIn, AudioEngineProps::BUFFER_SIZE );
SAMPLE_TYPE* recBufferChannel = recbuffer->getBufferForChannel( 0 );
for ( int j = 0; j < recSamps; ++j )
{
recBufferChannel[ j ] = recbufferIn[ j ];//static_cast<float>( recbufferIn[ j ] );
// merge recording into current input buffer for instant monitoring
if ( monitorRecording )
{
for ( int k = 0; k < outputChannels; ++k )
inbuffer->getBufferForChannel( k )[ j ] = recBufferChannel[ j ];
}
//.........这里部分代码省略.........