This article collects typical usage examples of the C++ AudioFormatReader class. If you are wondering what AudioFormatReader is for, or how to use it in practice, the curated class examples below may help.
The following shows 15 code examples of the AudioFormatReader class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
Example 1: loadSound
void JuceBoxAudioProcessor::loadSound()
{
    Logger::writeToLog("- loadSound().");
    synth.clearSounds();

    if (!sampleFile.existsAsFile()) {
        Logger::writeToLog("\"" + sampleFile.getFullPathName() + "\" doesn't exist.");
        return;
    }

    AudioFormatReader* reader = formatManager.createReaderFor(sampleFile);
    if (reader == NULL) {
        Logger::writeToLog("No reader for \"" + sampleFile.getFullPathName() + "\".");
        return;
    }

    Logger::writeToLog("Format: " + reader->getFormatName());
    Logger::writeToLog("Sample rate: " + String(reader->sampleRate));
    Logger::writeToLog("length: " + String(reader->lengthInSamples));
    Logger::writeToLog("numChannels: " + String(reader->numChannels));

    BigInteger notes;
    notes.setRange(0, 127, true);

    SamplerSound* sound =
        new SamplerSound(
            sampleFile.getFileNameWithoutExtension(),
            *reader,
            notes,
            72 /* C5 == middle C above A-440 */,
            0.0, 0.01,
            20.0 /* max time, hopefully 20s is enough for any sound */);
    synth.addSound(sound);

    delete reader;
}
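Because the example has to remember to `delete reader` on every path, a leak is easy to introduce when the function grows. Below is a minimal sketch of the same logic using std::unique_ptr; the helper name loadSamplerSound is made up for illustration, and the JUCE headers plus <memory> are assumed to be included.

// Hypothetical helper: same logic as loadSound() above, but with RAII ownership.
static void loadSamplerSound (AudioFormatManager& formatManager,
                              Synthesiser& synth,
                              const File& sampleFile)
{
    synth.clearSounds();

    if (! sampleFile.existsAsFile())
        return;

    // createReaderFor() returns nullptr on failure; the unique_ptr frees the
    // reader on every exit path, so no explicit delete is needed.
    std::unique_ptr<AudioFormatReader> reader (formatManager.createReaderFor (sampleFile));
    if (reader == nullptr)
        return;

    BigInteger notes;
    notes.setRange (0, 127, true);

    // SamplerSound copies the sample data it needs in its constructor,
    // so the reader can be released right after the sound is added.
    synth.addSound (new SamplerSound (sampleFile.getFileNameWithoutExtension(),
                                      *reader, notes, 72, 0.0, 0.01, 20.0));
}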
Example 2: file
SampleItem::SampleItem(const String &filename)
{
    this->filename = filename;
    File file(filename);

    if (file.exists())
    {
        shortname = file.getFileNameWithoutExtension();

        FileInputStream *stream = new FileInputStream(file);
        WavAudioFormat format;
        // Pass true so the stream is deleted if no reader can be created;
        // on success the reader takes ownership of the stream.
        AudioFormatReader *reader = format.createReaderFor(stream, true);
        if (reader)
        {
            formatName = reader->getFormatName();
            sampleRate = (int)reader->sampleRate;
            size = (int)reader->lengthInSamples;
            bits = reader->bitsPerSample;
            delete reader;
        }
        else
        {
            sampleRate = 0;
            size = 0;
            bits = 0;
        }
    }

    playing = false;
}
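If the file is not guaranteed to be a WAV, the same probe can go through an AudioFormatManager, which tries every registered format. A minimal sketch, assuming <memory> and the JUCE headers are available; the SampleInfo struct is made up for illustration.

struct SampleInfo   // hypothetical holder for the probed values
{
    String formatName;
    double sampleRate = 0.0;
    int64  lengthInSamples = 0;
    int    bitsPerSample = 0;
};

static bool probeSample (const File& file, SampleInfo& info)
{
    AudioFormatManager manager;
    manager.registerBasicFormats();                       // WAV, AIFF, ...

    std::unique_ptr<AudioFormatReader> reader (manager.createReaderFor (file));
    if (reader == nullptr)
        return false;                                     // unknown or unreadable format

    info.formatName      = reader->getFormatName();
    info.sampleRate      = reader->sampleRate;
    info.lengthInSamples = reader->lengthInSamples;
    info.bitsPerSample   = (int) reader->bitsPerSample;
    return true;
}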
Example 3: AudioDeviceManager
//==============================================================================
MainContentComponent::MainContentComponent() : readAheadThread("read Ahead thread")
{
    // Format manager
    audioFormatManager.registerBasicFormats();

    // Device manager
    audioDeviceManager = new AudioDeviceManager();
    audioDeviceManager->initialise(0, 2, 0, true);
    readAheadThread.startThread(3);

    // Read file (assumes the file exists; createReaderFor returns nullptr otherwise)
    File sfile (File::getSpecialLocation (File::userDocumentsDirectory).getChildFile("lyd3_000_ortf_48k.wav"));
    AudioFormatReader* audioFormatReader = audioFormatManager.createReaderFor(sfile);
    ScopedPointer<AudioSampleBuffer> audioBuffer = new AudioSampleBuffer(1, audioFormatReader->lengthInSamples); // buffer used to read the wav file
    audioFormatReader->read(audioBuffer, 0, audioFormatReader->lengthInSamples, 0, true, false);

    // Change the audio device's sample rate to match the wav file
    juce::AudioDeviceManager::AudioDeviceSetup newAudioSetup;
    audioDeviceManager->getAudioDeviceSetup(newAudioSetup);
    newAudioSetup.sampleRate = audioFormatReader->sampleRate;
    audioDeviceManager->setAudioDeviceSetup(newAudioSetup, true);

    // Playback preparation
    audioFormatReaderSource = new AudioFormatReaderSource(audioFormatReader, true);
    audioTransportSource.setSource(audioFormatReaderSource, 32768, &readAheadThread, 0, 2); // 0 so that no resampling is done...
    audioSourcePlayer.setSource(&audioTransportSource);                                     // ...this relies on the device sample rate having been changed above
    audioDeviceManager->addAudioCallback(&audioSourcePlayer);

    // Playback start
    audioTransportSource.start();
    Logger::writeToLog ("Total length: --> " + String(audioFormatReader->lengthInSamples));

    int bandas = 10;
    for (int i = 0; i < bandas; i++) {
        Buffer* buffer = new Buffer(audioFormatReader->lengthInSamples);
        filteredAudioArray.add(buffer); // filteredAudioArray is an OwnedArray and must be a class member,
    }                                   // so that the class owns it and is the one that deletes it.

    filterBank = new FilterBank(bandas);   // The FilterBank class needs a write pointer to one channel of an AudioSampleBuffer holding the input...
    filterBank->setCoeficientes();         // ...and a pointer to an OwnedArray<Buffer> of the filter bank's size where the filtered outputs are stored.
    filterBank->processSamples(audioBuffer->getWritePointer(0), &filteredAudioArray, audioFormatReader->lengthInSamples);

    const int N = 1; // downsampling rate
    const int M = 1; // length fraction
    bufferWaveform = new Buffer(audioFormatReader->lengthInSamples / (M * N)); // downsampled buffer used to draw the waveform
    audioDownSamplig(audioBuffer, bufferWaveform, N, M);

    addAndMakeVisible(tabsComponent = new TabbedComponent(TabbedButtonBar::TabsAtTop));
    tabsComponent->addTab("Respuesta al Impulso", Colour(0xff2f2f2f), new AudioWaveForm(bufferWaveform, true), true);
    for (int i = 0; i < bandas; i++) {
        tabsComponent->addTab("Filtered IR", Colour(0xff2f2f2f), new AudioWaveForm(filteredAudioArray.getUnchecked(i), true), true);
    }

    setSize (1200, 400);
}
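The constructor never shows its teardown. Below is a sketch of what a matching destructor might look like, assuming the members used above; the important detail is detaching the callback and sources before the objects they reference are destroyed.

MainContentComponent::~MainContentComponent()
{
    // Stop playback and detach everything in roughly the reverse order of construction.
    audioTransportSource.stop();
    audioDeviceManager->removeAudioCallback (&audioSourcePlayer);
    audioSourcePlayer.setSource (nullptr);
    audioTransportSource.setSource (nullptr);   // detach before the member that owns audioFormatReaderSource is destroyed
    readAheadThread.stopThread (500);
}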
Example 4: STR
void CtrlrPanelResourceEditor::showResourceInfo(const int resourceIndex)
{
    CtrlrPanelResource *res = resources[resourceIndex];
    if (res == nullptr)
        return;

    String message;
    message << "Load time: " + res->getLoadedTime().toString(true, true, true, true) + "\n";
    message << "Data file: " + res->getFile().getFullPathName() + "\n";
    message << "Source file: " + res->getSourceFile().getFullPathName() + "\n";
    message << "Source hash: " + STR(res->getHashCode()) + "\n";

    AudioFormatReader *afr = res->asAudioFormat();
    if (afr)
    {
        message << "Type: Audio\n";
        message << "Format name: " << afr->getFormatName() << "\n";
        message << "Sample rate: " << afr->sampleRate << "\n";
        message << "Bits per sample: " << ((int)afr->bitsPerSample) << "\n";
        message << "Length in samples: " << afr->lengthInSamples << "\n";
        message << "Number of channels: " << ((int)afr->numChannels) << "\n";
        message << "Metadata:\n";
        message << "\t" << afr->metadataValues.getDescription();
    }

    if (!res->asImage().isNull())
    {
        Image i = res->asImage();
        message << "Type: Image\n";
        message << "Width: " + STR(i.getWidth()) + "\n";
        message << "Height: " + STR(i.getHeight()) + "\n";
        message << "Has alpha: " + STR(i.hasAlphaChannel()) + "\n";
    }

    DialogWindow::LaunchOptions lo;
    Label *l = new Label ("", message);
    l->setSize (400, 150);
    l->setJustificationType (Justification::centred);
    l->setFont (Font(12.0f));

    lo.content.set(l, true);
    lo.componentToCentreAround = this;
    lo.dialogBackgroundColour = Colours::whitesmoke;
    lo.dialogTitle = "Resource information";
    lo.resizable = true;
    lo.useBottomRightCornerResizer = false;
    lo.useNativeTitleBar = true;
    lo.launchAsync();
}
Example 5: name
SamplerSound::SamplerSound (const String& name_,
                            AudioFormatReader& source,
                            const BigInteger& midiNotes_,
                            const int midiNoteForNormalPitch,
                            const double attackTimeSecs,
                            const double releaseTimeSecs,
                            const double maxSampleLengthSeconds)
    : name (name_),
      midiNotes (midiNotes_),
      midiRootNote (midiNoteForNormalPitch)
{
    sourceSampleRate = source.sampleRate;

    if (sourceSampleRate <= 0 || source.lengthInSamples <= 0)
    {
        length = 0;
        attackSamples = 0;
        releaseSamples = 0;
    }
    else
    {
        length = jmin ((int) source.lengthInSamples,
                       (int) (maxSampleLengthSeconds * sourceSampleRate));

        data = new AudioSampleBuffer (jmin (2, (int) source.numChannels), length + 4);

        source.read (data, 0, length + 4, 0, true, true);

        attackSamples = roundToInt (attackTimeSecs * sourceSampleRate);
        releaseSamples = roundToInt (releaseTimeSecs * sourceSampleRate);
    }
}
Example 6: name
SamplerSound::SamplerSound (const String& soundName,
                            AudioFormatReader& source,
                            const BigInteger& notes,
                            int midiNoteForNormalPitch,
                            double attackTimeSecs,
                            double releaseTimeSecs,
                            double maxSampleLengthSeconds)
    : name (soundName),
      sourceSampleRate (source.sampleRate),
      midiNotes (notes),
      midiRootNote (midiNoteForNormalPitch)
{
    if (sourceSampleRate > 0 && source.lengthInSamples > 0)
    {
        length = jmin ((int) source.lengthInSamples,
                       (int) (maxSampleLengthSeconds * sourceSampleRate));

        data = new AudioSampleBuffer (jmin (2, (int) source.numChannels), length + 4);

        source.read (data, 0, length + 4, 0, true, true);

        attackSamples = roundToInt (attackTimeSecs * sourceSampleRate);
        releaseSamples = roundToInt (releaseTimeSecs * sourceSampleRate);
    }
}
Example 7: readChannels
static void readChannels (AudioFormatReader& reader, int** chans, AudioBuffer<float>* buffer,
                          int startSample, int numSamples, int64 readerStartSample, int numTargetChannels)
{
    for (int j = 0; j < numTargetChannels; ++j)
        chans[j] = reinterpret_cast<int*> (buffer->getWritePointer (j, startSample));

    chans[numTargetChannels] = nullptr;
    reader.read (chans, numTargetChannels, readerStartSample, numSamples, true);
}
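A sketch of how a caller might prepare the chans array for this helper; the channel count, sample counts and destination buffer are illustrative, and the destination buffer must already be large enough for the requested range.

// Read 512 samples, starting at sample 1000 in the file, into the first
// two channels of a float buffer, using the helper above.
static void readBlockExample (AudioFormatReader& reader, AudioBuffer<float>& destBuffer)
{
    const int numTargetChannels = 2;
    int* chans[numTargetChannels + 1];   // one extra slot for the null terminator

    readChannels (reader, chans, &destBuffer,
                  0,      // startSample in the destination buffer
                  512,    // numSamples to read
                  1000,   // readerStartSample (position in the file)
                  numTargetChannels);

    // Note: reader.read() with int** delivers fixed-point data unless the
    // reader uses floating point, so a conversion pass may be needed afterwards.
}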
Example 8: load
bool SFZSample::load(AudioFormatManager* formatManager)
{
    AudioFormatReader* reader = formatManager->createReaderFor(file);
    if (reader == NULL)
        return false;

    sampleRate = reader->sampleRate;
    sampleLength = reader->lengthInSamples;

    // Read some extra samples, which will be filled with zeros, so interpolation
    // can be done without having to check for the edge all the time.
    buffer = new AudioSampleBuffer(reader->numChannels, sampleLength + 4);
    reader->read(buffer, 0, sampleLength + 4, 0, true, true);

    StringPairArray* metadata = &reader->metadataValues;
    int numLoops = metadata->getValue("NumSampleLoops", "0").getIntValue();
    if (numLoops > 0) {
        loopStart = metadata->getValue("Loop0Start", "0").getLargeIntValue();
        loopEnd = metadata->getValue("Loop0End", "0").getLargeIntValue();
    }

    delete reader;
    return true;
}
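The four extra zero samples let an interpolating playback voice read one sample past the nominal end without a bounds check. A small sketch of linear interpolation that relies on that padding; the function and variable names are illustrative.

// Linear interpolation between sample i and i+1 of channel 0; safe for i up to
// sampleLength - 1 because the buffer holds sampleLength + 4 samples, the last
// four of which are zero.
static float interpolateAt (const AudioSampleBuffer& buffer, double position)
{
    const float* samples = buffer.getReadPointer (0);
    const int    i       = (int) position;
    const float  frac    = (float) (position - i);

    return samples[i] + frac * (samples[i + 1] - samples[i]);
}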
Example 9: readChannels
static void readChannels (AudioFormatReader& reader,
                          int** const chans, AudioSampleBuffer* const buffer,
                          const int startSample, const int numSamples,
                          const int64 readerStartSample, const int numTargetChannels)
{
    for (int j = 0; j < numTargetChannels; ++j)
        chans[j] = reinterpret_cast<int*> (buffer->getSampleData (j, startSample));

    chans[numTargetChannels] = nullptr;
    reader.read (chans, numTargetChannels, readerStartSample, numSamples, true);
}
Example 10: fileName
void Sample::update(const String& path, WavAudioFormat& wavAudioFormat)
{
    // Don't load a subsequent sample if a new sample is already loaded (but not yet played).
    if (_readyToSwap)
        return;

    // Find audio file.
    String fileName(path);
    fileName = File::addTrailingSeparator(fileName);
    fileName += _name;
    fileName += EXT;
    File file(fileName);

    Time modification = file.getLastModificationTime();
    if (modification <= _lastModification)
        return;

    // Read audio file. We only read the left channel, mono is good enough.
    AudioFormatReader* reader = wavAudioFormat.createReaderFor(file.createInputStream(), true);
    if (reader == nullptr)
        return;
    _lastModification = modification;

    int64 start = reader->searchForLevel(0, reader->lengthInSamples, SAMPLE_START_THRESHOLD, 1.0, 0);
    if (start == -1)
        start = 0;
    int count = (int)(reader->lengthInSamples - start);
    _processor->writeTrace(String() << "Loading " << _name << " from disk (skip=" << start << ")");

    int newIndex = !_bufferIndex;
    AudioSampleBuffer* buffer = &(_buffers[newIndex]);
    buffer->setSize(1, count);
    reader->read(buffer, 0, count, start, true, false);
    delete reader;

    // Done.
    _readyToSwap = true;
}
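The _readyToSwap / _bufferIndex pair suggests a double-buffered handover to the audio thread. The following is a hypothetical sketch of the consuming side; none of this is in the original source, and the member names are only assumed to match.

// Hypothetical: called from the audio thread before the sample is triggered.
// Swaps in the buffer freshly prepared by update().
void Sample::swapIfReady()
{
    if (_readyToSwap)
    {
        _bufferIndex = !_bufferIndex;   // start playing from the newly loaded buffer
        _readyToSwap = false;           // allow update() to load the next version
    }
}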
Example 11: AudioFormatReader
//==============================================================================
MemoryMappedAudioFormatReader::MemoryMappedAudioFormatReader (const File& f, const AudioFormatReader& reader,
                                                              int64 start, int64 length, int frameSize)
    : AudioFormatReader (nullptr, reader.getFormatName()), file (f),
      dataChunkStart (start), dataLength (length), bytesPerFrame (frameSize)
{
    sampleRate      = reader.sampleRate;
    bitsPerSample   = reader.bitsPerSample;
    lengthInSamples = reader.lengthInSamples;
    numChannels     = reader.numChannels;
    metadataValues  = reader.metadataValues;
    usesFloatingPointData = reader.usesFloatingPointData;
}
Example 12: note
MelodicSamplerSound::MelodicSamplerSound (String& filePath,
                                          const int midiNote,
                                          const int beginSamples,
                                          const int numSamples) : note(midiNote)
{
    File file (filePath);
    FileInputSource source (file, false);
    InputStream* stream = source.createInputStream();

    // The reader takes ownership of the stream (and deletes it if opening fails).
    AudioFormatReader* reader = mp3Format->createReaderFor (stream, true);

    attackSamples = 0;
    releaseSamples = 0;
    // attackSamples = roundToInt (attackTimeSec * sourceSampleRate);
    // releaseSamples = roundToInt (releaseTimeSec * sourceSampleRate);

    if (reader == nullptr || reader->sampleRate <= 0 || reader->lengthInSamples <= 0)
    {
        sourceSampleRate = 0;
        length = 0;
    }
    else
    {
        sourceSampleRate = reader->sampleRate;

        int begin = jmin ((int) reader->lengthInSamples - 1, beginSamples);
        length = jmin ((int) reader->lengthInSamples - begin, numSamples);

        data = new AudioSampleBuffer (jmin (2, (int) reader->numChannels), length + 4);
        reader->read (data, 0, length + 4, begin, true, true);
    }

    delete reader;   // the original example leaked the reader (and heap-allocated File/FileInputSource)
}
Example 13: writeFromAudioReader
bool AudioFormatWriter::writeFromAudioReader (AudioFormatReader& reader,
                                              int64 startSample,
                                              int64 numSamplesToRead)
{
    const int bufferSize = 16384;
    AudioSampleBuffer tempBuffer ((int) numChannels, bufferSize);

    int* buffers [128] = { 0 };

    for (int i = tempBuffer.getNumChannels(); --i >= 0;)
        buffers[i] = reinterpret_cast<int*> (tempBuffer.getSampleData (i, 0));

    if (numSamplesToRead < 0)
        numSamplesToRead = reader.lengthInSamples;

    while (numSamplesToRead > 0)
    {
        const int numToDo = (int) jmin (numSamplesToRead, (int64) bufferSize);

        if (! reader.read (buffers, (int) numChannels, startSample, numToDo, false))
            return false;

        if (reader.usesFloatingPointData != isFloatingPoint())
        {
            int** bufferChan = buffers;

            while (*bufferChan != nullptr)
            {
                void* const b = *bufferChan++;

                if (isFloatingPoint())
                    FloatVectorOperations::convertFixedToFloat ((float*) b, (int*) b, 1.0f / 0x7fffffff, numToDo);
                else
                    convertFloatsToInts ((int*) b, (float*) b, numToDo);
            }
        }

        if (! write (const_cast <const int**> (buffers), numToDo))
            return false;

        numSamplesToRead -= numToDo;
        startSample += numToDo;
    }

    return true;
}
Example 14: loadIr
bool Mcfx_convolverAudioProcessor::loadIr(AudioSampleBuffer* IRBuffer, const File& audioFile, int channel, double &samplerate, float gain, int offset, int length)
{
    if (!audioFile.existsAsFile())
    {
        std::cout << "ERROR: file does not exist!!" << std::endl;
        return false;
    }

    AudioFormatManager formatManager;

    // this can read .wav and .aiff
    formatManager.registerBasicFormats();

    AudioFormatReader* reader = formatManager.createReaderFor(audioFile);
    if (!reader) {
        std::cout << "ERROR: could not read impulse response file!" << std::endl;
        return false;
    }
    //AudioFormatReader* reader = wavFormat.createMemoryMappedReader(audioFile);

    int64 ir_length = (int)reader->lengthInSamples - offset;

    if (ir_length <= 0) {
        std::cout << "wav file has zero samples" << std::endl;
        delete reader;   // don't leak the reader on this error path
        return false;
    }

    if (reader->numChannels <= channel) {
        std::cout << "wav file doesn't have enough channels: " << reader->numChannels << std::endl;
        delete reader;   // don't leak the reader on this error path
        return false;
    }

    AudioSampleBuffer ReadBuffer(reader->numChannels, ir_length); // create buffer
    reader->read(&ReadBuffer, 0, ir_length, offset, true, true);

    // set the samplerate -> maybe we have to resample later...
    samplerate = reader->sampleRate;

    //std::cout << "ReadRMS: " << ReadBuffer.getRMSLevel(channel, 0, ir_length) << std::endl;

    // check if we want a shorter impulse response
    if (ir_length > length && length != 0)
        ir_length = length;

    // copy the wanted channel into our IR Buffer
    IRBuffer->setSize(1, ir_length);
    IRBuffer->copyFrom(0, 0, ReadBuffer, channel, 0, ir_length);

    // scale ir with gain
    IRBuffer->applyGain(gain);

    // std::cout << "ReadRMS: " << IRBuffer->getRMSLevel(0, 0, ir_length) << std::endl;

    delete reader;
    return true;
}
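A hypothetical call site for loadIr() from inside the processor; the file path, channel index and trim values are illustrative.

// Somewhere inside Mcfx_convolverAudioProcessor, e.g. when a new preset is chosen:
AudioSampleBuffer irBuffer;
double irSampleRate = 0.0;
File irFile ("/path/to/impulse_response.wav");   // illustrative path

// Load channel 0 at unity gain with no offset; length == 0 keeps the full response.
if (loadIr (&irBuffer, irFile, 0, irSampleRate, 1.0f, 0, 0))
{
    // irBuffer now holds one channel of the IR at irSampleRate;
    // resample it here if the host runs at a different rate.
}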
Example 15: range
BufferingAudioReader::BufferedBlock::BufferedBlock (AudioFormatReader& reader, int64 pos, int numSamples)
    : range (pos, pos + numSamples),
      buffer ((int) reader.numChannels, numSamples)
{
    reader.read (&buffer, 0, numSamples, pos, true, true);
}