This article collects typical usage examples of the C++ AudioChunk class. If you have been wondering what AudioChunk is for, how to use it, or what real code that uses it looks like, the hand-picked class examples below may help.
The following 15 AudioChunk code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
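Before diving in, a note on the pattern these examples share: in the Gecko snippets below, an AudioChunk is either null, in which case it carries only a duration and renders as silence, or it holds per-channel sample pointers in mChannelData together with an mVolume scale applied on copy. The sketch below distills that recurring check; it assumes the same helpers (PodZero, ConvertAudioSamplesWithScale) that appear in Examples 2, 9 and 13, and FillFirstChannelFromChunk is a hypothetical name, not part of any real file.

// Minimal sketch (hypothetical helper): render one channel of a chunk,
// treating a null chunk as silence, as the examples below repeatedly do.
static void FillFirstChannelFromChunk(const AudioChunk& aChunk, float* aOut)
{
  if (aChunk.IsNull()) {
    // A null chunk carries only a duration: render it as silence.
    PodZero(aOut, aChunk.GetDuration());
    return;
  }
  // A non-null chunk exposes per-channel sample data and a volume scale.
  const float* in = static_cast<const float*>(aChunk.mChannelData[0]);
  ConvertAudioSamplesWithScale(in, aOut, aChunk.GetDuration(), aChunk.mVolume);
}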
Example 1: Run
NS_IMETHOD Run() override
{
  auto engine =
    static_cast<ScriptProcessorNodeEngine*>(mStream->Engine());
  AudioChunk output;
  output.SetNull(engine->mBufferSize);
  {
    auto node = static_cast<ScriptProcessorNode*>
      (engine->NodeMainThread());
    if (!node) {
      return NS_OK;
    }
    if (node->HasListenersFor(nsGkAtoms::onaudioprocess)) {
      DispatchAudioProcessEvent(node, &output);
    }
    // The node may have been destroyed during event dispatch.
  }
  // Append it to our output buffer queue
  engine->GetSharedBuffers()->FinishProducingOutputBuffer(output);
  return NS_OK;
}
Example 2: CopyChunkToBlock
template <typename T>  // the template parameter was dropped in the listing;
                       // the body uses T throughout
static void
CopyChunkToBlock(AudioChunk& aInput, AudioBlock* aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  AutoTArray<const T*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
    channels.SetLength(inputChannels.Length());
    PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
    }
  }
  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
    if (channels[c]) {
      ConvertAudioSamplesWithScale(channels[c], outputData,
                                   aInput.GetDuration(), aInput.mVolume);
    } else {
      PodZero(outputData, aInput.GetDuration());
    }
  }
}
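Because this variant is templated on the sample type T, a caller has to pick T from the chunk's buffer format; Example 13 below performs the same dispatch inline inside its copy loop. The wrapper below is a hedged sketch of what such a caller could look like, reusing this example's names; the wrapper itself is hypothetical.

// Hypothetical dispatcher: select the sample type from mBufferFormat,
// mirroring the switch in Example 13.
static void CopyChunkToBlockAnyFormat(AudioChunk& aInput, AudioBlock* aBlock,
                                      uint32_t aOffsetInBlock)
{
  switch (aInput.mBufferFormat) {
    case AUDIO_FORMAT_FLOAT32:
      CopyChunkToBlock<float>(aInput, aBlock, aOffsetInBlock);
      break;
    case AUDIO_FORMAT_S16:
      CopyChunkToBlock<int16_t>(aInput, aBlock, aOffsetInBlock);
      break;
    default:
      // Null/silent chunks never touch ChannelData<T>(), so any T works.
      CopyChunkToBlock<float>(aInput, aBlock, aOffsetInBlock);
      break;
  }
}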
Example 3:
void PAudioBuffer::sendToNetwork()
{
  int toReach;
  int i;
  AudioChunk *chunk;

  if (ABS(fRdIn - fWrIn) >= SOUNDBUFF_SIZE)
  {
    toReach = fRdIn + SOUNDBUFF_SIZE;
    if (toReach >= fMaxIn)
      toReach -= fMaxIn;
    i = 0;
    while (fRdIn != toReach)
    {
      _frameBuff[i++] = input[fRdIn++];
      if (fRdIn >= fMaxIn)
        fRdIn = 0;
    }
    chunk = _bridge.popUnused();
    if (chunk == NULL)
      return;
    chunk->clean();
    //compressed = _codec->encode(_frameBuff, FRAME_PACKET_SIZE, encodedSize);
    //chunk->assign(_frameBuff, (FRAME_PACKET_SIZE * sizeof(float)));
    // Raw Mode
    chunk->assign(_frameBuff, SOUNDBUFF_SIZE);
    _bridge.inputPush(chunk);
  }
}
Example 4: pull
virtual void pull(AudioChunk &chunk)
{
  if (!chunk.length()) return;
  if (!valid || finished) {chunk.silence(); return;}

  int samples, have = 0, need = chunk.length();

  //Create pointers to 16-bit data
  short *d16[PG_MAX_CHANNELS];
  for (Uint32 i = 0; i < chunk.format().channels; ++i)
    d16[i] = (short*) chunk.start(i);

  while (true)
  {
    samples = stb_vorbis_get_samples_short(ogg,
      chunk.format().channels, d16, (need-have));

    if (samples < 0)
    {
      finished = true;
      //cout << " VORBIS ERROR" << endl;
      break;
    }
    if (samples == 0)
    {
      //File's end
      if (loop)
      {
        stb_vorbis_seek_start(ogg);
        continue;
      }
      else
      {
        finished = true;
        break;
      }
    }

    for (Uint32 i = 0; i < chunk.format().channels; ++i)
      d16[i] += samples;
    have += samples;

    //if (have > need) cout << "VORBIS OVERDRAW" << endl;
    //std::cout << "OGG pull: " << have << "/" << need << std::endl;

    if (have >= need) break;
  }

  //Cutoff marker if necessary
  if (have < need) chunk.cutoff(have);

  //Upsample data to 24-bit Sint32s
  for (Uint32 i = 0; i < chunk.format().channels; ++i)
  {
    Sint32 *start = chunk.start(i), *op = start + have;
    short *ip = d16[i];
    while (op != start) {*(--op) = 256 * Sint32(*(--ip));}
  }
}
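The closing loop widens the decoded 16-bit samples into the stream's Sint32 format in place, walking back to front so source and destination can overlap in the same buffer. Multiplying by 256 shifts each value eight bits up, placing a 16-bit sample at the top of a 24-bit range, which is what the "24-bit Sint32s" comment refers to. A self-contained restatement of that step:

#include <cstdint>

// Widen a 16-bit sample into a 24-bit range, as `256 * Sint32(*(--ip))`
// does above: 0x7FFF maps to 0x7FFF00, -0x8000 maps to -0x800000.
inline int32_t WidenTo24Bit(int16_t s)
{
  return int32_t(s) * 256;
}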
Example 5: MOZ_ASSERT
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }
    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }
  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);
  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }
  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }
  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }
  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
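The accumulation above widens outputChannelCount with GetAudioChannelsSuperset so the output block can represent every input chunk. Under Gecko's up-mix rules a higher channel count can always carry a lower one, so the superset presumably reduces to the larger of the two counts; the sketch below states that assumption explicitly rather than quoting the real helper.

#include <algorithm>
#include <cstdint>

// Assumed behavior of GetAudioChannelsSuperset: the superset of two
// channel configurations is simply the larger channel count.
static uint32_t GetAudioChannelsSupersetSketch(uint32_t aChannels1,
                                               uint32_t aChannels2)
{
  return std::max(aChannels1, aChannels2);
}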
Example 6: TRACK_LOG
void
AudioTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                            TrackID aID,
                                            StreamTime aTrackOffset,
                                            uint32_t aTrackEvents,
                                            const MediaSegment& aQueuedMedia)
{
  if (mCanceled) {
    return;
  }

  const AudioSegment& audio = static_cast<const AudioSegment&>(aQueuedMedia);

  // Check and initialize parameters for codec encoder.
  if (!mInitialized) {
    mInitCounter++;
    TRACK_LOG(LogLevel::Debug, ("Init the audio encoder %d times", mInitCounter));
    AudioSegment::ChunkIterator iter(const_cast<AudioSegment&>(audio));
    while (!iter.IsEnded()) {
      AudioChunk chunk = *iter;
      // The number of channels is determined by the first non-null chunk, and
      // thus the audio encoder is initialized at this time.
      if (!chunk.IsNull()) {
        nsresult rv = Init(chunk.mChannelData.Length(), aGraph->GraphRate());
        if (NS_FAILED(rv)) {
          LOG("[AudioTrackEncoder]: Fail to initialize the encoder!");
          NotifyCancel();
        }
        break;
      }
      iter.Next();
    }

    mNotInitDuration += aQueuedMedia.GetDuration();
    if (!mInitialized &&
        (mNotInitDuration / aGraph->GraphRate() > INIT_FAILED_DURATION) &&
        mInitCounter > 1) {
      LOG("[AudioTrackEncoder]: Initialize failed for 30s.");
      NotifyEndOfStream();
      return;
    }
  }

  // Append and consume this raw segment.
  AppendAudioSegment(audio);

  // The stream has stopped and reached the end of track.
  if (aTrackEvents == MediaStreamListener::TRACK_EVENT_ENDED) {
    LOG("[AudioTrackEncoder]: Receive TRACK_EVENT_ENDED .");
    NotifyEndOfStream();
  }
}
Example 7: while
void Splicer::pull(AudioChunk &chunk)
{
  Uint32 left = chunk.length(), chans = chunk.format().channels;
  Sint32 *data[PG_MAX_CHANNELS];
  for (Uint32 i = 0; i < chans; ++i) data[i] = chunk.start(i);

  //Query exhausted each loop to refresh the value of "current".
  while (!exhausted())
  {
    //Pull data from next stream
    AudioChunk sub(chunk.audio, output, data, left,
      chunk.frame(), chunk.a(), chunk.b());
    current->pull(sub);

    //Partial advance
    if (current->exhausted())
    {
      Uint32 cut = sub.cutoff();
      for (Uint32 i = 0; i < chans; ++i) data[i] += cut;
      left -= cut;
      current = NULL;
      if (left) continue;
    }
    return;
  }

  //The Splicer is exhausted!
  chunk.cutoff(data[0] - chunk.start(0));
}
Example 8: TRACK_LOG
void
AudioTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                            TrackID aID,
                                            TrackRate aTrackRate,
                                            TrackTicks aTrackOffset,
                                            uint32_t aTrackEvents,
                                            const MediaSegment& aQueuedMedia)
{
  if (mCanceled) {
    return;
  }

  const AudioSegment& audio = static_cast<const AudioSegment&>(aQueuedMedia);

  // Check and initialize parameters for codec encoder.
  if (!mInitialized) {
#ifdef PR_LOGGING
    mAudioInitCounter++;
    TRACK_LOG(PR_LOG_DEBUG, ("Init the audio encoder %d times", mAudioInitCounter));
#endif
    AudioSegment::ChunkIterator iter(const_cast<AudioSegment&>(audio));
    while (!iter.IsEnded()) {
      AudioChunk chunk = *iter;
      // The number of channels is determined by the first non-null chunk, and
      // thus the audio encoder is initialized at this time.
      if (!chunk.IsNull()) {
        nsresult rv = Init(chunk.mChannelData.Length(), aTrackRate);
        if (NS_FAILED(rv)) {
          LOG("[AudioTrackEncoder]: Fail to initialize the encoder!");
          NotifyCancel();
        }
        break;
      }
      iter.Next();
    }
  }

  // Append and consume this raw segment.
  AppendAudioSegment(audio);

  // The stream has stopped and reached the end of track.
  if (aTrackEvents == MediaStreamListener::TRACK_EVENT_ENDED) {
    LOG("[AudioTrackEncoder]: Receive TRACK_EVENT_ENDED .");
    NotifyEndOfStream();
  }
}
Example 9: CopyChunkToBlock
/**
 * Copies the data in aInput to aOffsetInBlock within aBlock. All samples must
 * be float. Both chunks must have the same number of channels (or else
 * aInput is null). aBlock must have been allocated with AllocateInputBlock.
 */
static void
CopyChunkToBlock(const AudioChunk& aInput, AudioChunk *aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t d = aInput.GetDuration();
  for (uint32_t i = 0; i < aBlock->mChannelData.Length(); ++i) {
    float* out = static_cast<float*>(const_cast<void*>(aBlock->mChannelData[i])) +
                 aOffsetInBlock;
    if (aInput.IsNull()) {
      PodZero(out, d);
    } else {
      const float* in = static_cast<const float*>(aInput.mChannelData[i]);
      ConvertAudioSamplesWithScale(in, out, d, aInput.mVolume);
    }
  }
}
Example 10: pull
void Bandpass::pull(AudioChunk &chunk,
                    const Bandpass_Node &a, const Bandpass_Node &b)
{
  //Pull source data
  source.pull(chunk);

  //Calculate RC multipliers
  float
    al = RCCONV / ((a.low <= 0.0f) ? 40000.0f : a.low),
    ah = RCCONV / ((a.high <= 0.0f) ? 10.0f : a.high),
    bl = RCCONV / ((b.low <= 0.0f) ? 40000.0f : b.low),
    bh = RCCONV / ((b.high <= 0.0f) ? 10.0f : b.high);
  float lpRC = al, hpRC = ah,
    lpM = pow(bl/al, 1.0f / float(chunk.length())),
    hpM = pow(bh/ah, 1.0f / float(chunk.length())),
    lpA, hpA, samp,
    dt = 1.0f / float(chunk.format().rate);

  //Apply effect!
  Uint32 chan = source.format().channels;
  for (Uint32 i = 0; i < chan; ++i)
  {
    Sint32 *pos = chunk.start(i), *end = chunk.end(i);
    float &lpPc = lpP[i], &hpDc = hpD[i];
    while (pos < end)
    {
      //Interpolate settings
      lpA = dt / (lpRC + dt); lpRC *= lpM;
      hpA = hpRC / (hpRC + dt); hpRC *= hpM;

      //Get samples
      samp = float(*pos);

      //Lowpass
      samp = lpPc + lpA * (samp - lpPc);
      lpPc = samp;

      //Highpass (confusing but correct)
      samp = hpA * (samp + hpDc);
      hpDc = samp - lpPc;

      //Set samples
      *pos = Sint32(samp);
      ++pos;
    }
  }
}
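The coefficients here are the standard one-pole forms: the lowpass mixing factor is dt / (RC + dt) and the highpass factor is RC / (RC + dt), with RC derived from the cutoff frequency (RCCONV presumably encodes the 1/(2*pi) constant). Because the RC values are multiplied by lpM and hpM each sample, the cutoffs glide geometrically from the a settings to the b settings across the chunk. A minimal standalone sketch of the lowpass step, independent of this library's types:

// One-pole lowpass, matching `samp = lpPc + lpA * (samp - lpPc)` above.
// fc is the cutoff in Hz and dt the sample period in seconds; treating
// RC as 1/(2*pi*fc) is an assumption about what RCCONV encodes.
struct OnePoleLowpass
{
  float prev = 0.0f;
  float process(float in, float fc, float dt)
  {
    float rc = 1.0f / (6.2831853f * fc);   // RC = 1 / (2*pi*fc)
    float alpha = dt / (rc + dt);
    prev += alpha * (in - prev);
    return prev;
  }
};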
Example 11: pull
void Signal::pull(AudioChunk &chunk) const
{
  if (!data) {chunk.silence(); return;}
  if (data->trans) data->trans->pull(chunk);
  else data->stream->pull(chunk);
}
Example 12: AudioBuffer
/* static */ already_AddRefed<AudioBuffer>
AudioBuffer::Create(nsPIDOMWindowInner* aWindow, float aSampleRate,
                    AudioChunk&& aInitialContents)
{
  AudioChunk initialContents = aInitialContents;
  ErrorResult rv;
  RefPtr<AudioBuffer> buffer =
    new AudioBuffer(aWindow, initialContents.ChannelCount(),
                    initialContents.mDuration, aSampleRate, rv);
  if (rv.Failed()) {
    return nullptr;
  }
  buffer->mSharedChannels = Move(aInitialContents);
  return buffer.forget();
}
Example 13: CopyChunkToBlock
/**
 * Copies the data in aInput to aOffsetInBlock within aBlock.
 * aBlock must have been allocated with AllocateInputBlock and have a channel
 * count that's a superset of the channels in aInput.
 */
static void
CopyChunkToBlock(const AudioChunk& aInput, AudioChunk *aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  nsAutoTArray<const void*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    channels.SetLength(aInput.ChannelCount());
    PodCopy(channels.Elements(), aInput.mChannelData.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, nullptr);
    }
  }
  uint32_t duration = aInput.GetDuration();
  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData =
      static_cast<float*>(const_cast<void*>(aBlock->mChannelData[c])) + aOffsetInBlock;
    if (channels[c]) {
      switch (aInput.mBufferFormat) {
        case AUDIO_FORMAT_FLOAT32:
          ConvertAudioSamplesWithScale(
            static_cast<const float*>(channels[c]), outputData, duration,
            aInput.mVolume);
          break;
        case AUDIO_FORMAT_S16:
          ConvertAudioSamplesWithScale(
            static_cast<const int16_t*>(channels[c]), outputData, duration,
            aInput.mVolume);
          break;
        default:
          NS_ERROR("Unhandled format");
      }
    } else {
      PodZero(outputData, duration);
    }
  }
}
Example 14: pull
virtual void pull(AudioChunk &chunk)
{
  //This is possible at startup; race conditions are bad.
  //Return after silencing, or `back` is dereferenced below.
  if (!back) {chunk.silence(); return;}

  //Shorthand.
  Uint32 frame = back->mikeFrame, length = back->mikeLength;
  const Sint32 *data = back->mikeData;

  //How much information is available?
  if (readFrame != frame) {readFrame = frame; prog = 0;}
  Uint32 want = chunk.length(), get = std::min(length-prog, want);

  //Read what we can from the buffer
  std::memcpy((void*)chunk.start(0), (const void*)(data+prog), 4*get);
  prog += get;

  //Fill your cup too full and it will spill...
  if (get < want)
    std::memset((void*)(chunk.start(0)+get), 0, 4*(want-get));
}
Example 15: EnsureTrack
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
{
  StreamBuffer::Track* track = EnsureTrack();
  AudioChunk outputChunk;
  AudioSegment* segment = track->Get<AudioSegment>();
  outputChunk.SetNull(0);
  if (mInCycle) {
    // XXX DelayNode not supported yet so just produce silence
    outputChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
  } else {
    AudioChunk tmpChunk;
    AudioChunk* inputChunk = ObtainInputBlock(&tmpChunk);
    bool finished = false;
    mEngine->ProduceAudioBlock(this, *inputChunk, &outputChunk, &finished);
    if (finished) {
      FinishOutput();
    }
  }
  mLastChunk = outputChunk;
  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    segment->AppendAndConsumeChunk(&outputChunk);
  } else {
    segment->AppendNullData(outputChunk.GetDuration());
  }
  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = outputChunk;
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
                                IdealAudioRate(), segment->GetDuration(), 0,
                                tmpSegment);
  }
}