This article collects typical usage examples of the C++ AudioNodeStream class, to help answer questions such as: what is AudioNodeStream used for, and how do you use it?
Ten code examples of the AudioNodeStream class follow, ordered by popularity by default.
Example 1: Context
void
AudioBufferSourceNode::Stop(double aWhen, ErrorResult& aRv)
{
  if (!WebAudioUtils::IsTimeValid(aWhen)) {
    aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
    return;
  }
  if (!mStartCalled) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return;
  }
  AudioNodeStream* ns = mStream;
  if (!ns || !Context()) {
    // We've already stopped and had our stream shut down.
    return;
  }
  ns->SetStreamTimeParameter(STOP, Context(), std::max(0.0, aWhen));
}
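The Stop() overloads in this listing all follow the same three-step pattern: validate the time argument, require a prior start() call, then clamp the scheduled time to zero. As a rough, self-contained sketch of that flow (the body of IsTimeValid and the SimpleSourceNode type are hypothetical stand-ins, not Gecko APIs):

#include <algorithm>
#include <cmath>
#include <stdexcept>

// Hypothetical stand-in for WebAudioUtils::IsTimeValid: one plausible
// definition is that a schedulable time must be finite and non-negative.
static bool IsTimeValid(double aWhen) {
  return aWhen >= 0.0 && std::isfinite(aWhen);
}

struct SimpleSourceNode {
  bool mStartCalled = false;
  double mStopTime = -1.0;  // -1.0 means "no stop scheduled"

  void Stop(double aWhen) {
    if (!IsTimeValid(aWhen)) {
      throw std::invalid_argument("invalid stop time");  // ~ NS_ERROR_DOM_NOT_SUPPORTED_ERR
    }
    if (!mStartCalled) {
      throw std::logic_error("stop() before start()");   // ~ NS_ERROR_DOM_INVALID_STATE_ERR
    }
    // Mirrors std::max(0.0, aWhen) above; with this IsTimeValid the clamp is
    // belt-and-braces, since negative times are already rejected.
    mStopTime = std::max(0.0, aWhen);
  }
};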
Example 2:
void
AudioNodeStream::SetActive()
{
  if (mIsActive || mMarkAsFinishedAfterThisBlock) {
    return;
  }
  mIsActive = true;
  if (IsAudioParamStream()) {
    // Consumers merely influence stream order.
    // They do not read from the stream.
    return;
  }
  for (const auto& consumer : mConsumers) {
    AudioNodeStream* ns = consumer->GetDestination()->AsAudioNodeStream();
    if (ns) {
      ns->IncrementActiveInputCount();
    }
  }
}
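Stripped of the MediaStream graph machinery, SetActive() maintains a reference count of active inputs on each consumer, so a node can cheaply tell whether anything upstream might still produce sound. A minimal sketch of that bookkeeping with invented names (ToyNodeStream is not a Gecko type):

#include <cstdint>
#include <vector>

// Toy model of downstream activity tracking: a node is "active" while it can
// produce non-silent output, and each consumer counts its active inputs so it
// can decide cheaply whether it needs to process at all.
struct ToyNodeStream {
  bool mIsActive = false;
  uint32_t mActiveInputCount = 0;
  std::vector<ToyNodeStream*> mConsumers;

  void IncrementActiveInputCount() { ++mActiveInputCount; }

  void SetActive() {
    if (mIsActive) {
      return;  // already propagated to consumers
    }
    mIsActive = true;
    for (ToyNodeStream* consumer : mConsumers) {
      consumer->IncrementActiveInputCount();
    }
  }
};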
Example 3:
void
AudioNode::DestroyMediaStream()
{
  if (mStream) {
    // Remove the node pointer on the engine.
    AudioNodeStream* ns = mStream;
    MOZ_ASSERT(ns, "How come we don't have a stream here?");
    MOZ_ASSERT(ns->Engine()->NodeMainThread() == this,
               "Invalid node reference");
    ns->Engine()->ClearNode();
    mStream->Destroy();
    mStream = nullptr;
    nsCOMPtr<nsIObserverService> obs = services::GetObserverService();
    if (obs) {
      nsAutoString id;
      id.AppendPrintf("%u", mId);
      obs->NotifyObservers(nullptr, "webaudio-node-demise", id.get());
    }
  }
}
Example 4: Context
void
AudioBufferSourceNode::Stop(double aWhen, ErrorResult& aRv)
{
  if (!mStartCalled) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return;
  }
  if (!mBuffer) {
    // We don't have a buffer, so the stream is never marked as finished.
    // Therefore we need to drop our playing ref right now.
    mPlayingRef.Drop(this);
  }
  AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
  if (!ns || !Context()) {
    // We've already stopped and had our stream shut down.
    return;
  }
  ns->SetStreamTimeParameter(STOP, Context()->DestinationStream(),
                             std::max(0.0, aWhen));
}
Example 5: SendOffsetAndDurationParametersToStream
void
AudioBufferSourceNode::SendBufferParameterToStream(JSContext* aCx)
{
  AudioNodeStream* ns = mStream;
  if (!ns) {
    return;
  }
  if (mBuffer) {
    RefPtr<ThreadSharedFloatArrayBufferList> data =
      mBuffer->GetThreadSharedChannelsForRate(aCx);
    ns->SetBuffer(data.forget());
    if (mStartCalled) {
      SendOffsetAndDurationParametersToStream(ns);
    }
  } else {
    ns->SetInt32Parameter(BUFFEREND, 0);
    ns->SetBuffer(nullptr);
    MarkInactive();
  }
}
Example 6: Context
void
OscillatorNode::Stop(double aWhen, ErrorResult& aRv)
{
  if (!WebAudioUtils::IsTimeValid(aWhen)) {
    aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
    return;
  }
  if (!mStartCalled) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return;
  }
  AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
  if (!ns || !Context()) {
    // We've already stopped and had our stream shut down.
    return;
  }
  // TODO: Perhaps we need to do more here.
  ns->SetStreamTimeParameter(OscillatorNodeEngine::STOP,
                             Context(), std::max(0.0, aWhen));
}
Example 7: SendOffsetAndDurationParametersToStream
void
AudioBufferSourceNode::Start(double aWhen, double aOffset,
                             const Optional<double>& aDuration, ErrorResult& aRv)
{
  if (!WebAudioUtils::IsTimeValid(aWhen) ||
      (aDuration.WasPassed() && !WebAudioUtils::IsTimeValid(aDuration.Value()))) {
    aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
    return;
  }
  if (mStartCalled) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return;
  }
  mStartCalled = true;
  AudioNodeStream* ns = mStream;
  if (!ns) {
    // Nothing to play, or we're already dead for some reason.
    return;
  }
  // Remember our arguments so that we can use them when we get a new buffer.
  mOffset = aOffset;
  mDuration = aDuration.WasPassed() ? aDuration.Value()
                                    : std::numeric_limits<double>::min();
  // We can't send these parameters without a buffer because we don't know the
  // buffer's sample rate or length.
  if (mBuffer) {
    SendOffsetAndDurationParametersToStream(ns);
  }
  // Don't set the parameter unnecessarily.
  if (aWhen > 0.0) {
    ns->SetDoubleParameter(START, mContext->DOMTimeToStreamTime(aWhen));
  }
}
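One detail worth flagging in Start(): std::numeric_limits<double>::min() is the smallest positive normal double, not the most negative value, and here it serves as a sentinel meaning "no duration was passed". A tiny sketch of that encoding, with hypothetical names:

#include <limits>

// Sentinel meaning "the caller did not pass a duration". The smallest
// positive normal double works because no caller can meaningfully request a
// duration of roughly 2.2e-308 seconds.
const double kNoDuration = std::numeric_limits<double>::min();

static bool HasExplicitDuration(double aDuration) {
  // Exact comparison is safe here: the sentinel is only ever assigned, never
  // computed, so no rounding can perturb it.
  return aDuration != kNoDuration;
}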
Example 8: MOZ_ASSERT
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port.
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      continue;
    }
    // It is possible for mLastChunks to be empty here, because `a` might be
    // an AudioNodeStream that has not been scheduled yet, because it is
    // further down the graph _but_ has a connection back to this node.
    // Because we enforce the presence of at least one DelayNode, with at
    // least one block of delay, and because the output of a DelayNode that
    // has been fed less than `delayTime` of audio is silence, we can simply
    // continue here: this input would not influence the output of this node.
    // On the next iteration, a->mLastChunks.IsEmpty() will be false, and
    // everything will work as usual.
    if (a->mLastChunks.IsEmpty()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }
    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }
  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);
  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }
  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }
  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }
  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  // The static storage here should be 1KB, so it's fine.
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
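Boiled down, ObtainInputBlock gathers the non-null chunks feeding this port, widens the output channel count to the superset of the inputs' channel counts, and accumulates everything into one render quantum. Here is a self-contained illustration of that shape, assuming fixed 128-frame blocks; Chunk and MixInputs are invented names, the superset is simplified to a plain max, and per-chunk volume and up/down-mix rules are ignored:

#include <algorithm>
#include <cstddef>
#include <vector>

constexpr size_t kBlockSize = 128;  // corresponds to WEBAUDIO_BLOCK_SIZE

// A chunk is one render quantum: one 128-sample buffer per channel.
using Chunk = std::vector<std::vector<float>>;

// Mix any number of input chunks into one output chunk whose channel count is
// the superset (here simply the max) of the inputs' channel counts. Channels
// missing from a narrower input contribute silence.
Chunk MixInputs(const std::vector<const Chunk*>& aInputs) {
  size_t outChannels = 1;
  for (const Chunk* c : aInputs) {
    outChannels = std::max(outChannels, c->size());
  }
  Chunk out(outChannels, std::vector<float>(kBlockSize, 0.0f));
  for (const Chunk* c : aInputs) {
    for (size_t ch = 0; ch < c->size(); ++ch) {
      for (size_t i = 0; i < kBlockSize; ++i) {
        out[ch][i] += (*c)[ch][i];  // accumulate into the output block
      }
    }
  }
  return out;
}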
Example 9: MOZ_ASSERT
AudioChunk*
AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 0;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunk;
    // XXX when we implement DelayNode, this will no longer be true and we'll
    // need to treat a null chunk (when the DelayNode hasn't had a chance
    // to produce data yet) as silence here.
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }
    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }
  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0) {
    aTmpChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
    return aTmpChunk;
  }
  if (inputChunkCount == 1) {
    return inputChunks[0];
  }
  AllocateAudioBlock(outputChannelCount, aTmpChunk);
  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
      NS_ASSERTION(outputChannelCount == channels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    }
    for (uint32_t c = 0; c < channels.Length(); ++c) {
      const float* inputData = static_cast<const float*>(channels[c]);
      float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk->mChannelData[c]));
      if (inputData) {
        if (i == 0) {
          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
        } else {
          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
        }
      } else if (i == 0) {
        memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
      }
    }
  }
  return aTmpChunk;
}
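The i == 0 split in the inner loop above is a common accumulation idiom: the first input initializes the output block with a scaled copy (or silence), and every later input adds on top, each scaled by its chunk's mVolume. A rough scalar equivalent of AudioBlockCopyChannelWithScale and AudioBlockAddChannelWithScale (naive loops; the real routines are presumably vectorized):

#include <cstddef>

constexpr size_t kBlockSize = 128;  // corresponds to WEBAUDIO_BLOCK_SIZE

// First input: overwrite the output channel with a scaled copy.
void CopyChannelWithScale(const float* aIn, float aScale, float* aOut) {
  for (size_t i = 0; i < kBlockSize; ++i) {
    aOut[i] = aIn[i] * aScale;
  }
}

// Subsequent inputs: accumulate a scaled copy on top of what's there.
void AddChannelWithScale(const float* aIn, float aScale, float* aOut) {
  for (size_t i = 0; i < kBlockSize; ++i) {
    aOut[i] += aIn[i] * aScale;
  }
}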
Example 10: MOZ_ASSERT
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port.
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread() ||
        a->IsAudioParamStream()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }
    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }
  switch (mChannelCountMode) {
    case ChannelCountMode::Explicit:
      // Disregard the output channel count that we've calculated, and just
      // use mNumberOfInputChannels.
      outputChannelCount = mNumberOfInputChannels;
      break;
    case ChannelCountMode::Clamped_max:
      // Clamp the computed output channel count to mNumberOfInputChannels.
      outputChannelCount = std::min(outputChannelCount, mNumberOfInputChannels);
      break;
    case ChannelCountMode::Max:
      // Nothing to do here; this case exists to silence the compiler warning.
      break;
  }
  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }
  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }
  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
  // The static storage here should be 1KB, so it's fine.
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
        AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
        NS_ASSERTION(outputChannelCount == channels.Length(),
                     "We called GetAudioChannelsSuperset to avoid this");
      } else {
        // Fill up the remaining channels with zeros.
        for (uint32_t j = channels.Length(); j < outputChannelCount; ++j) {
          channels.AppendElement(silenceChannel);
        }
      }
    } else if (channels.Length() > outputChannelCount) {
      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
        nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
        outputChannels.SetLength(outputChannelCount);
        downmixBuffer.SetLength(outputChannelCount * WEBAUDIO_BLOCK_SIZE);
        for (uint32_t j = 0; j < outputChannelCount; ++j) {
          outputChannels[j] = &downmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
        }
        AudioChannelsDownMix(channels, outputChannels.Elements(),
                             outputChannelCount, WEBAUDIO_BLOCK_SIZE);
        channels.SetLength(outputChannelCount);
        for (uint32_t j = 0; j < channels.Length(); ++j) {
          channels[j] = outputChannels[j];
        }
      } else {
        // Drop the extra channels.
        channels.RemoveElementsAt(outputChannelCount,
                                  channels.Length() - outputChannelCount);
      }
    }
//......... remainder of this example omitted in the source .........
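The switch over mChannelCountMode in Example 10 corresponds to the Web Audio channelCountMode attribute: "explicit" forces the node's configured channel count, "clamped-max" caps the computed count at it, and "max" keeps the computed count. A minimal sketch of that resolution step, using its own enum rather than the Gecko types:

#include <algorithm>
#include <cstdint>

enum class ChannelCountMode { Max, ClampedMax, Explicit };

// Resolve the output channel count from the count computed across the inputs
// and the node's configured channel count, per the Web Audio mixing rules.
uint32_t ResolveChannelCount(ChannelCountMode aMode,
                             uint32_t aComputed,
                             uint32_t aNodeChannelCount) {
  switch (aMode) {
    case ChannelCountMode::Explicit:
      return aNodeChannelCount;                       // disregard the computed count
    case ChannelCountMode::ClampedMax:
      return std::min(aComputed, aNodeChannelCount);  // cap at the node's count
    case ChannelCountMode::Max:
    default:
      return aComputed;                               // keep the computed count
  }
}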