本文整理汇总了C++中AudioContext::SampleRate方法的典型用法代码示例。如果您正苦于以下问题:C++ AudioContext::SampleRate方法的具体用法?C++ AudioContext::SampleRate怎么用?C++ AudioContext::SampleRate使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 AudioContext 的用法示例。
在下文中一共展示了AudioContext::SampleRate方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: DispatchAudioProcessEvent
// Dispatches an "audioprocess" DOM event on |aNode| and, if the content
// script populated an output buffer in its handler, steals that buffer's
// channel data into |aOutput|.  Sets up |output| iff buffers are set in
// event handlers; otherwise |aOutput| is left untouched.
//
// NOTE(review): mInputBuffer and mPlaybackTime are members of the enclosing
// class that owns this method (not visible in this excerpt) — confirm
// against the full file.
void DispatchAudioProcessEvent(ScriptProcessorNode* aNode,
                               AudioChunk* aOutput)
{
  AudioContext* context = aNode->Context();
  if (!context) {
    // The node has no context anymore; nothing to dispatch.
    return;
  }

  // Enter the JS API before creating/touching JS-visible objects below.
  AutoJSAPI jsapi;
  if (NS_WARN_IF(!jsapi.Init(aNode->GetOwner()))) {
    return;
  }
  JSContext* cx = jsapi.cx();
  uint32_t inputChannelCount = aNode->ChannelCount();

  // Create the input buffer only if we actually have captured input data;
  // mInputBuffer is handed off (forget()) to the AudioBuffer.
  RefPtr<AudioBuffer> inputBuffer;
  if (mInputBuffer) {
    ErrorResult rv;
    inputBuffer =
      AudioBuffer::Create(context->GetOwner(), inputChannelCount,
                          aNode->BufferSize(), context->SampleRate(),
                          mInputBuffer.forget(), rv);
    if (rv.Failed()) {
      // Buffer creation failed (e.g. OOM): drop this event quietly.
      rv.SuppressException();
      return;
    }
  }

  // Ask content to produce data in the output buffer
  // Note that we always avoid creating the output buffer here, and we try to
  // avoid creating the input buffer as well. The AudioProcessingEvent class
  // knows how to lazily create them if needed once the script tries to access
  // them. Otherwise, we may be able to get away without creating them!
  RefPtr<AudioProcessingEvent> event =
    new AudioProcessingEvent(aNode, nullptr, nullptr);
  event->InitEvent(inputBuffer, inputChannelCount, mPlaybackTime);
  aNode->DispatchTrustedEvent(event);

  // Steal the output buffers if they have been set.
  // Don't create a buffer if it hasn't been used to return output;
  // FinishProducingOutputBuffer() will optimize output = null.
  // GetThreadSharedChannelsForRate() may also return null after OOM.
  if (event->HasOutputBuffer()) {
    ErrorResult rv;
    AudioBuffer* buffer = event->GetOutputBuffer(rv);
    // HasOutputBuffer() returning true means that GetOutputBuffer()
    // will not fail.
    MOZ_ASSERT(!rv.Failed());
    *aOutput = buffer->GetThreadSharedChannelsForRate(cx);
    MOZ_ASSERT(aOutput->IsNull() ||
               aOutput->mBufferFormat == AUDIO_FORMAT_FLOAT32,
               "AudioBuffers initialized from JS have float data");
  }
}
示例2: AudioNodeExternalInputStream
/* static */
already_AddRefed<AudioNodeExternalInputStream>
AudioNodeExternalInputStream::Create(MediaStreamGraph* aGraph,
                                     AudioNodeEngine* aEngine) {
  // Factory for an external-input stream bound to |aGraph|.  The graph must
  // run at the same rate as the engine's AudioContext.
  AudioContext* context = aEngine->NodeMainThread()->Context();
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(aGraph->GraphRate() == context->SampleRate());

  RefPtr<AudioNodeExternalInputStream> result =
    new AudioNodeExternalInputStream(aEngine, aGraph->GraphRate());
  // A suspended context suspends its freshly-created streams as well.
  result->mSuspendedCount += context->ShouldSuspendNewStream();
  aGraph->AddStream(result);
  return result.forget();
}
示例3: Create
/* static */ already_AddRefed<AudioBuffer>
AudioBuffer::Constructor(const GlobalObject& aGlobal,
                         AudioContext& aAudioContext,
                         const AudioBufferOptions& aOptions,
                         ErrorResult& aRv)
{
  // Zero channels is invalid: report an index-size error to the caller.
  if (aOptions.mNumberOfChannels == 0) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return nullptr;
  }

  // An explicit sampleRate in the options wins; otherwise inherit the
  // context's sample rate.
  float sampleRate = aAudioContext.SampleRate();
  if (aOptions.mSampleRate.WasPassed()) {
    sampleRate = aOptions.mSampleRate.Value();
  }

  return Create(&aAudioContext, aOptions.mNumberOfChannels, aOptions.mLength,
                sampleRate, aRv);
}