This article collects typical usage examples of the C++ PodZero function. If you have been wondering what PodZero does, how to call it, or where it is used in practice, the hand-picked code examples below may help. A total of 15 PodZero examples are shown, sorted by popularity by default.
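For context, PodZero comes from Mozilla's mfbt/PodOperations.h and zeroes the bytes of one or more POD (plain-old-data) objects. Below is a minimal sketch of its two overloads, simplified from the mfbt header (the upstream version carries MOZ_ALWAYS_INLINE annotations and implements the array overload slightly differently; treat this as an approximation, not the exact upstream source):

#include <stddef.h>  // size_t
#include <string.h>  // memset

// Zero a single POD object.
template <typename T>
static void PodZero(T* aT) {
  memset(aT, 0, sizeof(T));
}

// Zero |aNElem| consecutive POD objects starting at |aT|.
template <typename T>
static void PodZero(T* aT, size_t aNElem) {
  memset(aT, 0, aNElem * sizeof(T));
}

// Typical call sites, mirroring the examples below:
//   vorbis_info info;
//   PodZero(&info);        // zero one struct
//   float buffer[256];
//   PodZero(buffer, 256);  // zero an array of 256 floats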
Example 1: CopyChunkToBlock
template <typename T>
static void
CopyChunkToBlock(AudioChunk& aInput, AudioBlock* aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  AutoTArray<const T*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
    channels.SetLength(inputChannels.Length());
    PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
    }
  }

  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
    if (channels[c]) {
      ConvertAudioSamplesWithScale(channels[c], outputData,
                                   aInput.GetDuration(), aInput.mVolume);
    } else {
      PodZero(outputData, aInput.GetDuration());
    }
  }
}
Example 2: mImageContainer
VPXDecoder::VPXDecoder(const CreateDecoderParams& aParams)
  : mImageContainer(aParams.mImageContainer)
  , mTaskQueue(aParams.mTaskQueue)
  , mInfo(aParams.VideoConfig())
  , mCodec(MimeTypeToCodec(aParams.VideoConfig().mMimeType))
{
  MOZ_COUNT_CTOR(VPXDecoder);
  PodZero(&mVPX);
  PodZero(&mVPXAlpha);
}
Example 3: m_frame
FFTConvolver::FFTConvolver(size_t fftSize)
  : m_frame(fftSize)
  , m_readWriteIndex(0)
{
  m_inputBuffer.SetLength(fftSize);
  PodZero(m_inputBuffer.Elements(), fftSize);
  m_outputBuffer.SetLength(fftSize);
  PodZero(m_outputBuffer.Elements(), fftSize);
  m_lastOverlapBuffer.SetLength(fftSize / 2);
  PodZero(m_lastOverlapBuffer.Elements(), fftSize / 2);
}
Example 4: ProcessBlock
void ProcessBlock(AudioNodeStream* aStream,
                  GraphTime aFrom,
                  const AudioBlock& aInput,
                  AudioBlock* aOutput,
                  bool* aFinished) override
{
  // This node is not connected to anything. Per spec, we don't fire the
  // onaudioprocess event. We also want to clear out the input and output
  // buffer queue, and output a null buffer.
  if (!mIsConnected) {
    aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
    mSharedBuffers->Reset();
    mInputWriteIndex = 0;
    return;
  }

  // The input buffer is allocated lazily when non-null input is received.
  if (!aInput.IsNull() && !mInputBuffer) {
    mInputBuffer = ThreadSharedFloatArrayBufferList::
      Create(mInputChannelCount, mBufferSize, fallible);
    if (mInputBuffer && mInputWriteIndex) {
      // Zero leading for null chunks that were skipped.
      for (uint32_t i = 0; i < mInputChannelCount; ++i) {
        float* channelData = mInputBuffer->GetDataForWrite(i);
        PodZero(channelData, mInputWriteIndex);
      }
    }
  }

  // First, record our input buffer, if its allocation succeeded.
  uint32_t inputChannelCount = mInputBuffer ? mInputBuffer->GetChannels() : 0;
  for (uint32_t i = 0; i < inputChannelCount; ++i) {
    float* writeData = mInputBuffer->GetDataForWrite(i) + mInputWriteIndex;
    if (aInput.IsNull()) {
      PodZero(writeData, aInput.GetDuration());
    } else {
      MOZ_ASSERT(aInput.GetDuration() == WEBAUDIO_BLOCK_SIZE, "sanity check");
      MOZ_ASSERT(aInput.ChannelCount() == inputChannelCount);
      AudioBlockCopyChannelWithScale(static_cast<const float*>(aInput.mChannelData[i]),
                                     aInput.mVolume, writeData);
    }
  }
  mInputWriteIndex += aInput.GetDuration();

  // Now, see if we have data to output.
  // Note that we need to do this before sending the buffer to the main
  // thread so that our delay time is updated.
  *aOutput = mSharedBuffers->GetOutputBuffer();

  if (mInputWriteIndex >= mBufferSize) {
    SendBuffersToMainThread(aStream, aFrom);
    mInputWriteIndex -= mBufferSize;
  }
}
Example 5: mImageContainer
VPXDecoder::VPXDecoder(const CreateDecoderParams& aParams)
    : mImageContainer(aParams.mImageContainer),
      mImageAllocator(aParams.mKnowsCompositor),
      mTaskQueue(aParams.mTaskQueue),
      mInfo(aParams.VideoConfig()),
      mCodec(MimeTypeToCodec(aParams.VideoConfig().mMimeType)),
      mLowLatency(
          aParams.mOptions.contains(CreateDecoderParams::Option::LowLatency)) {
  MOZ_COUNT_CTOR(VPXDecoder);
  PodZero(&mVPX);
  PodZero(&mVPXAlpha);
}
Example 6: m_frame
FFTConvolver::FFTConvolver(size_t fftSize, size_t renderPhase)
  : m_frame(fftSize)
  , m_readWriteIndex(renderPhase % (fftSize / 2))
{
  MOZ_ASSERT(fftSize >= 2 * WEBAUDIO_BLOCK_SIZE);
  m_inputBuffer.SetLength(fftSize);
  PodZero(m_inputBuffer.Elements(), fftSize);
  m_outputBuffer.SetLength(fftSize);
  PodZero(m_outputBuffer.Elements(), fftSize);
  m_lastOverlapBuffer.SetLength(fftSize / 2);
  PodZero(m_lastOverlapBuffer.Elements(), fftSize / 2);
}
Example 7: LOG
nsresult
AppleATDecoder::SetupDecoder(mp4_demuxer::MP4Sample* aSample)
{
  if (mFormatID == kAudioFormatMPEG4AAC &&
      mConfig.extended_profile == 2) {
    // Check for implicit SBR signalling if the stream is AAC-LC.
    // This will provide us with an updated magic cookie for use with
    // GetInputAudioDescription.
    if (NS_SUCCEEDED(GetImplicitAACMagicCookie(aSample)) &&
        !mMagicCookie.Length()) {
      // Nothing found yet; will try again later.
      return NS_ERROR_NOT_INITIALIZED;
    }
    // An error occurred; fall back to using the default stream description.
  }

  LOG("Initializing Apple AudioToolbox decoder");
  AudioStreamBasicDescription inputFormat;
  PodZero(&inputFormat);
  nsresult rv =
    GetInputAudioDescription(inputFormat,
                             mMagicCookie.Length() ?
                               mMagicCookie : *mConfig.extra_data);
  if (NS_FAILED(rv)) {
    return rv;
  }

  // Fill in the output format manually.
  PodZero(&mOutputFormat);
  mOutputFormat.mFormatID = kAudioFormatLinearPCM;
  mOutputFormat.mSampleRate = inputFormat.mSampleRate;
  mOutputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
  mOutputFormat.mBitsPerChannel = 32;
  mOutputFormat.mFormatFlags =
    kLinearPCMFormatFlagIsFloat |
    0;
#else
# error Unknown audio sample type
#endif
  // Set up the decoder so it gives us one sample per frame.
  mOutputFormat.mFramesPerPacket = 1;
  mOutputFormat.mBytesPerPacket = mOutputFormat.mBytesPerFrame
    = mOutputFormat.mChannelsPerFrame * mOutputFormat.mBitsPerChannel / 8;

  OSStatus status = AudioConverterNew(&inputFormat, &mOutputFormat, &mConverter);
  if (status) {
    LOG("Error %d constructing AudioConverter", status);
    mConverter = nullptr;
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
Example 8: mInfo
VorbisDataDecoder::VorbisDataDecoder(const CreateDecoderParams& aParams)
  : mInfo(aParams.AudioConfig())
  , mTaskQueue(aParams.mTaskQueue)
  , mPacketCount(0)
  , mFrames(0)
{
  // Zero these member vars to avoid crashes in Vorbis clear functions when
  // destructor is called before |Init|.
  PodZero(&mVorbisBlock);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisInfo);
  PodZero(&mVorbisComment);
}
Example 9: vorbis_info_init
RefPtr<MediaDataDecoder::InitPromise>
VorbisDataDecoder::Init()
{
  vorbis_info_init(&mVorbisInfo);
  vorbis_comment_init(&mVorbisComment);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisBlock);

  AutoTArray<unsigned char*,4> headers;
  AutoTArray<size_t,4> headerLens;
  if (!XiphExtradataToHeaders(headers, headerLens,
                              mInfo.mCodecSpecificConfig->Elements(),
                              mInfo.mCodecSpecificConfig->Length())) {
    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
  }
  for (size_t i = 0; i < headers.Length(); i++) {
    if (NS_FAILED(DecodeHeader(headers[i], headerLens[i]))) {
      return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                                          __func__);
    }
  }

  MOZ_ASSERT(mPacketCount == 3);

  int r = vorbis_synthesis_init(&mVorbisDsp, &mVorbisInfo);
  if (r) {
    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
  }

  r = vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
  if (r) {
    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
  }

  if (mInfo.mRate != (uint32_t)mVorbisDsp.vi->rate) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec rate do not match!"));
  }
  if (mInfo.mChannels != (uint32_t)mVorbisDsp.vi->channels) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec channels do not match!"));
  }

  AudioConfig::ChannelLayout layout(mVorbisDsp.vi->channels);
  if (!layout.IsValid()) {
    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
  }

  return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
}
Example 10: vorbis_info_init
nsresult
VorbisDataDecoder::Init()
{
  vorbis_info_init(&mVorbisInfo);
  vorbis_comment_init(&mVorbisComment);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisBlock);

  size_t available = mInfo.mCodecSpecificConfig->Length();
  uint8_t* p = mInfo.mCodecSpecificConfig->Elements();
  for (int i = 0; i < 3; i++) {
    if (available < 2) {
      return NS_ERROR_FAILURE;
    }
    available -= 2;
    size_t length = BigEndian::readUint16(p);
    p += 2;
    if (available < length) {
      return NS_ERROR_FAILURE;
    }
    available -= length;
    if (NS_FAILED(DecodeHeader((const unsigned char*)p, length))) {
      return NS_ERROR_FAILURE;
    }
    p += length;
  }

  MOZ_ASSERT(mPacketCount == 3);

  int r = vorbis_synthesis_init(&mVorbisDsp, &mVorbisInfo);
  if (r) {
    return NS_ERROR_FAILURE;
  }

  r = vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
  if (r) {
    return NS_ERROR_FAILURE;
  }

  if (mInfo.mRate != (uint32_t)mVorbisDsp.vi->rate) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec rate do not match!"));
  }
  if (mInfo.mChannels != (uint32_t)mVorbisDsp.vi->channels) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec channels do not match!"));
  }

  return NS_OK;
}
Example 11: m_accumulationBuffer
ReverbConvolverStage::ReverbConvolverStage(const float* impulseResponse, size_t,
                                           size_t reverbTotalLatency,
                                           size_t stageOffset,
                                           size_t stageLength,
                                           size_t fftSize, size_t renderPhase,
                                           size_t renderSliceSize,
                                           ReverbAccumulationBuffer* accumulationBuffer,
                                           bool directMode)
  : m_accumulationBuffer(accumulationBuffer)
  , m_accumulationReadIndex(0)
  , m_inputReadIndex(0)
  , m_directMode(directMode)
{
  MOZ_ASSERT(impulseResponse);
  MOZ_ASSERT(accumulationBuffer);

  if (!m_directMode) {
    m_fftKernel = new FFTBlock(fftSize);
    m_fftKernel->PadAndMakeScaledDFT(impulseResponse + stageOffset, stageLength);
    m_fftConvolver = new FFTConvolver(fftSize);
  } else {
    m_directKernel.SetLength(fftSize / 2);
    PodCopy(m_directKernel.Elements(), impulseResponse + stageOffset, fftSize / 2);
    m_directConvolver = new DirectConvolver(renderSliceSize);
  }
  m_temporaryBuffer.SetLength(renderSliceSize);
  PodZero(m_temporaryBuffer.Elements(), m_temporaryBuffer.Length());

  // The convolution stage at offset stageOffset needs to have a corresponding
  // delay to cancel out the offset.
  size_t totalDelay = stageOffset + reverbTotalLatency;

  // But the FFT convolution itself incurs fftSize / 2 latency, so subtract
  // this out...
  size_t halfSize = fftSize / 2;
  if (!m_directMode) {
    MOZ_ASSERT(totalDelay >= halfSize);
    if (totalDelay >= halfSize)
      totalDelay -= halfSize;
  }

  // We divide up the total delay into pre- and post-delay sections so that we
  // can schedule at exactly the moment when the FFT will happen. This is
  // coordinated with the other stages, so they don't all do their FFTs at the
  // same time...
  int maxPreDelayLength = std::min(halfSize, totalDelay);
  m_preDelayLength = totalDelay > 0 ? renderPhase % maxPreDelayLength : 0;
  if (m_preDelayLength > totalDelay)
    m_preDelayLength = 0;
  m_postDelayLength = totalDelay - m_preDelayLength;

  m_preReadWriteIndex = 0;
  m_framesProcessed = 0; // total frames processed so far

  size_t delayBufferSize = m_preDelayLength < fftSize ? fftSize : m_preDelayLength;
  delayBufferSize = delayBufferSize < renderSliceSize ? renderSliceSize : delayBufferSize;
  m_preDelayBuffer.SetLength(delayBufferSize);
  PodZero(m_preDelayBuffer.Elements(), m_preDelayBuffer.Length());
}
Example 12: PodZero
void
AppleATDecoder::SetupDecoder()
{
  AudioStreamBasicDescription inputFormat, outputFormat;
  // Fill in the input format description from the stream.
  AppleUtils::GetProperty(mStream,
                          kAudioFileStreamProperty_DataFormat, &inputFormat);

  // Fill in the output format manually.
  PodZero(&outputFormat);
  outputFormat.mFormatID = kAudioFormatLinearPCM;
  outputFormat.mSampleRate = inputFormat.mSampleRate;
  outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
  outputFormat.mBitsPerChannel = 32;
  outputFormat.mFormatFlags =
    kLinearPCMFormatFlagIsFloat |
    0;
#else
# error Unknown audio sample type
#endif
  // Set up the decoder so it gives us one sample per frame.
  outputFormat.mFramesPerPacket = 1;
  outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame
    = outputFormat.mChannelsPerFrame * outputFormat.mBitsPerChannel / 8;

  OSStatus rv = AudioConverterNew(&inputFormat, &outputFormat, &mConverter);
  if (rv) {
    LOG("Error %d constructing AudioConverter", rv);
    mConverter = nullptr;
    mCallback->Error();
  }
  mHaveOutput = false;
}
Example 13: mInfo
VorbisDataDecoder::VorbisDataDecoder(const AudioInfo& aConfig,
                                     FlushableTaskQueue* aTaskQueue,
                                     MediaDataDecoderCallback* aCallback)
  : mInfo(aConfig)
  , mTaskQueue(aTaskQueue)
  , mCallback(aCallback)
  , mPacketCount(0)
  , mFrames(0)
{
  // Zero these member vars to avoid crashes in Vorbis clear functions when
  // destructor is called before |Init|.
  PodZero(&mVorbisBlock);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisInfo);
  PodZero(&mVorbisComment);
}
Example 14: UpMixDownMixChunk
void
AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex,
                                      const AudioBlock& aChunk,
                                      AudioBlock* aBlock,
                                      nsTArray<float>* aDownmixBuffer)
{
  nsAutoTArray<const float*,GUESS_AUDIO_CHANNELS> channels;
  UpMixDownMixChunk(&aChunk, aBlock->ChannelCount(), channels, *aDownmixBuffer);

  for (uint32_t c = 0; c < channels.Length(); ++c) {
    const float* inputData = static_cast<const float*>(channels[c]);
    float* outputData = aBlock->ChannelFloatsForWrite(c);
    if (inputData) {
      if (aInputIndex == 0) {
        AudioBlockCopyChannelWithScale(inputData, aChunk.mVolume, outputData);
      } else {
        AudioBlockAddChannelWithScale(inputData, aChunk.mVolume, outputData);
      }
    } else {
      if (aInputIndex == 0) {
        PodZero(outputData, WEBAUDIO_BLOCK_SIZE);
      }
    }
  }
}
Example 15: MOZ_ASSERT
mozilla::ipc::IPCResult CompositorManagerParent::RecvReportMemory(
    ReportMemoryResolver&& aResolver) {
  MOZ_ASSERT(CompositorThreadHolder::IsInCompositorThread());
  MemoryReport aggregate;
  PodZero(&aggregate);

  // Accumulate RenderBackend usage.
  nsTArray<PCompositorBridgeParent*> compositorBridges;
  ManagedPCompositorBridgeParent(compositorBridges);
  for (auto bridge : compositorBridges) {
    static_cast<CompositorBridgeParentBase*>(bridge)->AccumulateMemoryReport(
        &aggregate);
  }

  // Accumulate Renderer usage asynchronously, and resolve.
  //
  // Note that the IPDL machinery requires aResolver to be called on this
  // thread, so we can't just pass it over to the renderer thread. We use
  // an intermediate MozPromise instead.
  wr::RenderThread::AccumulateMemoryReport(aggregate)->Then(
      CompositorThreadHolder::Loop()->SerialEventTarget(), __func__,
      [resolver = std::move(aResolver)](MemoryReport aReport) {
        resolver(aReport);
      },
      [](bool) {
        MOZ_ASSERT_UNREACHABLE("MemoryReport promises are never rejected");
      });

  return IPC_OK();
}