This article collects typical usage examples of the C++ EncodedFrameContainer class. If you are struggling with questions such as what exactly EncodedFrameContainer is for, or how to use it, the curated class code examples here may help.
The sections below present 9 code examples of the EncodedFrameContainer class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
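Before diving into the examples, here is a minimal, hedged sketch of how the class is typically used, based only on the interface that appears in the examples below (AppendEncodedFrame(), GetEncodedFrames(), and the EncodedFrame getters/setters). The function name DrainOneBatch and the TrackEncoder/ContainerWriter parameter types are illustrative assumptions, not part of any example on this page.
// A minimal sketch, assuming the EncodedFrameContainer/EncodedFrame interface
// visible in the examples below; DrainOneBatch and its parameters are hypothetical.
nsresult
DrainOneBatch(TrackEncoder* aEncoder, ContainerWriter* aWriter)
{
  EncodedFrameContainer frames;
  // Producer side: a track encoder appends EncodedFrame objects to the container.
  nsresult rv = aEncoder->GetEncodedTrack(frames);
  NS_ENSURE_SUCCESS(rv, rv);
  // Consumer side: a writer/muxer iterates over the collected frames.
  for (uint32_t i = 0; i < frames.GetEncodedFrames().Length(); i++) {
    EncodedFrame* frame = frames.GetEncodedFrames()[i];
    printf("frame type %d, timestamp %llu, duration %llu\n",
           (int)frame->GetFrameType(),
           (unsigned long long)frame->GetTimeStamp(),
           (unsigned long long)frame->GetDuration());
  }
  return aWriter->WriteEncodedTrack(frames, 0);
}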
Example 1: while
nsresult
VP8TrackEncoder::GetEncodedPartitions(EncodedFrameContainer& aData)
{
vpx_codec_iter_t iter = nullptr;
EncodedFrame::FrameType frameType = EncodedFrame::VP8_P_FRAME;
nsTArray<uint8_t> frameData;
const vpx_codec_cx_pkt_t *pkt = nullptr;
while ((pkt = vpx_codec_get_cx_data(mVPXContext, &iter)) != nullptr) {
switch (pkt->kind) {
case VPX_CODEC_CX_FRAME_PKT: {
// Copy the encoded data from libvpx to frameData
frameData.AppendElements((uint8_t*)pkt->data.frame.buf,
pkt->data.frame.sz);
break;
}
default: {
break;
}
}
// End of frame
if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
frameType = EncodedFrame::VP8_I_FRAME;
}
break;
}
}
if (!frameData.IsEmpty() &&
(pkt->data.frame.pts == mEncodedTimestamp)) {
// Copy the encoded data to aData.
EncodedFrame* videoData = new EncodedFrame();
videoData->SetFrameType(frameType);
// Convert the timestamp and duration to Usecs.
CheckedInt64 timestamp = FramesToUsecs(mEncodedTimestamp, mTrackRate);
if (timestamp.isValid()) {
videoData->SetTimeStamp(
(uint64_t)FramesToUsecs(mEncodedTimestamp, mTrackRate).value());
}
CheckedInt64 duration = FramesToUsecs(pkt->data.frame.duration, mTrackRate);
if (duration.isValid()) {
videoData->SetDuration(
(uint64_t)FramesToUsecs(pkt->data.frame.duration, mTrackRate).value());
}
videoData->SwapInFrameData(frameData);
VP8LOG("GetEncodedPartitions TimeStamp %lld Duration %lld\n",
videoData->GetTimeStamp(), videoData->GetDuration());
VP8LOG("frameType %d\n", videoData->GetFrameType());
aData.AppendEncodedFrame(videoData);
}
return NS_OK;
}
Example 2: LOG
nsresult
OggWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
uint32_t aFlags)
{
PROFILER_LABEL("OggWriter", "WriteEncodedTrack",
js::ProfileEntry::Category::OTHER);
for (uint32_t i = 0; i < aData.GetEncodedFrames().Length(); i++) {
if (aData.GetEncodedFrames()[i]->GetFrameType() != EncodedFrame::OPUS_AUDIO_FRAME) {
LOG("[OggWriter] wrong encoded data type!");
return NS_ERROR_FAILURE;
}
nsresult rv = WriteEncodedData(aData.GetEncodedFrames()[i]->GetFrameData(),
aData.GetEncodedFrames()[i]->GetDuration(),
aFlags);
if (NS_FAILED(rv)) {
LOG("%p Failed to WriteEncodedTrack!", this);
return rv;
}
}
return NS_OK;
}
Example 3: while
void
VorbisTrackEncoder::GetEncodedFrames(EncodedFrameContainer& aData)
{
// vorbis does some data preanalysis, then divvies up blocks for
// more involved (potentially parallel) processing. Get a single
// block for encoding now.
while (vorbis_analysis_blockout(&mVorbisDsp, &mVorbisBlock) == 1) {
ogg_packet oggPacket;
if (vorbis_analysis(&mVorbisBlock, &oggPacket) == 0) {
VORBISLOG("vorbis_analysis_blockout block size %d", oggPacket.bytes);
EncodedFrame* audiodata = new EncodedFrame();
audiodata->SetFrameType(EncodedFrame::AUDIO_FRAME);
nsTArray<uint8_t> frameData;
frameData.AppendElements(oggPacket.packet, oggPacket.bytes);
audiodata->SetFrameData(&frameData);
aData.AppendEncodedFrame(audiodata);
}
}
}
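For context, Example 3 starts at vorbis_analysis_blockout(), which only returns blocks after PCM has been fed to the encoder. The feeding step is not shown on this page, so here is a hedged sketch of it using the standard libvorbis API; the helper name FeedPcmToVorbis and its parameters are illustrative assumptions.
// Hedged sketch of the step that precedes GetEncodedFrames(): hand interleaved
// PCM to libvorbis so that vorbis_analysis_blockout() can return blocks.
void
FeedPcmToVorbis(vorbis_dsp_state* aDsp, const float* aInterleaved,
                int aFrames, int aChannels)
{
  // Ask libvorbis for a writable, per-channel (non-interleaved) buffer.
  float** buffer = vorbis_analysis_buffer(aDsp, aFrames);
  // De-interleave the input samples into that buffer.
  for (int ch = 0; ch < aChannels; ch++) {
    for (int i = 0; i < aFrames; i++) {
      buffer[ch][i] = aInterleaved[i * aChannels + ch];
    }
  }
  // Tell libvorbis how many frames were written; after this call,
  // vorbis_analysis_blockout() (see Example 3) can start producing blocks.
  vorbis_analysis_wrote(aDsp, aFrames);
}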
Example 4: if
nsresult
OmxAudioTrackEncoder::AppendEncodedFrames(EncodedFrameContainer& aContainer)
{
nsTArray<uint8_t> frameData;
int outFlags = 0;
int64_t outTimeUs = -1;
nsresult rv = mEncoder->GetNextEncodedFrame(&frameData, &outTimeUs, &outFlags,
3000); // wait up to 3ms
NS_ENSURE_SUCCESS(rv, rv);
if (!frameData.IsEmpty()) {
bool isCSD = false;
if (outFlags & OMXCodecWrapper::BUFFER_CODEC_CONFIG) { // codec specific data
isCSD = true;
} else if (outFlags & OMXCodecWrapper::BUFFER_EOS) { // last frame
mEncodingComplete = true;
}
nsRefPtr<EncodedFrame> audiodata = new EncodedFrame();
if (mEncoder->GetCodecType() == OMXCodecWrapper::AAC_ENC) {
audiodata->SetFrameType(isCSD ?
EncodedFrame::AAC_CSD : EncodedFrame::AAC_AUDIO_FRAME);
} else if (mEncoder->GetCodecType() == OMXCodecWrapper::AMR_NB_ENC){
audiodata->SetFrameType(isCSD ?
EncodedFrame::AMR_AUDIO_CSD : EncodedFrame::AMR_AUDIO_FRAME);
} else {
MOZ_ASSERT(false, "audio codec not supported");
}
audiodata->SetTimeStamp(outTimeUs);
audiodata->SwapInFrameData(frameData);
aContainer.AppendEncodedFrame(audiodata);
}
return NS_OK;
}
Example 5: mon
nsresult
OmxVideoTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
VideoSegment segment;
{
// Move all the samples from mRawSegment to segment. We only hold the
// monitor in this block.
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
// Wait while encoding has not been canceled and mEncoder is either
// uninitialized or has no raw data to consume (and the stream has not ended).
while (!mCanceled && (!mInitialized ||
(mRawSegment.GetDuration() == 0 && !mEndOfStream))) {
mReentrantMonitor.Wait();
}
if (mCanceled || mEncodingComplete) {
return NS_ERROR_FAILURE;
}
segment.AppendFrom(&mRawSegment);
}
nsresult rv;
// Start queuing raw frames to the input buffers of OMXCodecWrapper.
VideoSegment::ChunkIterator iter(segment);
while (!iter.IsEnded()) {
VideoChunk chunk = *iter;
// Send only the unique video frames to OMXCodecWrapper.
if (mLastFrame != chunk.mFrame) {
uint64_t totalDurationUs = mTotalFrameDuration * USECS_PER_S / mTrackRate;
layers::Image* img = (chunk.IsNull() || chunk.mFrame.GetForceBlack()) ?
nullptr : chunk.mFrame.GetImage();
rv = mEncoder->Encode(img, mFrameWidth, mFrameHeight, totalDurationUs);
NS_ENSURE_SUCCESS(rv, rv);
}
mLastFrame.TakeFrom(&chunk.mFrame);
mTotalFrameDuration += chunk.GetDuration();
iter.Next();
}
// Send the EOS signal to OMXCodecWrapper.
if (mEndOfStream && iter.IsEnded() && !mEosSetInEncoder) {
uint64_t totalDurationUs = mTotalFrameDuration * USECS_PER_S / mTrackRate;
layers::Image* img = (!mLastFrame.GetImage() || mLastFrame.GetForceBlack())
? nullptr : mLastFrame.GetImage();
rv = mEncoder->Encode(img, mFrameWidth, mFrameHeight, totalDurationUs,
OMXCodecWrapper::BUFFER_EOS);
NS_ENSURE_SUCCESS(rv, rv);
// Keep sending EOS signal until OMXVideoEncoder gets it.
mEosSetInEncoder = true;
}
// Dequeue an encoded frame from the output buffers of OMXCodecWrapper.
nsTArray<uint8_t> buffer;
int outFlags = 0;
int64_t outTimeStampUs = 0;
rv = mEncoder->GetNextEncodedFrame(&buffer, &outTimeStampUs, &outFlags,
GET_ENCODED_VIDEO_FRAME_TIMEOUT);
NS_ENSURE_SUCCESS(rv, rv);
if (!buffer.IsEmpty()) {
nsRefPtr<EncodedFrame> videoData = new EncodedFrame();
if (outFlags & OMXCodecWrapper::BUFFER_CODEC_CONFIG) {
videoData->SetFrameType(EncodedFrame::AVC_CSD);
} else {
videoData->SetFrameType((outFlags & OMXCodecWrapper::BUFFER_SYNC_FRAME) ?
EncodedFrame::AVC_I_FRAME : EncodedFrame::AVC_P_FRAME);
}
videoData->SwapInFrameData(buffer);
videoData->SetTimeStamp(outTimeStampUs);
aData.AppendEncodedFrame(videoData);
}
if (outFlags & OMXCodecWrapper::BUFFER_EOS) {
mEncodingComplete = true;
OMX_LOG("Done encoding video.");
}
return NS_OK;
}
Example 6: MOZ_ASSERT
nsresult
ISOMediaWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
uint32_t aFlags)
{
// Muxing is complete; re-entry is not allowed.
if (mState == MUXING_DONE) {
MOZ_ASSERT(false);
return NS_ERROR_FAILURE;
}
FragmentBuffer* frag = nullptr;
uint32_t len = aData.GetEncodedFrames().Length();
if (!len) {
// No frames? Then there is nothing for WriteEncodedTrack to do.
return NS_OK;
}
for (uint32_t i = 0; i < len; i++) {
nsRefPtr<EncodedFrame> frame(aData.GetEncodedFrames()[i]);
EncodedFrame::FrameType type = frame->GetFrameType();
if (type == EncodedFrame::AAC_AUDIO_FRAME ||
type == EncodedFrame::AAC_CSD ||
type == EncodedFrame::AMR_AUDIO_FRAME ||
type == EncodedFrame::AMR_AUDIO_CSD) {
frag = mAudioFragmentBuffer;
} else if (type == EncodedFrame::AVC_I_FRAME ||
type == EncodedFrame::AVC_P_FRAME ||
type == EncodedFrame::AVC_B_FRAME ||
type == EncodedFrame::AVC_CSD) {
frag = mVideoFragmentBuffer;
} else {
MOZ_ASSERT(0);
return NS_ERROR_FAILURE;
}
frag->AddFrame(frame);
}
// The encoder should send a CSD (codec-specific data) frame before sending the
// audio/video frames. Once the CSD data is ready, there is enough information
// to generate the moov data. If the encoder hasn't sent the CSD yet, the muxer
// needs to wait before generating anything.
if (mType & Audio_Track && (!mAudioFragmentBuffer ||
!mAudioFragmentBuffer->HasCSD())) {
return NS_OK;
}
if (mType & Video_Track && (!mVideoFragmentBuffer ||
!mVideoFragmentBuffer->HasCSD())) {
return NS_OK;
}
// There is only one FrameType per EncodedFrameContainer, so this check doesn't
// need to be inside the for-loop.
if (frag && (aFlags & END_OF_STREAM)) {
frag->SetEndOfStream();
}
nsresult rv;
bool EOS;
if (ReadyToRunState(EOS)) {
// TODO:
// The MediaEncoder doesn't use nsRunnable, so the thread will be stuck at
// that point and a newly added nsRunnable won't get a chance to run before
// MediaEncoder completes. Until MediaEncoder changes, RunState needs to be
// called directly here.
// https://bugzilla.mozilla.org/show_bug.cgi?id=950429
rv = RunState();
NS_ENSURE_SUCCESS(rv, rv);
}
return NS_OK;
}
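The comments in Example 6 describe an ordering requirement: the muxer waits until every active track has delivered its CSD before it generates anything. The following hedged sketch illustrates that ordering from the caller's side; the writer variable, the placeholder payloads, and the unqualified END_OF_STREAM flag are assumptions made for illustration.
// Hedged usage sketch of the CSD-first ordering described above.
// 1) Deliver the codec-specific data so the muxer can build the moov.
EncodedFrameContainer csd;
nsRefPtr<EncodedFrame> csdFrame = new EncodedFrame();
csdFrame->SetFrameType(EncodedFrame::AVC_CSD);
// ... fill csdFrame with the SPS/PPS blob obtained from the encoder ...
csd.AppendEncodedFrame(csdFrame);
writer->WriteEncodedTrack(csd, 0);
// 2) Only afterwards deliver regular access units; the last batch carries
//    the END_OF_STREAM flag so the writer can finalize the file.
EncodedFrameContainer samples;
nsRefPtr<EncodedFrame> sample = new EncodedFrame();
sample->SetFrameType(EncodedFrame::AVC_I_FRAME);
// ... fill sample with an encoded frame, its timestamp and duration ...
samples.AppendEncodedFrame(sample);
writer->WriteEncodedTrack(samples, END_OF_STREAM);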
Example 7: mon
/**
* Encoding flow in GetEncodedTrack():
* 1: Check the mInitialized state and the packet duration.
* 2: Move the data from mRawSegment to mSourceSegment.
* 3: Encode the video chunks in mSourceSegment in a for-loop.
* 3.1: Pick the video chunk by mRemainingTicks.
* 3.2: Calculate the encoding duration for the parameter of vpx_codec_encode().
* The encoding duration is a multiple of mEncodedFrameDuration.
* 3.3: Setup the video chunk to mVPXImageWrapper by PrepareRawFrame().
* 3.4: Send frame into vp8 encoder by vpx_codec_encode().
* 3.5: Get the output frame from encoder by calling GetEncodedPartitions().
* 3.6: Calculate the mRemainingTicks for next target frame.
* 3.7: Set the nextEncodeOperation for the next target frame.
* There is a heuristic: if the frame duration we have processed in
* mSourceSegment is 100 ms, it means we can't spend more than 100 ms
* encoding it.
* 4. Remove the encoded chunks in mSourceSegment after for-loop.
*
* Ex1: Input frame rate is 100 => input frame duration is 10ms for each.
* mEncodedFrameRate is 30 => output frame duration is 33ms.
* In this case, the frame duration in mSourceSegment will be:
* 1st : 0~10ms
* 2nd : 10~20ms
* 3rd : 20~30ms
* 4th : 30~40ms
* ...
* The VP8 encoder will take the 1st and 4th frames to encode. At the beginning,
* mRemainingTicks is 0 for the 1st frame; afterwards mRemainingTicks is set
* to 23 (mEncodedFrameDuration - 1st frame duration) in order to pick the 4th frame.
*
* Ex2: Input frame rate is 25 => frame duration is 40ms for each.
* mEncodedFrameRate is 30 => output frame duration is 33ms.
* In this case, the frame duration in mSourceSegment will be:
* 1st : 0~40ms
* 2nd : 40~80ms
* 3rd : 80~120ms
* 4th : 120~160ms
* ...
* Because the input frame duration (40 ms) is larger than 33 ms, the first
* encoded frame duration will be 66 ms, as computed by CalculateEncodedDuration,
* and mRemainingTicks will be set to 26
* (CalculateRemainingTicks: 0 + 66 - 40) in order to pick the next (2nd) frame
* in mSourceSegment. (A small worked sketch of this arithmetic follows the
* example below.)
*/
nsresult
VP8TrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
PROFILER_LABEL("VP8TrackEncoder", "GetEncodedTrack",
js::ProfileEntry::Category::OTHER);
bool EOS;
{
// Move all the samples from mRawSegment to mSourceSegment. We only hold
// the monitor in this block.
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
// Wait if mEncoder is not initialized, or if there is not enough raw data,
// as long as the stream has not ended and encoding has not been canceled.
while (!mCanceled && (!mInitialized ||
(mRawSegment.GetDuration() + mSourceSegment.GetDuration() <
mEncodedFrameDuration && !mEndOfStream))) {
mon.Wait();
}
if (mCanceled || mEncodingComplete) {
return NS_ERROR_FAILURE;
}
mSourceSegment.AppendFrom(&mRawSegment);
EOS = mEndOfStream;
}
VideoSegment::ChunkIterator iter(mSourceSegment);
StreamTime durationCopied = 0;
StreamTime totalProcessedDuration = 0;
TimeStamp timebase = TimeStamp::Now();
EncodeOperation nextEncodeOperation = ENCODE_NORMAL_FRAME;
for (; !iter.IsEnded(); iter.Next()) {
VideoChunk &chunk = *iter;
// Accumulate chunk's duration to durationCopied until it reaches
// mRemainingTicks.
durationCopied += chunk.GetDuration();
MOZ_ASSERT(mRemainingTicks <= mEncodedFrameDuration);
VP8LOG("durationCopied %lld mRemainingTicks %lld\n",
durationCopied, mRemainingTicks);
if (durationCopied >= mRemainingTicks) {
VP8LOG("nextEncodeOperation is %d\n",nextEncodeOperation);
// Calculate encodedDuration for this target frame.
StreamTime encodedDuration = CalculateEncodedDuration(durationCopied);
// Encode frame.
if (nextEncodeOperation != SKIP_FRAME) {
nsresult rv = PrepareRawFrame(chunk);
NS_ENSURE_SUCCESS(rv, NS_ERROR_FAILURE);
// Encode the data with VP8 encoder
int flags = (nextEncodeOperation == ENCODE_NORMAL_FRAME) ?
0 : VPX_EFLAG_FORCE_KF;
if (vpx_codec_encode(mVPXContext, mVPXImageWrapper, mEncodedTimestamp,
(unsigned long)encodedDuration, flags,
VPX_DL_REALTIME)) {
return NS_ERROR_FAILURE;
}
//......... part of this code is omitted here .........
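To make the Ex1/Ex2 arithmetic from the comment block above concrete, here is a small, self-contained worked sketch. CalculateEncodedDuration and CalculateRemainingTicks are re-derived here purely from the comment's description (durations are plain tick counts), so treat them as assumptions rather than the real implementations.
#include <cstdint>
#include <cstdio>

// Round the copied input duration up to a whole number of output frame durations.
static int64_t SketchEncodedDuration(int64_t aDurationCopied,
                                     int64_t aEncodedFrameDuration)
{
  int64_t multiples =
    (aDurationCopied + aEncodedFrameDuration - 1) / aEncodedFrameDuration;
  return multiples * aEncodedFrameDuration;
}

// Ticks of the encoded duration not yet covered by copied input frames.
static int64_t SketchRemainingTicks(int64_t aDurationCopied,
                                    int64_t aEncodedDuration)
{
  return aEncodedDuration - aDurationCopied;
}

int main()
{
  // Ex1: 10-tick input frames, 33-tick output frames.
  std::printf("Ex1: encoded %lld remaining %lld\n",
              (long long)SketchEncodedDuration(10, 33),   // 33
              (long long)SketchRemainingTicks(10, 33));   // 23
  // Ex2: 40-tick input frames, 33-tick output frames.
  std::printf("Ex2: encoded %lld remaining %lld\n",
              (long long)SketchEncodedDuration(40, 33),   // 66
              (long long)SketchRemainingTicks(40, 66));   // 26
  return 0;
}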
Example 8: mon
nsresult
OpusTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
PROFILER_LABEL("OpusTrackEncoder", "GetEncodedTrack",
js::ProfileEntry::Category::OTHER);
{
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
// Wait until initialized or cancelled.
while (!mCanceled && !mInitialized) {
mReentrantMonitor.Wait();
}
if (mCanceled || mEncodingComplete) {
return NS_ERROR_FAILURE;
}
}
// The calculation below relies on mInitialized being true.
MOZ_ASSERT(mInitialized);
// Resampled frames left over from last time that didn't fit into an Opus packet duration.
const int framesLeft = mResampledLeftover.Length() / mChannels;
// When framesLeft is 0, (GetPacketDuration() - framesLeft) is a multiple
// of kOpusSamplingRate, so there is no precision loss in the integer division
// used to compute framesToFetch. If framesLeft > 0, we need to add 1 to
// framesToFetch to ensure there will be at least n frames after re-sampling.
const int frameRoundUp = framesLeft ? 1 : 0;
MOZ_ASSERT(GetPacketDuration() >= framesLeft);
// Try to fetch m input frames such that, after re-sampling, there will be n
// frames where (n + framesLeft) >= GetPacketDuration().
const int framesToFetch = !mResampler ? GetPacketDuration()
: (GetPacketDuration() - framesLeft) * mSamplingRate / kOpusSamplingRate
+ frameRoundUp;
{
// Move all the samples from mRawSegment to mSourceSegment. We only hold
// the monitor in this block.
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
// Wait until enough raw data, end of stream or cancelled.
while (!mCanceled && mRawSegment.GetDuration() +
mSourceSegment.GetDuration() < framesToFetch &&
!mEndOfStream) {
mReentrantMonitor.Wait();
}
if (mCanceled || mEncodingComplete) {
return NS_ERROR_FAILURE;
}
mSourceSegment.AppendFrom(&mRawSegment);
// Pad |mLookahead| samples at the end of the source stream to prevent loss of
// original data; the PCM duration will be calculated at the 48 kHz rate later.
if (mEndOfStream && !mEosSetInEncoder) {
mEosSetInEncoder = true;
mSourceSegment.AppendNullData(mLookahead);
}
}
// Start encoding data.
nsAutoTArray<AudioDataValue, 9600> pcm;
pcm.SetLength(GetPacketDuration() * mChannels);
AudioSegment::ChunkIterator iter(mSourceSegment);
int frameCopied = 0;
while (!iter.IsEnded() && frameCopied < framesToFetch) {
AudioChunk chunk = *iter;
// Clamp the chunk to the required frame count.
int frameToCopy = chunk.GetDuration();
if (frameCopied + frameToCopy > framesToFetch) {
frameToCopy = framesToFetch - frameCopied;
}
if (!chunk.IsNull()) {
// Append the interleaved data to the end of pcm buffer.
AudioTrackEncoder::InterleaveTrackData(chunk, frameToCopy, mChannels,
pcm.Elements() + frameCopied * mChannels);
} else {
memset(pcm.Elements() + frameCopied * mChannels, 0,
frameToCopy * mChannels * sizeof(AudioDataValue));
}
frameCopied += frameToCopy;
iter.Next();
}
RefPtr<EncodedFrame> audiodata = new EncodedFrame();
audiodata->SetFrameType(EncodedFrame::OPUS_AUDIO_FRAME);
int framesInPCM = frameCopied;
if (mResampler) {
nsAutoTArray<AudioDataValue, 9600> resamplingDest;
// We want to consume all the input data, so we slightly oversize the
// resampled data buffer so we can fit the output data in. We cannot really
// predict the output frame count at each call.
uint32_t outframes = frameCopied * kOpusSamplingRate / mSamplingRate + 1;
uint32_t inframes = frameCopied;
resamplingDest.SetLength(outframes * mChannels);
//......... part of this code is omitted here .........
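The comments near the top of Example 8 explain how framesToFetch is derived from the Opus packet duration, the input sampling rate, and any leftover resampled frames. Here is a hedged, self-contained worked example of that arithmetic; the 44100 Hz input rate and the 20 ms (960-frame) packet duration are illustrative assumptions, not values taken from the example.
#include <cstdio>

int main()
{
  const int kOpusSamplingRate = 48000; // Opus always runs at 48 kHz
  const int packetDuration = 960;      // assumed 20 ms packet at 48 kHz
  const int samplingRate = 44100;      // assumed input track rate

  // No leftover resampled frames from the previous call: the division is exact.
  int framesLeft = 0;
  int framesToFetch = (packetDuration - framesLeft) * samplingRate / kOpusSamplingRate
                      + (framesLeft ? 1 : 0);
  std::printf("no leftover: fetch %d input frames\n", framesToFetch); // 882

  // Ten leftover resampled frames: fetch a little less input and round up by
  // one frame so (resampled output + leftover) still covers a full packet.
  framesLeft = 10;
  framesToFetch = (packetDuration - framesLeft) * samplingRate / kOpusSamplingRate
                  + (framesLeft ? 1 : 0);
  std::printf("10 leftover: fetch %d input frames\n", framesToFetch); // 873
  return 0;
}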
Example 9: PROFILER_LABEL
nsresult
ISOMediaWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
uint32_t aFlags)
{
PROFILER_LABEL("ISOMediaWriter", "WriteEncodedTrack",
js::ProfileEntry::Category::OTHER);
// Muxing is complete; re-entry is not allowed.
if (mState == MUXING_DONE) {
MOZ_ASSERT(false);
return NS_ERROR_FAILURE;
}
FragmentBuffer* frag = nullptr;
uint32_t len = aData.GetEncodedFrames().Length();
if (!len) {
// No frames? Then there is nothing for WriteEncodedTrack to do.
return NS_OK;
}
for (uint32_t i = 0; i < len; i++) {
nsRefPtr<EncodedFrame> frame(aData.GetEncodedFrames()[i]);
EncodedFrame::FrameType type = frame->GetFrameType();
if (type == EncodedFrame::AAC_AUDIO_FRAME ||
type == EncodedFrame::AAC_CSD ||
type == EncodedFrame::AMR_AUDIO_FRAME ||
type == EncodedFrame::AMR_AUDIO_CSD) {
frag = mAudioFragmentBuffer;
} else if (type == EncodedFrame::AVC_I_FRAME ||
type == EncodedFrame::AVC_P_FRAME ||
type == EncodedFrame::AVC_B_FRAME ||
type == EncodedFrame::AVC_CSD) {
frag = mVideoFragmentBuffer;
} else {
MOZ_ASSERT(0);
return NS_ERROR_FAILURE;
}
frag->AddFrame(frame);
}
// The encoder should send a CSD (codec-specific data) frame before sending the
// audio/video frames. Once the CSD data is ready, there is enough information
// to generate the moov data. If the encoder hasn't sent the CSD yet, the muxer
// needs to wait before generating anything.
if (mType & Audio_Track && (!mAudioFragmentBuffer ||
!mAudioFragmentBuffer->HasCSD())) {
return NS_OK;
}
if (mType & Video_Track && (!mVideoFragmentBuffer ||
!mVideoFragmentBuffer->HasCSD())) {
return NS_OK;
}
// There is only one FrameType per EncodedFrameContainer, so this check doesn't
// need to be inside the for-loop.
if (frag && (aFlags & END_OF_STREAM)) {
frag->SetEndOfStream();
}
nsresult rv;
bool EOS;
if (ReadyToRunState(EOS)) {
// Because the track encoder won't generate new data after EOS, we need to make
// sure the state reaches MUXING_DONE once EOS is signaled.
do {
rv = RunState();
} while (EOS && mState != MUXING_DONE);
NS_ENSURE_SUCCESS(rv, rv);
}
return NS_OK;
}