This article collects typical usage examples of the C++ method EncodedFrameContainer::GetEncodedFrames. If you are wondering what exactly EncodedFrameContainer::GetEncodedFrames does, how to call it, or want to see it in real code, the curated samples below should help. You can also explore the other members of the EncodedFrameContainer class for further usage examples.
Eight code examples of EncodedFrameContainer::GetEncodedFrames are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
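Every example below funnels through the same accessor: GetEncodedFrames() returns the container's list of reference-counted EncodedFrame objects, which the caller then inspects frame by frame. As a reading aid, here is a minimal, hypothetical stand-in for that interface, reconstructed purely from the calls the examples make; it is not Mozilla's actual header (the real classes live under dom/media/encoder and use nsTArray/RefPtr, with Length()/ElementAt() where std::vector uses size()/operator[]):

#include <cstdint>
#include <memory>
#include <vector>

// Hypothetical stand-ins inferred from the calls made in the examples below.
class EncodedFrame {
public:
  enum FrameType { OPUS_AUDIO_FRAME, AAC_AUDIO_FRAME, AVC_I_FRAME /* ... */ };

  FrameType GetFrameType() const { return mType; }
  const std::vector<uint8_t>& GetFrameData() const { return mData; }
  uint64_t GetDuration() const { return mDuration; }
  void SetDuration(uint64_t aDuration) { mDuration = aDuration; }
  uint64_t GetTimeStamp() const { return mTimeStamp; }
  void SetTimeStamp(uint64_t aTimeStamp) { mTimeStamp = aTimeStamp; }

private:
  FrameType mType = OPUS_AUDIO_FRAME;
  std::vector<uint8_t> mData;
  uint64_t mDuration = 0;   // in track-rate ticks
  uint64_t mTimeStamp = 0;  // in microseconds
};

class EncodedFrameContainer {
public:
  // The accessor every example below calls: a read-only view of the frames.
  const std::vector<std::shared_ptr<EncodedFrame>>& GetEncodedFrames() const {
    return mFrames;
  }
  void AppendEncodedFrame(std::shared_ptr<EncodedFrame> aFrame) {
    mFrames.push_back(std::move(aFrame));
  }

private:
  std::vector<std::shared_ptr<EncodedFrame>> mFrames;
};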
Example 1: LOG
nsresult
OggWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
                             uint32_t aFlags)
{
  AUTO_PROFILER_LABEL("OggWriter::WriteEncodedTrack", OTHER);

  uint32_t len = aData.GetEncodedFrames().Length();
  for (uint32_t i = 0; i < len; i++) {
    if (aData.GetEncodedFrames()[i]->GetFrameType() !=
        EncodedFrame::OPUS_AUDIO_FRAME) {
      LOG("[OggWriter] wrong encoded data type!");
      return NS_ERROR_FAILURE;
    }

    // Only pass END_OF_STREAM on the last frame!
    nsresult rv = WriteEncodedData(aData.GetEncodedFrames()[i]->GetFrameData(),
                                   aData.GetEncodedFrames()[i]->GetDuration(),
                                   i < len - 1
                                     ? (aFlags & ~ContainerWriter::END_OF_STREAM)
                                     : aFlags);
    if (NS_FAILED(rv)) {
      LOG("%p Failed to WriteEncodedTrack!", this);
      return rv;
    }
  }
  return NS_OK;
}
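The detail worth copying from Example 1 is the flag handling: END_OF_STREAM may only accompany the final frame, so the code clears it with & ~ on every earlier iteration. A minimal sketch of the same masking pattern, with a hypothetical flag value standing in for the real ContainerWriter::END_OF_STREAM constant:

#include <cstdint>
#include <cstdio>

// Hypothetical flag value for illustration; the real constant is
// ContainerWriter::END_OF_STREAM.
constexpr uint32_t END_OF_STREAM = 1u << 0;

void WriteAllFrames(uint32_t aFlags, uint32_t aFrameCount) {
  for (uint32_t i = 0; i < aFrameCount; i++) {
    // Strip END_OF_STREAM from every frame except the last, exactly as
    // OggWriter::WriteEncodedTrack does above.
    uint32_t flags = (i + 1 < aFrameCount) ? (aFlags & ~END_OF_STREAM) : aFlags;
    std::printf("frame %u flags 0x%x\n", i, flags);
  }
}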
Example 2:
nsresult
WebMWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
                              uint32_t aFlags)
{
  PROFILER_LABEL("WebMWriter", "WriteEncodedTrack",
                 js::ProfileEntry::Category::OTHER);
  for (uint32_t i = 0; i < aData.GetEncodedFrames().Length(); i++) {
    mEbmlComposer->WriteSimpleBlock(aData.GetEncodedFrames().ElementAt(i).get());
  }
  return NS_OK;
}
Example 3: generator
TEST(OpusAudioTrackEncoder, FrameEncode)
{
  const int32_t channels = 1;
  const int32_t sampleRate = 44100;

  TestOpusTrackEncoder encoder;
  EXPECT_TRUE(encoder.TestOpusRawCreation(channels, sampleRate));

  // Generate five seconds of raw audio data.
  AudioGenerator generator(channels, sampleRate);
  AudioSegment segment;
  const int32_t samples = sampleRate * 5;
  generator.Generate(segment, samples);

  encoder.SetStartOffset(0);
  encoder.AppendAudioSegment(Move(segment));
  encoder.AdvanceCurrentTime(samples);

  EncodedFrameContainer container;
  EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

  // Verify that the encoded data is five seconds long.
  uint64_t totalDuration = 0;
  for (auto& frame : container.GetEncodedFrames()) {
    totalDuration += frame->GetDuration();
  }
  // The 44100 Hz used above gets resampled to 48000 Hz for Opus.
  const uint64_t five = 48000 * 5;
  EXPECT_EQ(five, totalDuration);
}
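The expected total here hinges on a property of Opus rather than of the test: Opus only operates at 8, 12, 16, 24, or 48 kHz (RFC 6716), so the 44100 Hz input is resampled to 48000 Hz and the encoded duration is counted in 48 kHz ticks. A small sketch of that duration arithmetic; the helper name is made up for illustration:

#include <cstdint>

// Opus operates only at 8, 12, 16, 24, or 48 kHz; any other input rate is
// resampled. 44100 Hz therefore becomes 48000 Hz.
constexpr uint32_t kOpusRate = 48000;

// Hypothetical helper: expected encoded duration, in output-rate ticks,
// for aSeconds seconds of audio at any input rate.
constexpr uint64_t ExpectedOpusDuration(uint32_t aSeconds) {
  return static_cast<uint64_t>(kOpusRate) * aSeconds;
}

static_assert(ExpectedOpusDuration(5) == 240000,
              "five seconds of audio is 240000 ticks at 48 kHz");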
Example 4: LOG
nsresult
OggWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
                             uint32_t aFlags)
{
  for (uint32_t i = 0; i < aData.GetEncodedFrames().Length(); i++) {
    if (aData.GetEncodedFrames()[i]->GetFrameType() !=
        EncodedFrame::OPUS_AUDIO_FRAME) {
      LOG("[OggWriter] wrong encoded data type!");
      return NS_ERROR_FAILURE;
    }

    nsresult rv = WriteEncodedData(aData.GetEncodedFrames()[i]->GetFrameData(),
                                   aData.GetEncodedFrames()[i]->GetDuration(),
                                   aFlags);
    if (NS_FAILED(rv)) {
      LOG("%p Failed to WriteEncodedTrack!", this);
      return rv;
    }
  }
  return NS_OK;
}
Example 5: LOG
nsresult
MediaEncoder::WriteEncodedDataToMuxer(TrackEncoder* aTrackEncoder)
{
  if (aTrackEncoder == nullptr) {
    return NS_OK;
  }
  if (aTrackEncoder->IsEncodingComplete()) {
    return NS_OK;
  }

  PROFILER_LABEL("MediaEncoder", "WriteEncodedDataToMuxer",
                 js::ProfileEntry::Category::OTHER);

  EncodedFrameContainer encodedVideoData;
  nsresult rv = aTrackEncoder->GetEncodedTrack(encodedVideoData);
  if (NS_FAILED(rv)) {
    // Encoding might be canceled.
    LOG(LogLevel::Error, ("Error! Fail to get encoded data from video encoder."));
    mState = ENCODE_ERROR;
    return rv;
  }

  // Update timestamps to accommodate pauses.
  const nsTArray<RefPtr<EncodedFrame>>& encodedFrames =
    encodedVideoData.GetEncodedFrames();
  // Take a copy of the atomic so we don't continually access it.
  uint64_t microsecondsSpentPaused = mMicrosecondsSpentPaused;
  for (size_t i = 0; i < encodedFrames.Length(); ++i) {
    RefPtr<EncodedFrame> frame = encodedFrames[i];
    if (frame->GetTimeStamp() > microsecondsSpentPaused &&
        frame->GetTimeStamp() - microsecondsSpentPaused > mLastMuxedTimestamp) {
      // Use the adjusted timestamp if it's after the last timestamp.
      frame->SetTimeStamp(frame->GetTimeStamp() - microsecondsSpentPaused);
    } else {
      // If not, we force the last timestamp. We do this so the frames are
      // still around and in order in case the codec needs to reference them.
      // Dropping them here may result in artifacts in playback.
      frame->SetTimeStamp(mLastMuxedTimestamp);
    }
    MOZ_ASSERT(mLastMuxedTimestamp <= frame->GetTimeStamp(),
               "Our frames should be ordered by this point!");
    mLastMuxedTimestamp = frame->GetTimeStamp();
  }

  rv = mWriter->WriteEncodedTrack(encodedVideoData,
                                  aTrackEncoder->IsEncodingComplete()
                                    ? ContainerWriter::END_OF_STREAM : 0);
  if (NS_FAILED(rv)) {
    LOG(LogLevel::Error,
        ("Error! Fail to write encoded video track to the media container."));
    mState = ENCODE_ERROR;
  }
  return rv;
}
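The pause compensation is the heart of Example 5: each timestamp is shifted back by the time spent paused, but clamped to the last muxed timestamp so frames stay in order and none are dropped. The same rule restated as a standalone function, a sketch with the Mozilla types simplified to plain integers:

#include <cassert>
#include <cstdint>

// Sketch of the timestamp adjustment in WriteEncodedDataToMuxer: shift a
// frame's timestamp back by the microseconds spent paused, clamping to the
// last muxed timestamp so ordering is preserved instead of dropping frames.
uint64_t AdjustForPause(uint64_t aFrameTimeUs, uint64_t aPausedUs,
                        uint64_t& aLastMuxedUs) {
  uint64_t adjusted;
  if (aFrameTimeUs > aPausedUs && aFrameTimeUs - aPausedUs > aLastMuxedUs) {
    adjusted = aFrameTimeUs - aPausedUs;  // the shifted timestamp is usable
  } else {
    adjusted = aLastMuxedUs;              // clamp to keep frames ordered
  }
  assert(aLastMuxedUs <= adjusted && "frames should be ordered by this point");
  aLastMuxedUs = adjusted;
  return adjusted;
}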
Example 6: frame
nsresult
ISOMediaWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
                                  uint32_t aFlags)
{
  // Muxing is complete; re-entry is not allowed.
  if (mState == MUXING_DONE) {
    MOZ_ASSERT(false);
    return NS_ERROR_FAILURE;
  }

  FragmentBuffer* frag = nullptr;
  uint32_t len = aData.GetEncodedFrames().Length();
  if (!len) {
    // No frames, so there is nothing for WriteEncodedTrack to do.
    return NS_OK;
  }

  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<EncodedFrame> frame(aData.GetEncodedFrames()[i]);
    EncodedFrame::FrameType type = frame->GetFrameType();
    if (type == EncodedFrame::AAC_AUDIO_FRAME ||
        type == EncodedFrame::AAC_CSD ||
        type == EncodedFrame::AMR_AUDIO_FRAME ||
        type == EncodedFrame::AMR_AUDIO_CSD) {
      frag = mAudioFragmentBuffer;
    } else if (type == EncodedFrame::AVC_I_FRAME ||
               type == EncodedFrame::AVC_P_FRAME ||
               type == EncodedFrame::AVC_B_FRAME ||
               type == EncodedFrame::AVC_CSD) {
      frag = mVideoFragmentBuffer;
    } else {
      MOZ_ASSERT(0);
      return NS_ERROR_FAILURE;
    }
    frag->AddFrame(frame);
  }

  // The encoder should send a CSD (codec-specific data) frame before sending
  // any audio/video frames. Once the CSD data is ready, it is sufficient to
  // generate the moov data. If the encoder hasn't sent CSD yet, the muxer
  // needs to wait before generating anything.
  if (mType & Audio_Track && (!mAudioFragmentBuffer ||
                              !mAudioFragmentBuffer->HasCSD())) {
    return NS_OK;
  }
  if (mType & Video_Track && (!mVideoFragmentBuffer ||
                              !mVideoFragmentBuffer->HasCSD())) {
    return NS_OK;
  }

  // There is only one FrameType per EncodedFrameContainer, so this check
  // doesn't need to be inside the for-loop.
  if (frag && (aFlags & END_OF_STREAM)) {
    frag->SetEndOfStream();
  }

  nsresult rv;
  bool EOS;
  if (ReadyToRunState(EOS)) {
    // TODO:
    // MediaEncoder doesn't use nsRunnable, so the thread would get stuck
    // there and a newly added nsRunnable wouldn't get to run before
    // MediaEncoder completes. Until MediaEncoder changes, RunState must be
    // called directly.
    // https://bugzilla.mozilla.org/show_bug.cgi?id=950429
    rv = RunState();
    NS_ENSURE_SUCCESS(rv, rv);
  }
  return NS_OK;
}
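Note the gating in the middle of Example 6: nothing is muxed until every active track has delivered its codec-specific data (CSD), because the moov box cannot be generated without it. That readiness check, extracted into a compact sketch; the track bits and buffer type are simplified stand-ins for the real ISOMediaWriter members:

#include <cstdint>

// Simplified stand-ins for the muxer state used in the example above.
enum TrackBits : uint32_t { Audio_Track = 1u << 0, Video_Track = 1u << 1 };

struct FragmentBufferSketch {
  bool mHasCSD = false;
  bool HasCSD() const { return mHasCSD; }
};

// Mirrors the two early returns in ISOMediaWriter::WriteEncodedTrack: every
// enabled track must have a fragment buffer *and* that buffer must already
// hold CSD before the muxer may generate any moov data.
bool ReadyToMux(uint32_t aType, const FragmentBufferSketch* aAudio,
                const FragmentBufferSketch* aVideo) {
  if ((aType & Audio_Track) && (!aAudio || !aAudio->HasCSD())) {
    return false;
  }
  if ((aType & Video_Track) && (!aVideo || !aVideo->HasCSD())) {
    return false;
  }
  return true;
}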
Example 7: mon
//......... (part of the code is omitted here) .........
    // Move all the samples from mRawSegment to mSourceSegment. We only hold
    // the monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);
    // Wait while mEncoder is not initialized, or while there is not enough
    // raw data, unless this is the end of stream or encoding was canceled.
    while (!mCanceled && (!mInitialized ||
           (mRawSegment.GetDuration() + mSourceSegment.GetDuration() <
            mEncodedFrameDuration && !mEndOfStream))) {
      mon.Wait();
    }
    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }
    mSourceSegment.AppendFrom(&mRawSegment);
    EOS = mEndOfStream;
  }

  VideoSegment::ChunkIterator iter(mSourceSegment);
  StreamTime durationCopied = 0;
  StreamTime totalProcessedDuration = 0;
  TimeStamp timebase = TimeStamp::Now();
  EncodeOperation nextEncodeOperation = ENCODE_NORMAL_FRAME;

  for (; !iter.IsEnded(); iter.Next()) {
    VideoChunk& chunk = *iter;
    // Accumulate the chunk's duration in durationCopied until it reaches
    // mRemainingTicks.
    durationCopied += chunk.GetDuration();
    MOZ_ASSERT(mRemainingTicks <= mEncodedFrameDuration);
    VP8LOG("durationCopied %lld mRemainingTicks %lld\n",
           durationCopied, mRemainingTicks);

    if (durationCopied >= mRemainingTicks) {
      VP8LOG("nextEncodeOperation is %d\n", nextEncodeOperation);
      // Calculate encodedDuration for this target frame.
      StreamTime encodedDuration = CalculateEncodedDuration(durationCopied);

      // Encode the frame.
      if (nextEncodeOperation != SKIP_FRAME) {
        nsresult rv = PrepareRawFrame(chunk);
        NS_ENSURE_SUCCESS(rv, NS_ERROR_FAILURE);

        // Encode the data with the VP8 encoder.
        int flags = (nextEncodeOperation == ENCODE_NORMAL_FRAME)
                      ? 0 : VPX_EFLAG_FORCE_KF;
        if (vpx_codec_encode(mVPXContext, mVPXImageWrapper, mEncodedTimestamp,
                             (unsigned long)encodedDuration, flags,
                             VPX_DL_REALTIME)) {
          return NS_ERROR_FAILURE;
        }
        // Get the encoded data from the VP8 encoder.
        GetEncodedPartitions(aData);
      } else {
        // SKIP_FRAME
        // Extend the duration of the last encoded data in aData,
        // because this frame will be skipped.
        nsRefPtr<EncodedFrame> last = aData.GetEncodedFrames().LastElement();
        if (last) {
          last->SetDuration(last->GetDuration() + encodedDuration);
        }
      }

      // Move mEncodedTimestamp forward.
      mEncodedTimestamp += encodedDuration;
      totalProcessedDuration += durationCopied;

      // Calculate mRemainingTicks for the next target frame.
      mRemainingTicks = CalculateRemainingTicks(durationCopied,
                                                encodedDuration);

      // Check whether the remaining data is enough for the next target frame.
      if (mSourceSegment.GetDuration() - totalProcessedDuration
          >= mEncodedFrameDuration) {
        TimeDuration elapsedTime = TimeStamp::Now() - timebase;
        nextEncodeOperation = GetNextEncodeOperation(elapsedTime,
                                                     totalProcessedDuration);
        // Reset durationCopied for the next iteration.
        durationCopied = 0;
      } else {
        // There isn't enough data left for the next iteration; break out of
        // the for-loop.
        break;
      }
    }
  }

  // Remove the chunks we have processed.
  mSourceSegment.RemoveLeading(totalProcessedDuration);
  VP8LOG("RemoveLeading %lld\n", totalProcessedDuration);

  // End of stream: pull the remaining frames out of the encoder.
  if (EOS) {
    VP8LOG("mEndOfStream is true\n");
    mEncodingComplete = true;
    if (vpx_codec_encode(mVPXContext, nullptr, mEncodedTimestamp,
                         mEncodedFrameDuration, 0, VPX_DL_REALTIME)) {
      return NS_ERROR_FAILURE;
    }
    GetEncodedPartitions(aData);
  }
  return NS_OK;
}
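The SKIP_FRAME branch above is a useful pattern in its own right: when the encoder decides to skip a frame, nothing new is appended; instead the last frame already in the container is stretched so the stream's total duration still adds up. A tiny sketch of that merge, reduced to a bare list of durations:

#include <cstdint>
#include <vector>

// Sketch of the SKIP_FRAME branch: instead of encoding a new frame, fold the
// skipped frame's duration into the last encoded frame so the total duration
// is preserved.
void ExtendLastFrame(std::vector<uint64_t>& aFrameDurations,
                     uint64_t aSkippedDuration) {
  if (!aFrameDurations.empty()) {
    aFrameDurations.back() += aSkippedDuration;
  }
  // If the container is empty there is nothing to extend; the original code
  // guards this case with `if (last)`.
}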
Example 8: frame
nsresult
ISOMediaWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
                                  uint32_t aFlags)
{
  PROFILER_LABEL("ISOMediaWriter", "WriteEncodedTrack",
                 js::ProfileEntry::Category::OTHER);

  // Muxing is complete; re-entry is not allowed.
  if (mState == MUXING_DONE) {
    MOZ_ASSERT(false);
    return NS_ERROR_FAILURE;
  }

  FragmentBuffer* frag = nullptr;
  uint32_t len = aData.GetEncodedFrames().Length();
  if (!len) {
    // No frames, so there is nothing for WriteEncodedTrack to do.
    return NS_OK;
  }

  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<EncodedFrame> frame(aData.GetEncodedFrames()[i]);
    EncodedFrame::FrameType type = frame->GetFrameType();
    if (type == EncodedFrame::AAC_AUDIO_FRAME ||
        type == EncodedFrame::AAC_CSD ||
        type == EncodedFrame::AMR_AUDIO_FRAME ||
        type == EncodedFrame::AMR_AUDIO_CSD) {
      frag = mAudioFragmentBuffer;
    } else if (type == EncodedFrame::AVC_I_FRAME ||
               type == EncodedFrame::AVC_P_FRAME ||
               type == EncodedFrame::AVC_B_FRAME ||
               type == EncodedFrame::AVC_CSD) {
      frag = mVideoFragmentBuffer;
    } else {
      MOZ_ASSERT(0);
      return NS_ERROR_FAILURE;
    }
    frag->AddFrame(frame);
  }

  // The encoder should send a CSD (codec-specific data) frame before sending
  // any audio/video frames. Once the CSD data is ready, it is sufficient to
  // generate the moov data. If the encoder hasn't sent CSD yet, the muxer
  // needs to wait before generating anything.
  if (mType & Audio_Track && (!mAudioFragmentBuffer ||
                              !mAudioFragmentBuffer->HasCSD())) {
    return NS_OK;
  }
  if (mType & Video_Track && (!mVideoFragmentBuffer ||
                              !mVideoFragmentBuffer->HasCSD())) {
    return NS_OK;
  }

  // There is only one FrameType per EncodedFrameContainer, so this check
  // doesn't need to be inside the for-loop.
  if (frag && (aFlags & END_OF_STREAM)) {
    frag->SetEndOfStream();
  }

  nsresult rv;
  bool EOS;
  if (ReadyToRunState(EOS)) {
    // Because the track encoder won't generate new data after EOS, we must
    // make sure the state reaches MUXING_DONE once EOS is signaled.
    do {
      rv = RunState();
    } while (EOS && mState != MUXING_DONE);
    NS_ENSURE_SUCCESS(rv, rv);
  }
  return NS_OK;
}