This article collects typical usage examples of the C++ method EncodedFrameContainer::AppendEncodedFrame. If you are wondering what exactly EncodedFrameContainer::AppendEncodedFrame does and how to use it, the curated examples below should help. You can also look further into the containing class EncodedFrameContainer for more context.
A total of 6 code examples of EncodedFrameContainer::AppendEncodedFrame are shown below, ordered by popularity by default.
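Before diving into the examples, here is a minimal sketch of the pattern they all share: create an EncodedFrame, fill in its type, timing, and payload, then hand it to the container. All concrete values below (frame type, timestamp, duration, payload size) are placeholders, not taken from any particular example.
// Minimal sketch of the common pattern; all values are placeholders.
void AppendOneFrame(EncodedFrameContainer& aContainer)
{
  nsTArray<uint8_t> payload;
  payload.SetLength(256);                          // dummy payload bytes
  RefPtr<EncodedFrame> frame = new EncodedFrame();
  frame->SetFrameType(EncodedFrame::VP8_P_FRAME);  // any supported frame type
  frame->SetTimeStamp(0);                          // in microseconds
  frame->SetDuration(33000);                       // ~one frame at 30 fps
  frame->SwapInFrameData(payload);                 // take ownership of the bytes
  aContainer.AppendEncodedFrame(frame);            // container now holds the frame
}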
Example 1: while
nsresult
VP8TrackEncoder::GetEncodedPartitions(EncodedFrameContainer& aData)
{
  vpx_codec_iter_t iter = nullptr;
  EncodedFrame::FrameType frameType = EncodedFrame::VP8_P_FRAME;
  nsTArray<uint8_t> frameData;
  nsresult rv;
  const vpx_codec_cx_pkt_t *pkt = nullptr;
  while ((pkt = vpx_codec_get_cx_data(mVPXContext, &iter)) != nullptr) {
    switch (pkt->kind) {
      case VPX_CODEC_CX_FRAME_PKT: {
        // Copy the encoded data from libvpx to frameData.
        frameData.AppendElements((uint8_t*)pkt->data.frame.buf,
                                 pkt->data.frame.sz);
        break;
      }
      default: {
        break;
      }
    }
    // End of frame.
    if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
      if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
        frameType = EncodedFrame::VP8_I_FRAME;
      }
      break;
    }
  }

  // Guard against pkt being null, which can happen if libvpx runs out of
  // packets while the last one was still a fragment.
  if (!frameData.IsEmpty() && pkt &&
      (pkt->data.frame.pts == mEncodedTimestamp)) {
    // Copy the encoded data to aData.
    EncodedFrame* videoData = new EncodedFrame();
    videoData->SetFrameType(frameType);
    // Convert the timestamp and duration to usecs.
    CheckedInt64 timestamp = FramesToUsecs(mEncodedTimestamp, mTrackRate);
    if (timestamp.isValid()) {
      videoData->SetTimeStamp((uint64_t)timestamp.value());
    }
    CheckedInt64 duration = FramesToUsecs(pkt->data.frame.duration, mTrackRate);
    if (duration.isValid()) {
      videoData->SetDuration((uint64_t)duration.value());
    }
    rv = videoData->SwapInFrameData(frameData);
    NS_ENSURE_SUCCESS(rv, rv);
    VP8LOG("GetEncodedPartitions TimeStamp %lld Duration %lld\n",
           videoData->GetTimeStamp(), videoData->GetDuration());
    VP8LOG("frameType %d\n", videoData->GetFrameType());
    aData.AppendEncodedFrame(videoData);
  }
  return NS_OK;
}
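For context, this is how a caller might drive the method above: feed one raw frame to libvpx, then drain the resulting packets. The wrapper below is a sketch assuming the standard libvpx encode call; EncodeFrame and mEncodedFrameDuration are hypothetical names, not part of the example.
// Hypothetical driver, assuming the usual libvpx encode flow.
nsresult
VP8TrackEncoder::EncodeFrame(vpx_image_t* aImage, EncodedFrameContainer& aData)
{
  // Feed one raw frame; pts and duration are in mTrackRate units.
  if (vpx_codec_encode(mVPXContext, aImage, mEncodedTimestamp,
                       mEncodedFrameDuration, 0, VPX_DL_REALTIME)) {
    return NS_ERROR_FAILURE;
  }
  // Drain all output packets for this frame into aData.
  return GetEncodedPartitions(aData);
}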
Example 2: AppendDummyFrame
// When we append an I-Frame into the WebM muxer, the muxer will treat the
// previous data as "a cluster".
// In these test cases, we call the function repeatedly to close the previous
// cluster so that we can retrieve data via |GetContainerData|.
void AppendDummyFrame(EncodedFrame::FrameType aFrameType,
                      uint64_t aDuration) {
  EncodedFrameContainer encodedVideoData;
  nsTArray<uint8_t> frameData;
  RefPtr<EncodedFrame> videoData = new EncodedFrame();
  // Create dummy frame data.
  frameData.SetLength(FIXED_FRAMESIZE);
  videoData->SetFrameType(aFrameType);
  videoData->SetTimeStamp(mTimestamp);
  videoData->SetDuration(aDuration);
  videoData->SwapInFrameData(frameData);
  encodedVideoData.AppendEncodedFrame(videoData);
  WriteEncodedTrack(encodedVideoData, 0);
  mTimestamp += aDuration;
}
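A sketch of how a test might use this helper: the durations are placeholder microsecond values, and only the I-frame/P-frame sequencing matters, since the second I-frame is what closes the first cluster.
// Hypothetical test flow; the second I-frame closes the first cluster.
AppendDummyFrame(EncodedFrame::VP8_I_FRAME, 33000);  // opens cluster 1
AppendDummyFrame(EncodedFrame::VP8_P_FRAME, 33000);
AppendDummyFrame(EncodedFrame::VP8_P_FRAME, 33000);
AppendDummyFrame(EncodedFrame::VP8_I_FRAME, 33000);  // closes cluster 1, opens cluster 2
// Cluster 1 can now be retrieved via GetContainerData().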
Example 3: while
void
VorbisTrackEncoder::GetEncodedFrames(EncodedFrameContainer& aData)
{
  // vorbis does some data preanalysis, then divvies up blocks for
  // more involved (potentially parallel) processing. Get a single
  // block for encoding now.
  while (vorbis_analysis_blockout(&mVorbisDsp, &mVorbisBlock) == 1) {
    ogg_packet oggPacket;
    if (vorbis_analysis(&mVorbisBlock, &oggPacket) == 0) {
      VORBISLOG("vorbis_analysis_blockout block size %d", oggPacket.bytes);
      EncodedFrame* audiodata = new EncodedFrame();
      audiodata->SetFrameType(EncodedFrame::AUDIO_FRAME);
      nsTArray<uint8_t> frameData;
      frameData.AppendElements(oggPacket.packet, oggPacket.bytes);
      audiodata->SetFrameData(&frameData);
      aData.AppendEncodedFrame(audiodata);
    }
  }
}
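For context, a sketch of how PCM could be submitted before calling GetEncodedFrames, using the standard libvorbis analysis API. The method name SubmitPcm is hypothetical; only mVorbisDsp mirrors the example above.
// Hypothetical feeder using the standard libvorbis analysis calls.
void
VorbisTrackEncoder::SubmitPcm(const float* aInterleaved, int aFrames, int aChannels)
{
  // Ask libvorbis for an uninterleaved buffer and copy the samples in.
  float** buffer = vorbis_analysis_buffer(&mVorbisDsp, aFrames);
  for (int ch = 0; ch < aChannels; ch++) {
    for (int i = 0; i < aFrames; i++) {
      buffer[ch][i] = aInterleaved[i * aChannels + ch];
    }
  }
  // Tell libvorbis how much was written; blocks then become available
  // to vorbis_analysis_blockout() in GetEncodedFrames().
  vorbis_analysis_wrote(&mVorbisDsp, aFrames);
}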
Example 4: if
nsresult
OmxAudioTrackEncoder::AppendEncodedFrames(EncodedFrameContainer& aContainer)
{
  nsTArray<uint8_t> frameData;
  int outFlags = 0;
  int64_t outTimeUs = -1;
  nsresult rv = mEncoder->GetNextEncodedFrame(&frameData, &outTimeUs, &outFlags,
                                              3000); // wait up to 3ms
  NS_ENSURE_SUCCESS(rv, rv);

  if (!frameData.IsEmpty()) {
    bool isCSD = false;
    if (outFlags & OMXCodecWrapper::BUFFER_CODEC_CONFIG) { // codec specific data
      isCSD = true;
    } else if (outFlags & OMXCodecWrapper::BUFFER_EOS) { // last frame
      mEncodingComplete = true;
    }

    nsRefPtr<EncodedFrame> audiodata = new EncodedFrame();
    if (mEncoder->GetCodecType() == OMXCodecWrapper::AAC_ENC) {
      audiodata->SetFrameType(isCSD ?
        EncodedFrame::AAC_CSD : EncodedFrame::AAC_AUDIO_FRAME);
    } else if (mEncoder->GetCodecType() == OMXCodecWrapper::AMR_NB_ENC) {
      audiodata->SetFrameType(isCSD ?
        EncodedFrame::AMR_AUDIO_CSD : EncodedFrame::AMR_AUDIO_FRAME);
    } else {
      MOZ_ASSERT(false, "audio codec not supported");
    }
    audiodata->SetTimeStamp(outTimeUs);
    audiodata->SwapInFrameData(frameData);
    aContainer.AppendEncodedFrame(audiodata);
  }
  return NS_OK;
}
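Since each call pulls at most one encoded frame, a caller that wants everything out of the encoder has to loop. A sketch, with the caveat that DrainAll is a hypothetical name and a real caller would typically block on new input between pulls rather than spin:
// Hypothetical draining loop: pull frames until the encoder signals EOS.
nsresult
OmxAudioTrackEncoder::DrainAll(EncodedFrameContainer& aContainer)
{
  while (!mEncodingComplete) {
    nsresult rv = AppendEncodedFrames(aContainer);
    NS_ENSURE_SUCCESS(rv, rv);
  }
  return NS_OK;
}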
Example 5: mon
nsresult
OmxVideoTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  VideoSegment segment;
  {
    // Move all the samples from mRawSegment to segment. We only hold the
    // monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);
    // Wait until the encoder is initialized and there is data to encode
    // (or the stream has ended), unless we have been canceled.
    while (!mCanceled && (!mInitialized ||
           (mRawSegment.GetDuration() == 0 && !mEndOfStream))) {
      mReentrantMonitor.Wait();
    }
    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }
    segment.AppendFrom(&mRawSegment);
  }

  nsresult rv;
  // Start queuing raw frames to the input buffers of OMXCodecWrapper.
  VideoSegment::ChunkIterator iter(segment);
  while (!iter.IsEnded()) {
    VideoChunk chunk = *iter;
    // Send only the unique video frames to OMXCodecWrapper.
    if (mLastFrame != chunk.mFrame) {
      uint64_t totalDurationUs = mTotalFrameDuration * USECS_PER_S / mTrackRate;
      layers::Image* img = (chunk.IsNull() || chunk.mFrame.GetForceBlack()) ?
                           nullptr : chunk.mFrame.GetImage();
      rv = mEncoder->Encode(img, mFrameWidth, mFrameHeight, totalDurationUs);
      NS_ENSURE_SUCCESS(rv, rv);
    }
    mLastFrame.TakeFrom(&chunk.mFrame);
    mTotalFrameDuration += chunk.GetDuration();
    iter.Next();
  }

  // Send the EOS signal to OMXCodecWrapper.
  if (mEndOfStream && iter.IsEnded() && !mEosSetInEncoder) {
    uint64_t totalDurationUs = mTotalFrameDuration * USECS_PER_S / mTrackRate;
    layers::Image* img = (!mLastFrame.GetImage() || mLastFrame.GetForceBlack())
                         ? nullptr : mLastFrame.GetImage();
    rv = mEncoder->Encode(img, mFrameWidth, mFrameHeight, totalDurationUs,
                          OMXCodecWrapper::BUFFER_EOS);
    NS_ENSURE_SUCCESS(rv, rv);
    // Keep sending the EOS signal until OMXVideoEncoder gets it.
    mEosSetInEncoder = true;
  }

  // Dequeue an encoded frame from the output buffers of OMXCodecWrapper.
  nsTArray<uint8_t> buffer;
  int outFlags = 0;
  int64_t outTimeStampUs = 0;
  rv = mEncoder->GetNextEncodedFrame(&buffer, &outTimeStampUs, &outFlags,
                                     GET_ENCODED_VIDEO_FRAME_TIMEOUT);
  NS_ENSURE_SUCCESS(rv, rv);
  if (!buffer.IsEmpty()) {
    nsRefPtr<EncodedFrame> videoData = new EncodedFrame();
    if (outFlags & OMXCodecWrapper::BUFFER_CODEC_CONFIG) {
      videoData->SetFrameType(EncodedFrame::AVC_CSD);
    } else {
      videoData->SetFrameType((outFlags & OMXCodecWrapper::BUFFER_SYNC_FRAME) ?
                              EncodedFrame::AVC_I_FRAME : EncodedFrame::AVC_P_FRAME);
    }
    videoData->SwapInFrameData(buffer);
    videoData->SetTimeStamp(outTimeStampUs);
    aData.AppendEncodedFrame(videoData);
  }

  if (outFlags & OMXCodecWrapper::BUFFER_EOS) {
    mEncodingComplete = true;
    OMX_LOG("Done encoding video.");
  }

  return NS_OK;
}
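The wait loop at the top of this example only works if the producer side notifies the same monitor after appending raw data. A minimal sketch of that counterpart, assuming the hypothetical method name AppendRawFrames (only the monitor pattern is taken from the example):
// Hypothetical producer side of the monitor handshake used above.
void
OmxVideoTrackEncoder::AppendRawFrames(VideoSegment& aSegment)
{
  ReentrantMonitorAutoEnter mon(mReentrantMonitor);
  mRawSegment.AppendFrom(&aSegment);  // hand the raw frames over
  mReentrantMonitor.NotifyAll();      // wake the waiting GetEncodedTrack()
}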
Example 6: mon
//......... part of the code omitted here .........
RefPtr<EncodedFrame> audiodata = new EncodedFrame();
audiodata->SetFrameType(EncodedFrame::OPUS_AUDIO_FRAME);
int framesInPCM = frameCopied;
if (mResampler) {
  nsAutoTArray<AudioDataValue, 9600> resamplingDest;
  // We want to consume all the input data, so we slightly oversize the
  // buffer for the resampled data so the output fits. We cannot really
  // predict the output frame count at each call.
  uint32_t outframes = frameCopied * kOpusSamplingRate / mSamplingRate + 1;
  uint32_t inframes = frameCopied;
  resamplingDest.SetLength(outframes * mChannels);
#ifdef MOZ_SAMPLE_TYPE_S16
  short* in = reinterpret_cast<short*>(pcm.Elements());
  short* out = reinterpret_cast<short*>(resamplingDest.Elements());
  speex_resampler_process_interleaved_int(mResampler, in, &inframes,
                                          out, &outframes);
#else
  float* in = reinterpret_cast<float*>(pcm.Elements());
  float* out = reinterpret_cast<float*>(resamplingDest.Elements());
  speex_resampler_process_interleaved_float(mResampler, in, &inframes,
                                            out, &outframes);
#endif
  MOZ_ASSERT(pcm.Length() >= mResampledLeftover.Length());
  PodCopy(pcm.Elements(), mResampledLeftover.Elements(),
          mResampledLeftover.Length());
  uint32_t outframesToCopy = std::min(outframes,
      static_cast<uint32_t>(GetPacketDuration() - framesLeft));
  MOZ_ASSERT(pcm.Length() - mResampledLeftover.Length() >=
             outframesToCopy * mChannels);
  PodCopy(pcm.Elements() + mResampledLeftover.Length(),
          resamplingDest.Elements(), outframesToCopy * mChannels);
  int frameLeftover = outframes - outframesToCopy;
  mResampledLeftover.SetLength(frameLeftover * mChannels);
  PodCopy(mResampledLeftover.Elements(),
          resamplingDest.Elements() + outframesToCopy * mChannels,
          mResampledLeftover.Length());
  // This is always at 48000Hz.
  framesInPCM = framesLeft + outframesToCopy;
  audiodata->SetDuration(framesInPCM);
} else {
  // The ogg timestamping and pre-skip is always timed at 48000Hz.
  audiodata->SetDuration(frameCopied * (kOpusSamplingRate / mSamplingRate));
}
// Remove the raw data which has been pulled into the pcm buffer.
// The value of frameCopied should be equal to (or smaller than, if EOS)
// GetPacketDuration().
mSourceSegment.RemoveLeading(frameCopied);
// We have reached the end of the input stream and all queued data has been
// pulled for encoding.
if (mSourceSegment.GetDuration() == 0 && mEndOfStream) {
  mEncodingComplete = true;
  LOG("[Opus] Done encoding.");
}
MOZ_ASSERT(mEndOfStream || framesInPCM == GetPacketDuration());
// Append null data to the pcm buffer if the leftover data is not enough for
// the opus encoder.
if (framesInPCM < GetPacketDuration() && mEndOfStream) {
  PodZero(pcm.Elements() + framesInPCM * mChannels,
          (GetPacketDuration() - framesInPCM) * mChannels);
}
nsTArray<uint8_t> frameData;
// Encode the data with the Opus encoder.
frameData.SetLength(MAX_DATA_BYTES);
// A negative result is an Opus error code.
int result = 0;
#ifdef MOZ_SAMPLE_TYPE_S16
const opus_int16* pcmBuf = static_cast<opus_int16*>(pcm.Elements());
result = opus_encode(mEncoder, pcmBuf, GetPacketDuration(),
                     frameData.Elements(), MAX_DATA_BYTES);
#else
const float* pcmBuf = static_cast<float*>(pcm.Elements());
result = opus_encode_float(mEncoder, pcmBuf, GetPacketDuration(),
                           frameData.Elements(), MAX_DATA_BYTES);
#endif
frameData.SetLength(result >= 0 ? result : 0);
if (result < 0) {
  LOG("[Opus] Failed to encode data! Result: %s.", opus_strerror(result));
}
if (mEncodingComplete) {
  if (mResampler) {
    speex_resampler_destroy(mResampler);
    mResampler = nullptr;
  }
  mResampledLeftover.SetLength(0);
}
audiodata->SwapInFrameData(frameData);
aData.AppendEncodedFrame(audiodata);
return result >= 0 ? NS_OK : NS_ERROR_FAILURE;
}
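This snippet assumes mResampler was set up earlier, and only when the input rate differs from Opus's native 48 kHz. A sketch of that initialization with the standard speexdsp API (the quality setting is an assumption):
// Hypothetical resampler setup; only needed when resampling to 48 kHz.
if (mSamplingRate != kOpusSamplingRate) {
  int error;
  mResampler = speex_resampler_init(mChannels,
                                    mSamplingRate,      // input rate
                                    kOpusSamplingRate,  // 48000 Hz
                                    SPEEX_RESAMPLER_QUALITY_DEFAULT,
                                    &error);
  if (error != RESAMPLER_ERR_SUCCESS) {
    mResampler = nullptr;
  }
}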