This article compiles typical usage examples of the C++ MediaSource::ReadOptions class. If you are wondering what the ReadOptions class is for, how to use it, or what working code looks like, the curated class examples below may help.
The following presents 15 code examples of the ReadOptions class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
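Before the examples, note the pattern they all share: construct a MediaSource::ReadOptions on the stack, set a seek target with setSeekTo() (optionally passing a SeekMode), hand the options to MediaSource::read(), and clear the seek target before any subsequent sequential reads. The sketch below illustrates only that pattern; the helper name readAfterSeek and the already started `source` track passed into it are assumptions for illustration and do not come from the examples on this page.

// Minimal sketch of the ReadOptions seek-then-read pattern.
// Assumption: `source` is an already started sp<MediaSource> track
// obtained from an extractor elsewhere.
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>

using namespace android;

static status_t readAfterSeek(const sp<MediaSource> &source, int64_t seekTimeUs) {
    MediaSource::ReadOptions options;
    // Request the closest sync sample at or before seekTimeUs.
    options.setSeekTo(seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
    MediaBuffer *buffer = NULL;
    status_t err = source->read(&buffer, &options);
    // Only the first read after a seek should carry the seek request;
    // later sequential reads must not seek again.
    options.clearSeekTo();
    if (err != OK) {
        return err;  // e.g. ERROR_END_OF_STREAM or INFO_FORMAT_CHANGED
    }
    int64_t timeUs = 0;
    buffer->meta_data()->findInt64(kKeyTime, &timeUs);
    // ... consume buffer->data() + buffer->range_offset() for buffer->range_length() bytes ...
    buffer->release();
    return OK;
}

The examples below apply this same pattern in subtitle parsers, OMX decoders, the NuMediaExtractor, and audio players.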
Example 1: mSource
TimedTextSRTSource::TimedTextSRTSource(const sp<DataSource>& dataSource)
    : mSource(dataSource),
      mMetaData(new MetaData),
      mIndex(0) {
#ifdef MTK_SUBTITLE_SUPPORT
    mFileEncodeType = ENCODE_TYPE_NORMAL;
#endif
#ifdef SELF_TEST
    scanFile();
    int64_t startTimeUs = 0;
    int64_t endTimeUs = 0;
    Parcel parcel;
    MediaSource::ReadOptions options;
    status_t err = OK;
    int len = 7;
    int st[] = {888, 18111, 22000, 28888, 38000, 54800, 76000};  // ms
    for (int i = 0; i < len; i++) {
        int64_t temp = st[i] * 1000ll;  // us
        options.setSeekTo(temp);
        err = read(&startTimeUs, &endTimeUs, &parcel, &options);
        ALOGE("[--SELF_TEST--] seekTime=%lld, getStartTime=%lld, getEndTime=%lld, isReadSuccessfully:%d",
              temp, startTimeUs, endTimeUs, err);
    }
#endif
    // TODO: Need to detect the language, because SRT doesn't give language
    // information explicitly.
    mMetaData->setCString(kKeyMediaLanguage, "und");
}
Example 2: mSource
TimedTextSUBSource::TimedTextSUBSource(const sp<DataSource>& dataSource)
    : mSource(dataSource),
      mMetaData(new MetaData),
      mIndex(0),
      mFrameRate(TimedTextUtil::DEFAULT_FRAME_RATE) {
    mFileEncodeType = ENCODE_TYPE_NORMAL;
#ifdef SELF_TEST
    scanFile();
    int64_t startTimeUs = 0;
    int64_t endTimeUs = 0;
    Parcel parcel;
    MediaSource::ReadOptions options;
    status_t err = OK;
    int len = 7;
    int st[] = {888, 18111, 22000, 28888, 38000, 54800, 76000};  // ms
    for (int i = 0; i < len; i++) {
        int64_t temp = st[i] * 1000ll;  // us
        options.setSeekTo(temp);
        err = read(&startTimeUs, &endTimeUs, &parcel, &options);
        ALOGE("[--SELF_TEST--] seekTime=%lld, getStartTime=%lld, getEndTime=%lld, isReadSuccessfully:%d",
              temp, startTimeUs, endTimeUs, err);
    }
#endif
}
Example 3: mExSource
//==============================External SP Case===============================
TimedTextASSSource::TimedTextASSSource(const sp<DataSource>& dataSource)
    : mExSource(dataSource),
      mExMetaData(new MetaData),
      mExIndex(0) {
    mASSFlag = TextDescriptions::OUT_OF_BAND_TEXT_ASS;
#ifdef SELF_TEST
    scanFile();
    int64_t startTimeUs = 0;
    int64_t endTimeUs = 0;
    Parcel parcel;
    MediaSource::ReadOptions options;
    status_t err = OK;
    int len = 7;
    int st[] = {888, 18111, 22000, 28888, 38000, 54800, 76000};  // ms
    for (int i = 0; i < len; i++) {
        int64_t temp = st[i] * 1000ll;  // us
        options.setSeekTo(temp);
        err = read(&startTimeUs, &endTimeUs, &parcel, &options);
        ALOGE("[--SELF_TEST--] seekTime=%lld, getStartTime=%lld, getEndTime=%lld, isReadSuccessfully:%d",
              temp, startTimeUs, endTimeUs, err);
    }
#endif
}
Example 4: ToAudioFrame
bool
OmxDecoder::ReadAudio(AudioFrame *aFrame, int64_t aSeekTimeUs)
{
    status_t err;
    if (mAudioMetadataRead && aSeekTimeUs == -1) {
        // Use the data read into the buffer during metadata time
        err = OK;
    }
    else {
        ReleaseAudioBuffer();
        if (aSeekTimeUs != -1) {
            MediaSource::ReadOptions options;
            options.setSeekTo(aSeekTimeUs);
            err = mAudioSource->read(&mAudioBuffer, &options);
        } else {
            err = mAudioSource->read(&mAudioBuffer);
        }
    }
    mAudioMetadataRead = false;
    aSeekTimeUs = -1;
    aFrame->mSize = 0;
    if (err == OK && mAudioBuffer && mAudioBuffer->range_length() != 0) {
        int64_t timeUs;
        if (!mAudioBuffer->meta_data()->findInt64(kKeyTime, &timeUs))
            return false;
        return ToAudioFrame(aFrame, timeUs,
                            mAudioBuffer->data(),
                            mAudioBuffer->range_offset(),
                            mAudioBuffer->range_length(),
                            mAudioChannels, mAudioSampleRate);
    }
    else if (err == INFO_FORMAT_CHANGED) {
        // If the format changed, update our cached info.
        if (!SetAudioFormat()) {
            return false;
        } else {
            return ReadAudio(aFrame, aSeekTimeUs);
        }
    }
    else if (err == ERROR_END_OF_STREAM) {
        if (aFrame->mSize == 0) {
            return false;
        }
    }
    else if (err == -ETIMEDOUT) {
        LOG(LogLevel::Debug, "OmxDecoder::ReadAudio timed out, will retry");
        return true;
    }
    else if (err != OK) {
        LOG(LogLevel::Debug, "OmxDecoder::ReadAudio failed, err=%d", err);
        return false;
    }
    return true;
}
Example 5: CheckStartTimeMs
TEST_F(TimedTextSRTSourceTest, seekTimeIsEarlierThanFirst) {
    MediaSource::ReadOptions options;
    options.setSeekTo(500, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
    err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
    EXPECT_EQ(OK, err);
    EXPECT_EQ(1 * kSecToUsec, startTimeUs);
    CheckStartTimeMs(parcel, 1 * kSecToMsec);
}
Example 6: ReadAudio
bool OmxDecoder::ReadAudio(AudioFrame *aFrame, int64_t aSeekTimeUs)
{
    MOZ_ASSERT(aSeekTimeUs >= -1);
    status_t err;
    if (mAudioMetadataRead && aSeekTimeUs == -1) {
        // Use the data read into the buffer during metadata time
        err = OK;
    }
    else {
        ReleaseAudioBuffer();
        if (aSeekTimeUs != -1) {
            MediaSource::ReadOptions options;
            options.setSeekTo(aSeekTimeUs);
            err = mAudioSource->read(&mAudioBuffer, &options);
        } else {
            err = mAudioSource->read(&mAudioBuffer);
        }
    }
    mAudioMetadataRead = false;
    aSeekTimeUs = -1;
    if (err == OK && mAudioBuffer->range_length() != 0) {
        int64_t timeUs;
        if (!mAudioBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
            LOG("no frame time");
            return false;
        }
        if (timeUs < 0) {
            LOG("frame time %lld must be nonnegative", timeUs);
            return false;
        }
        return ToAudioFrame(aFrame, timeUs,
                            mAudioBuffer->data(),
                            mAudioBuffer->range_offset(),
                            mAudioBuffer->range_length(),
                            mAudioChannels, mAudioSampleRate);
    }
    else if (err == INFO_FORMAT_CHANGED) {
        // If the format changed, update our cached info.
        LOG("mAudioSource INFO_FORMAT_CHANGED");
        if (!SetAudioFormat())
            return false;
        else
            return ReadAudio(aFrame, aSeekTimeUs);
    }
    else if (err == ERROR_END_OF_STREAM) {
        LOG("mAudioSource END_OF_STREAM");
    }
    else if (err != OK) {
        LOG("mAudioSource ERROR %#x", err);
    }
    return err == OK;
}
Example 7:
TEST_F(TimedTextSRTSourceTest, seekTimeIsLaterThanLast) {
    MediaSource::ReadOptions options;
    options.setSeekTo(7 * kSecToUsec, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
    err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
    EXPECT_EQ(ERROR_END_OF_STREAM, err);
    options.setSeekTo(8 * kSecToUsec, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
    err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
    EXPECT_EQ(ERROR_END_OF_STREAM, err);
}
Example 8: performSeekTest
static void performSeekTest(const sp<MediaSource> &source) {
    CHECK_EQ((status_t)OK, source->start());
    int64_t durationUs;
    CHECK(source->getFormat()->findInt64(kKeyDuration, &durationUs));
    for (int64_t seekTimeUs = 0; seekTimeUs <= durationUs;
            seekTimeUs += 60000ll) {
        MediaSource::ReadOptions options;
        options.setSeekTo(
                seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
        MediaBuffer *buffer;
        status_t err;
        for (;;) {
            err = source->read(&buffer, &options);
            options.clearSeekTo();
            if (err == INFO_FORMAT_CHANGED) {
                CHECK(buffer == NULL);
                continue;
            }
            if (err != OK) {
                CHECK(buffer == NULL);
                break;
            }
            CHECK(buffer != NULL);
            if (buffer->range_length() > 0) {
                break;
            }
            buffer->release();
            buffer = NULL;
        }
        if (err == OK) {
            int64_t timeUs;
            CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
            printf("%lld\t%lld\t%lld\n", seekTimeUs, timeUs, seekTimeUs - timeUs);
            buffer->release();
            buffer = NULL;
        } else {
            printf("ERROR\n");
            break;
        }
    }
    CHECK_EQ((status_t)OK, source->stop());
}
Example 9: fetchTrackSamples
ssize_t NuMediaExtractor::fetchTrackSamples(
        int64_t seekTimeUs, MediaSource::ReadOptions::SeekMode mode) {
    TrackInfo *minInfo = NULL;
    ssize_t minIndex = -1;
    for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
        TrackInfo *info = &mSelectedTracks.editItemAt(i);
        if (seekTimeUs >= 0ll) {
            info->mFinalResult = OK;
            if (info->mSample != NULL) {
                info->mSample->release();
                info->mSample = NULL;
                info->mSampleTimeUs = -1ll;
            }
        } else if (info->mFinalResult != OK) {
            continue;
        }
        if (info->mSample == NULL) {
            MediaSource::ReadOptions options;
            if (seekTimeUs >= 0ll) {
                options.setSeekTo(seekTimeUs, mode);
            }
            status_t err = info->mSource->read(&info->mSample, &options);
            if (err != OK) {
                CHECK(info->mSample == NULL);
                info->mFinalResult = err;
                if (info->mFinalResult != ERROR_END_OF_STREAM) {
                    ALOGW("read on track %d failed with error %d",
                          info->mTrackIndex, err);
                }
                info->mSampleTimeUs = -1ll;
                continue;
            } else {
                CHECK(info->mSample != NULL);
                CHECK(info->mSample->meta_data()->findInt64(
                        kKeyTime, &info->mSampleTimeUs));
            }
        }
        if (minInfo == NULL || info->mSampleTimeUs < minInfo->mSampleTimeUs) {
            minInfo = info;
            minIndex = i;
        }
    }
    return minIndex;
}
Example 10: ReadAudio
bool OmxDecoder::ReadAudio(AudioFrame *aFrame, int64_t aSeekTimeUs)
{
    status_t err;
    if (mAudioMetadataRead && aSeekTimeUs == -1) {
        // Use the data read into the buffer during metadata time
        err = OK;
    }
    else {
        ReleaseAudioBuffer();
        if (aSeekTimeUs != -1) {
            MediaSource::ReadOptions options;
            options.setSeekTo(aSeekTimeUs);
            err = mAudioSource->read(&mAudioBuffer, &options);
        } else {
            err = mAudioSource->read(&mAudioBuffer);
        }
    }
    mAudioMetadataRead = false;
    aSeekTimeUs = -1;
    if (err == OK && mAudioBuffer->range_length() != 0) {
        int64_t timeUs;
        if (!mAudioBuffer->meta_data()->findInt64(kKeyTime, &timeUs))
            return false;
        return ToAudioFrame(aFrame, timeUs,
                            mAudioBuffer->data(),
                            mAudioBuffer->range_offset(),
                            mAudioBuffer->range_length(),
                            mAudioChannels, mAudioSampleRate);
    }
    else if (err == INFO_FORMAT_CHANGED && !SetAudioFormat()) {
        // If the format changed, update our cached info.
        return false;
    }
    else if (err == ERROR_END_OF_STREAM)
        return false;
    return true;
}
Example 11: AStringPrintf
TEST_F(TimedTextSRTSourceTest, checkEdgeCase) {
    MediaSource::ReadOptions options;
    options.setSeekTo(5500 * kMsecToUsec, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
    err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
    EXPECT_EQ(OK, err);
    EXPECT_EQ(5500 * kMsecToUsec, startTimeUs);
    subtitle = AStringPrintf("6\n\n");
    CheckDataEquals(parcel, subtitle.c_str());
    options.setSeekTo(5800 * kMsecToUsec, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
    err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
    EXPECT_EQ(OK, err);
    EXPECT_EQ(5800 * kMsecToUsec, startTimeUs);
    subtitle = AStringPrintf("7\n\n");
    CheckDataEquals(parcel, subtitle.c_str());
    options.setSeekTo(6000 * kMsecToUsec, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
    err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
    EXPECT_EQ(OK, err);
    EXPECT_EQ(6000 * kMsecToUsec, startTimeUs);
    subtitle = AStringPrintf("8\n\n");
    CheckDataEquals(parcel, subtitle.c_str());
}
Example 12: autoLock
bool
OmxDecoder::ReadVideo(VideoFrame *aFrame, int64_t aTimeUs,
                      bool aKeyframeSkip, bool aDoSeek)
{
    if (!mVideoSource.get())
        return false;
    ReleaseVideoBuffer();
    status_t err;
    if (aDoSeek) {
        {
            Mutex::Autolock autoLock(mSeekLock);
            ReleaseAllPendingVideoBuffersLocked();
            mIsVideoSeeking = true;
        }
        MediaSource::ReadOptions options;
        MediaSource::ReadOptions::SeekMode seekMode;
        // If the timestamp of the last decoded frame is smaller than seekTime,
        // seek to the next key frame. Otherwise seek to the previous one.
        OD_LOG("SeekTime: %lld, mLastSeekTime:%lld", aTimeUs, mLastSeekTime);
        if (mLastSeekTime == -1 || mLastSeekTime > aTimeUs) {
            seekMode = MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC;
        } else {
            seekMode = MediaSource::ReadOptions::SEEK_NEXT_SYNC;
        }
        mLastSeekTime = aTimeUs;
        bool findNextBuffer = true;
        while (findNextBuffer) {
            options.setSeekTo(aTimeUs, seekMode);
            findNextBuffer = false;
            if (mIsVideoSeeking) {
                err = mVideoSource->read(&mVideoBuffer, &options);
                Mutex::Autolock autoLock(mSeekLock);
                mIsVideoSeeking = false;
                PostReleaseVideoBuffer(nullptr, FenceHandle());
            }
            else {
                err = mVideoSource->read(&mVideoBuffer);
            }
            // If there is no next keyframe, jump to the previous key frame.
            if (err == ERROR_END_OF_STREAM && seekMode == MediaSource::ReadOptions::SEEK_NEXT_SYNC) {
                seekMode = MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC;
                findNextBuffer = true;
                {
                    Mutex::Autolock autoLock(mSeekLock);
                    mIsVideoSeeking = true;
                }
                continue;
            } else if (err != OK) {
                OD_LOG("Unexpected error when seeking to %lld", aTimeUs);
                break;
            }
            // For some codecs, the length of the first decoded frame after a seek is 0.
            // We need to ignore it and continue to find the next one.
            if (mVideoBuffer->range_length() == 0) {
                PostReleaseVideoBuffer(mVideoBuffer, FenceHandle());
                findNextBuffer = true;
            }
        }
        aDoSeek = false;
    } else {
        err = mVideoSource->read(&mVideoBuffer);
    }
    aFrame->mSize = 0;
    if (err == OK) {
        int64_t timeUs;
        int32_t unreadable;
        int32_t keyFrame;
        size_t length = mVideoBuffer->range_length();
        if (!mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
            NS_WARNING("OMX decoder did not return frame time");
            return false;
        }
        if (!mVideoBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) {
            keyFrame = 0;
        }
        if (!mVideoBuffer->meta_data()->findInt32(kKeyIsUnreadable, &unreadable)) {
            unreadable = 0;
        }
        RefPtr<mozilla::layers::TextureClient> textureClient;
        if ((mVideoBuffer->graphicBuffer().get())) {
            textureClient = mNativeWindow->getTextureClientFromBuffer(mVideoBuffer->graphicBuffer().get());
        }
        if (textureClient) {
            // Manually increment the reference count to keep the MediaBuffer alive
            // while the TextureClient is in use.
            mVideoBuffer->add_ref();
            static_cast<GrallocTextureData*>(textureClient->GetInternalData())->SetMediaBuffer(mVideoBuffer);
            // Set recycle callback for TextureClient
            //......... part of the code omitted here .........
Example 13: FillBuffer
size_t AudioOffloadPlayer::FillBuffer(void* aData, size_t aSize)
{
    CHECK(mAudioSink.get());
    if (mReachedEOS) {
        return 0;
    }
    size_t sizeDone = 0;
    size_t sizeRemaining = aSize;
    int64_t seekTimeUs = -1;
    while (sizeRemaining > 0) {
        MediaSource::ReadOptions options;
        bool refreshSeekTime = false;
        {
            android::Mutex::Autolock autoLock(mLock);
            if (mSeekTarget.IsValid()) {
                seekTimeUs = mSeekTarget.mTime;
                options.setSeekTo(seekTimeUs);
                refreshSeekTime = true;
                if (mInputBuffer) {
                    mInputBuffer->release();
                    mInputBuffer = nullptr;
                }
            }
        }
        if (!mInputBuffer) {
            status_t err;
            err = mSource->read(&mInputBuffer, &options);
            CHECK((!err && mInputBuffer) || (err && !mInputBuffer));
            android::Mutex::Autolock autoLock(mLock);
            if (err != OK) {
                if (mSeekTarget.IsValid()) {
                    mSeekTarget.Reset();
                }
                AUDIO_OFFLOAD_LOG(PR_LOG_ERROR, ("Error while reading media source %d "
                                                 "Ok to receive EOS error at end", err));
                if (!mReachedEOS) {
                    // After seek there is a possible race condition if
                    // OffloadThread is observing state_stopping_1 before
                    // framesReady() > 0. Ensure sink stop is called
                    // after last buffer is released. This ensures the
                    // partial buffer is written to the driver before
                    // stopping one is observed. The drawback is that
                    // there will be an unnecessary call to the parser
                    // after parser signalled EOS.
                    if (sizeDone > 0) {
                        AUDIO_OFFLOAD_LOG(PR_LOG_DEBUG, ("send Partial buffer down"));
                        AUDIO_OFFLOAD_LOG(PR_LOG_DEBUG, ("skip calling stop till next"
                                                         " fillBuffer"));
                        break;
                    }
                    // no more buffers to push - stop() and wait for STREAM_END
                    // don't set mReachedEOS until stream end received
                    mAudioSink->Stop();
                }
                break;
            }
            if (mInputBuffer->range_length() != 0) {
                CHECK(mInputBuffer->meta_data()->findInt64(
                        kKeyTime, &mPositionTimeMediaUs));
            }
            if (mSeekTarget.IsValid() && seekTimeUs == mSeekTarget.mTime) {
                MOZ_ASSERT(mSeekTarget.IsValid());
                mSeekTarget.Reset();
                if (!mSeekPromise.IsEmpty()) {
                    AUDIO_OFFLOAD_LOG(PR_LOG_DEBUG, ("FillBuffer posting SEEK_COMPLETE"));
                    MediaDecoder::SeekResolveValue val(mReachedEOS, mSeekTarget.mEventVisibility);
                    mSeekPromise.Resolve(val, __func__);
                }
            } else if (mSeekTarget.IsValid()) {
                AUDIO_OFFLOAD_LOG(PR_LOG_DEBUG, ("seek is updated during unlocking mLock"));
            }
            if (refreshSeekTime) {
                NotifyPositionChanged();
                // Need to adjust mStartPosUs for offload decoding since the parser
                // might not be able to get the exact seek time requested.
                mStartPosUs = mPositionTimeMediaUs;
                AUDIO_OFFLOAD_LOG(PR_LOG_DEBUG, ("Adjust seek time to: %.2f",
                                                 mStartPosUs / 1E6));
            }
        }
        if (mInputBuffer->range_length() == 0) {
            mInputBuffer->release();
            mInputBuffer = nullptr;
            continue;
        }
        size_t copy = sizeRemaining;
        //......... part of the code omitted here .........
Example 14: start
status_t AudioPlayer::start(bool sourceAlreadyStarted) {
    CHECK(!mStarted);
    CHECK(mSource != NULL);
    status_t err;
    if (!sourceAlreadyStarted) {
#ifdef QCOM_HARDWARE
        mSourcePaused = false;
#endif
        err = mSource->start();
        if (err != OK) {
            return err;
        }
    }
    // We allow an optional INFO_FORMAT_CHANGED at the very beginning
    // of playback, if there is one, getFormat below will retrieve the
    // updated format, if there isn't, we'll stash away the valid buffer
    // of data to be used on the first audio callback.
    CHECK(mFirstBuffer == NULL);
    MediaSource::ReadOptions options;
    if (mSeeking) {
        options.setSeekTo(mSeekTimeUs);
        mSeeking = false;
    }
    mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
    if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
        LOGV("INFO_FORMAT_CHANGED!!!");
        CHECK(mFirstBuffer == NULL);
        mFirstBufferResult = OK;
        mIsFirstBuffer = false;
    } else {
        mIsFirstBuffer = true;
    }
    sp<MetaData> format = mSource->getFormat();
    const char *mime;
    bool success = format->findCString(kKeyMIMEType, &mime);
    CHECK(success);
    CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
    success = format->findInt32(kKeySampleRate, &mSampleRate);
    CHECK(success);
    int32_t numChannels;
    success = format->findInt32(kKeyChannelCount, &numChannels);
    CHECK(success);
    if (mAudioSink.get() != NULL) {
        status_t err = mAudioSink->open(
                mSampleRate, numChannels, AUDIO_FORMAT_PCM_16_BIT,
                DEFAULT_AUDIOSINK_BUFFERCOUNT,
#ifdef STE_HARDWARE
                &AudioPlayer::AudioSinkCallback, this,
                &AudioPlayer::LatencyCallback);
#else
                &AudioPlayer::AudioSinkCallback, this);
#endif
        if (err != OK) {
            if (mFirstBuffer != NULL) {
                mFirstBuffer->release();
                mFirstBuffer = NULL;
            }
            if (!sourceAlreadyStarted) {
                mSource->stop();
            }
            return err;
        }
        mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
        mFrameSize = mAudioSink->frameSize();
        mAudioSink->start();
    } else {
        //......... part of the code omitted here .........
Example 15: ReadVideo
bool OmxDecoder::ReadVideo(VideoFrame *aFrame, int64_t aTimeUs,
                           bool aKeyframeSkip, bool aDoSeek)
{
    if (!mVideoSource.get())
        return false;
    ReleaseVideoBuffer();
    status_t err;
    if (aDoSeek) {
        {
            Mutex::Autolock autoLock(mSeekLock);
            mIsVideoSeeking = true;
        }
        MediaSource::ReadOptions options;
        options.setSeekTo(aTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
        err = mVideoSource->read(&mVideoBuffer, &options);
        {
            Mutex::Autolock autoLock(mSeekLock);
            mIsVideoSeeking = false;
            ReleaseAllPendingVideoBuffersLocked();
        }
    } else {
        err = mVideoSource->read(&mVideoBuffer);
    }
    if (err == OK && mVideoBuffer->range_length() > 0) {
        int64_t timeUs;
        int64_t durationUs;
        int32_t unreadable;
        int32_t keyFrame;
        if (!mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
            NS_WARNING("OMX decoder did not return frame time");
            return false;
        }
        if (!mVideoBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) {
            keyFrame = 0;
        }
        if (!mVideoBuffer->meta_data()->findInt32(kKeyIsUnreadable, &unreadable)) {
            unreadable = 0;
        }
        mozilla::layers::SurfaceDescriptor *descriptor = nullptr;
        if ((mVideoBuffer->graphicBuffer().get())) {
            descriptor = mNativeWindow->getSurfaceDescriptorFromBuffer(mVideoBuffer->graphicBuffer().get());
        }
        if (descriptor) {
            // Change the descriptor's size to the video's size. There are cases where
            // the GraphicBuffer's size and the actual video size are different.
            // See Bug 850566.
            const mozilla::layers::SurfaceDescriptorGralloc& grallocDesc = descriptor->get_SurfaceDescriptorGralloc();
            mozilla::layers::SurfaceDescriptor newDescriptor = mozilla::layers::SurfaceDescriptorGralloc(grallocDesc.bufferParent(),
                    grallocDesc.bufferChild(), nsIntSize(mVideoWidth, mVideoHeight), grallocDesc.external());
            aFrame->mGraphicBuffer = new mozilla::layers::VideoGraphicBuffer(this, mVideoBuffer, &newDescriptor);
            aFrame->mRotation = mVideoRotation;
            aFrame->mTimeUs = timeUs;
            aFrame->mEndTimeUs = timeUs + durationUs;
            aFrame->mKeyFrame = keyFrame;
            aFrame->Y.mWidth = mVideoWidth;
            aFrame->Y.mHeight = mVideoHeight;
        } else {
            char *data = static_cast<char *>(mVideoBuffer->data()) + mVideoBuffer->range_offset();
            size_t length = mVideoBuffer->range_length();
            if (unreadable) {
                LOG(PR_LOG_DEBUG, "video frame is unreadable");
            }
            if (!ToVideoFrame(aFrame, timeUs, data, length, keyFrame)) {
                return false;
            }
            aFrame->mEndTimeUs = timeUs + durationUs;
        }
        if (aKeyframeSkip && timeUs < aTimeUs) {
            aFrame->mShouldSkip = true;
        }
    }
    else if (err == INFO_FORMAT_CHANGED) {
        // If the format changed, update our cached info.
        if (!SetVideoFormat()) {
            return false;
        } else {
            return ReadVideo(aFrame, aTimeUs, aKeyframeSkip, aDoSeek);
        }
    }
    else if (err == ERROR_END_OF_STREAM) {
        return false;
    }
    else if (err == UNKNOWN_ERROR) {
        // This is sometimes used to mean "out of memory", but regardless,
        // don't keep trying to decode if the decoder doesn't want to.
        //......... part of the code omitted here .........