This article collects and summarizes typical usage examples of the C++ method MediaSource::ReadOptions::setSeekTo. If you are wondering how ReadOptions::setSeekTo is used in practice, or are looking for concrete examples of calling it, the curated code samples below should help. You can also explore further usage examples of the enclosing class, MediaSource::ReadOptions.
The following presents 15 code examples of the ReadOptions::setSeekTo method, sorted by popularity by default.
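Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: populate a MediaSource::ReadOptions with the target time via setSeekTo(), pass it to the one read() call that should perform the seek, then call clearSeekTo() so any further reads continue sequentially. The helper name readAfterSeek and the surrounding setup are illustrative assumptions, not code taken from any of the examples below.
// Minimal sketch (not from the examples below) of the common
// setSeekTo / read / clearSeekTo pattern. "source" is assumed to be any
// already-started android::MediaSource (an extractor track or a decoder);
// error handling is reduced to the essentials.
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>

using namespace android;

static status_t readAfterSeek(const sp<MediaSource> &source, int64_t seekTimeUs) {
    MediaSource::ReadOptions options;
    if (seekTimeUs >= 0) {
        // Ask for the sync sample at or before the requested time.
        options.setSeekTo(seekTimeUs,
                          MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
    }
    MediaBuffer *buffer = NULL;
    status_t err = source->read(&buffer, &options);
    // Only the read above honours the seek; clear it so later reads stay linear.
    options.clearSeekTo();
    if (err != OK) {
        return err;  // e.g. ERROR_END_OF_STREAM or INFO_FORMAT_CHANGED
    }
    int64_t timeUs = 0;
    buffer->meta_data()->findInt64(kKeyTime, &timeUs);  // position actually reached
    buffer->release();
    return OK;
}
The examples that follow apply this same sequence in different settings (audio playback, thumbnail extraction, seek stress-testing, offloaded audio, video decoding); they differ mainly in the SeekMode they request and in the locking around the seek state.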
Example 1: fillBuffer
size_t AudioPlayer::fillBuffer(void *data, size_t size) {
if (mNumFramesPlayed == 0) {
LOGV("AudioCallback");
}
if (mReachedEOS) {
return 0;
}
size_t size_done = 0;
size_t size_remaining = size;
while (size_remaining > 0) {
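// A fresh ReadOptions is built on each pass through this loop; a pending
// seek (mSeeking) is transferred into it below while mLock is held, so it
// only affects the next read().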
MediaSource::ReadOptions options;
{
Mutex::Autolock autoLock(mLock);
if (mSeeking) {
if (mIsFirstBuffer) {
if (mFirstBuffer != NULL) {
mFirstBuffer->release();
mFirstBuffer = NULL;
}
mIsFirstBuffer = false;
}
options.setSeekTo(mSeekTimeUs);
if (mInputBuffer != NULL) {
mInputBuffer->release();
mInputBuffer = NULL;
}
mSeeking = false;
if (mObserver) {
mObserver->postAudioSeekComplete();
}
}
}
if (mInputBuffer == NULL) {
status_t err;
if (mIsFirstBuffer) {
mInputBuffer = mFirstBuffer;
mFirstBuffer = NULL;
err = mFirstBufferResult;
mIsFirstBuffer = false;
} else {
err = mSource->read(&mInputBuffer, &options);
}
CHECK((err == OK && mInputBuffer != NULL)
|| (err != OK && mInputBuffer == NULL));
Mutex::Autolock autoLock(mLock);
if (err != OK) {
if (mObserver && !mReachedEOS) {
mObserver->postAudioEOS();
}
mReachedEOS = true;
mFinalStatus = err;
break;
}
CHECK(mInputBuffer->meta_data()->findInt64(
kKeyTime, &mPositionTimeMediaUs));
mPositionTimeRealUs =
((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
/ mSampleRate;
LOGV("buffer->size() = %d, "
"mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
mInputBuffer->range_length(),
mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
}
if (mInputBuffer->range_length() == 0) {
mInputBuffer->release();
mInputBuffer = NULL;
continue;
}
size_t copy = size_remaining;
if (copy > mInputBuffer->range_length()) {
copy = mInputBuffer->range_length();
}
memcpy((char *)data + size_done,
(const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
copy);
mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
mInputBuffer->range_length() - copy);
//......... (rest of the code omitted) .........
Example 2: converter
static VideoFrame *extractVideoFrameWithCodecFlags(
OMXClient *client,
const sp<MetaData> &trackMeta,
const sp<MediaSource> &source,
uint32_t flags,
int64_t frameTimeUs,
int seekMode) {
sp<MediaSource> decoder =
OMXCodec::Create(
client->interface(), source->getFormat(), false, source,
NULL, flags | OMXCodec::kClientNeedsFramebuffer);
if (decoder.get() == NULL) {
ALOGV("unable to instantiate video decoder.");
return NULL;
}
status_t err = decoder->start();
if (err != OK) {
ALOGW("OMXCodec::start returned error %d (0x%08x)\n", err, err);
return NULL;
}
// Read one output buffer, ignore format change notifications
// and spurious empty buffers.
MediaSource::ReadOptions options;
if (seekMode < MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC ||
seekMode > MediaSource::ReadOptions::SEEK_CLOSEST) {
ALOGE("Unknown seek mode: %d", seekMode);
return NULL;
}
MediaSource::ReadOptions::SeekMode mode =
static_cast<MediaSource::ReadOptions::SeekMode>(seekMode);
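// Pick the seek target: a negative frameTimeUs means "use the container's
// suggested thumbnail time (kKeyThumbnailTime, defaulting to 0)"; otherwise
// seek to the caller-supplied time using the validated seek mode.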
int64_t thumbNailTime;
if (frameTimeUs < 0) {
if (!trackMeta->findInt64(kKeyThumbnailTime, &thumbNailTime)
|| thumbNailTime < 0) {
thumbNailTime = 0;
}
options.setSeekTo(thumbNailTime, mode);
} else {
thumbNailTime = -1;
options.setSeekTo(frameTimeUs, mode);
}
MediaBuffer *buffer = NULL;
do {
if (buffer != NULL) {
buffer->release();
buffer = NULL;
}
err = decoder->read(&buffer, &options);
options.clearSeekTo();
} while (err == INFO_FORMAT_CHANGED
|| (buffer != NULL && buffer->range_length() == 0));
if (err != OK) {
CHECK(buffer == NULL);
ALOGV("decoding frame failed.");
decoder->stop();
return NULL;
}
ALOGV("successfully decoded video frame.");
int32_t unreadable;
if (buffer->meta_data()->findInt32(kKeyIsUnreadable, &unreadable)
&& unreadable != 0) {
ALOGV("video frame is unreadable, decoder does not give us access "
"to the video data.");
buffer->release();
buffer = NULL;
decoder->stop();
return NULL;
}
int64_t timeUs;
CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
if (thumbNailTime >= 0) {
if (timeUs != thumbNailTime) {
const char *mime;
CHECK(trackMeta->findCString(kKeyMIMEType, &mime));
ALOGV("thumbNailTime = %lld us, timeUs = %lld us, mime = %s",
thumbNailTime, timeUs, mime);
}
}
sp<MetaData> meta = decoder->getFormat();
//......... (rest of the code omitted) .........
Example 3: testSeek
status_t Harness::testSeek(
const char *componentName, const char *componentRole) {
bool isEncoder =
!strncmp(componentRole, "audio_encoder.", 14)
|| !strncmp(componentRole, "video_encoder.", 14);
if (isEncoder) {
// Not testing seek behaviour for encoders.
printf(" * Not testing seek functionality for encoders.\n");
return OK;
}
const char *mime = GetMimeFromComponentRole(componentRole);
if (!mime) {
printf(" * Cannot perform seek test with this componentRole (%s)\n",
componentRole);
return OK;
}
sp<MediaSource> source = CreateSourceForMime(mime);
if (source == NULL) {
printf(" * Unable to open test content for type '%s', "
"skipping test of componentRole %s\n",
mime, componentRole);
return OK;
}
sp<MediaSource> seekSource = CreateSourceForMime(mime);
if (source == NULL || seekSource == NULL) {
return UNKNOWN_ERROR;
}
CHECK_EQ(seekSource->start(), (status_t)OK);
sp<MediaSource> codec = OMXCodec::Create(
mOMX, source->getFormat(), false /* createEncoder */,
source, componentName);
CHECK(codec != NULL);
CHECK_EQ(codec->start(), (status_t)OK);
int64_t durationUs;
CHECK(source->getFormat()->findInt64(kKeyDuration, &durationUs));
ALOGI("stream duration is %lld us (%.2f secs)",
durationUs, durationUs / 1E6);
static const int32_t kNumIterations = 5000;
// We are always going to seek beyond EOS in the first iteration (i == 0)
// followed by a linear read for the second iteration (i == 1).
// After that it's all random.
for (int32_t i = 0; i < kNumIterations; ++i) {
int64_t requestedSeekTimeUs;
int64_t actualSeekTimeUs;
MediaSource::ReadOptions options;
double r = uniform_rand();
if ((i == 1) || (i > 0 && r < 0.5)) {
// 50% chance of just continuing to decode from last position.
requestedSeekTimeUs = -1;
ALOGI("requesting linear read");
} else {
if (i == 0 || r < 0.55) {
// 5% chance of seeking beyond end of stream.
requestedSeekTimeUs = durationUs;
ALOGI("requesting seek beyond EOF");
} else {
requestedSeekTimeUs =
(int64_t)(uniform_rand() * durationUs);
ALOGI("requesting seek to %lld us (%.2f secs)",
requestedSeekTimeUs, requestedSeekTimeUs / 1E6);
}
MediaBuffer *buffer = NULL;
options.setSeekTo(
requestedSeekTimeUs, MediaSource::ReadOptions::SEEK_NEXT_SYNC);
if (seekSource->read(&buffer, &options) != OK) {
CHECK(buffer == NULL);
actualSeekTimeUs = -1;
} else {
CHECK(buffer != NULL);
CHECK(buffer->meta_data()->findInt64(kKeyTime, &actualSeekTimeUs));
CHECK(actualSeekTimeUs >= 0);
buffer->release();
buffer = NULL;
//......... (rest of the code omitted) .........
Example 4: FillBuffer
size_t AudioOffloadPlayer::FillBuffer(void* aData, size_t aSize)
{
CHECK(mAudioSink.get());
if (mReachedEOS) {
return 0;
}
size_t sizeDone = 0;
size_t sizeRemaining = aSize;
int64_t seekTimeUs = -1;
while (sizeRemaining > 0) {
MediaSource::ReadOptions options;
bool refreshSeekTime = false;
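// The pending seek target is sampled under mLock: it is copied into the
// ReadOptions, the stale input buffer is dropped, and refreshSeekTime later
// triggers a position update once the new buffer's timestamp is known.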
{
android::Mutex::Autolock autoLock(mLock);
if (mSeekTarget.IsValid()) {
seekTimeUs = mSeekTarget.GetTime().ToMicroseconds();
options.setSeekTo(seekTimeUs);
refreshSeekTime = true;
if (mInputBuffer) {
mInputBuffer->release();
mInputBuffer = nullptr;
}
}
}
if (!mInputBuffer) {
status_t err;
err = mSource->read(&mInputBuffer, &options);
CHECK((!err && mInputBuffer) || (err && !mInputBuffer));
android::Mutex::Autolock autoLock(mLock);
if (err != OK) {
if (mSeekTarget.IsValid()) {
mSeekTarget.Reset();
}
AUDIO_OFFLOAD_LOG(LogLevel::Error, ("Error while reading media source %d "
"Ok to receive EOS error at end", err));
if (!mReachedEOS) {
// After seek there is a possible race condition if
// OffloadThread is observing state_stopping_1 before
// framesReady() > 0. Ensure sink stop is called
// after last buffer is released. This ensures the
// partial buffer is written to the driver before
// stopping one is observed. The drawback is that
// there will be an unnecessary call to the parser
// after parser signalled EOS.
if (sizeDone > 0) {
AUDIO_OFFLOAD_LOG(LogLevel::Debug, ("send Partial buffer down"));
AUDIO_OFFLOAD_LOG(LogLevel::Debug, ("skip calling stop till next"
" fillBuffer"));
break;
}
// no more buffers to push - stop() and wait for STREAM_END
// don't set mReachedEOS until stream end received
mAudioSink->Stop();
}
break;
}
if(mInputBuffer->range_length() != 0) {
CHECK(mInputBuffer->meta_data()->findInt64(
kKeyTime, &mPositionTimeMediaUs));
}
if (mSeekTarget.IsValid() &&
seekTimeUs == mSeekTarget.GetTime().ToMicroseconds()) {
MOZ_ASSERT(mSeekTarget.IsValid());
mSeekTarget.Reset();
if (!mSeekPromise.IsEmpty()) {
AUDIO_OFFLOAD_LOG(LogLevel::Debug, ("FillBuffer posting SEEK_COMPLETE"));
MediaDecoder::SeekResolveValue val(mReachedEOS, mSeekTarget.mEventVisibility);
mSeekPromise.Resolve(val, __func__);
}
} else if (mSeekTarget.IsValid()) {
AUDIO_OFFLOAD_LOG(LogLevel::Debug, ("seek is updated during unlocking mLock"));
}
if (refreshSeekTime) {
NotifyPositionChanged();
// need to adjust the mStartPosUs for offload decoding since parser
// might not be able to get the exact seek time requested.
mStartPosUs = mPositionTimeMediaUs;
AUDIO_OFFLOAD_LOG(LogLevel::Debug, ("Adjust seek time to: %.2f",
mStartPosUs / 1E6));
}
}
if (mInputBuffer->range_length() == 0) {
mInputBuffer->release();
mInputBuffer = nullptr;
continue;
}
//......... (rest of the code omitted) .........
Example 5: Process
void Process()
{
Frame* frame;
int32_t w, h;
int decode_done = 0;
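// Unlike most of the other examples, readopt outlives the loop: a reset
// request becomes setSeekTo(0) for exactly one read and is cleared again
// right after the decoder->read() call below.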
MediaSource::ReadOptions readopt;
// GLuint texid;
//SetPriority(THREAD_PRIORITY_ABOVE_NORMAL);
do
{
#if defined(DEBUG_VERBOSE)
unsigned int time = XbmcThreads::SystemClockMillis();
CLog::Log(LOGDEBUG, "%s: >>> Handling frame\n", CLASSNAME);
#endif
p->cur_frame = NULL;
frame = (Frame*)malloc(sizeof(Frame));
if (!frame)
{
decode_done = 1;
continue;
}
frame->eglimg = EGL_NO_IMAGE_KHR;
frame->medbuf = NULL;
if (p->resetting)
{
readopt.setSeekTo(0);
p->resetting = false;
}
frame->status = p->decoder->read(&frame->medbuf, &readopt);
readopt.clearSeekTo();
if (frame->status == OK)
{
if (!frame->medbuf->graphicBuffer().get()) // no gralloc buffer: software-decoded frame
{
if (frame->medbuf->range_length() == 0)
{
CLog::Log(LOGERROR, "%s - Invalid buffer\n", CLASSNAME);
frame->status = VC_ERROR;
decode_done = 1;
frame->medbuf->release();
frame->medbuf = NULL;
}
else
frame->format = RENDER_FMT_YUV420P;
}
else
frame->format = RENDER_FMT_EGLIMG;
}
if (frame->status == OK)
{
sp<MetaData> outFormat = p->decoder->getFormat();
outFormat->findInt32(kKeyWidth , &w);
outFormat->findInt32(kKeyHeight, &h);
frame->pts = 0;
frame->width = w;
frame->height = h;
frame->medbuf->meta_data()->findInt64(kKeyTime, &(frame->pts));
}
else if (frame->status == INFO_FORMAT_CHANGED)
{
int32_t cropLeft, cropTop, cropRight, cropBottom;
sp<MetaData> outFormat = p->decoder->getFormat();
outFormat->findInt32(kKeyWidth , &p->width);
outFormat->findInt32(kKeyHeight, &p->height);
cropLeft = cropTop = cropRight = cropBottom = 0;
if (!outFormat->findRect(kKeyCropRect, &cropLeft, &cropTop, &cropRight, &cropBottom))
{
p->x = 0;
p->y = 0;
}
else
{
p->x = cropLeft;
p->y = cropTop;
p->width = cropRight - cropLeft + 1;
p->height = cropBottom - cropTop + 1;
}
outFormat->findInt32(kKeyColorFormat, &p->videoColorFormat);
if (!outFormat->findInt32(kKeyStride, &p->videoStride))
p->videoStride = p->width;
if (!outFormat->findInt32(kKeySliceHeight, &p->videoSliceHeight))
p->videoSliceHeight = p->height;
#if defined(DEBUG_VERBOSE)
CLog::Log(LOGDEBUG, ">>> new format col:%d, w:%d, h:%d, sw:%d, sh:%d, ctl:%d,%d; cbr:%d,%d\n", p->videoColorFormat, p->width, p->height, p->videoStride, p->videoSliceHeight, cropTop, cropLeft, cropBottom, cropRight);
#endif
if (frame->medbuf)
frame->medbuf->release();
frame->medbuf = NULL;
free(frame);
continue;
}
//......... (rest of the code omitted) .........
Example 6: start
status_t AudioPlayer::start(bool sourceAlreadyStarted) {
CHECK(!mStarted);
CHECK(mSource != NULL);
status_t err;
if (!sourceAlreadyStarted) {
mSourcePaused = false;
err = mSource->start();
if (err != OK) {
return err;
}
}
ALOGD("start of Playback, useOffload %d",useOffload());
// We allow an optional INFO_FORMAT_CHANGED at the very beginning
// of playback, if there is one, getFormat below will retrieve the
// updated format, if there isn't, we'll stash away the valid buffer
// of data to be used on the first audio callback.
CHECK(mFirstBuffer == NULL);
MediaSource::ReadOptions options;
if (mSeeking) {
options.setSeekTo(mSeekTimeUs);
}
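// The first buffer is fetched up front (honouring a pending seek); -EAGAIN
// is treated as a transient condition and the read is simply retried.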
do {
mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
} while (mFirstBufferResult == -EAGAIN);
if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
ALOGV("INFO_FORMAT_CHANGED!!!");
CHECK(mFirstBuffer == NULL);
mFirstBufferResult = OK;
mIsFirstBuffer = false;
if (mSeeking) {
mPositionTimeRealUs = 0;
mPositionTimeMediaUs = mSeekTimeUs;
mSeeking = false;
}
} else {
mIsFirstBuffer = true;
if (mSeeking) {
mPositionTimeRealUs = 0;
if (mFirstBuffer == NULL || !mFirstBuffer->meta_data()->findInt64(
kKeyTime, &mPositionTimeMediaUs)) {
return UNKNOWN_ERROR;
}
mSeeking = false;
}
}
sp<MetaData> format = mSource->getFormat();
const char *mime;
bool success = format->findCString(kKeyMIMEType, &mime);
CHECK(success);
CHECK(useOffload() || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
success = format->findInt32(kKeySampleRate, &mSampleRate);
CHECK(success);
int32_t numChannels, channelMask = 0;
success = format->findInt32(kKeyChannelCount, &numChannels);
CHECK(success);
format->findInt64(kKeyDuration, &mDurationUs);
if(!format->findInt32(kKeyChannelMask, &channelMask)) {
// log only when there's a risk of ambiguity of channel mask selection
ALOGI_IF(numChannels > 2,
"source format didn't specify channel mask, using (%d) channel order", numChannels);
channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
} else if (channelMask == 0) {
channelMask = audio_channel_out_mask_from_count(numChannels);
ALOGV("channel mask is zero,update from channel count %d", channelMask);
}
audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
int32_t bitWidth = 16;
#if defined(ENABLE_AV_ENHANCEMENTS) || defined(ENABLE_OFFLOAD_ENHANCEMENTS)
format->findInt32(kKeySampleBits, &bitWidth);
#endif
if (useOffload()) {
if (mapMimeToAudioFormat(audioFormat, mime) != OK) {
ALOGE("Couldn't map mime type \"%s\" to a valid AudioSystem::audio_format", mime);
audioFormat = AUDIO_FORMAT_INVALID;
} else if (audio_is_linear_pcm(audioFormat) || audio_is_offload_pcm(audioFormat)) {
#if defined(QCOM_HARDWARE) || defined(ENABLE_OFFLOAD_ENHANCEMENTS)
// Override audio format for PCM offload
if (bitWidth >= 24) {
ALOGD("24-bit PCM offload enabled format=%d", audioFormat);
audioFormat = AUDIO_FORMAT_PCM_24_BIT_OFFLOAD;
//......... (rest of the code omitted) .........
Example 7: CHECK
void NuPlayer::GenericSource::readBuffer(
bool audio, int64_t seekTimeUs, int64_t *actualTimeUs) {
Track *track = audio ? &mAudioTrack : &mVideoTrack;
CHECK(track->mSource != NULL);
if (actualTimeUs) {
*actualTimeUs = seekTimeUs;
}
MediaSource::ReadOptions options;
bool seeking = false;
if (seekTimeUs >= 0) {
options.setSeekTo(seekTimeUs);
seeking = true;
}
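// The seek is consumed by the first read() in the loop below; clearSeekTo()
// switches the remaining iterations back to sequential reads, and a
// DISCONTINUITY_SEEK marker is queued ahead of the new access unit.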
for (;;) {
MediaBuffer *mbuf;
status_t err = track->mSource->read(&mbuf, &options);
options.clearSeekTo();
if (err == OK) {
size_t outLength = mbuf->range_length();
if (audio && mAudioIsVorbis) {
outLength += sizeof(int32_t);
}
sp<ABuffer> buffer = new ABuffer(outLength);
memcpy(buffer->data(),
(const uint8_t *)mbuf->data() + mbuf->range_offset(),
mbuf->range_length());
if (audio && mAudioIsVorbis) {
int32_t numPageSamples;
if (!mbuf->meta_data()->findInt32(
kKeyValidSamples, &numPageSamples)) {
numPageSamples = -1;
}
memcpy(buffer->data() + mbuf->range_length(),
&numPageSamples,
sizeof(numPageSamples));
}
int64_t timeUs;
CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
buffer->meta()->setInt64("timeUs", timeUs);
if (actualTimeUs) {
*actualTimeUs = timeUs;
}
mbuf->release();
mbuf = NULL;
if (seeking) {
track->mPackets->queueDiscontinuity(
ATSParser::DISCONTINUITY_SEEK, NULL);
}
track->mPackets->queueAccessUnit(buffer);
break;
} else if (err == INFO_FORMAT_CHANGED) {
#if 0
track->mPackets->queueDiscontinuity(
ATSParser::DISCONTINUITY_FORMATCHANGE, NULL);
#endif
} else {
track->mPackets->signalEOS(err);
break;
}
}
}
Example 8: ReadVideo
bool OmxDecoder::ReadVideo(VideoFrame *aFrame, int64_t aTimeUs,
bool aKeyframeSkip, bool aDoSeek)
{
if (!mVideoSource.get())
return false;
ReleaseVideoBuffer();
status_t err;
if (aDoSeek) {
{
Mutex::Autolock autoLock(mSeekLock);
ReleaseAllPendingVideoBuffersLocked();
mIsVideoSeeking = true;
}
MediaSource::ReadOptions options;
MediaSource::ReadOptions::SeekMode seekMode;
// If the last timestamp of decoded frame is smaller than seekTime,
// seek to next key frame. Otherwise seek to the previous one.
OD_LOG("SeekTime: %lld, mLastSeekTime:%lld", aTimeUs, mLastSeekTime);
if (mLastSeekTime == -1 || mLastSeekTime > aTimeUs) {
seekMode = MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC;
} else {
seekMode = MediaSource::ReadOptions::SEEK_NEXT_SYNC;
}
mLastSeekTime = aTimeUs;
bool findNextBuffer = true;
while (findNextBuffer) {
options.setSeekTo(aTimeUs, seekMode);
findNextBuffer = false;
if (mIsVideoSeeking) {
err = mVideoSource->read(&mVideoBuffer, &options);
Mutex::Autolock autoLock(mSeekLock);
mIsVideoSeeking = false;
PostReleaseVideoBuffer(nullptr, FenceHandle());
}
else {
err = mVideoSource->read(&mVideoBuffer);
}
// If there is no next Keyframe, jump to the previous key frame.
if (err == ERROR_END_OF_STREAM && seekMode == MediaSource::ReadOptions::SEEK_NEXT_SYNC) {
seekMode = MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC;
findNextBuffer = true;
{
Mutex::Autolock autoLock(mSeekLock);
mIsVideoSeeking = true;
}
continue;
} else if (err != OK) {
OD_LOG("Unexpected error when seeking to %lld", aTimeUs);
break;
}
// For some codecs, the length of first decoded frame after seek is 0.
// Need to ignore it and continue to find the next one
if (mVideoBuffer->range_length() == 0) {
PostReleaseVideoBuffer(mVideoBuffer, FenceHandle());
findNextBuffer = true;
}
}
aDoSeek = false;
} else {
err = mVideoSource->read(&mVideoBuffer);
}
aFrame->mSize = 0;
if (err == OK) {
int64_t timeUs;
int32_t unreadable;
int32_t keyFrame;
size_t length = mVideoBuffer->range_length();
if (!mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs) ) {
NS_WARNING("OMX decoder did not return frame time");
return false;
}
if (!mVideoBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) {
keyFrame = 0;
}
if (!mVideoBuffer->meta_data()->findInt32(kKeyIsUnreadable, &unreadable)) {
unreadable = 0;
}
RefPtr<mozilla::layers::TextureClient> textureClient;
if ((mVideoBuffer->graphicBuffer().get())) {
textureClient = mNativeWindow->getTextureClientFromBuffer(mVideoBuffer->graphicBuffer().get());
}
if (textureClient) {
// Manually increment reference count to keep MediaBuffer alive
// during TextureClient is in use.
mVideoBuffer->add_ref();
GrallocTextureClientOGL* grallocClient = static_cast<GrallocTextureClientOGL*>(textureClient.get());
grallocClient->SetMediaBuffer(mVideoBuffer);
// Set recycle callback for TextureClient
//......... (rest of the code omitted) .........
Example 9: fillBuffer
size_t VideoEditorAudioPlayer::fillBuffer(void *data, size_t size) {
if (mReachedEOS) {
return 0;
}
size_t size_done = 0;
size_t size_remaining = size;
M4OSA_ERR err = M4NO_ERROR;
M4AM_Buffer16 bgFrame = {NULL, 0};
M4AM_Buffer16 mixFrame = {NULL, 0};
M4AM_Buffer16 ptFrame = {NULL, 0};
int64_t currentSteamTS = 0;
int64_t startTimeForBT = 0;
M4OSA_Float fPTVolLevel =
((M4OSA_Float)mBGAudioStoryBoardCurrentMediaVolumeVal)/100;
M4OSA_Int16 *pPTMdata=NULL;
M4OSA_UInt32 uiPCMsize = 0;
bool postSeekComplete = false;
bool postEOS = false;
while ((size_remaining > 0)&&(err==M4NO_ERROR)) {
MediaSource::ReadOptions options;
{
Mutex::Autolock autoLock(mLock);
if (mSeeking) {
if (mIsFirstBuffer) {
if (mFirstBuffer != NULL) {
mFirstBuffer->release();
mFirstBuffer = NULL;
}
mIsFirstBuffer = false;
}
options.setSeekTo(mSeekTimeUs);
if (mInputBuffer != NULL) {
mInputBuffer->release();
mInputBuffer = NULL;
}
mSeeking = false;
if (mObserver) {
postSeekComplete = true;
}
}
}
if (mInputBuffer == NULL) {
status_t status = OK;
if (mIsFirstBuffer) {
mInputBuffer = mFirstBuffer;
mFirstBuffer = NULL;
status = mFirstBufferResult;
mIsFirstBuffer = false;
} else {
{
Mutex::Autolock autoLock(mLock);
status = mSource->read(&mInputBuffer, &options);
}
// Data is Primary Track, mix with background track
// after reading same size from Background track PCM file
if (status == OK)
{
// Mix only when skim point is after startTime of BT
if (((mBGAudioStoryBoardSkimTimeStamp* 1000) +
(mPositionTimeMediaUs - mSeekTimeUs)) >=
(int64_t)(mAudioMixSettings->uiAddCts * 1000)) {
ALOGV("VideoEditorAudioPlayer::INSIDE MIXING");
ALOGV("Checking %lld <= %lld",
mBGAudioPCMFileSeekPoint-mBGAudioPCMFileOriginalSeekPoint,
mBGAudioPCMFileTrimmedLength);
M4OSA_Void* ptr;
ptr = (M4OSA_Void*)((unsigned int)mInputBuffer->data() +
mInputBuffer->range_offset());
M4OSA_UInt32 len = mInputBuffer->range_length();
M4OSA_Context fp = M4OSA_NULL;
uiPCMsize = (mInputBuffer->range_length())/2;
pPTMdata = (M4OSA_Int16*) ((uint8_t*) mInputBuffer->data()
+ mInputBuffer->range_offset());
ALOGV("mix with background malloc to do len %d", len);
bgFrame.m_dataAddress = (M4OSA_UInt16*)M4OSA_32bitAlignedMalloc( len, 1,
(M4OSA_Char*)"bgFrame");
bgFrame.m_bufferSize = len;
mixFrame.m_dataAddress = (M4OSA_UInt16*)M4OSA_32bitAlignedMalloc(len, 1,
//......... (rest of the code omitted) .........
Example 10: ReadVideo
bool OmxDecoder::ReadVideo(VideoFrame *aFrame, int64_t aTimeUs,
bool aKeyframeSkip, bool aDoSeek)
{
if (!mVideoSource.get())
return false;
ReleaseVideoBuffer();
status_t err;
if (aDoSeek) {
{
Mutex::Autolock autoLock(mSeekLock);
mIsVideoSeeking = true;
}
MediaSource::ReadOptions options;
options.setSeekTo(aTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
err = mVideoSource->read(&mVideoBuffer, &options);
{
Mutex::Autolock autoLock(mSeekLock);
mIsVideoSeeking = false;
ReleaseAllPendingVideoBuffersLocked();
}
aDoSeek = false;
} else {
err = mVideoSource->read(&mVideoBuffer);
}
aFrame->mSize = 0;
if (err == OK) {
int64_t timeUs;
int32_t unreadable;
int32_t keyFrame;
if (!mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs) ) {
NS_WARNING("OMX decoder did not return frame time");
return false;
}
if (!mVideoBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) {
keyFrame = 0;
}
if (!mVideoBuffer->meta_data()->findInt32(kKeyIsUnreadable, &unreadable)) {
unreadable = 0;
}
mozilla::layers::SurfaceDescriptor *descriptor = nullptr;
if ((mVideoBuffer->graphicBuffer().get())) {
descriptor = mNativeWindow->getSurfaceDescriptorFromBuffer(mVideoBuffer->graphicBuffer().get());
}
if (descriptor) {
// Change the descriptor's size to video's size. There are cases that
// GraphicBuffer's size and actual video size is different.
// See Bug 850566.
mozilla::layers::SurfaceDescriptorGralloc newDescriptor = descriptor->get_SurfaceDescriptorGralloc();
newDescriptor.size() = nsIntSize(mVideoWidth, mVideoHeight);
mozilla::layers::SurfaceDescriptor descWrapper(newDescriptor);
aFrame->mGraphicBuffer = new mozilla::layers::VideoGraphicBuffer(this, mVideoBuffer, descWrapper);
aFrame->mRotation = mVideoRotation;
aFrame->mTimeUs = timeUs;
aFrame->mKeyFrame = keyFrame;
aFrame->Y.mWidth = mVideoWidth;
aFrame->Y.mHeight = mVideoHeight;
} else if (mVideoBuffer->range_length() > 0) {
char *data = static_cast<char *>(mVideoBuffer->data()) + mVideoBuffer->range_offset();
size_t length = mVideoBuffer->range_length();
if (unreadable) {
LOG(PR_LOG_DEBUG, "video frame is unreadable");
}
if (!ToVideoFrame(aFrame, timeUs, data, length, keyFrame)) {
return false;
}
}
if (aKeyframeSkip && timeUs < aTimeUs) {
aFrame->mShouldSkip = true;
}
}
else if (err == INFO_FORMAT_CHANGED) {
// If the format changed, update our cached info.
if (!SetVideoFormat()) {
return false;
} else {
return ReadVideo(aFrame, aTimeUs, aKeyframeSkip, aDoSeek);
}
}
else if (err == ERROR_END_OF_STREAM) {
return false;
}
else if (err == -ETIMEDOUT) {
LOG(PR_LOG_DEBUG, "OmxDecoder::ReadVideo timed out, will retry");
return true;
}
//......... (rest of the code omitted) .........
Example 11: playSource
//......... (beginning of the code omitted) .........
}
if ((gReproduceBug == 4 && diff > 500000)
|| (gReproduceBug == 5 && timestampUs < 0)) {
printf("wanted: %.2f secs, got: %.2f secs\n",
seekTimeUs / 1E6, timestampUs / 1E6);
printf("ERROR: ");
failed = true;
}
}
printf("buffer has timestamp %lld us (%.2f secs)\n",
timestampUs, timestampUs / 1E6);
buffer->release();
buffer = NULL;
if (failed) {
break;
}
shouldSeek = ((double)rand() / RAND_MAX) < 0.1;
if (gReproduceBug == 3) {
shouldSeek = false;
}
}
seekTimeUs = -1;
if (shouldSeek) {
seekTimeUs = (rand() * (float)durationUs) / RAND_MAX;
options.setSeekTo(seekTimeUs);
printf("seeking to %lld us (%.2f secs)\n",
seekTimeUs, seekTimeUs / 1E6);
}
}
rawSource->stop();
return;
}
int n = 0;
int64_t startTime = getNowUs();
long numIterationsLeft = gNumRepetitions;
MediaSource::ReadOptions options;
int64_t sumDecodeUs = 0;
int64_t totalBytes = 0;
while (numIterationsLeft-- > 0) {
long numFrames = 0;
MediaBuffer *buffer;
for (;;) {
int64_t startDecodeUs = getNowUs();
status_t err = rawSource->read(&buffer, &options);
int64_t delayDecodeUs = getNowUs() - startDecodeUs;
options.clearSeekTo();
Example 12: start
status_t AudioPlayer::start(bool sourceAlreadyStarted) {
CHECK(!mStarted);
CHECK(mSource != NULL);
status_t err;
if (!sourceAlreadyStarted) {
#ifdef QCOM_HARDWARE
mSourcePaused = false;
#endif
err = mSource->start();
if (err != OK) {
return err;
}
}
// We allow an optional INFO_FORMAT_CHANGED at the very beginning
// of playback, if there is one, getFormat below will retrieve the
// updated format, if there isn't, we'll stash away the valid buffer
// of data to be used on the first audio callback.
CHECK(mFirstBuffer == NULL);
MediaSource::ReadOptions options;
if (mSeeking) {
options.setSeekTo(mSeekTimeUs);
mSeeking = false;
}
mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
ALOGV("INFO_FORMAT_CHANGED!!!");
CHECK(mFirstBuffer == NULL);
mFirstBufferResult = OK;
mIsFirstBuffer = false;
} else {
mIsFirstBuffer = true;
}
sp<MetaData> format = mSource->getFormat();
const char *mime;
bool success = format->findCString(kKeyMIMEType, &mime);
CHECK(success);
CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
success = format->findInt32(kKeySampleRate, &mSampleRate);
CHECK(success);
int32_t numChannels, channelMask;
success = format->findInt32(kKeyChannelCount, &numChannels);
CHECK(success);
if(!format->findInt32(kKeyChannelMask, &channelMask)) {
// log only when there's a risk of ambiguity of channel mask selection
ALOGI_IF(numChannels > 2,
"source format didn't specify channel mask, using (%d) channel order", numChannels);
channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
}
if (mAudioSink.get() != NULL) {
status_t err = mAudioSink->open(
mSampleRate, numChannels, channelMask, AUDIO_FORMAT_PCM_16_BIT,
DEFAULT_AUDIOSINK_BUFFERCOUNT,
&AudioPlayer::AudioSinkCallback,
this,
(mAllowDeepBuffering ?
AUDIO_OUTPUT_FLAG_DEEP_BUFFER :
AUDIO_OUTPUT_FLAG_NONE));
if (err != OK) {
if (mFirstBuffer != NULL) {
mFirstBuffer->release();
mFirstBuffer = NULL;
}
if (!sourceAlreadyStarted) {
mSource->stop();
}
return err;
}
mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
mFrameSize = mAudioSink->frameSize();
mAudioSink->start();
} else {
// playing to an AudioTrack, set up mask if necessary
audio_channel_mask_t audioMask = channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER ?
audio_channel_out_mask_from_count(numChannels) : channelMask;
if (0 == audioMask) {
return BAD_VALUE;
}
mAudioTrack = new AudioTrack(
AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);
if ((err = mAudioTrack->initCheck()) != OK) {
//......... (rest of the code omitted) .........
Example 13: fillBuffer
size_t AudioPlayer::fillBuffer(void *data, size_t size) {
if (mNumFramesPlayed == 0) {
ALOGV("AudioCallback");
}
if (mReachedEOS) {
return 0;
}
bool postSeekComplete = false;
bool postEOS = false;
int64_t postEOSDelayUs = 0;
size_t size_done = 0;
size_t size_remaining = size;
while (size_remaining > 0) {
MediaSource::ReadOptions options;
{
Mutex::Autolock autoLock(mLock);
if (mSeeking) {
if (mIsFirstBuffer) {
if (mFirstBuffer != NULL) {
mFirstBuffer->release();
mFirstBuffer = NULL;
}
mIsFirstBuffer = false;
}
options.setSeekTo(mSeekTimeUs);
if (mInputBuffer != NULL) {
mInputBuffer->release();
mInputBuffer = NULL;
}
mSeeking = false;
if (mObserver) {
postSeekComplete = true;
}
}
}
if (mInputBuffer == NULL) {
status_t err;
if (mIsFirstBuffer) {
mInputBuffer = mFirstBuffer;
mFirstBuffer = NULL;
err = mFirstBufferResult;
mIsFirstBuffer = false;
} else {
err = mSource->read(&mInputBuffer, &options);
#ifdef QCOM_HARDWARE
if (err == OK && mInputBuffer == NULL && mSourcePaused) {
ALOGV("mSourcePaused, return 0 from fillBuffer");
return 0;
}
#endif
}
CHECK((err == OK && mInputBuffer != NULL)
|| (err != OK && mInputBuffer == NULL));
Mutex::Autolock autoLock(mLock);
if (err != OK) {
if (mObserver && !mReachedEOS) {
// We don't want to post EOS right away but only
// after all frames have actually been played out.
// These are the number of frames submitted to the
// AudioTrack that you haven't heard yet.
uint32_t numFramesPendingPlayout =
getNumFramesPendingPlayout();
// These are the number of frames we're going to
// submit to the AudioTrack by returning from this
// callback.
uint32_t numAdditionalFrames = size_done / mFrameSize;
numFramesPendingPlayout += numAdditionalFrames;
int64_t timeToCompletionUs =
(1000000ll * numFramesPendingPlayout) / mSampleRate;
ALOGV("total number of frames played: %lld (%lld us)",
(mNumFramesPlayed + numAdditionalFrames),
1000000ll * (mNumFramesPlayed + numAdditionalFrames)
/ mSampleRate);
ALOGV("%d frames left to play, %lld us (%.2f secs)",
numFramesPendingPlayout,
timeToCompletionUs, timeToCompletionUs / 1E6);
postEOS = true;
if (mAudioSink->needsTrailingPadding()) {
postEOSDelayUs = timeToCompletionUs + mLatencyUs;
//......... (rest of the code omitted) .........
Example 14: ReadVideo
bool OmxDecoder::ReadVideo(VideoFrame *aFrame, int64_t aSeekTimeUs)
{
if (!mVideoSource.get())
return false;
for (;;) {
ReleaseVideoBuffer();
status_t err;
if (aSeekTimeUs != -1) {
MediaSource::ReadOptions options;
options.setSeekTo(aSeekTimeUs);
err = mVideoSource->read(&mVideoBuffer, &options);
} else {
err = mVideoSource->read(&mVideoBuffer);
}
aSeekTimeUs = -1;
if (err == OK) {
if (mVideoBuffer->range_length() == 0) // If we get a spurious empty buffer, keep going
continue;
int64_t timeUs;
int32_t unreadable;
int32_t keyFrame;
if (!mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs) ) {
LOG("no key time");
return false;
}
if (!mVideoBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) {
keyFrame = 0;
}
if (!mVideoBuffer->meta_data()->findInt32(kKeyIsUnreadable, &unreadable)) {
unreadable = 0;
}
LOG("data: %p size: %u offset: %u length: %u unreadable: %d",
mVideoBuffer->data(),
mVideoBuffer->size(),
mVideoBuffer->range_offset(),
mVideoBuffer->range_length(),
unreadable);
char *data = reinterpret_cast<char *>(mVideoBuffer->data()) + mVideoBuffer->range_offset();
size_t length = mVideoBuffer->range_length();
if (unreadable) {
LOG("video frame is unreadable");
}
if (!ToVideoFrame(aFrame, timeUs, data, length, keyFrame)) {
return false;
}
return true;
}
if (err == INFO_FORMAT_CHANGED) {
// If the format changed, update our cached info.
if (!SetVideoFormat()) {
return false;
}
// Ok, try to read a buffer again.
continue;
}
/* err == ERROR_END_OF_STREAM */
break;
}
return false;
}
Example 15: ReadVideo
bool OmxDecoder::ReadVideo(VideoFrame *aFrame, int64_t aTimeUs,
bool aKeyframeSkip, bool aDoSeek)
{
if (!mVideoSource.get())
return false;
ReleaseVideoBuffer();
status_t err;
if (aDoSeek) {
MediaSource::ReadOptions options;
options.setSeekTo(aTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
err = mVideoSource->read(&mVideoBuffer, &options);
} else {
err = mVideoSource->read(&mVideoBuffer);
}
if (err == OK && mVideoBuffer->range_length() > 0) {
int64_t timeUs;
int64_t durationUs = 0; // never assigned in this excerpt; default to 0 so mEndTimeUs below stays well-defined
int32_t unreadable;
int32_t keyFrame;
if (!mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs) ) {
NS_WARNING("OMX decoder did not return frame time");
return false;
}
if (!mVideoBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) {
keyFrame = 0;
}
if (!mVideoBuffer->meta_data()->findInt32(kKeyIsUnreadable, &unreadable)) {
unreadable = 0;
}
mozilla::layers::SurfaceDescriptor *descriptor = nullptr;
if ((mVideoBuffer->graphicBuffer().get())) {
descriptor = mNativeWindow->getSurfaceDescriptorFromBuffer(mVideoBuffer->graphicBuffer().get());
}
if (descriptor) {
aFrame->mGraphicBuffer = new mozilla::layers::VideoGraphicBuffer(mVideoBuffer, descriptor);
aFrame->mRotation = mVideoRotation;
aFrame->mTimeUs = timeUs;
aFrame->mEndTimeUs = timeUs + durationUs;
aFrame->mKeyFrame = keyFrame;
aFrame->Y.mWidth = mVideoWidth;
aFrame->Y.mHeight = mVideoHeight;
} else {
char *data = static_cast<char *>(mVideoBuffer->data()) + mVideoBuffer->range_offset();
size_t length = mVideoBuffer->range_length();
if (unreadable) {
LOG(PR_LOG_DEBUG, "video frame is unreadable");
}
if (!ToVideoFrame(aFrame, timeUs, data, length, keyFrame)) {
return false;
}
aFrame->mEndTimeUs = timeUs + durationUs;
}
if (aKeyframeSkip && timeUs < aTimeUs) {
aFrame->mShouldSkip = true;
}
}
else if (err == INFO_FORMAT_CHANGED) {
// If the format changed, update our cached info.
if (!SetVideoFormat()) {
return false;
} else {
return ReadVideo(aFrame, aTimeUs, aKeyframeSkip, aDoSeek);
}
}
else if (err == ERROR_END_OF_STREAM) {
return false;
}
else if (err == UNKNOWN_ERROR) {
// This sometimes is used to mean "out of memory", but regardless,
// don't keep trying to decode if the decoder doesn't want to.
return false;
}
return true;
}