This page collects typical usage examples of the C++ method media::TimeUnit::ToMicroseconds. If you are unsure what TimeUnit::ToMicroseconds does, how to call it, or want to see it in real code, the curated examples below should help. You can also explore the other methods of the enclosing class, media::TimeUnit.
The 15 code examples of TimeUnit::ToMicroseconds shown below are sorted by popularity by default.
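Throughout these examples, media::TimeUnit is Gecko's media-time value type: ToMicroseconds() converts it to a plain int64_t microsecond count, and media::TimeUnit::FromMicroseconds() is the inverse (both appear in the snippets below). A minimal sketch of that round trip; this assumes a Gecko build environment and will not compile standalone, and the header path is an assumption:

#include "TimeUnits.h"  // assumed Gecko-internal header for media::TimeUnit

void Sketch() {
  // Build a TimeUnit from a raw microsecond count...
  media::TimeUnit t = media::TimeUnit::FromMicroseconds(1500000);
  // ...and convert back when an API wants a plain int64_t.
  int64_t us = t.ToMicroseconds();  // 1500000
  // TimeUnit also supports arithmetic directly, as in Example 6 below
  // (aTimeThreshold += StartTime()).
  media::TimeUnit later = t + media::TimeUnit::FromMicroseconds(500000);
  int64_t laterUs = later.ToMicroseconds();  // 2000000
  (void)us; (void)laterUs;
}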
Example 1: CreateAndCopyData

already_AddRefed<MediaData>
Create(const media::TimeUnit& aDTS, const media::TimeUnit& aDuration, int64_t aOffsetInStream)
{
  // Create a fake YUV buffer in a 420 format. That is, an 8bpp Y plane,
  // with a U and V plane that are half the size of the Y plane, i.e. 8 bit,
  // 2x2 subsampled.
  const int sizeY = mFrameWidth * mFrameHeight;
  const int sizeCbCr = ((mFrameWidth + 1) / 2) * ((mFrameHeight + 1) / 2);
  auto frame = MakeUnique<uint8_t[]>(sizeY + sizeCbCr);
  VideoData::YCbCrBuffer buffer;

  // Y plane.
  buffer.mPlanes[0].mData = frame.get();
  buffer.mPlanes[0].mStride = mFrameWidth;
  buffer.mPlanes[0].mHeight = mFrameHeight;
  buffer.mPlanes[0].mWidth = mFrameWidth;
  buffer.mPlanes[0].mOffset = 0;
  buffer.mPlanes[0].mSkip = 0;

  // Cb plane.
  buffer.mPlanes[1].mData = frame.get() + sizeY;
  buffer.mPlanes[1].mStride = mFrameWidth / 2;
  buffer.mPlanes[1].mHeight = mFrameHeight / 2;
  buffer.mPlanes[1].mWidth = mFrameWidth / 2;
  buffer.mPlanes[1].mOffset = 0;
  buffer.mPlanes[1].mSkip = 0;

  // Cr plane. It deliberately aliases the Cb plane: both chroma planes
  // read the same 128-filled buffer, which is correct for a solid white
  // frame.
  buffer.mPlanes[2].mData = frame.get() + sizeY;
  buffer.mPlanes[2].mStride = mFrameWidth / 2;
  buffer.mPlanes[2].mHeight = mFrameHeight / 2;
  buffer.mPlanes[2].mWidth = mFrameWidth / 2;
  buffer.mPlanes[2].mOffset = 0;
  buffer.mPlanes[2].mSkip = 0;

  // Set to color white.
  memset(buffer.mPlanes[0].mData, 255, sizeY);
  memset(buffer.mPlanes[1].mData, 128, sizeCbCr);

  return VideoData::CreateAndCopyData(mInfo,
                                      mImageContainer,
                                      aOffsetInStream,
                                      aDTS.ToMicroseconds(),      // time
                                      aDuration.ToMicroseconds(), // duration
                                      buffer,
                                      true,                       // keyframe
                                      aDTS.ToMicroseconds(),      // timecode
                                      mPicture);
}
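The rounding in sizeCbCr matters for odd frame dimensions: each 2x2-subsampled chroma plane is half the width and half the height, rounded up. A standalone sketch of that arithmetic, with made-up frame dimensions chosen to show the round-up:

#include <cstdio>

int main() {
  const int width = 5, height = 3;  // odd on purpose
  const int sizeY = width * height;                             // 15
  const int sizeCbCr = ((width + 1) / 2) * ((height + 1) / 2);  // 3 * 2 = 6
  // The allocation in Example 1 is one Y plane plus one shared chroma plane.
  std::printf("Y: %d, CbCr: %d, total: %d\n", sizeY, sizeCbCr, sizeY + sizeCbCr);
  return 0;
}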
Example 2: mon

int64_t
MP4TrackDemuxer::GetEvictionOffset(media::TimeUnit aTime)
{
  MonitorAutoLock mon(mMonitor);
  // The index reports "nothing to evict" as uint64_t max; normalize that
  // sentinel to 0 before the signed cast.
  uint64_t offset = mIndex->GetEvictionOffset(aTime.ToMicroseconds());
  return int64_t(offset == std::numeric_limits<uint64_t>::max() ? 0 : offset);
}
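Compare Example 15 at the bottom of the page, which casts the raw offset without remapping the sentinel. A standalone sketch of the normalization used here (the sentinel convention is taken from the snippet above):

#include <cstdint>
#include <cstdio>
#include <limits>

// Map the "no eviction offset" sentinel to 0; pass everything else through.
int64_t NormalizeEvictionOffset(uint64_t offset) {
  return int64_t(offset == std::numeric_limits<uint64_t>::max() ? 0 : offset);
}

int main() {
  std::printf("%lld\n", (long long)NormalizeEvictionOffset(4096));  // 4096
  std::printf("%lld\n", (long long)NormalizeEvictionOffset(
                            std::numeric_limits<uint64_t>::max())); // 0
  return 0;
}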
Example 3: Create

already_AddRefed<MediaData>
Create(const media::TimeUnit& aDTS, const media::TimeUnit& aDuration, int64_t aOffsetInStream)
{
  // Create a fake YUV buffer in a 420 format. That is, an 8bpp Y plane,
  // with a U and V plane that are half the size of the Y plane, i.e. 8 bit,
  // 2x2 subsampled. Have the data pointers of each plane point to the
  // first plane; they'll always be zero'd memory anyway.
  nsAutoArrayPtr<uint8_t> frame(new uint8_t[mFrameWidth * mFrameHeight]);
  memset(frame, 0, mFrameWidth * mFrameHeight);
  VideoData::YCbCrBuffer buffer;

  // Y plane.
  buffer.mPlanes[0].mData = frame;
  buffer.mPlanes[0].mStride = mFrameWidth;
  buffer.mPlanes[0].mHeight = mFrameHeight;
  buffer.mPlanes[0].mWidth = mFrameWidth;
  buffer.mPlanes[0].mOffset = 0;
  buffer.mPlanes[0].mSkip = 0;

  // Cb plane.
  buffer.mPlanes[1].mData = frame;
  buffer.mPlanes[1].mStride = mFrameWidth / 2;
  buffer.mPlanes[1].mHeight = mFrameHeight / 2;
  buffer.mPlanes[1].mWidth = mFrameWidth / 2;
  buffer.mPlanes[1].mOffset = 0;
  buffer.mPlanes[1].mSkip = 0;

  // Cr plane.
  buffer.mPlanes[2].mData = frame;
  buffer.mPlanes[2].mStride = mFrameWidth / 2;
  buffer.mPlanes[2].mHeight = mFrameHeight / 2;
  buffer.mPlanes[2].mWidth = mFrameWidth / 2;
  buffer.mPlanes[2].mOffset = 0;
  buffer.mPlanes[2].mSkip = 0;

  return VideoData::Create(mInfo,
                           mImageContainer,
                           nullptr,
                           aOffsetInStream,
                           aDTS.ToMicroseconds(),      // time
                           aDuration.ToMicroseconds(), // duration
                           buffer,
                           true,                       // keyframe
                           aDTS.ToMicroseconds(),      // timecode
                           mPicture);
}
Example 4: AssertOnManagerThread

void
VideoDecoderChild::SetSeekThreshold(const media::TimeUnit& aTime)
{
  AssertOnManagerThread();
  if (mCanSend) {
    // Cross the IPC boundary with a plain microsecond count.
    SendSetSeekThreshold(aTime.ToMicroseconds());
  }
}
Example 5: InvokeAsync

RefPtr<MediaDecoderReader::SeekPromise>
MediaDecoderReaderWrapper::Seek(SeekTarget aTarget, media::TimeUnit aEndTime)
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
  // Translate the zero-based seek target onto the reader's timeline.
  aTarget.SetTime(aTarget.GetTime() + StartTime());
  return InvokeAsync(mReader->OwnerThread(), mReader.get(), __func__,
                     &MediaDecoderReader::Seek, aTarget,
                     aEndTime.ToMicroseconds());
}
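The wrapper offsets the caller's seek target by StartTime() because the reader works in the stream's own timeline, which need not begin at zero. A standalone sketch of that offset arithmetic in plain microseconds (both values are made up):

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t targetUs = 5000000;    // caller seeks 5 s in, zero-based
  const int64_t startTimeUs = 250000;  // hypothetical media start time
  // The reader expects targets on its own timeline, so add the offset.
  const int64_t readerTargetUs = targetUs + startTimeUs;
  std::printf("seek the reader to %lld us\n", (long long)readerTargetUs);  // 5250000
  return 0;
}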
Example 6: StartTime

void
MediaDecoderReaderWrapper::RequestVideoData(bool aSkipToNextKeyframe,
                                            media::TimeUnit aTimeThreshold)
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
  MOZ_ASSERT(!mShutdown);
  MOZ_ASSERT(mRequestVideoDataCB, "Request video data without callback!");

  // Time the video decode and send this value back to callbacks that accept
  // a TimeStamp as their second parameter.
  TimeStamp videoDecodeStartTime = TimeStamp::Now();

  if (aTimeThreshold.ToMicroseconds() > 0 &&
      mStartTimeRendezvous->HaveStartTime()) {
    aTimeThreshold += StartTime();
  }

  auto p = InvokeAsync(mReader->OwnerThread(), mReader.get(), __func__,
                       &MediaDecoderReader::RequestVideoData,
                       aSkipToNextKeyframe, aTimeThreshold.ToMicroseconds());

  if (!mStartTimeRendezvous->HaveStartTime()) {
    p = p->Then(mOwnerThread, __func__, mStartTimeRendezvous.get(),
                &StartTimeRendezvous::ProcessFirstSample<MediaData::VIDEO_DATA>,
                &StartTimeRendezvous::FirstSampleRejected<MediaData::VIDEO_DATA>)
         ->CompletionPromise();
  }

  RefPtr<MediaDecoderReaderWrapper> self = this;
  mVideoDataRequest.Begin(p->Then(mOwnerThread, __func__,
    [self, videoDecodeStartTime] (MediaData* aVideoSample) {
      MOZ_ASSERT(self->mRequestVideoDataCB);
      self->mVideoDataRequest.Complete();
      self->OnSampleDecoded(self->mRequestVideoDataCB.get(), aVideoSample,
                            videoDecodeStartTime);
    },
    [self] (MediaDecoderReader::NotDecodedReason aReason) {
      MOZ_ASSERT(self->mRequestVideoDataCB);
      self->mVideoDataRequest.Complete();
      self->OnNotDecoded(self->mRequestVideoDataCB.get(), aReason);
    }));
}
Example 7: AssertOwnerThread

void
SeekTask::RequestVideoData()
{
  AssertOwnerThread();
  // These two variables are not used in the SEEKING state.
  const bool skipToNextKeyFrame = false;
  const media::TimeUnit currentTime = media::TimeUnit::FromMicroseconds(0);
  SAMPLE_LOG("Queueing video task - queued=%i, decoder-queued=%o, skip=%i, time=%lld",
             !!mSeekedVideoData, mReader->SizeOfVideoQueueInFrames(), skipToNextKeyFrame,
             currentTime.ToMicroseconds());
  mReader->RequestVideoData(skipToNextKeyFrame, currentTime);
}
Example 8: CreateAndResolve

RefPtr<MP4TrackDemuxer::SeekPromise>
MP4TrackDemuxer::Seek(media::TimeUnit aTime)
{
  int64_t seekTime = aTime.ToMicroseconds();
  mQueuedSample = nullptr;
  mIterator->Seek(seekTime);

  // Check what time we actually seeked to.
  mQueuedSample = mIterator->GetNext();
  if (mQueuedSample) {
    seekTime = mQueuedSample->mTime;
  }
  SetNextKeyFrameTime();

  return SeekPromise::CreateAndResolve(media::TimeUnit::FromMicroseconds(seekTime), __func__);
}
Example 9: mDecoderID

SeekTask::SeekTask(const void* aDecoderID,
                   AbstractThread* aThread,
                   MediaDecoderReaderWrapper* aReader,
                   SeekJob&& aSeekJob,
                   const MediaInfo& aInfo,
                   const media::TimeUnit& aDuration,
                   int64_t aCurrentMediaTime)
  : mDecoderID(aDecoderID)
  , mOwnerThread(aThread)
  , mReader(aReader)
  , mSeekJob(Move(aSeekJob))
  , mCurrentTimeBeforeSeek(aCurrentMediaTime)
  , mAudioRate(aInfo.mAudio.mRate)
  , mHasAudio(aInfo.HasAudio())
  , mHasVideo(aInfo.HasVideo())
  , mDropAudioUntilNextDiscontinuity(false)
  , mDropVideoUntilNextDiscontinuity(false)
  , mIsDiscarded(false)
  , mIsAudioQueueFinished(false)
  , mIsVideoQueueFinished(false)
  , mNeedToStopPrerollingAudio(false)
  , mNeedToStopPrerollingVideo(false)
{
  // Bound the seek time to be inside the media range.
  int64_t end = aDuration.ToMicroseconds();
  NS_ASSERTION(end != -1, "Should know end time by now");
  int64_t seekTime = mSeekJob.mTarget.GetTime().ToMicroseconds();
  seekTime = std::min(seekTime, end);
  seekTime = std::max(int64_t(0), seekTime);
  NS_ASSERTION(seekTime >= 0 && seekTime <= end,
               "Can only seek in range [0,duration]");
  mSeekJob.mTarget.SetTime(media::TimeUnit::FromMicroseconds(seekTime));

  mDropAudioUntilNextDiscontinuity = HasAudio();
  mDropVideoUntilNextDiscontinuity = HasVideo();

  // Configure MediaDecoderReaderWrapper.
  SetMediaDecoderReaderWrapperCallback();
}
Example 10: SeekTask

AccurateSeekTask::AccurateSeekTask(const void* aDecoderID,
                                   AbstractThread* aThread,
                                   MediaDecoderReaderWrapper* aReader,
                                   const SeekTarget& aTarget,
                                   const MediaInfo& aInfo,
                                   const media::TimeUnit& aEnd,
                                   int64_t aCurrentMediaTime)
  : SeekTask(aDecoderID, aThread, aReader, aTarget)
  , mCurrentTimeBeforeSeek(media::TimeUnit::FromMicroseconds(aCurrentMediaTime))
  , mAudioRate(aInfo.mAudio.mRate)
  , mDoneAudioSeeking(!aInfo.HasAudio() || aTarget.IsVideoOnly())
  , mDoneVideoSeeking(!aInfo.HasVideo())
{
  AssertOwnerThread();

  // Bound the seek time to be inside the media range.
  NS_ASSERTION(aEnd.ToMicroseconds() != -1, "Should know end time by now");
  mTarget.SetTime(std::max(media::TimeUnit(), std::min(mTarget.GetTime(), aEnd)));

  // Configure MediaDecoderReaderWrapper.
  SetCallbacks();
}
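Examples 9 and 10 apply the same clamp, once on raw int64_t microseconds and once on TimeUnit values directly. A standalone sketch of the int64_t variant (the duration is made up):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Clamp a seek time into [0, duration], mirroring the SeekTask constructors.
int64_t ClampSeekTime(int64_t seekTimeUs, int64_t durationUs) {
  seekTimeUs = std::min(seekTimeUs, durationUs);
  return std::max(int64_t(0), seekTimeUs);
}

int main() {
  const int64_t durationUs = 10000000;  // hypothetical 10 s of media
  std::printf("%lld\n", (long long)ClampSeekTime(12000000, durationUs));  // 10000000
  std::printf("%lld\n", (long long)ClampSeekTime(-5, durationUs));        // 0
  return 0;
}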
Example 11: Output

nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
                MediaFormat::Param aFormat,
                const media::TimeUnit& aDuration) {
  // The output on Android is always 16-bit signed.
  nsresult rv;
  int32_t numChannels;
  NS_ENSURE_SUCCESS(rv =
    aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &numChannels), rv);
  int32_t sampleRate;
  NS_ENSURE_SUCCESS(rv =
    aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &sampleRate), rv);
  int32_t size;
  NS_ENSURE_SUCCESS(rv = aInfo->Size(&size), rv);

  // One frame = one 2-byte sample per channel.
  const int32_t numFrames = (size / numChannels) / 2;
  AudioDataValue* audio = new AudioDataValue[size];
  PodCopy(audio, static_cast<AudioDataValue*>(aBuffer), size);

  int32_t offset;
  NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);
  int64_t presentationTimeUs;
  NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);

  nsRefPtr<AudioData> data = new AudioData(offset, presentationTimeUs,
                                           aDuration.ToMicroseconds(),
                                           numFrames,
                                           audio,
                                           numChannels,
                                           sampleRate);
  ENVOKE_CALLBACK(Output, data);
  return NS_OK;
}
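Because the decoder output is 16-bit signed PCM, the frame count falls out of the buffer size as bytes / channels / 2. A standalone check of that arithmetic (buffer size and channel count are made up):

#include <cstdint>
#include <cstdio>

int main() {
  const int32_t sizeBytes = 4096;    // hypothetical output buffer size
  const int32_t numChannels = 2;     // hypothetical stereo stream
  const int32_t bytesPerSample = 2;  // 16-bit signed PCM
  // One frame holds one sample per channel.
  const int32_t numFrames = (sizeBytes / numChannels) / bytesPerSample;
  std::printf("%d frames\n", numFrames);  // 1024
  return 0;
}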
Example 12: LOG

void
AppleVTDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
{
  LOG("SetSeekThreshold %lld", aTime.ToMicroseconds());
  mSeekTargetThreshold = Some(aTime);
}
Example 13: PostOutput

virtual nsresult PostOutput(BufferInfo::Param aInfo, MediaFormat::Param aFormat,
                            const media::TimeUnit& aDuration) override {
  if (!EnsureGLContext()) {
    return NS_ERROR_FAILURE;
  }

  nsRefPtr<layers::Image> img = mImageContainer->CreateImage(ImageFormat::SURFACE_TEXTURE);
  layers::SurfaceTextureImage::Data data;
  data.mSurfTex = mSurfaceTexture.get();
  data.mSize = mConfig.mDisplay;
  data.mOriginPos = gl::OriginPos::BottomLeft;

  layers::SurfaceTextureImage* stImg = static_cast<layers::SurfaceTextureImage*>(img.get());
  stImg->SetData(data);

  if (WantCopy()) {
    EGLImage eglImage = CopySurface(img);
    if (!eglImage) {
      return NS_ERROR_FAILURE;
    }

    EGLSync eglSync = nullptr;
    if (sEGLLibrary.IsExtensionSupported(GLLibraryEGL::KHR_fence_sync) &&
        mGLContext->IsExtensionSupported(GLContext::OES_EGL_sync))
    {
      MOZ_ASSERT(mGLContext->IsCurrent());
      eglSync = sEGLLibrary.fCreateSync(EGL_DISPLAY(),
                                        LOCAL_EGL_SYNC_FENCE,
                                        nullptr);
      MOZ_ASSERT(eglSync);
      mGLContext->fFlush();
    } else {
      NS_WARNING("No EGL fence support detected, rendering artifacts may occur!");
    }

    img = mImageContainer->CreateImage(ImageFormat::EGLIMAGE);
    // This inner Data intentionally shadows the SurfaceTextureImage::Data above.
    layers::EGLImageImage::Data data;
    data.mImage = eglImage;
    data.mSync = eglSync;
    data.mOwns = true;
    data.mSize = mConfig.mDisplay;
    data.mOriginPos = gl::OriginPos::TopLeft;

    layers::EGLImageImage* typedImg = static_cast<layers::EGLImageImage*>(img.get());
    typedImg->SetData(data);
  }

  nsresult rv;
  int32_t flags;
  NS_ENSURE_SUCCESS(rv = aInfo->Flags(&flags), rv);
  bool isSync = !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME);

  int32_t offset;
  NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);
  int64_t presentationTimeUs;
  NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);

  nsRefPtr<VideoData> v =
    VideoData::CreateFromImage(mConfig,
                               mImageContainer,
                               offset,
                               presentationTimeUs,
                               aDuration.ToMicroseconds(),
                               img,
                               isSync,
                               presentationTimeUs,
                               gfx::IntRect(0, 0,
                                            mConfig.mDisplay.width,
                                            mConfig.mDisplay.height));
  ENVOKE_CALLBACK(Output, v);
  return NS_OK;
}
Example 14: TimeUnitToFrames

// Convert a TimeUnit to a number of audio frames at the given sample rate.
CheckedInt64 TimeUnitToFrames(const media::TimeUnit& aTime, uint32_t aRate) {
  return UsecsToFrames(aTime.ToMicroseconds(), aRate);
}
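The underlying arithmetic is frames = microseconds x rate / 1,000,000, which Gecko performs with overflow-checked 64-bit math (CheckedInt64). A standalone sketch of the same computation, with a hand-rolled overflow guard standing in for CheckedInt64:

#include <cstdint>
#include <cstdio>

// frames = usecs * rate / 1e6; returns false on 64-bit overflow.
bool UsecsToFramesSketch(int64_t usecs, uint32_t rate, int64_t* outFrames) {
  if (usecs < 0) {
    return false;
  }
  // Guard the multiplication before performing it.
  if (rate != 0 && usecs > INT64_MAX / rate) {
    return false;
  }
  *outFrames = (usecs * int64_t(rate)) / 1000000;
  return true;
}

int main() {
  int64_t frames = 0;
  // 20 ms of audio at 48 kHz.
  if (UsecsToFramesSketch(20000, 48000, &frames)) {
    std::printf("%lld frames\n", (long long)frames);  // 960
  }
  return 0;
}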
Example 15: mon

int64_t
MP4TrackDemuxer::GetEvictionOffset(media::TimeUnit aTime)
{
  MonitorAutoLock mon(mMonitor);
  // Unlike Example 2, this variant casts the raw offset without remapping
  // the uint64_t max sentinel.
  return int64_t(mIndex->GetEvictionOffset(aTime.ToMicroseconds()));
}