本文整理汇总了C++中TimeUnit::ToMicroseconds方法的典型用法代码示例。如果您正苦于以下问题:C++ TimeUnit::ToMicroseconds方法的具体用法?C++ TimeUnit::ToMicroseconds怎么用?C++ TimeUnit::ToMicroseconds使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类TimeUnit的用法示例。
在下文中一共展示了TimeUnit::ToMicroseconds方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: stream
// Returns the time ranges of the stream that are currently buffered,
// derived from the cached byte ranges of the underlying resource.
TimeIntervals
MP3TrackDemuxer::GetBuffered() {
  AutoPinned<MediaResource> stream(mSource.GetResource());
  TimeIntervals intervals;

  // Special case completely cached files. This also handles local files.
  if (Duration() > TimeUnit() && stream->IsDataCachedToEndOfResource(0)) {
    intervals += TimeInterval(TimeUnit(), Duration());
    MP3LOGV("buffered = [[%" PRId64 ", %" PRId64 "]]",
            TimeUnit().ToMicroseconds(), Duration().ToMicroseconds());
    return intervals;
  }

  MediaByteRangeSet byteRanges;
  const nsresult rv = stream->GetCachedRanges(byteRanges);
  NS_ENSURE_SUCCESS(rv, intervals);

  // Map each cached byte range to a time interval via the frame index.
  for (const auto& byteRange : byteRanges) {
    if (byteRange.IsEmpty()) {
      continue;
    }
    const TimeUnit rangeStart = Duration(FrameIndexFromOffset(byteRange.mStart));
    const TimeUnit rangeEnd = Duration(FrameIndexFromOffset(byteRange.mEnd));
    MP3LOGV("buffered += [%" PRId64 ", %" PRId64 "]",
            rangeStart.ToMicroseconds(), rangeEnd.ToMicroseconds());
    intervals += TimeInterval(rangeStart, rangeEnd);
  }
  return intervals;
}
示例2:
// Verifies that GetNextRandomAccessPoint() reports the time of the next
// keyframe: first before any sample is demuxed, then again after demuxing
// one batch of samples has advanced past the first keyframe.
TEST(MP4Demuxer, GetNextKeyframe)
{
  RefPtr<MP4DemuxerBinding> binding = new MP4DemuxerBinding("gizmo-frag.mp4");
  binding->RunTestAndWait([binding]() {
    // Insert a [0,end] buffered range, to simulate Moof's being buffered
    // via MSE.
    auto len = binding->resource->GetLength();
    binding->resource->MockAddBufferedRange(0, len);

    // gizmo-frag has two keyframes; one at dts=cts=0, and another at
    // dts=cts=1000000. Verify we get expected results.
    TimeUnit time;
    binding->mVideoTrack =
      binding->mDemuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0);
    binding->mVideoTrack->Reset();
    binding->mVideoTrack->GetNextRandomAccessPoint(&time);
    EXPECT_EQ(time.ToMicroseconds(), 0);
    // Demux samples; the next random access point should then be the
    // second keyframe.
    binding->mVideoTrack->GetSamples()->Then(
      binding->mTaskQueue, __func__,
      [binding]() {
        TimeUnit time;
        binding->mVideoTrack->GetNextRandomAccessPoint(&time);
        EXPECT_EQ(time.ToMicroseconds(), 1000000);
        binding->mTaskQueue->BeginShutdown();
      },
      DO_FAIL);
  });
}
示例3: FastSeek
// Seeks to aTime by scanning forward frame by frame (after an initial
// coarse FastSeek if needed) until the target time is reached.
TimeUnit
MP3TrackDemuxer::ScanUntil(const TimeUnit& aTime) {
  MP3LOG("ScanUntil(%" PRId64 ") avgFrameLen=%f mNumParsedFrames=%" PRIu64
         " mFrameIndex=%" PRId64 " mOffset=%" PRIu64,
         aTime.ToMicroseconds(), AverageFrameLength(), mNumParsedFrames,
         mFrameIndex, mOffset);

  // A zero target is just a seek to the beginning of the stream.
  if (!aTime.ToMicroseconds()) {
    return FastSeek(aTime);
  }

  // Rewind with a coarse seek if we have already moved past the target.
  if (Duration(mFrameIndex) > aTime) {
    FastSeek(aTime);
  }

  // Already positioned within the frame containing the target time?
  if (Duration(mFrameIndex + 1) > aTime) {
    return SeekPosition();
  }

  // Skip ahead one frame at a time until the next frame would cross aTime.
  MediaByteRange frameRange = FindNextFrame();
  while (SkipNextFrame(frameRange) && Duration(mFrameIndex + 1) < aTime) {
    frameRange = FindNextFrame();
    MP3LOGV("ScanUntil* avgFrameLen=%f mNumParsedFrames=%" PRIu64
            " mFrameIndex=%" PRId64 " mOffset=%" PRIu64 " Duration=%" PRId64,
            AverageFrameLength(), mNumParsedFrames,
            mFrameIndex, mOffset, Duration(mFrameIndex + 1).ToMicroseconds());
  }

  MP3LOG("ScanUntil End avgFrameLen=%f mNumParsedFrames=%" PRIu64
         " mFrameIndex=%" PRId64 " mOffset=%" PRIu64,
         AverageFrameLength(), mNumParsedFrames, mFrameIndex, mOffset);
  return SeekPosition();
}
示例4: Duration
// Coarse seek: positions mOffset/mFrameIndex near aTime using (in order of
// preference) the VBR table of contents, or the average frame length.
// Returns the time corresponding to the chosen frame index.
//
// Fix: aTime is a TimeUnit object; passing it through the varargs MP3LOG
// macro for a "%" PRId64 conversion is undefined behavior (non-trivial
// class through va_arg). Log its microsecond value instead, matching the
// sibling ScanUntil() above.
TimeUnit
MP3TrackDemuxer::FastSeek(const TimeUnit& aTime) {
  MP3LOG("FastSeek(%" PRId64 ") avgFrameLen=%f mNumParsedFrames=%" PRIu64
         " mFrameIndex=%" PRId64 " mOffset=%" PRIu64,
         aTime.ToMicroseconds(), AverageFrameLength(), mNumParsedFrames,
         mFrameIndex, mOffset);
  const auto& vbr = mParser.VBRInfo();
  if (!aTime.ToMicroseconds()) {
    // Quick seek to the beginning of the stream.
    mFrameIndex = 0;
  } else if (vbr.IsTOCPresent()) {
    // Use TOC for more precise seeking.
    const float durationFrac = static_cast<float>(aTime.ToMicroseconds()) /
                               Duration().ToMicroseconds();
    mFrameIndex = FrameIndexFromOffset(vbr.Offset(durationFrac));
  } else if (AverageFrameLength() > 0) {
    // Fall back to an estimate based on the average frame length.
    mFrameIndex = FrameIndexFromTime(aTime);
  }
  // Map the frame index back to a byte offset, clamped to the stream length.
  mOffset = OffsetFromFrameIndex(mFrameIndex);
  if (mOffset > mFirstFrameOffset && StreamLength() > 0) {
    mOffset = std::min(StreamLength() - 1, mOffset);
  }
  // Discard any partially parsed frame so parsing restarts cleanly.
  mParser.EndFrameSession();
  MP3LOG("FastSeek End TOC=%d avgFrameLen=%f mNumParsedFrames=%" PRIu64
         " mFrameIndex=%" PRId64 " mFirstFrameOffset=%llu mOffset=%" PRIu64
         " SL=%llu NumBytes=%u",
         vbr.IsTOCPresent(), AverageFrameLength(), mNumParsedFrames, mFrameIndex,
         mFirstFrameOffset, mOffset, StreamLength(), vbr.NumBytes().valueOr(0));
  return Duration(mFrameIndex);
}
示例5: nsPrintfCString
// Produces a one-line human-readable dump of the stream state for debugging.
nsCString
DecodedStreamData::GetDebugInfo()
{
  // Hoist the timestamps into named locals so the format call stays readable.
  const int64_t nextAudioUs = mNextAudioTime.ToMicroseconds();
  const int64_t nextVideoUs = mNextVideoTime.ToMicroseconds();
  return nsPrintfCString(
    "DecodedStreamData=%p mPlaying=%d mAudioFramesWritten=%" PRId64
    " mNextAudioTime=%" PRId64 " mNextVideoTime=%" PRId64 " mHaveSentFinish=%d "
    "mHaveSentFinishAudio=%d mHaveSentFinishVideo=%d",
    this, mPlaying, mAudioFramesWritten, nextAudioUs,
    nextVideoUs, mHaveSentFinish, mHaveSentFinishAudio,
    mHaveSentFinishVideo);
}
示例6: TimeUnit
// Coarse seek based on the stream's average frame statistics. Returns the
// time of the chosen frame index, or -1us when no frames have been parsed
// yet (no estimate available).
//
// Fix: aTime is a TimeUnit object; passing it directly through the varargs
// MP3DEMUXER_LOG macro for a "%" PRId64 conversion is undefined behavior —
// pass the raw microsecond value instead (compare the ToMicroseconds() use
// just below).
TimeUnit
MP3TrackDemuxer::FastSeek(TimeUnit aTime) {
  MP3DEMUXER_LOG("FastSeek(%" PRId64 ") avgFrameLen=%f mNumParsedFrames=%" PRIu64
                 " mFrameIndex=%" PRId64 " mOffset=%" PRIu64,
                 aTime.ToMicroseconds(), AverageFrameLength(), mNumParsedFrames,
                 mOffset);
  if (!aTime.ToMicroseconds()) {
    // Quick seek to the beginning of the stream.
    mOffset = mFirstFrameOffset;
    mFrameIndex = 0;
    mParser.EndFrameSession();
    return TimeUnit();
  }
  // Without parsed frames there is no frame-length estimate; signal failure.
  if (!mSamplesPerFrame || !mNumParsedFrames) {
    return TimeUnit::FromMicroseconds(-1);
  }
  // Estimate the target frame index from samples-per-second/frame.
  const int64_t numFrames = aTime.ToSeconds() *
                            mSamplesPerSecond / mSamplesPerFrame;
  mOffset = mFirstFrameOffset + numFrames * AverageFrameLength();
  mFrameIndex = numFrames;
  MP3DEMUXER_LOG("FastSeek mSamplesPerSecond=%d mSamplesPerFrame=%d "
                 "numFrames=%" PRId64,
                 mSamplesPerSecond, mSamplesPerFrame, numFrames);
  // Drop any partially parsed frame state before resuming demuxing.
  mParser.EndFrameSession();
  return Duration(mFrameIndex);
}
示例7: PostOutput
// Converts a decoded MediaCodec output buffer (described by aInfo/aFormat)
// into a VideoData backed by this decoder's SurfaceTexture and forwards it
// to the output callback. aDuration is the frame duration.
// Returns NS_OK, or the nsresult of the first failing buffer-info query.
nsresult PostOutput(BufferInfo::Param aInfo, MediaFormat::Param aFormat,
                    const TimeUnit& aDuration) override
{
  // Wrap the SurfaceTexture so the compositor can sample the decoded frame.
  RefPtr<layers::Image> img =
    new SurfaceTextureImage(mSurfaceTexture.get(), mConfig.mDisplay,
                            gl::OriginPos::BottomLeft);
  nsresult rv;
  int32_t flags;
  NS_ENSURE_SUCCESS(rv = aInfo->Flags(&flags), rv);
  // A sync frame is a random access point (keyframe).
  bool isSync = !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME);
  int32_t offset;
  NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);
  int64_t presentationTimeUs;
  NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);
  // presentationTimeUs is used both as the sample time and as the timecode
  // argument below.
  RefPtr<VideoData> v =
    VideoData::CreateFromImage(mConfig,
                               mImageContainer,
                               offset,
                               presentationTimeUs,
                               aDuration.ToMicroseconds(),
                               img,
                               isSync,
                               presentationTimeUs,
                               gfx::IntRect(0, 0,
                                            mConfig.mDisplay.width,
                                            mConfig.mDisplay.height));
  INVOKE_CALLBACK(Output, v);
  return NS_OK;
}
示例8:
// Appends one video frame covering [aStart, aEnd) to aOutput, converting
// the microsecond interval into the stream's time base.
static void
WriteVideoToMediaStream(MediaStream* aStream,
                        layers::Image* aImage,
                        const TimeUnit& aEnd,
                        const TimeUnit& aStart,
                        const mozilla::gfx::IntSize& aIntrinsicSize,
                        const TimeStamp& aTimeStamp,
                        VideoSegment* aOutput,
                        const PrincipalHandle& aPrincipalHandle)
{
  RefPtr<layers::Image> frameImage = aImage;
  const auto streamEnd =
    aStream->MicrosecondsToStreamTimeRoundDown(aEnd.ToMicroseconds());
  const auto streamStart =
    aStream->MicrosecondsToStreamTimeRoundDown(aStart.ToMicroseconds());
  const StreamTime frameDuration = streamEnd - streamStart;
  aOutput->AppendFrame(frameImage.forget(), frameDuration, aIntrinsicSize,
                       aPrincipalHandle, false, aTimeStamp);
}
示例9: TimeIntervals
// Estimates the buffered time ranges from the cached portion of the
// resource; returns an empty set when the total duration is unknown.
TimeIntervals
MP3TrackDemuxer::GetBuffered() {
  const TimeUnit totalDuration = Duration();
  // Without a positive duration we cannot scale byte ranges to times.
  if (totalDuration <= TimeUnit()) {
    return TimeIntervals();
  }
  AutoPinned<MediaResource> stream(mSource.GetResource());
  return GetEstimatedBufferedTimeRanges(stream, totalDuration.ToMicroseconds());
}
示例10: DurationChanged
// Updates the decoder's duration, notifies the owner when it actually
// changed, and clamps the playback position into the new duration.
void MediaDecoder::DurationChanged(TimeUnit aNewDuration)
{
  MOZ_ASSERT(NS_IsMainThread());
  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());

  const int64_t previousDuration = mDuration;
  mDuration = aNewDuration.ToMicroseconds();

  // Duration has changed so we should recompute playback rate
  UpdatePlaybackRate();
  SetInfinite(mDuration == -1);

  if (mOwner && previousDuration != mDuration && !IsInfinite()) {
    DECODER_LOG("Duration changed to %lld", mDuration);
    mOwner->DispatchAsyncEvent(NS_LITERAL_STRING("durationchange"));
  }

  // If playback has run past the new end, seek back to it.
  if (CurrentPosition() > aNewDuration.ToMicroseconds()) {
    Seek(aNewDuration.ToSeconds(), SeekTarget::Accurate);
  }
}
示例11: FastSeek
// Seeks to aTime: time zero is a plain fast seek; otherwise rewind with a
// coarse seek if we already passed the target, then report the position.
TimeUnit
WAVTrackDemuxer::ScanUntil(const TimeUnit& aTime)
{
  if (aTime.ToMicroseconds() == 0) {
    return FastSeek(aTime);
  }
  if (Duration(mChunkIndex) > aTime) {
    FastSeek(aTime);
  }
  return SeekPosition();
}
示例12: Output
// Converts a decoded MediaCodec audio buffer (raw 16-bit signed PCM in
// aBuffer, described by aInfo/aFormat) into an AudioData and forwards it
// to the output callback. aDuration is the buffer's playback duration.
// Returns NS_OK, or the nsresult of the first failing format/info query.
nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
                MediaFormat::Param aFormat, const TimeUnit& aDuration)
{
  // The output on Android is always 16-bit signed
  nsresult rv;
  int32_t numChannels;
  NS_ENSURE_SUCCESS(rv =
    aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &numChannels), rv);
  AudioConfig::ChannelLayout layout(numChannels);
  if (!layout.IsValid()) {
    return NS_ERROR_FAILURE;
  }
  int32_t sampleRate;
  NS_ENSURE_SUCCESS(rv =
    aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &sampleRate), rv);
  int32_t size;
  NS_ENSURE_SUCCESS(rv = aInfo->Size(&size), rv);
  int32_t offset;
  NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);
#ifdef MOZ_SAMPLE_TYPE_S16
  // Two bytes per 16-bit sample.
  const int32_t numSamples = size / 2;
#else
#error We only support 16-bit integer PCM
#endif
  const int32_t numFrames = numSamples / numChannels;
  AlignedAudioBuffer audio(numSamples);
  if (!audio) {
    return NS_ERROR_OUT_OF_MEMORY;
  }
  // Copy the samples out of the codec buffer, starting at the given offset.
  const uint8_t* bufferStart = static_cast<uint8_t*>(aBuffer) + offset;
  PodCopy(audio.get(), reinterpret_cast<const AudioDataValue*>(bufferStart),
          numSamples);
  int64_t presentationTimeUs;
  NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);
  RefPtr<AudioData> data = new AudioData(0, presentationTimeUs,
                                         aDuration.ToMicroseconds(),
                                         numFrames,
                                         Move(audio),
                                         numChannels,
                                         sampleRate);
  INVOKE_CALLBACK(Output, data);
  return NS_OK;
}
示例13: FastSeek
// Seeks to aTime by coarse-seeking when needed and then skipping forward
// frame by frame until the next frame would cross the target time.
TimeUnit
MP3TrackDemuxer::ScanUntil(TimeUnit aTime) {
  // Zero target: plain seek to the start of the stream.
  if (aTime.ToMicroseconds() == 0) {
    return FastSeek(aTime);
  }
  // Rewind with a coarse seek if we have already overshot the target.
  if (Duration(mFrameIndex) > aTime) {
    FastSeek(aTime);
  }
  // Step forward one frame at a time until aTime falls within the next frame.
  MediaByteRange frameRange = FindNextFrame();
  while (SkipNextFrame(frameRange) && Duration(mFrameIndex + 1) < aTime) {
    frameRange = FindNextFrame();
  }
  return Duration(mFrameIndex);
}
示例14: Duration
// Coarse seek: maps aTime to a chunk index (time zero to the first chunk),
// derives the byte offset, and clamps it to the stream length when known.
TimeUnit
WAVTrackDemuxer::FastSeek(const TimeUnit& aTime)
{
  mChunkIndex = aTime.ToMicroseconds() ? ChunkIndexFromTime(aTime) : 0;
  mOffset = OffsetFromChunkIndex(mChunkIndex);
  // Keep the offset inside the stream when its length is known.
  if (mOffset > mFirstChunkOffset && StreamLength() > 0) {
    mOffset = std::min(StreamLength() - 1, mOffset);
  }
  return Duration(mChunkIndex);
}
示例15: TimeUnit
// Coarse seek using the stream's average frame statistics; returns the time
// of the chosen frame, or -1us when no frames have been parsed yet.
TimeUnit
MP3TrackDemuxer::FastSeek(TimeUnit aTime) {
  if (aTime.ToMicroseconds() == 0) {
    // Quick seek to the beginning of the stream.
    mOffset = mFirstFrameOffset;
    mFrameIndex = 0;
    mParser.EndFrameSession();
    return TimeUnit();
  }
  // Without parsed frames we have no frame-length estimate to work with.
  if (!mSamplesPerFrame || !mNumParsedFrames) {
    return TimeUnit::FromMicroseconds(-1);
  }
  // Estimate the target frame index from samples-per-second/frame.
  const int64_t targetFrame = aTime.ToSeconds() *
                              mSamplesPerSecond / mSamplesPerFrame;
  mOffset = mFirstFrameOffset + targetFrame * AverageFrameLength();
  mFrameIndex = targetFrame;
  mParser.EndFrameSession();
  return Duration(mFrameIndex);
}