This article collects and summarizes typical usage examples of the C++ method MediaResource::IsDataCachedToEndOfResource. If you are wondering what exactly MediaResource::IsDataCachedToEndOfResource does and how to use it, the curated code examples below may help. You can also read more about the usage of its containing class, MediaResource.
The following shows 8 code examples of MediaResource::IsDataCachedToEndOfResource, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
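Before the examples, here is a minimal, hypothetical sketch of how the method is typically called. It is not taken from the Mozilla source tree; the signature (an int64_t offset parameter returning bool) and the helper function ReportBuffered are assumptions inferred from the examples below.

// Hypothetical sketch (not from the Mozilla tree). Assumed signature:
//   bool MediaResource::IsDataCachedToEndOfResource(int64_t aOffset);
// i.e. it returns true when every byte from aOffset to the end of the
// resource is already in the media cache.
void ReportBuffered(MediaResource* aResource, dom::TimeRanges* aBuffered,
                    double aDurationSeconds)
{
  // Passing offset 0 asks whether the entire resource is cached, which also
  // covers local files.
  if (aResource->IsDataCachedToEndOfResource(0)) {
    // Fully cached: the buffered range is simply [0, duration].
    aBuffered->Add(0, aDurationSeconds);
    return;
  }
  // Otherwise, fall back to per-range calculations via GetCachedRanges(),
  // as the examples below demonstrate.
  nsTArray<MediaByteRange> ranges;
  aResource->GetCachedRanges(ranges);
  // ... map each cached byte range to a time range and add it to aBuffered ...
}

The common pattern in all eight examples is the same: a call with offset 0 acts as a fast path for fully cached (or local) resources, and a call with a range's start offset decides whether that cached range extends to the end of the file.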
Example 1: GetBuffered
nsresult WebMReader::GetBuffered(nsTimeRanges* aBuffered, int64_t aStartTime)
{
  MediaResource* resource = mDecoder->GetResource();

  uint64_t timecodeScale;
  if (!mContext || nestegg_tstamp_scale(mContext, &timecodeScale) == -1) {
    return NS_OK;
  }

  // Special case completely cached files. This also handles local files.
  bool isFullyCached = resource->IsDataCachedToEndOfResource(0);
  if (isFullyCached) {
    uint64_t duration = 0;
    if (nestegg_duration(mContext, &duration) == 0) {
      aBuffered->Add(0, duration / NS_PER_S);
    }
  }

  uint32_t bufferedLength = 0;
  aBuffered->GetLength(&bufferedLength);

  // Either the file is not fully cached, or we couldn't find a duration in
  // the WebM bitstream.
  if (!isFullyCached || !bufferedLength) {
    MediaResource* resource = mDecoder->GetResource();
    nsTArray<MediaByteRange> ranges;
    nsresult res = resource->GetCachedRanges(ranges);
    NS_ENSURE_SUCCESS(res, res);

    for (uint32_t index = 0; index < ranges.Length(); index++) {
      uint64_t start, end;
      bool rv = mBufferedState->CalculateBufferedForRange(ranges[index].mStart,
                                                          ranges[index].mEnd,
                                                          &start, &end);
      if (rv) {
        double startTime = start * timecodeScale / NS_PER_S - aStartTime;
        double endTime = end * timecodeScale / NS_PER_S - aStartTime;

        // If this range extends to the end of the file, the true end time
        // is the file's duration.
        if (resource->IsDataCachedToEndOfResource(ranges[index].mStart)) {
          uint64_t duration = 0;
          if (nestegg_duration(mContext, &duration) == 0) {
            endTime = duration / NS_PER_S;
          }
        }

        aBuffered->Add(startTime, endTime);
      }
    }
  }

  return NS_OK;
}
Example 2: IsDataCachedAtEndOfSubsegments
bool WebMReader::IsDataCachedAtEndOfSubsegments()
{
  MediaResource* resource = mDecoder->GetResource();
  NS_ENSURE_TRUE(resource, false);
  if (resource->IsDataCachedToEndOfResource(0)) {
    return true;
  }

  if (mClusterByteRanges.IsEmpty()) {
    return false;
  }

  nsTArray<MediaByteRange> ranges;
  nsresult rv = resource->GetCachedRanges(ranges);
  NS_ENSURE_SUCCESS(rv, false);
  if (ranges.IsEmpty()) {
    return false;
  }

  // Return true if data at the end of the final subsegment is cached.
  uint32_t finalSubsegmentIndex = mClusterByteRanges.Length() - 1;
  uint64_t finalSubEndOffset = mClusterByteRanges[finalSubsegmentIndex].mEnd;
  uint32_t finalRangeIndex = ranges.Length() - 1;
  uint64_t finalRangeStartOffset = ranges[finalRangeIndex].mStart;
  uint64_t finalRangeEndOffset = ranges[finalRangeIndex].mEnd;

  return (finalRangeStartOffset < finalSubEndOffset &&
          finalSubEndOffset <= finalRangeEndOffset);
}
Example 3: GetBuffered
nsresult GStreamerReader::GetBuffered(dom::TimeRanges* aBuffered,
                                      int64_t aStartTime)
{
  if (!mInfo.HasValidMedia()) {
    return NS_OK;
  }

#if GST_VERSION_MAJOR == 0
  GstFormat format = GST_FORMAT_TIME;
#endif
  MediaResource* resource = mDecoder->GetResource();
  nsTArray<MediaByteRange> ranges;
  resource->GetCachedRanges(ranges);

  if (resource->IsDataCachedToEndOfResource(0)) {
    /* fast path for local or completely cached files */
    gint64 duration = 0;

    {
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      duration = mDecoder->GetMediaDuration();
    }

    double end = (double) duration / GST_MSECOND;
    LOG(PR_LOG_DEBUG, "complete range [0, %f] for [0, %li]",
        end, resource->GetLength());
    aBuffered->Add(0, end);
    return NS_OK;
  }

  for (uint32_t index = 0; index < ranges.Length(); index++) {
    int64_t startOffset = ranges[index].mStart;
    int64_t endOffset = ranges[index].mEnd;
    gint64 startTime, endTime;

#if GST_VERSION_MAJOR >= 1
    if (!gst_element_query_convert(GST_ELEMENT(mPlayBin), GST_FORMAT_BYTES,
          startOffset, GST_FORMAT_TIME, &startTime))
      continue;
    if (!gst_element_query_convert(GST_ELEMENT(mPlayBin), GST_FORMAT_BYTES,
          endOffset, GST_FORMAT_TIME, &endTime))
      continue;
#else
    if (!gst_element_query_convert(GST_ELEMENT(mPlayBin), GST_FORMAT_BYTES,
          startOffset, &format, &startTime) || format != GST_FORMAT_TIME)
      continue;
    if (!gst_element_query_convert(GST_ELEMENT(mPlayBin), GST_FORMAT_BYTES,
          endOffset, &format, &endTime) || format != GST_FORMAT_TIME)
      continue;
#endif

    double start = (double) GST_TIME_AS_USECONDS (startTime) / GST_MSECOND;
    double end = (double) GST_TIME_AS_USECONDS (endTime) / GST_MSECOND;
    LOG(PR_LOG_DEBUG, "adding range [%f, %f] for [%li %li] size %li",
        start, end, startOffset, endOffset, resource->GetLength());
    aBuffered->Add(start, end);
  }

  return NS_OK;
}
Example 4: GetBuffered
nsresult nsWebMReader::GetBuffered(nsTimeRanges* aBuffered, PRInt64 aStartTime)
{
  MediaResource* resource = mDecoder->GetResource();

  uint64_t timecodeScale;
  if (!mContext || nestegg_tstamp_scale(mContext, &timecodeScale) == -1) {
    return NS_OK;
  }

  // Special case completely cached files. This also handles local files.
  if (resource->IsDataCachedToEndOfResource(0)) {
    uint64_t duration = 0;
    if (nestegg_duration(mContext, &duration) == 0) {
      aBuffered->Add(0, duration / NS_PER_S);
    }
  } else {
    MediaResource* resource = mDecoder->GetResource();
    nsTArray<MediaByteRange> ranges;
    nsresult res = resource->GetCachedRanges(ranges);
    NS_ENSURE_SUCCESS(res, res);

    PRInt64 startTimeOffsetNS = aStartTime * NS_PER_USEC;
    for (PRUint32 index = 0; index < ranges.Length(); index++) {
      mBufferedState->CalculateBufferedForRange(aBuffered,
                                                ranges[index].mStart,
                                                ranges[index].mEnd,
                                                timecodeScale,
                                                startTimeOffsetNS);
    }
  }

  return NS_OK;
}
Example 5: PlayBinSourceSetup
void GStreamerReader::PlayBinSourceSetup(GstAppSrc* aSource)
{
  mSource = GST_APP_SRC(aSource);
  gst_app_src_set_callbacks(mSource, &mSrcCallbacks, (gpointer) this, nullptr);
  MediaResource* resource = mDecoder->GetResource();

  /* do a short read to trigger a network request so that GetLength() below
   * returns something meaningful and not -1
   */
  char buf[512];
  unsigned int size = 0;
  resource->Read(buf, sizeof(buf), &size);
  resource->Seek(SEEK_SET, 0);

  /* now we should have a length */
  int64_t resourceLength = resource->GetLength();
  gst_app_src_set_size(mSource, resourceLength);
  if (resource->IsDataCachedToEndOfResource(0) ||
      (resourceLength != -1 && resourceLength <= SHORT_FILE_SIZE)) {
    /* let the demuxer work in pull mode for local files (or very short files)
     * so that we get optimal seeking accuracy/performance
     */
    LOG(PR_LOG_DEBUG, ("configuring random access, len %lld", resourceLength));
    gst_app_src_set_stream_type(mSource, GST_APP_STREAM_TYPE_RANDOM_ACCESS);
  } else {
    /* make the demuxer work in push mode so that seeking is kept to a minimum
     */
    LOG(PR_LOG_DEBUG, ("configuring push mode, len %lld", resourceLength));
    gst_app_src_set_stream_type(mSource, GST_APP_STREAM_TYPE_SEEKABLE);
  }
}
Example 6: GetBuffered
nsresult nsGStreamerReader::GetBuffered(nsTimeRanges* aBuffered,
                                        int64_t aStartTime)
{
  if (!mInfo.mHasVideo && !mInfo.mHasAudio) {
    return NS_OK;
  }

  GstFormat format = GST_FORMAT_TIME;
  MediaResource* resource = mDecoder->GetResource();
  gint64 resourceLength = resource->GetLength();
  nsTArray<MediaByteRange> ranges;
  resource->GetCachedRanges(ranges);

  if (mDecoder->OnStateMachineThread())
    /* Report the position from here while buffering as we can't report it from
     * the gstreamer threads that are actually reading from the resource
     */
    NotifyBytesConsumed();

  if (resource->IsDataCachedToEndOfResource(0)) {
    /* fast path for local or completely cached files */
    gint64 duration = 0;
    GstFormat format = GST_FORMAT_TIME;
    duration = QueryDuration();
    double end = (double) duration / GST_MSECOND;
    LOG(PR_LOG_DEBUG, ("complete range [0, %f] for [0, %li]",
                       end, resourceLength));
    aBuffered->Add(0, end);
    return NS_OK;
  }

  for (uint32_t index = 0; index < ranges.Length(); index++) {
    int64_t startOffset = ranges[index].mStart;
    int64_t endOffset = ranges[index].mEnd;
    gint64 startTime, endTime;

    if (!gst_element_query_convert(GST_ELEMENT(mPlayBin), GST_FORMAT_BYTES,
          startOffset, &format, &startTime) || format != GST_FORMAT_TIME)
      continue;
    if (!gst_element_query_convert(GST_ELEMENT(mPlayBin), GST_FORMAT_BYTES,
          endOffset, &format, &endTime) || format != GST_FORMAT_TIME)
      continue;

    double start = (double) GST_TIME_AS_USECONDS (startTime) / GST_MSECOND;
    double end = (double) GST_TIME_AS_USECONDS (endTime) / GST_MSECOND;
    LOG(PR_LOG_DEBUG, ("adding range [%f, %f] for [%li %li] size %li",
                       start, end, startOffset, endOffset, resourceLength));
    aBuffered->Add(start, end);
  }

  return NS_OK;
}
Example 7: PlayBinSourceSetup
void nsGStreamerReader::PlayBinSourceSetup(GstAppSrc *aSource)
{
  mSource = GST_APP_SRC(aSource);
  gst_app_src_set_callbacks(mSource, &mSrcCallbacks, (gpointer) this, NULL);
  MediaResource* resource = mDecoder->GetResource();
  int64_t len = resource->GetLength();
  gst_app_src_set_size(mSource, len);

  if (resource->IsDataCachedToEndOfResource(0) ||
      (len != -1 && len <= SHORT_FILE_SIZE)) {
    /* let the demuxer work in pull mode for local files (or very short files)
     * so that we get optimal seeking accuracy/performance
     */
    LOG(PR_LOG_ERROR, ("configuring random access"));
    gst_app_src_set_stream_type(mSource, GST_APP_STREAM_TYPE_RANDOM_ACCESS);
  } else {
    /* make the demuxer work in push mode so that seeking is kept to a minimum
     */
    gst_app_src_set_stream_type(mSource, GST_APP_STREAM_TYPE_SEEKABLE);
  }
}
Example 8: GetBuffered
nsresult WebMReader::GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime)
{
  MediaResource* resource = mDecoder->GetResource();

  uint64_t timecodeScale;
  if (!mContext || nestegg_tstamp_scale(mContext, &timecodeScale) == -1) {
    return NS_OK;
  }

  // Special case completely cached files. This also handles local files.
  bool isFullyCached = resource->IsDataCachedToEndOfResource(0);
  if (isFullyCached) {
    uint64_t duration = 0;
    if (nestegg_duration(mContext, &duration) == 0) {
      aBuffered->Add(0, duration / NS_PER_S);
    }
  }

  uint32_t bufferedLength = 0;
  aBuffered->GetLength(&bufferedLength);

  // Either the file is not fully cached, or we couldn't find a duration in
  // the WebM bitstream.
  if (!isFullyCached || !bufferedLength) {
    MediaResource* resource = mDecoder->GetResource();
    nsTArray<MediaByteRange> ranges;
    nsresult res = resource->GetCachedRanges(ranges);
    NS_ENSURE_SUCCESS(res, res);

    for (uint32_t index = 0; index < ranges.Length(); index++) {
      uint64_t start, end;
      bool rv = mBufferedState->CalculateBufferedForRange(ranges[index].mStart,
                                                          ranges[index].mEnd,
                                                          &start, &end);
      if (rv) {
        double startTime = start * timecodeScale / NS_PER_S - aStartTime;
        double endTime = end * timecodeScale / NS_PER_S - aStartTime;

#ifdef MOZ_DASH
        // If this range extends to the end of a cluster, the true end time is
        // the cluster's end timestamp. Since WebM frames do not have an end
        // timestamp, a fully cached cluster must be reported with the correct
        // end time of its final frame. Otherwise, buffered ranges could be
        // reported with missing frames at cluster boundaries, specifically
        // boundaries where stream switching has occurred.
        if (!mClusterByteRanges.IsEmpty()) {
          for (uint32_t clusterIndex = 0;
               clusterIndex < (mClusterByteRanges.Length() - 1);
               clusterIndex++) {
            if (ranges[index].mEnd >= mClusterByteRanges[clusterIndex].mEnd) {
              double clusterEndTime =
                mClusterByteRanges[clusterIndex + 1].mStartTime / USEC_PER_S;
              if (endTime < clusterEndTime) {
                LOG(PR_LOG_DEBUG, ("End of cluster: endTime becoming %0.3fs",
                                   clusterEndTime));
                endTime = clusterEndTime;
              }
            }
          }
        }
#endif
        // If this range extends to the end of the file, the true end time
        // is the file's duration.
        if (resource->IsDataCachedToEndOfResource(ranges[index].mStart)) {
          uint64_t duration = 0;
          if (nestegg_duration(mContext, &duration) == 0) {
            endTime = duration / NS_PER_S;
          }
        }

        aBuffered->Add(startTime, endTime);
      }
    }
  }

  return NS_OK;
}