This article collects typical usage examples of the Java method com.google.android.exoplayer.util.Util.scaleLargeTimestamp. If you are unsure what Util.scaleLargeTimestamp does or how to call it, the curated examples below should help; you can also read further about the enclosing class, com.google.android.exoplayer.util.Util.
The following 14 code examples show Util.scaleLargeTimestamp in use, ordered by popularity.
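Before the individual examples, a minimal self-contained sketch may help clarify what the method computes: Util.scaleLargeTimestamp(timestamp, multiplier, divisor) returns timestamp * multiplier / divisor while avoiding 64-bit overflow in the intermediate product. The class name and values below are illustrative only.

import com.google.android.exoplayer.C;
import com.google.android.exoplayer.util.Util;

public final class ScaleTimestampDemo {
  public static void main(String[] args) {
    long timescale = 1000;      // e.g. an MP4 mvhd declaring 1000 units per second
    long duration = 123456;     // hypothetical duration in timescale units
    long durationUs = Util.scaleLargeTimestamp(duration, C.MICROS_PER_SECOND, timescale);
    System.out.println(durationUs); // 123456000 microseconds (123.456 seconds)
  }
}

In the examples that follow, the multiplier is usually C.MICROS_PER_SECOND and the divisor is a container- or codec-specific timescale, so the call converts stream time units into microseconds.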
Example 1: StreamElement
import com.google.android.exoplayer.util.Util; // import the package/class on which the method depends
public StreamElement(String baseUri, String chunkTemplate, int type, String subType,
long timescale, String name, int qualityLevels, int maxWidth, int maxHeight,
int displayWidth, int displayHeight, String language, TrackElement[] tracks,
List<Long> chunkStartTimes, long lastChunkDuration) {
this.baseUri = baseUri;
this.chunkTemplate = chunkTemplate;
this.type = type;
this.subType = subType;
this.timescale = timescale;
this.name = name;
this.qualityLevels = qualityLevels;
this.maxWidth = maxWidth;
this.maxHeight = maxHeight;
this.displayWidth = displayWidth;
this.displayHeight = displayHeight;
this.language = language;
this.tracks = tracks;
this.chunkCount = chunkStartTimes.size();
this.chunkStartTimes = chunkStartTimes;
lastChunkDurationUs =
Util.scaleLargeTimestamp(lastChunkDuration, C.MICROS_PER_SECOND, timescale);
chunkStartTimesUs =
Util.scaleLargeTimestamps(chunkStartTimes, C.MICROS_PER_SECOND, timescale);
}
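A note on why the constructor above calls Util.scaleLargeTimestamp rather than computing lastChunkDuration * C.MICROS_PER_SECOND / timescale directly: for large timestamps the intermediate product can overflow a long, whereas scaleLargeTimestamp reduces the scaling factor when multiplier and divisor divide evenly and falls back to double arithmetic otherwise. A hypothetical comparison (all values made up):

import com.google.android.exoplayer.util.Util;

public final class OverflowComparison {
  public static void main(String[] args) {
    long timescale = 90000;                            // 90 kHz, a common video timescale
    long duration = 40L * 365 * 24 * 3600 * timescale; // roughly 40 years of 90 kHz ticks
    long naiveUs = duration * 1000000L / timescale;    // intermediate product overflows a long; result is meaningless
    long safeUs = Util.scaleLargeTimestamp(duration, 1000000L, timescale); // roughly 1.26e15 microseconds
    System.out.println(naiveUs + " vs " + safeUs);
  }
}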
Example 2: parseTrak
import com.google.android.exoplayer.util.Util; // import the package/class on which the method depends
/**
* Parses a trak atom (defined in 14496-12).
*
* @param trak Atom to parse.
* @param mvhd Movie header atom, used to get the timescale.
* @return A {@link Track} instance, or {@code null} if the track's type isn't supported.
*/
public static Track parseTrak(Atom.ContainerAtom trak, Atom.LeafAtom mvhd) {
Atom.ContainerAtom mdia = trak.getContainerAtomOfType(Atom.TYPE_mdia);
int trackType = parseHdlr(mdia.getLeafAtomOfType(Atom.TYPE_hdlr).data);
if (trackType != Track.TYPE_AUDIO && trackType != Track.TYPE_VIDEO
&& trackType != Track.TYPE_TEXT && trackType != Track.TYPE_SUBTITLE) {
return null;
}
Pair<Integer, Long> header = parseTkhd(trak.getLeafAtomOfType(Atom.TYPE_tkhd).data);
int id = header.first;
long duration = header.second;
long movieTimescale = parseMvhd(mvhd.data);
long durationUs;
if (duration == -1) {
durationUs = C.UNKNOWN_TIME_US;
} else {
durationUs = Util.scaleLargeTimestamp(duration, C.MICROS_PER_SECOND, movieTimescale);
}
Atom.ContainerAtom stbl = mdia.getContainerAtomOfType(Atom.TYPE_minf)
.getContainerAtomOfType(Atom.TYPE_stbl);
long mediaTimescale = parseMdhd(mdia.getLeafAtomOfType(Atom.TYPE_mdhd).data);
StsdDataHolder stsdData = parseStsd(stbl.getLeafAtomOfType(Atom.TYPE_stsd).data, durationUs);
return stsdData.mediaFormat == null ? null
: new Track(id, trackType, mediaTimescale, durationUs, stsdData.mediaFormat,
stsdData.trackEncryptionBoxes, stsdData.nalUnitLengthFieldLength);
}
Example 3: create
import com.google.android.exoplayer.util.Util; // import the package/class on which the method depends
/**
* Returns a {@link XingSeeker} for seeking in the stream, if required information is present.
* Returns {@code null} if not. On returning, {@code frame}'s position is not specified so the
* caller should reset it.
*
* @param mpegAudioHeader The MPEG audio header associated with the frame.
* @param frame The data in this audio frame, with its position set to immediately after the
* 'XING' or 'INFO' tag.
* @param position The position (byte offset) of the start of this frame in the stream.
* @param inputLength The length of the stream in bytes.
* @return A {@link XingSeeker} for seeking in the stream, or {@code null} if the required
* information is not present.
*/
public static XingSeeker create(MpegAudioHeader mpegAudioHeader, ParsableByteArray frame,
long position, long inputLength) {
int samplesPerFrame = mpegAudioHeader.samplesPerFrame;
int sampleRate = mpegAudioHeader.sampleRate;
long firstFramePosition = position + mpegAudioHeader.frameSize;
int flags = frame.readInt();
// Frame count, size and table of contents are required to use this header.
if ((flags & 0x07) != 0x07) {
return null;
}
// Read frame count, as (flags & 1) == 1.
int frameCount = frame.readUnsignedIntToInt();
if (frameCount == 0) {
return null;
}
long durationUs =
Util.scaleLargeTimestamp(frameCount, samplesPerFrame * 1000000L, sampleRate);
// Read size in bytes, as (flags & 2) == 2.
long sizeBytes = frame.readUnsignedIntToInt();
// Read table-of-contents as (flags & 4) == 4.
frame.skipBytes(1);
long[] tableOfContents = new long[99];
for (int i = 0; i < 99; i++) {
tableOfContents[i] = frame.readUnsignedByte();
}
// TODO: Handle encoder delay and padding in 3 bytes offset by xingBase + 213 bytes:
// delay = (frame.readUnsignedByte() << 4) + (frame.readUnsignedByte() >> 4);
// padding = ((frame.readUnsignedByte() & 0x0F) << 8) + frame.readUnsignedByte();
return new XingSeeker(tableOfContents, firstFramePosition, sizeBytes, durationUs, inputLength);
}
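To make the duration calculation above concrete, here is a worked call with hypothetical numbers: an MPEG-1 Layer III frame carries 1152 samples, so 10000 frames at 44100 Hz last about 261 seconds.

import com.google.android.exoplayer.util.Util;

public final class XingDurationDemo {
  public static void main(String[] args) {
    long frameCount = 10000;      // hypothetical frame count read from the Xing header
    long samplesPerFrame = 1152;  // MPEG-1 Layer III
    long sampleRate = 44100;
    long durationUs = Util.scaleLargeTimestamp(frameCount, samplesPerFrame * 1000000L, sampleRate);
    System.out.println(durationUs); // ≈ 261224489 microseconds, about 4 minutes 21 seconds
  }
}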
Example 4: getSegmentTimeUs
import com.google.android.exoplayer.util.Util; // import the package/class on which the method depends
/**
* @see DashSegmentIndex#getTimeUs(int)
*/
public final long getSegmentTimeUs(int sequenceNumber) {
long unscaledSegmentTime;
if (segmentTimeline != null) {
unscaledSegmentTime = segmentTimeline.get(sequenceNumber - startNumber).startTime
- presentationTimeOffset;
} else {
unscaledSegmentTime = (sequenceNumber - startNumber) * duration;
}
return Util.scaleLargeTimestamp(unscaledSegmentTime, C.MICROS_PER_SECOND, timescale);
}
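As a hypothetical illustration of the fixed-duration branch above: with startNumber = 1, a timescale of 1000 and a segment duration of 2000 timescale units (two seconds), segment 5 starts 8 seconds into the period.

import com.google.android.exoplayer.C;
import com.google.android.exoplayer.util.Util;

public final class SegmentTimeDemo {
  public static void main(String[] args) {
    long timescale = 1000;     // hypothetical DASH timescale
    long duration = 2000;      // two seconds per segment, in timescale units
    int startNumber = 1;
    int sequenceNumber = 5;
    long unscaledSegmentTime = (sequenceNumber - startNumber) * duration;
    long timeUs = Util.scaleLargeTimestamp(unscaledSegmentTime, C.MICROS_PER_SECOND, timescale);
    System.out.println(timeUs); // 8000000 microseconds
  }
}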
Example 5: parseTrak
import com.google.android.exoplayer.util.Util; // import the package/class on which the method depends
/**
* Parses a trak atom (defined in 14496-12).
*
* @param trak Atom to parse.
* @param mvhd Movie header atom, used to get the timescale.
* @param duration The duration in units of the timescale declared in the mvhd atom, or -1 if the
* duration should be parsed from the tkhd atom.
* @param isQuickTime True for QuickTime media. False otherwise.
* @return A {@link Track} instance, or {@code null} if the track's type isn't supported.
*/
public static Track parseTrak(Atom.ContainerAtom trak, Atom.LeafAtom mvhd, long duration,
boolean isQuickTime) {
Atom.ContainerAtom mdia = trak.getContainerAtomOfType(Atom.TYPE_mdia);
int trackType = parseHdlr(mdia.getLeafAtomOfType(Atom.TYPE_hdlr).data);
if (trackType != Track.TYPE_soun && trackType != Track.TYPE_vide && trackType != Track.TYPE_text
&& trackType != Track.TYPE_sbtl && trackType != Track.TYPE_subt) {
return null;
}
TkhdData tkhdData = parseTkhd(trak.getLeafAtomOfType(Atom.TYPE_tkhd).data);
if (duration == -1) {
duration = tkhdData.duration;
}
long movieTimescale = parseMvhd(mvhd.data);
long durationUs;
if (duration == -1) {
durationUs = C.UNKNOWN_TIME_US;
} else {
durationUs = Util.scaleLargeTimestamp(duration, C.MICROS_PER_SECOND, movieTimescale);
}
Atom.ContainerAtom stbl = mdia.getContainerAtomOfType(Atom.TYPE_minf)
.getContainerAtomOfType(Atom.TYPE_stbl);
Pair<Long, String> mdhdData = parseMdhd(mdia.getLeafAtomOfType(Atom.TYPE_mdhd).data);
StsdData stsdData = parseStsd(stbl.getLeafAtomOfType(Atom.TYPE_stsd).data, tkhdData.id,
durationUs, tkhdData.rotationDegrees, mdhdData.second, isQuickTime);
Pair<long[], long[]> edtsData = parseEdts(trak.getContainerAtomOfType(Atom.TYPE_edts));
return stsdData.mediaFormat == null ? null
: new Track(tkhdData.id, trackType, mdhdData.first, movieTimescale, durationUs,
stsdData.mediaFormat, stsdData.trackEncryptionBoxes, stsdData.nalUnitLengthFieldLength,
edtsData.first, edtsData.second);
}
Example 6: create
import com.google.android.exoplayer.util.Util; // import the package/class on which the method depends
/**
* Returns a {@link XingSeeker} for seeking in the stream, if required information is present.
* Returns {@code null} if not. On returning, {@code frame}'s position is not specified so the
* caller should reset it.
*
* @param mpegAudioHeader The MPEG audio header associated with the frame.
* @param frame The data in this audio frame, with its position set to immediately after the
* 'Xing' or 'Info' tag.
* @param position The position (byte offset) of the start of this frame in the stream.
* @param inputLength The length of the stream in bytes.
* @return A {@link XingSeeker} for seeking in the stream, or {@code null} if the required
* information is not present.
*/
public static XingSeeker create(MpegAudioHeader mpegAudioHeader, ParsableByteArray frame,
long position, long inputLength) {
int samplesPerFrame = mpegAudioHeader.samplesPerFrame;
int sampleRate = mpegAudioHeader.sampleRate;
long firstFramePosition = position + mpegAudioHeader.frameSize;
int flags = frame.readInt();
int frameCount;
if ((flags & 0x01) != 0x01 || (frameCount = frame.readUnsignedIntToInt()) == 0) {
// If the frame count is missing/invalid, the header can't be used to determine the duration.
return null;
}
long durationUs = Util.scaleLargeTimestamp(frameCount, samplesPerFrame * C.MICROS_PER_SECOND,
sampleRate);
if ((flags & 0x06) != 0x06) {
// If the size in bytes or table of contents is missing, the stream is not seekable.
return new XingSeeker(firstFramePosition, durationUs, inputLength);
}
long sizeBytes = frame.readUnsignedIntToInt();
frame.skipBytes(1);
long[] tableOfContents = new long[99];
for (int i = 0; i < 99; i++) {
tableOfContents[i] = frame.readUnsignedByte();
}
// TODO: Handle encoder delay and padding in 3 bytes offset by xingBase + 213 bytes:
// delay = (frame.readUnsignedByte() << 4) + (frame.readUnsignedByte() >> 4);
// padding = ((frame.readUnsignedByte() & 0x0F) << 8) + frame.readUnsignedByte();
return new XingSeeker(firstFramePosition, durationUs, inputLength, tableOfContents,
sizeBytes, mpegAudioHeader.frameSize);
}
Example 7: parseSidx
import com.google.android.exoplayer.util.Util; // import the package/class on which the method depends
/**
* Parses a sidx atom (defined in 14496-12).
*/
private static ChunkIndex parseSidx(ParsableByteArray atom, long inputPosition) {
atom.setPosition(Atom.HEADER_SIZE);
int fullAtom = atom.readInt();
int version = Atom.parseFullAtomVersion(fullAtom);
atom.skipBytes(4);
long timescale = atom.readUnsignedInt();
long earliestPresentationTime;
long offset = inputPosition;
if (version == 0) {
earliestPresentationTime = atom.readUnsignedInt();
offset += atom.readUnsignedInt();
} else {
earliestPresentationTime = atom.readUnsignedLongToLong();
offset += atom.readUnsignedLongToLong();
}
atom.skipBytes(2);
int referenceCount = atom.readUnsignedShort();
int[] sizes = new int[referenceCount];
long[] offsets = new long[referenceCount];
long[] durationsUs = new long[referenceCount];
long[] timesUs = new long[referenceCount];
long time = earliestPresentationTime;
long timeUs = Util.scaleLargeTimestamp(time, C.MICROS_PER_SECOND, timescale);
for (int i = 0; i < referenceCount; i++) {
int firstInt = atom.readInt();
int type = 0x80000000 & firstInt;
if (type != 0) {
throw new IllegalStateException("Unhandled indirect reference");
}
long referenceDuration = atom.readUnsignedInt();
sizes[i] = 0x7fffffff & firstInt;
offsets[i] = offset;
// Calculate time and duration values such that any rounding errors are consistent, i.e. that
// timesUs[i] + durationsUs[i] == timesUs[i + 1].
timesUs[i] = timeUs;
time += referenceDuration;
timeUs = Util.scaleLargeTimestamp(time, C.MICROS_PER_SECOND, timescale);
durationsUs[i] = timeUs - timesUs[i];
atom.skipBytes(4);
offset += sizes[i];
}
return new ChunkIndex(sizes, offsets, durationsUs, timesUs);
}
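The comment about rounding in the loop above is worth a small demonstration. Scaling each reference duration independently and summing can drift away from the scaled cumulative time, whereas scaling the running total and taking differences, as parseSidx does, guarantees timesUs[i] + durationsUs[i] == timesUs[i + 1] by construction. A sketch with made-up values:

import com.google.android.exoplayer.C;
import com.google.android.exoplayer.util.Util;

public final class SidxRoundingDemo {
  public static void main(String[] args) {
    long timescale = 44100;                                   // hypothetical sidx timescale
    long[] referenceDurations = {44101, 44099, 44100, 44100}; // hypothetical reference durations
    long[] timesUs = new long[referenceDurations.length];
    long[] durationsUs = new long[referenceDurations.length];
    long time = 0;
    long timeUs = 0;
    for (int i = 0; i < referenceDurations.length; i++) {
      timesUs[i] = timeUs;
      time += referenceDurations[i];
      // Scale the running total, then take the difference, exactly as parseSidx does.
      long nextTimeUs = Util.scaleLargeTimestamp(time, C.MICROS_PER_SECOND, timescale);
      durationsUs[i] = nextTimeUs - timeUs;
      timeUs = nextTimeUs;
    }
    for (int i = 0; i < referenceDurations.length - 1; i++) {
      System.out.println((timesUs[i] + durationsUs[i]) == timesUs[i + 1]); // always true
    }
  }
}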
Example 8: create
import com.google.android.exoplayer.util.Util; // import the package/class on which the method depends
/**
* Returns a {@link VbriSeeker} for seeking in the stream, if required information is present.
* Returns {@code null} if not. On returning, {@code frame}'s position is not specified so the
* caller should reset it.
*
* @param mpegAudioHeader The MPEG audio header associated with the frame.
* @param frame The data in this audio frame, with its position set to immediately after the
* 'VBRI' tag.
* @param position The position (byte offset) of the start of this frame in the stream.
* @return A {@link VbriSeeker} for seeking in the stream, or {@code null} if the required
* information is not present.
*/
public static VbriSeeker create(MpegAudioHeader mpegAudioHeader, ParsableByteArray frame,
long position) {
frame.skipBytes(10);
int numFrames = frame.readInt();
if (numFrames <= 0) {
return null;
}
int sampleRate = mpegAudioHeader.sampleRate;
long durationUs = Util.scaleLargeTimestamp(
numFrames, 1000000L * (sampleRate >= 32000 ? 1152 : 576), sampleRate);
int numEntries = frame.readUnsignedShort();
int scale = frame.readUnsignedShort();
int entrySize = frame.readUnsignedShort();
// Read entries in the VBRI header.
long[] timesUs = new long[numEntries];
long[] offsets = new long[numEntries];
long segmentDurationUs = durationUs / numEntries;
long now = 0;
int segmentIndex = 0;
while (segmentIndex < numEntries) {
int numBytes;
switch (entrySize) {
case 1:
numBytes = frame.readUnsignedByte();
break;
case 2:
numBytes = frame.readUnsignedShort();
break;
case 3:
numBytes = frame.readUnsignedInt24();
break;
case 4:
numBytes = frame.readUnsignedIntToInt();
break;
default:
return null;
}
now += segmentDurationUs;
timesUs[segmentIndex] = now;
position += numBytes * scale;
offsets[segmentIndex] = position;
segmentIndex++;
}
return new VbriSeeker(timesUs, offsets, position + mpegAudioHeader.frameSize, durationUs);
}
Example 9: scaleTimecodeToUs
import com.google.android.exoplayer.util.Util; // import the package/class on which the method depends
private long scaleTimecodeToUs(long unscaledTimecode) throws ParserException {
if (timecodeScale == C.UNKNOWN_TIME_US) {
throw new ParserException("Can't scale timecode prior to timecodeScale being set.");
}
return Util.scaleLargeTimestamp(unscaledTimecode, timecodeScale, 1000);
}
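Matroska timecodes are expressed in units of TimecodeScale nanoseconds, so the call above converts nanosecond-scaled timecodes to microseconds. With the common default TimecodeScale of 1,000,000 (one millisecond per timecode unit), a block timecode of 2500 corresponds to 2.5 seconds. A hypothetical check:

import com.google.android.exoplayer.util.Util;

public final class TimecodeScaleDemo {
  public static void main(String[] args) {
    long timecodeScale = 1000000; // default Matroska TimecodeScale, in nanoseconds per unit
    long timecode = 2500;         // hypothetical block timecode
    long timeUs = Util.scaleLargeTimestamp(timecode, timecodeScale, 1000);
    System.out.println(timeUs);   // 2500000 microseconds (2.5 seconds)
  }
}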
Example 10: parseSidx
import com.google.android.exoplayer.util.Util; // import the package/class on which the method depends
/**
* Parses a sidx atom (defined in 14496-12).
*/
private static ChunkIndex parseSidx(ParsableByteArray atom, long inputPosition)
throws ParserException {
atom.setPosition(Atom.HEADER_SIZE);
int fullAtom = atom.readInt();
int version = Atom.parseFullAtomVersion(fullAtom);
atom.skipBytes(4);
long timescale = atom.readUnsignedInt();
long earliestPresentationTime;
long offset = inputPosition;
if (version == 0) {
earliestPresentationTime = atom.readUnsignedInt();
offset += atom.readUnsignedInt();
} else {
earliestPresentationTime = atom.readUnsignedLongToLong();
offset += atom.readUnsignedLongToLong();
}
atom.skipBytes(2);
int referenceCount = atom.readUnsignedShort();
int[] sizes = new int[referenceCount];
long[] offsets = new long[referenceCount];
long[] durationsUs = new long[referenceCount];
long[] timesUs = new long[referenceCount];
long time = earliestPresentationTime;
long timeUs = Util.scaleLargeTimestamp(time, C.MICROS_PER_SECOND, timescale);
for (int i = 0; i < referenceCount; i++) {
int firstInt = atom.readInt();
int type = 0x80000000 & firstInt;
if (type != 0) {
throw new ParserException("Unhandled indirect reference");
}
long referenceDuration = atom.readUnsignedInt();
sizes[i] = 0x7FFFFFFF & firstInt;
offsets[i] = offset;
// Calculate time and duration values such that any rounding errors are consistent, i.e. that
// timesUs[i] + durationsUs[i] == timesUs[i + 1].
timesUs[i] = timeUs;
time += referenceDuration;
timeUs = Util.scaleLargeTimestamp(time, C.MICROS_PER_SECOND, timescale);
durationsUs[i] = timeUs - timesUs[i];
atom.skipBytes(4);
offset += sizes[i];
}
return new ChunkIndex(sizes, offsets, durationsUs, timesUs);
}
Example 11: create
import com.google.android.exoplayer.util.Util; // import the package/class on which the method depends
/**
* Returns a {@link VbriSeeker} for seeking in the stream, if required information is present.
* Returns {@code null} if not. On returning, {@code frame}'s position is not specified so the
* caller should reset it.
*
* @param mpegAudioHeader The MPEG audio header associated with the frame.
* @param frame The data in this audio frame, with its position set to immediately after the
* 'VBRI' tag.
* @param position The position (byte offset) of the start of this frame in the stream.
* @param inputLength The length of the stream in bytes.
* @return A {@link VbriSeeker} for seeking in the stream, or {@code null} if the required
* information is not present.
*/
public static VbriSeeker create(MpegAudioHeader mpegAudioHeader, ParsableByteArray frame,
long position, long inputLength) {
frame.skipBytes(10);
int numFrames = frame.readInt();
if (numFrames <= 0) {
return null;
}
int sampleRate = mpegAudioHeader.sampleRate;
long durationUs = Util.scaleLargeTimestamp(numFrames,
C.MICROS_PER_SECOND * (sampleRate >= 32000 ? 1152 : 576), sampleRate);
int entryCount = frame.readUnsignedShort();
int scale = frame.readUnsignedShort();
int entrySize = frame.readUnsignedShort();
frame.skipBytes(2);
// Skip the frame containing the VBRI header.
position += mpegAudioHeader.frameSize;
// Read table of contents entries.
long[] timesUs = new long[entryCount + 1];
long[] positions = new long[entryCount + 1];
timesUs[0] = 0L;
positions[0] = position;
for (int index = 1; index < timesUs.length; index++) {
int segmentSize;
switch (entrySize) {
case 1:
segmentSize = frame.readUnsignedByte();
break;
case 2:
segmentSize = frame.readUnsignedShort();
break;
case 3:
segmentSize = frame.readUnsignedInt24();
break;
case 4:
segmentSize = frame.readUnsignedIntToInt();
break;
default:
return null;
}
position += segmentSize * scale;
timesUs[index] = index * durationUs / entryCount;
positions[index] =
inputLength == C.LENGTH_UNBOUNDED ? position : Math.min(inputLength, position);
}
return new VbriSeeker(timesUs, positions, durationUs);
}
Example 12: getVideoTimestampUs
import com.google.android.exoplayer.util.Util; // import the package/class on which the method depends
/** Returns a video timestamp in microseconds corresponding to {@code timeUnits}. */
private static long getVideoTimestampUs(int timeUnits) {
return Util.scaleLargeTimestamp(timeUnits, C.MICROS_PER_SECOND, TIMESCALE);
}
Example 13: SmoothStreamingManifest
import com.google.android.exoplayer.util.Util; // import the package/class on which the method depends
/**
* @param majorVersion The client manifest major version.
* @param minorVersion The client manifest minor version.
* @param timescale The timescale of the media as the number of units that pass in one second.
* @param duration The overall presentation duration in units of the timescale attribute, or 0
* if the duration is unknown.
* @param dvrWindowLength The length of the trailing window in units of the timescale attribute,
* or 0 if this attribute is unspecified or not applicable.
* @param lookAheadCount The number of fragments in a lookahead, or -1 if this attribute is
* unspecified or not applicable.
* @param isLive True if the manifest describes a live presentation still in progress. False
* otherwise.
* @param protectionElement Content protection information, or null if the content is not
* protected.
* @param streamElements The contained stream elements.
*/
public SmoothStreamingManifest(int majorVersion, int minorVersion, long timescale, long duration,
long dvrWindowLength, int lookAheadCount, boolean isLive, ProtectionElement protectionElement,
StreamElement[] streamElements) {
this.majorVersion = majorVersion;
this.minorVersion = minorVersion;
this.lookAheadCount = lookAheadCount;
this.isLive = isLive;
this.protectionElement = protectionElement;
this.streamElements = streamElements;
dvrWindowLengthUs = dvrWindowLength == 0 ? C.UNKNOWN_TIME_US
: Util.scaleLargeTimestamp(dvrWindowLength, C.MICROS_PER_SECOND, timescale);
durationUs = duration == 0 ? C.UNKNOWN_TIME_US
: Util.scaleLargeTimestamp(duration, C.MICROS_PER_SECOND, timescale);
}
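Smooth Streaming manifests commonly declare a timescale of 10,000,000 (100 ns units). As a hypothetical check of the conversion above, a Duration attribute of 1,234,567,890 such units comes out as 123,456,789 microseconds, a little over two minutes.

import com.google.android.exoplayer.C;
import com.google.android.exoplayer.util.Util;

public final class ManifestDurationDemo {
  public static void main(String[] args) {
    long timescale = 10000000L;   // 100 ns units, the usual Smooth Streaming timescale
    long duration = 1234567890L;  // hypothetical Duration attribute from the manifest
    long durationUs = Util.scaleLargeTimestamp(duration, C.MICROS_PER_SECOND, timescale);
    System.out.println(durationUs); // 123456789 microseconds
  }
}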
Example 14: getPresentationTimeOffsetUs
import com.google.android.exoplayer.util.Util; // import the package/class on which the method depends
/**
* Gets the presentation time offset, in microseconds.
*
* @return The presentation time offset, in microseconds.
*/
public long getPresentationTimeOffsetUs() {
return Util.scaleLargeTimestamp(presentationTimeOffset, C.MICROS_PER_SECOND, timescale);
}
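The same scaling applies to the DASH presentation time offset used in Example 4: there the offset is subtracted on the unscaled timeline, while this accessor exposes it in microseconds. A hypothetical value at a 90 kHz timescale:

import com.google.android.exoplayer.C;
import com.google.android.exoplayer.util.Util;

public final class PresentationTimeOffsetDemo {
  public static void main(String[] args) {
    long timescale = 90000;                // hypothetical DASH timescale
    long presentationTimeOffset = 2700000; // hypothetical offset in timescale units (30 seconds)
    long offsetUs = Util.scaleLargeTimestamp(presentationTimeOffset, C.MICROS_PER_SECOND, timescale);
    System.out.println(offsetUs);          // 30000000 microseconds
  }
}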