This article collects typical usage examples of the Java method com.google.android.exoplayer2.util.Util.scaleLargeTimestamp. If you are wondering what Util.scaleLargeTimestamp does, how to call it, or want to see it used in context, the curated samples below should help. You can also explore the enclosing class, com.google.android.exoplayer2.util.Util, for more background.
The following presents 7 code examples of Util.scaleLargeTimestamp, ordered by popularity by default.
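Before the examples, it helps to know the method's contract: Util.scaleLargeTimestamp(timestamp, multiplier, divisor) returns timestamp * multiplier / divisor while avoiding overflow of the intermediate product where it can (when neither factor divides evenly, the result may be computed approximately in floating point). A minimal sketch with illustrative numbers, not taken from any example below:

import com.google.android.exoplayer2.C;
import com.google.android.exoplayer2.util.Util;

// Convert an extreme tick count in a 1000 Hz timescale to microseconds.
// A naive ticks * 1_000_000 would overflow a long here; scaleLargeTimestamp
// can reduce the factors first because 1_000_000 % 1000 == 0.
long ticks = 9_000_000_000_000_000L;
long us = Util.scaleLargeTimestamp(ticks, C.MICROS_PER_SECOND, 1000);
// us == 9_000_000_000_000_000_000L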
Example 1: parseTrak
import com.google.android.exoplayer2.util.Util; // import the package/class the method depends on
/**
* Parses a trak atom (defined in 14496-12).
*
* @param trak Atom to decode.
* @param mvhd Movie header atom, used to get the timescale.
* @param duration The duration in units of the timescale declared in the mvhd atom, or
* {@link C#TIME_UNSET} if the duration should be parsed from the tkhd atom.
* @param drmInitData {@link DrmInitData} to be included in the format.
* @param isQuickTime True for QuickTime media. False otherwise.
* @return A {@link Track} instance, or {@code null} if the track's type isn't supported.
*/
public static Track parseTrak(Atom.ContainerAtom trak, Atom.LeafAtom mvhd, long duration,
DrmInitData drmInitData, boolean isQuickTime) throws ParserException {
Atom.ContainerAtom mdia = trak.getContainerAtomOfType(Atom.TYPE_mdia);
int trackType = parseHdlr(mdia.getLeafAtomOfType(Atom.TYPE_hdlr).data);
if (trackType == C.TRACK_TYPE_UNKNOWN) {
return null;
}
TkhdData tkhdData = parseTkhd(trak.getLeafAtomOfType(Atom.TYPE_tkhd).data);
if (duration == C.TIME_UNSET) {
duration = tkhdData.duration;
}
long movieTimescale = parseMvhd(mvhd.data);
long durationUs;
if (duration == C.TIME_UNSET) {
durationUs = C.TIME_UNSET;
} else {
durationUs = Util.scaleLargeTimestamp(duration, C.MICROS_PER_SECOND, movieTimescale);
}
Atom.ContainerAtom stbl = mdia.getContainerAtomOfType(Atom.TYPE_minf)
.getContainerAtomOfType(Atom.TYPE_stbl);
Pair<Long, String> mdhdData = parseMdhd(mdia.getLeafAtomOfType(Atom.TYPE_mdhd).data);
StsdData stsdData = parseStsd(stbl.getLeafAtomOfType(Atom.TYPE_stsd).data, tkhdData.id,
tkhdData.rotationDegrees, mdhdData.second, drmInitData, isQuickTime);
Pair<long[], long[]> edtsData = parseEdts(trak.getContainerAtomOfType(Atom.TYPE_edts));
return stsdData.format == null ? null
: new Track(tkhdData.id, trackType, mdhdData.first, movieTimescale, durationUs,
stsdData.format, stsdData.requiredSampleTransformation, stsdData.trackEncryptionBoxes,
stsdData.nalUnitLengthFieldLength, edtsData.first, edtsData.second);
}
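The scaling step above is the canonical MP4 conversion: the mvhd timescale is ticks per second, so the duration in microseconds is duration * 1_000_000 / movieTimescale. A hypothetical worked call, using the same imports as the example (600 is a common QuickTime timescale; the values are illustrative):

// 1200 ticks at 600 ticks/second is two seconds.
long durationUs = Util.scaleLargeTimestamp(1200, C.MICROS_PER_SECOND, 600);
// durationUs == 2_000_000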
Example 2: onEmsgLeafAtomRead
import com.google.android.exoplayer2.util.Util; // import the package/class the method depends on
/**
* Handles an emsg atom (defined in 23009-1).
*/
private void onEmsgLeafAtomRead(ParsableByteArray atom) {
if (eventMessageTrackOutput == null) {
return;
}
// Parse the event's presentation time delta.
atom.setPosition(Atom.FULL_HEADER_SIZE);
atom.readNullTerminatedString(); // schemeIdUri
atom.readNullTerminatedString(); // value
long timescale = atom.readUnsignedInt();
long presentationTimeDeltaUs =
Util.scaleLargeTimestamp(atom.readUnsignedInt(), C.MICROS_PER_SECOND, timescale);
// Output the sample data.
atom.setPosition(Atom.FULL_HEADER_SIZE);
int sampleSize = atom.bytesLeft();
eventMessageTrackOutput.sampleData(atom, sampleSize);
// Output the sample metadata.
if (segmentIndexEarliestPresentationTimeUs != C.TIME_UNSET) {
// We can output the sample metadata immediately.
eventMessageTrackOutput.sampleMetadata(
segmentIndexEarliestPresentationTimeUs + presentationTimeDeltaUs,
C.BUFFER_FLAG_KEY_FRAME, sampleSize, 0 /* offset */, null);
} else {
// We need the first sample timestamp in the segment before we can output the metadata.
pendingMetadataSampleInfos.addLast(
new MetadataSampleInfo(presentationTimeDeltaUs, sampleSize));
pendingMetadataSampleBytes += sampleSize;
}
}
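The emsg presentation time delta is expressed in the atom's own declared timescale, so the same conversion pattern applies. For instance, assuming a hypothetical timescale of 1000 (millisecond ticks), a delta of 5000 ticks scales to five seconds:

long presentationTimeDeltaUs = Util.scaleLargeTimestamp(5000, C.MICROS_PER_SECOND, 1000);
// presentationTimeDeltaUs == 5_000_000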
Example 3: create
import com.google.android.exoplayer2.util.Util; // import the package/class the method depends on
/**
* Returns a {@link XingSeeker} for seeking in the stream, if required information is present.
* Returns {@code null} if not. On returning, {@code frame}'s position is not specified so the
* caller should reset it.
*
* @param mpegAudioHeader The MPEG audio header associated with the frame.
* @param frame The data in this audio frame, with its position set to immediately after the
* 'Xing' or 'Info' tag.
* @param position The position (byte offset) of the start of this frame in the stream.
* @param inputLength The length of the stream in bytes.
* @return A {@link XingSeeker} for seeking in the stream, or {@code null} if the required
* information is not present.
*/
public static XingSeeker create(MpegAudioHeader mpegAudioHeader, ParsableByteArray frame,
long position, long inputLength) {
int samplesPerFrame = mpegAudioHeader.samplesPerFrame;
int sampleRate = mpegAudioHeader.sampleRate;
long firstFramePosition = position + mpegAudioHeader.frameSize;
int flags = frame.readInt();
int frameCount;
if ((flags & 0x01) != 0x01 || (frameCount = frame.readUnsignedIntToInt()) == 0) {
// If the frame count is missing/invalid, the header can't be used to determine the duration.
return null;
}
long durationUs = Util.scaleLargeTimestamp(frameCount, samplesPerFrame * C.MICROS_PER_SECOND,
sampleRate);
if ((flags & 0x06) != 0x06) {
// If the size in bytes or table of contents is missing, the stream is not seekable.
return new XingSeeker(firstFramePosition, durationUs, inputLength);
}
long sizeBytes = frame.readUnsignedIntToInt();
frame.skipBytes(1);
long[] tableOfContents = new long[99];
for (int i = 0; i < 99; i++) {
tableOfContents[i] = frame.readUnsignedByte();
}
// TODO: Handle encoder delay and padding in 3 bytes offset by xingBase + 213 bytes:
// delay = (frame.readUnsignedByte() << 4) + (frame.readUnsignedByte() >> 4);
// padding = ((frame.readUnsignedByte() & 0x0F) << 8) + frame.readUnsignedByte();
return new XingSeeker(firstFramePosition, durationUs, inputLength, tableOfContents,
sizeBytes, mpegAudioHeader.frameSize);
}
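Here frameCount is scaled by samplesPerFrame * 1_000_000 and divided by the sample rate, i.e. durationUs = frameCount * samplesPerFrame / sampleRate seconds. A worked example with made-up numbers:

// 10_000 frames of 1152 samples at 44.1 kHz is about 261.2 seconds.
long durationUs = Util.scaleLargeTimestamp(10_000, 1152 * C.MICROS_PER_SECOND, 44_100);
// durationUs ~= 261_224_489 (the division is not exact, so the method falls
// back to floating-point arithmetic internally)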
Example 4: applySpeedup
import com.google.android.exoplayer2.util.Util; // import the package/class the method depends on
/**
* Returns the underlying audio track {@code positionUs} with any applicable speedup applied.
*/
private long applySpeedup(long positionUs) {
while (!playbackParametersCheckpoints.isEmpty()
&& positionUs >= playbackParametersCheckpoints.getFirst().positionUs) {
// We are playing (or about to play) media with the new playback parameters, so update them.
PlaybackParametersCheckpoint checkpoint = playbackParametersCheckpoints.remove();
playbackParameters = checkpoint.playbackParameters;
playbackParametersPositionUs = checkpoint.positionUs;
playbackParametersOffsetUs = checkpoint.mediaTimeUs - startMediaTimeUs;
}
if (playbackParameters.speed == 1f) {
return positionUs + playbackParametersOffsetUs - playbackParametersPositionUs;
}
if (playbackParametersCheckpoints.isEmpty()
&& sonicAudioProcessor.getOutputByteCount() >= SONIC_MIN_BYTES_FOR_SPEEDUP) {
return playbackParametersOffsetUs
+ Util.scaleLargeTimestamp(positionUs - playbackParametersPositionUs,
sonicAudioProcessor.getInputByteCount(), sonicAudioProcessor.getOutputByteCount());
}
// We are playing drained data at a previous playback speed, or don't have enough bytes to
// calculate an accurate speedup, so fall back to multiplying by the speed.
return playbackParametersOffsetUs
+ (long) ((double) playbackParameters.speed * (positionUs - playbackParametersPositionUs));
}
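The byte-ratio branch is the interesting one: while Sonic is time-stretching audio, media time advances relative to played time by inputByteCount / outputByteCount, so scaling the elapsed position by that ratio recovers the media position. A sketch with hypothetical byte counts for 2x playback:

// At 2x speed Sonic consumes roughly twice the bytes it emits, so 500 ms of
// played audio corresponds to about 1 s of media time.
long mediaDeltaUs = Util.scaleLargeTimestamp(
    500_000, 2_097_152 /* input bytes */, 1_048_576 /* output bytes */);
// mediaDeltaUs == 1_000_000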
Example 5: parseSidx
import com.google.android.exoplayer2.util.Util; // import the package/class the method depends on
/**
* Parses a sidx atom (defined in 14496-12).
*
* @param atom The atom data.
* @param inputPosition The input position of the first byte after the atom.
* @return A pair consisting of the earliest presentation time in microseconds, and the parsed
* {@link ChunkIndex}.
*/
private static Pair<Long, ChunkIndex> parseSidx(ParsableByteArray atom, long inputPosition)
throws ParserException {
atom.setPosition(Atom.HEADER_SIZE);
int fullAtom = atom.readInt();
int version = Atom.parseFullAtomVersion(fullAtom);
atom.skipBytes(4);
long timescale = atom.readUnsignedInt();
long earliestPresentationTime;
long offset = inputPosition;
if (version == 0) {
earliestPresentationTime = atom.readUnsignedInt();
offset += atom.readUnsignedInt();
} else {
earliestPresentationTime = atom.readUnsignedLongToLong();
offset += atom.readUnsignedLongToLong();
}
long earliestPresentationTimeUs = Util.scaleLargeTimestamp(earliestPresentationTime,
C.MICROS_PER_SECOND, timescale);
atom.skipBytes(2);
int referenceCount = atom.readUnsignedShort();
int[] sizes = new int[referenceCount];
long[] offsets = new long[referenceCount];
long[] durationsUs = new long[referenceCount];
long[] timesUs = new long[referenceCount];
long time = earliestPresentationTime;
long timeUs = earliestPresentationTimeUs;
for (int i = 0; i < referenceCount; i++) {
int firstInt = atom.readInt();
int type = 0x80000000 & firstInt;
if (type != 0) {
throw new ParserException("Unhandled indirect reference");
}
long referenceDuration = atom.readUnsignedInt();
sizes[i] = 0x7FFFFFFF & firstInt;
offsets[i] = offset;
      // Calculate time and duration values such that any rounding errors are consistent, i.e. so
      // that timesUs[i] + durationsUs[i] == timesUs[i + 1].
timesUs[i] = timeUs;
time += referenceDuration;
timeUs = Util.scaleLargeTimestamp(time, C.MICROS_PER_SECOND, timescale);
durationsUs[i] = timeUs - timesUs[i];
atom.skipBytes(4);
offset += sizes[i];
}
return Pair.create(earliestPresentationTimeUs,
new ChunkIndex(sizes, offsets, durationsUs, timesUs));
}
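Note why the parser scales the cumulative time rather than each referenceDuration: per-entry scaling can round down independently and drift by up to a microsecond per entry. An illustrative fragment (results hedged, since the method may round via floating point):

long perTick = Util.scaleLargeTimestamp(1, C.MICROS_PER_SECOND, 3); // ~333_333
// Summing three such per-tick values loses up to a microsecond per entry
// versus scaling the three-tick running total (~1_000_000), which is why the
// cumulative approach keeps timesUs[i] + durationsUs[i] == timesUs[i + 1].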
Example 6: scaleTimecodeToUs
import com.google.android.exoplayer2.util.Util; // import the package/class the method depends on
private long scaleTimecodeToUs(long unscaledTimecode) throws ParserException {
if (timecodeScale == C.TIME_UNSET) {
throw new ParserException("Can't scale timecode prior to timecodeScale being set.");
}
return Util.scaleLargeTimestamp(unscaledTimecode, timecodeScale, 1000);
}
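In Matroska, timecodeScale is the number of nanoseconds per timecode tick, so multiplying by it and dividing by 1000 yields microseconds. With the common default timecodeScale of 1_000_000 (one tick per millisecond):

long us = Util.scaleLargeTimestamp(42, 1_000_000, 1000);
// us == 42_000, i.e. 42 ms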
Example 7: create
import com.google.android.exoplayer2.util.Util; // import the package/class the method depends on
/**
* Returns a {@link VbriSeeker} for seeking in the stream, if required information is present.
* Returns {@code null} if not. On returning, {@code frame}'s position is not specified so the
* caller should reset it.
*
* @param mpegAudioHeader The MPEG audio header associated with the frame.
* @param frame The data in this audio frame, with its position set to immediately after the
* 'VBRI' tag.
* @param position The position (byte offset) of the start of this frame in the stream.
* @param inputLength The length of the stream in bytes.
* @return A {@link VbriSeeker} for seeking in the stream, or {@code null} if the required
* information is not present.
*/
public static VbriSeeker create(MpegAudioHeader mpegAudioHeader, ParsableByteArray frame,
long position, long inputLength) {
frame.skipBytes(10);
int numFrames = frame.readInt();
if (numFrames <= 0) {
return null;
}
int sampleRate = mpegAudioHeader.sampleRate;
long durationUs = Util.scaleLargeTimestamp(numFrames,
C.MICROS_PER_SECOND * (sampleRate >= 32000 ? 1152 : 576), sampleRate);
int entryCount = frame.readUnsignedShort();
int scale = frame.readUnsignedShort();
int entrySize = frame.readUnsignedShort();
frame.skipBytes(2);
// Skip the frame containing the VBRI header.
position += mpegAudioHeader.frameSize;
// Read table of contents entries.
long[] timesUs = new long[entryCount + 1];
long[] positions = new long[entryCount + 1];
timesUs[0] = 0L;
positions[0] = position;
for (int index = 1; index < timesUs.length; index++) {
int segmentSize;
switch (entrySize) {
case 1:
segmentSize = frame.readUnsignedByte();
break;
case 2:
segmentSize = frame.readUnsignedShort();
break;
case 3:
segmentSize = frame.readUnsignedInt24();
break;
case 4:
segmentSize = frame.readUnsignedIntToInt();
break;
default:
return null;
}
position += segmentSize * scale;
timesUs[index] = index * durationUs / entryCount;
positions[index] =
inputLength == C.LENGTH_UNSET ? position : Math.min(inputLength, position);
}
return new VbriSeeker(timesUs, positions, durationUs);
}
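As with the Xing header, the duration falls out of numFrames * samplesPerFrame / sampleRate; the 1152-versus-576 split reflects MPEG-1 Layer III versus MPEG-2/2.5 frames (sample rates of 32 kHz and above imply MPEG-1). A hypothetical example:

// 5_000 MPEG-1 frames at 44.1 kHz is roughly 130.6 seconds.
long durationUs = Util.scaleLargeTimestamp(5_000, C.MICROS_PER_SECOND * 1152, 44_100);
// durationUs ~= 130_612_244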