This page collects typical usage examples of the Java field com.google.android.exoplayer.C.MICROS_PER_SECOND. If you have been wondering what C.MICROS_PER_SECOND does, how to use it, or what idiomatic usage looks like, the curated examples below may help. You can also explore the enclosing class, com.google.android.exoplayer.C, for further context.
Below are 15 code examples of C.MICROS_PER_SECOND, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help surface better Java examples.
Example 1: HlsMediaPlaylist
public HlsMediaPlaylist(String baseUri, int mediaSequence, int targetDurationSecs, int version,
boolean live, List<Segment> segments) {
super(baseUri, HlsPlaylist.TYPE_MEDIA);
this.mediaSequence = mediaSequence;
this.targetDurationSecs = targetDurationSecs;
this.version = version;
this.live = live;
this.segments = segments;
if (!segments.isEmpty()) {
Segment last = segments.get(segments.size() - 1);
durationUs = last.startTimeUs + (long) (last.durationSecs * C.MICROS_PER_SECOND);
} else {
durationUs = 0;
}
}
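For reference, a minimal standalone sketch of the duration arithmetic above. C.MICROS_PER_SECOND is the number of microseconds in one second (1,000,000); the segment values below are made up for illustration:

// Hypothetical values; only the arithmetic mirrors the constructor above.
public final class PlaylistDurationDemo {
  private static final long MICROS_PER_SECOND = 1000000L; // stands in for C.MICROS_PER_SECOND

  public static void main(String[] args) {
    long lastSegmentStartTimeUs = 58000000L; // last segment starts 58 s into the playlist
    double lastSegmentDurationSecs = 9.976;  // HLS segment durations are fractional seconds
    long durationUs = lastSegmentStartTimeUs
        + (long) (lastSegmentDurationSecs * MICROS_PER_SECOND);
    System.out.println(durationUs); // 67976000, i.e. ~67.98 s
  }
}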
Example 2: ptsToTimeUs
/**
* Adjusts a PTS value to the corresponding time in microseconds, accounting for PTS wraparound.
*
* @param pts The raw PTS value.
* @return The corresponding time in microseconds.
*/
/* package */ long ptsToTimeUs(long pts) {
if (lastPts != Long.MIN_VALUE) {
// The wrap count for the current PTS may be closestWrapCount or (closestWrapCount - 1),
// and we need to snap to the one closest to lastPts.
long closestWrapCount = (lastPts + (MAX_PTS / 2)) / MAX_PTS;
long ptsWrapBelow = pts + (MAX_PTS * (closestWrapCount - 1));
long ptsWrapAbove = pts + (MAX_PTS * closestWrapCount);
pts = Math.abs(ptsWrapBelow - lastPts) < Math.abs(ptsWrapAbove - lastPts)
? ptsWrapBelow : ptsWrapAbove;
}
// Calculate the corresponding timestamp.
long timeUs = (pts * C.MICROS_PER_SECOND) / 90000;
// If we haven't done the initial timestamp adjustment, do it now.
if (lastPts == Long.MIN_VALUE) {
timestampOffsetUs = firstSampleTimestampUs - timeUs;
}
// Record the adjusted PTS to adjust for wraparound next time.
lastPts = pts;
return timeUs + timestampOffsetUs;
}
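The two conversions above can be exercised in isolation. A hedged, self-contained sketch, assuming MAX_PTS is 2^33 (the wrap point of the 33-bit MPEG-TS PTS counter) and that PTS values tick at the standard 90 kHz clock:

public final class PtsDemo {
  private static final long MICROS_PER_SECOND = 1000000L; // stands in for C.MICROS_PER_SECOND
  private static final long MAX_PTS = 0x1FFFFFFFFL + 1;   // 2^33: the 33-bit PTS wraps here

  public static void main(String[] args) {
    long lastPts = MAX_PTS - 100; // just before a wraparound
    long pts = 50;                // raw value read after the counter wrapped
    // Snap the raw PTS to the wrap count closest to lastPts, as in ptsToTimeUs above.
    long closestWrapCount = (lastPts + (MAX_PTS / 2)) / MAX_PTS;
    long ptsWrapBelow = pts + MAX_PTS * (closestWrapCount - 1);
    long ptsWrapAbove = pts + MAX_PTS * closestWrapCount;
    long adjusted = Math.abs(ptsWrapBelow - lastPts) < Math.abs(ptsWrapAbove - lastPts)
        ? ptsWrapBelow : ptsWrapAbove;
    // Convert from the 90 kHz PTS clock to microseconds.
    long timeUs = adjusted * MICROS_PER_SECOND / 90000;
    System.out.println(adjusted + " ticks -> " + timeUs + " us");
  }
}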
Example 3: parseHeader
/**
* Parses the sample header.
*/
private void parseHeader() {
if (mediaFormat == null) {
mediaFormat = isEac3
? Ac3Util.parseEac3SyncframeFormat(headerScratchBits, null, C.UNKNOWN_TIME_US, null)
: Ac3Util.parseAc3SyncframeFormat(headerScratchBits, null, C.UNKNOWN_TIME_US, null);
output.format(mediaFormat);
}
sampleSize = isEac3 ? Ac3Util.parseEAc3SyncframeSize(headerScratchBits.data)
: Ac3Util.parseAc3SyncframeSize(headerScratchBits.data);
int audioSamplesPerSyncframe = isEac3
? Ac3Util.parseEAc3SyncframeAudioSampleCount(headerScratchBits.data)
: Ac3Util.getAc3SyncframeAudioSampleCount();
// In this class a sample is an access unit (syncframe in AC-3), but the MediaFormat sample rate
// specifies the number of PCM audio samples per second.
sampleDurationUs =
(int) (C.MICROS_PER_SECOND * audioSamplesPerSyncframe / mediaFormat.sampleRate);
}
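The arithmetic deserves a worked example: every AC-3 syncframe decodes to 1536 PCM samples (6 audio blocks of 256 samples each), so at a 48 kHz sample rate each access unit lasts 32 ms. A minimal sketch using those standard values:

public final class SyncframeDurationDemo {
  private static final long MICROS_PER_SECOND = 1000000L; // stands in for C.MICROS_PER_SECOND

  public static void main(String[] args) {
    int audioSamplesPerSyncframe = 1536; // PCM samples per AC-3 syncframe
    int sampleRate = 48000;              // PCM samples per second
    long sampleDurationUs = MICROS_PER_SECOND * audioSamplesPerSyncframe / sampleRate;
    System.out.println(sampleDurationUs); // 32000 us = 32 ms per access unit
  }
}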
Example 4: getPosition
@Override
public long getPosition(long timeUs) {
if (timeUs == 0) {
targetGranule = -1;
return audioStartPosition;
}
targetGranule = vorbisSetup.idHeader.sampleRate * timeUs / C.MICROS_PER_SECOND;
return Math.max(audioStartPosition, ((inputLength - audioStartPosition) * timeUs
/ duration) - 4000);
}
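The first conversion maps the target time to a granule position, i.e. a PCM sample index, before the byte position is estimated by linear interpolation. The time-to-granule step in isolation (values are illustrative):

public final class GranuleDemo {
  private static final long MICROS_PER_SECOND = 1000000L; // stands in for C.MICROS_PER_SECOND

  public static void main(String[] args) {
    long sampleRate = 44100; // from the Vorbis identification header
    long timeUs = 2500000;   // seek target: 2.5 s
    long targetGranule = sampleRate * timeUs / MICROS_PER_SECOND;
    System.out.println(targetGranule); // 110250 PCM samples into the stream
  }
}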
Example 5: parseHeader
/**
* Parses the sample header.
*/
private void parseHeader() {
adtsScratch.setPosition(0);
if (!hasOutputFormat) {
int audioObjectType = adtsScratch.readBits(2) + 1;
int sampleRateIndex = adtsScratch.readBits(4);
adtsScratch.skipBits(1);
int channelConfig = adtsScratch.readBits(3);
byte[] audioSpecificConfig = CodecSpecificDataUtil.buildAacAudioSpecificConfig(
audioObjectType, sampleRateIndex, channelConfig);
Pair<Integer, Integer> audioParams = CodecSpecificDataUtil.parseAacAudioSpecificConfig(
audioSpecificConfig);
MediaFormat mediaFormat = MediaFormat.createAudioFormat(MimeTypes.AUDIO_AAC,
MediaFormat.NO_VALUE, audioParams.second, audioParams.first,
Collections.singletonList(audioSpecificConfig));
frameDurationUs = (C.MICROS_PER_SECOND * 1024L) / mediaFormat.sampleRate;
output.format(mediaFormat);
hasOutputFormat = true;
} else {
adtsScratch.skipBits(10);
}
adtsScratch.skipBits(4);
sampleSize = adtsScratch.readBits(13) - 2 /* the sync word */ - HEADER_SIZE;
if (hasCrc) {
sampleSize -= CRC_SIZE;
}
}
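The 1024L above is not arbitrary: an AAC access unit always decodes to 1024 PCM samples, so the frame duration follows directly from the sample rate. For example, at 44.1 kHz:

public final class AacFrameDurationDemo {
  private static final long MICROS_PER_SECOND = 1000000L; // stands in for C.MICROS_PER_SECOND

  public static void main(String[] args) {
    int sampleRate = 44100;
    long frameDurationUs = (MICROS_PER_SECOND * 1024L) / sampleRate;
    System.out.println(frameDurationUs); // 23219 us, ~23.2 ms per AAC frame
  }
}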
Example 6: getPlaybackHeadPosition
/**
* {@link android.media.AudioTrack#getPlaybackHeadPosition()} returns a value intended to be
* interpreted as an unsigned 32 bit integer, which also wraps around periodically. This method
* returns the playback head position as a long that will only wrap around if the value exceeds
* {@link Long#MAX_VALUE} (which in practice will never happen).
*
* @return {@link android.media.AudioTrack#getPlaybackHeadPosition()} of {@link #audioTrack}
* expressed as a long.
*/
public long getPlaybackHeadPosition() {
if (stopTimestampUs != -1) {
// Simulate the playback head position up to the total number of frames submitted.
long elapsedTimeSinceStopUs = (SystemClock.elapsedRealtime() * 1000) - stopTimestampUs;
long framesSinceStop = (elapsedTimeSinceStopUs * sampleRate) / C.MICROS_PER_SECOND;
return Math.min(endPlaybackHeadPosition, stopPlaybackHeadPosition + framesSinceStop);
}
int state = audioTrack.getPlayState();
if (state == android.media.AudioTrack.PLAYSTATE_STOPPED) {
// The audio track hasn't been started.
return 0;
}
long rawPlaybackHeadPosition = 0xFFFFFFFFL & audioTrack.getPlaybackHeadPosition();
if (needsPassthroughWorkaround) {
// Work around an issue with passthrough/direct AudioTracks on platform API versions 21/22
// where the playback head position jumps back to zero on paused passthrough/direct audio
// tracks. See [Internal: b/19187573].
if (state == android.media.AudioTrack.PLAYSTATE_PAUSED && rawPlaybackHeadPosition == 0) {
passthroughWorkaroundPauseOffset = lastRawPlaybackHeadPosition;
}
rawPlaybackHeadPosition += passthroughWorkaroundPauseOffset;
}
if (lastRawPlaybackHeadPosition > rawPlaybackHeadPosition) {
// The value must have wrapped around.
rawPlaybackHeadWrapCount++;
}
lastRawPlaybackHeadPosition = rawPlaybackHeadPosition;
return rawPlaybackHeadPosition + (rawPlaybackHeadWrapCount << 32);
}
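The wraparound bookkeeping can be distilled into a small helper: mask the raw value to the unsigned 32-bit range, bump a wrap counter whenever it decreases, and add wrapCount << 32. A hedged standalone sketch of that pattern (not ExoPlayer API):

public final class UnsignedWrapTracker {
  private long lastRaw;
  private long wrapCount;

  /** Extends an unsigned 32-bit counter (such as a playback head position) to a long. */
  public long update(int rawCounter) {
    long raw = 0xFFFFFFFFL & rawCounter; // reinterpret as unsigned
    if (raw < lastRaw) {
      wrapCount++; // the 32-bit counter must have wrapped around
    }
    lastRaw = raw;
    return raw + (wrapCount << 32);
  }

  public static void main(String[] args) {
    UnsignedWrapTracker tracker = new UnsignedWrapTracker();
    System.out.println(tracker.update(-10)); // 4294967286: near the top of the 32-bit range
    System.out.println(tracker.update(5));   // 4294967301: one wrap later
  }
}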
Example 7: getSegmentDurationUs
/**
* @see DashSegmentIndex#getDurationUs(int)
*/
public final long getSegmentDurationUs(int sequenceNumber) {
if (segmentTimeline != null) {
long duration = segmentTimeline.get(sequenceNumber - startNumber).duration;
return (duration * C.MICROS_PER_SECOND) / timescale;
} else {
return sequenceNumber == getLastSegmentNum()
? ((periodDurationMs * 1000) - getSegmentTimeUs(sequenceNumber))
: ((duration * C.MICROS_PER_SECOND) / timescale);
}
}
Example 8: getLastSegmentNum
@Override
public int getLastSegmentNum(long periodDurationUs) {
if (segmentTimeline != null) {
return segmentTimeline.size() + startNumber - 1;
} else if (periodDurationUs == C.UNKNOWN_TIME_US) {
return DashSegmentIndex.INDEX_UNBOUNDED;
} else {
long durationUs = (duration * C.MICROS_PER_SECOND) / timescale;
return startNumber + (int) Util.ceilDivide(periodDurationUs, durationUs) - 1;
}
}
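Both DASH methods above rely on the same unit conversion: MPD durations are integers in timescale units, and multiplying by C.MICROS_PER_SECOND before dividing by the timescale keeps the arithmetic in integers. A worked sketch with illustrative values (the explicit ceiling division stands in for Util.ceilDivide):

public final class SegmentCountDemo {
  private static final long MICROS_PER_SECOND = 1000000L; // stands in for C.MICROS_PER_SECOND

  public static void main(String[] args) {
    long duration = 360360;           // segment duration in timescale units
    long timescale = 90000;           // timescale units per second
    long periodDurationUs = 20020000; // a 20.02 s period
    long segmentDurationUs = duration * MICROS_PER_SECOND / timescale; // 4004000 us
    // Ceiling division: a trailing partial segment still counts as a segment.
    long segmentCount = (periodDurationUs + segmentDurationUs - 1) / segmentDurationUs;
    System.out.println(segmentDurationUs + " us per segment, " + segmentCount + " segments");
  }
}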
Example 9: readHeaderRemainder
/**
* Attempts to read the remaining two bytes of the frame header.
* <p>
* If a frame header is read in full then the state is changed to {@link #STATE_READING_FRAME},
* the media format is output if this has not previously occurred, the four header bytes are
* output as sample data, and the position of the source is advanced to the byte that immediately
* follows the header.
* <p>
* If a frame header is read in full but cannot be parsed then the state is changed to
* {@link #STATE_READING_HEADER}.
* <p>
* If a frame header is not read in full then the position of the source is advanced to the limit,
* and the method should be called again with the next source to continue the read.
*
* @param source The source from which to read.
*/
private void readHeaderRemainder(ParsableByteArray source) {
int bytesToRead = Math.min(source.bytesLeft(), HEADER_SIZE - frameBytesRead);
source.readBytes(headerScratch.data, frameBytesRead, bytesToRead);
frameBytesRead += bytesToRead;
if (frameBytesRead < HEADER_SIZE) {
// We haven't read the whole header yet.
return;
}
headerScratch.setPosition(0);
boolean parsedHeader = MpegAudioHeader.populateHeader(headerScratch.readInt(), header);
if (!parsedHeader) {
// We thought we'd located a frame header, but we hadn't.
frameBytesRead = 0;
state = STATE_READING_HEADER;
return;
}
frameSize = header.frameSize;
if (!hasOutputFormat) {
frameDurationUs = (C.MICROS_PER_SECOND * header.samplesPerFrame) / header.sampleRate;
MediaFormat mediaFormat = MediaFormat.createAudioFormat(null, header.mimeType,
MediaFormat.NO_VALUE, MpegAudioHeader.MAX_FRAME_SIZE_BYTES, C.UNKNOWN_TIME_US,
header.channels, header.sampleRate, null, null);
output.format(mediaFormat);
hasOutputFormat = true;
}
headerScratch.setPosition(0);
output.sampleData(headerScratch, HEADER_SIZE);
state = STATE_READING_FRAME;
}
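The frame duration line follows the same pattern as the AAC and AC-3 readers: samplesPerFrame / sampleRate seconds per frame, scaled to microseconds. For an MPEG-1 Layer III stream, which carries 1152 samples per frame:

public final class MpegFrameDurationDemo {
  private static final long MICROS_PER_SECOND = 1000000L; // stands in for C.MICROS_PER_SECOND

  public static void main(String[] args) {
    int samplesPerFrame = 1152; // MPEG-1 Layer III
    int sampleRate = 44100;
    long frameDurationUs = (MICROS_PER_SECOND * samplesPerFrame) / sampleRate;
    System.out.println(frameDurationUs); // 26122 us, ~26.1 ms per frame
  }
}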
Example 10: getSegmentDurationUs
/**
* @see DashSegmentIndex#getDurationUs(int, long)
*/
public final long getSegmentDurationUs(int sequenceNumber, long periodDurationUs) {
if (segmentTimeline != null) {
long duration = segmentTimeline.get(sequenceNumber - startNumber).duration;
return (duration * C.MICROS_PER_SECOND) / timescale;
} else {
return sequenceNumber == getLastSegmentNum(periodDurationUs)
? (periodDurationUs - getSegmentTimeUs(sequenceNumber))
: ((duration * C.MICROS_PER_SECOND) / timescale);
}
}
Example 11: parseTimeExpression
/**
* Parses a time expression, returning the parsed timestamp.
* <p>
* For the format of a time expression, see:
* <a href="http://www.w3.org/TR/ttaf1-dfxp/#timing-value-timeExpression">timeExpression</a>
*
* @param time A string that includes the time expression.
* @param frameRate The frame rate of the stream.
* @param subframeRate The sub-frame rate of the stream.
* @param tickRate The tick rate of the stream.
* @return The parsed timestamp in microseconds.
* @throws ParserException If the given string does not contain a valid time expression.
*/
private static long parseTimeExpression(String time, int frameRate, int subframeRate,
int tickRate) throws ParserException {
Matcher matcher = CLOCK_TIME.matcher(time);
if (matcher.matches()) {
String hours = matcher.group(1);
double durationSeconds = Long.parseLong(hours) * 3600;
String minutes = matcher.group(2);
durationSeconds += Long.parseLong(minutes) * 60;
String seconds = matcher.group(3);
durationSeconds += Long.parseLong(seconds);
String fraction = matcher.group(4);
durationSeconds += (fraction != null) ? Double.parseDouble(fraction) : 0;
String frames = matcher.group(5);
durationSeconds += (frames != null) ? ((double) Long.parseLong(frames)) / frameRate : 0;
String subframes = matcher.group(6);
durationSeconds += (subframes != null) ?
((double) Long.parseLong(subframes)) / subframeRate / frameRate : 0;
return (long) (durationSeconds * C.MICROS_PER_SECOND);
}
matcher = OFFSET_TIME.matcher(time);
if (matcher.matches()) {
String timeValue = matcher.group(1);
double offsetSeconds = Double.parseDouble(timeValue);
String unit = matcher.group(2);
if (unit.equals("h")) {
offsetSeconds *= 3600;
} else if (unit.equals("m")) {
offsetSeconds *= 60;
} else if (unit.equals("s")) {
// Do nothing.
} else if (unit.equals("ms")) {
offsetSeconds /= 1000;
} else if (unit.equals("f")) {
offsetSeconds /= frameRate;
} else if (unit.equals("t")) {
offsetSeconds /= tickRate;
}
return (long) (offsetSeconds * C.MICROS_PER_SECOND);
}
throw new ParserException("Malformed time expression: " + time);
}
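A trimmed-down sketch of the clock-time branch, handling only the HH:MM:SS.fraction form. The simplified pattern below is an assumption for illustration; the real CLOCK_TIME regex also captures frames and subframes:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class ClockTimeDemo {
  private static final long MICROS_PER_SECOND = 1000000L; // stands in for C.MICROS_PER_SECOND
  // Hypothetical simplified pattern, not the CLOCK_TIME used above.
  private static final Pattern SIMPLE_CLOCK_TIME =
      Pattern.compile("^(\\d+):(\\d\\d):(\\d\\d)(\\.\\d+)?$");

  public static void main(String[] args) {
    Matcher matcher = SIMPLE_CLOCK_TIME.matcher("01:02:03.5");
    if (matcher.matches()) {
      double durationSeconds = Long.parseLong(matcher.group(1)) * 3600
          + Long.parseLong(matcher.group(2)) * 60
          + Long.parseLong(matcher.group(3))
          + (matcher.group(4) != null ? Double.parseDouble(matcher.group(4)) : 0);
      System.out.println((long) (durationSeconds * MICROS_PER_SECOND)); // 3723500000
    }
  }
}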
Example 12: framesToDurationUs
private long framesToDurationUs(float frameCount) {
return (long)((frameCount * C.MICROS_PER_SECOND) / sampleRate);
}
Example 13: getTimeUs
/** Returns the time in microseconds for the given position in bytes in this WAV. */
public long getTimeUs(long position) {
return position * C.MICROS_PER_SECOND / averageBytesPerSecond;
}
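For uncompressed PCM the byte rate is constant, so a byte offset divides directly into a time; multiplying by C.MICROS_PER_SECOND before dividing keeps the computation in integer arithmetic. A sketch with illustrative values:

public final class WavTimeDemo {
  private static final long MICROS_PER_SECOND = 1000000L; // stands in for C.MICROS_PER_SECOND

  public static void main(String[] args) {
    // 16-bit stereo PCM at 44.1 kHz: 44100 * 2 channels * 2 bytes = 176400 bytes per second.
    long averageBytesPerSecond = 176400;
    long position = 882000; // byte offset into the audio data
    long timeUs = position * MICROS_PER_SECOND / averageBytesPerSecond;
    System.out.println(timeUs); // 5000000 us = 5 s into the stream
  }
}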
Example 14: getPlaybackHeadPositionUs
/**
* Returns {@link #getPlaybackHeadPosition()} expressed as microseconds.
*/
public long getPlaybackHeadPositionUs() {
return (getPlaybackHeadPosition() * C.MICROS_PER_SECOND) / sampleRate;
}
Example 15: framesToDurationUs
private long framesToDurationUs(long frameCount) {
return (frameCount * C.MICROS_PER_SECOND) / sampleRate;
}
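Examples 12, 14, and 15 all convert a frame count to microseconds; example 6 performs the inverse when computing framesSinceStop. A round-trip sketch showing that frames to microseconds and back is lossless whenever the products divide evenly:

public final class FrameDurationDemo {
  private static final long MICROS_PER_SECOND = 1000000L; // stands in for C.MICROS_PER_SECOND

  public static void main(String[] args) {
    long sampleRate = 48000;
    long frameCount = 12000;
    long durationUs = frameCount * MICROS_PER_SECOND / sampleRate; // 250000 us
    long framesBack = durationUs * sampleRate / MICROS_PER_SECOND; // 12000 frames
    System.out.println(durationUs + " us, " + framesBack + " frames");
  }
}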