This article collects typical usage examples of the Java property com.google.android.exoplayer2.C.MICROS_PER_SECOND. If you are wondering what C.MICROS_PER_SECOND is for and how to use it in practice, the curated examples below may help. You can also read more about its containing class, com.google.android.exoplayer2.C.
The following presents 15 code examples of C.MICROS_PER_SECOND, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: parseHeader
/**
* Parses the sample header.
*/
@SuppressWarnings("ReferenceEquality")
private void parseHeader() {
headerScratchBits.setPosition(0);
Ac3Util.Ac3SyncFrameInfo frameInfo = Ac3Util.parseAc3SyncframeInfo(headerScratchBits);
if (format == null || frameInfo.channelCount != format.channelCount
|| frameInfo.sampleRate != format.sampleRate
|| frameInfo.mimeType != format.sampleMimeType) {
format = Format.createAudioSampleFormat(trackFormatId, frameInfo.mimeType, null,
Format.NO_VALUE, Format.NO_VALUE, frameInfo.channelCount, frameInfo.sampleRate, null,
null, 0, language);
output.format(format);
}
sampleSize = frameInfo.frameSize;
// In this class a sample is an access unit (syncframe in AC-3), but the MediaFormat sample rate
// specifies the number of PCM audio samples per second.
sampleDurationUs = C.MICROS_PER_SECOND * frameInfo.sampleCount / format.sampleRate;
}
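For context on the arithmetic: an AC-3 syncframe always carries 1536 PCM samples per channel, so at a 48 kHz sample rate each access unit lasts exactly 32 ms. A minimal, self-contained sketch of that calculation (the local constant mirrors C.MICROS_PER_SECOND; the sample values are illustrative):

class Ac3DurationSketch {
  public static void main(String[] args) {
    long MICROS_PER_SECOND = 1_000_000L; // same value as C.MICROS_PER_SECOND
    long sampleCount = 1536;             // PCM samples per AC-3 syncframe
    int sampleRate = 48000;              // Hz
    // Duration of one access unit: samples / (samples per second), in microseconds.
    System.out.println(MICROS_PER_SECOND * sampleCount / sampleRate); // 32000 us = 32 ms
  }
}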
Example 2: parsePayload
@Override
protected void parsePayload(ParsableByteArray data, long timeUs) throws ParserException {
int nameType = readAmfType(data);
if (nameType != AMF_TYPE_STRING) {
// Should never happen.
throw new ParserException();
}
String name = readAmfString(data);
if (!NAME_METADATA.equals(name)) {
// We're only interested in metadata.
return;
}
int type = readAmfType(data);
if (type != AMF_TYPE_ECMA_ARRAY) {
// We're not interested in this metadata.
return;
}
// Set the duration to the value contained in the metadata, if present.
Map<String, Object> metadata = readAmfEcmaArray(data);
if (metadata.containsKey(KEY_DURATION)) {
double durationSeconds = (double) metadata.get(KEY_DURATION);
if (durationSeconds > 0.0) {
durationUs = (long) (durationSeconds * C.MICROS_PER_SECOND);
}
}
}
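The conversion itself is just seconds to microseconds with a narrowing cast. A minimal sketch with a hypothetical metadata value (123.5 is chosen because it is exactly representable as a double):

class FlvDurationSketch {
  public static void main(String[] args) {
    long MICROS_PER_SECOND = 1_000_000L; // same value as C.MICROS_PER_SECOND
    double durationSeconds = 123.5;      // hypothetical "duration" metadata entry
    long durationUs = (long) (durationSeconds * MICROS_PER_SECOND);
    System.out.println(durationUs);      // 123500000
  }
}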
Example 3: getAdGroupTimesUs
private static long[] getAdGroupTimesUs(List<Float> cuePoints) {
if (cuePoints.isEmpty()) {
// If no cue points are specified, there is a preroll ad.
return new long[] {0};
}
int count = cuePoints.size();
long[] adGroupTimesUs = new long[count];
for (int i = 0; i < count; i++) {
double cuePoint = cuePoints.get(i);
adGroupTimesUs[i] =
cuePoint == -1.0 ? C.TIME_END_OF_SOURCE : (long) (C.MICROS_PER_SECOND * cuePoint);
}
return adGroupTimesUs;
}
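To see the mapping concretely: a cue point of -1 marks a postroll and becomes C.TIME_END_OF_SOURCE (which ExoPlayer defines as Long.MIN_VALUE); everything else is scaled from seconds to microseconds. A minimal sketch with hypothetical cue points:

import java.util.Arrays;
import java.util.List;

class CuePointSketch {
  static final long MICROS_PER_SECOND = 1_000_000L;      // same value as C.MICROS_PER_SECOND
  static final long TIME_END_OF_SOURCE = Long.MIN_VALUE; // mirrors C.TIME_END_OF_SOURCE

  public static void main(String[] args) {
    List<Float> cuePoints = Arrays.asList(0f, 10.5f, -1f); // preroll, midroll, postroll
    long[] adGroupTimesUs = new long[cuePoints.size()];
    for (int i = 0; i < cuePoints.size(); i++) {
      double cuePoint = cuePoints.get(i);
      adGroupTimesUs[i] =
          cuePoint == -1.0 ? TIME_END_OF_SOURCE : (long) (MICROS_PER_SECOND * cuePoint);
    }
    System.out.println(Arrays.toString(adGroupTimesUs)); // [0, 10500000, -9223372036854775808]
  }
}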
Example 4: getPlaybackHeadPosition
/**
* {@link android.media.AudioTrack#getPlaybackHeadPosition()} returns a value intended to be
* interpreted as an unsigned 32 bit integer, which also wraps around periodically. This method
* returns the playback head position as a long that will only wrap around if the value exceeds
* {@link Long#MAX_VALUE} (which in practice will never happen).
*
* @return The playback head position, in frames.
*/
public long getPlaybackHeadPosition() {
if (stopTimestampUs != C.TIME_UNSET) {
// Simulate the playback head position up to the total number of frames submitted.
long elapsedTimeSinceStopUs = (SystemClock.elapsedRealtime() * 1000) - stopTimestampUs;
long framesSinceStop = (elapsedTimeSinceStopUs * sampleRate) / C.MICROS_PER_SECOND;
return Math.min(endPlaybackHeadPosition, stopPlaybackHeadPosition + framesSinceStop);
}
int state = audioTrack.getPlayState();
if (state == PLAYSTATE_STOPPED) {
// The audio track hasn't been started.
return 0;
}
long rawPlaybackHeadPosition = 0xFFFFFFFFL & audioTrack.getPlaybackHeadPosition();
if (needsPassthroughWorkaround) {
// Work around an issue with passthrough/direct AudioTracks on platform API versions 21/22
// where the playback head position jumps back to zero on paused passthrough/direct audio
// tracks. See [Internal: b/19187573].
if (state == PLAYSTATE_PAUSED && rawPlaybackHeadPosition == 0) {
passthroughWorkaroundPauseOffset = lastRawPlaybackHeadPosition;
}
rawPlaybackHeadPosition += passthroughWorkaroundPauseOffset;
}
if (lastRawPlaybackHeadPosition > rawPlaybackHeadPosition) {
// The value must have wrapped around.
rawPlaybackHeadWrapCount++;
}
lastRawPlaybackHeadPosition = rawPlaybackHeadPosition;
return rawPlaybackHeadPosition + (rawPlaybackHeadWrapCount << 32);
}
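The wrap-around bookkeeping at the end is the key trick: the raw position is read as an unsigned 32-bit value, and whenever a newly polled value is smaller than the previous one, the 32-bit counter must have wrapped. A stripped-down sketch of just that logic, assuming the caller polls positions in order:

class UnsignedWrapSketch {
  private long lastRaw;
  private long wrapCount;

  /** Extends an unsigned 32-bit counter into a monotonically increasing long. */
  long extend(int raw32) {
    long raw = 0xFFFFFFFFL & raw32; // reinterpret the int as unsigned
    if (lastRaw > raw) {
      wrapCount++; // the 32-bit counter wrapped around since the last poll
    }
    lastRaw = raw;
    return raw + (wrapCount << 32);
  }

  public static void main(String[] args) {
    UnsignedWrapSketch s = new UnsignedWrapSketch();
    System.out.println(s.extend(-10)); // 4294967286, near the top of the unsigned range
    System.out.println(s.extend(5));   // 4294967301 = 2^32 + 5, i.e. one wrap later
  }
}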
Example 5: readHeaderRemainder
/**
* Attempts to read the remaining two bytes of the frame header.
* <p>
* If a frame header is read in full then the state is changed to {@link #STATE_READING_FRAME},
* the media format is output if this has not previously occurred, the four header bytes are
* output as sample data, and the position of the source is advanced to the byte that immediately
* follows the header.
* <p>
* If a frame header is read in full but cannot be parsed then the state is changed to
* {@link #STATE_READING_HEADER}.
* <p>
* If a frame header is not read in full then the position of the source is advanced to the limit,
* and the method should be called again with the next source to continue the read.
*
* @param source The source from which to read.
*/
private void readHeaderRemainder(ParsableByteArray source) {
int bytesToRead = Math.min(source.bytesLeft(), HEADER_SIZE - frameBytesRead);
source.readBytes(headerScratch.data, frameBytesRead, bytesToRead);
frameBytesRead += bytesToRead;
if (frameBytesRead < HEADER_SIZE) {
// We haven't read the whole header yet.
return;
}
headerScratch.setPosition(0);
boolean parsedHeader = MpegAudioHeader.populateHeader(headerScratch.readInt(), header);
if (!parsedHeader) {
// We thought we'd located a frame header, but we hadn't.
frameBytesRead = 0;
state = STATE_READING_HEADER;
return;
}
frameSize = header.frameSize;
if (!hasOutputFormat) {
frameDurationUs = (C.MICROS_PER_SECOND * header.samplesPerFrame) / header.sampleRate;
Format format = Format.createAudioSampleFormat(formatId, header.mimeType, null,
Format.NO_VALUE, MpegAudioHeader.MAX_FRAME_SIZE_BYTES, header.channels, header.sampleRate,
null, null, 0, language);
output.format(format);
hasOutputFormat = true;
}
headerScratch.setPosition(0);
output.sampleData(headerScratch, HEADER_SIZE);
state = STATE_READING_FRAME;
}
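As a worked instance of the frameDurationUs formula: an MPEG-1 Layer III frame contains 1152 samples, so at 44.1 kHz each frame spans about 26.1 ms. A minimal sketch:

class MpegFrameDurationSketch {
  public static void main(String[] args) {
    long MICROS_PER_SECOND = 1_000_000L; // same value as C.MICROS_PER_SECOND
    int samplesPerFrame = 1152;          // MPEG-1 Layer III
    int sampleRate = 44100;              // Hz
    System.out.println((MICROS_PER_SECOND * samplesPerFrame) / sampleRate); // 26122 (truncated)
  }
}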
Example 6: getPosition
/** Returns the position in bytes in this WAV for the given time in microseconds. */
public long getPosition(long timeUs) {
long unroundedPosition = (timeUs * averageBytesPerSecond) / C.MICROS_PER_SECOND;
// Round down to nearest frame.
long position = (unroundedPosition / blockAlignment) * blockAlignment;
return Math.min(position, dataSize - blockAlignment) + dataStartPosition;
}
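The rounding step matters because a seek must land on a frame boundary, so the byte offset is truncated to a multiple of blockAlignment. A minimal sketch with hypothetical 16-bit stereo PCM parameters:

class WavSeekSketch {
  public static void main(String[] args) {
    long MICROS_PER_SECOND = 1_000_000L; // same value as C.MICROS_PER_SECOND
    int averageBytesPerSecond = 176400;  // 44100 Hz x 2 channels x 2 bytes
    int blockAlignment = 4;              // bytes per PCM frame (2 channels x 16 bits)
    long timeUs = 333_333;               // seek to ~0.333 s
    long unrounded = (timeUs * averageBytesPerSecond) / MICROS_PER_SECOND; // 58799
    long position = (unrounded / blockAlignment) * blockAlignment;         // 58796
    System.out.println(position); // rounded down to the nearest frame boundary
  }
}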
Example 7: readSample
private int readSample(ExtractorInput extractorInput) throws IOException, InterruptedException {
if (sampleBytesRemaining == 0) {
extractorInput.resetPeekPosition();
if (!extractorInput.peekFully(scratch.data, 0, 4, true)) {
return RESULT_END_OF_INPUT;
}
scratch.setPosition(0);
int sampleHeaderData = scratch.readInt();
if ((sampleHeaderData & HEADER_MASK) != (synchronizedHeaderData & HEADER_MASK)
|| MpegAudioHeader.getFrameSize(sampleHeaderData) == C.LENGTH_UNSET) {
// We have lost synchronization, so attempt to resynchronize starting at the next byte.
extractorInput.skipFully(1);
synchronizedHeaderData = 0;
return RESULT_CONTINUE;
}
MpegAudioHeader.populateHeader(sampleHeaderData, synchronizedHeader);
if (basisTimeUs == C.TIME_UNSET) {
basisTimeUs = seeker.getTimeUs(extractorInput.getPosition());
if (forcedFirstSampleTimestampUs != C.TIME_UNSET) {
long embeddedFirstSampleTimestampUs = seeker.getTimeUs(0);
basisTimeUs += forcedFirstSampleTimestampUs - embeddedFirstSampleTimestampUs;
}
}
sampleBytesRemaining = synchronizedHeader.frameSize;
}
int bytesAppended = trackOutput.sampleData(extractorInput, sampleBytesRemaining, true);
if (bytesAppended == C.RESULT_END_OF_INPUT) {
return RESULT_END_OF_INPUT;
}
sampleBytesRemaining -= bytesAppended;
if (sampleBytesRemaining > 0) {
return RESULT_CONTINUE;
}
long timeUs = basisTimeUs + (samplesRead * C.MICROS_PER_SECOND / synchronizedHeader.sampleRate);
trackOutput.sampleMetadata(timeUs, C.BUFFER_FLAG_KEY_FRAME, synchronizedHeader.frameSize, 0,
null);
samplesRead += synchronizedHeader.samplesPerFrame;
sampleBytesRemaining = 0;
return RESULT_CONTINUE;
}
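Note that the timestamp is derived from the running sample count (samplesRead) rather than by accumulating a truncated per-frame duration, which avoids cumulative rounding drift. A minimal sketch, assuming 44.1 kHz and 1152 samples per frame:

class Mp3TimestampSketch {
  public static void main(String[] args) {
    long MICROS_PER_SECOND = 1_000_000L; // same value as C.MICROS_PER_SECOND
    long basisTimeUs = 0;
    int sampleRate = 44100;
    int samplesPerFrame = 1152;
    long samplesRead = 0;
    for (int frame = 0; frame < 4; frame++) {
      long timeUs = basisTimeUs + (samplesRead * MICROS_PER_SECOND / sampleRate);
      System.out.println("frame " + frame + ": " + timeUs + " us");
      samplesRead += samplesPerFrame;
    }
    // Prints 0, 26122, 52244, 78367. Summing a truncated 26122-us frame duration
    // instead would give 78366 at frame 3 -- already 1 us of drift.
  }
}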
Example 8: parseCsdBuffer
/**
* Parses the {@link Format} and frame duration from a csd buffer.
*
* @param csdBuffer The csd buffer.
* @param formatId The id for the generated format. May be null.
* @return A pair consisting of the {@link Format} and the frame duration in microseconds, or
* 0 if the duration could not be determined.
*/
private static Pair<Format, Long> parseCsdBuffer(CsdBuffer csdBuffer, String formatId) {
byte[] csdData = Arrays.copyOf(csdBuffer.data, csdBuffer.length);
int firstByte = csdData[4] & 0xFF;
int secondByte = csdData[5] & 0xFF;
int thirdByte = csdData[6] & 0xFF;
int width = (firstByte << 4) | (secondByte >> 4);
int height = (secondByte & 0x0F) << 8 | thirdByte;
float pixelWidthHeightRatio = 1f;
int aspectRatioCode = (csdData[7] & 0xF0) >> 4;
switch (aspectRatioCode) {
case 2:
pixelWidthHeightRatio = (4 * height) / (float) (3 * width);
break;
case 3:
pixelWidthHeightRatio = (16 * height) / (float) (9 * width);
break;
case 4:
pixelWidthHeightRatio = (121 * height) / (float) (100 * width);
break;
default:
// Do nothing.
break;
}
Format format = Format.createVideoSampleFormat(formatId, MimeTypes.VIDEO_MPEG2, null,
Format.NO_VALUE, Format.NO_VALUE, width, height, Format.NO_VALUE,
Collections.singletonList(csdData), Format.NO_VALUE, pixelWidthHeightRatio, null);
long frameDurationUs = 0;
int frameRateCodeMinusOne = (csdData[7] & 0x0F) - 1;
if (0 <= frameRateCodeMinusOne && frameRateCodeMinusOne < FRAME_RATE_VALUES.length) {
double frameRate = FRAME_RATE_VALUES[frameRateCodeMinusOne];
int sequenceExtensionPosition = csdBuffer.sequenceExtensionPosition;
int frameRateExtensionN = (csdData[sequenceExtensionPosition + 9] & 0x60) >> 5;
int frameRateExtensionD = (csdData[sequenceExtensionPosition + 9] & 0x1F);
if (frameRateExtensionN != frameRateExtensionD) {
frameRate *= (frameRateExtensionN + 1d) / (frameRateExtensionD + 1);
}
frameDurationUs = (long) (C.MICROS_PER_SECOND / frameRate);
}
return Pair.create(format, frameDurationUs);
}
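A worked instance of the frame-duration branch: frame_rate_code 4 in an MPEG-2 sequence header denotes 30000/1001 (about 29.97) fps, giving a frame duration of 33366 us:

class Mpeg2FrameDurationSketch {
  public static void main(String[] args) {
    double MICROS_PER_SECOND = 1_000_000d; // same value as C.MICROS_PER_SECOND
    double frameRate = 30000d / 1001d;     // frame_rate_code 4, ~29.97 fps
    System.out.println((long) (MICROS_PER_SECOND / frameRate)); // 33366
  }
}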
Example 9: parseAdtsHeader
/**
* Parses the sample header.
*/
private void parseAdtsHeader() {
adtsScratch.setPosition(0);
if (!hasOutputFormat) {
int audioObjectType = adtsScratch.readBits(2) + 1;
if (audioObjectType != 2) {
// The stream indicates AAC-Main (1), AAC-SSR (3) or AAC-LTP (4). When the stream indicates
// AAC-Main it's more likely that the stream contains HE-AAC (5), which cannot be
// represented correctly in the 2 bit audio_object_type field in the ADTS header. In
// practice when the stream indicates AAC-SSR or AAC-LTP it more commonly contains AAC-LC or
// HE-AAC. Since most Android devices don't support AAC-Main, AAC-SSR or AAC-LTP, and since
// indicating AAC-LC works for HE-AAC streams, we pretend that we're dealing with AAC-LC and
// hope for the best. In practice this often works.
// See: https://github.com/google/ExoPlayer/issues/774
// See: https://github.com/google/ExoPlayer/issues/1383
Log.w(TAG, "Detected audio object type: " + audioObjectType + ", but assuming AAC LC.");
audioObjectType = 2;
}
int sampleRateIndex = adtsScratch.readBits(4);
adtsScratch.skipBits(1);
int channelConfig = adtsScratch.readBits(3);
byte[] audioSpecificConfig = CodecSpecificDataUtil.buildAacAudioSpecificConfig(
audioObjectType, sampleRateIndex, channelConfig);
Pair<Integer, Integer> audioParams = CodecSpecificDataUtil.parseAacAudioSpecificConfig(
audioSpecificConfig);
Format format = Format.createAudioSampleFormat(formatId, MimeTypes.AUDIO_AAC, null,
Format.NO_VALUE, Format.NO_VALUE, audioParams.second, audioParams.first,
Collections.singletonList(audioSpecificConfig), null, 0, language);
// In this class a sample is an access unit, but the MediaFormat sample rate specifies the
// number of PCM audio samples per second.
sampleDurationUs = (C.MICROS_PER_SECOND * 1024) / format.sampleRate;
output.format(format);
hasOutputFormat = true;
} else {
adtsScratch.skipBits(10);
}
adtsScratch.skipBits(4);
int sampleSize = adtsScratch.readBits(13) - 2 /* the sync word */ - HEADER_SIZE;
if (hasCrc) {
sampleSize -= CRC_SIZE;
}
setReadingSampleState(output, sampleDurationUs, 0, sampleSize);
}
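The constant 1024 is the number of PCM samples carried by one AAC access unit, so at 44.1 kHz each sample (access unit) lasts about 23.2 ms. A minimal sketch:

class AacDurationSketch {
  public static void main(String[] args) {
    long MICROS_PER_SECOND = 1_000_000L; // same value as C.MICROS_PER_SECOND
    int samplesPerAccessUnit = 1024;     // PCM samples per AAC frame
    int sampleRate = 44100;              // Hz
    System.out.println((MICROS_PER_SECOND * samplesPerAccessUnit) / sampleRate); // 23219
  }
}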
Example 10: getDurationUs
/** Returns the duration in microseconds of this WAV. */
public long getDurationUs() {
long numFrames = dataSize / blockAlignment;
return (numFrames * C.MICROS_PER_SECOND) / sampleRateHz;
}
Example 11: getTimeUs
/** Returns the time in microseconds for the given position in bytes in this WAV. */
public long getTimeUs(long position) {
return position * C.MICROS_PER_SECOND / averageBytesPerSecond;
}
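Examples 10 and 11 are inverses of each other, up to integer rounding. A sanity check with hypothetical CD-audio parameters, where ten seconds of data maps back to exactly ten seconds:

class WavRoundTripSketch {
  public static void main(String[] args) {
    long MICROS_PER_SECOND = 1_000_000L; // same value as C.MICROS_PER_SECOND
    long dataSize = 1_764_000;           // bytes of PCM data: 10 s of 44.1 kHz 16-bit stereo
    int blockAlignment = 4;              // bytes per PCM frame
    int sampleRateHz = 44100;
    int averageBytesPerSecond = 176400;  // sampleRateHz * blockAlignment

    long numFrames = dataSize / blockAlignment;                       // 441000
    long durationUs = (numFrames * MICROS_PER_SECOND) / sampleRateHz; // 10000000
    long timeUs = dataSize * MICROS_PER_SECOND / averageBytesPerSecond;
    System.out.println(durationUs + " " + timeUs); // both 10000000 (10 s)
  }
}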
Example 12: getPosition
@Override
public long getPosition(long timeUs) {
return durationUs == C.TIME_UNSET ? 0
: firstFramePosition + (timeUs * bitrate) / (C.MICROS_PER_SECOND * BITS_PER_BYTE);
}
Example 13: getTimeUs
@Override
public long getTimeUs(long position) {
return (Math.max(0, position - firstFramePosition) * C.MICROS_PER_SECOND * BITS_PER_BYTE)
/ bitrate;
}
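Examples 12 and 13 form a constant-bitrate seeker's round trip: a time maps to a byte offset at bitrate / 8 bytes per second past the first frame, and back again. A minimal sketch with hypothetical 128 kbps values (BITS_PER_BYTE is 8):

class CbrSeekSketch {
  public static void main(String[] args) {
    long MICROS_PER_SECOND = 1_000_000L; // same value as C.MICROS_PER_SECOND
    final int BITS_PER_BYTE = 8;
    long firstFramePosition = 417;       // hypothetical offset of the first audio frame
    int bitrate = 128_000;               // bits per second

    long timeUs = 5_000_000;             // seek to 5 s
    long position = firstFramePosition + (timeUs * bitrate) / (MICROS_PER_SECOND * BITS_PER_BYTE);
    long backUs = (Math.max(0, position - firstFramePosition) * MICROS_PER_SECOND * BITS_PER_BYTE)
        / bitrate;
    System.out.println(position + " " + backUs); // 80417 5000000
  }
}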
Example 14: parseTimeExpression
/**
* Parses a time expression, returning the parsed timestamp.
* <p>
* For the format of a time expression, see:
* <a href="http://www.w3.org/TR/ttaf1-dfxp/#timing-value-timeExpression">timeExpression</a>
*
* @param time A string that includes the time expression.
* @param frameAndTickRate The effective frame and tick rates of the stream.
* @return The parsed timestamp in microseconds.
* @throws SubtitleDecoderException If the given string does not contain a valid time expression.
*/
private static long parseTimeExpression(String time, FrameAndTickRate frameAndTickRate)
throws SubtitleDecoderException {
Matcher matcher = CLOCK_TIME.matcher(time);
if (matcher.matches()) {
String hours = matcher.group(1);
double durationSeconds = Long.parseLong(hours) * 3600;
String minutes = matcher.group(2);
durationSeconds += Long.parseLong(minutes) * 60;
String seconds = matcher.group(3);
durationSeconds += Long.parseLong(seconds);
String fraction = matcher.group(4);
durationSeconds += (fraction != null) ? Double.parseDouble(fraction) : 0;
String frames = matcher.group(5);
durationSeconds += (frames != null)
? Long.parseLong(frames) / frameAndTickRate.effectiveFrameRate : 0;
String subframes = matcher.group(6);
durationSeconds += (subframes != null)
? ((double) Long.parseLong(subframes)) / frameAndTickRate.subFrameRate
/ frameAndTickRate.effectiveFrameRate
: 0;
return (long) (durationSeconds * C.MICROS_PER_SECOND);
}
matcher = OFFSET_TIME.matcher(time);
if (matcher.matches()) {
String timeValue = matcher.group(1);
double offsetSeconds = Double.parseDouble(timeValue);
String unit = matcher.group(2);
switch (unit) {
case "h":
offsetSeconds *= 3600;
break;
case "m":
offsetSeconds *= 60;
break;
case "s":
// Do nothing.
break;
case "ms":
offsetSeconds /= 1000;
break;
case "f":
offsetSeconds /= frameAndTickRate.effectiveFrameRate;
break;
case "t":
offsetSeconds /= frameAndTickRate.tickRate;
break;
}
return (long) (offsetSeconds * C.MICROS_PER_SECOND);
}
throw new SubtitleDecoderException("Malformed time expression: " + time);
}
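A worked instance of the clock-time branch: "00:01:30.500" is 0 hours, 1 minute, 30 seconds and a 0.5-second fraction, i.e. 90.5 seconds, which scales to 90,500,000 us:

class TtmlTimeSketch {
  public static void main(String[] args) {
    double MICROS_PER_SECOND = 1_000_000d; // same value as C.MICROS_PER_SECOND
    double durationSeconds = 0 * 3600 + 1 * 60 + 30 + 0.5; // "00:01:30.500"
    System.out.println((long) (durationSeconds * MICROS_PER_SECOND)); // 90500000
  }
}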
Example 15: framesToDurationUs
private long framesToDurationUs(long frameCount) {
return (frameCount * C.MICROS_PER_SECOND) / sampleRate;
}
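As a sanity check on this helper: exactly one second's worth of frames converts to exactly C.MICROS_PER_SECOND. A minimal sketch, assuming a 48 kHz track:

class FramesToDurationSketch {
  public static void main(String[] args) {
    long MICROS_PER_SECOND = 1_000_000L; // same value as C.MICROS_PER_SECOND
    int sampleRate = 48000;              // Hz
    long frameCount = 48000;             // one second of frames
    System.out.println((frameCount * MICROS_PER_SECOND) / sampleRate); // 1000000
  }
}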