This article collects typical usage examples of the Java constant com.google.android.exoplayer.C.SAMPLE_FLAG_SYNC. If you have been wondering what C.SAMPLE_FLAG_SYNC is for and how to use it, the curated examples below should help. You can also read more about the enclosing class, com.google.android.exoplayer.C.
The following 15 code examples of C.SAMPLE_FLAG_SYNC are shown, ordered by popularity.
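Before the examples, a word on mechanics: C.SAMPLE_FLAG_SYNC is a single bit in a sample's flags word, so it is set with bitwise OR and tested with bitwise AND. A minimal sketch (isKeyframe and sampleFlags are illustrative local variables, not part of the ExoPlayer API):

// Hedged sketch: setting and testing the sync bit in a sample flags value.
boolean isKeyframe = true; // illustrative input
int sampleFlags = isKeyframe ? C.SAMPLE_FLAG_SYNC : 0;
boolean isSyncSample = (sampleFlags & C.SAMPLE_FLAG_SYNC) != 0; // true

This set-with-OR, test-with-AND pattern recurs in every example below.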
Example 1: testParsesValidMp4File
public void testParsesValidMp4File() throws Exception {
TestUtil.consumeTestData(extractor,
getTestInputData(true /* includeStss */, false /* mp4vFormat */));
// The seek map is correct.
assertSeekMap(extractorOutput.seekMap, true);
// The video and audio formats are set correctly.
assertEquals(2, extractorOutput.trackOutputs.size());
MediaFormat videoFormat = extractorOutput.trackOutputs.get(0).format;
MediaFormat audioFormat = extractorOutput.trackOutputs.get(1).format;
assertEquals(MimeTypes.VIDEO_H264, videoFormat.mimeType);
assertEquals(VIDEO_WIDTH, videoFormat.width);
assertEquals(VIDEO_HEIGHT, videoFormat.height);
assertEquals(MimeTypes.AUDIO_AAC, audioFormat.mimeType);
// The timestamps and sizes are set correctly.
FakeTrackOutput videoTrackOutput = extractorOutput.trackOutputs.get(0);
videoTrackOutput.assertSampleCount(SAMPLE_TIMESTAMPS.length);
for (int i = 0; i < SAMPLE_TIMESTAMPS.length; i++) {
byte[] sampleData = getOutputSampleData(i, true);
int sampleFlags = SAMPLE_IS_SYNC[i] ? C.SAMPLE_FLAG_SYNC : 0;
long sampleTimestampUs = getVideoTimestampUs(SAMPLE_TIMESTAMPS[i]);
videoTrackOutput.assertSample(i, sampleData, sampleTimestampUs, sampleFlags, null);
}
}
Example 2: testParsesValidMp4FileWithoutStss
public void testParsesValidMp4FileWithoutStss() throws Exception {
TestUtil.consumeTestData(extractor,
getTestInputData(false /* includeStss */, false /* mp4vFormat */));
// The seek map is correct.
assertSeekMap(extractorOutput.seekMap, false);
// The timestamps and sizes are set correctly, and all samples are synchronization samples.
FakeTrackOutput videoTrackOutput = extractorOutput.trackOutputs.get(0);
videoTrackOutput.assertSampleCount(SAMPLE_TIMESTAMPS.length);
for (int i = 0; i < SAMPLE_TIMESTAMPS.length; i++) {
byte[] sampleData = getOutputSampleData(i, true);
int sampleFlags = C.SAMPLE_FLAG_SYNC;
long sampleTimestampUs = getVideoTimestampUs(SAMPLE_TIMESTAMPS[i]);
videoTrackOutput.assertSample(i, sampleData, sampleTimestampUs, sampleFlags, null);
}
}
Example 3: testParsesValidMp4vFile
public void testParsesValidMp4vFile() throws Exception {
TestUtil.consumeTestData(extractor,
getTestInputData(true /* includeStss */, true /* mp4vFormat */));
// The seek map is correct.
assertSeekMap(extractorOutput.seekMap, true);
// The video and audio formats are set correctly.
assertEquals(2, extractorOutput.trackOutputs.size());
MediaFormat videoFormat = extractorOutput.trackOutputs.get(0).format;
MediaFormat audioFormat = extractorOutput.trackOutputs.get(1).format;
assertEquals(MimeTypes.VIDEO_MP4V, videoFormat.mimeType);
assertEquals(VIDEO_MP4V_WIDTH, videoFormat.width);
assertEquals(VIDEO_MP4V_HEIGHT, videoFormat.height);
assertEquals(MimeTypes.AUDIO_AAC, audioFormat.mimeType);
// The timestamps and sizes are set correctly.
FakeTrackOutput videoTrackOutput = extractorOutput.trackOutputs.get(0);
videoTrackOutput.assertSampleCount(SAMPLE_TIMESTAMPS.length);
for (int i = 0; i < SAMPLE_TIMESTAMPS.length; i++) {
byte[] sampleData = getOutputSampleData(i, false);
int sampleFlags = SAMPLE_IS_SYNC[i] ? C.SAMPLE_FLAG_SYNC : 0;
long sampleTimestampUs = getVideoTimestampUs(SAMPLE_TIMESTAMPS[i]);
videoTrackOutput.assertSample(i, sampleData, sampleTimestampUs, sampleFlags, null);
}
}
Example 4: skipToKeyframeBefore
/**
* Attempts to locate the keyframe before the specified time, if it's present in the buffer.
*
* @param timeUs The seek time.
* @return The offset of the keyframe's data if the keyframe was present, or -1 otherwise.
*/
public synchronized long skipToKeyframeBefore(long timeUs) {
if (queueSize == 0 || timeUs < timesUs[relativeReadIndex]) {
return -1;
}
int lastWriteIndex = (relativeWriteIndex == 0 ? capacity : relativeWriteIndex) - 1;
long lastTimeUs = timesUs[lastWriteIndex];
if (timeUs > lastTimeUs) {
return -1;
}
// TODO: This can be optimized further using binary search, although the fact that the array
// is cyclic means we'd need to implement the binary search ourselves.
int sampleCount = 0;
int sampleCountToKeyframe = -1;
int searchIndex = relativeReadIndex;
while (searchIndex != relativeWriteIndex) {
if (timesUs[searchIndex] > timeUs) {
// We've gone too far.
break;
} else if ((flags[searchIndex] & C.SAMPLE_FLAG_SYNC) != 0) {
// We've found a keyframe, and we're still before the seek position.
sampleCountToKeyframe = sampleCount;
}
searchIndex = (searchIndex + 1) % capacity;
sampleCount++;
}
if (sampleCountToKeyframe == -1) {
return -1;
}
queueSize -= sampleCountToKeyframe;
relativeReadIndex = (relativeReadIndex + sampleCountToKeyframe) % capacity;
absoluteReadIndex += sampleCountToKeyframe;
return offsets[relativeReadIndex];
}
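The TODO above sketches an optimization: a floor binary search over the cyclic region of timesUs. One hypothetical way to write it, assuming timestamps are non-decreasing between the read and write indices (this helper is not part of ExoPlayer):

// Hypothetical helper: floor binary search over the cyclic buffer region.
// Returns how many samples past relativeReadIndex the last sample with
// timesUs <= timeUs lies, or -1 if even the first queued sample is later.
private int binarySearchFloorCyclic(long timeUs) {
  int low = 0;
  int high = queueSize - 1;
  int result = -1;
  while (low <= high) {
    int mid = (low + high) >>> 1;
    int physicalIndex = (relativeReadIndex + mid) % capacity;
    if (timesUs[physicalIndex] <= timeUs) {
      result = mid;
      low = mid + 1;
    } else {
      high = mid - 1;
    }
  }
  return result;
}

A backwards scan for a set C.SAMPLE_FLAG_SYNC bit would still be needed from that position, since the floor sample is not necessarily a keyframe.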
Example 5: getIndexOfEarlierOrEqualSynchronizationSample
/**
* Returns the sample index of the closest synchronization sample at or before the given
* timestamp, if one is available.
*
* @param timeUs Timestamp adjacent to which to find a synchronization sample.
* @return Index of the synchronization sample, or {@link #NO_SAMPLE} if none.
*/
public int getIndexOfEarlierOrEqualSynchronizationSample(long timeUs) {
int startIndex = Util.binarySearchFloor(timestampsUs, timeUs, true, false);
for (int i = startIndex; i >= 0; i--) {
if (timestampsUs[i] <= timeUs && (flags[i] & C.SAMPLE_FLAG_SYNC) != 0) {
return i;
}
}
return NO_SAMPLE;
}
Example 6: getIndexOfLaterOrEqualSynchronizationSample
/**
* Returns the sample index of the closest synchronization sample at or after the given timestamp,
* if one is available.
*
* @param timeUs Timestamp adjacent to which to find a synchronization sample.
* @return Index of the synchronization sample, or {@link #NO_SAMPLE} if none.
*/
public int getIndexOfLaterOrEqualSynchronizationSample(long timeUs) {
int startIndex = Util.binarySearchCeil(timestampsUs, timeUs, true, false);
for (int i = startIndex; i < timestampsUs.length; i++) {
if (timestampsUs[i] >= timeUs && (flags[i] & C.SAMPLE_FLAG_SYNC) != 0) {
return i;
}
}
return NO_SAMPLE;
}
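Examples 5 and 6 are typically used together when resolving a seek: prefer the synchronization sample at or before the target time, and fall back to the next one after it if the target precedes the first sync sample. A hedged usage sketch (sampleTable is a hypothetical holder of these methods):

// Hedged usage sketch: map a seek time to a synchronization sample index.
int syncSampleIndex = sampleTable.getIndexOfEarlierOrEqualSynchronizationSample(seekTimeUs);
if (syncSampleIndex == NO_SAMPLE) {
  // No sync sample at or before seekTimeUs; try the closest one after it instead.
  syncSampleIndex = sampleTable.getIndexOfLaterOrEqualSynchronizationSample(seekTimeUs);
}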
Example 7: getIndexOfEarlierOrEqualSynchronizationSample
/**
* Returns the sample index of the closest synchronization sample at or before the given
* timestamp, if one is available.
*
* @param timeUs Timestamp adjacent to which to find a synchronization sample.
* @return Index of the synchronization sample, or {@link #NO_SAMPLE} if none.
*/
public int getIndexOfEarlierOrEqualSynchronizationSample(long timeUs) {
// Video frame timestamps may not be sorted, so the behavior of this call can be undefined.
// Frames are not reordered past synchronization samples so this works in practice.
int startIndex = Util.binarySearchFloor(timestampsUs, timeUs, true, false);
for (int i = startIndex; i >= 0; i--) {
if ((flags[i] & C.SAMPLE_FLAG_SYNC) != 0) {
return i;
}
}
return NO_SAMPLE;
}
Example 8: getIndexOfLaterOrEqualSynchronizationSample
/**
* Returns the sample index of the closest synchronization sample at or after the given timestamp,
* if one is available.
*
* @param timeUs Timestamp adjacent to which to find a synchronization sample.
* @return Index of the synchronization sample, or {@link #NO_SAMPLE} if none.
*/
public int getIndexOfLaterOrEqualSynchronizationSample(long timeUs) {
int startIndex = Util.binarySearchCeil(timestampsUs, timeUs, true, false);
for (int i = startIndex; i < timestampsUs.length; i++) {
if ((flags[i] & C.SAMPLE_FLAG_SYNC) != 0) {
return i;
}
}
return NO_SAMPLE;
}
Example 9: assertSample
private void assertSample(int index, byte[] expectedMedia, long timeUs, boolean keyframe,
boolean invisible, byte[] encryptionKey, FakeTrackOutput output) {
if (encryptionKey != null) {
expectedMedia = TestUtil.joinByteArrays(
new byte[] {(byte) StreamBuilder.TEST_INITIALIZATION_VECTOR.length},
StreamBuilder.TEST_INITIALIZATION_VECTOR, expectedMedia);
}
int flags = 0;
flags |= keyframe ? C.SAMPLE_FLAG_SYNC : 0;
flags |= invisible ? C.SAMPLE_FLAG_DECODE_ONLY : 0;
flags |= encryptionKey != null ? C.SAMPLE_FLAG_ENCRYPTED : 0;
output.assertSample(index, expectedMedia, timeUs, flags, encryptionKey);
}
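On the reading side, the same bits are recovered with bitwise AND, mirroring the composition above (a minimal sketch; flags is any value assembled as in assertSample):

// Hedged sketch: decomposing the flags assembled in assertSample.
boolean keyframe = (flags & C.SAMPLE_FLAG_SYNC) != 0;
boolean decodeOnly = (flags & C.SAMPLE_FLAG_DECODE_ONLY) != 0;
boolean encrypted = (flags & C.SAMPLE_FLAG_ENCRYPTED) != 0;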
Example 10: consume
@Override
public void consume(ParsableByteArray data, long pesTimeUs, boolean startOfPacket) {
while (data.bytesLeft() > 0) {
int offset = data.getPosition();
int limit = data.limit();
byte[] dataArray = data.data;
// Append the data to the buffer.
totalBytesWritten += data.bytesLeft();
output.sampleData(data, data.bytesLeft());
// Scan the appended data, processing NAL units as they are encountered.
while (offset < limit) {
int nextNalUnitOffset = NalUnitUtil.findNalUnit(dataArray, offset, limit, prefixFlags);
if (nextNalUnitOffset < limit) {
// We've seen the start of a NAL unit.
// This is the length to the start of the unit. It may be negative if the NAL unit
// actually started in previously consumed data.
int lengthToNalUnit = nextNalUnitOffset - offset;
if (lengthToNalUnit > 0) {
feedNalUnitTargetBuffersData(dataArray, offset, nextNalUnitOffset);
}
int nalUnitType = NalUnitUtil.getNalUnitType(dataArray, nextNalUnitOffset);
int bytesWrittenPastNalUnit = limit - nextNalUnitOffset;
switch (nalUnitType) {
case NAL_UNIT_TYPE_IDR:
isKeyframe = true;
break;
case NAL_UNIT_TYPE_AUD:
if (foundFirstSample) {
if (ifrParserBuffer != null && ifrParserBuffer.isCompleted()) {
int sliceType = ifrParserBuffer.getSliceType();
isKeyframe |= (sliceType == FRAME_TYPE_I || sliceType == FRAME_TYPE_ALL_I);
ifrParserBuffer.reset();
}
if (isKeyframe && !hasOutputFormat && sps.isCompleted() && pps.isCompleted()) {
parseMediaFormat(sps, pps);
}
int flags = isKeyframe ? C.SAMPLE_FLAG_SYNC : 0;
int size = (int) (totalBytesWritten - samplePosition) - bytesWrittenPastNalUnit;
output.sampleMetadata(sampleTimeUs, flags, size, bytesWrittenPastNalUnit, null);
}
foundFirstSample = true;
samplePosition = totalBytesWritten - bytesWrittenPastNalUnit;
sampleTimeUs = pesTimeUs;
isKeyframe = false;
break;
}
// If the length to the start of the unit is negative then we wrote too many bytes to the
// NAL buffers. Discard the excess bytes when notifying that the unit has ended.
feedNalUnitTargetEnd(pesTimeUs, lengthToNalUnit < 0 ? -lengthToNalUnit : 0);
// Notify the start of the next NAL unit.
feedNalUnitTargetBuffersStart(nalUnitType);
// Continue scanning the data.
offset = nextNalUnitOffset + 3;
} else {
feedNalUnitTargetBuffersData(dataArray, offset, limit);
offset = limit;
}
}
}
}
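A note on the scanning loop above: NalUnitUtil.findNalUnit returns the offset of the next NAL unit start code, or limit if none is found in the range, which is why the loop advances by nextNalUnitOffset + 3 (the three-byte 0x000001 prefix). A simplified sketch of the search, ignoring the prefixFlags state the real method uses to handle start codes split across buffers:

// Simplified sketch of start-code scanning; the real NalUnitUtil.findNalUnit also uses
// prefixFlags to track start codes that straddle two consecutive input buffers.
static int findStartCode(byte[] data, int offset, int limit) {
  for (int i = offset; i <= limit - 3; i++) {
    if (data[i] == 0 && data[i + 1] == 0 && data[i + 2] == 1) {
      return i; // offset of the 0x000001 start-code prefix
    }
  }
  return limit; // not found, matching findNalUnit's convention of returning limit
}

For H.264, the NAL unit type then comes from the low five bits of the byte following the prefix, which is what NalUnitUtil.getNalUnitType reads.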
Example 11: consume
@Override
public void consume(ParsableByteArray data, long pesTimeUs, boolean startOfPacket) {
while (data.bytesLeft() > 0) {
int offset = data.getPosition();
int limit = data.limit();
byte[] dataArray = data.data;
// Append the data to the buffer.
totalBytesWritten += data.bytesLeft();
output.sampleData(data, data.bytesLeft());
// Scan the appended data, processing NAL units as they are encountered.
while (offset < limit) {
int nextNalUnitOffset = NalUnitUtil.findNalUnit(dataArray, offset, limit, prefixFlags);
if (nextNalUnitOffset < limit) {
// We've seen the start of a NAL unit.
// This is the length to the start of the unit. It may be negative if the NAL unit
// actually started in previously consumed data.
int lengthToNalUnit = nextNalUnitOffset - offset;
if (lengthToNalUnit > 0) {
feedNalUnitTargetBuffersData(dataArray, offset, nextNalUnitOffset);
}
int nalUnitType = NalUnitUtil.getH265NalUnitType(dataArray, nextNalUnitOffset);
int bytesWrittenPastNalUnit = limit - nextNalUnitOffset;
if (isFirstSliceSegmentInPic(dataArray, nextNalUnitOffset)) {
if (foundFirstSample) {
if (isKeyframe && !hasOutputFormat && vps.isCompleted() && sps.isCompleted()
&& pps.isCompleted()) {
parseMediaFormat(vps, sps, pps);
}
int flags = isKeyframe ? C.SAMPLE_FLAG_SYNC : 0;
int size = (int) (totalBytesWritten - samplePosition) - bytesWrittenPastNalUnit;
output.sampleMetadata(sampleTimeUs, flags, size, bytesWrittenPastNalUnit, null);
}
foundFirstSample = true;
samplePosition = totalBytesWritten - bytesWrittenPastNalUnit;
sampleTimeUs = pesTimeUs;
isKeyframe = isRandomAccessPoint(nalUnitType);
}
// If the length to the start of the unit is negative then we wrote too many bytes to the
// NAL buffers. Discard the excess bytes when notifying that the unit has ended.
feedNalUnitTargetEnd(pesTimeUs, lengthToNalUnit < 0 ? -lengthToNalUnit : 0);
// Notify the start of the next NAL unit.
feedNalUnitTargetBuffersStart(nalUnitType);
// Continue scanning the data.
offset = nextNalUnitOffset + 3;
} else {
feedNalUnitTargetBuffersData(dataArray, offset, limit);
offset = limit;
}
}
}
}
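The H.265 variant decides keyframes via isRandomAccessPoint rather than a single IDR type, because HEVC defines several random-access picture types. A hedged sketch, assuming the usual IRAP NAL unit type range from the HEVC specification:

// Hedged sketch: in HEVC, NAL unit types 16 (BLA_W_LP) through 21 (CRA_NUT) are
// intra random access point (IRAP) pictures, so decoding can start there after a seek.
private static boolean isRandomAccessPoint(int nalUnitType) {
  return nalUnitType >= 16 && nalUnitType <= 21;
}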
Example 12: consume
@Override
public void consume(ParsableByteArray data) {
while (data.bytesLeft() > 0) {
int offset = data.getPosition();
int limit = data.limit();
byte[] dataArray = data.data;
// Append the data to the buffer.
totalBytesWritten += data.bytesLeft();
output.sampleData(data, data.bytesLeft());
int searchOffset = offset;
while (true) {
int startCodeOffset = NalUnitUtil.findNalUnit(dataArray, searchOffset, limit, prefixFlags);
if (startCodeOffset == limit) {
// We've scanned to the end of the data without finding another start code.
if (!hasOutputFormat) {
csdBuffer.onData(dataArray, offset, limit);
}
return;
}
// We've found a start code with the following value.
int startCodeValue = data.data[startCodeOffset + 3] & 0xFF;
if (!hasOutputFormat) {
// This is the number of bytes from the current offset to the start of the next start
// code. It may be negative if the start code started in the previously consumed data.
int lengthToStartCode = startCodeOffset - offset;
if (lengthToStartCode > 0) {
csdBuffer.onData(dataArray, offset, startCodeOffset);
}
// This is the number of bytes belonging to the next start code that have already been
// passed to csdDataTargetBuffer.
int bytesAlreadyPassed = lengthToStartCode < 0 ? -lengthToStartCode : 0;
if (csdBuffer.onStartCode(startCodeValue, bytesAlreadyPassed)) {
// The csd data is complete, so we can parse and output the media format.
Pair<MediaFormat, Long> result = parseCsdBuffer(csdBuffer);
output.format(result.first);
frameDurationUs = result.second;
hasOutputFormat = true;
}
}
if (hasOutputFormat && (startCodeValue == START_GROUP || startCodeValue == START_PICTURE)) {
int bytesWrittenPastStartCode = limit - startCodeOffset;
if (foundFirstFrameInGroup) {
int flags = isKeyframe ? C.SAMPLE_FLAG_SYNC : 0;
int size = (int) (totalBytesWritten - framePosition) - bytesWrittenPastStartCode;
output.sampleMetadata(frameTimeUs, flags, size, bytesWrittenPastStartCode, null);
isKeyframe = false;
}
if (startCodeValue == START_GROUP) {
foundFirstFrameInGroup = false;
isKeyframe = true;
} else /* startCodeValue == START_PICTURE */ {
frameTimeUs = pesPtsUsAvailable ? pesTimeUs : (frameTimeUs + frameDurationUs);
framePosition = totalBytesWritten - bytesWrittenPastStartCode;
pesPtsUsAvailable = false;
foundFirstFrameInGroup = true;
}
}
offset = startCodeOffset;
searchOffset = offset + 3;
}
}
}
Example 13: outputSample
private void outputSample(int offset) {
int flags = sampleIsKeyframe ? C.SAMPLE_FLAG_SYNC : 0;
int size = (int) (nalUnitStartPosition - samplePosition);
output.sampleMetadata(sampleTimeUs, flags, size, offset, null);
}
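For context, outputSample is the committing half of the two-step TrackOutput protocol used throughout these examples: raw bytes are pushed first with sampleData, and sampleMetadata later marks where one sample ends. A minimal, hedged sketch of the sequence (variable values are illustrative, not from any real stream):

// Hedged sketch of the two-step TrackOutput protocol.
// Step 1: append raw bytes as they arrive; they are not yet attributed to a sample.
output.sampleData(data, data.bytesLeft());
// Step 2: once the sample boundary is known, commit the sample. Here 'size' is the
// number of bytes in the sample and 'offset' is how many bytes have already been
// written past its end (bytes belonging to the next sample).
output.sampleMetadata(sampleTimeUs, C.SAMPLE_FLAG_SYNC, size, offset, null);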
Example 14: rechunk
/**
* Rechunk the given fixed sample size input to produce a new sequence of samples.
*
* @param fixedSampleSize Size in bytes of each sample.
* @param chunkOffsets Chunk offsets in the MP4 stream to rechunk.
* @param chunkSampleCounts Sample counts for each of the MP4 stream's chunks.
* @param timestampDeltaInTimeUnits Timestamp delta between each sample in time units.
* @return The {@link Results} of rechunking the input.
*/
public static Results rechunk(
int fixedSampleSize,
long[] chunkOffsets,
int[] chunkSampleCounts,
long timestampDeltaInTimeUnits) {
int maxSampleCount = MAX_SAMPLE_SIZE / fixedSampleSize;
// Count the number of new, rechunked buffers.
int rechunkedSampleCount = 0;
for (int chunkSampleCount : chunkSampleCounts) {
rechunkedSampleCount += Util.ceilDivide(chunkSampleCount, maxSampleCount);
}
long[] offsets = new long[rechunkedSampleCount];
int[] sizes = new int[rechunkedSampleCount];
int maximumSize = 0;
long[] timestamps = new long[rechunkedSampleCount];
int[] flags = new int[rechunkedSampleCount];
int originalSampleIndex = 0;
int newSampleIndex = 0;
for (int chunkIndex = 0; chunkIndex < chunkSampleCounts.length; chunkIndex++) {
int chunkSamplesRemaining = chunkSampleCounts[chunkIndex];
long sampleOffset = chunkOffsets[chunkIndex];
while (chunkSamplesRemaining > 0) {
int bufferSampleCount = Math.min(maxSampleCount, chunkSamplesRemaining);
offsets[newSampleIndex] = sampleOffset;
sizes[newSampleIndex] = fixedSampleSize * bufferSampleCount;
maximumSize = Math.max(maximumSize, sizes[newSampleIndex]);
timestamps[newSampleIndex] = (timestampDeltaInTimeUnits * originalSampleIndex);
flags[newSampleIndex] = C.SAMPLE_FLAG_SYNC;
sampleOffset += sizes[newSampleIndex];
originalSampleIndex += bufferSampleCount;
chunkSamplesRemaining -= bufferSampleCount;
newSampleIndex++;
}
}
return new Results(offsets, sizes, maximumSize, timestamps, flags);
}
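A small worked example may make the loop arithmetic concrete. Suppose, purely for illustration, that MAX_SAMPLE_SIZE is 8 bytes, so a fixedSampleSize of 2 gives maxSampleCount = 4, and the input is a single chunk of 10 samples at offset 0:

// Hedged usage sketch with illustrative values (MAX_SAMPLE_SIZE assumed to be 8).
long[] chunkOffsets = {0L};
int[] chunkSampleCounts = {10};
Results results = rechunk(2, chunkOffsets, chunkSampleCounts, 100L);
// maxSampleCount = 8 / 2 = 4, so the chunk splits into ceil(10 / 4) = 3 samples:
// offsets    = {0, 8, 16}    each advanced by the previous rechunked sample's size
// sizes      = {8, 8, 4}     4, 4 and 2 original samples of 2 bytes each
// timestamps = {0, 400, 800} originalSampleIndex * timestampDeltaInTimeUnits
// flags      = C.SAMPLE_FLAG_SYNC for every entry; each rechunked sample is a sync sample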
Example 15: endMasterElement
void endMasterElement(int id) throws ParserException {
switch (id) {
case ID_SEGMENT_INFO:
if (timecodeScale == C.UNKNOWN_TIME_US) {
// timecodeScale was omitted. Use the default value.
timecodeScale = 1000000;
}
if (durationTimecode != C.UNKNOWN_TIME_US) {
durationUs = scaleTimecodeToUs(durationTimecode);
}
return;
case ID_SEEK:
if (seekEntryId == UNKNOWN || seekEntryPosition == UNKNOWN) {
throw new ParserException("Mandatory element SeekID or SeekPosition not found");
}
if (seekEntryId == ID_CUES) {
cuesContentPosition = seekEntryPosition;
}
return;
case ID_CUES:
if (!sentSeekMap) {
extractorOutput.seekMap(buildSeekMap());
sentSeekMap = true;
} else {
// We have already built the cues. Ignore.
}
return;
case ID_BLOCK_GROUP:
if (blockState != BLOCK_STATE_DATA) {
// We've skipped this block (due to incompatible track number).
return;
}
// If the ReferenceBlock element was not found for this sample, then it is a keyframe.
if (!sampleSeenReferenceBlock) {
blockFlags |= C.SAMPLE_FLAG_SYNC;
}
commitSampleToOutput(tracks.get(blockTrackNumber), blockTimeUs);
blockState = BLOCK_STATE_START;
return;
case ID_CONTENT_ENCODING:
if (currentTrack.hasContentEncryption) {
if (currentTrack.encryptionKeyId == null) {
throw new ParserException("Encrypted Track found but ContentEncKeyID was not found");
}
if (!sentDrmInitData) {
extractorOutput.drmInitData(new DrmInitData.Universal(
new SchemeInitData(MimeTypes.VIDEO_WEBM, currentTrack.encryptionKeyId)));
sentDrmInitData = true;
}
}
return;
case ID_CONTENT_ENCODINGS:
if (currentTrack.hasContentEncryption && currentTrack.sampleStrippedBytes != null) {
throw new ParserException("Combining encryption and compression is not supported");
}
return;
case ID_TRACK_ENTRY:
if (tracks.get(currentTrack.number) == null && isCodecSupported(currentTrack.codecId)) {
currentTrack.initializeOutput(extractorOutput, currentTrack.number, durationUs);
tracks.put(currentTrack.number, currentTrack);
} else {
// We've seen this track entry before, or the codec is unsupported. Do nothing.
}
currentTrack = null;
return;
case ID_TRACKS:
if (tracks.size() == 0) {
throw new ParserException("No valid tracks were found");
}
extractorOutput.endTracks();
return;
default:
return;
}
}