This article collects typical usage examples of the Java class com.googlecode.mp4parser.authoring.Track. If you are unsure what the Track class does or how to use it, the curated code examples below may help.
The Track class belongs to the com.googlecode.mp4parser.authoring package. Fifteen code examples of the class are shown below, ordered by popularity by default.
Example 1: getClipSamples
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
private static int[] getClipSamples(Track track, double startTime, double endTime) {
    int currentSample = 0;
    double currentTime = 0;
    double lastTime = -1;
    int startSample = -1;
    int endSample = -1;
    for (int i = 0; i < track.getSampleDurations().length; i++) {
        long delta = track.getSampleDurations()[i];
        if (currentTime > lastTime && currentTime <= startTime) {
            // current sample is still before the new start time
            startSample = currentSample;
        }
        if (currentTime > lastTime && currentTime <= endTime) {
            // current sample is after the new start time and still before the new end time
            endSample = currentSample;
        }
        lastTime = currentTime;
        currentTime += (double) delta / (double) track.getTrackMetaData().getTimescale();
        currentSample++;
    }
    return new int[] {startSample, endSample};
}
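For context, a minimal usage sketch: the returned sample indices can be fed to CroppedTrack to clip every track of a movie. The file name and the 10 s/20 s cut points are made up, and MovieCreator.build(String) is assumed to be available (its exact signature varies across mp4parser versions).

    Movie movie = MovieCreator.build("input.mp4"); // hypothetical input file
    List<Track> clipped = new LinkedList<Track>();
    for (Track track : movie.getTracks()) {
        int[] samples = getClipSamples(track, 10.0, 20.0);
        // CroppedTrack keeps only the samples in [startSample, endSample)
        clipped.add(new CroppedTrack(track, samples[0], samples[1]));
    }
    movie.setTracks(clipped);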
Example 2: correctTimeToNextSyncSample
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
private double correctTimeToNextSyncSample(Track track, double cutHere) {
    double[] timeOfSyncSamples = new double[track.getSyncSamples().length];
    long currentSample = 0;
    double currentTime = 0;
    for (int i = 0; i < track.getDecodingTimeEntries().size(); i++) {
        TimeToSampleBox.Entry entry = track.getDecodingTimeEntries().get(i);
        for (int j = 0; j < entry.getCount(); j++) {
            if (Arrays.binarySearch(track.getSyncSamples(), currentSample + 1) >= 0) {
                // sample numbers are 1-based but we count from zero, therefore +1
                timeOfSyncSamples[Arrays.binarySearch(track.getSyncSamples(), currentSample + 1)] = currentTime;
            }
            currentTime += (double) entry.getDelta() / (double) track.getTrackMetaData().getTimescale();
            currentSample++;
        }
    }
    for (double timeOfSyncSample : timeOfSyncSamples) {
        if (timeOfSyncSample > cutHere) {
            return timeOfSyncSample;
        }
    }
    return timeOfSyncSamples[timeOfSyncSamples.length - 1];
}
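A clip can only begin cleanly at a sync sample (IDR frame), so the requested cut points are snapped forward to the next sync sample, as in mp4parser's shorten example. A hedged sketch of that pattern; note that getSyncSamples() may be null for tracks in which every sample is a sync sample (typically audio), hence the guard:

    double startTime = 10.0, endTime = 20.0; // illustrative cut points
    for (Track track : movie.getTracks()) {
        long[] syncSamples = track.getSyncSamples();
        if (syncSamples != null && syncSamples.length > 0) {
            // snap both cut points to the next sync sample of this track
            startTime = correctTimeToNextSyncSample(track, startTime);
            endTime = correctTimeToNextSyncSample(track, endTime);
        }
    }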
Example 3: calculateFragmentDurations
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
/**
 * Calculates the length of each fragment in the given <code>track</code> (as part of <code>movie</code>).
 *
 * @param track target of the calculation
 * @param movie the <code>track</code> must be part of this <code>movie</code>
 * @return the duration of each fragment in the track's timescale
 */
public long[] calculateFragmentDurations(Track track, Movie movie) {
    long[] startSamples = intersectionFinder.sampleNumbers(track, movie);
    long[] durations = new long[startSamples.length];
    int currentFragment = 0;
    int currentSample = 1; // sample numbers start with 1
    for (TimeToSampleBox.Entry entry : track.getDecodingTimeEntries()) {
        for (int max = currentSample + l2i(entry.getCount()); currentSample < max; currentSample++) {
            // this loop walks through entry.getCount() samples starting from the current sample;
            // all of them share the same decoding delta
            if (currentFragment != startSamples.length - 1 && currentSample == startSamples[currentFragment + 1]) {
                // not in the last fragment and the current sample is the start sample of the next fragment
                currentFragment++;
            }
            durations[currentFragment] += entry.getDelta();
        }
    }
    return durations;
}
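The returned durations are expressed in the track's timescale; dividing by the timescale converts them to seconds. A small sketch (track and movie assumed to be set up elsewhere):

    long[] fragmentDurations = calculateFragmentDurations(track, movie);
    long timescale = track.getTrackMetaData().getTimescale();
    for (long d : fragmentDurations) {
        System.out.printf("fragment length: %.3f s%n", d / (double) timescale);
    }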
Example 4: getAacAudioQuality
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
private AudioQuality getAacAudioQuality(Track track, AudioSampleEntry ase) {
    AudioQuality l = new AudioQuality();
    final ESDescriptorBox esDescriptorBox = ase.getBoxes(ESDescriptorBox.class).get(0);
    final AudioSpecificConfig audioSpecificConfig = esDescriptorBox.getEsDescriptor().getDecoderConfigDescriptor().getAudioSpecificInfo();
    if (audioSpecificConfig.getSbrPresentFlag() == 1) {
        l.fourCC = "AACH";
    } else if (audioSpecificConfig.getPsPresentFlag() == 1) {
        l.fourCC = "AACP"; // unclear whether this is what MS considers AAC+; AAC+ and HE-AAC should actually be the same
    } else {
        l.fourCC = "AACL";
    }
    l.bitrate = getBitrate(track);
    l.audioTag = 255;
    l.samplingRate = ase.getSampleRate();
    l.channels = ase.getChannelCount();
    l.bitPerSample = ase.getSampleSize();
    l.packetSize = 4;
    l.codecPrivateData = getAudioCodecPrivateData(audioSpecificConfig);
    // e.g. Index="0" Bitrate="103000" AudioTag="255" SamplingRate="44100" Channels="2" BitsPerSample="16" packetSize="4" CodecPrivateData=""
    return l;
}
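To obtain the AudioSampleEntry argument, the track's first sample entry can be cast, assuming the track was already identified as "mp4a" (AAC); a minimal sketch:

    // the cast assumes the track's sample entry really is an AudioSampleEntry
    AudioSampleEntry ase = (AudioSampleEntry) track.getSampleDescriptionBox().getSampleEntry();
    AudioQuality quality = getAacAudioQuality(track, ase);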
Example 5: getVideoQuality
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
private VideoQuality getVideoQuality(Track track, VisualSampleEntry vse) {
    VideoQuality l;
    if ("avc1".equals(getFormat(vse))) {
        AvcConfigurationBox avcConfigurationBox = vse.getBoxes(AvcConfigurationBox.class).get(0);
        l = new VideoQuality();
        l.bitrate = getBitrate(track);
        l.codecPrivateData = Hex.encodeHex(getAvcCodecPrivateData(avcConfigurationBox));
        l.fourCC = "AVC1";
        l.width = vse.getWidth();
        l.height = vse.getHeight();
        l.nalLength = avcConfigurationBox.getLengthSizeMinusOne() + 1;
    } else {
        throw new InternalError("I don't know how to handle video of type " + getFormat(vse));
    }
    return l;
}
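The visual counterpart works the same way; a sketch assuming an "avc1" (H.264) track:

    // the cast assumes the track's sample entry really is a VisualSampleEntry
    VisualSampleEntry vse = (VisualSampleEntry) track.getSampleDescriptionBox().getSampleEntry();
    VideoQuality quality = getVideoQuality(track, vse);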
Example 6: SilenceTrackImpl
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
public SilenceTrackImpl(Track ofType, long ms) {
    source = ofType;
    if ("mp4a".equals(ofType.getSampleDescriptionBox().getSampleEntry().getType())) {
        long numFrames = getTrackMetaData().getTimescale() * ms / 1000 / 1024; // an AAC frame holds 1024 samples
        long standZeit = getTrackMetaData().getTimescale() * ms / numFrames / 1000; // duration of one frame in timescale ticks
        entry = new TimeToSampleBox.Entry(numFrames, standZeit);
        while (numFrames-- > 0) {
            // a minimal AAC frame encoding silence
            samples.add((ByteBuffer) ByteBuffer.wrap(new byte[]{
                    0x21, 0x10, 0x04, 0x60, (byte) 0x8c, 0x1c,
            }).rewind());
        }
    } else {
        throw new RuntimeException("Tracks of type " + ofType.getClass().getSimpleName() + " are not supported");
    }
}
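A typical use of SilenceTrackImpl is padding an audio track, for example so it matches the video length when concatenating movies. A hedged sketch; AppendTrack from the same authoring package is assumed here, and its constructor is declared to throw IOException in most versions:

    // append 750 ms of AAC silence to an existing "mp4a" audio track
    Track padded = new AppendTrack(audioTrack, new SilenceTrackImpl(audioTrack, 750));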
Example 7: getTimes
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
private static long[] getTimes(Track track, long[] syncSamples, long targetTimeScale) {
    long[] syncSampleTimes = new long[syncSamples.length];
    Queue<TimeToSampleBox.Entry> timeQueue = new LinkedList<TimeToSampleBox.Entry>(track.getDecodingTimeEntries());
    int currentSample = 1; // the first sync sample is 1
    long currentDuration = 0;
    long currentDelta = 0;
    int currentSyncSampleIndex = 0;
    long left = 0;
    while (currentSample <= syncSamples[syncSamples.length - 1]) {
        if (currentSample++ == syncSamples[currentSyncSampleIndex]) {
            syncSampleTimes[currentSyncSampleIndex++] = (currentDuration * targetTimeScale) / track.getTrackMetaData().getTimescale();
        }
        if (left-- == 0) {
            TimeToSampleBox.Entry entry = timeQueue.poll();
            left = entry.getCount() - 1;
            currentDelta = entry.getDelta();
        }
        currentDuration += currentDelta;
    }
    return syncSampleTimes;
}
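Combined with the track's own sync-sample table, this yields each sync sample's start time in a target timescale; with 10000000 (the smooth streaming timescale from Example 15) the values are 100 ns ticks. A sketch:

    // map every sync sample to its start time in 100 ns ticks
    long[] syncSampleTimes = getTimes(track, track.getSyncSamples(), 10000000L);
    for (long t : syncSampleTimes) {
        System.out.println("sync sample at " + t / 10000000.0 + " s");
    }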
Example 8: getChunkSizes
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
/**
 * Gets the chunk sizes for the given track.
 *
 * @param track the track whose chunk sizes are calculated
 * @param movie the movie the track belongs to
 * @return the number of samples in each chunk
 */
int[] getChunkSizes(Track track, Movie movie) {
    long[] referenceChunkStarts = intersectionFinder.sampleNumbers(track, movie);
    int[] chunkSizes = new int[referenceChunkStarts.length];
    for (int i = 0; i < referenceChunkStarts.length; i++) {
        long start = referenceChunkStarts[i] - 1;
        long end;
        if (referenceChunkStarts.length == i + 1) {
            end = track.getSamples().size();
        } else {
            end = referenceChunkStarts[i + 1] - 1;
        }
        chunkSizes[i] = l2i(end - start);
        // the stretching ensures there are as many audio chunks as video chunks
    }
    assert DefaultMp4Builder.this.track2Sample.get(track).size() == sum(chunkSizes) : "The number of samples and the sum of all chunk lengths must be equal";
    return chunkSizes;
}
Example 9: createTrex
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
protected Box createTrex(Movie movie, Track track) {
    TrackExtendsBox trex = new TrackExtendsBox();
    trex.setTrackId(track.getTrackMetaData().getTrackId());
    trex.setDefaultSampleDescriptionIndex(1);
    trex.setDefaultSampleDuration(0);
    trex.setDefaultSampleSize(0);
    SampleFlags sf = new SampleFlags();
    if ("soun".equals(track.getHandler())) {
        // as far as I know there is no audio encoding
        // whose samples are not self-contained
        sf.setSampleDependsOn(2);
        sf.setSampleIsDependedOn(2);
    }
    trex.setDefaultSampleFlags(sf);
    return trex;
}
Example 10: createMinf
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
protected Box createMinf(Track track, Movie movie) {
    MediaInformationBox minf = new MediaInformationBox();
    minf.addBox(track.getMediaHeaderBox());
    minf.addBox(createDinf(movie, track));
    minf.addBox(createStbl(movie, track));
    return minf;
}
Example 11: createDinf
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
protected DataInformationBox createDinf(Movie movie, Track track) {
    DataInformationBox dinf = new DataInformationBox();
    DataReferenceBox dref = new DataReferenceBox();
    dinf.addBox(dref);
    DataEntryUrlBox url = new DataEntryUrlBox();
    url.setFlags(1); // flag 1: the media data is in the same file as the movie box
    dref.addBox(url);
    return dinf;
}
Example 12: getBitrate
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
public long getBitrate(Track track) {
    long bitrate = 0;
    for (ByteBuffer sample : track.getSamples()) {
        bitrate += sample.limit();
    }
    bitrate *= 8; // convert bytes to bits
    bitrate /= ((double) getDuration(track)) / track.getTrackMetaData().getTimescale(); // divide by the duration in seconds
    return bitrate;
}
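The arithmetic is a plain average: total sample bytes times 8 gives bits, and the duration in timescale ticks divided by the timescale gives seconds. A worked example with made-up numbers:

    // hypothetical numbers: 2,500,000 bytes of samples, duration 600,000 ticks
    // at a timescale of 30,000 ticks/s => 20 s of media
    // bits  = 2,500,000 * 8 = 20,000,000
    // bit/s = 20,000,000 / 20 = 1,000,000 (about 1 Mbit/s)
    long averageBitrate = getBitrate(track);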
Example 13: getDuration
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
protected static long getDuration(Track track) {
    long duration = 0; // accumulated duration in timescale ticks
    for (TimeToSampleBox.Entry entry : track.getDecodingTimeEntries()) {
        duration += entry.getCount() * entry.getDelta();
    }
    return duration;
}
Example 14: removeUnknownTracks
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
private Movie removeUnknownTracks(Movie source) {
    Movie nuMovie = new Movie();
    for (Track track : source.getTracks()) {
        if ("vide".equals(track.getHandler()) || "soun".equals(track.getHandler())) {
            nuMovie.addTrack(track);
        } else {
            LOG.fine("Removed track " + track);
        }
    }
    return nuMovie;
}
Example 15: correctTimescale
import com.googlecode.mp4parser.authoring.Track; //import the required package/class
/**
 * Returns a new <code>Movie</code> in which all tracks have the timescale 10000000. CTS and DTS are modified
 * so that, even with more than one frame rate, the fragments begin at exactly the same time.
 *
 * @param movie the movie whose track timescales are corrected
 * @return a movie with timescales suitable for smooth streaming manifests
 */
public Movie correctTimescale(Movie movie) {
    Movie nuMovie = new Movie();
    for (Track track : movie.getTracks()) {
        nuMovie.addTrack(new ChangeTimeScaleTrack(track, timeScale, ismvBuilder.getFragmentIntersectionFinder().sampleNumbers(track, movie)));
    }
    return nuMovie;
}