本文整理汇总了Java中org.jcodec.containers.mp4.MP4Packet类的典型用法代码示例。如果您正苦于以下问题:Java MP4Packet类的具体用法?Java MP4Packet怎么用?Java MP4Packet使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
MP4Packet类属于org.jcodec.containers.mp4包,在下文中一共展示了MP4Packet类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: encodeNativeFrame
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
// Encodes one native Picture as an H.264 frame and appends it to the MP4 video track.
// NOTE(review): frameNo doubles as pts, frameNum and decode time; the timescale is
// hard-coded to 5 here — confirm this matches the muxer's track setup.
public void encodeNativeFrame(Picture pic) throws IOException {
if (toEncode == null) {
// Lazily allocate the working picture in the encoder's preferred color space.
toEncode = Picture.create(pic.getWidth() , pic.getHeight() , encoder.getSupportedColorSpaces()[0]);
}
// Perform conversion
try {
transform.transform(pic, toEncode);
}catch (Exception e){
// NOTE(review): conversion failures are silently swallowed and the frame is
// dropped; the broad catch can hide real bugs — consider logging or rethrowing.
return;
}
// Encode image into H.264 frame, the result is stored in '_out' buffer
_out.clear();
ByteBuffer result = encoder.encodeFrame(toEncode, _out);
// Based on the frame above form correct MP4 packet
spsList.clear();
ppsList.clear();
H264Utils.wipePS(result, spsList, ppsList);
H264Utils.encodeMOVPacket(result);
// Add packet to video track
outTrack.addFrame(new MP4Packet(result, frameNo, 5, 1, frameNo, true, null, frameNo, 0));
frameNo++;
}
示例2: encodeNativeFrame
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
/**
 * Converts the given native picture into the encoder's color space, encodes it
 * as an H.264 frame and appends the resulting packet to the output video track.
 *
 * @param pic the source picture
 * @throws IOException if the muxer fails to write the packet
 */
public void encodeNativeFrame(Picture pic) throws IOException {
    // Allocate the conversion target once, matching the source dimensions.
    if (toEncode == null) {
        toEncode = Picture.create(pic.getWidth() , pic.getHeight() , encoder.getSupportedColorSpaces()[0]);
    }

    // Color-space conversion into the reusable working picture.
    transform.transform(pic, toEncode);

    // Compress the frame; the bitstream lands in the reusable '_out' buffer.
    _out.clear();
    ByteBuffer frameData = encoder.encodeFrame(toEncode, _out);

    // Strip in-stream SPS/PPS, then rewrite NAL units in MP4 length-prefixed form.
    spsList.clear();
    ppsList.clear();
    H264Utils.wipePS(frameData, spsList, ppsList);
    H264Utils.encodeMOVPacket(frameData);

    // Every frame is a sync sample lasting one timescale tick; pts == frame index.
    outTrack.addFrame(new MP4Packet(frameData, frameNo, timeScale, 1, frameNo, true, null, frameNo, 0));
    frameNo++;
}
示例3: encode
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
/**
 * Encodes one BufferedImage into the movie as an H.264 frame.
 *
 * @param img the image to append
 * @throws IOException if encoding or muxing fails
 */
@Override
public void encode(BufferedImage img) throws IOException {
    // RGB -> YUV420 (8-bit) conversion into the reusable target picture.
    JHVRgbToYuv420j8Bit.transform(img, toEncode);

    // Compress into the shared output buffer.
    _out.clear();
    ByteBuffer encoded = encoder.encodeFrame(toEncode, _out);

    // Remove in-stream parameter sets and switch to MP4 length-prefixed NALs.
    spsList.clear();
    ppsList.clear();
    H264Utils.wipePS(encoded, spsList, ppsList);
    H264Utils.encodeMOVPacket(encoded);

    // One packet per frame; pts == frame index, fps serves as the timescale.
    outTrack.addFrame(new MP4Packet(encoded, frameNo, fps, 1, frameNo, true, null, frameNo, 0));
    frameNo++;
}
示例4: outTimecodeSample
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
/**
 * Flushes the accumulated timecode run as one sample (plus an edit), or as a
 * silent edit when no timecode was seen. A no-op if nothing has accumulated.
 */
private void outTimecodeSample() throws IOException {
    // Nothing accumulated yet — nothing to flush.
    if (sampleDuration <= 0)
        return;

    // No timecode seen for this run: record an empty edit only.
    if (firstTimecode == null) {
        lower.add(new Edit(sampleDuration, -1, 1.0f));
        return;
    }

    // Fall back to the last observed frame counter when no fps was established.
    if (fpsEstimate == -1)
        fpsEstimate = prevTimecode.getFrame() + 1;

    // Register a sample entry describing this timecode run.
    TimecodeSampleEntry entry = new TimecodeSampleEntry(firstTimecode.isDropFrame() ? 1 : 0,
            timescale, (int) (sampleDuration / tcFrames), fpsEstimate);
    sampleEntries.add(entry);

    // The sample payload is a single 32-bit frame counter.
    ByteBuffer sample = ByteBuffer.allocate(4);
    sample.putInt(toCounter(firstTimecode, fpsEstimate));
    sample.flip();

    // Entry index is the one just appended (size - 1).
    addFrame(new MP4Packet(sample, samplePts, timescale, sampleDuration, 0, true, null, samplePts,
            sampleEntries.size() - 1));
    lower.add(new Edit(sampleDuration, samplePts, 1.0f));
}
示例5: getTimecode
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
// Re-stamps the given media packet with its timecode: the pts is mapped from
// edited to media time, walked through the time-to-sample table to find the
// enclosing sample, and the frame number within that sample is computed.
public MP4Packet getTimecode(MP4Packet pkt) throws IOException {
long tv = QTTimeUtil.editedToMedia(box, box.rescale(pkt.getPts(), pkt.getTimescale()), movie.getTimescale());
int sample;
int ttsInd = 0, ttsSubInd = 0;
// Walk time-to-sample entries, subtracting durations until 'tv' falls inside one.
for (sample = 0; sample < sampleCache.length - 1; sample++) {
int dur = timeToSamples[ttsInd].getSampleDuration();
if (tv < dur)
break;
tv -= dur;
ttsSubInd++;
// NOTE(review): ttsSubInd is never reset to 0 when ttsInd advances, so the
// per-entry count check drifts after the first entry — verify against upstream.
if (ttsInd < timeToSamples.length - 1 && ttsSubInd >= timeToSamples[ttsInd].getSampleCount())
ttsInd++;
}
// Frame index inside the sample; the (2*x + 1)/2 form rounds to nearest.
int frameNo = (int) ((((2 * tv * tse.getTimescale()) / box.getTimescale()) / tse.getFrameDuration()) + 1) / 2;
return new MP4Packet(pkt, getTimecode(getTimecodeSample(sample), frameNo, tse));
}
示例6: updateState
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
/**
 * Re-initializes the H.264 decoder whenever the packet refers to a different
 * sample entry than the previous one; otherwise does nothing.
 */
private void updateState(Packet packet) throws IOException
{
    int entryNo = ((MP4Packet) packet).getEntryNo();
    if (entryNo == curENo)
        return;

    curENo = entryNo;
    avcCBox = parseAVCC((VideoSampleEntry) sampleEntries[curENo]);

    // Tear down the previous native decoder before building a new one.
    if (decoder != null)
        decoder.delete();
    decoder = new OpenH264Decoder();

    // Prime the fresh decoder with every SPS and PPS from the avcC box.
    for (ByteBuffer sps : avcCBox.getSpsList())
        decoder.decodeFrame(toDirectByteBuffer(sps, SPS_HEADER));
    for (ByteBuffer pps : avcCBox.getPpsList())
        decoder.decodeFrame(toDirectByteBuffer(pps, PPS_HEADER));
}
示例7: encodeImage
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
/**
 * Appends a BufferedImage to the MP4 video track as an H.264 key frame.
 *
 * @param bi the image to encode
 * @throws IOException if encoding or muxing fails
 */
public void encodeImage(BufferedImage bi) throws IOException {
    if (toEncode == null) {
        toEncode = Picture.create(bi.getWidth(), bi.getHeight(), ColorSpace.YUV420);
    }
    // Zero all three planes so leftovers from the previous frame cannot bleed through.
    for (int plane = 0; plane < 3; plane++) {
        Arrays.fill(toEncode.getData()[plane], 0);
    }
    transform.transform(AWTUtil.fromBufferedImage(bi), toEncode);

    // Compress into the reusable output buffer.
    _out.clear();
    ByteBuffer encoded = encoder.encodeFrame(_out, toEncode);

    // Collect SPS/PPS and convert the NAL stream to MP4 (length-prefixed) layout.
    spsList.clear();
    ppsList.clear();
    H264Utils.encodeMOVPacket(encoded, spsList, ppsList);

    // Fixed timescale of 25 — one frame per tick at 25 fps; pts == frame index.
    outTrack.addFrame(new MP4Packet(encoded, frameNo, 25, 1, frameNo, true, null, frameNo, 0));
    frameNo++;
}
示例8: encodeNativeFrame
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
/**
 * Converts and encodes one native Picture, then muxes it into the video track.
 */
public void encodeNativeFrame(Picture pic) throws IOException {
    if (toEncode == null) {
        // One-time allocation of the conversion target in the encoder's native color space.
        toEncode = Picture.create(pic.getWidth(), pic.getHeight(), encoder.getSupportedColorSpaces()[0]);
    }

    transform.transform(pic, toEncode); // color-space conversion

    _out.clear();
    ByteBuffer packetData = encoder.encodeFrame(toEncode, _out); // H.264 bitstream in '_out'

    // Pull the parameter sets out of the stream and re-wrap NALs for MP4.
    spsList.clear();
    ppsList.clear();
    H264Utils.wipePS(packetData, spsList, ppsList);
    H264Utils.encodeMOVPacket(packetData);

    // Timescale fixed at 25; every frame is a sync sample lasting one tick.
    outTrack.addFrame(new MP4Packet(packetData, frameNo, 25, 1, frameNo, true, null, frameNo, 0));
    frameNo++;
}
示例9: encodeImage
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
/**
 * Encodes a single Bitmap as an H.264 key frame and appends it to the video track.
 *
 * @param bi            the bitmap to encode
 * @param timeEachFrame how long this frame should be displayed, in timescale units
 * @throws IOException if the encoder or muxer fails
 */
public void encodeImage(Bitmap bi, int timeEachFrame) throws IOException {
    setTimeEachFrame(timeEachFrame);
    if (toEncode == null) {
        toEncode = Picture.create(bi.getWidth(), bi.getHeight(), ColorSpace.YUV420);
    }
    // Clear the working planes first so stale data never leaks into this frame.
    for (int i = 0; i < 3; i++) {
        Arrays.fill(toEncode.getData()[i], 0);
    }
    transform.transform(fromBufferedImage(bi), toEncode);
    // Encode image into H.264 frame, the result is stored in '_out' buffer
    _out.clear();
    ByteBuffer result = encoder.encodeFrame(_out, toEncode);
    // Based on the frame above form correct MP4 packet
    spsList.clear();
    ppsList.clear();
    H264Utils.encodeMOVPacket(result, spsList, ppsList);
    outTrack.addFrame(new MP4Packet(result,
            frameNo,   // pts of this frame
            timescale, // frames per duration unit: timescale = 2, duration = 1 => 0.5s per image
            duration,  // display duration of this frame in timescale units
            frameNo,
            true,      // every frame is written as a key frame
            null,
            frameNo,
            0));
    // BUGFIX: advance the frame counter. The original never incremented frameNo,
    // so every packet reused the same pts/frameNum — inconsistent with every
    // sibling encodeImage/encodeNativeFrame example in this file.
    frameNo++;
}
示例10: detectDecoder
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
/**
 * Picks a decoder adaptor for the given video track; only H.264 inside an MP4
 * container is currently supported.
 *
 * @throws JCodecException when the track's codec is not supported
 */
private ContainerAdaptor detectDecoder(SeekableDemuxerTrack videoTrack, Packet frame) throws JCodecException {
    if (videoTrack instanceof AbstractMP4DemuxerTrack) {
        AbstractMP4DemuxerTrack mp4Track = (AbstractMP4DemuxerTrack) videoTrack;
        // Resolve the sample entry this packet refers to and map its fourcc to a decoder.
        SampleEntry entry = mp4Track.getSampleEntries()[((MP4Packet) frame).getEntryNo()];
        if (byFourcc(entry.getHeader().getFourcc()) instanceof H264Decoder) {
            return new AVCMP4Adaptor(mp4Track.getSampleEntries());
        }
    }
    throw new UnsupportedFormatException("Codec is not supported");
}
示例11: detectDecoder
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
/**
 * Selects a container adaptor for the track, supporting only AVC/H.264 in MP4.
 *
 * @throws JCodecException when the track's codec cannot be handled
 */
private ContainerAdaptor detectDecoder(SeekableDemuxerTrack videoTrack, Packet frame) throws JCodecException {
    if (videoTrack instanceof AbstractMP4DemuxerTrack) {
        SampleEntry[] entries = ((AbstractMP4DemuxerTrack) videoTrack).getSampleEntries();
        // Look up the decoder by the fourcc of the packet's sample entry.
        SampleEntry entry = entries[((MP4Packet) frame).getEntryNo()];
        VideoDecoder candidate = videoDecoder(entry.getHeader().getFourcc());
        if (candidate instanceof H264Decoder) {
            return new AVCMP4Adaptor(entries);
        }
    }
    throw new UnsupportedFormatException("Codec is not supported");
}
示例12: encodeImage
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
/**
 * Encodes a BufferedImage as an H.264 key frame and adds it to the video track.
 *
 * @param bi the image to encode
 * @throws IOException if encoding or muxing fails
 */
public void encodeImage(BufferedImage bi) throws IOException {
    // Compress the converted frame into the shared '_out' buffer.
    _out.clear();
    ByteBuffer encoded = encoder.encodeFrame(_out, makeFrame(bi));

    // Extract SPS/PPS and rewrite the NAL units in MP4 length-prefixed form.
    spsList.clear();
    ppsList.clear();
    H264Utils.encodeMOVPacket(encoded, spsList, ppsList);

    // pts == frameNum == frame index; timescale 25, duration 1, always a key frame.
    outTrack.addFrame(new MP4Packet(encoded, frameNo, 25, 1, frameNo, true, null, frameNo, 0));
    frameNo++;
}
示例13: nextFrame
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
// Reads the next packet from a chunk of fixed-size frames: every frame remaining
// in the current chunk is returned as a single MP4Packet.
@Override
public synchronized MP4Packet nextFrame(ByteBuffer buffer) throws IOException {
// All chunks consumed -> end of track.
if (stcoInd >= chunkOffsets.length)
return null;
int frameSize = getFrameSize();
// Sample entry index and total byte size of the current chunk.
int se = sampleToChunks[stscInd].getEntry();
int chSize = sampleToChunks[stscInd].getCount() * frameSize;
// posShift skips frames already consumed from this chunk (e.g. after a seek).
long pktOff = chunkOffsets[stcoInd] + posShift;
int pktSize = chSize - posShift;
ByteBuffer result = readPacketData(input, buffer, pktOff, pktSize);
// Remember the pts of the first frame, then advance pts past every frame read.
long ptsRem = pts;
int doneFrames = pktSize / frameSize;
shiftPts(doneFrames);
// Packet duration is the pts delta; entry index is stored zero-based (se - 1).
MP4Packet pkt = new MP4Packet(result, QTTimeUtil.mediaToEdited(box, ptsRem, movie.getTimescale()), timescale,
(int) (pts - ptsRem), curFrame, true, null, ptsRem, se - 1, pktOff, pktSize, true);
curFrame += doneFrames;
// The chunk is fully consumed: move to the next one, and when it begins a new
// sample-to-chunk run, advance that index too.
posShift = 0;
++stcoInd;
if (stscInd < sampleToChunks.length - 1 && (stcoInd + 1) == sampleToChunks[stscInd + 1].getFirst())
stscInd++;
return pkt;
}
示例14: nextFrame
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
/**
 * Reads the next frame into a freshly allocated buffer sized from the sample table.
 *
 * @return the next packet, or {@code null} once all samples have been read
 * @throws IOException if reading the sample data fails
 */
public synchronized MP4Packet nextFrame() throws IOException {
    if (curFrame >= sizes.length) {
        return null;
    }
    return nextFrame(ByteBuffer.allocate(sizes[(int) curFrame]));
}
示例15: processNal
import org.jcodec.containers.mp4.MP4Packet; //导入依赖的package包/类
// Routes a single NAL unit: SPS/PPS are collected as codec configuration,
// everything else is converted from Annex B (start-code) to MP4 length-prefixed
// form and written to the track as an MP4 packet.
public void processNal(ByteBuffer inputNAL) throws IOException
{
NALType type = NALType.fromBitStream(inputNAL);
// Handle a 0x00 0x00 0x00 0x01 start sequence if necessary.
// NOTE(review): this assumes a 4-byte start code when bytes at index 3..4 are
// 0,1 and drops the extra leading zero; confirm against the bitstream feeder.
if (inputNAL.get(3) == 0 && inputNAL.get(4) == 1)
{
inputNAL.position(1);
}
// Copy the NAL into a private buffer so the caller's buffer stays untouched.
ByteBuffer nal = ByteBuffer.allocate(inputNAL.remaining());
nal.put(inputNAL);
nal.clear();
switch (type)
{
case SPS:
nal.position(5); // Skip header
compareAndAddUniqueSPS(nal);
break;
case PPS:
nal.position(5); // Skip header
compareAndAddUniquePPS(nal);
break;
default:
boolean iframe = type == NALType.CODED_SLICE_IDR_PICTURE;
// Overwrite the 4-byte start code at the front with the big-endian NAL
// length — the Annex B -> MP4 (AVCC) conversion — then rewind for writing.
nal.putInt(nal.remaining() - 4);
nal.clear();
MP4Packet packet = new MP4Packet(nal, pts, timescale, 1, frameNo, iframe, null, dts, 0);
track.addFrame(packet);
pts++;
dts++;
frameNo++;
break;
}
}