This article collects typical usage examples of the Java method org.jcodec.common.model.Picture.create. If you are wondering what Picture.create does, how to use it, or want working samples, the curated examples below should help. You can also explore the enclosing class, org.jcodec.common.model.Picture, for further usage.
Below are 15 code examples of Picture.create, sorted by popularity.
Example 1: encodeNativeFrame
import org.jcodec.common.model.Picture; // import the package/class this method depends on
public void encodeNativeFrame(Picture pic) throws IOException {
    if (toEncode == null) {
        toEncode = Picture.create(pic.getWidth(), pic.getHeight(), encoder.getSupportedColorSpaces()[0]);
    }
    // Perform color space conversion; frames that fail to convert are silently skipped
    try {
        transform.transform(pic, toEncode);
    } catch (Exception e) {
        return;
    }
    // Encode image into an H.264 frame; the result is stored in the '_out' buffer
    _out.clear();
    ByteBuffer result = encoder.encodeFrame(toEncode, _out);
    // Based on the frame above, form a correct MP4 packet
    spsList.clear();
    ppsList.clear();
    H264Utils.wipePS(result, spsList, ppsList);
    H264Utils.encodeMOVPacket(result);
    // Add packet to the video track
    outTrack.addFrame(new MP4Packet(result, frameNo, 5, 1, frameNo, true, null, frameNo, 0));
    frameNo++;
}
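This method relies on state initialized elsewhere in its class. A minimal sketch of that surrounding wiring against the JCodec 0.1.9-era API; the field names mirror the snippet, but the initialization details are assumptions:
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import org.jcodec.codecs.h264.H264Encoder;
import org.jcodec.common.NIOUtils;
import org.jcodec.common.SeekableByteChannel;
import org.jcodec.common.model.ColorSpace;
import org.jcodec.common.model.Picture;
import org.jcodec.containers.mp4.Brand;
import org.jcodec.containers.mp4.TrackType;
import org.jcodec.containers.mp4.muxer.FramesMP4MuxerTrack;
import org.jcodec.containers.mp4.muxer.MP4Muxer;
import org.jcodec.scale.ColorUtil;
import org.jcodec.scale.Transform;

// Hypothetical host class for encodeNativeFrame() above
class NativeFrameEncoder {
    private SeekableByteChannel ch;        // output channel
    private MP4Muxer muxer;                // MP4 container writer
    private FramesMP4MuxerTrack outTrack;  // video track packets are added to
    private H264Encoder encoder;           // produces H.264 NAL units
    private Transform transform;           // converts input frames to the encoder's color space
    private ByteBuffer _out;               // reusable output buffer
    private Picture toEncode;              // reusable conversion target
    private List<ByteBuffer> spsList = new ArrayList<ByteBuffer>();
    private List<ByteBuffer> ppsList = new ArrayList<ByteBuffer>();
    private int frameNo;

    NativeFrameEncoder(File out, int width, int height, int fps) throws IOException {
        ch = NIOUtils.writableFileChannel(out);
        muxer = new MP4Muxer(ch, Brand.MP4);
        outTrack = muxer.addTrack(TrackType.VIDEO, fps);
        encoder = new H264Encoder();
        // RGB input is an assumption; pick the source color space your frames use
        transform = ColorUtil.getTransform(ColorSpace.RGB, encoder.getSupportedColorSpaces()[0]);
        _out = ByteBuffer.allocate(width * height * 6);  // generously sized output buffer
    }
}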
Example 2: transcode
import org.jcodec.common.model.Picture; // import the package/class this method depends on
public List<ByteBuffer> transcode() throws IOException {
    H264Decoder decoder = new H264Decoder();
    decoder.addSps(avcC.getSpsList());
    decoder.addPps(avcC.getPpsList());
    Picture buf = Picture.create(mbW << 4, mbH << 4, ColorSpace.YUV420);
    Frame dec = null;
    for (VirtualPacket virtualPacket : head) {
        dec = decoder.decodeFrame(H264Utils.splitMOVPacket(virtualPacket.getData(), avcC), buf.getData());
    }
    H264Encoder encoder = new H264Encoder(rc);
    ByteBuffer tmp = ByteBuffer.allocate(frameSize);
    List<ByteBuffer> result = new ArrayList<ByteBuffer>();
    for (VirtualPacket pkt : tail) {
        dec = decoder.decodeFrame(H264Utils.splitMOVPacket(pkt.getData(), avcC), buf.getData());
        tmp.clear();
        ByteBuffer res = encoder.encodeFrame(dec, tmp);
        ByteBuffer out = ByteBuffer.allocate(frameSize);
        processFrame(res, out);
        result.add(out);
    }
    return result;
}
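Note that the decode buffer is allocated at macroblock granularity: mbW << 4 multiplies the macroblock count by 16, so 1920x1080 content decodes into a 1920x1088 buffer. A sketch of that rounding, assuming mbW/mbH are derived from the stream's pixel dimensions (allocAlignedBuffer is a hypothetical helper):
static Picture allocAlignedBuffer(int width, int height) {
    // Round pixel dimensions up to whole 16x16 macroblocks, as transcode() expects
    int mbW = (width + 15) >> 4;   // 1920 -> 120 macroblocks across
    int mbH = (height + 15) >> 4;  // 1080 -> 68 macroblocks down, i.e. 1088 rows
    return Picture.create(mbW << 4, mbH << 4, ColorSpace.YUV420);
}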
Example 3: encodeNativeFrame
import org.jcodec.common.model.Picture; // import the package/class this method depends on
public void encodeNativeFrame(Picture pic) throws IOException {
    if (toEncode == null) {
        toEncode = Picture.create(pic.getWidth(), pic.getHeight(), encoder.getSupportedColorSpaces()[0]);
    }
    // Perform color space conversion
    transform.transform(pic, toEncode);
    // Encode image into an H.264 frame; the result is stored in the '_out' buffer
    _out.clear();
    ByteBuffer result = encoder.encodeFrame(toEncode, _out);
    // Based on the frame above, form a correct MP4 packet
    spsList.clear();
    ppsList.clear();
    H264Utils.wipePS(result, spsList, ppsList);
    H264Utils.encodeMOVPacket(result);
    // Add packet to the video track
    outTrack.addFrame(new MP4Packet(result, frameNo, timeScale, 1, frameNo, true, null, frameNo, 0));
    frameNo++;
}
Example 4: toColorArray
import org.jcodec.common.model.Picture; // import the package/class this method depends on
public static int[] toColorArray(Picture src) {
    if (src.getColor() != ColorSpace.RGB) {
        Transform transform = ColorUtil.getTransform(src.getColor(), ColorSpace.RGB);
        Picture rgb = Picture.create(src.getWidth(), src.getHeight(), ColorSpace.RGB, src.getCrop());
        transform.transform(src, rgb);
        src = rgb;
    }
    int[] _return = new int[src.getCroppedWidth() * src.getCroppedHeight()];
    int[] data = src.getPlaneData(0);
    for (int i = 0; i < _return.length; ++i) {
        _return[i] = ReadableRGBContainer.toIntColor(data[3 * i + 2], data[3 * i + 1], data[3 * i]);
    }
    return _return;
}
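A hypothetical call site, where 'frame' stands in for any decoded Picture (the method converts non-RGB input itself):
// Flatten a decoded frame into one packed int per pixel
int[] colors = toColorArray(frame);
int pixelCount = colors.length;  // croppedWidth * croppedHeight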
Example 5: open
import org.jcodec.common.model.Picture; // import the package/class this method depends on
@Override
public void open(String _path, int width, int _height, int _fps) throws IOException {
    path = _path;
    height = _height;
    fps = _fps;
    ch = new FileChannelWrapper(FileChannel.open(Paths.get(path), StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING));
    // Muxer that will store the encoded frames
    muxer = new MP4Muxer(ch, Brand.MP4);
    // Add video track to muxer
    outTrack = muxer.addTrack(TrackType.VIDEO, fps);
    // Allocate a buffer big enough to hold output frames
    _out = ByteBuffer.allocateDirect(width * height * 6);
    // Create an instance of the encoder
    encoder = new H264Encoder(new JCodecUtils.JHVRateControl(20));
    // Encoder extra data (SPS, PPS) to be stored in a special place of the MP4
    spsList = new ArrayList<>();
    ppsList = new ArrayList<>();
    toEncode = Picture.create(width, height, ColorSpace.YUV420J);
}
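open() collects SPS/PPS lists that have to be flushed when writing finishes. A plausible matching close(), sketched against the 0.1.9-era API; the real class may finalize differently:
public void close() throws IOException {
    // Attach codec private data (SPS/PPS gathered while encoding) to the track
    outTrack.addSampleEntry(H264Utils.createMOVSampleEntry(spsList, ppsList));
    // Write the MP4 header (moov box) and release the file channel
    muxer.writeHeader();
    ch.close();
}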
Example 6: encodeImage
import org.jcodec.common.model.Picture; // import the package/class this method depends on
public void encodeImage(BufferedImage bi) throws IOException {
    if (toEncode == null) {
        toEncode = Picture.create(bi.getWidth(), bi.getHeight(), ColorSpace.YUV420);
    }
    // Perform conversion, zeroing the planes first
    for (int i = 0; i < 3; i++) {
        Arrays.fill(toEncode.getData()[i], 0);
    }
    transform.transform(AWTUtil.fromBufferedImage(bi), toEncode);
    // Encode image into an H.264 frame; the result is stored in the '_out' buffer
    _out.clear();
    ByteBuffer result = encoder.encodeFrame(_out, toEncode);
    // Based on the frame above, form a correct MP4 packet
    spsList.clear();
    ppsList.clear();
    H264Utils.encodeMOVPacket(result, spsList, ppsList);
    // Add packet to the video track
    outTrack.addFrame(new MP4Packet(result, frameNo, 25, 1, frameNo, true, null, frameNo, 0));
    frameNo++;
}
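A hypothetical driver for this method, assuming 'imageFiles' holds same-sized images and the class exposes a finishing step like the close() sketched earlier:
for (File f : imageFiles) {
    encodeImage(ImageIO.read(f));  // each image becomes one frame at timescale 25
}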
Example 7: interlaced
import org.jcodec.common.model.Picture; // import the package/class this method depends on
private Picture[] interlaced(Picture dct) {
    int mbWidth = (dct.getWidth() + 15) >> 4;
    int mbHeight = (dct.getHeight() + 15) >> 4;
    Picture field1 = Picture.create(dct.getWidth(), dct.getHeight() >> 1, dct.getColor());
    Picture field2 = Picture.create(dct.getWidth(), dct.getHeight() >> 1, dct.getColor());
    splitY(mbWidth, mbHeight, dct.getPlaneData(0), field1.getPlaneData(0), field2.getPlaneData(0),
            dct.getPlaneData(3));
    splitCbCr(mbWidth, mbHeight, dct.getPlaneData(1), field1.getPlaneData(1), field2.getPlaneData(1),
            dct.getPlaneData(3));
    splitCbCr(mbWidth, mbHeight, dct.getPlaneData(2), field1.getPlaneData(2), field2.getPlaneData(2),
            dct.getPlaneData(3));
    return new Picture[] { field1, field2 };
}
Example 8: test
import org.jcodec.common.model.Picture; // import the package/class this method depends on
private boolean test(File coded, File ref) throws IOException {
    MappedH264ES es = new MappedH264ES(NIOUtils.fetchFrom(coded));
    Picture buf = Picture.create(1920, 1088, ColorSpace.YUV420);
    H264Decoder dec = new H264Decoder();
    Packet nextFrame;
    ByteBuffer _yuv = NIOUtils.fetchFrom(ref);
    while ((nextFrame = es.nextFrame()) != null) {
        Picture out = dec.decodeFrame(nextFrame.getData(), buf.getData()).cropped();
        Picture pic = out.createCompatible();
        pic.copyFrom(out);
        int lumaSize = pic.getWidth() * pic.getHeight();
        // In 4:2:0 both chroma planes are a quarter of the luma size
        int crSize = lumaSize >> 2;
        int cbSize = lumaSize >> 2;
        ByteBuffer yuv = NIOUtils.read(_yuv, lumaSize + crSize + cbSize);
        if (!Arrays.equals(getAsIntArray(yuv, lumaSize), pic.getPlaneData(0)))
            return false;
        if (!Arrays.equals(getAsIntArray(yuv, crSize), pic.getPlaneData(1)))
            return false;
        if (!Arrays.equals(getAsIntArray(yuv, cbSize), pic.getPlaneData(2)))
            return false;
    }
    return true;
}
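The helper getAsIntArray is referenced but not shown; a sketch of what it presumably does, assuming it widens each unsigned byte of the reference YUV into an int to match JCodec 0.1.9's int[] planes:
private static int[] getAsIntArray(ByteBuffer yuv, int size) {
    byte[] b = new byte[size];
    yuv.get(b);                   // consume one plane's worth of reference samples
    int[] res = new int[size];
    for (int i = 0; i < size; i++)
        res[i] = b[i] & 0xff;     // bytes are unsigned 8-bit samples
    return res;
}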
Example 9: nextFrame
import org.jcodec.common.model.Picture; // import the package/class this method depends on
public Picture nextFrame(int[][] buffer) throws IOException {
    if (invalidFormat != null)
        throw new RuntimeException("Invalid input: " + invalidFormat);
    long pos = is.position();
    ByteBuffer buf = NIOUtils.fetchFrom(is, 2048);
    String frame = readLine(buf);
    if (frame == null || !frame.startsWith("FRAME"))
        return null;
    MappedByteBuffer pix = is.map(MapMode.READ_ONLY, pos + buf.position(), bufSize);
    is.position(pos + buf.position() + bufSize);
    Picture create = Picture.create(width, height, ColorSpace.YUV420);
    copy(pix, create.getPlaneData(0));
    copy(pix, create.getPlaneData(1));
    copy(pix, create.getPlaneData(2));
    return create;
}
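The copy helper isn't shown; presumably it expands one plane of 8-bit Y4M samples into the Picture's int[] plane, consuming the mapped buffer sequentially. A sketch under that assumption:
private static void copy(ByteBuffer from, int[] to) {
    for (int i = 0; i < to.length; i++)
        to[i] = from.get() & 0xff;  // one unsigned byte per sample
}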
Example 10: encodeNativeFrame
import org.jcodec.common.model.Picture; // import the package/class this method depends on
public void encodeNativeFrame(Picture pic) throws IOException {
    if (toEncode == null) {
        toEncode = Picture.create(pic.getWidth(), pic.getHeight(), encoder.getSupportedColorSpaces()[0]);
    }
    // Perform color space conversion
    transform.transform(pic, toEncode);
    // Encode image into an H.264 frame; the result is stored in the '_out' buffer
    _out.clear();
    ByteBuffer result = encoder.encodeFrame(toEncode, _out);
    // Based on the frame above, form a correct MP4 packet
    spsList.clear();
    ppsList.clear();
    H264Utils.wipePS(result, spsList, ppsList);
    H264Utils.encodeMOVPacket(result);
    // Add packet to the video track
    outTrack.addFrame(new MP4Packet(result, frameNo, 25, 1, frameNo, true, null, frameNo, 0));
    frameNo++;
}
Example 11: toBufferedImage
import org.jcodec.common.model.Picture; // import the package/class this method depends on
public static BufferedImage toBufferedImage(Picture src) {
    if (src.getColor() != ColorSpace.RGB) {
        Transform transform = ColorUtil.getTransform(src.getColor(), ColorSpace.RGB);
        Picture rgb = Picture.create(src.getWidth(), src.getHeight(), ColorSpace.RGB, src.getCrop());
        transform.transform(src, rgb);
        src = rgb;
    }
    BufferedImage dst = new BufferedImage(src.getCroppedWidth(), src.getCroppedHeight(),
            BufferedImage.TYPE_3BYTE_BGR);
    if (src.getCrop() == null)
        toBufferedImage(src, dst);
    else
        toBufferedImageCropped(src, dst);
    return dst;
}
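A hypothetical call site, saving a decoded frame to disk ('src' stands in for any decoded Picture):
BufferedImage img = toBufferedImage(src);
ImageIO.write(img, "png", new File("frame.png"));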
Example 12: encodeImage
import org.jcodec.common.model.Picture; // import the package/class this method depends on
public void encodeImage(Bitmap bi, int timeEachFrame) throws IOException {
    setTimeEachFrame(timeEachFrame);
    if (toEncode == null) {
        toEncode = Picture.create(bi.getWidth(), bi.getHeight(), ColorSpace.YUV420);
    }
    // Perform conversion, zeroing the planes first
    for (int i = 0; i < 3; i++)
        Arrays.fill(toEncode.getData()[i], 0);
    transform.transform(fromBufferedImage(bi), toEncode);
    // Encode image into an H.264 frame; the result is stored in the '_out' buffer
    _out.clear();
    ByteBuffer result = encoder.encodeFrame(_out, toEncode);
    // Based on the frame above, form a correct MP4 packet
    spsList.clear();
    ppsList.clear();
    H264Utils.encodeMOVPacket(result, spsList, ppsList);
    outTrack.addFrame(new MP4Packet(result,
            frameNo,   // presentation time in timescale units; frameNo * timeEachFrame decides when the next image appears
            timescale, // units per second; e.g. timescale = 2 with duration = 1 shows each image for 0.5 s
            duration,  // how long this frame stays on screen, in timescale units
            frameNo,
            true,
            null,
            frameNo,
            0));
}
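Despite its name, fromBufferedImage here must accept an Android Bitmap; it isn't shown, but a conversion of that shape could look like the following sketch (an assumption: unpack the Bitmap's packed ARGB pixels into JCodec's interleaved RGB Picture):
private static Picture fromBufferedImage(Bitmap src) {
    Picture dst = Picture.create(src.getWidth(), src.getHeight(), ColorSpace.RGB);
    int[] dstData = dst.getPlaneData(0);
    int[] packed = new int[src.getWidth() * src.getHeight()];
    src.getPixels(packed, 0, src.getWidth(), 0, 0, src.getWidth(), src.getHeight());
    for (int i = 0, off = 0; i < packed.length; i++) {
        dstData[off++] = (packed[i] >> 16) & 0xff; // R
        dstData[off++] = (packed[i] >> 8) & 0xff;  // G
        dstData[off++] = packed[i] & 0xff;         // B
    }
    return dst;
}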
Example 13: splitSlice
import org.jcodec.common.model.Picture; // import the package/class this method depends on
private Picture splitSlice(Picture result, int mbX, int mbY, int sliceMbCount, boolean unsafe) {
    Picture out = Picture.create(sliceMbCount << 4, 16, YUV422_10);
    if (unsafe) {
        Picture filled = Picture.create(sliceMbCount << 4, 16, YUV422_10);
        ImageOP.subImageWithFill(result, filled, new Rect(mbX << 4, mbY << 4, sliceMbCount << 4, 16));
        split(filled, out, 0, 0, sliceMbCount);
    } else {
        split(result, out, mbX, mbY, sliceMbCount);
    }
    return out;
}
Example 14: colorCvt
import org.jcodec.common.model.Picture; // import the package/class this method depends on
private Picture colorCvt(Picture in) {
    Picture out;
    if (in.getColor() == YUV422_10) {
        out = in;
    } else {
        Transform trans = ColorUtil.getTransform(in.getColor(), YUV422_10);
        out = Picture.create(in.getWidth(), in.getHeight(), YUV422_10);
        trans.transform(in, out);
    }
    return out;
}
Example 15: sliceData
import org.jcodec.common.model.Picture; // import the package/class this method depends on
private Picture sliceData(Picture source, int mbX, int mbY, int mbWidth, int sliceMbCount) {
    Picture pic = Picture.create(sliceMbCount << 4, 16, YUV422_10);
    int[][] out = pic.getData();
    int[][] in = source.getData();
    System.arraycopy(in[0], (mbY * mbWidth + mbX) << 8, out[0], 0, out[0].length);
    System.arraycopy(in[1], (mbY * mbWidth + mbX) << 7, out[1], 0, out[1].length);
    System.arraycopy(in[2], (mbY * mbWidth + mbX) << 7, out[2], 0, out[2].length);
    return pic;
}
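The shift amounts encode per-macroblock sample counts for 4:2:2 data, which implies the source Picture's planes are laid out macroblock-by-macroblock rather than in raster order. Spelled out:
// For 16x16 macroblocks in 4:2:2:
int lumaPerMb   = 16 * 16;  // 256 luma samples   = 1 << 8, hence "<< 8" for plane 0
int chromaPerMb = 8 * 16;   // 128 chroma samples = 1 << 7, hence "<< 7" for planes 1 and 2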