本文整理汇总了Java中org.jcodec.common.model.Picture.getHeight方法的典型用法代码示例。如果您正苦于以下问题:Java Picture.getHeight方法的具体用法?Java Picture.getHeight怎么用?Java Picture.getHeight使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.jcodec.common.model.Picture
的用法示例。
在下文中一共展示了Picture.getHeight方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: interlaced
import org.jcodec.common.model.Picture; //导入方法依赖的package包/类
/**
 * Splits a frame-organized DCT picture into its two interlaced fields.
 * Each field is half the source height; luma and both chroma planes are
 * de-interleaved by the splitY/splitCbCr helpers, which also consult the
 * source's fourth plane (plane 3).
 *
 * @param dct source picture holding both fields interleaved
 * @return two-element array: { first field, second field }
 */
private Picture[] interlaced(Picture dct) {
    int widthInMbs = (dct.getWidth() + 15) >> 4;   // round up to whole 16x16 macroblocks
    int heightInMbs = (dct.getHeight() + 15) >> 4;
    int fieldHeight = dct.getHeight() >> 1;
    Picture first = Picture.create(dct.getWidth(), fieldHeight, dct.getColor());
    Picture second = Picture.create(dct.getWidth(), fieldHeight, dct.getColor());
    int[] aux = dct.getPlaneData(3);
    splitY(widthInMbs, heightInMbs, dct.getPlaneData(0), first.getPlaneData(0), second.getPlaneData(0), aux);
    splitCbCr(widthInMbs, heightInMbs, dct.getPlaneData(1), first.getPlaneData(1), second.getPlaneData(1), aux);
    splitCbCr(widthInMbs, heightInMbs, dct.getPlaneData(2), first.getPlaneData(2), second.getPlaneData(2), aux);
    return new Picture[] { first, second };
}
示例2: encodeFrame
import org.jcodec.common.model.Picture; //导入方法依赖的package包/类
/**
 * Serializes an RGB picture as a binary PPM (P6) image.
 * The header is followed by one byte per component; samples are emitted
 * in reverse triplet order (the plane appears to hold BGR — the output
 * is written R,G,B per PPM convention).
 *
 * @param picture source; must be in the RGB color space
 * @return a flipped buffer ready for reading
 * @throws IllegalArgumentException when the picture is not RGB
 */
public ByteBuffer encodeFrame(Picture picture) {
    if (picture.getColor() != ColorSpace.RGB)
        throw new IllegalArgumentException("Only RGB image can be stored in PPM");
    int width = picture.getWidth();
    int height = picture.getHeight();
    ByteBuffer buffer = ByteBuffer.allocate(width * height * 3 + 200);
    buffer.put(JCodecUtil.asciiString("P6 " + width + " " + height + " 255\n"));
    int[] plane = picture.getData()[0];
    int nSamples = width * height * 3;
    for (int off = 0; off < nSamples; off += 3) {
        buffer.put((byte) plane[off + 2]);
        buffer.put((byte) plane[off + 1]);
        buffer.put((byte) plane[off]);
    }
    buffer.flip();
    return buffer;
}
示例3: transform
import org.jcodec.common.model.Picture; //导入方法依赖的package包/类
/**
 * Converts a full-range (JPEG) 4:2:2 YUV source into packed RGB.
 * Each horizontal pair of luma samples shares one chroma sample pair,
 * so the inner loop advances two pixels at a time.
 */
public void transform(Picture src, Picture dst) {
    int[] luma = src.getPlaneData(0);
    int[] cb = src.getPlaneData(1);
    int[] cr = src.getPlaneData(2);
    int[] rgb = dst.getPlaneData(0);
    int lumaOff = 0;
    int chromaOff = 0;
    for (int row = 0; row < dst.getHeight(); row++) {
        for (int col = 0; col < dst.getWidth(); col += 2, lumaOff += 2, chromaOff++) {
            YUVJtoRGB(luma[lumaOff], cb[chromaOff], cr[chromaOff], rgb, lumaOff * 3);
            YUVJtoRGB(luma[lumaOff + 1], cb[chromaOff], cr[chromaOff], rgb, (lumaOff + 1) * 3);
        }
    }
}
示例4: transform
import org.jcodec.common.model.Picture; //导入方法依赖的package包/类
/**
 * Copies luma verbatim and averages chroma into the destination, then
 * rescales all three planes by the net shift between shiftUp and shiftDown.
 * When the shifts are equal no scaling pass is performed.
 */
public void transform(Picture src, Picture dst) {
    int lumaSamples = src.getWidth() * src.getHeight();
    System.arraycopy(src.getPlaneData(0), 0, dst.getPlaneData(0), 0, lumaSamples);
    copyAvg(src.getPlaneData(1), dst.getPlaneData(1), src.getPlaneWidth(1), src.getPlaneHeight(1));
    copyAvg(src.getPlaneData(2), dst.getPlaneData(2), src.getPlaneWidth(2), src.getPlaneHeight(2));
    int netShift = shiftUp - shiftDown;
    if (netShift > 0) {
        for (int plane = 0; plane < 3; plane++)
            up(dst.getPlaneData(plane), netShift);
    } else if (netShift < 0) {
        for (int plane = 0; plane < 3; plane++)
            down(dst.getPlaneData(plane), -netShift);
    }
}
示例5: transform
import org.jcodec.common.model.Picture; //导入方法依赖的package包/类
/**
 * Converts a 4:2:2 YUV source into packed RGB, first renormalizing each
 * sample by the configured upShift/downShift bit-depth adjustment.
 * Two luma samples per iteration share one chroma pair.
 */
public void transform(Picture src, Picture dst) {
    int[] luma = src.getPlaneData(0);
    int[] cb = src.getPlaneData(1);
    int[] cr = src.getPlaneData(2);
    int[] rgb = dst.getPlaneData(0);
    int lumaOff = 0;
    int chromaOff = 0;
    for (int row = 0; row < dst.getHeight(); row++) {
        for (int col = 0; col < dst.getWidth(); col += 2, lumaOff += 2, chromaOff++) {
            // Renormalize the shared chroma pair once per two pixels.
            int u = (cb[chromaOff] << upShift) >> downShift;
            int v = (cr[chromaOff] << upShift) >> downShift;
            YUV444toRGB888((luma[lumaOff] << upShift) >> downShift, u, v, rgb, lumaOff * 3);
            YUV444toRGB888((luma[lumaOff + 1] << upShift) >> downShift, u, v, rgb, (lumaOff + 1) * 3);
        }
    }
}
示例6: test
import org.jcodec.common.model.Picture; //导入方法依赖的package包/类
/**
 * Decodes an H.264 elementary stream frame by frame and compares every
 * decoded frame against a raw planar YUV420 reference file.
 *
 * @param coded H.264 Annex-B elementary stream
 * @param ref   raw YUV420 file with frames stored back to back
 * @return true when every decoded frame matches the reference exactly
 */
private boolean test(File coded, File ref) throws IOException {
    MappedH264ES stream = new MappedH264ES(NIOUtils.fetchFrom(coded));
    Picture scratch = Picture.create(1920, 1088, ColorSpace.YUV420);
    H264Decoder decoder = new H264Decoder();
    ByteBuffer reference = NIOUtils.fetchFrom(ref);
    for (Packet pkt = stream.nextFrame(); pkt != null; pkt = stream.nextFrame()) {
        Picture cropped = decoder.decodeFrame(pkt.getData(), scratch.getData()).cropped();
        Picture frame = cropped.createCompatible();
        frame.copyFrom(cropped);
        int ySize = frame.getWidth() * frame.getHeight();
        // 4:2:0 — each chroma plane holds a quarter of the luma samples.
        int chromaSize = ySize >> 2;
        ByteBuffer expected = NIOUtils.read(reference, ySize + 2 * chromaSize);
        // Compare planes in file order: Y, then the two chroma planes.
        if (!Arrays.equals(getAsIntArray(expected, ySize), frame.getPlaneData(0)))
            return false;
        if (!Arrays.equals(getAsIntArray(expected, chromaSize), frame.getPlaneData(1)))
            return false;
        if (!Arrays.equals(getAsIntArray(expected, chromaSize), frame.getPlaneData(2)))
            return false;
    }
    return true;
}
示例7: encodePicture
import org.jcodec.common.model.Picture; //导入方法依赖的package包/类
/**
 * Encodes a whole picture as a sequence of slices, writing the picture
 * header, a slice-size table, and then each slice's payload into 'out'.
 * The running quantizer 'qp' is threaded from slice to slice via the
 * return value of encodeSlice.
 */
protected void encodePicture(ByteBuffer out, int[][] scaledLuma, int[][] scaledChroma, int[] scan, Picture picture) {
// Picture dimensions rounded up to whole 16x16 macroblocks.
int mbWidth = (picture.getWidth() + 15) >> 4;
int mbHeight = (picture.getHeight() + 15) >> 4;
int qp = profile.firstQp;
int nSlices = calcNSlices(mbWidth, mbHeight);
writePictureHeader(LOG_DEFAULT_SLICE_MB_WIDTH, nSlices, out);
// 'fork' stays positioned at the slice-size table (2 bytes per slice),
// which is skipped in 'out' and back-filled as each slice is written.
ByteBuffer fork = out.duplicate();
NIOUtils.skip(out, nSlices << 1);
int i = 0;
int[] total = new int[nSlices];
for (int mbY = 0; mbY < mbHeight; mbY++) {
int mbX = 0;
int sliceMbCount = DEFAULT_SLICE_MB_WIDTH;
while (mbX < mbWidth) {
// Halve the slice width until it fits in the remaining row.
while (mbWidth - mbX < sliceMbCount)
sliceMbCount >>= 1;
int sliceStart = out.position();
// Slices touching a ragged bottom/right picture edge need the
// bounds-aware ("unsafe") encoding path.
boolean unsafeBottom = (picture.getHeight() % 16) != 0 && mbY == mbHeight - 1;
boolean unsafeRight = (picture.getWidth() % 16) != 0 && mbX + sliceMbCount == mbWidth;
qp = encodeSlice(out, scaledLuma, scaledChroma, scan, sliceMbCount, mbX, mbY, picture, qp, mbWidth,
mbHeight, unsafeBottom || unsafeRight);
// Record this slice's byte size in the header table.
fork.putShort((short) (out.position() - sliceStart));
// NOTE(review): 'total' is written but never read in this method, and
// the (short) cast into an int[] looks suspicious — confirm whether
// this is dead code or used by a subclass.
total[i++] = (short) (out.position() - sliceStart);
mbX += sliceMbCount;
}
}
}
示例8: decodeField
import org.jcodec.common.model.Picture; //导入方法依赖的package包/类
/**
 * Decodes a field at full resolution via the superclass, then presents it
 * downscaled: both the storage and the cropped dimensions are halved while
 * the underlying plane data is reused as-is.
 */
@Override
public Picture decodeField(ByteBuffer data, int[][] data2, int field, int step) {
    Picture full = super.decodeField(data, data2, field, step);
    Rect halfCrop = new Rect(0, 0, full.getCroppedWidth() >> 1, full.getCroppedHeight() >> 1);
    return new Picture(full.getWidth() >> 1, full.getHeight() >> 1, full.getData(), full.getColor(), halfCrop);
}
示例9: decodeField
import org.jcodec.common.model.Picture; //导入方法依赖的package包/类
/**
 * Decodes a field at full resolution via the superclass, then presents it
 * at quarter size: storage and cropped dimensions are divided by four while
 * the underlying plane data is reused as-is.
 */
@Override
public Picture decodeField(ByteBuffer data, int[][] data2, int field, int step) {
    Picture full = super.decodeField(data, data2, field, step);
    Rect quarterCrop = new Rect(0, 0, full.getCroppedWidth() >> 2, full.getCroppedHeight() >> 2);
    return new Picture(full.getWidth() >> 2, full.getHeight() >> 2, full.getData(), full.getColor(), quarterCrop);
}
示例10: decodeFrame
import org.jcodec.common.model.Picture; //导入方法依赖的package包/类
/**
 * Decodes one frame. Progressive content is decoded in a single pass;
 * interlaced content is decoded as two fields (in topFieldFirst order,
 * both writing into data2 with step 2) and returned as a single picture
 * of double field height backed by data2.
 */
public Picture decodeFrame(ByteBuffer data, int[][] data2) {
    if (!interlace)
        return decodeField(data, data2, 0, 1);
    int firstField = topFieldFirst ? 0 : 1;
    Picture field = decodeField(data, data2, firstField, 2);
    decodeField(data, data2, 1 - firstField, 2);
    return new Picture(field.getWidth(), field.getHeight() << 1, data2, field.getColor());
}
示例11: getBlockLuma
/**
 * Get block of (possibly interpolated) luma pixels.
 *
 * Coordinates x/y are in quarter-pel units: the low two bits select one of
 * sixteen fractional interpolators, the high bits give the integer position.
 * Blocks close enough to the picture edge use the bounds-checked ("unsafe")
 * interpolator table; interior blocks use the unchecked ("safe") table.
 */
public static void getBlockLuma(Picture pic, Picture out, int off, int x, int y, int w, int h) {
    int fracX = x & 0x3;
    int fracY = y & 0x3;
    int intX = x >> 2;
    int intY = y >> 2;
    int interp = (fracY << 2) + fracX;
    boolean nearEdge = intX < 2 || intY < 2 || intX > pic.getWidth() - w - 5 || intY > pic.getHeight() - h - 5;
    if (nearEdge) {
        unsafe[interp].getLuma(pic.getData()[0], pic.getWidth(), pic.getHeight(), out.getPlaneData(0),
                off, out.getPlaneWidth(0), intX, intY, w, h);
    } else {
        safe[interp].getLuma(pic.getData()[0], pic.getWidth(), pic.getHeight(), out.getPlaneData(0),
                off, out.getPlaneWidth(0), intX, intY, w, h);
    }
}
示例12: encodeFrame
import org.jcodec.common.model.Picture; //导入方法依赖的package包/类
/**
 * Packs a planar YUV frame (chroma at half horizontal resolution — the
 * staging buffers are half the luma stride) into little-endian 10-bit
 * words, three samples per 32-bit word, in the v210-style Cr/Y/Cb ordering
 * shown below.
 *
 * @param _out destination; a duplicate is written so the caller's position
 *             is untouched, and the returned buffer is flipped for reading
 * @param frame source picture
 * @return the flipped duplicate containing the packed frame
 */
public ByteBuffer encodeFrame(ByteBuffer _out, Picture frame) throws IOException {
ByteBuffer out = _out.duplicate();
out.order(ByteOrder.LITTLE_ENDIAN);
// Rows are padded up to a multiple of 48 luma samples.
int tgtStride = ((frame.getPlaneWidth(0) + 47) / 48) * 48;
int[][] data = frame.getData();
// Per-row staging buffers; samples past the plane width keep their
// initial zero value and become the row padding.
int[] tmpY = new int[tgtStride];
int[] tmpCb = new int[tgtStride >> 1];
int[] tmpCr = new int[tgtStride >> 1];
int yOff = 0, cbOff = 0, crOff = 0;
for (int yy = 0; yy < frame.getHeight(); yy++) {
arraycopy(data[0], yOff, tmpY, 0, frame.getPlaneWidth(0));
arraycopy(data[1], cbOff, tmpCb, 0, frame.getPlaneWidth(1));
arraycopy(data[2], crOff, tmpCr, 0, frame.getPlaneWidth(2));
// Each iteration packs 6 luma + 3 Cb + 3 Cr samples into four words;
// within a word the three 10-bit fields sit at bits 0-9, 10-19, 20-29.
for (int yi = 0, cbi = 0, cri = 0; yi < tgtStride;) {
// Word 0: Cr | Y | Cb
int i = 0;
i |= clip(tmpCr[cri++]) << 20;
i |= clip(tmpY[yi++]) << 10;
i |= clip(tmpCb[cbi++]);
out.putInt(i);
// Word 1: Y | Cb | Y
i = 0;
i |= clip(tmpY[yi++]);
i |= clip(tmpY[yi++]) << 20;
i |= clip(tmpCb[cbi++]) << 10;
out.putInt(i);
// Word 2: Cb | Y | Cr
i = 0;
i |= clip(tmpCb[cbi++]) << 20;
i |= clip(tmpY[yi++]) << 10;
i |= clip(tmpCr[cri++]);
out.putInt(i);
// Word 3: Y | Cr | Y
i = 0;
i |= clip(tmpY[yi++]);
i |= clip(tmpY[yi++]) << 20;
i |= clip(tmpCr[cri++]) << 10;
out.putInt(i);
}
// Advance to the next row of each source plane.
yOff += frame.getPlaneWidth(0);
cbOff += frame.getPlaneWidth(1);
crOff += frame.getPlaneWidth(2);
}
out.flip();
return out;
}
示例13: transform
import org.jcodec.common.model.Picture; //导入方法依赖的package包/类
/**
 * Converts a 4:4:4 YUV source into packed RGB, renormalizing every sample
 * by the configured upShift/downShift bit-depth adjustment. One chroma
 * sample per pixel, three output ints per pixel.
 */
public void transform(Picture src, Picture dst) {
    int[] luma = src.getPlaneData(0);
    int[] cb = src.getPlaneData(1);
    int[] cr = src.getPlaneData(2);
    int[] rgb = dst.getPlaneData(0);
    int srcOff = 0;
    int dstOff = 0;
    for (int row = 0; row < dst.getHeight(); row++) {
        for (int col = 0; col < dst.getWidth(); col++) {
            YUV444toRGB888((luma[srcOff] << upShift) >> downShift, (cb[srcOff] << upShift) >> downShift,
                    (cr[srcOff] << upShift) >> downShift, rgb, dstOff);
            srcOff++;
            dstOff += 3;
        }
    }
}
示例14: transform
import org.jcodec.common.model.Picture; //导入方法依赖的package包/类
/**
 * Converts a full-range (JPEG) 4:4:4 YUV source into packed RGB.
 * One chroma sample per pixel, three output ints per pixel.
 */
public void transform(Picture src, Picture dst) {
    int[] luma = src.getPlaneData(0);
    int[] cb = src.getPlaneData(1);
    int[] cr = src.getPlaneData(2);
    int[] rgb = dst.getPlaneData(0);
    int srcOff = 0;
    int dstOff = 0;
    for (int row = 0; row < dst.getHeight(); row++) {
        for (int col = 0; col < dst.getWidth(); col++) {
            YUVJtoRGB(luma[srcOff], cb[srcOff], cr[srcOff], rgb, dstOff);
            srcOff++;
            dstOff += 3;
        }
    }
}
示例15: transform
import org.jcodec.common.model.Picture; //导入方法依赖的package包/类
/**
 * Converts packed RGB (3 ints per pixel, interleaved in plane 0) into
 * planar YUV 4:2:0, walking the image in 2x2 pixel blocks so the four
 * luma samples of each block share a single chroma sample pair.
 * Luma is rescaled by upShift/downShift, chroma by upShift/downShiftChr.
 */
public void transform(Picture img, Picture dst) {
// Plane 0 of the source holds interleaved R,G,B components.
int[] y = img.getData()[0];
int[][] dstData = dst.getData();
// strideSrc: one source row in ints (3 per pixel); strideDst: one luma row.
int offChr = 0, offLuma = 0, offSrc = 0, strideSrc = img.getWidth() * 3, strideDst = dst.getWidth();
for (int i = 0; i < img.getHeight() >> 1; i++) {
for (int j = 0; j < img.getWidth() >> 1; j++) {
// Reset this block's shared chroma slots before the four conversions;
// rgb2yuv presumably accumulates chroma into them — TODO confirm.
dstData[1][offChr] = 0;
dstData[2][offChr] = 0;
// Top-left pixel of the 2x2 block.
rgb2yuv(y[offSrc], y[offSrc + 1], y[offSrc + 2], dstData[0], offLuma, dstData[1], offChr, dstData[2],
offChr);
dstData[0][offLuma] = (dstData[0][offLuma] << upShift) >> downShift;
// Bottom-left pixel (one source/dest row down).
rgb2yuv(y[offSrc + strideSrc], y[offSrc + strideSrc + 1], y[offSrc + strideSrc + 2], dstData[0],
offLuma + strideDst, dstData[1], offChr, dstData[2], offChr);
dstData[0][offLuma + strideDst] = (dstData[0][offLuma + strideDst] << upShift) >> downShift;
++offLuma;
// Top-right pixel.
rgb2yuv(y[offSrc + 3], y[offSrc + 4], y[offSrc + 5], dstData[0], offLuma, dstData[1], offChr,
dstData[2], offChr);
dstData[0][offLuma] = (dstData[0][offLuma] << upShift) >> downShift;
// Bottom-right pixel.
rgb2yuv(y[offSrc + strideSrc + 3], y[offSrc + strideSrc + 4], y[offSrc + strideSrc + 5], dstData[0],
offLuma + strideDst, dstData[1], offChr, dstData[2], offChr);
dstData[0][offLuma + strideDst] = (dstData[0][offLuma + strideDst] << upShift) >> downShift;
++offLuma;
// Rescale the block's chroma; downShiftChr presumably also folds in the
// 4-sample accumulation — TODO confirm against rgb2yuv.
dstData[1][offChr] = (dstData[1][offChr] << upShift) >> downShiftChr;
dstData[2][offChr] = (dstData[2][offChr] << upShift) >> downShiftChr;
++offChr;
offSrc += 6;
}
// Skip the second row of the block in both source and destination luma.
offLuma += strideDst;
offSrc += strideSrc;
}
}