This article collects typical usages of the readInt method of org.monte.media.io.ByteArrayImageInputStream in Java. If you are wondering what ByteArrayImageInputStream.readInt does, how to call it, or what it looks like in real code, the curated method examples below may help. You can also explore the enclosing class org.monte.media.io.ByteArrayImageInputStream for further details.
The following presents five code examples of ByteArrayImageInputStream.readInt, listed by popularity by default.
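Before the examples, here is a minimal, self-contained sketch of the pattern they all share: wrap a byte[] in a ByteArrayImageInputStream, choose a ByteOrder, and call readInt() (or the other read* methods inherited from javax.imageio.stream.ImageInputStream). The ReadIntDemo class and the byte values are made up for illustration; only the constructor and methods that appear in the examples below are assumed.

import java.nio.ByteOrder;

import org.monte.media.io.ByteArrayImageInputStream;

public class ReadIntDemo {
    public static void main(String[] args) throws Exception {
        // Four bytes that encode the value 1 when read in little-endian order.
        byte[] data = {0x01, 0x00, 0x00, 0x00};
        ByteArrayImageInputStream in = new ByteArrayImageInputStream(data, ByteOrder.LITTLE_ENDIAN);
        try {
            System.out.println(in.readInt()); // 1
            in.seek(0);                       // rewind and re-read with the other byte order
            in.setByteOrder(ByteOrder.BIG_ENDIAN);
            System.out.println(in.readInt()); // 16777216 (0x01000000)
        } finally {
            in.close();
        }
    }
}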
Example 1: readAVIH
import org.monte.media.io.ByteArrayImageInputStream; // import the package/class this method depends on
/**
 * Reads the AVI Main Header and returns a MainHeader object.
 */
private MainHeader readAVIH(byte[] data) throws IOException, ParseException {
    ByteArrayImageInputStream in = new ByteArrayImageInputStream(data, ByteOrder.LITTLE_ENDIAN);
    MainHeader mh = new MainHeader();
    mh.microSecPerFrame = in.readUnsignedInt();
    mh.maxBytesPerSec = in.readUnsignedInt();
    mh.paddingGranularity = in.readUnsignedInt();
    mh.flags = in.readInt();
    mh.totalFrames = in.readUnsignedInt();
    mh.initialFrames = in.readUnsignedInt();
    mh.streams = in.readUnsignedInt();
    mh.suggestedBufferSize = in.readUnsignedInt();
    mh.size = new Dimension(in.readInt(), in.readInt());
    return mh;
}
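Most fields of the AVI main header are unsigned 32-bit DWORDs, which is why the example reads them with readUnsignedInt() (returning a long) and reserves readInt() for the flags word and the signed width/height pair. A minimal sketch of the difference, assuming nothing beyond the ImageInputStream API; the byte values are illustrative.

import java.nio.ByteOrder;

import org.monte.media.io.ByteArrayImageInputStream;

public class UnsignedReadDemo {
    public static void main(String[] args) throws Exception {
        // 0xFFFFFFFE stored little-endian; large enough to overflow a signed int.
        byte[] data = {(byte) 0xFE, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF};
        ByteArrayImageInputStream in = new ByteArrayImageInputStream(data, ByteOrder.LITTLE_ENDIAN);
        try {
            System.out.println(in.readInt());         // -2 (wraps around as a signed int)
            in.seek(0);
            System.out.println(in.readUnsignedInt()); // 4294967294 (kept positive in a long)
        } finally {
            in.close();
        }
    }
}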
Example 2: decodePalette
import org.monte.media.io.ByteArrayImageInputStream; // import the package/class this method depends on
/** Decodes an AVI palette change chunk.
 * FIXME - This could be moved out into a separate class.
 */
public void decodePalette(byte[] inDat, int off, int len) throws IOException {
    getPalette();
    ByteArrayImageInputStream in = new ByteArrayImageInputStream(inDat, off, len, ByteOrder.LITTLE_ENDIAN);
    int firstEntry = in.readUnsignedByte();
    int numEntries = in.readUnsignedByte();
    if (numEntries == 0) {
        numEntries = 256;
    }
    int flags = in.readUnsignedShort();
    if (firstEntry + numEntries > 256) {
        throw new IOException("Illegal headers in pc chunk. firstEntry=" + firstEntry + ", numEntries=" + numEntries);
    }
    in.setByteOrder(ByteOrder.BIG_ENDIAN);
    for (int i = 0; i < numEntries; i++) {
        // Each PALETTEENTRY is read big-endian as red, green, blue, flags;
        // shifting out the flags byte leaves the RGB value in the low 24 bits.
        int rgbf = in.readInt();
        palette[i + firstEntry] = rgbf >> 8;
    }
}
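The loop switches to ByteOrder.BIG_ENDIAN so that each PALETTEENTRY is read as red, green, blue, flags from high byte to low; shifting out the flags byte leaves 0xRRGGBB in the low 24 bits. A standalone sketch of that unpacking, with a made-up entry value:

public class PaletteEntryDemo {
    public static void main(String[] args) {
        // Illustrative entry as produced by a big-endian readInt():
        // red = 0x12, green = 0x34, blue = 0x56, flags = 0x00.
        int rgbf = 0x12345600;
        int rgb = rgbf >> 8; // 0x00123456
        System.out.printf("r=%02X g=%02X b=%02X%n",
                (rgb >>> 16) & 0xFF, (rgb >>> 8) & 0xFF, rgb & 0xFF);
    }
}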
Example 3: readVideoSTRF
import org.monte.media.io.ByteArrayImageInputStream; // import the package/class this method depends on
/**
 * Reads an AVI Bitmap Info Header (video 'strf' chunk) into the given video track.
 * <pre>
 * //----------------------
 * // AVI Bitmap Info Header
 * //----------------------
 * typedef struct {
 *     BYTE blue; BYTE green; BYTE red; BYTE reserved;
 * } RGBQUAD;
 *
 * // Values for this enum taken from:
 * // http://www.fourcc.org/index.php?http%3A//www.fourcc.org/rgb.php
 * enum {
 *     BI_RGB = 0x00000000,
 *     RGB = 0x32424752,          // Alias for BI_RGB
 *     BI_RLE8 = 0x01000000,
 *     RLE8 = 0x38454C52,         // Alias for BI_RLE8
 *     BI_RLE4 = 0x00000002,
 *     RLE4 = 0x34454C52,         // Alias for BI_RLE4
 *     BI_BITFIELDS = 0x00000003,
 *     raw = 0x32776173,
 *     RGBA = 0x41424752,
 *     RGBT = 0x54424752,
 *     cvid = "cvid"
 * } bitmapCompression;
 *
 * typedef struct {
 *     DWORD structSize;
 *     DWORD width;  DWORD height;
 *     WORD planes;  WORD bitCount;
 *     FOURCC enum bitmapCompression compression;
 *     DWORD imageSizeInBytes;
 *     DWORD xPelsPerMeter;  DWORD yPelsPerMeter;
 *     DWORD numberOfColorsUsed;  DWORD numberOfColorsImportant;
 *     RGBQUAD colors[];
 * } BITMAPINFOHEADER;
 * </pre>
 *
 * @param tr the video track to update
 * @param data the raw 'strf' chunk data
 * @throws java.io.IOException if reading fails
 */
private void readVideoSTRF(VideoTrack tr, byte[] data) throws IOException {
    ByteArrayImageInputStream in = new ByteArrayImageInputStream(data, ByteOrder.LITTLE_ENDIAN);
    long structSize = in.readUnsignedInt();
    tr.width = in.readInt();
    tr.height = in.readInt();
    tr.planes = in.readUnsignedShort();
    tr.bitCount = in.readUnsignedShort();
    // The compression FOURCC is read big-endian so that its four characters
    // come out in file order when converted with intToType().
    in.setByteOrder(ByteOrder.BIG_ENDIAN);
    tr.compression = intToType(in.readInt());
    in.setByteOrder(ByteOrder.LITTLE_ENDIAN);
    long imageSizeInBytes = in.readUnsignedInt();
    tr.xPelsPerMeter = in.readUnsignedInt();
    tr.yPelsPerMeter = in.readUnsignedInt();
    tr.clrUsed = in.readUnsignedInt();
    tr.clrImportant = in.readUnsignedInt();
    if (tr.bitCount == 0) {
        // Derive the bit depth from the image size when the header leaves it at 0.
        tr.bitCount = (int) (imageSizeInBytes / tr.width / tr.height * 8);
    }
    tr.format = new Format(MimeTypeKey, MIME_AVI,
            MediaTypeKey, MediaType.VIDEO,
            EncodingKey, tr.compression,
            DataClassKey, byte[].class,
            WidthKey, tr.width,
            HeightKey, tr.height,
            DepthKey, tr.bitCount,
            PixelAspectRatioKey, new Rational(1, 1),
            FrameRateKey, new Rational(tr.rate, tr.scale),
            FixedFrameRateKey, true);
}
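The temporary switch to ByteOrder.BIG_ENDIAN before reading the compression field makes the FOURCC characters come out in file order. intToType() is a helper of the surrounding reader class and is not shown in this listing; the sketch below uses a hypothetical fourccToString() to illustrate the same idea.

public class FourccDemo {

    // Hypothetical stand-in for the reader's intToType() helper:
    // split a big-endian int into its four ASCII characters.
    static String fourccToString(int fourcc) {
        return new String(new char[]{
                (char) ((fourcc >>> 24) & 0xFF),
                (char) ((fourcc >>> 16) & 0xFF),
                (char) ((fourcc >>> 8) & 0xFF),
                (char) (fourcc & 0xFF)});
    }

    public static void main(String[] args) {
        // The FOURCC "cvid" (Cinepak) read as a big-endian int.
        System.out.println(fourccToString(0x63766964)); // prints "cvid"
    }
}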
Example 4: readSTRH
import org.monte.media.io.ByteArrayImageInputStream; // import the package/class this method depends on
/**
* Reads an AVI Stream Header and returns a Track object.
*/
/*typedef struct {
* FOURCC enum aviStrhType type;
* // Contains a FOURCC that specifies the type of the data contained in
* // the stream. The following standard AVI values for video and audio are
* // defined.
* FOURCC handler;
* DWORD set aviStrhFlags flags;
* WORD priority;
* WORD language;
* DWORD initialFrames;
* DWORD scale;
* DWORD rate;
* DWORD startTime;
* DWORD length;
* DWORD suggestedBufferSize;
* DWORD quality;
* DWORD sampleSize;
* aviRectangle frame;
* } AVISTREAMHEADER; */
private Track readSTRH(byte[] data) throws IOException, ParseException {
    ByteArrayImageInputStream in = new ByteArrayImageInputStream(data, ByteOrder.LITTLE_ENDIAN);
    Track tr = null;
    // The stream type FOURCC ('vids', 'auds', ...) is read big-endian.
    in.setByteOrder(ByteOrder.BIG_ENDIAN);
    String type = intToType(in.readInt());
    in.setByteOrder(ByteOrder.LITTLE_ENDIAN);
    int handler = in.readInt();
    if (type.equals(AVIMediaType.AUDIO.fccType)) {
        tr = new AudioTrack(tracks.size(), handler);
    } else if (type.equals(AVIMediaType.VIDEO.fccType)) {
        tr = new VideoTrack(tracks.size(), handler, null);
    } else if (type.equals(AVIMediaType.MIDI.fccType)) {
        tr = new MidiTrack(tracks.size(), handler);
    } else if (type.equals(AVIMediaType.TEXT.fccType)) {
        tr = new TextTrack(tracks.size(), handler);
    } else {
        throw new ParseException("Unknown track type " + type);
    }
    tr.fccHandler = handler;
    tr.flags = in.readInt();
    tr.priority = in.readUnsignedShort();
    tr.language = in.readUnsignedShort();
    tr.initialFrames = in.readUnsignedInt();
    tr.scale = in.readUnsignedInt();
    tr.rate = in.readUnsignedInt();
    tr.startTime = in.readUnsignedInt();
    tr.length = in.readUnsignedInt();
    /*tr.suggestedBufferSize=*/ in.readUnsignedInt();
    tr.quality = in.readInt();
    /*tr.sampleSize=*/ in.readUnsignedInt();
    tr.frameLeft = in.readShort();
    tr.frameTop = in.readShort();
    tr.frameRight = in.readShort();
    tr.frameBottom = in.readShort();
    return tr;
}
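The rate and scale fields read here define the stream's frame rate as rate/scale frames per second, which is why Example 3 builds its FrameRateKey as new Rational(tr.rate, tr.scale). A small arithmetic illustration with typical NTSC-style values (chosen for the example, not taken from a real file):

public class FrameRateDemo {
    public static void main(String[] args) {
        // Values as they might appear in an 'strh' chunk.
        long rate = 30000;
        long scale = 1001;
        double fps = (double) rate / scale;
        System.out.printf("%.3f frames per second%n", fps); // 29.970
    }
}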
Example 5: readMPEntries
import org.monte.media.io.ByteArrayImageInputStream; // import the package/class this method depends on
/**
 * Reads the MP Entry Information: 16 bytes per image, imageCount entries in total.
 */
private void readMPEntries(TIFFInputStream tin, IFDEntry mpEntryInformation, TIFFDirectory parent, ArrayList<FileSegment> tiffSeg) throws IOException {
    byte[] buf = (byte[]) mpEntryInformation.readData(tin);
    TagSet tagSet = MPEntryTagSet.getInstance();
    ByteArrayImageInputStream in = new ByteArrayImageInputStream(buf);
    ByteOrder bo = tin.getByteOrder();
    in.setByteOrder(bo);
    int numImages = (int) mpEntryInformation.getLength() / 16;
    try {
        for (int imageCount = 0; imageCount < numImages; imageCount++) {
            TIFFDirectory ifdNode = new TIFFDirectory(tagSet, tagSet.getTag(MPFTagSet.TAG_MPEntryInformation), imageCount, mpEntryInformation.getValueOffset(), 16 * imageCount, tiffSeg);
            parent.add(ifdNode);
            int imageAttr = in.readInt();
            short dpif = (short) (imageAttr >>> 31);
            ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_DependentParentImageFlag), dpif));
            short dcif = (short) ((imageAttr >>> 30) & 1);
            ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_DependentChildImageFlag), dcif));
            short rif = (short) ((imageAttr >>> 29) & 1);
            ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_RepresentativeImageFlag), rif));
            short idf = (short) ((imageAttr >>> 24) & 7);
            ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_ImageDataFormat), idf));
            long mptc = (imageAttr & 0xffffffL);
            ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_MPTypeCode), mptc));
            // Read the individual image size
            long imageSize = in.readInt() & 0xffffffffL;
            ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_IndividualImageSize), imageSize));
            // Read the individual data offset
            long imageOffset = in.readInt() & 0xffffffffL;
            ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_IndividualImageDataOffset), imageOffset));
            imageOffsets.add(imageOffset);
            // Read the dependent image 1 entry number
            int dependentImageEntryNumber = in.readUnsignedShort();
            ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_DependentImage1EntryNumber), dependentImageEntryNumber));
            // Read the dependent image 2 entry number
            dependentImageEntryNumber = in.readUnsignedShort();
            ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_DependentImage2EntryNumber), dependentImageEntryNumber));
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        in.close();
    }
}
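Each 16-byte MP Entry begins with a 32-bit attribute word, and the shifts above pick it apart: bit 31 is the dependent-parent flag, bit 30 the dependent-child flag, bit 29 the representative-image flag, bits 24-26 the image data format, and the low 24 bits the MP type code. A standalone sketch of that unpacking with an illustrative attribute value:

public class MPEntryAttrDemo {
    public static void main(String[] args) {
        // Illustrative attribute word: representative image, type code 0x030000.
        int imageAttr = 0x20030000;
        int dependentParent = imageAttr >>> 31;       // 0
        int dependentChild = (imageAttr >>> 30) & 1;  // 0
        int representative = (imageAttr >>> 29) & 1;  // 1
        int imageDataFormat = (imageAttr >>> 24) & 7; // 0
        long mpTypeCode = imageAttr & 0xffffffL;      // 0x030000
        System.out.printf("rep=%d format=%d type=0x%06X%n",
                representative, imageDataFormat, mpTypeCode);
    }
}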