本文整理汇总了Java中org.monte.media.io.ByteArrayImageInputStream类的典型用法代码示例。如果您正苦于以下问题:Java ByteArrayImageInputStream类的具体用法?Java ByteArrayImageInputStream怎么用?Java ByteArrayImageInputStream使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
ByteArrayImageInputStream类属于org.monte.media.io包,在下文中一共展示了ByteArrayImageInputStream类的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: readAVIH
import org.monte.media.io.ByteArrayImageInputStream; //导入依赖的package包/类
/**
* Reads the AVI Main Header and returns a MainHeader object.
*/
/**
 * Reads the AVI Main Header ("avih" chunk) and returns a MainHeader object.
 *
 * @param data the raw chunk payload, little-endian AVIMAINHEADER layout
 * @return the populated MainHeader
 * @throws IOException if the payload is shorter than the fields read here
 * @throws ParseException declared for consistency with the sibling readXXX methods
 */
private MainHeader readAVIH(byte[] data) throws IOException, ParseException {
    ByteArrayImageInputStream in = new ByteArrayImageInputStream(data, ByteOrder.LITTLE_ENDIAN);
    try {
        MainHeader mh = new MainHeader();
        mh.microSecPerFrame = in.readUnsignedInt();
        mh.maxBytesPerSec = in.readUnsignedInt();
        mh.paddingGranularity = in.readUnsignedInt();
        mh.flags = in.readInt();
        mh.totalFrames = in.readUnsignedInt();
        mh.initialFrames = in.readUnsignedInt();
        mh.streams = in.readUnsignedInt();
        mh.suggestedBufferSize = in.readUnsignedInt();
        mh.size = new Dimension(in.readInt(), in.readInt());
        return mh;
    } finally {
        // Release the stream even if a read runs past the end of the payload.
        in.close();
    }
}
示例2: decodePalette
import org.monte.media.io.ByteArrayImageInputStream; //导入依赖的package包/类
/** Decodes an AVI palette change chunk.
* FIXME - This could be moved out into a separate class.
*/
/**
 * Decodes an AVI palette change chunk into {@code palette}.
 *
 * Layout (little-endian): firstEntry (byte), numEntries (byte, 0 encodes 256),
 * flags (word), then numEntries big-endian {red, green, blue, flags} quads.
 * FIXME - This could be moved out into a separate class.
 *
 * @param inDat chunk bytes
 * @param off   offset of the chunk payload within {@code inDat}
 * @param len   length of the chunk payload
 * @throws IOException if the header describes entries outside the 256-entry palette
 */
public void decodePalette(byte[] inDat, int off, int len) throws IOException {
    getPalette();
    ByteArrayImageInputStream in = new ByteArrayImageInputStream(inDat, off, len, ByteOrder.LITTLE_ENDIAN);
    try {
        int firstEntry = in.readUnsignedByte();
        int numEntries = in.readUnsignedByte();
        if (numEntries == 0) {
            numEntries = 256; // 0 encodes "all 256 entries" in palette change chunks
        }
        int flags = in.readUnsignedShort();
        if (firstEntry + numEntries > 256) {
            throw new IOException("Illegal headers in pc chunk. firstEntry=" + firstEntry + ", numEntries=" + numEntries);
        }
        // Entries are stored big-endian as 0xRRGGBBFF-style quads.
        in.setByteOrder(ByteOrder.BIG_ENDIAN);
        for (int i = 0; i < numEntries; i++) {
            int rgbf = in.readInt();
            // Unsigned shift: a signed ">> 8" sign-extends whenever the red byte is
            // >= 0x80, smearing 0xFF into the high byte of some entries but not
            // others, producing inconsistent palette values.
            palette[i + firstEntry] = rgbf >>> 8;
        }
    } finally {
        in.close();
    }
}
示例3: decode
import org.monte.media.io.ByteArrayImageInputStream; //导入依赖的package包/类
/**
 * Decodes one MJPG sample from {@code in} into a BufferedImage in {@code out}.
 * Returns CODEC_OK on success, CODEC_FAILED (with DISCARD set) when the input
 * has no data or the JPEG bitstream cannot be read.
 */
public int decode(Buffer in, Buffer out) {
    out.setMetaTo(in);
    out.format = outputFormat;
    if (in.isFlag(DISCARD)) {
        return CODEC_OK;
    }

    byte[] encoded = (byte[]) in.data;
    if (encoded == null) {
        out.setFlag(DISCARD);
        return CODEC_FAILED;
    }

    ByteArrayImageInputStream bitstream = new ByteArrayImageInputStream(encoded);
    try {
        // Use the dedicated MJPG reader; AVI MJPG frames often lack the
        // Huffman tables that the generic JPEG reader expects.
        ImageReader reader = new MJPGImageReader(new MJPGImageReaderSpi());
        reader.setInput(bitstream);
        out.data = reader.read(0);
        reader.dispose();
        out.sampleCount = 1;
        out.offset = 0;
        // Record how many input bytes the decoder actually consumed.
        out.length = (int) bitstream.getStreamPosition();
        return CODEC_OK;
    } catch (IOException ex) {
        ex.printStackTrace();
        out.setFlag(DISCARD);
        return CODEC_FAILED;
    }
}
示例4: readVideoSTRF
import org.monte.media.io.ByteArrayImageInputStream; //导入依赖的package包/类
/**
* <pre> //---------------------- // AVI Bitmap Info Header //
* ---------------------- typedef struct { BYTE blue; BYTE green; BYTE red;
* BYTE reserved; } RGBQUAD;
*
* // Values for this enum taken from: //
* http://www.fourcc.org/index.php?http%3A//www.fourcc.org/rgb.php enum {
* BI_RGB = 0x00000000, RGB = 0x32424752, // Alias for BI_RGB BI_RLE8 =
* 0x01000000, RLE8 = 0x38454C52, // Alias for BI_RLE8 BI_RLE4 = 0x00000002,
* RLE4 = 0x34454C52, // Alias for BI_RLE4 BI_BITFIELDS = 0x00000003, raw =
* 0x32776173, RGBA = 0x41424752, RGBT = 0x54424752, cvid = "cvid" }
* bitmapCompression;
*
* typedef struct { DWORD structSize; DWORD width; DWORD height; WORD
* planes; WORD bitCount; FOURCC enum bitmapCompression compression; DWORD
* imageSizeInBytes; DWORD xPelsPerMeter; DWORD yPelsPerMeter; DWORD
* numberOfColorsUsed; DWORD numberOfColorsImportant; RGBQUAD colors[]; }
* BITMAPINFOHEADER;
* </pre>
*
*
* @param tr
* @param data
* @throws java.io.IOException
*/
private void readVideoSTRF(VideoTrack tr, byte[] data) throws IOException {
// The BITMAPINFOHEADER struct is little-endian except for the FOURCC
// compression field, which is handled separately below.
ByteArrayImageInputStream in = new ByteArrayImageInputStream(data, ByteOrder.LITTLE_ENDIAN);
long structSize = in.readUnsignedInt();
tr.width = in.readInt();
tr.height = in.readInt();
tr.planes = in.readUnsignedShort();
tr.bitCount = in.readUnsignedShort();
// The FOURCC is character data; read it big-endian so intToType sees the
// characters in on-disk order, then switch back for the remaining DWORDs.
in.setByteOrder(ByteOrder.BIG_ENDIAN);
tr.compression = intToType(in.readInt());
in.setByteOrder(ByteOrder.LITTLE_ENDIAN);
long imageSizeInBytes = in.readUnsignedInt();
tr.xPelsPerMeter = in.readUnsignedInt();
tr.yPelsPerMeter = in.readUnsignedInt();
tr.clrUsed = in.readUnsignedInt();
tr.clrImportant = in.readUnsignedInt();
if (tr.bitCount == 0) {
// Depth left unspecified in the header: derive it from the image size.
// NOTE(review): divides by width and height — presumably both are non-zero
// in well-formed headers; a zero dimension would throw ArithmeticException.
tr.bitCount = (int) (imageSizeInBytes / tr.width / tr.height * 8);
}
// Publish the parsed header as this track's Format.
tr.format = new Format(MimeTypeKey, MIME_AVI,
MediaTypeKey, MediaType.VIDEO,
EncodingKey, tr.compression,
DataClassKey, byte[].class,
WidthKey, tr.width,
HeightKey, tr.height,
DepthKey, tr.bitCount,
PixelAspectRatioKey, new Rational(1, 1),
FrameRateKey, new Rational(tr.rate, tr.scale),
FixedFrameRateKey, true);
}
示例5: readAVIstrdChunk
import org.monte.media.io.ByteArrayImageInputStream; //导入依赖的package包/类
/**
* Reads the Exif metadata from an AVI RIFF file.
*/
/**
 * Reads the Exif metadata from an AVI RIFF "strd" chunk.
 *
 * Builds a TIFF directory tree rooted at {@code root}: one child per track,
 * then parses the TIFF IFD found after the chunk's 8-byte preamble.
 *
 * @param data the raw "strd" chunk bytes, including the 8-byte preamble
 * @throws IOException if the embedded TIFF structure cannot be parsed
 */
public void readAVIstrdChunk(byte[] data) throws IOException {
    int track = 0; // track number
    int scan = 0;
    root = new TIFFDirectory(null, null, -1);
    TIFFDirectory trackNode = new TIFFDirectory(TrackTagSet.getInstance(), null, track, null, null, new FileSegment(0, data.length));
    root.add(trackNode);
    // Skip the 8-byte preamble; the Exif payload is a little-endian TIFF stream.
    // (data.length is already an int, so no cast is needed.)
    ByteArrayImageInputStream in = new ByteArrayImageInputStream(data, 8, data.length - 8, ByteOrder.LITTLE_ENDIAN);
    TIFFInputStream tin = new TIFFInputStream(in, ByteOrder.LITTLE_ENDIAN, 0);
    ArrayList<FileSegment> tiffSeg = new ArrayList<FileSegment>();
    tiffSeg.add(new FileSegment(scan + 8, data.length - 8));
    readTIFFIFD(tin, trackNode, tiffSeg);
}
示例6: getThumbnails
import org.monte.media.io.ByteArrayImageInputStream; //导入依赖的package包/类
/**
* Returns all thumbnails.
*/
/**
 * Returns all thumbnails.
 *
 * Performs a depth-first traversal of the metadata tree and decodes every
 * JPEGThumbnailImage field it finds.
 *
 * @param suppressException if true, thumbnails that fail to decode are
 *                          silently skipped; if false, the first decoding
 *                          failure is rethrown
 * @throws IOException only when {@code suppressException} is false and a
 *                     thumbnail cannot be decoded
 */
public ArrayList<BufferedImage> getThumbnails(boolean suppressException) throws IOException {
ArrayList<BufferedImage> thumbnails = new ArrayList<BufferedImage>();
// Iterative DFS over the TIFF directory tree using an explicit stack.
Stack<TIFFDirectory> stack = new Stack<TIFFDirectory>();
stack.push((TIFFDirectory) getMetaDataTree());
// No metadata at all: return the empty list rather than NPE-ing below.
if (stack.peek() == null) {
return thumbnails;
}
while (!stack.isEmpty()) {
TIFFDirectory dir = stack.pop();
for (TIFFNode node : dir.getChildren()) {
if (node instanceof TIFFDirectory) {
stack.push((TIFFDirectory) node);
} else if (node instanceof TIFFField) {
TIFFField field = (TIFFField) node;
if (field.getTag() == BaselineTagSet.JPEGThumbnailImage) {
try {
// Thumbnail payload is an embedded JPEG byte array.
thumbnails.add(0, ImageIO.read(new ByteArrayImageInputStream((byte[]) field.getData())));
// must insert first because we traverse in post-order
} catch (IOException e) {
if (!suppressException) {
throw e;
}
// else: best-effort mode, skip the unreadable thumbnail
}
}
}
}
}
return thumbnails;
}
示例7: readVideoSTRF
import org.monte.media.io.ByteArrayImageInputStream; //导入依赖的package包/类
/**
* <pre> //---------------------- // AVI Bitmap Info Header //
* ---------------------- typedef struct { BYTE blue; BYTE green; BYTE red;
* BYTE reserved; } RGBQUAD;
*
* // Values for this enum taken from: //
* http://www.fourcc.org/index.php?http%3A//www.fourcc.org/rgb.php enum {
* BI_RGB = 0x00000000, RGB = 0x32424752, // Alias for BI_RGB BI_RLE8 =
* 0x01000000, RLE8 = 0x38454C52, // Alias for BI_RLE8 BI_RLE4 = 0x00000002,
* RLE4 = 0x34454C52, // Alias for BI_RLE4 BI_BITFIELDS = 0x00000003, raw =
* 0x32776173, RGBA = 0x41424752, RGBT = 0x54424752, cvid = "cvid" }
* bitmapCompression;
*
* typedef struct { DWORD structSize; DWORD width; DWORD height; WORD
* planes; WORD bitCount; FOURCC enum bitmapCompression compression; DWORD
* imageSizeInBytes; DWORD xPelsPerMeter; DWORD yPelsPerMeter; DWORD
* numberOfColorsUsed; DWORD numberOfColorsImportant; RGBQUAD colors[]; }
* BITMAPINFOHEADER;
* </pre>
*
*
* @param tr
* @param data
* @throws IOException
*/
/**
 * Parses the BITMAPINFOHEADER found in a video "strf" chunk and stores the
 * result on the given track, including a fully populated video Format.
 *
 * @param tr   the video track to populate
 * @param data the raw chunk payload
 * @throws IOException if the payload is shorter than the header structure
 */
private void readVideoSTRF(VideoTrack tr, byte[] data) throws IOException {
    ByteArrayImageInputStream input = new ByteArrayImageInputStream(data, ByteOrder.LITTLE_ENDIAN);
    long headerSize = input.readUnsignedInt();
    tr.width = input.readInt();
    tr.height = input.readInt();
    tr.planes = input.readUnsignedShort();
    tr.bitCount = input.readUnsignedShort();
    // The compression FOURCC is character data: read it big-endian so the
    // characters come out in on-disk order, then restore little-endian.
    input.setByteOrder(ByteOrder.BIG_ENDIAN);
    tr.compression = intToType(input.readInt());
    input.setByteOrder(ByteOrder.LITTLE_ENDIAN);
    long sizeImage = input.readUnsignedInt();
    tr.xPelsPerMeter = input.readUnsignedInt();
    tr.yPelsPerMeter = input.readUnsignedInt();
    tr.clrUsed = input.readUnsignedInt();
    tr.clrImportant = input.readUnsignedInt();
    if (tr.bitCount == 0) {
        // Depth not given explicitly: derive it from the stored image size.
        tr.bitCount = (int) (sizeImage / tr.width / tr.height * 8);
    }
    tr.format = new Format(MimeTypeKey, MIME_AVI,
            MediaTypeKey, MediaType.VIDEO,
            EncodingKey, tr.compression,
            DataClassKey, byte[].class,
            WidthKey, tr.width,
            HeightKey, tr.height,
            DepthKey, tr.bitCount,
            PixelAspectRatioKey, new Rational(1, 1),
            FrameRateKey, new Rational(tr.rate, tr.scale),
            FixedFrameRateKey, true);
}
示例8: readSTRH
import org.monte.media.io.ByteArrayImageInputStream; //导入依赖的package包/类
/**
* Reads an AVI Stream Header and returns a Track object.
*/
/*typedef struct {
* FOURCC enum aviStrhType type;
* // Contains a FOURCC that specifies the type of the data contained in
* // the stream. The following standard AVI values for video and audio are
* // defined.
* FOURCC handler;
* DWORD set aviStrhFlags flags;
* WORD priority;
* WORD language;
* DWORD initialFrames;
* DWORD scale;
* DWORD rate;
* DWORD startTime;
* DWORD length;
* DWORD suggestedBufferSize;
* DWORD quality;
* DWORD sampleSize;
* aviRectangle frame;
* } AVISTREAMHEADER; */
private Track readSTRH(byte[] data) throws IOException, ParseException {
ByteArrayImageInputStream in = new ByteArrayImageInputStream(data, ByteOrder.LITTLE_ENDIAN);
Track tr = null;
// The fccType FOURCC is character data; read it big-endian so intToType
// sees the characters in on-disk order, then restore little-endian.
in.setByteOrder(ByteOrder.BIG_ENDIAN);
String type = intToType(in.readInt());
in.setByteOrder(ByteOrder.LITTLE_ENDIAN);
int handler = in.readInt();
// Instantiate the concrete Track subclass matching the stream's fccType.
if (type.equals(AVIMediaType.AUDIO.fccType)) {
tr = new AudioTrack(tracks.size(), handler);
} else if (type.equals(AVIMediaType.VIDEO.fccType)) {
tr = new VideoTrack(tracks.size(), handler, null);
} else if (type.equals(AVIMediaType.MIDI.fccType)) {
tr = new MidiTrack(tracks.size(), handler);
} else if (type.equals(AVIMediaType.TEXT.fccType)) {
tr = new TextTrack(tracks.size(), handler);
} else {
throw new ParseException("Unknown track type " + type);
}
// Remaining AVISTREAMHEADER fields, in on-disk order.
tr.fccHandler = handler;
tr.flags = in.readInt();
tr.priority = in.readUnsignedShort();
tr.language = in.readUnsignedShort();
tr.initialFrames = in.readUnsignedInt();
tr.scale = in.readUnsignedInt();
tr.rate = in.readUnsignedInt();
tr.startTime = in.readUnsignedInt();
tr.length = in.readUnsignedInt();
// These two fields are consumed to keep the stream position correct but
// are intentionally not stored on the track.
/*tr.suggestedBufferSize=*/ in.readUnsignedInt();
tr.quality = in.readInt();
/*tr.sampleSize=*/ in.readUnsignedInt();
// The destination rectangle (aviRectangle frame) of the stream.
tr.frameLeft = in.readShort();
tr.frameTop = in.readShort();
tr.frameRight = in.readShort();
tr.frameBottom = in.readShort();
return tr;
}
示例9: readMPEntries
import org.monte.media.io.ByteArrayImageInputStream; //导入依赖的package包/类
/**
* imageCount*16 byte MP Entry Information.
*/
/**
 * imageCount*16 byte MP Entry Information.
 *
 * Unpacks each 16-byte MP entry into TIFF fields under a per-image child
 * directory of {@code parent}, and records each image's data offset in
 * {@code imageOffsets}.
 */
private void readMPEntries(TIFFInputStream tin, IFDEntry mpEntryInformation, TIFFDirectory parent, ArrayList<FileSegment> tiffSeg) throws IOException {
byte[] buf = (byte[]) mpEntryInformation.readData(tin);
TagSet tagSet = MPEntryTagSet.getInstance();
ByteArrayImageInputStream in = new ByteArrayImageInputStream(buf);
// Use the byte order of the enclosing TIFF stream for the entry data.
ByteOrder bo = tin.getByteOrder();
in.setByteOrder(bo);
// Each MP entry is exactly 16 bytes.
int numImages = (int) mpEntryInformation.getLength() / 16;
try {
for (int imageCount = 0; imageCount < numImages; imageCount++) {
TIFFDirectory ifdNode = new TIFFDirectory(tagSet, tagSet.getTag(MPFTagSet.TAG_MPEntryInformation), imageCount, mpEntryInformation.getValueOffset(), 16 * imageCount, tiffSeg);
parent.add(ifdNode);
// First DWORD packs several attribute bit fields:
// bit 31: dependent parent image flag
int imageAttr = in.readInt();
short dpif = (short) (imageAttr >>> 31);
ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_DependentParentImageFlag), dpif));
// bit 30: dependent child image flag
short dcif = (short) ((imageAttr >>> 30) & 1);
ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_DependentChildImageFlag), dcif));
// bit 29: representative image flag
short rif = (short) ((imageAttr >>> 29) & 1);
ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_RepresentativeImageFlag), rif));
// bits 24-26: image data format
short idf = (short) ((imageAttr >>> 24) & 7);
ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_ImageDataFormat), idf));
// low 24 bits: MP type code
long mptc = (imageAttr & 0xffffffL);
ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_MPTypeCode), mptc));
// Read the individual image size
long imageSize = in.readInt() & 0xffffffffL;
ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_IndividualImageSize), imageSize));
// Read the individual data offset
long imageOffset = in.readInt() & 0xffffffffL;
ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_IndividualImageDataOffset), imageOffset));
imageOffsets.add(imageOffset);
// Read the dependent image 1 entry number
int dependentImageEntryNumber = in.readUnsignedShort();
ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_DependentImage1EntryNumber), dependentImageEntryNumber));
// Read the dependent image 2 entry number
dependentImageEntryNumber = in.readUnsignedShort();
ifdNode.add(new TIFFField(tagSet.getTag(MPEntryTagSet.TAG_DependentImage2EntryNumber), dependentImageEntryNumber));
}
} catch (IOException e) {
// NOTE(review): a truncated entry table is logged and ignored here, so
// partial results remain in the tree — presumably intentional best-effort;
// confirm before tightening.
e.printStackTrace();
} finally {
in.close();
}
}
示例10: readStruct
import org.monte.media.io.ByteArrayImageInputStream; //导入依赖的package包/类
/**
 * Parses {@code data} as the struct identified by {@code magic}, using the
 * declarations table, and returns it as a table model.
 */
public StructTableModel readStruct(String magic, byte[] data)
throws IOException {
    ByteArrayImageInputStream in = new ByteArrayImageInputStream(data);
    return declarations.readStruct(magic, in);
}
示例11: getAmigaPicture
import org.monte.media.io.ByteArrayImageInputStream; //导入依赖的package包/类
/** Decodes the given bytes into a BufferedImage via ImageIO. */
protected BufferedImage getAmigaPicture(byte[] data) throws IOException {
    ByteArrayImageInputStream in = new ByteArrayImageInputStream(data);
    return ImageIO.read(in);
}
示例12: readAudioSTRF
import org.monte.media.io.ByteArrayImageInputStream; //导入依赖的package包/类
/**
* <p> The format of an audio track is defined in a "strf" chunk, which
* contains a {@code WAVEFORMATEX} struct.
* <pre>
* ----------------------
* AVI Wave Format Header
* ----------------------
* // values for this enum taken from mmreg.h
* enum {
* WAVE_FORMAT_PCM = 0x0001,
* // Microsoft Corporation
* ...many more...
* } wFormatTagEnum;
*
* typedef struct {
* WORD enum wFormatTagEnum formatTag;
* WORD numberOfChannels;
* DWORD samplesPerSec;
* DWORD avgBytesPerSec;
* WORD blockAlignment;
* WORD bitsPerSample;
* WORD cbSize;
* // Size, in bytes, of extra format information appended to the end of the
* // WAVEFORMATEX structure. This information can be used by non-PCM formats
* // to store extra attributes for the "wFormatTag". If no extra information
* // is required by the "wFormatTag", this member must be set to zero. For
* // WAVE_FORMAT_PCM formats (and only WAVE_FORMAT_PCM formats), this member
* // is ignored.
* byte[cbSize] extra;
* } WAVEFORMATEX;
* </pre>
*
*
* @param tr
* @param data
* @throws java.io.IOException
*/
private void readAudioSTRF(AudioTrack tr, byte[] data) throws IOException {
// WAVEFORMATEX is entirely little-endian.
ByteArrayImageInputStream in = new ByteArrayImageInputStream(data, ByteOrder.LITTLE_ENDIAN);
// The 16-bit wFormatTag is mapped to a string id for use as the encoding key.
String formatTag = RIFFParser.idToString(in.readUnsignedShort());
tr.channels = in.readUnsignedShort();
tr.samplesPerSec = in.readUnsignedInt();
tr.avgBytesPerSec = in.readUnsignedInt();
tr.blockAlign = in.readUnsignedShort();
tr.bitsPerSample = in.readUnsignedShort();
// cbSize (and any trailing extra bytes) only exists when the struct is
// longer than the 16-byte core of WAVEFORMATEX.
if (data.length > 16) {
long cbSize = in.readUnsignedShort();
// FIXME - Don't ignore extra format information
}
// Publish the parsed header as this track's audio Format.
// Note: 8-bit PCM WAVE data is unsigned, wider samples are signed.
tr.format = new Format(MimeTypeKey, MIME_AVI,
MediaTypeKey, MediaType.AUDIO,
EncodingKey, formatTag,
SampleRateKey, Rational.valueOf(tr.samplesPerSec),
SampleSizeInBitsKey, tr.bitsPerSample,
ChannelsKey, tr.channels,
FrameSizeKey, tr.blockAlign,
FrameRateKey, new Rational(tr.samplesPerSec, 1),
SignedKey, tr.bitsPerSample != 8,
ByteOrderKey, ByteOrder.LITTLE_ENDIAN);
}
示例13: readAudioSTRF
import org.monte.media.io.ByteArrayImageInputStream; //导入依赖的package包/类
/**
* <p> The format of an audio track is defined in a "strf" chunk, which
* contains a {@code WAVEFORMATEX} struct.
* <pre>
* ----------------------
* AVI Wave Format Header
* ----------------------
* // values for this enum taken from mmreg.h
* enum {
* WAVE_FORMAT_PCM = 0x0001,
* // Microsoft Corporation
* ...many more...
* } wFormatTagEnum;
*
* typedef struct {
* WORD enum wFormatTagEnum formatTag;
* WORD numberOfChannels;
* DWORD samplesPerSec;
* DWORD avgBytesPerSec;
* WORD blockAlignment;
* WORD bitsPerSample;
* WORD cbSize;
* // Size, in bytes, of extra format information appended to the end of the
* // WAVEFORMATEX structure. This information can be used by non-PCM formats
* // to store extra attributes for the "wFormatTag". If no extra information
* // is required by the "wFormatTag", this member must be set to zero. For
* // WAVE_FORMAT_PCM formats (and only WAVE_FORMAT_PCM formats), this member
* // is ignored.
* byte[cbSize] extra;
* } WAVEFORMATEX;
* </pre>
*
*
* @param tr
* @param data
* @throws IOException
*/
/**
 * Parses the WAVEFORMATEX struct found in an audio "strf" chunk and stores
 * the result on the given track, including a fully populated audio Format.
 *
 * @param tr   the audio track to populate
 * @param data the raw chunk payload
 * @throws IOException if the payload is shorter than the fields read here
 */
private void readAudioSTRF(AudioTrack tr, byte[] data) throws IOException {
    // The whole struct is little-endian.
    ByteArrayImageInputStream input = new ByteArrayImageInputStream(data, ByteOrder.LITTLE_ENDIAN);
    String encoding = RIFFParser.idToString(input.readUnsignedShort());
    tr.channels = input.readUnsignedShort();
    tr.samplesPerSec = input.readUnsignedInt();
    tr.avgBytesPerSec = input.readUnsignedInt();
    tr.blockAlign = input.readUnsignedShort();
    tr.bitsPerSample = input.readUnsignedShort();
    // cbSize is only present when the struct extends past the 16-byte core.
    if (data.length > 16) {
        long extraBytes = input.readUnsignedShort();
        // FIXME - Don't ignore extra format information
    }
    // 8-bit PCM WAVE samples are unsigned; wider samples are signed.
    boolean signedSamples = tr.bitsPerSample != 8;
    tr.format = new Format(MimeTypeKey, MIME_AVI,
            MediaTypeKey, MediaType.AUDIO,
            EncodingKey, encoding,
            SampleRateKey, Rational.valueOf(tr.samplesPerSec),
            SampleSizeInBitsKey, tr.bitsPerSample,
            ChannelsKey, tr.channels,
            FrameSizeKey, tr.blockAlign,
            FrameRateKey, new Rational(tr.samplesPerSec, 1),
            SignedKey, signedSamples,
            ByteOrderKey, ByteOrder.LITTLE_ENDIAN);
}