本文整理汇总了Java中java.io.DataInputStream.skip方法的典型用法代码示例。如果您正苦于以下问题:Java DataInputStream.skip方法的具体用法?Java DataInputStream.skip怎么用?Java DataInputStream.skip使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类java.io.DataInputStream
的用法示例。
在下文中一共展示了DataInputStream.skip方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: deserializeFirstLastKey
import java.io.DataInputStream; //导入方法依赖的package包/类
/**
 * Deserializes the first and last key stored in the summary
 *
 * Only for use by offline tools like SSTableMetadataViewer, otherwise SSTable.first/last should be used.
 *
 * @param in stream positioned at the start of a serialized index summary
 * @param partitioner partitioner used to turn the raw key bytes into DecoratedKeys
 * @param haveSamplingLevel whether the summary carries the samplingLevel/fullSamplingSummarySize fields
 * @return the (first, last) decorated key pair stored after the summary body
 * @throws IOException on read failure or truncated input
 */
public Pair<DecoratedKey, DecoratedKey> deserializeFirstLastKey(DataInputStream in, IPartitioner partitioner, boolean haveSamplingLevel) throws IOException
{
    in.skipBytes(4); // minIndexInterval
    int offsetCount = in.readInt();
    long offheapSize = in.readLong();
    if (haveSamplingLevel)
        in.skipBytes(8); // samplingLevel, fullSamplingSummarySize
    // Skip the offset table, then the remainder of the off-heap summary data.
    // Use long arithmetic (offsetCount * 4L) to avoid int overflow for large
    // summaries, and a checked loop because InputStream.skip() may legally
    // skip fewer bytes than requested — an unchecked skip would leave the
    // stream misaligned and corrupt the key reads below.
    skipAll(in, offsetCount * 4L);
    skipAll(in, offheapSize - offsetCount * 4L);
    DecoratedKey first = partitioner.decorateKey(ByteBufferUtil.readWithLength(in));
    DecoratedKey last = partitioner.decorateKey(ByteBufferUtil.readWithLength(in));
    return Pair.create(first, last);
}

/** Skips exactly {@code n} bytes from {@code in}, failing loudly if the stream ends first. */
private static void skipAll(DataInputStream in, long n) throws IOException
{
    long remaining = n;
    while (remaining > 0)
    {
        long skipped = in.skip(remaining);
        if (skipped > 0)
            remaining -= skipped;
        else if (in.read() == -1) // skip() made no progress; probe one byte to detect EOF
            throw new IOException("Unexpected end of stream while skipping " + n + " bytes");
        else
            remaining--;
    }
}
示例2: loadCodeAttributes
import java.io.DataInputStream; //导入方法依赖的package包/类
/**
 * Reads the attribute table of a Code attribute and captures the debugging
 * tables this class cares about (line numbers, local variables, local
 * variable types, stack map frames). Any other attribute is skipped over.
 * Afterwards, absent tables are normalized to empty arrays so callers never
 * see null.
 *
 * @param in   stream positioned at the Code attribute's attributes_count field
 * @param pool constant pool used to resolve attribute-name indices
 * @throws IOException if the stream cannot be read
 * @throws InvalidClassFormatException if an attribute-name index does not
 *         resolve to a UTF-8 constant-pool entry
 */
private void loadCodeAttributes(DataInputStream in, ConstantPool pool)
throws IOException {
    int count = in.readUnsignedShort();
    for (int i = 0; i < count; i++) {
        // attribute_name_index must point at a CONSTANT_Utf8 entry
        Object o = pool.get(in.readUnsignedShort());
        if (!(o instanceof CPUTF8Info))
            throw new InvalidClassFormatException();
        CPUTF8Info entry = (CPUTF8Info)o;
        int len = in.readInt();
        String name = entry.getName();
        if (name.equals("LineNumberTable")) //NOI18N
            loadLineNumberTable(in, pool);
        else if (name.equals("LocalVariableTable")) //NOI18N
            localVariableTable =
                LocalVariableTableEntry.loadLocalVariableTable(in, pool);
        else if (name.equals("LocalVariableTypeTable")) //NOI18N
            localVariableTypeTable =
                LocalVariableTypeTableEntry.loadLocalVariableTypeTable(in, pool);
        else if (name.equals("StackMapTable")) //NOI18N
            stackMapTable = StackMapFrame.loadStackMapTable(in, pool);
        else {
            if (debug)
                System.out.println("skipped unknown code attribute: " + name);
            // ignore unknown attribute...
            // skip() may consume fewer bytes than asked, so keep skipping
            // until it either covers the rest of the attribute or reports
            // no progress (n == 0, e.g. end of stream).
            int n;
            while ((n = (int)in.skip(len)) > 0 && n < len)
                len -= n;
        }
    }
    // normalize absent tables to empty arrays so callers need no null checks
    if (lineNumberTable == null)
        lineNumberTable = new int[0];
    if (localVariableTable == null)
        localVariableTable = new LocalVariableTableEntry[0];
    if (localVariableTypeTable == null)
        localVariableTypeTable = new LocalVariableTypeTableEntry[0];
    if (stackMapTable == null)
        stackMapTable = new StackMapFrame[0];
}
示例3: decodeAvp
import java.io.DataInputStream; //导入方法依赖的package包/类
/**
 * Decodes a single Diameter AVP from its wire representation: a 4-byte code,
 * 1 flags byte, a 3-byte length, an optional 4-byte vendor id (when the V
 * flag 0x80 is set), the payload, and padding up to a 4-byte boundary.
 *
 * @param in_b buffer containing at least one complete encoded AVP
 * @return the decoded AVP
 * @throws IOException if the buffer ends before the declared payload is read
 * @throws AvpDataException if the declared length is inconsistent with the buffer
 */
private Avp decodeAvp(byte[] in_b ) throws IOException, AvpDataException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(in_b));
    int code = in.readInt();
    int tmp = in.readInt();
    int flags = (tmp >> 24) & 0xFF;
    int length = tmp & 0xFFFFFF;
    if (length < 0 || length > in_b.length) {
        throw new AvpDataException("Not enough data in buffer!");
    }
    long vendor = 0;
    boolean hasVendor = false;
    if ((flags & 0x80) != 0) {
        vendor = in.readInt();
        hasVendor = true;
    }
    // Body length = declared length - 4(code) - 1(flags) - 3(length) [- 4(vendor)]
    byte[] rawData = new byte[length - (8 + (hasVendor ? 4 : 0))];
    // read() may return fewer bytes than requested; readFully either fills the
    // whole body or throws EOFException instead of silently truncating it.
    in.readFully(rawData);
    // Skip the padding that aligns the AVP to a 4-byte boundary. Bound the
    // loop on skip()'s actual progress so a zero return (end of buffer —
    // padding may be absent on the final AVP) cannot spin forever.
    if (length % 4 != 0) {
        int padding = 4 - (length % 4);
        while (padding > 0) {
            long s = in.skip(padding);
            if (s <= 0) {
                break;
            }
            padding -= (int) s;
        }
    }
    AvpImpl avp = new AvpImpl(code, (short) flags, (int) vendor, rawData);
    return avp;
}
示例4: readBytes
import java.io.DataInputStream; //导入方法依赖的package包/类
/**
 * Consumes and discards exactly {@code size} bytes from the stream (or as
 * many as remain, if the stream ends first).
 *
 * The original implementation called {@code skip(size)} once and ignored the
 * result; {@link DataInputStream#skip} may skip fewer bytes than requested,
 * which would silently leave the stream mispositioned for the next reader.
 *
 * @param dataInputStream stream to discard bytes from
 * @param size number of bytes to discard
 * @throws RuntimeException wrapping any IOException from the underlying stream
 */
@Override
public void readBytes(DataInputStream dataInputStream, int size)
{
    try
    {
        long remaining = size;
        while (remaining > 0)
        {
            long skipped = dataInputStream.skip(remaining);
            if (skipped > 0)
            {
                remaining -= skipped;
            }
            else if (dataInputStream.read() == -1)
            {
                // End of stream: nothing left to discard. Return quietly,
                // matching the original best-effort skip behavior.
                break;
            }
            else
            {
                remaining--;
            }
        }
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
}
示例5: retrieve
import java.io.DataInputStream; //导入方法依赖的package包/类
/**
 * Opens the blob identified by {@code key} for reading and positions the
 * returned stream {@code startByteOffset} bytes into the blob.
 *
 * @param key blob key to retrieve
 * @param startByteOffset number of leading bytes to skip before returning
 * @return a DataInputStream positioned at {@code startByteOffset}
 * @throws AzureException wrapping any failure from the storage layer
 * @throws IOException declared for interface compatibility; failures below are
 *         re-thrown as AzureException
 */
@Override
public DataInputStream retrieve(String key, long startByteOffset)
    throws AzureException, IOException {
  try {
    // Check if a session exists, if not create a session with the
    // Azure storage server.
    if (null == storageInteractionLayer) {
      final String errMsg = String.format(
          "Storage session expected for URI '%s' but does not exist.",
          sessionUri);
      throw new AssertionError(errMsg);
    }
    checkContainer(ContainerAccessType.PureRead);
    // Get blob reference and open the input buffer stream.
    CloudBlobWrapper blob = getBlobReference(key);
    // Open input stream and seek to the start offset.
    InputStream in = blob.openInputStream(
        getDownloadOptions(), getInstrumentedContext(isConcurrentOOBAppendAllowed()));
    // Create a data input stream.
    DataInputStream inDataStream = new DataInputStream(in);
    // Skip bytes and ignore return value. This is okay
    // because if you try to skip too far you will be positioned
    // at the end and reads will not return data.
    inDataStream.skip(startByteOffset);
    return inDataStream;
  } catch (Exception e) {
    // Re-throw as an Azure storage exception.
    throw new AzureException(e);
  }
}
示例6: skipFully
import java.io.DataInputStream; //导入方法依赖的package包/类
/**
 * Advances {@code stream} by exactly {@code n} bytes.
 *
 * {@link DataInputStream#skip} is only best-effort and may advance by fewer
 * bytes than requested, so this helper keeps retrying until the full count is
 * consumed. When skip() reports no progress at all, a single-byte read()
 * distinguishes a merely slow stream (byte consumed, keep going) from a
 * truncated one (EOF reached, fail loudly).
 *
 * @param stream stream to advance
 * @param n exact number of bytes to skip
 * @throws IOException if the stream ends before {@code n} bytes were skipped
 */
static void skipFully(DataInputStream stream, int n) throws IOException {
    long remaining = n;
    while (remaining != 0) {
        long advanced = stream.skip(remaining);
        remaining -= advanced;
        if (advanced == 0 && remaining != 0) {
            // skip() can legitimately return 0; probe with read() to check
            // for EOF (i.e., truncated class file).
            if (stream.read() == -1) {
                throw new IOException("truncated stream");
            }
            remaining--;
        }
    }
}
示例7: GFSnapshotImporter
import java.io.DataInputStream; //导入方法依赖的package包/类
/**
 * Opens a snapshot file, validates its version/format header, loads the
 * embedded PDX registry, checks it for compatibility with the live registry,
 * and finally opens a fresh buffered stream positioned at the first entry.
 *
 * @param in snapshot file to import
 * @throws IOException if the file is unreadable, has an unsupported version,
 *         an unrecognized format, or is truncated before the first entry
 * @throws ClassNotFoundException if a type referenced during PDX registry
 *         deserialization cannot be loaded
 */
public GFSnapshotImporter(File in) throws IOException, ClassNotFoundException {
    pdx = new ExportedRegistry();
    // read header and pdx registry
    long entryPosition;
    FileInputStream fis = new FileInputStream(in);
    FileChannel fc = fis.getChannel();
    DataInputStream tmp = new DataInputStream(fis);
    try {
        // read the snapshot file header
        version = tmp.readByte();
        if (version == SNAP_VER_1) {
            throw new IOException(
                LocalizedStrings.Snapshot_UNSUPPORTED_SNAPSHOT_VERSION_0.toLocalizedString(SNAP_VER_1)
                    + ": " + in);
        } else if (version == SNAP_VER_2) {
            // read format
            byte[] format = new byte[3];
            tmp.readFully(format);
            if (!Arrays.equals(format, SNAP_FMT)) {
                throw new IOException(LocalizedStrings.Snapshot_UNRECOGNIZED_FILE_TYPE_0
                    .toLocalizedString(Arrays.toString(format)) + ": " + in);
            }
            // read pdx location
            long registryPosition = tmp.readLong();
            // read region
            region = tmp.readUTF();
            entryPosition = fc.position();
            // read pdx
            if (registryPosition != -1) {
                fc.position(registryPosition);
                pdx.fromData(tmp);
            }
        } else {
            throw new IOException(
                LocalizedStrings.Snapshot_UNRECOGNIZED_FILE_VERSION_0.toLocalizedString(version)
                    + ": " + in);
        }
    } finally {
        tmp.close();
    }
    // check compatibility with the existing pdx types so we don't have to
    // do any translation...preexisting types or concurrent put ops may cause
    // this check to fail
    checkPdxTypeCompatibility();
    checkPdxEnumCompatibility();
    // open new stream with buffering for reading entries
    dis = new DataInputStream(new BufferedInputStream(new FileInputStream(in)));
    // skip() may advance fewer bytes than requested; loop until the stream is
    // actually positioned at the first entry, since an unchecked single skip
    // could silently misposition the entry reader.
    long remaining = entryPosition;
    while (remaining > 0) {
        long skipped = dis.skip(remaining);
        if (skipped > 0) {
            remaining -= skipped;
        } else if (dis.read() == -1) {
            throw new IOException("Snapshot file truncated before first entry: " + in);
        } else {
            remaining--;
        }
    }
}
示例8: deserializeFromPB
import java.io.DataInputStream; //导入方法依赖的package包/类
/**
 * Deserialize the file trailer as protobuf
 *
 * Reads the delimited FileTrailerProto, skips the padding that separates it
 * from the fixed trailer tail, and copies every field present in the
 * serialized form onto this trailer (absent optional fields keep their
 * current values).
 *
 * @param inputStream stream positioned at the start of the protobuf trailer
 * @throws IOException if the protobuf cannot be parsed or the padding is truncated
 */
void deserializeFromPB(DataInputStream inputStream) throws IOException {
    // read PB and skip padding; available() before/after measures how many
    // bytes the delimited protobuf actually consumed
    int start = inputStream.available();
    HFileProtos.FileTrailerProto trailerProto =
        HFileProtos.FileTrailerProto.PARSER.parseDelimitedFrom(inputStream);
    int size = start - inputStream.available();
    // skip() may advance fewer bytes than requested; loop until the whole
    // padding region is consumed so the stream lines up with the trailer tail
    long toSkip = getTrailerSize() - NOT_PB_SIZE - size;
    while (toSkip > 0) {
        long skipped = inputStream.skip(toSkip);
        if (skipped > 0) {
            toSkip -= skipped;
        } else if (inputStream.read() == -1) {
            throw new IOException("Truncated HFile trailer: " + toSkip + " padding bytes missing");
        } else {
            toSkip--;
        }
    }
    // process the PB: every field is optional, so only overwrite when present
    if (trailerProto.hasFileInfoOffset()) {
        fileInfoOffset = trailerProto.getFileInfoOffset();
    }
    if (trailerProto.hasLoadOnOpenDataOffset()) {
        loadOnOpenDataOffset = trailerProto.getLoadOnOpenDataOffset();
    }
    if (trailerProto.hasUncompressedDataIndexSize()) {
        uncompressedDataIndexSize = trailerProto.getUncompressedDataIndexSize();
    }
    if (trailerProto.hasTotalUncompressedBytes()) {
        totalUncompressedBytes = trailerProto.getTotalUncompressedBytes();
    }
    if (trailerProto.hasDataIndexCount()) {
        dataIndexCount = trailerProto.getDataIndexCount();
    }
    if (trailerProto.hasMetaIndexCount()) {
        metaIndexCount = trailerProto.getMetaIndexCount();
    }
    if (trailerProto.hasEntryCount()) {
        entryCount = trailerProto.getEntryCount();
    }
    if (trailerProto.hasNumDataIndexLevels()) {
        numDataIndexLevels = trailerProto.getNumDataIndexLevels();
    }
    if (trailerProto.hasFirstDataBlockOffset()) {
        firstDataBlockOffset = trailerProto.getFirstDataBlockOffset();
    }
    if (trailerProto.hasLastDataBlockOffset()) {
        lastDataBlockOffset = trailerProto.getLastDataBlockOffset();
    }
    if (trailerProto.hasComparatorClassName()) {
        // TODO this is a classname encoded into an HFile's trailer. We are going to need to have
        // some compat code here.
        setComparatorClass(getComparatorClass(trailerProto.getComparatorClassName()));
    }
    if (trailerProto.hasCompressionCodec()) {
        compressionCodec = Compression.Algorithm.values()[trailerProto.getCompressionCodec()];
    } else {
        compressionCodec = Compression.Algorithm.NONE;
    }
    if (trailerProto.hasEncryptionKey()) {
        encryptionKey = trailerProto.getEncryptionKey().toByteArray();
    }
}
示例9: computeHash
import java.io.DataInputStream; //导入方法依赖的package包/类
/**
 * Computes an OpenSubtitles-style hash: the file length plus 64-bit checksums
 * of the first and last HASH_CHUNK_SIZE bytes, formatted as 16 hex digits.
 *
 * @param stream source data (read sequentially; never reset)
 * @param length total number of bytes the stream will supply
 * @return 16-character lower-case hexadecimal hash
 * @throws IOException if the stream ends before {@code length} bytes are available
 */
public static String computeHash(InputStream stream, long length) throws IOException {
    int chunkSizeForFile = (int) Math.min(HASH_CHUNK_SIZE, length);
    // buffer that will contain the head and the tail chunk, chunks will overlap if length is smaller than two chunks
    byte[] chunkBytes = new byte[(int) Math.min(2 * HASH_CHUNK_SIZE, length)];
    DataInputStream in = new DataInputStream(stream);
    // first chunk
    in.readFully(chunkBytes, 0, chunkSizeForFile);
    long position = chunkSizeForFile;
    long tailChunkPosition = length - chunkSizeForFile;
    // seek to position of the tail chunk, or not at all if length is smaller
    // than two chunks. skip() may advance fewer bytes than asked — or zero —
    // so check progress each round and probe with read() to avoid spinning
    // forever on a stream whose skip() keeps returning 0 (e.g. at EOF).
    while (position < tailChunkPosition) {
        long skipped = in.skip(tailChunkPosition - position);
        if (skipped > 0) {
            position += skipped;
        } else if (in.read() == -1) {
            throw new IOException("Stream ended before tail chunk at " + tailChunkPosition);
        } else {
            position++;
        }
    }
    // second chunk, or the rest of the data if length is smaller than two chunks
    in.readFully(chunkBytes, chunkSizeForFile, chunkBytes.length - chunkSizeForFile);
    long head = computeHashForChunk(ByteBuffer.wrap(chunkBytes, 0, chunkSizeForFile));
    long tail = computeHashForChunk(ByteBuffer.wrap(chunkBytes, chunkBytes.length - chunkSizeForFile, chunkSizeForFile));
    return String.format("%016x", length + head + tail);
}