This page collects typical usage examples of the Java method org.apache.hadoop.hbase.util.ByteBufferUtils.skip. If you are unsure what ByteBufferUtils.skip does, how to call it, or what it looks like in real code, the curated snippets below may help. See also the enclosing class, org.apache.hadoop.hbase.util.ByteBufferUtils, for more context.
The following lists 15 code examples of ByteBufferUtils.skip, ordered by popularity.
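For orientation before the examples: ByteBufferUtils.skip advances a ByteBuffer's position by a given number of bytes without reading them. A minimal standalone sketch of that behavior (plain java.nio, not the HBase source itself):

import java.nio.ByteBuffer;

public class SkipDemo {
  // Same effect as ByteBufferUtils.skip(buffer, length): move the position, read nothing.
  static void skip(ByteBuffer buffer, int length) {
    buffer.position(buffer.position() + length);
  }

  public static void main(String[] args) {
    ByteBuffer bb = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4, 5 });
    skip(bb, 2);                  // jump over the first two bytes
    System.out.println(bb.get()); // prints 3
  }
}

Every example below uses skip this way: to step over a region of the buffer (a value, a key, or a tags section) whose length is already known, so the bytes never need to be copied.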
Example 1: decodeTags
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
protected void decodeTags() {
  current.tagsLength = ByteBufferUtils.readCompressedInt(currentBuffer);
  if (tagCompressionContext != null) {
    if (current.uncompressTags) {
      // Tag compression is being used. Uncompress it into tagsBuffer.
      current.ensureSpaceForTags();
      try {
        current.tagsCompressedLength = tagCompressionContext.uncompressTags(currentBuffer,
            current.tagsBuffer, 0, current.tagsLength);
      } catch (IOException e) {
        throw new RuntimeException("Exception while uncompressing tags", e);
      }
    } else {
      ByteBufferUtils.skip(currentBuffer, current.tagsCompressedLength);
      current.uncompressTags = true; // Reset this.
    }
    current.tagsOffset = -1;
  } else {
    // When tag compression is not used, do not copy the tag bytes into tagsBuffer.
    // Just mark the tags offset so the KV buffer can be created later in getKeyValueBuffer().
    current.tagsOffset = currentBuffer.position();
    ByteBufferUtils.skip(currentBuffer, current.tagsLength);
  }
}
Example 2: decodeTags
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
protected void decodeTags() {
  current.tagsLength = ByteBufferUtils.readCompressedInt(currentBuffer);
  if (tagCompressionContext != null) {
    if (current.uncompressTags) {
      // Tag compression is being used. Uncompress it into tagsBuffer.
      current.ensureSpaceForTags();
      try {
        current.tagsCompressedLength = tagCompressionContext.uncompressTags(currentBuffer,
            current.tagsBuffer, 0, current.tagsLength);
      } catch (IOException e) {
        throw new RuntimeException("Exception while uncompressing tags", e);
      }
    } else {
      ByteBufferUtils.skip(currentBuffer, current.tagsCompressedLength);
      current.uncompressTags = true; // Reset this.
    }
    current.tagsOffset = -1;
  } else {
    // When tag compression is not used, do not make a temp copy of the tag bytes into tagsBuffer.
    // Just mark the tags offset so the KV buffer can be created later in getKeyValueBuffer().
    current.tagsOffset = currentBuffer.position();
    ByteBufferUtils.skip(currentBuffer, current.tagsLength);
  }
}
Example 3: readKeyValueLen
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
protected void readKeyValueLen() {
  blockBuffer.mark();
  currKeyLen = blockBuffer.getInt();
  currValueLen = blockBuffer.getInt();
  ByteBufferUtils.skip(blockBuffer, currKeyLen + currValueLen);
  readMvccVersion();
  if (currKeyLen < 0 || currValueLen < 0
      || currKeyLen > blockBuffer.limit()
      || currValueLen > blockBuffer.limit()) {
    throw new IllegalStateException("Invalid currKeyLen " + currKeyLen
        + " or currValueLen " + currValueLen + ". Block offset: "
        + block.getOffset() + ", block length: " + blockBuffer.limit()
        + ", position: " + blockBuffer.position() + " (without header).");
  }
  blockBuffer.reset();
}
Example 4: readKeyValueLen
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
protected void readKeyValueLen() {
  blockBuffer.mark();
  currKeyLen = blockBuffer.getInt();
  currValueLen = blockBuffer.getInt();
  if (currKeyLen < 0 || currValueLen < 0 || currKeyLen > blockBuffer.limit()
      || currValueLen > blockBuffer.limit()) {
    throw new IllegalStateException("Invalid currKeyLen " + currKeyLen + " or currValueLen "
        + currValueLen + ". Block offset: "
        + block.getOffset() + ", block length: " + blockBuffer.limit() + ", position: "
        + blockBuffer.position() + " (without header).");
  }
  ByteBufferUtils.skip(blockBuffer, currKeyLen + currValueLen);
  if (reader.hfileContext.isIncludesTags()) {
    currTagsLen = blockBuffer.getShort();
    if (currTagsLen < 0 || currTagsLen > blockBuffer.limit()) {
      throw new IllegalStateException("Invalid currTagsLen " + currTagsLen + ". Block offset: "
          + block.getOffset() + ", block length: " + blockBuffer.limit() + ", position: "
          + blockBuffer.position() + " (without header).");
    }
    ByteBufferUtils.skip(blockBuffer, currTagsLen);
  }
  readMvccVersion();
  blockBuffer.reset();
}
Example 5: decodeKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * I don't think this method is called during normal HBase operation, so efficiency is not
 * important.
 */
public ByteBuffer decodeKeyValues(DataInputStream source, int allocateHeaderLength,
    int skipLastBytes, HFileBlockDecodingContext decodingCtx) throws IOException {
  ByteBuffer sourceAsBuffer = ByteBufferUtils.drainInputStreamToBuffer(source); // wasteful copy
  sourceAsBuffer.mark();
  PrefixTreeBlockMeta blockMeta = new PrefixTreeBlockMeta(sourceAsBuffer);
  sourceAsBuffer.rewind();
  int numV1BytesWithHeader = allocateHeaderLength + blockMeta.getNumKeyValueBytes();
  byte[] keyValueBytesWithHeader = new byte[numV1BytesWithHeader];
  ByteBuffer result = ByteBuffer.wrap(keyValueBytesWithHeader);
  result.rewind();
  CellSearcher searcher = null;
  try {
    boolean includesMvcc = decodingCtx.getHFileContext().isIncludesMvcc();
    searcher = DecoderFactory.checkOut(sourceAsBuffer, includesMvcc);
    while (searcher.advance()) {
      KeyValue currentCell = KeyValueUtil.copyToNewKeyValue(searcher.current());
      // Needs to be modified for DirectByteBuffers: there are no existing methods to
      // write VLongs to a byte[].
      int offset = result.arrayOffset() + result.position();
      System.arraycopy(currentCell.getBuffer(), currentCell.getOffset(), result.array(), offset,
          currentCell.getLength());
      int keyValueLength = KeyValueUtil.length(currentCell);
      ByteBufferUtils.skip(result, keyValueLength);
      offset += keyValueLength;
      if (includesMvcc) {
        ByteBufferUtils.writeVLong(result, currentCell.getMvccVersion());
      }
    }
    result.position(result.limit()); // make it appear as if we were appending
    return result;
  } finally {
    DecoderFactory.checkIn(searcher);
  }
}
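A note on why skip appears in this decoder: System.arraycopy writes through the backing array and does not move the destination ByteBuffer's position, so the position must be advanced by hand afterwards. A small standalone illustration of that pattern:

import java.nio.ByteBuffer;

public class AppendViaArraycopy {
  public static void main(String[] args) {
    byte[] src = { 10, 20, 30 };
    ByteBuffer result = ByteBuffer.allocate(8);
    // arraycopy bypasses the ByteBuffer API, so result.position() stays at 0...
    System.arraycopy(src, 0, result.array(),
        result.arrayOffset() + result.position(), src.length);
    // ...and must be advanced manually, which is exactly what ByteBufferUtils.skip does.
    result.position(result.position() + src.length);
    System.out.println(result.position()); // prints 3
  }
}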
Example 6: nextShallowCopy
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * Creates a new KeyValue object positioned in the supplied ByteBuffer and sets the ByteBuffer's
 * position to the start of the next KeyValue. Does not allocate a new array or copy data.
 * @param bb heap buffer positioned at the start of a serialized KeyValue
 * @param includesMvccVersion whether a vlong mvcc version follows each KeyValue
 * @param includesTags whether a tags section follows the value
 */
public static KeyValue nextShallowCopy(final ByteBuffer bb, final boolean includesMvccVersion,
    boolean includesTags) {
  if (bb.isDirect()) {
    throw new IllegalArgumentException("only supports heap buffers");
  }
  if (bb.remaining() < 1) {
    return null;
  }
  KeyValue keyValue = null;
  int underlyingArrayOffset = bb.arrayOffset() + bb.position();
  int keyLength = bb.getInt();
  int valueLength = bb.getInt();
  ByteBufferUtils.skip(bb, keyLength + valueLength);
  int tagsLength = 0;
  if (includesTags) {
    // Read short as unsigned, high byte first
    tagsLength = ((bb.get() & 0xff) << 8) ^ (bb.get() & 0xff);
    ByteBufferUtils.skip(bb, tagsLength);
  }
  int kvLength = (int) KeyValue.getKeyValueDataStructureSize(keyLength, valueLength, tagsLength);
  keyValue = new KeyValue(bb.array(), underlyingArrayOffset, kvLength);
  if (includesMvccVersion) {
    long mvccVersion = ByteBufferUtils.readVLong(bb);
    keyValue.setSequenceId(mvccVersion);
  }
  return keyValue;
}
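A typical way to consume this helper is to walk a heap buffer holding back-to-back serialized KeyValues until it returns null. A hedged sketch, assuming the method lives in KeyValueUtil as it does in HBase:

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;

public class WalkKeyValues {
  // Iterates serialized KeyValues laid out back-to-back in a heap buffer.
  static void dump(ByteBuffer bb, boolean withMvcc, boolean withTags) {
    KeyValue kv;
    while ((kv = KeyValueUtil.nextShallowCopy(bb, withMvcc, withTags)) != null) {
      // Each call leaves bb positioned at the start of the next KeyValue.
      System.out.println(kv);
    }
  }
}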
Example 7: compressTags
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * Compresses tags one by one and writes them to the OutputStream.
 * @param out Stream to which the compressed tags are written
 * @param in Source buffer where tags are available
 * @param length Length of all tag bytes
 * @throws IOException
 */
public void compressTags(OutputStream out, ByteBuffer in, int length) throws IOException {
  if (in.hasArray()) {
    // Array-backed buffer: compress straight out of the backing array, then skip past the tags.
    compressTags(out, in.array(), in.arrayOffset() + in.position(), length);
    ByteBufferUtils.skip(in, length);
  } else {
    // Direct buffer: copy the tags out first; get() already advances the position.
    byte[] tagBuf = new byte[length];
    in.get(tagBuf);
    compressTags(out, tagBuf, 0, length);
  }
}
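The two branches exist because the compressTags(OutputStream, byte[], int, int) overload needs a byte array: an array-backed buffer can be read in place (after which skip moves the position past the consumed tags), while a direct buffer exposes no array and must be copied out with get(), which advances the position on its own. A quick check of the distinction:

import java.nio.ByteBuffer;

public class ArrayVsDirect {
  public static void main(String[] args) {
    ByteBuffer heap = ByteBuffer.allocate(4);         // hasArray() == true: read in place, then skip
    ByteBuffer direct = ByteBuffer.allocateDirect(4); // hasArray() == false: copy out via get()
    System.out.println(heap.hasArray() + " / " + direct.hasArray()); // true / false
  }
}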
Example 8: createSeeker
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
@Override
public EncodedSeeker createSeeker(KVComparator comparator,
    final HFileBlockDecodingContext decodingCtx) {
  return new BufferedEncodedSeeker<SeekerState>(comparator, decodingCtx) {
    @Override
    protected void decodeNext() {
      current.keyLength = currentBuffer.getInt();
      current.valueLength = currentBuffer.getInt();
      current.ensureSpaceForKey();
      currentBuffer.get(current.keyBuffer, 0, current.keyLength);
      current.valueOffset = currentBuffer.position();
      ByteBufferUtils.skip(currentBuffer, current.valueLength);
      if (includesTags()) {
        // Read short as unsigned, high byte first
        current.tagsLength = ((currentBuffer.get() & 0xff) << 8) ^ (currentBuffer.get() & 0xff);
        ByteBufferUtils.skip(currentBuffer, current.tagsLength);
      }
      if (includesMvcc()) {
        current.memstoreTS = ByteBufferUtils.readVLong(currentBuffer);
      } else {
        current.memstoreTS = 0;
      }
      current.nextKvOffset = currentBuffer.position();
    }

    @Override
    protected void decodeFirst() {
      ByteBufferUtils.skip(currentBuffer, Bytes.SIZEOF_INT);
      current.lastCommonPrefix = 0;
      decodeNext();
    }
  };
}
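The masked read in decodeNext is how a 2-byte length is recovered as an unsigned value: without the & 0xff masks, the bytes would sign-extend and a large tags length would come out negative. A standalone demonstration:

public class UnsignedShortRead {
  public static void main(String[] args) {
    byte hi = (byte) 0xFF, lo = (byte) 0x01;
    int wrong = (hi << 8) ^ lo;                   // sign-extends to -255
    int right = ((hi & 0xff) << 8) ^ (lo & 0xff); // 65281, the intended unsigned value
    System.out.println(wrong + " vs " + right);
  }
}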
Example 9: readKey
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * Analyze the key and fill the state assuming we know previous state.
 * Uses mark() and reset() in ByteBuffer to avoid moving the position.
 * <p>
 * This method overrides all the fields of this instance, except
 * {@link #prevOffset}, which is usually manipulated directly by encoders
 * and decoders.
 * @param in Buffer at the position where key starts
 * @param keyLength Length of key in bytes
 * @param valueLength Length of values in bytes
 * @param commonPrefix how many first bytes are common with previous KeyValue
 * @param previousState State from previous KeyValue
 */
void readKey(ByteBuffer in, int keyLength, int valueLength,
    int commonPrefix, CompressionState previousState) {
  this.keyLength = keyLength;
  this.valueLength = valueLength;
  // fill the state
  in.mark(); // mark beginning of key
  if (commonPrefix < KeyValue.ROW_LENGTH_SIZE) {
    rowLength = in.getShort();
    ByteBufferUtils.skip(in, rowLength);
    familyLength = in.get();
    qualifierLength = keyLength - rowLength - familyLength -
        KeyValue.KEY_INFRASTRUCTURE_SIZE;
    ByteBufferUtils.skip(in, familyLength + qualifierLength);
  } else {
    rowLength = previousState.rowLength;
    familyLength = previousState.familyLength;
    qualifierLength = previousState.qualifierLength +
        keyLength - previousState.keyLength;
    ByteBufferUtils.skip(in, (KeyValue.ROW_LENGTH_SIZE +
        KeyValue.FAMILY_LENGTH_SIZE) +
        rowLength + familyLength + qualifierLength);
  }
  readTimestamp(in);
  type = in.get();
  in.reset();
}
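The mark()/reset() pair is what lets readKey inspect the key without disturbing the caller's position: skip walks the position forward through the row, family, and qualifier, and reset() snaps it back afterwards. A minimal illustration of the peek pattern:

import java.nio.ByteBuffer;

public class PeekWithMarkReset {
  public static void main(String[] args) {
    ByteBuffer in = ByteBuffer.wrap(new byte[] { 0, 3, 'a', 'b', 'c', 9 });
    in.mark();                              // remember the starting position
    short rowLength = in.getShort();        // reads 3
    in.position(in.position() + rowLength); // skip the row, like ByteBufferUtils.skip
    in.reset();                             // position restored to the mark
    System.out.println(in.position());      // prints 0
  }
}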
Example 10: createSeeker
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
@Override
public EncodedSeeker createSeeker(KVComparator comparator,
    final HFileBlockDecodingContext decodingCtx) {
  return new BufferedEncodedSeeker<SeekerState>(comparator, decodingCtx) {
    @Override
    protected void decodeNext() {
      current.keyLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.valueLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.lastCommonPrefix =
          ByteBufferUtils.readCompressedInt(currentBuffer);
      current.keyLength += current.lastCommonPrefix;
      current.ensureSpaceForKey();
      currentBuffer.get(current.keyBuffer, current.lastCommonPrefix,
          current.keyLength - current.lastCommonPrefix);
      current.valueOffset = currentBuffer.position();
      ByteBufferUtils.skip(currentBuffer, current.valueLength);
      if (includesTags()) {
        decodeTags();
      }
      if (includesMvcc()) {
        current.memstoreTS = ByteBufferUtils.readVLong(currentBuffer);
      } else {
        current.memstoreTS = 0;
      }
      current.nextKvOffset = currentBuffer.position();
    }

    @Override
    protected void decodeFirst() {
      ByteBufferUtils.skip(currentBuffer, Bytes.SIZEOF_INT);
      decodeNext();
    }
  };
}
Example 11: createSeeker
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
@Override
public EncodedSeeker createSeeker(RawComparator<byte[]> comparator,
    final boolean includesMemstoreTS) {
  return new BufferedEncodedSeeker<SeekerState>(comparator) {
    @Override
    protected void decodeNext() {
      current.keyLength = currentBuffer.getInt();
      current.valueLength = currentBuffer.getInt();
      current.ensureSpaceForKey();
      currentBuffer.get(current.keyBuffer, 0, current.keyLength);
      current.valueOffset = currentBuffer.position();
      ByteBufferUtils.skip(currentBuffer, current.valueLength);
      if (includesMemstoreTS) {
        current.memstoreTS = ByteBufferUtils.readVLong(currentBuffer);
      } else {
        current.memstoreTS = 0;
      }
      current.nextKvOffset = currentBuffer.position();
    }

    @Override
    protected void decodeFirst() {
      ByteBufferUtils.skip(currentBuffer, Bytes.SIZEOF_INT);
      current.lastCommonPrefix = 0;
      decodeNext();
    }
  };
}
Example 12: addKV
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
private int addKV(int prevKeyOffset, DataOutputStream out,
    ByteBuffer in, int prevKeyLength) throws IOException {
  int keyLength = in.getInt();
  int valueLength = in.getInt();
  if (prevKeyOffset == -1) {
    // first KeyValue: there is no previous key, so copy the whole key with no common prefix
    ByteBufferUtils.putCompressedInt(out, keyLength);
    ByteBufferUtils.putCompressedInt(out, valueLength);
    ByteBufferUtils.putCompressedInt(out, 0);
    ByteBufferUtils.moveBufferToStream(out, in, keyLength + valueLength);
  } else {
    // find a common prefix and skip it
    int common = ByteBufferUtils.findCommonPrefix(
        in, prevKeyOffset + KeyValue.ROW_OFFSET,
        in.position(),
        Math.min(prevKeyLength, keyLength));
    ByteBufferUtils.putCompressedInt(out, keyLength - common);
    ByteBufferUtils.putCompressedInt(out, valueLength);
    ByteBufferUtils.putCompressedInt(out, common);
    ByteBufferUtils.skip(in, common);
    ByteBufferUtils.moveBufferToStream(out, in, keyLength - common
        + valueLength);
  }
  return keyLength;
}
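To see what addKV saves, consider two consecutive keys that share a prefix: the encoder writes only the suffix length, the value length, and the shared-prefix length, then uses skip to jump over the shared bytes so only the key suffix and value hit the stream. A standalone illustration of the idea (not HBase code; the key layout here is simplified):

public class PrefixDeltaDemo {
  static int commonPrefix(byte[] a, byte[] b) {
    int i = 0;
    while (i < a.length && i < b.length && a[i] == b[i]) {
      i++;
    }
    return i;
  }

  public static void main(String[] args) {
    byte[] prev = "row1/cf:qual1".getBytes();
    byte[] curr = "row1/cf:qual2".getBytes();
    int common = commonPrefix(prev, curr);
    // addKV would emit: (keyLength - common), valueLength, common, then just the suffix.
    System.out.println("common = " + common + ", suffix bytes = " + (curr.length - common));
  }
}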
Example 13: createSeeker
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
@Override
public EncodedSeeker createSeeker(RawComparator<byte[]> comparator,
    final boolean includesMemstoreTS) {
  return new BufferedEncodedSeeker<SeekerState>(comparator) {
    @Override
    protected void decodeNext() {
      current.keyLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.valueLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.lastCommonPrefix =
          ByteBufferUtils.readCompressedInt(currentBuffer);
      current.keyLength += current.lastCommonPrefix;
      current.ensureSpaceForKey();
      currentBuffer.get(current.keyBuffer, current.lastCommonPrefix,
          current.keyLength - current.lastCommonPrefix);
      current.valueOffset = currentBuffer.position();
      ByteBufferUtils.skip(currentBuffer, current.valueLength);
      if (includesMemstoreTS) {
        current.memstoreTS = ByteBufferUtils.readVLong(currentBuffer);
      } else {
        current.memstoreTS = 0;
      }
      current.nextKvOffset = currentBuffer.position();
    }

    @Override
    protected void decodeFirst() {
      ByteBufferUtils.skip(currentBuffer, Bytes.SIZEOF_INT);
      decodeNext();
    }
  };
}
Example 14: createSeeker
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
@Override
public EncodedSeeker createSeeker(KVComparator comparator,
    final HFileBlockDecodingContext decodingCtx) {
  return new BufferedEncodedSeeker<SeekerState>(comparator, decodingCtx) {
    @Override
    protected void decodeNext() {
      current.keyLength = currentBuffer.getInt();
      current.valueLength = currentBuffer.getInt();
      current.ensureSpaceForKey();
      currentBuffer.get(current.keyBuffer, 0, current.keyLength);
      current.valueOffset = currentBuffer.position();
      ByteBufferUtils.skip(currentBuffer, current.valueLength);
      if (includesTags()) {
        current.tagsLength = currentBuffer.getShort();
        ByteBufferUtils.skip(currentBuffer, current.tagsLength);
      }
      if (includesMvcc()) {
        current.memstoreTS = ByteBufferUtils.readVLong(currentBuffer);
      } else {
        current.memstoreTS = 0;
      }
      current.nextKvOffset = currentBuffer.position();
    }

    @Override
    protected void decodeFirst() {
      ByteBufferUtils.skip(currentBuffer, Bytes.SIZEOF_INT);
      current.lastCommonPrefix = 0;
      decodeNext();
    }
  };
}
Example 15: nextShallowCopy
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * Creates a new KeyValue object positioned in the supplied ByteBuffer and sets the ByteBuffer's
 * position to the start of the next KeyValue. Does not allocate a new array or copy data.
 * @param bb heap buffer positioned at the start of a serialized KeyValue
 * @param includesMvccVersion whether a vlong mvcc version follows each KeyValue
 * @param includesTags whether a tags section follows the value
 */
public static KeyValue nextShallowCopy(final ByteBuffer bb, final boolean includesMvccVersion,
    boolean includesTags) {
  if (bb.isDirect()) {
    throw new IllegalArgumentException("only supports heap buffers");
  }
  if (bb.remaining() < 1) {
    return null;
  }
  KeyValue keyValue = null;
  int underlyingArrayOffset = bb.arrayOffset() + bb.position();
  int keyLength = bb.getInt();
  int valueLength = bb.getInt();
  ByteBufferUtils.skip(bb, keyLength + valueLength);
  short tagsLength = 0;
  if (includesTags) {
    tagsLength = bb.getShort();
    ByteBufferUtils.skip(bb, tagsLength);
  }
  int kvLength = (int) KeyValue.getKeyValueDataStructureSize(keyLength, valueLength, tagsLength);
  keyValue = new KeyValue(bb.array(), underlyingArrayOffset, kvLength);
  if (includesMvccVersion) {
    long mvccVersion = ByteBufferUtils.readVLong(bb);
    keyValue.setMvccVersion(mvccVersion);
  }
  return keyValue;
}