This page collects typical usage examples of the Java method org.apache.hadoop.hbase.util.ByteBufferUtils.readCompressedInt. If you are unsure what ByteBufferUtils.readCompressedInt does or how to call it, the curated examples below should help. You can also explore further usage of the containing class, org.apache.hadoop.hbase.util.ByteBufferUtils.
The following 15 code examples of ByteBufferUtils.readCompressedInt are sorted by popularity by default.
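Before the examples, it may help to see the byte format readCompressedInt consumes: HBase writes these integers with the counterpart method ByteBufferUtils.putCompressedInt, storing seven value bits per byte and setting the high bit when another byte follows. A minimal round-trip sketch, assuming hbase-common is on the classpath (the class name CompressedIntRoundTrip is just for illustration):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class CompressedIntRoundTrip {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    ByteBufferUtils.putCompressedInt(out, 5);   // fits in one byte
    ByteBufferUtils.putCompressedInt(out, 300); // needs two bytes

    // Read the values back in the same order they were written.
    ByteBuffer buf = ByteBuffer.wrap(out.toByteArray());
    System.out.println(ByteBufferUtils.readCompressedInt(buf)); // 5
    System.out.println(ByteBufferUtils.readCompressedInt(buf)); // 300
  }
}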
Example 1: decodeTags
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
protected void decodeTags() {
  current.tagsLength = ByteBufferUtils.readCompressedInt(currentBuffer);
  if (tagCompressionContext != null) {
    if (current.uncompressTags) {
      // Tag compression is being used. Uncompress the tags into tagsBuffer.
      current.ensureSpaceForTags();
      try {
        current.tagsCompressedLength = tagCompressionContext.uncompressTags(currentBuffer,
            current.tagsBuffer, 0, current.tagsLength);
      } catch (IOException e) {
        throw new RuntimeException("Exception while uncompressing tags", e);
      }
    } else {
      ByteBufferUtils.skip(currentBuffer, current.tagsCompressedLength);
      current.uncompressTags = true; // Reset this.
    }
    current.tagsOffset = -1;
  } else {
    // When tag compression is not used, avoid copying the tag bytes into tagsBuffer.
    // Just mark the tags offset so the KV buffer can be created later in getKeyValueBuffer().
    current.tagsOffset = currentBuffer.position();
    ByteBufferUtils.skip(currentBuffer, current.tagsLength);
  }
}
Example 2: getFirstKeyInBlock
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public ByteBuffer getFirstKeyInBlock(ByteBuffer block) {
  block.mark();
  block.position(Bytes.SIZEOF_INT);
  int keyLength = ByteBufferUtils.readCompressedInt(block);
  ByteBufferUtils.readCompressedInt(block); // valueLength
  int commonLength = ByteBufferUtils.readCompressedInt(block);
  if (commonLength != 0) {
    throw new AssertionError("Nonzero common length in the first key in "
        + "block: " + commonLength);
  }
  int pos = block.position();
  block.reset();
  ByteBuffer dup = block.duplicate();
  dup.position(pos);
  dup.limit(pos + keyLength);
  return dup.slice();
}
Example 3: decodeTags
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
protected void decodeTags() {
  current.tagsLength = ByteBufferUtils.readCompressedInt(currentBuffer);
  if (tagCompressionContext != null) {
    if (current.uncompressTags) {
      // Tag compression is being used. Uncompress the tags into tagsBuffer.
      current.ensureSpaceForTags();
      try {
        current.tagsCompressedLength = tagCompressionContext.uncompressTags(currentBuffer,
            current.tagsBuffer, 0, current.tagsLength);
      } catch (IOException e) {
        throw new RuntimeException("Exception while uncompressing tags", e);
      }
    } else {
      ByteBufferUtils.skip(currentBuffer, current.tagsCompressedLength);
      current.uncompressTags = true; // Reset this.
    }
    current.tagsOffset = -1;
  } else {
    // When tag compression is not used, avoid a temporary copy of the tag bytes into tagsBuffer.
    // Just mark the tags offset so the KV buffer can be created later in getKeyValueBuffer().
    current.tagsOffset = currentBuffer.position();
    ByteBufferUtils.skip(currentBuffer, current.tagsLength);
  }
}
Example 4: getFirstKeyInBlock
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public ByteBuffer getFirstKeyInBlock(ByteBuffer block) {
  block.mark();
  block.position(Bytes.SIZEOF_INT);
  int keyLength = ByteBufferUtils.readCompressedInt(block);
  ByteBufferUtils.readCompressedInt(block); // valueLength
  int commonLength = ByteBufferUtils.readCompressedInt(block);
  if (commonLength != 0) {
    throw new AssertionError("Nonzero common length in the first key in "
        + "block: " + commonLength);
  }
  int pos = block.position();
  block.reset();
  return ByteBuffer.wrap(block.array(), block.arrayOffset() + pos, keyLength)
      .slice();
}
Example 5: getFirstKeyInBlock
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public ByteBuffer getFirstKeyInBlock(ByteBuffer block) {
  block.mark();
  block.position(Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE);
  int keyLength = ByteBufferUtils.readCompressedInt(block);
  ByteBufferUtils.readCompressedInt(block); // valueLength
  ByteBufferUtils.readCompressedInt(block); // commonLength
  int pos = block.position();
  block.reset();
  ByteBuffer dup = block.duplicate();
  dup.position(pos);
  dup.limit(pos + keyLength);
  return dup.slice();
}
Example 6: afterDecodingKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
protected final void afterDecodingKeyValue(DataInputStream source,
    ByteBuffer dest, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
  if (decodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = ByteBufferUtils.readCompressedInt(source);
    // Put as unsigned short
    dest.put((byte) ((tagsLength >> 8) & 0xff));
    dest.put((byte) (tagsLength & 0xff));
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = decodingCtx.getTagCompressionContext();
      // When tag compression is used in this file, tagCompressionContext will be non-null.
      if (tagCompressionContext != null) {
        tagCompressionContext.uncompressTags(source, dest, tagsLength);
      } else {
        ByteBufferUtils.copyFromStreamToBuffer(dest, source, tagsLength);
      }
    }
  }
  if (decodingCtx.getHFileContext().isIncludesMvcc()) {
    long memstoreTS = -1;
    try {
      // Copy the memstore timestamp from the data input stream to the byte buffer.
      memstoreTS = WritableUtils.readVLong(source);
      ByteBufferUtils.writeVLong(dest, memstoreTS);
    } catch (IOException ex) {
      throw new RuntimeException("Unable to copy memstore timestamp " +
          memstoreTS + " after decoding a key/value");
    }
  }
}
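A note on the two single-byte puts above: writing (tagsLength >> 8) & 0xff followed by tagsLength & 0xff produces the same big-endian bytes as ByteBuffer.putShort, while keeping tagsLength as an int so lengths above Short.MAX_VALUE still pass the tagsLength > 0 check (compare Example 14 below, which uses a signed short). A small JDK-only sketch of the equivalence (the class name UnsignedShortLayout is hypothetical):

import java.nio.ByteBuffer;
import java.util.Arrays;

public class UnsignedShortLayout {
  public static void main(String[] args) {
    int tagsLength = 40000; // fits in an unsigned short, overflows a signed one

    ByteBuffer manual = ByteBuffer.allocate(2);
    manual.put((byte) ((tagsLength >> 8) & 0xff)); // high byte first (big-endian)
    manual.put((byte) (tagsLength & 0xff));        // then the low byte

    ByteBuffer viaShort = ByteBuffer.allocate(2);
    viaShort.putShort((short) tagsLength); // same bytes despite the signed cast

    System.out.println(Arrays.equals(manual.array(), viaShort.array())); // true

    // Decoding the two bytes back as an unsigned value:
    int decoded = ((manual.array()[0] & 0xff) << 8) | (manual.array()[1] & 0xff);
    System.out.println(decoded); // 40000
  }
}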
Example 7: decodeKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
private int decodeKeyValue(DataInputStream source, ByteBuffer buffer,
    int prevKeyOffset)
    throws IOException, EncoderBufferTooSmallException {
  int keyLength = ByteBufferUtils.readCompressedInt(source);
  int valueLength = ByteBufferUtils.readCompressedInt(source);
  int commonLength = ByteBufferUtils.readCompressedInt(source);
  int keyOffset;
  keyLength += commonLength;
  ensureSpace(buffer, keyLength + valueLength + KeyValue.ROW_OFFSET);
  buffer.putInt(keyLength);
  buffer.putInt(valueLength);
  // copy the prefix
  if (commonLength > 0) {
    keyOffset = buffer.position();
    ByteBufferUtils.copyFromBufferToBuffer(buffer, buffer, prevKeyOffset,
        commonLength);
  } else {
    keyOffset = buffer.position();
  }
  // copy the rest of the key and the value
  int len = keyLength - commonLength + valueLength;
  ByteBufferUtils.copyFromStreamToBuffer(buffer, source, len);
  return keyOffset;
}
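For orientation, the layout decodeKeyValue consumes is three compressed ints (key-suffix length, value length, shared-prefix length) followed by the key suffix and the value bytes. A hypothetical sketch of the matching encoder side, under the assumption that commonLength leading key bytes are shared with the previous key (PrefixEncodeSketch and encodeKeyValue are illustrative names, not HBase API):

import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class PrefixEncodeSketch {
  // Writes one prefix-encoded key/value in the format decodeKeyValue reads.
  static void encodeKeyValue(DataOutputStream out, byte[] key, byte[] value,
      int commonLength) throws IOException {
    ByteBufferUtils.putCompressedInt(out, key.length - commonLength); // key suffix length
    ByteBufferUtils.putCompressedInt(out, value.length);              // value length
    ByteBufferUtils.putCompressedInt(out, commonLength);              // shared prefix length
    out.write(key, commonLength, key.length - commonLength);          // key suffix bytes
    out.write(value);                                                 // full value bytes
  }
}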
Example 8: createSeeker
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public EncodedSeeker createSeeker(KVComparator comparator,
    final HFileBlockDecodingContext decodingCtx) {
  return new BufferedEncodedSeeker<SeekerState>(comparator, decodingCtx) {
    @Override
    protected void decodeNext() {
      current.keyLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.valueLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.lastCommonPrefix =
          ByteBufferUtils.readCompressedInt(currentBuffer);
      current.keyLength += current.lastCommonPrefix;
      current.ensureSpaceForKey();
      currentBuffer.get(current.keyBuffer, current.lastCommonPrefix,
          current.keyLength - current.lastCommonPrefix);
      current.valueOffset = currentBuffer.position();
      ByteBufferUtils.skip(currentBuffer, current.valueLength);
      if (includesTags()) {
        decodeTags();
      }
      if (includesMvcc()) {
        current.memstoreTS = ByteBufferUtils.readVLong(currentBuffer);
      } else {
        current.memstoreTS = 0;
      }
      current.nextKvOffset = currentBuffer.position();
    }

    @Override
    protected void decodeFirst() {
      ByteBufferUtils.skip(currentBuffer, Bytes.SIZEOF_INT);
      decodeNext();
    }
  };
}
Example 9: getFirstKeyInBlock
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public ByteBuffer getFirstKeyInBlock(ByteBuffer block) {
  block.mark();
  block.position(Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE);
  int keyLength = ByteBufferUtils.readCompressedInt(block);
  ByteBufferUtils.readCompressedInt(block); // valueLength
  ByteBufferUtils.readCompressedInt(block); // commonLength
  int pos = block.position();
  block.reset();
  // Account for the buffer's arrayOffset(); wrapping at the bare position is
  // only correct when the backing array starts at offset zero (see Example 13).
  return ByteBuffer.wrap(block.array(), block.arrayOffset() + pos, keyLength).slice();
}
Example 10: uncompressKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
private int uncompressKeyValue(DataInputStream source, ByteBuffer buffer,
    int prevKeyOffset)
    throws IOException, EncoderBufferTooSmallException {
  int keyLength = ByteBufferUtils.readCompressedInt(source);
  int valueLength = ByteBufferUtils.readCompressedInt(source);
  int commonLength = ByteBufferUtils.readCompressedInt(source);
  int keyOffset;
  keyLength += commonLength;
  ByteBufferUtils.ensureSpace(buffer, keyLength + valueLength
      + KeyValue.ROW_OFFSET);
  buffer.putInt(keyLength);
  buffer.putInt(valueLength);
  // copy the prefix
  if (commonLength > 0) {
    keyOffset = buffer.position();
    ByteBufferUtils.copyFromBufferToBuffer(buffer, buffer, prevKeyOffset,
        commonLength);
  } else {
    keyOffset = buffer.position();
  }
  // copy the rest of the key and the value
  int len = keyLength - commonLength + valueLength;
  ByteBufferUtils.copyFromStreamToBuffer(buffer, source, len);
  return keyOffset;
}
Example 11: getFirstKeyInBlock
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public ByteBuffer getFirstKeyInBlock(ByteBuffer block) {
  block.mark();
  block.position(Bytes.SIZEOF_INT);
  int keyLength = ByteBufferUtils.readCompressedInt(block);
  ByteBufferUtils.readCompressedInt(block); // valueLength
  int commonLength = ByteBufferUtils.readCompressedInt(block);
  if (commonLength != 0) {
    throw new AssertionError("Nonzero common length in the first key in "
        + "block: " + commonLength);
  }
  int pos = block.position();
  block.reset();
  // Account for the buffer's arrayOffset(), as in Example 4.
  return ByteBuffer.wrap(block.array(), block.arrayOffset() + pos, keyLength).slice();
}
Example 12: createSeeker
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public EncodedSeeker createSeeker(RawComparator<byte[]> comparator,
    final boolean includesMemstoreTS) {
  return new BufferedEncodedSeeker<SeekerState>(comparator) {
    @Override
    protected void decodeNext() {
      current.keyLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.valueLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.lastCommonPrefix =
          ByteBufferUtils.readCompressedInt(currentBuffer);
      current.keyLength += current.lastCommonPrefix;
      current.ensureSpaceForKey();
      currentBuffer.get(current.keyBuffer, current.lastCommonPrefix,
          current.keyLength - current.lastCommonPrefix);
      current.valueOffset = currentBuffer.position();
      ByteBufferUtils.skip(currentBuffer, current.valueLength);
      if (includesMemstoreTS) {
        current.memstoreTS = ByteBufferUtils.readVLong(currentBuffer);
      } else {
        current.memstoreTS = 0;
      }
      current.nextKvOffset = currentBuffer.position();
    }

    @Override
    protected void decodeFirst() {
      ByteBufferUtils.skip(currentBuffer, Bytes.SIZEOF_INT);
      decodeNext();
    }
  };
}
Example 13: getFirstKeyInBlock
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public ByteBuffer getFirstKeyInBlock(ByteBuffer block) {
  block.mark();
  block.position(Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE);
  int keyLength = ByteBufferUtils.readCompressedInt(block);
  ByteBufferUtils.readCompressedInt(block); // valueLength
  ByteBufferUtils.readCompressedInt(block); // commonLength
  int pos = block.position();
  block.reset();
  return ByteBuffer.wrap(block.array(), block.arrayOffset() + pos, keyLength)
      .slice();
}
Example 14: afterDecodingKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
protected final void afterDecodingKeyValue(DataInputStream source,
    ByteBuffer dest, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
  if (decodingCtx.getHFileContext().isIncludesTags()) {
    short tagsLength = (short) ByteBufferUtils.readCompressedInt(source);
    dest.putShort(tagsLength);
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = decodingCtx.getTagCompressionContext();
      // When tag compression is used in this file, tagCompressionContext will be non-null.
      if (tagCompressionContext != null) {
        tagCompressionContext.uncompressTags(source, dest, tagsLength);
      } else {
        ByteBufferUtils.copyFromStreamToBuffer(dest, source, tagsLength);
      }
    }
  }
  if (decodingCtx.getHFileContext().isIncludesMvcc()) {
    long memstoreTS = -1;
    try {
      // Copy the memstore timestamp from the data input stream to the byte buffer.
      memstoreTS = WritableUtils.readVLong(source);
      ByteBufferUtils.writeVLong(dest, memstoreTS);
    } catch (IOException ex) {
      throw new RuntimeException("Unable to copy memstore timestamp " +
          memstoreTS + " after decoding a key/value");
    }
  }
}
Example 15: createSeeker
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public EncodedSeeker createSeeker(KVComparator comparator,
    final boolean includesMemstoreTS) {
  return new BufferedEncodedSeeker<SeekerState>(comparator) {
    @Override
    protected void decodeNext() {
      current.keyLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.valueLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.lastCommonPrefix =
          ByteBufferUtils.readCompressedInt(currentBuffer);
      current.keyLength += current.lastCommonPrefix;
      current.ensureSpaceForKey();
      currentBuffer.get(current.keyBuffer, current.lastCommonPrefix,
          current.keyLength - current.lastCommonPrefix);
      current.valueOffset = currentBuffer.position();
      ByteBufferUtils.skip(currentBuffer, current.valueLength);
      if (includesMemstoreTS) {
        current.memstoreTS = ByteBufferUtils.readVLong(currentBuffer);
      } else {
        current.memstoreTS = 0;
      }
      current.nextKvOffset = currentBuffer.position();
    }

    @Override
    protected void decodeFirst() {
      ByteBufferUtils.skip(currentBuffer, Bytes.SIZEOF_INT);
      decodeNext();
    }
  };
}