This article collects representative usage examples of the Java method org.apache.hadoop.hbase.util.ByteBufferUtils.writeVLong. If you are unsure what ByteBufferUtils.writeVLong does or how to call it, the curated examples below should help; see also the enclosing class, org.apache.hadoop.hbase.util.ByteBufferUtils.
Eight code examples of ByteBufferUtils.writeVLong follow, ordered by popularity by default. A minimal standalone sketch of the method comes first.
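Before the examples, here is a minimal, self-contained sketch (my own illustration, not taken from the examples below) of what ByteBufferUtils.writeVLong does: it writes a long in Hadoop's variable-length vlong encoding into a ByteBuffer, advancing the buffer's position past the encoded bytes. WritableUtils.getVIntSize, which sizes the buffer here, appears in the same role throughout the examples.

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.io.WritableUtils;

public class WriteVLongDemo {  // hypothetical class name, for illustration only
  public static void main(String[] args) {
    long mvccVersion = 123456789L;
    // Size the buffer to the vlong's encoded length (1 to 9 bytes).
    ByteBuffer bb = ByteBuffer.allocate(WritableUtils.getVIntSize(mvccVersion));
    // Write the value in Hadoop vlong format; the position advances past it.
    ByteBufferUtils.writeVLong(bb, mvccVersion);
    System.out.println("encoded bytes: " + bb.position());
  }
}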
Example 1: decodeKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * I don't think this method is called during normal HBase operation, so efficiency is not
 * important.
 */
public ByteBuffer decodeKeyValues(DataInputStream source, int allocateHeaderLength,
    int skipLastBytes, HFileBlockDecodingContext decodingCtx) throws IOException {
  ByteBuffer sourceAsBuffer = ByteBufferUtils.drainInputStreamToBuffer(source);// waste
  sourceAsBuffer.mark();
  PrefixTreeBlockMeta blockMeta = new PrefixTreeBlockMeta(sourceAsBuffer);
  sourceAsBuffer.rewind();
  int numV1BytesWithHeader = allocateHeaderLength + blockMeta.getNumKeyValueBytes();
  byte[] keyValueBytesWithHeader = new byte[numV1BytesWithHeader];
  ByteBuffer result = ByteBuffer.wrap(keyValueBytesWithHeader);
  result.rewind();
  CellSearcher searcher = null;
  try {
    boolean includesMvcc = decodingCtx.getHFileContext().isIncludesMvcc();
    searcher = DecoderFactory.checkOut(sourceAsBuffer, includesMvcc);
    while (searcher.advance()) {
      KeyValue currentCell = KeyValueUtil.copyToNewKeyValue(searcher.current());
      // needs to be modified for DirectByteBuffers. no existing methods to
      // write VLongs to byte[]
      int offset = result.arrayOffset() + result.position();
      System.arraycopy(currentCell.getBuffer(), currentCell.getOffset(), result.array(), offset,
          currentCell.getLength());
      int keyValueLength = KeyValueUtil.length(currentCell);
      ByteBufferUtils.skip(result, keyValueLength);
      offset += keyValueLength;
      if (includesMvcc) {
        ByteBufferUtils.writeVLong(result, currentCell.getMvccVersion());
      }
    }
    result.position(result.limit()); // make it appear as if we were appending
    return result;
  } finally {
    DecoderFactory.checkIn(searcher);
  }
}
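Note the checkOut/checkIn pairing here: the CellSearcher is borrowed from DecoderFactory inside the try block and returned in finally, so it is released back even if decoding throws partway through. After each cell is copied, the mvcc version is appended as a vlong via ByteBufferUtils.writeVLong, which is why the buffer must be backed by an array large enough for both.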
Example 2: appendToByteBuffer
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
public static void appendToByteBuffer(final ByteBuffer bb, final KeyValue kv,
    final boolean includeMvccVersion) {
  // keep pushing the limit out. assume enough capacity
  bb.limit(bb.position() + kv.getLength());
  bb.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
  if (includeMvccVersion) {
    int numMvccVersionBytes = WritableUtils.getVIntSize(kv.getMvccVersion());
    ByteBufferUtils.extendLimit(bb, numMvccVersionBytes);
    ByteBufferUtils.writeVLong(bb, kv.getMvccVersion());
  }
}
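A hedged usage sketch for Example 2 (demoAppend and the KeyValue arguments are my own illustration; it assumes the appendToByteBuffer method above is in scope): the caller must allocate enough capacity up front, since the method only pushes the limit out and assumes the bytes fit.

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical caller, for illustration only.
public static ByteBuffer demoAppend() {
  KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
      Bytes.toBytes("q"), Bytes.toBytes("value"));
  // Reserve the cell's bytes plus up to 9 bytes for the vlong-encoded mvcc version.
  ByteBuffer bb = ByteBuffer.allocate(kv.getLength() + 9);
  appendToByteBuffer(bb, kv, true);
  return bb; // position now sits just past the cell and its mvcc vlong
}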
Example 3: afterDecodingKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
protected final void afterDecodingKeyValue(DataInputStream source,
    ByteBuffer dest, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
  if (decodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = ByteBufferUtils.readCompressedInt(source);
    // Put as unsigned short
    dest.put((byte) ((tagsLength >> 8) & 0xff));
    dest.put((byte) (tagsLength & 0xff));
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = decodingCtx.getTagCompressionContext();
      // When tag compression is being used in this file, tagCompressionContext will be
      // non-null.
      if (tagCompressionContext != null) {
        tagCompressionContext.uncompressTags(source, dest, tagsLength);
      } else {
        ByteBufferUtils.copyFromStreamToBuffer(dest, source, tagsLength);
      }
    }
  }
  if (decodingCtx.getHFileContext().isIncludesMvcc()) {
    long memstoreTS = -1;
    try {
      // Copy memstore timestamp from the data input stream to the byte
      // buffer.
      memstoreTS = WritableUtils.readVLong(source);
      ByteBufferUtils.writeVLong(dest, memstoreTS);
    } catch (IOException ex) {
      throw new RuntimeException("Unable to copy memstore timestamp " +
          memstoreTS + " after decoding a key/value");
    }
  }
}
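The mvcc branch in Example 3 is a stream-to-buffer copy: WritableUtils.readVLong decodes the vlong from the DataInputStream, and ByteBufferUtils.writeVLong re-encodes it into the destination buffer in the same format (the HBase javadoc describes writeVLong as the ByteBuffer counterpart of WritableUtils.writeVLong). A standalone round-trip sketch of that assumption, my own illustration:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.io.WritableUtils;

public class VLongRoundTrip {  // hypothetical class name, for illustration only
  public static void main(String[] args) throws IOException {
    long memstoreTS = 42L;
    // Encode with Hadoop's WritableUtils, as an HFile writer would.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    WritableUtils.writeVLong(new DataOutputStream(baos), memstoreTS);
    // Decode from the stream and re-encode into a ByteBuffer, as in Example 3.
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    ByteBuffer dest = ByteBuffer.allocate(9); // a vlong is at most 9 bytes
    ByteBufferUtils.writeVLong(dest, WritableUtils.readVLong(in));
    System.out.println("copied " + dest.position() + " byte(s)");
  }
}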
Example 4: afterDecodingKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
protected final void afterDecodingKeyValue(DataInputStream source,
    ByteBuffer dest, boolean includesMemstoreTS) {
  if (includesMemstoreTS) {
    long memstoreTS = -1;
    try {
      // Copy memstore timestamp from the data input stream to the byte
      // buffer.
      memstoreTS = WritableUtils.readVLong(source);
      ByteBufferUtils.writeVLong(dest, memstoreTS);
    } catch (IOException ex) {
      throw new RuntimeException("Unable to copy memstore timestamp " +
          memstoreTS + " after decoding a key/value");
    }
  }
}
Example 5: afterDecodingKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
protected final void afterDecodingKeyValue(DataInputStream source,
    ByteBuffer dest, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
  if (decodingCtx.getHFileContext().isIncludesTags()) {
    short tagsLength = (short) ByteBufferUtils.readCompressedInt(source);
    dest.putShort(tagsLength);
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = decodingCtx.getTagCompressionContext();
      // When tag compression is being used in this file, tagCompressionContext will be
      // non-null.
      if (tagCompressionContext != null) {
        tagCompressionContext.uncompressTags(source, dest, tagsLength);
      } else {
        ByteBufferUtils.copyFromStreamToBuffer(dest, source, tagsLength);
      }
    }
  }
  if (decodingCtx.getHFileContext().isIncludesMvcc()) {
    long memstoreTS = -1;
    try {
      // Copy memstore timestamp from the data input stream to the byte
      // buffer.
      memstoreTS = WritableUtils.readVLong(source);
      ByteBufferUtils.writeVLong(dest, memstoreTS);
    } catch (IOException ex) {
      throw new RuntimeException("Unable to copy memstore timestamp " +
          memstoreTS + " after decoding a key/value");
    }
  }
}
Example 6: appendToByteBuffer
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
public static void appendToByteBuffer(final ByteBuffer bb, final KeyValue kv,
    final boolean includeMvccVersion) {
  // keep pushing the limit out. assume enough capacity
  bb.limit(bb.position() + kv.getLength());
  bb.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
  if (includeMvccVersion) {
    int numMvccVersionBytes = WritableUtils.getVIntSize(kv.getSequenceId());
    ByteBufferUtils.extendLimit(bb, numMvccVersionBytes);
    ByteBufferUtils.writeVLong(bb, kv.getSequenceId());
  }
}
Example 7: appendToByteBuffer
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
public static void appendToByteBuffer(final ByteBuffer bb, final KeyValue kv,
    final boolean includeMvccVersion) {
  // keep pushing the limit out. assume enough capacity
  bb.limit(bb.position() + kv.getLength());
  bb.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
  // TODO tags
  if (includeMvccVersion) {
    // TODO: 0.98 compatibility
    int numMvccVersionBytes = WritableUtils.getVIntSize(0/*kv.getMemstoreTS()*/);
    bb.limit(bb.limit() + numMvccVersionBytes);
    ByteBufferUtils.writeVLong(bb, 0/*kv.getMemstoreTS()*/);
  }
}
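Note that this 0.98-compatibility variant writes a literal 0 where the cell's memstore timestamp would go (the real call is left commented out), so the on-disk layout still contains a vlong in the mvcc slot even though the actual value is discarded.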
Example 8: decodeKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * I don't think this method is called during normal HBase operation, so efficiency is not
 * important.
 */
@Override
public ByteBuffer decodeKeyValues(DataInputStream source, int allocateHeaderLength,
    int skipLastBytes, boolean includesMvccVersion) throws IOException {
  ByteBuffer sourceAsBuffer = ByteBufferUtils.drainInputStreamToBuffer(source);// waste
  sourceAsBuffer.mark();
  PrefixTreeBlockMeta blockMeta = new PrefixTreeBlockMeta(sourceAsBuffer);
  sourceAsBuffer.rewind();
  int numV1BytesWithHeader = allocateHeaderLength + blockMeta.getNumKeyValueBytes();
  byte[] keyValueBytesWithHeader = new byte[numV1BytesWithHeader];
  ByteBuffer result = ByteBuffer.wrap(keyValueBytesWithHeader);
  result.rewind();
  CellSearcher searcher = null;
  try {
    searcher = DecoderFactory.checkOut(sourceAsBuffer, includesMvccVersion);
    while (searcher.advance()) {
      KeyValue currentCell = KeyValueUtil.copyToNewKeyValue(searcher.current());
      // needs to be modified for DirectByteBuffers. no existing methods to
      // write VLongs to byte[]
      int offset = result.arrayOffset() + result.position();
      KeyValueUtil.appendToByteArray(currentCell, result.array(), offset);
      int keyValueLength = KeyValueUtil.length(currentCell);
      ByteBufferUtils.skip(result, keyValueLength);
      offset += keyValueLength;
      if (includesMvccVersion) {
        ByteBufferUtils.writeVLong(result, currentCell.getMvccVersion());
      }
    }
    result.position(result.limit()); // make it appear as if we were appending
    return result;
  } finally {
    DecoderFactory.checkIn(searcher);
  }
}