This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.ByteBufferUtils.putCompressedInt. If you are wondering what ByteBufferUtils.putCompressedInt does, how to use it, or where to find examples of it, the curated code samples here should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.util.ByteBufferUtils.
Below are 8 code examples of ByteBufferUtils.putCompressedInt, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
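Before the examples, a quick orientation: putCompressedInt writes an int to an OutputStream in a compact variable-length format (7 bits of payload per byte, with the high bit marking a continuation byte), and ByteBufferUtils.readCompressedInt reads it back. A minimal round-trip sketch; the class name and values are made up for illustration, while the two ByteBufferUtils calls are the real API used throughout the examples below:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class PutCompressedIntDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    // Small values fit in a single byte; larger values spill into extra bytes.
    ByteBufferUtils.putCompressedInt(out, 0);
    ByteBufferUtils.putCompressedInt(out, 300);

    ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
    System.out.println(ByteBufferUtils.readCompressedInt(in)); // 0
    System.out.println(ByteBufferUtils.readCompressedInt(in)); // 300
  }
}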
Example 1: afterEncodingKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the class that provides the featured method
/**
 * Writes the tags (optionally dictionary-compressed) and the memstore timestamp
 * that trail an encoded key/value, when the HFile context includes them.
 *
 * @param cell the cell whose tags and sequence id are written
 * @param out the stream the encoded block is being written to
 * @param encodingCtx the context carrying the HFile and tag-compression settings
 * @return unencoded size added
 * @throws IOException if writing to the stream fails
 */
protected final int afterEncodingKeyValue(Cell cell, DataOutputStream out,
    HFileBlockDefaultEncodingContext encodingCtx) throws IOException {
  int size = 0;
  if (encodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = cell.getTagsLength();
    ByteBufferUtils.putCompressedInt(out, tagsLength);
    // There are some tags to be written
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = encodingCtx.getTagCompressionContext();
      // When tag compression is enabled, tagCompressionContext will be non-null. Write the
      // tags using Dictionary compression in that case
      if (tagCompressionContext != null) {
        tagCompressionContext
            .compressTags(out, cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
      } else {
        out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
      }
    }
    size += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
  }
  if (encodingCtx.getHFileContext().isIncludesMvcc()) {
    // Copy the memstore timestamp (sequence id) from the cell to the output stream.
    long memstoreTS = cell.getSequenceId();
    WritableUtils.writeVLong(out, memstoreTS);
    // TODO use a writeVLong which returns the #bytes written so that 2 time parsing can be
    // avoided.
    size += WritableUtils.getVIntSize(memstoreTS);
  }
  return size;
}
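On the read path, this trailer is consumed symmetrically: a compressed int for the tags length, the raw tag bytes, then a vlong for the memstore timestamp. A hedged sketch of that decode side; the class and helper names are hypothetical, not HBase API:

import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.io.WritableUtils;

public class TrailerReadSketch {
  // Hypothetical helper mirroring afterEncodingKeyValue above. It covers only the
  // uncompressed-tags case; dictionary-compressed tags would need the decode-side
  // TagCompressionContext rather than readFully.
  static long readTagsAndMvcc(DataInputStream in, boolean includesTags, boolean includesMvcc)
      throws IOException {
    if (includesTags) {
      int tagsLength = ByteBufferUtils.readCompressedInt(in); // written by putCompressedInt
      byte[] tags = new byte[tagsLength];
      in.readFully(tags); // raw tag bytes
    }
    // The memstore timestamp was written with WritableUtils.writeVLong.
    return includesMvcc ? WritableUtils.readVLong(in) : 0L;
  }
}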
Example 2: internalEncode
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the class that provides the featured method
@Override
public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext,
    DataOutputStream out) throws IOException {
  int klength = KeyValueUtil.keyLength(cell);
  int vlength = cell.getValueLength();
  EncodingState state = encodingContext.getEncodingState();
  if (state.prevCell == null) {
    // no previous cell: copy the whole key, the common prefix length is 0
    ByteBufferUtils.putCompressedInt(out, klength);
    ByteBufferUtils.putCompressedInt(out, vlength);
    ByteBufferUtils.putCompressedInt(out, 0);
    CellUtil.writeFlatKey(cell, out);
  } else {
    // find a common prefix and skip it
    int common = CellUtil.findCommonPrefixInFlatKey(cell, state.prevCell, true, true);
    ByteBufferUtils.putCompressedInt(out, klength - common);
    ByteBufferUtils.putCompressedInt(out, vlength);
    ByteBufferUtils.putCompressedInt(out, common);
    writeKeyExcludingCommon(cell, common, out);
  }
  // Write the value part
  out.write(cell.getValueArray(), cell.getValueOffset(), vlength);
  int size = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
  size += afterEncodingKeyValue(cell, out, encodingContext);
  state.prevCell = cell;
  return size;
}
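Each entry therefore begins with three compressed ints: the length of the non-shared key suffix, the value length, and the length of the prefix shared with the previous key. A hedged sketch of decoding one such entry; the class and helper are illustrative, as the real decoding lives in the encoder's seeker:

import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class PrefixEntryReadSketch {
  // Illustrative only: rebuilds one prefix-encoded key/value given the previous key.
  // Mirrors the three putCompressedInt calls in internalEncode above.
  static byte[][] readOne(DataInputStream in, byte[] prevKey) throws IOException {
    int suffixLength = ByteBufferUtils.readCompressedInt(in); // klength - common
    int valueLength = ByteBufferUtils.readCompressedInt(in);  // vlength
    int common = ByteBufferUtils.readCompressedInt(in);       // shared prefix length

    byte[] key = new byte[common + suffixLength];
    System.arraycopy(prevKey, 0, key, 0, common); // shared prefix comes from the previous key
    in.readFully(key, common, suffixLength);      // the rest comes off the stream

    byte[] value = new byte[valueLength];
    in.readFully(value);
    return new byte[][] { key, value };
  }
}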
Example 3: addKV
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the class that provides the featured method
private int addKV(int prevKeyOffset, DataOutputStream out,
    ByteBuffer in, int prevKeyLength) throws IOException {
  int keyLength = in.getInt();
  int valueLength = in.getInt();
  if (prevKeyOffset == -1) {
    // first key of the block: no previous key, so copy the whole key (common prefix is 0)
    ByteBufferUtils.putCompressedInt(out, keyLength);
    ByteBufferUtils.putCompressedInt(out, valueLength);
    ByteBufferUtils.putCompressedInt(out, 0);
    ByteBufferUtils.moveBufferToStream(out, in, keyLength + valueLength);
  } else {
    // find a common prefix and skip it
    int common = ByteBufferUtils.findCommonPrefix(
        in, prevKeyOffset + KeyValue.ROW_OFFSET,
        in.position(),
        Math.min(prevKeyLength, keyLength));
    ByteBufferUtils.putCompressedInt(out, keyLength - common);
    ByteBufferUtils.putCompressedInt(out, valueLength);
    ByteBufferUtils.putCompressedInt(out, common);
    ByteBufferUtils.skip(in, common);
    ByteBufferUtils.moveBufferToStream(out, in, keyLength - common
        + valueLength);
  }
  return keyLength;
}
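ByteBufferUtils.findCommonPrefix is essentially a byte-wise scan; a minimal stand-in showing the idea (not the HBase implementation):

public class CommonPrefixSketch {
  // Minimal stand-in for ByteBufferUtils.findCommonPrefix: counts the leading bytes
  // two keys share, capped at the shorter key's length.
  static int commonPrefix(byte[] a, int aOff, byte[] b, int bOff, int limit) {
    int n = 0;
    while (n < limit && a[aOff + n] == b[bOff + n]) {
      n++;
    }
    return n;
  }
}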
Example 4: afterEncodingKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the class that provides the featured method
protected final void afterEncodingKeyValue(ByteBuffer in,
    DataOutputStream out, HFileBlockDefaultEncodingContext encodingCtx) throws IOException {
  if (encodingCtx.getHFileContext().isIncludesTags()) {
    short tagsLength = in.getShort();
    ByteBufferUtils.putCompressedInt(out, tagsLength);
    // There are some tags to be written
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = encodingCtx.getTagCompressionContext();
      // When tag compression is enabled, tagCompressionContext will have a not null value. Write
      // the tags using Dictionary compression in such a case
      if (tagCompressionContext != null) {
        tagCompressionContext.compressTags(out, in, tagsLength);
      } else {
        ByteBufferUtils.moveBufferToStream(out, in, tagsLength);
      }
    }
  }
  if (encodingCtx.getHFileContext().isIncludesMvcc()) {
    // Copy memstore timestamp from the byte buffer to the output stream.
    long memstoreTS = -1;
    try {
      memstoreTS = ByteBufferUtils.readVLong(in);
      WritableUtils.writeVLong(out, memstoreTS);
    } catch (IOException ex) {
      throw new RuntimeException("Unable to copy memstore timestamp " +
          memstoreTS + " after encoding a key/value");
    }
  }
}
Example 5: afterEncodingKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the class that provides the featured method
/**
 * Writes the tags (optionally dictionary-compressed) and the memstore timestamp
 * that trail an encoded key/value, when the HFile context includes them.
 *
 * @param cell the cell whose tags and sequence id are written
 * @param out the stream the encoded block is being written to
 * @param encodingCtx the context carrying the HFile and tag-compression settings
 * @return unencoded size added
 * @throws IOException if writing to the stream fails
 */
protected final int afterEncodingKeyValue(Cell cell, DataOutputStream out,
    HFileBlockDefaultEncodingContext encodingCtx) throws IOException {
  int size = 0;
  if (encodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = cell.getTagsLength();
    ByteBufferUtils.putCompressedInt(out, tagsLength);
    // There are some tags to be written
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = encodingCtx.getTagCompressionContext();
      // When tag compression is enabled, tagCompressionContext will be non-null. Write the
      // tags using Dictionary compression in that case
      if (tagCompressionContext != null) {
        // Not passing tagsLength considering that parsing of the tagsLength is not costly
        PrivateCellUtil.compressTags(out, cell, tagCompressionContext);
      } else {
        PrivateCellUtil.writeTags(out, cell, tagsLength);
      }
    }
    size += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
  }
  if (encodingCtx.getHFileContext().isIncludesMvcc()) {
    // Copy the memstore timestamp (sequence id) from the cell to the output stream.
    long memstoreTS = cell.getSequenceId();
    WritableUtils.writeVLong(out, memstoreTS);
    // TODO use a writeVLong which returns the #bytes written so that 2 time parsing can be
    // avoided.
    size += WritableUtils.getVIntSize(memstoreTS);
  }
  return size;
}
Example 6: internalEncode
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the class that provides the featured method
@Override
public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext,
    DataOutputStream out) throws IOException {
  int klength = KeyValueUtil.keyLength(cell);
  int vlength = cell.getValueLength();
  EncodingState state = encodingContext.getEncodingState();
  if (state.prevCell == null) {
    // no previous cell: copy the whole key, the common prefix length is 0
    ByteBufferUtils.putCompressedInt(out, klength);
    ByteBufferUtils.putCompressedInt(out, vlength);
    ByteBufferUtils.putCompressedInt(out, 0);
    PrivateCellUtil.writeFlatKey(cell, (DataOutput) out);
  } else {
    // find a common prefix and skip it
    int common = PrivateCellUtil.findCommonPrefixInFlatKey(cell, state.prevCell, true, true);
    ByteBufferUtils.putCompressedInt(out, klength - common);
    ByteBufferUtils.putCompressedInt(out, vlength);
    ByteBufferUtils.putCompressedInt(out, common);
    writeKeyExcludingCommon(cell, common, out);
  }
  // Write the value part
  PrivateCellUtil.writeValue(out, cell, vlength);
  int size = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
  size += afterEncodingKeyValue(cell, out, encodingContext);
  state.prevCell = cell;
  return size;
}
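Examples 5 and 6 appear to be the HBase 2.x counterparts of examples 1 and 2: the flat-key and tag helpers moved from CellUtil to PrivateCellUtil, and the value is written through PrivateCellUtil.writeValue, which also handles ByteBuffer-backed (off-heap) cells instead of assuming an accessible value array.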
Example 7: afterEncodingKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the class that provides the featured method
/**
 * Writes the tags (optionally dictionary-compressed) and the memstore timestamp
 * that trail an encoded key/value, when the HFile context includes them.
 *
 * @param kv the KeyValue whose tags and mvcc version are written
 * @param out the stream the encoded block is being written to
 * @param encodingCtx the context carrying the HFile and tag-compression settings
 * @return unencoded size added
 * @throws IOException if writing to the stream fails
 */
protected final int afterEncodingKeyValue(KeyValue kv, DataOutputStream out,
    HFileBlockDefaultEncodingContext encodingCtx) throws IOException {
  int size = 0;
  if (encodingCtx.getHFileContext().isIncludesTags()) {
    short tagsLength = kv.getTagsLength();
    ByteBufferUtils.putCompressedInt(out, tagsLength);
    // There are some tags to be written
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = encodingCtx.getTagCompressionContext();
      // When tag compression is enabled, tagCompressionContext will be non-null. Write the
      // tags using Dictionary compression in that case
      if (tagCompressionContext != null) {
        tagCompressionContext
            .compressTags(out, kv.getTagsArray(), kv.getTagsOffset(), tagsLength);
      } else {
        out.write(kv.getTagsArray(), kv.getTagsOffset(), tagsLength);
      }
    }
    size += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
  }
  if (encodingCtx.getHFileContext().isIncludesMvcc()) {
    // Copy the memstore timestamp (mvcc version) from the KeyValue to the output stream.
    long memstoreTS = kv.getMvccVersion();
    WritableUtils.writeVLong(out, memstoreTS);
    // TODO use a writeVLong which returns the #bytes written so that 2 time parsing can be
    // avoided.
    size += WritableUtils.getVIntSize(memstoreTS);
  }
  return size;
}
Example 8: internalEncode
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the class that provides the featured method
@Override
public int internalEncode(KeyValue kv, HFileBlockDefaultEncodingContext encodingContext,
    DataOutputStream out) throws IOException {
  byte[] kvBuf = kv.getBuffer();
  int klength = kv.getKeyLength();
  int vlength = kv.getValueLength();
  EncodingState state = encodingContext.getEncodingState();
  if (state.prevKv == null) {
    // no previous KeyValue: copy the whole key, the common prefix length is 0
    ByteBufferUtils.putCompressedInt(out, klength);
    ByteBufferUtils.putCompressedInt(out, vlength);
    ByteBufferUtils.putCompressedInt(out, 0);
    out.write(kvBuf, kv.getKeyOffset(), klength + vlength);
  } else {
    // find a common prefix and skip it
    int common = ByteBufferUtils.findCommonPrefix(state.prevKv.getBuffer(),
        state.prevKv.getKeyOffset(), state.prevKv.getKeyLength(), kvBuf, kv.getKeyOffset(),
        kv.getKeyLength());
    ByteBufferUtils.putCompressedInt(out, klength - common);
    ByteBufferUtils.putCompressedInt(out, vlength);
    ByteBufferUtils.putCompressedInt(out, common);
    out.write(kvBuf, kv.getKeyOffset() + common, klength - common + vlength);
  }
  int size = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
  size += afterEncodingKeyValue(kv, out, encodingContext);
  state.prevKv = kv;
  return size;
}
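These internalEncode variants look like they come from HBase's prefix-delta block encoder. As a usage note, a data block encoding is normally enabled per column family rather than called directly; a sketch assuming the HBase 2.x client API, with the family name "cf" chosen for illustration:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;

public class EnablePrefixEncoding {
  public static void main(String[] args) {
    // Enable PREFIX data block encoding on a column family named "cf".
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setDataBlockEncoding(DataBlockEncoding.PREFIX)
        .build();
    System.out.println(cf.getDataBlockEncoding()); // PREFIX
  }
}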