This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.BufferGrabbingByteArrayOutputStream. If you are wondering what BufferGrabbingByteArrayOutputStream is for and how to use it, the curated class code examples below may help.
BufferGrabbingByteArrayOutputStream is a nested class of org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer. Three code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
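Before the examples, it helps to know what the class does: it is a small helper that exposes the internal byte[] of a ByteArrayOutputStream without copying it. When the examples below call baos.writeTo(stream), ByteArrayOutputStream passes its backing array to stream.write(byte[], int, int), and the grabbing stream simply keeps a reference to it. A minimal sketch of that idea (paraphrased from its behavior; not guaranteed to match the HBase source line for line):

// Sketch: writeTo() on the source ByteArrayOutputStream calls
// write(buf, 0, count) with its internal array, which we capture
// instead of copying.
private static class BufferGrabbingByteArrayOutputStream extends ByteArrayOutputStream {
  private byte[] ourBytes;

  @Override
  public synchronized void write(byte[] b, int off, int len) {
    this.ourBytes = b; // keep a reference to the caller's buffer, no copy
  }

  public byte[] getBuffer() {
    return ourBytes;
  }
}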
Example 1: encodeKeyValues
import org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.BufferGrabbingByteArrayOutputStream; // import the required package/class
static ByteBuffer encodeKeyValues(DataBlockEncoding encoding, List<KeyValue> kvs,
    HFileBlockEncodingContext encodingContext) throws IOException {
  DataBlockEncoder encoder = encoding.getEncoder();
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER); // reserve space for the block header
  DataOutputStream dos = new DataOutputStream(baos);
  encoder.startBlockEncoding(encodingContext, dos);
  for (KeyValue kv : kvs) {
    encoder.encode(kv, encodingContext, dos);
  }
  // Grab the internal buffer of baos without copying: writeTo() hands the
  // backing array to the grabbing stream, which just keeps a reference.
  BufferGrabbingByteArrayOutputStream stream = new BufferGrabbingByteArrayOutputStream();
  baos.writeTo(stream);
  encoder.endBlockEncoding(encodingContext, dos, stream.getBuffer());
  // Strip the dummy header and return only the encoded payload.
  byte[] encodedData = new byte[baos.size() - ENCODED_DATA_OFFSET];
  System.arraycopy(baos.toByteArray(), ENCODED_DATA_OFFSET, encodedData, 0, encodedData.length);
  return ByteBuffer.wrap(encodedData);
}
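A hypothetical call site for this helper (the generator and the choice of PREFIX encoding are illustrative assumptions, not from the original example):

// Hypothetical usage: encode a generated KeyValue list with one encoding.
List<KeyValue> kvs = generator.generateTestKeyValues(100); // assumed test data generator
HFileContext meta = new HFileContextBuilder().withIncludesMvcc(false).build();
HFileBlockEncodingContext ctx = new HFileBlockDefaultEncodingContext(
    DataBlockEncoding.PREFIX, HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
ByteBuffer encoded = encodeKeyValues(DataBlockEncoding.PREFIX, kvs, ctx);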
Example 2: createBlockOnDisk
import org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.BufferGrabbingByteArrayOutputStream; // import the required package/class
private HFileBlock createBlockOnDisk(List<KeyValue> kvs, HFileBlock block, boolean useTags)
    throws IOException {
  int size;
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER,
      block.getHFileContext());

  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  baos.write(block.getDummyHeaderForVersion()); // reserve header space
  DataOutputStream dos = new DataOutputStream(baos);
  blockEncoder.startBlockEncoding(context, dos);
  for (KeyValue kv : kvs) {
    blockEncoder.encode(kv, context, dos);
  }
  // Hand the encoder the raw backing array so it can finish the block in place.
  BufferGrabbingByteArrayOutputStream stream = new BufferGrabbingByteArrayOutputStream();
  baos.writeTo(stream);
  blockEncoder.endBlockEncoding(context, dos, stream.getBuffer(), BlockType.DATA);
  byte[] encodedBytes = baos.toByteArray();
  size = encodedBytes.length - block.getDummyHeaderForVersion().length;
  return new HFileBlock(context.getBlockType(), size, size, -1, ByteBuffer.wrap(encodedBytes),
      HFileBlock.FILL_HEADER, 0, block.getOnDiskDataSizeWithHeader(), block.getHFileContext());
}
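For orientation, a hedged sketch of how a test might drive this helper (the generator, the getSampleHFileBlock helper, and the assertion are assumptions for illustration; only createBlockOnDisk comes from the example above):

// Hypothetical fragment inside the same test class as createBlockOnDisk.
List<KeyValue> kvs = generator.generateTestKeyValues(60, useTags); // assumed helper
HFileBlock block = getSampleHFileBlock(kvs, useTags);              // assumed helper
HFileBlock blockOnDisk = createBlockOnDisk(kvs, block, useTags);
// After encoding, the context reports the block as ENCODED_DATA.
assertEquals(BlockType.ENCODED_DATA, blockOnDisk.getBlockType());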
Example 3: testEncodersOnDataset
import org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.BufferGrabbingByteArrayOutputStream; // import the required package/class
private void testEncodersOnDataset(List<KeyValue> kvList, boolean includesMemstoreTS,
    boolean includesTags) throws IOException {
  ByteBuffer unencodedDataBuf = RedundantKVGenerator.convertKvToByteBuffer(kvList,
      includesMemstoreTS);
  HFileContext fileContext = new HFileContextBuilder().withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(includesTags).build();
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    DataBlockEncoder encoder = encoding.getEncoder();
    if (encoder == null) {
      continue; // e.g. DataBlockEncoding.NONE has no encoder
    }
    HFileBlockEncodingContext encodingContext = new HFileBlockDefaultEncodingContext(encoding,
        HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER); // reserve space for the block header
    DataOutputStream dos = new DataOutputStream(baos);
    encoder.startBlockEncoding(encodingContext, dos);
    for (KeyValue kv : kvList) {
      encoder.encode(kv, encodingContext, dos);
    }
    // Grab the backing buffer without copying and let the encoder finalize the block.
    BufferGrabbingByteArrayOutputStream stream = new BufferGrabbingByteArrayOutputStream();
    baos.writeTo(stream);
    encoder.endBlockEncoding(encodingContext, dos, stream.getBuffer());
    byte[] encodedData = baos.toByteArray();

    testAlgorithm(encodedData, unencodedDataBuf, encoder);
  }
}
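And a hypothetical entry point for the sweep above (the RedundantKVGenerator usage is an assumption, inferred from the convertKvToByteBuffer call in the example):

// Hypothetical caller: generate a dataset with redundancy so the
// delta/prefix encoders have something to compress, then test them all.
RedundantKVGenerator generator = new RedundantKVGenerator();
List<KeyValue> kvList = generator.generateTestKeyValues(10000);
testEncodersOnDataset(kvList, /* includesMemstoreTS */ true, /* includesTags */ false);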