This page collects typical usage examples of the Java method org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext.getUncompressedBytesWithHeader. If you are unsure what HFileBlockEncodingContext.getUncompressedBytesWithHeader does or how to call it, the curated examples below should help; for more background, see the enclosing class org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext.
Seven code examples of HFileBlockEncodingContext.getUncompressedBytesWithHeader follow, ordered by popularity by default.
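Before diving in, here is a minimal sketch of the call pattern the examples share, written against the HFileContext-based API used in Examples 2 and 5. It is illustrative only: blockEncoder, rawBlockBuffer, and fileContext are hypothetical placeholders, not names defined in the snippets below.

  // A minimal sketch, assuming an HBase version with the HFileContext-based
  // API; blockEncoder, rawBlockBuffer and fileContext are hypothetical.
  HFileBlockEncodingContext ctx = new HFileBlockDefaultEncodingContext(
      blockEncoder.getDataBlockEncoding(),   // the configured DataBlockEncoding
      HConstants.HFILEBLOCK_DUMMY_HEADER,    // placeholder header to reserve space
      fileContext);                          // compression / mvcc / tags settings
  blockEncoder.beforeWriteToDisk(rawBlockBuffer, ctx, BlockType.DATA);

  // The encoded payload, still prefixed with the dummy header:
  byte[] withHeader = ctx.getUncompressedBytesWithHeader();
  int payloadSize = withHeader.length - HConstants.HFILEBLOCK_DUMMY_HEADER.length;

The common thread: the context does the buffering, and getUncompressedBytesWithHeader() hands back header-plus-payload as a single array, so callers always subtract a header length to recover the payload size.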
Example 1: encodeDataBlock
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the package/class the method depends on
private HFileBlock encodeDataBlock(HFileBlock block,
    DataBlockEncoding algo, boolean includesMemstoreTS,
    HFileBlockEncodingContext encodingCtx) {
  encodeBufferToHFileBlockBuffer(
      block.getBufferWithoutHeader(), algo, includesMemstoreTS, encodingCtx);
  // The context now holds the encoded payload, already prefixed with a header.
  byte[] encodedUncompressedBytes =
      encodingCtx.getUncompressedBytesWithHeader();
  ByteBuffer bufferWrapper = ByteBuffer.wrap(encodedUncompressedBytes);
  int sizeWithoutHeader = bufferWrapper.limit() - encodingCtx.getHeaderSize();
  HFileBlock encodedBlock = new HFileBlock(BlockType.ENCODED_DATA,
      block.getOnDiskSizeWithoutHeader(),
      sizeWithoutHeader, block.getPrevBlockOffset(),
      bufferWrapper, HFileBlock.FILL_HEADER, block.getOffset(),
      includesMemstoreTS, block.getMinorVersion(),
      block.getBytesPerChecksum(), block.getChecksumType(),
      block.getOnDiskDataSizeWithHeader());
  return encodedBlock;
}
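Two details worth noting in Example 1: the payload size is recovered by subtracting the header size the context itself reports (encodingCtx.getHeaderSize()), and HFileBlock.FILL_HEADER asks the constructor to overwrite the header fields inside the wrapped buffer, so the dummy header left by the encoder is replaced with real values.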
Example 2: createBlockOnDisk
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the package/class the method depends on
private HFileBlock createBlockOnDisk(HFileBlock block, boolean useTags) throws IOException {
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      blockEncoder.getDataBlockEncoding(),
      HConstants.HFILEBLOCK_DUMMY_HEADER, block.getHFileContext());
  context.setDummyHeader(block.getDummyHeaderForVersion());
  blockEncoder.beforeWriteToDisk(block.getBufferWithoutHeader(), context, block.getBlockType());
  byte[] encodedBytes = context.getUncompressedBytesWithHeader();
  // Strip the dummy header to get the size of the encoded payload alone.
  int size = encodedBytes.length - block.getDummyHeaderForVersion().length;
  return new HFileBlock(context.getBlockType(), size, size, -1,
      ByteBuffer.wrap(encodedBytes), HFileBlock.FILL_HEADER, 0,
      block.getOnDiskDataSizeWithHeader(), block.getHFileContext());
}
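Example 2 targets the newer writer API: compression, MVCC, and tag settings travel inside the block's HFileContext instead of being passed individually, and the header to strip is whatever block.getDummyHeaderForVersion() reports for the block's writer version rather than a fixed constant.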
Example 3: createBlockOnDisk
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the package/class the method depends on
private HFileBlock createBlockOnDisk(HFileBlock block) throws IOException {
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      Compression.Algorithm.NONE, blockEncoder.getDataBlockEncoding(),
      HConstants.HFILEBLOCK_DUMMY_HEADER);
  context.setDummyHeader(block.getDummyHeaderForVersion());
  blockEncoder.beforeWriteToDisk(block.getBufferWithoutHeader(),
      includesMemstoreTS, context, block.getBlockType());
  byte[] encodedBytes = context.getUncompressedBytesWithHeader();
  // Strip the dummy header to get the size of the encoded payload alone.
  int size = encodedBytes.length - block.getDummyHeaderForVersion().length;
  return new HFileBlock(context.getBlockType(), size, size, -1,
      ByteBuffer.wrap(encodedBytes), HFileBlock.FILL_HEADER, 0, includesMemstoreTS,
      block.getMinorVersion(), block.getBytesPerChecksum(), block.getChecksumType(),
      block.getOnDiskDataSizeWithHeader());
}
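Example 3 is the same helper against the older API: the encoding context is constructed from an explicit Compression.Algorithm, and beforeWriteToDisk takes includesMemstoreTS as a separate flag because there is no HFileContext to carry it.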
Example 4: testEncodingWritePath
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the package/class the method depends on
/**
 * Test writing to disk.
 * @throws IOException
 */
@Test
public void testEncodingWritePath() throws IOException {
  // usually we have just block without headers, but don't complicate that
  HFileBlock block = getSampleHFileBlock();
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      Compression.Algorithm.NONE, blockEncoder.getEncodingOnDisk(), HFileBlock.DUMMY_HEADER);
  blockEncoder.beforeWriteToDisk(block.getBufferWithoutHeader(),
      includesMemstoreTS, context, block.getBlockType());
  byte[] encodedBytes = context.getUncompressedBytesWithHeader();
  int size = encodedBytes.length - HFileBlock.HEADER_SIZE;
  HFileBlock blockOnDisk = new HFileBlock(context.getBlockType(), size, size, -1,
      ByteBuffer.wrap(encodedBytes), HFileBlock.FILL_HEADER, 0,
      includesMemstoreTS, block.getMinorVersion(),
      block.getBytesPerChecksum(), block.getChecksumType(),
      block.getOnDiskDataSizeWithHeader());
  if (blockEncoder.getEncodingOnDisk() != DataBlockEncoding.NONE) {
    // An encoder ran: the block must identify itself as encoded data.
    assertEquals(BlockType.ENCODED_DATA, blockOnDisk.getBlockType());
    assertEquals(blockEncoder.getEncodingOnDisk().getId(),
        blockOnDisk.getDataBlockEncodingId());
  } else {
    assertEquals(BlockType.DATA, blockOnDisk.getBlockType());
  }
}
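The assertions spell out the contract exercised through getUncompressedBytesWithHeader() on the write path: with an on-disk encoding configured, the block built from the returned bytes must report BlockType.ENCODED_DATA and echo the encoder's ID; with DataBlockEncoding.NONE it stays a plain DATA block.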
Example 5: writeEncodedBlock
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the package/class the method depends on
static void writeEncodedBlock(Algorithm algo, DataBlockEncoding encoding,
    DataOutputStream dos, final List<Integer> encodedSizes,
    final List<ByteBuffer> encodedBlocks, int blockId,
    boolean includesMemstoreTS, byte[] dummyHeader, boolean useTag) throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  // Tee the test key-values to both the real stream and an in-memory copy.
  DoubleOutputStream doubleOutputStream = new DoubleOutputStream(dos, baos);
  writeTestKeyValues(doubleOutputStream, blockId, includesMemstoreTS, useTag);
  ByteBuffer rawBuf = ByteBuffer.wrap(baos.toByteArray());
  rawBuf.rewind();
  DataBlockEncoder encoder = encoding.getEncoder();
  int headerLen = dummyHeader.length;
  byte[] encodedResultWithHeader = null;
  HFileContext meta = new HFileContextBuilder()
      .withCompression(algo)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(useTag)
      .build();
  if (encoder != null) {
    HFileBlockEncodingContext encodingCtx =
        encoder.newDataBlockEncodingContext(encoding, dummyHeader, meta);
    encoder.encodeKeyValues(rawBuf, encodingCtx);
    encodedResultWithHeader = encodingCtx.getUncompressedBytesWithHeader();
  } else {
    // No encoder: prepend the dummy header by hand and run the default path.
    HFileBlockDefaultEncodingContext defaultEncodingCtx =
        new HFileBlockDefaultEncodingContext(encoding, dummyHeader, meta);
    byte[] rawBufWithHeader = new byte[rawBuf.array().length + headerLen];
    System.arraycopy(rawBuf.array(), 0, rawBufWithHeader,
        headerLen, rawBuf.array().length);
    defaultEncodingCtx.compressAfterEncodingWithBlockType(rawBufWithHeader,
        BlockType.DATA);
    encodedResultWithHeader = defaultEncodingCtx.getUncompressedBytesWithHeader();
  }
  final int encodedSize = encodedResultWithHeader.length - headerLen;
  if (encoder != null) {
    // We need to account for the two-byte encoding algorithm ID that
    // comes after the 24-byte block header but before encoded KVs.
    headerLen += DataBlockEncoding.ID_SIZE;
  }
  byte[] encodedDataSection = new byte[encodedResultWithHeader.length - headerLen];
  System.arraycopy(encodedResultWithHeader, headerLen,
      encodedDataSection, 0, encodedDataSection.length);
  final ByteBuffer encodedBuf = ByteBuffer.wrap(encodedDataSection);
  encodedSizes.add(encodedSize);
  encodedBlocks.add(encodedBuf);
}
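The offset arithmetic at the end of Example 5 is easy to misread, so here is a hedged restatement of the layout it assumes (per the in-code comment: header, then a two-byte encoding ID when an encoder ran, then the encoded key-values). The variable names reuse those from the example:

  // Assumed layout of getUncompressedBytesWithHeader()'s result:
  //   [ header (dummyHeader.length bytes) | 2-byte encoding ID (encoder only) | encoded KVs ]
  int dataStart = dummyHeader.length
      + (encoder != null ? DataBlockEncoding.ID_SIZE : 0);
  byte[] kvSection = java.util.Arrays.copyOfRange(
      encodedResultWithHeader, dataStart, encodedResultWithHeader.length);

Note the asymmetry in the example: encodedSize is computed before the ID is added to headerLen, so it deliberately includes the two ID bytes, while encodedDataSection excludes them.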
Example 6: writeEncodedBlock
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the package/class the method depends on
static void writeEncodedBlock(Algorithm algo, DataBlockEncoding encoding,
    DataOutputStream dos, final List<Integer> encodedSizes,
    final List<ByteBuffer> encodedBlocks, int blockId,
    boolean includesMemstoreTS, byte[] dummyHeader) throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DoubleOutputStream doubleOutputStream = new DoubleOutputStream(dos, baos);
  writeTestKeyValues(doubleOutputStream, blockId, includesMemstoreTS);
  ByteBuffer rawBuf = ByteBuffer.wrap(baos.toByteArray());
  rawBuf.rewind();
  DataBlockEncoder encoder = encoding.getEncoder();
  int headerLen = dummyHeader.length;
  byte[] encodedResultWithHeader = null;
  if (encoder != null) {
    HFileBlockEncodingContext encodingCtx =
        encoder.newDataBlockEncodingContext(algo, encoding, dummyHeader);
    encoder.encodeKeyValues(rawBuf, includesMemstoreTS, encodingCtx);
    encodedResultWithHeader = encodingCtx.getUncompressedBytesWithHeader();
  } else {
    HFileBlockDefaultEncodingContext defaultEncodingCtx =
        new HFileBlockDefaultEncodingContext(algo, encoding, dummyHeader);
    byte[] rawBufWithHeader = new byte[rawBuf.array().length + headerLen];
    System.arraycopy(rawBuf.array(), 0, rawBufWithHeader,
        headerLen, rawBuf.array().length);
    defaultEncodingCtx.compressAfterEncodingWithBlockType(rawBufWithHeader,
        BlockType.DATA);
    encodedResultWithHeader = defaultEncodingCtx.getUncompressedBytesWithHeader();
  }
  final int encodedSize = encodedResultWithHeader.length - headerLen;
  if (encoder != null) {
    // We need to account for the two-byte encoding algorithm ID that
    // comes after the 24-byte block header but before encoded KVs.
    headerLen += DataBlockEncoding.ID_SIZE;
  }
  byte[] encodedDataSection = new byte[encodedResultWithHeader.length - headerLen];
  System.arraycopy(encodedResultWithHeader, headerLen,
      encodedDataSection, 0, encodedDataSection.length);
  final ByteBuffer encodedBuf = ByteBuffer.wrap(encodedDataSection);
  encodedSizes.add(encodedSize);
  encodedBlocks.add(encodedBuf);
}
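Example 6 is the pre-HFileContext version of the same helper: the context is built from the compression Algorithm directly, and encodeKeyValues takes includesMemstoreTS as an explicit argument.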
Example 7: writeEncodedBlock
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the package/class the method depends on
static void writeEncodedBlock(Algorithm algo, DataBlockEncoding encoding,
    DataOutputStream dos, final List<Integer> encodedSizes,
    final List<ByteBuffer> encodedBlocks, int blockId,
    boolean includesMemstoreTS, byte[] dummyHeader) throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DoubleOutputStream doubleOutputStream = new DoubleOutputStream(dos, baos);
  writeTestKeyValues(doubleOutputStream, blockId, includesMemstoreTS);
  ByteBuffer rawBuf = ByteBuffer.wrap(baos.toByteArray());
  rawBuf.rewind();
  DataBlockEncoder encoder = encoding.getEncoder();
  int headerLen = dummyHeader.length;
  byte[] encodedResultWithHeader = null;
  if (encoder != null) {
    HFileBlockEncodingContext encodingCtx =
        encoder.newDataBlockEncodingContext(algo, encoding, dummyHeader);
    encoder.encodeKeyValues(rawBuf, includesMemstoreTS, encodingCtx);
    encodedResultWithHeader = encodingCtx.getUncompressedBytesWithHeader();
  } else {
    HFileBlockDefaultEncodingContext defaultEncodingCtx =
        new HFileBlockDefaultEncodingContext(algo, encoding, dummyHeader);
    byte[] rawBufWithHeader = new byte[rawBuf.array().length + headerLen];
    System.arraycopy(rawBuf.array(), 0, rawBufWithHeader,
        headerLen, rawBuf.array().length);
    defaultEncodingCtx.compressAfterEncoding(rawBufWithHeader,
        BlockType.DATA);
    encodedResultWithHeader = defaultEncodingCtx.getUncompressedBytesWithHeader();
  }
  final int encodedSize = encodedResultWithHeader.length - headerLen;
  if (encoder != null) {
    // We need to account for the two-byte encoding algorithm ID that
    // comes after the 24-byte block header but before encoded KVs.
    headerLen += DataBlockEncoding.ID_SIZE;
  }
  byte[] encodedDataSection = new byte[encodedResultWithHeader.length - headerLen];
  System.arraycopy(encodedResultWithHeader, headerLen,
      encodedDataSection, 0, encodedDataSection.length);
  final ByteBuffer encodedBuf = ByteBuffer.wrap(encodedDataSection);
  encodedSizes.add(encodedSize);
  encodedBlocks.add(encodedBuf);
}
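Example 7 differs from Example 6 only in the unencoded branch: it calls compressAfterEncoding(...) where Example 6 calls compressAfterEncodingWithBlockType(...); either way, the result is read back through getUncompressedBytesWithHeader().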