This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.BlockType. If you are wondering what the BlockType class does, or how to use it in practice, the curated examples below should help.
The BlockType class belongs to the org.apache.hadoop.hbase.io.hfile package. Fifteen code examples are presented below, sorted by popularity by default.
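For orientation before the examples: BlockType is an enum whose constants identify the kinds of blocks stored in an HFile. The minimal sketch below uses only constants that actually appear in the snippets on this page; it is an illustrative warm-up, not one of the fifteen collected examples.

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class

public class BlockTypeTour {
  public static void main(String[] args) {
    // Each enum constant names one kind of HFile block.
    for (BlockType bt : BlockType.values()) {
      System.out.println(bt.name());
    }
    // Constants referenced by the examples below:
    BlockType[] seenBelow = { BlockType.DATA, BlockType.ENCODED_DATA,
        BlockType.BLOOM_CHUNK, BlockType.GENERAL_BLOOM_META,
        BlockType.DELETE_FAMILY_BLOOM_META };
    System.out.println(seenBelow.length + " of these constants appear in the examples");
  }
}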
Example 1: encodeKeyValues
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
/**
* Copied from BufferedDataBlockEncoder. Almost definitely can be improved, but I'm not familiar
* enough with the concept of the HFileBlockEncodingContext.
*/
@Override
public void encodeKeyValues(ByteBuffer in,
HFileBlockEncodingContext blkEncodingCtx) throws IOException {
if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
throw new IOException(this.getClass().getName() + " only accepts "
+ HFileBlockDefaultEncodingContext.class.getName() + " as the " + "encoding context.");
}
HFileBlockDefaultEncodingContext encodingCtx
= (HFileBlockDefaultEncodingContext) blkEncodingCtx;
encodingCtx.prepareEncoding();
DataOutputStream dataOut = encodingCtx.getOutputStreamForEncoder();
internalEncodeKeyValues(dataOut, in, encodingCtx.getHFileContext().isIncludesMvcc(),
encodingCtx.getHFileContext().isIncludesTags());
// Do I need to check this, or will it always be DataBlockEncoding.PREFIX_TREE?
if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
encodingCtx.postEncoding(BlockType.ENCODED_DATA);
} else {
encodingCtx.postEncoding(BlockType.DATA);
}
}
Example 2: encodeKeyValues
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
/**
* Copied from BufferedDataBlockEncoder. Almost definitely can be improved, but I'm not familiar
* enough with the concept of the HFileBlockEncodingContext.
*/
@Override
public void encodeKeyValues(ByteBuffer in, boolean includesMvccVersion,
HFileBlockEncodingContext blkEncodingCtx) throws IOException {
if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
throw new IOException(this.getClass().getName() + " only accepts "
+ HFileBlockDefaultEncodingContext.class.getName() + " as the " + "encoding context.");
}
HFileBlockDefaultEncodingContext encodingCtx
= (HFileBlockDefaultEncodingContext) blkEncodingCtx;
encodingCtx.prepareEncoding();
DataOutputStream dataOut = encodingCtx.getOutputStreamForEncoder();
internalEncodeKeyValues(dataOut, in, includesMvccVersion);
// Do I need to check this, or will it always be DataBlockEncoding.PREFIX_TREE?
if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
encodingCtx.postEncoding(BlockType.ENCODED_DATA);
} else {
encodingCtx.postEncoding(BlockType.DATA);
}
}
Example 3: encodeKeyValues
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public void encodeKeyValues(ByteBuffer in,
boolean includesMemstoreTS,
HFileBlockEncodingContext blkEncodingCtx) throws IOException {
if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
throw new IOException(this.getClass().getName() + " only accepts "
+ HFileBlockDefaultEncodingContext.class.getName() + " as the " +
"encoding context.");
}
HFileBlockDefaultEncodingContext encodingCtx =
(HFileBlockDefaultEncodingContext) blkEncodingCtx;
encodingCtx.prepareEncoding();
DataOutputStream dataOut = encodingCtx.getOutputStreamForEncoder();
internalEncodeKeyValues(dataOut, in, includesMemstoreTS);
if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
encodingCtx.postEncoding(BlockType.ENCODED_DATA);
} else {
encodingCtx.postEncoding(BlockType.DATA);
}
}
Example 4: compressAfterEncoding
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
/**
 * Finishes a block after encoding: if a compression algorithm is configured,
 * the header is written as-is and the payload is compressed behind it;
 * otherwise the uncompressed bytes are used directly as the on-disk bytes.
 * @param uncompressedBytesWithHeader block payload preceded by its header
 * @param blockType type of the block being finished
 * @param headerBytes serialized header to write before the compressed payload
 * @throws IOException if writing to the compression stream fails
 */
protected void compressAfterEncoding(byte[] uncompressedBytesWithHeader,
BlockType blockType, byte[] headerBytes) throws IOException {
this.uncompressedBytesWithHeader = uncompressedBytesWithHeader;
if (compressionAlgorithm != NONE) {
compressedByteStream.reset();
compressedByteStream.write(headerBytes);
compressionStream.resetState();
compressionStream.write(uncompressedBytesWithHeader,
headerBytes.length, uncompressedBytesWithHeader.length
- headerBytes.length);
compressionStream.flush();
compressionStream.finish();
onDiskBytesWithHeader = compressedByteStream.toByteArray();
} else {
onDiskBytesWithHeader = uncompressedBytesWithHeader;
}
this.blockType = blockType;
}
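Example 4's flow is: keep the header verbatim, compress only the payload, and fall back to the uncompressed bytes when the algorithm is NONE. Below is a minimal, self-contained analogue of that compress-or-passthrough pattern using java.util.zip in place of HBase's Compression.Algorithm machinery; the class and method names here are illustrative assumptions, not HBase API.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.zip.GZIPOutputStream;

final class CompressOrPassThrough {
  /** Mirrors compressAfterEncoding: emit the header as-is, compress only the payload. */
  static byte[] finishBlock(byte[] bytesWithHeader, int headerLen, boolean compress)
      throws IOException {
    if (!compress) {
      return bytesWithHeader; // NONE algorithm: on-disk bytes == uncompressed bytes
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    out.write(bytesWithHeader, 0, headerLen); // header stays uncompressed
    try (GZIPOutputStream gz = new GZIPOutputStream(out)) {
      gz.write(bytesWithHeader, headerLen, bytesWithHeader.length - headerLen);
    } // close() finishes the GZIP stream, like compressionStream.finish() above
    return out.toByteArray();
  }
}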
Example 5: contains
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
ByteBuffer bloom) {
// We try to store the result in this variable so we can update stats for
// testing, but if the Bloom block cannot be read, we throw instead.
boolean result;
int block = index.rootBlockContainingKey(key, keyOffset,
keyLength);
if (block < 0) {
result = false; // This key is not in the file.
} else {
HFileBlock bloomBlock;
try {
// We cache the block and use a positional read.
bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
index.getRootBlockDataSize(block), true, true, false, true,
BlockType.BLOOM_CHUNK, null);
} catch (IOException ex) {
// The Bloom filter is broken, turn it off.
throw new IllegalArgumentException(
"Failed to load Bloom block for key "
+ Bytes.toStringBinary(key, keyOffset, keyLength), ex);
}
ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
result = ByteBloomFilter.contains(key, keyOffset, keyLength,
bloomBuf, bloomBlock.headerSize(),
bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
}
if (numQueriesPerChunk != null && block >= 0) {
// Update statistics. Only used in unit tests.
++numQueriesPerChunk[block];
if (result)
++numPositivesPerChunk[block];
}
return result;
}
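Example 5 delegates the actual membership test to ByteBloomFilter.contains, parameterized by a hash function and a hash count. As a refresher on what such a test does, here is a tiny self-contained Bloom filter. It is a hypothetical stand-in for illustration only; HBase's real ByteBloomFilter has its own hashing and on-disk layout.

import java.util.BitSet;

final class TinyBloom {
  private final BitSet bits;
  private final int numBits, numHashes;

  TinyBloom(int numBits, int numHashes) {
    this.bits = new BitSet(numBits);
    this.numBits = numBits;
    this.numHashes = numHashes;
  }

  // Derive the i-th probe position from the key; a trivial seeded mix, illustration only.
  private int probe(byte[] key, int i) {
    int h = 0x9747b28c ^ i;
    for (byte b : key) h = h * 31 + b;
    return Math.floorMod(h, numBits);
  }

  void add(byte[] key) {
    for (int i = 0; i < numHashes; i++) bits.set(probe(key, i));
  }

  /** False means definitely absent; true means possibly present. */
  boolean contains(byte[] key) {
    for (int i = 0; i < numHashes; i++) {
      if (!bits.get(probe(key, i))) return false;
    }
    return true;
  }
}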
Example 6: setBloomFilterFaulty
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
private void setBloomFilterFaulty(BlockType blockType) {
if (blockType == BlockType.GENERAL_BLOOM_META) {
setGeneralBloomFilterFaulty();
} else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) {
setDeleteFamilyBloomFilterFaulty();
}
}
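Since BlockType is an enum, the same dispatch could be written as a switch; a minimal equivalent sketch of Example 6 follows, reusing the handler methods shown above.

private void setBloomFilterFaulty(BlockType blockType) {
  switch (blockType) {
  case GENERAL_BLOOM_META:
    setGeneralBloomFilterFaulty();
    break;
  case DELETE_FAMILY_BLOOM_META:
    setDeleteFamilyBloomFilterFaulty();
    break;
  default:
    break; // other block types carry no Bloom metadata; nothing to disable
  }
}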
Example 7: endBlockEncoding
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out,
byte[] uncompressedBytesWithHeader) throws IOException {
PrefixTreeEncodingState state = (PrefixTreeEncodingState) encodingCtx.getEncodingState();
PrefixTreeEncoder builder = state.builder;
builder.flush();
EncoderFactory.checkIn(builder);
// Do I need to check this, or will it always be DataBlockEncoding.PREFIX_TREE?
if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
encodingCtx.postEncoding(BlockType.ENCODED_DATA);
} else {
encodingCtx.postEncoding(BlockType.DATA);
}
}
Example 8: endBlockEncoding
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out,
byte[] uncompressedBytesWithHeader) throws IOException {
BufferedDataBlockEncodingState state = (BufferedDataBlockEncodingState) encodingCtx
.getEncodingState();
// Write the unencoded data size; the offset skips the block header and the
// data-block-encoding ID that precede it.
Bytes.putInt(uncompressedBytesWithHeader,
    HConstants.HFILEBLOCK_HEADER_SIZE + DataBlockEncoding.ID_SIZE,
    state.unencodedDataSizeWritten);
if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
encodingCtx.postEncoding(BlockType.ENCODED_DATA);
} else {
encodingCtx.postEncoding(BlockType.DATA);
}
}
Example 9: contains
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
ByteBuffer bloom) {
// We try to store the result in this variable so we can update stats for
// testing, but if the Bloom block cannot be read, we throw instead.
boolean result;
int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
if (block < 0) {
result = false; // This key is not in the file.
} else {
HFileBlock bloomBlock;
try {
// We cache the block and use a positional read.
bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
index.getRootBlockDataSize(block), true, true, false,
BlockType.BLOOM_CHUNK);
} catch (IOException ex) {
// The Bloom filter is broken, turn it off.
throw new IllegalArgumentException(
"Failed to load Bloom block for key "
+ Bytes.toStringBinary(key, keyOffset, keyLength), ex);
}
ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
result = ByteBloomFilter.contains(key, keyOffset, keyLength,
bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
}
if (numQueriesPerChunk != null && block >= 0) {
// Update statistics. Only used in unit tests.
++numQueriesPerChunk[block];
if (result)
++numPositivesPerChunk[block];
}
return result;
}
Example 10: testIncrements
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Test
public void testIncrements() {
Random rand = new Random(23982737L);
for (int i = 1; i <= 3; ++i) {
final String tableName = "table" + i;
for (int j = 1; j <= 3; ++j) {
final String cfName = "cf" + j;
SchemaMetrics sm = SchemaMetrics.getInstance(tableName, cfName);
for (boolean isInBloom : BOOL_VALUES) {
sm.updateBloomMetrics(isInBloom);
checkMetrics();
}
for (BlockCategory blockCat : BlockType.BlockCategory.values()) {
if (blockCat == BlockCategory.ALL_CATEGORIES) {
continue;
}
for (boolean isCompaction : BOOL_VALUES) {
sm.updateOnCacheHit(blockCat, isCompaction);
checkMetrics();
sm.updateOnCacheMiss(blockCat, isCompaction, rand.nextInt());
checkMetrics();
}
for (boolean isEviction : BOOL_VALUES) {
sm.updateOnCachePutOrEvict(blockCat, (isEviction ? -1 : 1)
* rand.nextInt(1024 * 1024), isEviction);
}
}
}
}
}
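A quick aside on the loop above: the test skips ALL_CATEGORIES because it is not a concrete block category. The same filter in isolation (sketch):

import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class

for (BlockType.BlockCategory blockCat : BlockType.BlockCategory.values()) {
  if (blockCat == BlockType.BlockCategory.ALL_CATEGORIES) {
    continue; // rollup constant, not a concrete category; no per-category stats
  }
  System.out.println(blockCat);
}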
Example 11: setBloomFilterFaulty
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
private void setBloomFilterFaulty(BlockType blockType) {
if (blockType == BlockType.GENERAL_BLOOM_META) {
setGeneralBloomFilterFaulty();
} else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) {
setDeleteFamilyBloomFilterFaulty();
}
}
Example 12: contains
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
ByteBuffer bloom) {
// We try to store the result in this variable so we can update stats for
// testing, but if the Bloom block cannot be read, we throw instead.
boolean result;
int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
if (block < 0) {
result = false; // This key is not in the file.
} else {
HFileBlock bloomBlock;
try {
// We cache the block and use a positional read.
bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
index.getRootBlockDataSize(block), true, true, false, true,
BlockType.BLOOM_CHUNK);
} catch (IOException ex) {
// The Bloom filter is broken, turn it off.
throw new IllegalArgumentException(
"Failed to load Bloom block for key "
+ Bytes.toStringBinary(key, keyOffset, keyLength), ex);
}
ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
result = ByteBloomFilter.contains(key, keyOffset, keyLength,
bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
}
if (numQueriesPerChunk != null && block >= 0) {
// Update statistics. Only used in unit tests.
++numQueriesPerChunk[block];
if (result)
++numPositivesPerChunk[block];
}
return result;
}
Example 13: encodeKeyValues
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public void encodeKeyValues(ByteBuffer in,
HFileBlockEncodingContext blkEncodingCtx) throws IOException {
if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
throw new IOException(this.getClass().getName() + " only accepts "
+ HFileBlockDefaultEncodingContext.class.getName() + " as the " +
"encoding context.");
}
HFileBlockDefaultEncodingContext encodingCtx =
(HFileBlockDefaultEncodingContext) blkEncodingCtx;
encodingCtx.prepareEncoding();
DataOutputStream dataOut = encodingCtx.getOutputStreamForEncoder();
if (encodingCtx.getHFileContext().isIncludesTags()
&& encodingCtx.getHFileContext().isCompressTags()) {
if (encodingCtx.getTagCompressionContext() != null) {
// It will be overhead to create the TagCompressionContext again and again for every block
// encoding.
encodingCtx.getTagCompressionContext().clear();
} else {
try {
TagCompressionContext tagCompressionContext = new TagCompressionContext(
LRUDictionary.class, Byte.MAX_VALUE);
encodingCtx.setTagCompressionContext(tagCompressionContext);
} catch (Exception e) {
throw new IOException("Failed to initialize TagCompressionContext", e);
}
}
}
internalEncodeKeyValues(dataOut, in, encodingCtx);
if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
encodingCtx.postEncoding(BlockType.ENCODED_DATA);
} else {
encodingCtx.postEncoding(BlockType.DATA);
}
}
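The TagCompressionContext handling above follows a lazy-init-then-reuse pattern: allocate once on the first block, then clear per block instead of reallocating. Stripped of HBase specifics (Helper and ctx are hypothetical names, purely for illustration):

Helper h = ctx.getHelper();        // hypothetical accessor
if (h != null) {
  h.clear();                       // reuse: reset per-block state, no reallocation
} else {
  ctx.setHelper(new Helper());     // first block: pay the allocation cost once
}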
Example 14: postEncoding
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
@Override
public void postEncoding(BlockType blockType)
throws IOException {
dataOut.flush();
compressAfterEncodingWithBlockType(encodedStream.toByteArray(), blockType);
this.blockType = blockType;
}
Example 15: postEncoding
import org.apache.hadoop.hbase.io.hfile.BlockType; // import the required package/class
protected void postEncoding(HFileBlockEncodingContext encodingCtx)
throws IOException {
if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
encodingCtx.postEncoding(BlockType.ENCODED_DATA);
} else {
encodingCtx.postEncoding(BlockType.DATA);
}
}
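Example 15 is the same DATA-vs-ENCODED_DATA branch that Examples 1, 2, 3, 7, 8, and 13 each write out inline, factored into a shared helper so that every encoder finishes a block through one code path.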