本文整理汇总了Java中org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader类的典型用法代码示例。如果您正苦于以下问题:Java HFileBlockIndex.BlockIndexReader类的具体用法?Java HFileBlockIndex.BlockIndexReader怎么用?Java HFileBlockIndex.BlockIndexReader使用的例子?那么,这里精选的代码示例或许可以为您提供帮助。您也可以进一步了解该类所在的外部类org.apache.hadoop.hbase.io.hfile.HFileBlockIndex
的用法示例。
在下文中一共展示了HFileBlockIndex.BlockIndexReader类的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: CompoundBloomFilter
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex; //导入方法依赖的package包/类
/**
 * De-serialization for compound Bloom filter metadata. Must be consistent
 * with what {@link CompoundBloomFilterWriter} does.
 * <p>
 * NOTE: the read order below must exactly mirror the writer's serialization
 * order — do not reorder these reads.
 *
 * @param meta serialized Bloom filter metadata without any magic blocks
 * @param reader the HFile reader, retained for loading Bloom chunks later
 * @throws IOException if the metadata stream cannot be read
 */
public CompoundBloomFilter(DataInput meta, HFile.Reader reader)
throws IOException {
this.reader = reader;
// Fixed-order scalar header fields, as written by the writer.
totalByteSize = meta.readLong();
hashCount = meta.readInt();
hashType = meta.readInt();
totalKeyCount = meta.readLong();
totalMaxKeys = meta.readLong();
numChunks = meta.readInt();
// Comparator class name is stored as a length-prefixed byte array.
comparator = FixedFileTrailer.createComparator(
Bytes.toString(Bytes.readByteArray(meta)));
// Fail fast on an unknown hash algorithm id rather than at query time.
hash = Hash.getInstance(hashType);
if (hash == null) {
throw new IllegalArgumentException("Invalid hash type: " + hashType);
}
// Single-level (depth 1) block index over the numChunks Bloom chunks;
// the root index entries follow immediately in the metadata stream.
index = new HFileBlockIndex.BlockIndexReader(comparator, 1);
index.readRootIndex(meta, numChunks);
}
示例2: addStoreFileCutpoints
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex; //导入方法依赖的package包/类
/**
 * Scans a store file's data-block index and appends region split candidates
 * ("cutpoints") roughly every {@code splitBlockSize} bytes.
 * <p>
 * For a single-level index the root entries are sampled directly; for a
 * multi-level index each child index block is read and recursively processed
 * by {@code addIndexCutpoints}, which threads a running byte count ({@code
 * carry}) across children.
 *
 * @param cutpoints        output list; in-range row keys are appended
 * @param fileReader       reader for the store file being examined
 * @param storeFileInBytes total size of the store file, in bytes
 * @param carry            byte count carried in from a previous file/block
 *                         (NOTE(review): ignored by the single-level branch —
 *                         presumably intentional, but worth confirming)
 * @param range            inclusive key range; keys outside it are skipped
 * @param splitBlockSize   target number of bytes between cutpoints
 * @return the residual byte count to carry into the next store file
 * @throws IOException if an index block cannot be read
 */
private static int addStoreFileCutpoints(List<byte[]> cutpoints, HFile.Reader fileReader, long storeFileInBytes, int carry, Pair<byte[], byte[]> range, int splitBlockSize) throws IOException {
    HFileBlockIndex.BlockIndexReader rootIndex = fileReader.getDataBlockIndexReader();
    int rootEntries = rootIndex.getRootBlockCount();
    int indexLevels = fileReader.getTrailer().getNumDataIndexLevels();
    if (indexLevels != 1) {
        // Multi-level index: descend one level per child block. Depth 2 means
        // the children are leaf index blocks; deeper means intermediate ones.
        for (int child = 0; child < rootEntries; child++) {
            HFileBlock childBlock = fileReader.readBlock(
                    rootIndex.getRootBlockOffset(child),
                    rootIndex.getRootBlockDataSize(child),
                    true, true, false, true,
                    indexLevels == 2 ? BlockType.LEAF_INDEX : BlockType.INTERMEDIATE_INDEX,
                    fileReader.getDataBlockEncoding());
            carry = addIndexCutpoints(fileReader, childBlock.getBufferWithoutHeader(), indexLevels - 1, cutpoints, storeFileInBytes / rootEntries, carry, range, splitBlockSize);
        }
        return carry;
    }
    // Single-level index: estimate bytes per root entry and emit a cutpoint
    // each time the running estimate crosses splitBlockSize.
    int bytesPerEntry = (int) (rootEntries > 0 ? storeFileInBytes / (float) rootEntries : storeFileInBytes);
    int accumulated = 0;
    for (int entry = 0; entry < rootEntries; entry++) {
        if (accumulated >= splitBlockSize) {
            accumulated = 0;
            KeyValue candidate = KeyValue.createKeyValueFromKey(rootIndex.getRootBlockKey(entry));
            if (CellUtils.isKeyValueInRange(candidate, range)) {
                cutpoints.add(candidate.getRow());
            }
        }
        accumulated += bytesPerEntry;
    }
    return accumulated;
}