This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader. If you have been wondering what BlockIndexReader is for and how to use it, the curated examples below should help.
BlockIndexReader is a nested class of org.apache.hadoop.hbase.io.hfile.HFileBlockIndex. Twelve code examples are shown below, ordered by popularity by default.
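Before the examples, here is a minimal sketch of how a BlockIndexReader is typically obtained and queried. It is an illustration, not one of the collected examples: it assumes an already-open HFile.Reader named reader, and uses only the accessors that appear in Examples 6 and 7 below.

// A minimal sketch, assuming `reader` is an open HFile.Reader.
HFileBlockIndex.BlockIndexReader bir = reader.getDataBlockIndexReader();
int rootBlockCount = bir.getRootBlockCount();      // number of root-level index entries
byte[] firstRootKey = bir.getRootBlockKey(0);      // first key of the first indexed block
long firstRootOffset = bir.getRootBlockOffset(0);  // file offset of that block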
Example 1: testHeapSizeForBlockIndex
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
/** Checks if the HeapSize calculator is within reason */
@Test
public void testHeapSizeForBlockIndex() throws IOException {
  Class<HFileBlockIndex.BlockIndexReader> cl =
      HFileBlockIndex.BlockIndexReader.class;
  long expected = ClassSize.estimateBase(cl, false);

  HFileBlockIndex.BlockIndexReader bi =
      new HFileBlockIndex.BlockIndexReader(KeyValue.RAW_COMPARATOR, 1);
  long actual = bi.heapSize();

  // Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets,
  // int [] blockDataSizes) are all null they are not going to show up in the
  // HeapSize calculation, so need to remove those array costs from expected.
  expected -= ClassSize.align(3 * ClassSize.ARRAY);

  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
}
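This test and the three variants that follow (Examples 2 through 4) share one idiom: ClassSize.estimateBase computes a heap estimate from a class's declared fields, and the test compares it against the class's own heapSize() accounting. A hedged sketch of that idiom against a hypothetical HeapSize implementation (MyHeapAware is an invented name, not part of HBase):

// Sketch of the ClassSize idiom; MyHeapAware is hypothetical.
long estimated = ClassSize.estimateBase(MyHeapAware.class, false); // quiet estimate
long reported = new MyHeapAware().heapSize();                      // the class's own accounting
if (estimated != reported) {
  ClassSize.estimateBase(MyHeapAware.class, true); // re-run with debug output to diagnose
  assertEquals(estimated, reported);
}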
Example 2: testHeapSizeForBlockIndex
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
/** Checks if the HeapSize calculator is within reason */
@Test
public void testHeapSizeForBlockIndex() throws IOException {
  Class<HFileBlockIndex.BlockIndexReader> cl =
      HFileBlockIndex.BlockIndexReader.class;
  long expected = ClassSize.estimateBase(cl, false);

  HFileBlockIndex.BlockIndexReader bi =
      new HFileBlockIndex.BlockIndexReader(Bytes.BYTES_RAWCOMPARATOR, 1);
  long actual = bi.heapSize();

  // Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets,
  // int [] blockDataSizes) are all null they are not going to show up in the
  // HeapSize calculation, so need to remove those array costs from expected.
  expected -= ClassSize.align(3 * ClassSize.ARRAY);

  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
}
Example 3: testHeapSizeForBlockIndex
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
/** Checks if the HeapSize calculator is within reason */
@Test
public void testHeapSizeForBlockIndex() throws IOException {
  Class<HFileBlockIndex.BlockIndexReader> cl =
      HFileBlockIndex.BlockIndexReader.class;
  long expected = ClassSize.estimateBase(cl, false);

  HFileBlockIndex.BlockIndexReader bi =
      new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1);
  long actual = bi.heapSize();

  // Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets,
  // int [] blockDataSizes) are all null they are not going to show up in the
  // HeapSize calculation, so need to remove those array costs from expected.
  // The block keys array is already absent in this implementation, so only
  // two array costs remain to subtract.
  expected -= ClassSize.align(2 * ClassSize.ARRAY);

  if (expected != actual) {
    expected = ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
}
Example 4: testHeapSizeForBlockIndex
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
/** Checks if the HeapSize calculator is within reason */
//@Test
public void testHeapSizeForBlockIndex() throws IOException {
  Class<HFileBlockIndex.BlockIndexReader> cl =
      HFileBlockIndex.BlockIndexReader.class;
  long expected = ClassSize.estimateBase(cl, false);

  HFileBlockIndex.BlockIndexReader bi =
      new HFileBlockIndex.BlockIndexReader(KeyValue.RAW_COMPARATOR, 1);
  long actual = bi.heapSize();

  // Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets,
  // int [] blockDataSizes) are all null they are not going to show up in the
  // HeapSize calculation, so need to remove those array costs from expected.
  expected -= ClassSize.align(3 * ClassSize.ARRAY);

  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
}
Example 5: readIndex
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
private void readIndex(boolean useTags) throws IOException {
  long fileSize = fs.getFileStatus(path).getLen();
  LOG.info("Size of " + path + ": " + fileSize);

  FSDataInputStream istream = fs.open(path);
  HFileContext meta = new HFileContextBuilder()
      .withHBaseCheckSum(true)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(useTags)
      .withCompression(compr)
      .build();
  HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(istream,
      fs.getFileStatus(path).getLen(), meta);

  BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
  HFileBlockIndex.BlockIndexReader indexReader =
      new HFileBlockIndex.BlockIndexReader(
          KeyValue.RAW_COMPARATOR, numLevels, brw);

  indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
      fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);

  long prevOffset = -1;
  int i = 0;
  int expectedHitCount = 0;
  int expectedMissCount = 0;
  LOG.info("Total number of keys: " + keys.size());
  for (byte[] key : keys) {
    assertTrue(key != null);
    assertTrue(indexReader != null);
    HFileBlock b =
        indexReader.seekToDataBlock(new KeyValue.KeyOnlyKeyValue(key, 0, key.length), null, true,
            true, false, null);
    if (KeyValue.COMPARATOR.compareFlatKey(key, firstKeyInFile) < 0) {
      assertTrue(b == null);
      ++i;
      continue;
    }

    String keyStr = "key #" + i + ", " + Bytes.toStringBinary(key);
    assertTrue("seekToDataBlock failed for " + keyStr, b != null);

    if (prevOffset == b.getOffset()) {
      assertEquals(++expectedHitCount, brw.hitCount);
    } else {
      LOG.info("First key in a new block: " + keyStr + " (block offset: "
          + b.getOffset() + ")");
      assertTrue(b.getOffset() > prevOffset);
      assertEquals(++expectedMissCount, brw.missCount);
      prevOffset = b.getOffset();
    }
    ++i;
  }

  istream.close();
}
Example 6: scan
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
@Override
public HoplogIterator<byte[], byte[]> scan(long offset, long length)
    throws IOException {
  /**
   * Identifies the first and last keys to be scanned based on offset and
   * length. It loads the hfile block index and finds the first hfile block
   * starting after offset; the first key of that block becomes the from-key
   * for the scanner. Similarly it locates the first block starting beyond
   * the offset + length range and uses that block's key as the to-key.
   */

  // load block indexes in memory
  BlockIndexReader bir = reader.getDataBlockIndexReader();
  int blockCount = bir.getRootBlockCount();

  byte[] fromKey = null, toKey = null;

  // find the from-key
  int i = 0;
  for (; i < blockCount; i++) {
    if (bir.getRootBlockOffset(i) < offset) {
      // hfile block has offset less than this reader's split offset; check
      // the next block
      continue;
    }

    // found the first hfile block starting after offset
    fromKey = bir.getRootBlockKey(i);
    break;
  }

  if (fromKey == null) {
    // no block starts after the offset; return a no-op scanner
    return new HFileSortedIterator(null, null, false, null, false);
  }

  // find the to-key
  for (; i < blockCount; i++) {
    if (bir.getRootBlockOffset(i) < (offset + length)) {
      // this hfile block lies within the offset + length range; check the
      // next block for a higher offset
      continue;
    }

    // found the first block starting beyond the offset + length range
    toKey = bir.getRootBlockKey(i);
    break;
  }

  // the from-key is included in the scan and the to-key is excluded
  HFileScanner scanner = reader.getScanner(true, false);
  return new HFileSortedIterator(scanner, fromKey, true, toKey, false);
}
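For orientation, a hedged usage sketch of the scan method above. The receiver name is an assumption (hoplog stands in for whatever object exposes this scan), and the iterator protocol mirrors the one used in Example 7 below:

// Hypothetical usage: scan roughly the second half of the file.
long half = hoplog.getSize() / 2;
HoplogIterator<byte[], byte[]> iter = hoplog.scan(half + 1, hoplog.getSize() - half);
while (iter.hasNext()) {
  iter.next();
  byte[] key = iter.getKey(); // keys from the from-key (inclusive) up to the to-key (exclusive)
}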
Example 7: testNHoplogNBlockIter
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
public void testNHoplogNBlockIter() throws Exception {
  Path path1 = new Path(testDataDir, "region/0/1-1-1.hop");
  Hoplog oplog = new HFileSortedOplog(hdfsStore, path1,
      blockCache, stats, storeStats);
  createHoplog(2000, oplog);

  FileSystem fs = hdfsStore.getFileSystem();
  Reader reader = HFile.createReader(fs, path1, new CacheConfig(fs.getConf()));
  BlockIndexReader bir = reader.getDataBlockIndexReader();
  int blockCount = bir.getRootBlockCount();
  reader.close();

  // make sure there is more than one hfile block in the hoplog
  assertTrue(1 < blockCount);

  Path path2 = new Path(testDataDir, "region/0/1-2-1.hop");
  oplog = new HFileSortedOplog(hdfsStore, path2,
      blockCache, stats, storeStats);
  createHoplog(2000, oplog);

  Path path3 = new Path(testDataDir, "region/0/1-3-1.hop");
  oplog = new HFileSortedOplog(hdfsStore, path3,
      blockCache, stats, storeStats);
  createHoplog(2000, oplog);

  Path[] paths = {path1, path2, path3, path1, path2, path3};
  long half = oplog.getSize() / 2;
  long[] starts = {0, 0, 0, half + 1, half + 1, half + 1};
  long[] lengths = {half, half, half, oplog.getSize(), oplog.getSize(), oplog.getSize()};
  HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
      hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);

  int[] keyCounts = new int[2000];
  while (iter.hasNext()) {
    boolean success = iter.next();
    assertTrue(success);
    String key = new String((byte[]) iter.getKey()).substring("key-".length());
    keyCounts[Integer.valueOf(key) - 100000]++;
  }

  for (int i : keyCounts) {
    assertEquals(3, i);
  }
}
Example 8: readIndex
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
public void readIndex() throws IOException {
  long fileSize = fs.getFileStatus(path).getLen();
  LOG.info("Size of " + path + ": " + fileSize);

  FSDataInputStream istream = fs.open(path);
  HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(istream,
      compr, fs.getFileStatus(path).getLen());

  BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
  HFileBlockIndex.BlockIndexReader indexReader =
      new HFileBlockIndex.BlockIndexReader(
          Bytes.BYTES_RAWCOMPARATOR, numLevels, brw);

  indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
      fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);

  long prevOffset = -1;
  int i = 0;
  int expectedHitCount = 0;
  int expectedMissCount = 0;
  LOG.info("Total number of keys: " + keys.size());
  for (byte[] key : keys) {
    assertTrue(key != null);
    assertTrue(indexReader != null);
    HFileBlock b = indexReader.seekToDataBlock(key, 0, key.length, null,
        true, true, false);
    if (Bytes.BYTES_RAWCOMPARATOR.compare(key, firstKeyInFile) < 0) {
      assertTrue(b == null);
      ++i;
      continue;
    }

    String keyStr = "key #" + i + ", " + Bytes.toStringBinary(key);
    assertTrue("seekToDataBlock failed for " + keyStr, b != null);

    if (prevOffset == b.getOffset()) {
      assertEquals(++expectedHitCount, brw.hitCount);
    } else {
      LOG.info("First key in a new block: " + keyStr + " (block offset: "
          + b.getOffset() + ")");
      assertTrue(b.getOffset() > prevOffset);
      assertEquals(++expectedMissCount, brw.missCount);
      prevOffset = b.getOffset();
    }
    ++i;
  }

  istream.close();
}
Example 9: readIndex
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
public void readIndex(boolean useTags) throws IOException {
  long fileSize = fs.getFileStatus(path).getLen();
  LOG.info("Size of " + path + ": " + fileSize);

  FSDataInputStream istream = fs.open(path);
  HFileContext meta = new HFileContextBuilder()
      .withHBaseCheckSum(true)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(useTags)
      .withCompression(compr)
      .build();
  HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(istream,
      fs.getFileStatus(path).getLen(), meta);

  BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
  HFileBlockIndex.BlockIndexReader indexReader =
      new HFileBlockIndex.BlockIndexReader(
          KeyValue.RAW_COMPARATOR, numLevels, brw);

  indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
      fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);

  long prevOffset = -1;
  int i = 0;
  int expectedHitCount = 0;
  int expectedMissCount = 0;
  LOG.info("Total number of keys: " + keys.size());
  for (byte[] key : keys) {
    assertTrue(key != null);
    assertTrue(indexReader != null);
    HFileBlock b = indexReader.seekToDataBlock(key, 0, key.length, null,
        true, true, false);
    if (Bytes.BYTES_RAWCOMPARATOR.compare(key, firstKeyInFile) < 0) {
      assertTrue(b == null);
      ++i;
      continue;
    }

    String keyStr = "key #" + i + ", " + Bytes.toStringBinary(key);
    assertTrue("seekToDataBlock failed for " + keyStr, b != null);

    if (prevOffset == b.getOffset()) {
      assertEquals(++expectedHitCount, brw.hitCount);
    } else {
      LOG.info("First key in a new block: " + keyStr + " (block offset: "
          + b.getOffset() + ")");
      assertTrue(b.getOffset() > prevOffset);
      assertEquals(++expectedMissCount, brw.missCount);
      prevOffset = b.getOffset();
    }
    ++i;
  }

  istream.close();
}
Example 10: readIndex
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
private void readIndex(boolean useTags) throws IOException {
  long fileSize = fs.getFileStatus(path).getLen();
  LOG.info("Size of " + path + ": " + fileSize);

  FSDataInputStream istream = fs.open(path);
  HFileContext meta = new HFileContextBuilder()
      .withHBaseCheckSum(true)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(useTags)
      .withCompression(compr)
      .build();
  HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(istream,
      fs.getFileStatus(path).getLen(), meta);

  BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
  HFileBlockIndex.BlockIndexReader indexReader =
      new HFileBlockIndex.CellBasedKeyBlockIndexReader(
          CellComparatorImpl.COMPARATOR, numLevels, brw);

  indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
      fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);

  long prevOffset = -1;
  int i = 0;
  int expectedHitCount = 0;
  int expectedMissCount = 0;
  LOG.info("Total number of keys: " + keys.size());
  for (byte[] key : keys) {
    assertTrue(key != null);
    assertTrue(indexReader != null);
    KeyValue.KeyOnlyKeyValue keyOnlyKey = new KeyValue.KeyOnlyKeyValue(key, 0, key.length);
    HFileBlock b =
        indexReader.seekToDataBlock(keyOnlyKey, null, true,
            true, false, null);
    if (PrivateCellUtil.compare(CellComparatorImpl.COMPARATOR, keyOnlyKey, firstKeyInFile, 0,
        firstKeyInFile.length) < 0) {
      assertTrue(b == null);
      ++i;
      continue;
    }

    String keyStr = "key #" + i + ", " + Bytes.toStringBinary(key);
    assertTrue("seekToDataBlock failed for " + keyStr, b != null);

    if (prevOffset == b.getOffset()) {
      assertEquals(++expectedHitCount, brw.hitCount);
    } else {
      LOG.info("First key in a new block: " + keyStr + " (block offset: "
          + b.getOffset() + ")");
      assertTrue(b.getOffset() > prevOffset);
      assertEquals(++expectedMissCount, brw.missCount);
      prevOffset = b.getOffset();
    }
    ++i;
  }

  istream.close();
}
Example 11: readIndex
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
public void readIndex(boolean useTags) throws IOException {
  long fileSize = fs.getFileStatus(path).getLen();
  LOG.info("Size of " + path + ": " + fileSize);

  FSDataInputStream istream = fs.open(path);
  HFileContext meta = new HFileContextBuilder()
      .withHBaseCheckSum(true)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(useTags)
      .withCompression(compr)
      .build();
  HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(istream,
      fs.getFileStatus(path).getLen(), meta);

  BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
  HFileBlockIndex.BlockIndexReader indexReader =
      new HFileBlockIndex.BlockIndexReader(
          KeyValue.RAW_COMPARATOR, numLevels, brw);

  indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
      fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);

  long prevOffset = -1;
  int i = 0;
  int expectedHitCount = 0;
  int expectedMissCount = 0;
  LOG.info("Total number of keys: " + keys.size());
  for (byte[] key : keys) {
    assertTrue(key != null);
    assertTrue(indexReader != null);
    HFileBlock b = indexReader.seekToDataBlock(new KeyValue.KeyOnlyKeyValue(key, 0, key.length),
        null, true, true, false, null);
    if (Bytes.BYTES_RAWCOMPARATOR.compare(key, firstKeyInFile) < 0) {
      assertTrue(b == null);
      ++i;
      continue;
    }

    String keyStr = "key #" + i + ", " + Bytes.toStringBinary(key);
    assertTrue("seekToDataBlock failed for " + keyStr, b != null);

    if (prevOffset == b.getOffset()) {
      assertEquals(++expectedHitCount, brw.hitCount);
    } else {
      LOG.info("First key in a new block: " + keyStr + " (block offset: "
          + b.getOffset() + ")");
      assertTrue(b.getOffset() > prevOffset);
      assertEquals(++expectedMissCount, brw.missCount);
      prevOffset = b.getOffset();
    }
    ++i;
  }

  istream.close();
}
Example 12: readIndex
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
public void readIndex() throws IOException {
  long fileSize = fs.getFileStatus(path).getLen();
  LOG.info("Size of " + path + ": " + fileSize);

  FSDataInputStream istream = fs.open(path);
  HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(istream,
      compr, fs.getFileStatus(path).getLen());

  BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
  HFileBlockIndex.BlockIndexReader indexReader =
      new HFileBlockIndex.BlockIndexReader(
          KeyValue.RAW_COMPARATOR, numLevels, brw);

  indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
      fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);

  long prevOffset = -1;
  int i = 0;
  int expectedHitCount = 0;
  int expectedMissCount = 0;
  LOG.info("Total number of keys: " + keys.size());
  for (byte[] key : keys) {
    assertTrue(key != null);
    assertTrue(indexReader != null);
    HFileBlock b = indexReader.seekToDataBlock(key, 0, key.length, null,
        true, true, false);
    if (Bytes.BYTES_RAWCOMPARATOR.compare(key, firstKeyInFile) < 0) {
      assertTrue(b == null);
      ++i;
      continue;
    }

    String keyStr = "key #" + i + ", " + Bytes.toStringBinary(key);
    assertTrue("seekToDataBlock failed for " + keyStr, b != null);

    if (prevOffset == b.getOffset()) {
      assertEquals(++expectedHitCount, brw.hitCount);
    } else {
      LOG.info("First key in a new block: " + keyStr + " (block offset: "
          + b.getOffset() + ")");
      assertTrue(b.getOffset() > prevOffset);
      assertEquals(++expectedMissCount, brw.missCount);
      prevOffset = b.getOffset();
    }
    ++i;
  }

  istream.close();
}