

Java BlockIndexReader Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader. If you are wondering what the BlockIndexReader class does, how to use it, or where it is used in practice, the curated examples below should help.


The BlockIndexReader class belongs to the org.apache.hadoop.hbase.io.hfile.HFileBlockIndex package. Twelve code examples of the class are shown below, ordered by popularity.
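For orientation before the examples: the short sketch below opens an HFile and walks the root-level entries of its data block index, combining the accessor calls that appear in Examples 6 and 7 (getRootBlockCount, getRootBlockKey, getRootBlockOffset). It is a minimal illustration only, written against the older (pre-1.0) HBase API that most of the examples below use, where getRootBlockKey returns a byte[]; the conf, fs, and args[0] path are placeholders, and RootIndexDump is a hypothetical class name.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
import org.apache.hadoop.hbase.util.Bytes;

public class RootIndexDump {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path(args[0]); // path to an existing HFile

    // Open the HFile; the reader loads the root block index on open.
    HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf));
    try {
      BlockIndexReader bir = reader.getDataBlockIndexReader();
      // Each root-level entry maps the first key of a data block to that
      // block's file offset (the index also stores each block's on-disk size).
      for (int i = 0; i < bir.getRootBlockCount(); i++) {
        System.out.println("block " + i
            + ": firstKey=" + Bytes.toStringBinary(bir.getRootBlockKey(i))
            + ", offset=" + bir.getRootBlockOffset(i));
      }
    } finally {
      reader.close();
    }
  }
}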

Example 1: testHeapSizeForBlockIndex

import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
/** Checks if the HeapSize calculator is within reason */
@Test
public void testHeapSizeForBlockIndex() throws IOException {
  Class<HFileBlockIndex.BlockIndexReader> cl =
      HFileBlockIndex.BlockIndexReader.class;
  long expected = ClassSize.estimateBase(cl, false);

  HFileBlockIndex.BlockIndexReader bi =
      new HFileBlockIndex.BlockIndexReader(KeyValue.RAW_COMPARATOR, 1);
  long actual = bi.heapSize();

  // Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets,
  // int [] blockDataSizes) are all null they are not going to show up in the
  // HeapSize calculation, so need to remove those array costs from expected.
  expected -= ClassSize.align(3 * ClassSize.ARRAY);

  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 22 | Source: TestHFileBlockIndex.java

Example 2: testHeapSizeForBlockIndex

import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
/** Checks if the HeapSize calculator is within reason */
@Test
public void testHeapSizeForBlockIndex() throws IOException {
  Class<HFileBlockIndex.BlockIndexReader> cl =
      HFileBlockIndex.BlockIndexReader.class;
  long expected = ClassSize.estimateBase(cl, false);

  HFileBlockIndex.BlockIndexReader bi =
      new HFileBlockIndex.BlockIndexReader(Bytes.BYTES_RAWCOMPARATOR, 1);
  long actual = bi.heapSize();

  // Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets,
  // int [] blockDataSizes) are all null they are not going to show up in the
  // HeapSize calculation, so need to remove those array costs from expected.
  expected -= ClassSize.align(3 * ClassSize.ARRAY);

  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 22 | Source: TestHFileBlockIndex.java

Example 3: testHeapSizeForBlockIndex

import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
/** Checks if the HeapSize calculator is within reason */
@Test
public void testHeapSizeForBlockIndex() throws IOException {
  Class<HFileBlockIndex.BlockIndexReader> cl =
      HFileBlockIndex.BlockIndexReader.class;
  long expected = ClassSize.estimateBase(cl, false);

  HFileBlockIndex.BlockIndexReader bi =
      new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1);
  long actual = bi.heapSize();

  // Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets,
  // int [] blockDataSizes) are all null they are not going to show up in the
  // HeapSize calculation, so need to remove those array costs from expected.
  // The block keys array is absent in this reader, so only two array costs
  // are removed here.
  expected -= ClassSize.align(2 * ClassSize.ARRAY);

  if (expected != actual) {
    expected = ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
}
 
Developer: apache | Project: hbase | Lines: 23 | Source: TestHFileBlockIndex.java

Example 4: testHeapSizeForBlockIndex

import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
/** Checks if the HeapSize calculator is within reason */
//@Test
public void testHeapSizeForBlockIndex() throws IOException {
  Class<HFileBlockIndex.BlockIndexReader> cl =
      HFileBlockIndex.BlockIndexReader.class;
  long expected = ClassSize.estimateBase(cl, false);

  HFileBlockIndex.BlockIndexReader bi =
      new HFileBlockIndex.BlockIndexReader(KeyValue.RAW_COMPARATOR, 1);
  long actual = bi.heapSize();

  // Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets,
  // int [] blockDataSizes) are all null they are not going to show up in the
  // HeapSize calculation, so need to remove those array costs from expected.
  expected -= ClassSize.align(3 * ClassSize.ARRAY);

  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
}
 
Developer: shenli-uiuc | Project: PyroDB | Lines: 22 | Source: TestHFileBlockIndex.java

Example 5: readIndex

import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
private void readIndex(boolean useTags) throws IOException {
  long fileSize = fs.getFileStatus(path).getLen();
  LOG.info("Size of " + path + ": " + fileSize);

  FSDataInputStream istream = fs.open(path);
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(true)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTags)
                      .withCompression(compr)
                      .build();
  HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(istream, fs.getFileStatus(path)
      .getLen(), meta);

  BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
  HFileBlockIndex.BlockIndexReader indexReader =
      new HFileBlockIndex.BlockIndexReader(
          KeyValue.RAW_COMPARATOR, numLevels, brw);

  indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
      fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);

  long prevOffset = -1;
  int i = 0;
  int expectedHitCount = 0;
  int expectedMissCount = 0;
  LOG.info("Total number of keys: " + keys.size());
  for (byte[] key : keys) {
    assertTrue(key != null);
    assertTrue(indexReader != null);
    HFileBlock b =
        indexReader.seekToDataBlock(new KeyValue.KeyOnlyKeyValue(key, 0, key.length), null, true,
          true, false, null);
    if (KeyValue.COMPARATOR.compareFlatKey(key, firstKeyInFile) < 0) {
      assertTrue(b == null);
      ++i;
      continue;
    }

    String keyStr = "key #" + i + ", " + Bytes.toStringBinary(key);

    assertTrue("seekToDataBlock failed for " + keyStr, b != null);

    if (prevOffset == b.getOffset()) {
      assertEquals(++expectedHitCount, brw.hitCount);
    } else {
      LOG.info("First key in a new block: " + keyStr + ", block offset: "
          + b.getOffset() + ")");
      assertTrue(b.getOffset() > prevOffset);
      assertEquals(++expectedMissCount, brw.missCount);
      prevOffset = b.getOffset();
    }
    ++i;
  }

  istream.close();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 58 | Source: TestHFileBlockIndex.java

Example 6: scan

import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
@Override
public HoplogIterator<byte[], byte[]> scan(long offset, long length)
    throws IOException {
  /*
   * Identifies the first and last keys to be scanned based on offset and
   * length. It loads the hfile block index and finds the first hfile block
   * starting after offset; that block's key becomes the scanner's from-key.
   * Similarly, it locates the first block starting beyond the offset + length
   * range and uses that block's key as the scanner's to-key.
   */

  // load block indexes in memory
  BlockIndexReader bir = reader.getDataBlockIndexReader();
  int blockCount = bir.getRootBlockCount();
  
  byte[] fromKey = null, toKey = null;

  // find from key
  int i = 0;
  for (; i < blockCount; i++) {
    if (bir.getRootBlockOffset(i) < offset) {
      // hfile block has offset less than this reader's split offset. check
      // the next block
      continue;
    }

    // found the first hfile block starting after offset
    fromKey = bir.getRootBlockKey(i);
    break;
  }

  if (fromKey == null) {
    // seems no block starts after the offset. return no-op scanner
    return new HFileSortedIterator(null, null, false, null, false);
  }
  
  // find to key
  for (; i < blockCount; i++) {
    if (bir.getRootBlockOffset(i) < (offset + length)) {
      // this hfile block lies within the offset+length range. check the
      // next block for a higher offset
      continue;
    }

    // found the first block starting beyond the offset+length range.
    toKey = bir.getRootBlockKey(i);
    break;
  }

  // from key is included in scan and to key is excluded
  HFileScanner scanner = reader.getScanner(true, false);
  return new HFileSortedIterator(scanner, fromKey, true, toKey, false);
}
 
Developer: gemxd | Project: gemfirexd-oss | Lines: 54 | Source: HFileSortedOplog.java
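A note on the split semantics in this scan method: a block belongs to a split exactly when its starting offset falls inside [offset, offset + length), and because the from-key is inclusive while the to-key is exclusive, adjacent contiguous splits over the same file cover disjoint block ranges with no gaps or double-reads.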

Example 7: testNHoplogNBlockIter

import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
public void testNHoplogNBlockIter() throws Exception {
  Path path1 = new Path(testDataDir, "region/0/1-1-1.hop");
  Hoplog oplog = new HFileSortedOplog(hdfsStore, path1,
      blockCache, stats, storeStats);
  createHoplog(2000, oplog);
  
  FileSystem fs = hdfsStore.getFileSystem();
  Reader reader = HFile.createReader(fs, path1, new CacheConfig(fs.getConf()));
  BlockIndexReader bir = reader.getDataBlockIndexReader();
  int blockCount = bir.getRootBlockCount();
  reader.close();
  
  // make sure there are more than 1 hfile blocks in the hoplog
  assertTrue(1 < blockCount);
  
  Path path2 = new Path(testDataDir, "region/0/1-2-1.hop");
  oplog = new HFileSortedOplog(hdfsStore, path2,
      blockCache, stats, storeStats);
  createHoplog(2000, oplog);

  Path path3 = new Path(testDataDir, "region/0/1-3-1.hop");
  oplog = new HFileSortedOplog(hdfsStore, path3,
      blockCache, stats, storeStats);
  createHoplog(2000, oplog);
  
  Path[] paths = {path1, path2, path3, path1, path2, path3};
  long half = oplog.getSize()/2;
  long[] starts = {0, 0, 0, half + 1, half + 1, half + 1};
  long[] lengths = {half, half, half, oplog.getSize(), oplog.getSize(), oplog.getSize()};
  HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
      hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
  
  int[] keyCounts = new int[2000];
  while (iter.hasNext()) {
    boolean success = iter.next();
    assertTrue(success);
    String key = new String((byte[])iter.getKey()).substring("key-".length());
    keyCounts[Integer.parseInt(key) - 100000]++;
  }
  
  for (int i : keyCounts) {
    assertEquals(3, i);
  }
}
 
Developer: gemxd | Project: gemfirexd-oss | Lines: 45 | Source: HDFSSplitIteratorJUnitTest.java

Example 8: readIndex

import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
public void readIndex() throws IOException {
  long fileSize = fs.getFileStatus(path).getLen();
  LOG.info("Size of " + path + ": " + fileSize);

  FSDataInputStream istream = fs.open(path);
  HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(istream,
      compr, fs.getFileStatus(path).getLen());

  BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
  HFileBlockIndex.BlockIndexReader indexReader =
      new HFileBlockIndex.BlockIndexReader(
          Bytes.BYTES_RAWCOMPARATOR, numLevels, brw);

  indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
      fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);

  long prevOffset = -1;
  int i = 0;
  int expectedHitCount = 0;
  int expectedMissCount = 0;
  LOG.info("Total number of keys: " + keys.size());
  for (byte[] key : keys) {
    assertTrue(key != null);
    assertTrue(indexReader != null);
    HFileBlock b = indexReader.seekToDataBlock(key, 0, key.length, null,
        true, true, false);
    if (Bytes.BYTES_RAWCOMPARATOR.compare(key, firstKeyInFile) < 0) {
      assertTrue(b == null);
      ++i;
      continue;
    }

    String keyStr = "key #" + i + ", " + Bytes.toStringBinary(key);

    assertTrue("seekToDataBlock failed for " + keyStr, b != null);

    if (prevOffset == b.getOffset()) {
      assertEquals(++expectedHitCount, brw.hitCount);
    } else {
      LOG.info("First key in a new block: " + keyStr + ", block offset: "
          + b.getOffset() + ")");
      assertTrue(b.getOffset() > prevOffset);
      assertEquals(++expectedMissCount, brw.missCount);
      prevOffset = b.getOffset();
    }
    ++i;
  }

  istream.close();
}
 
Developer: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines: 51 | Source: TestHFileBlockIndex.java

Example 9: readIndex

import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
public void readIndex(boolean useTags) throws IOException {
  long fileSize = fs.getFileStatus(path).getLen();
  LOG.info("Size of " + path + ": " + fileSize);

  FSDataInputStream istream = fs.open(path);
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(true)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTags)
                      .withCompression(compr)
                      .build();
  HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(istream, fs.getFileStatus(path)
      .getLen(), meta);

  BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
  HFileBlockIndex.BlockIndexReader indexReader =
      new HFileBlockIndex.BlockIndexReader(
          KeyValue.RAW_COMPARATOR, numLevels, brw);

  indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
      fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);

  long prevOffset = -1;
  int i = 0;
  int expectedHitCount = 0;
  int expectedMissCount = 0;
  LOG.info("Total number of keys: " + keys.size());
  for (byte[] key : keys) {
    assertTrue(key != null);
    assertTrue(indexReader != null);
    HFileBlock b = indexReader.seekToDataBlock(key, 0, key.length, null,
        true, true, false);
    if (Bytes.BYTES_RAWCOMPARATOR.compare(key, firstKeyInFile) < 0) {
      assertTrue(b == null);
      ++i;
      continue;
    }

    String keyStr = "key #" + i + ", " + Bytes.toStringBinary(key);

    assertTrue("seekToDataBlock failed for " + keyStr, b != null);

    if (prevOffset == b.getOffset()) {
      assertEquals(++expectedHitCount, brw.hitCount);
    } else {
      LOG.info("First key in a new block: " + keyStr + ", block offset: "
          + b.getOffset() + ")");
      assertTrue(b.getOffset() > prevOffset);
      assertEquals(++expectedMissCount, brw.missCount);
      prevOffset = b.getOffset();
    }
    ++i;
  }

  istream.close();
}
 
Developer: tenggyut | Project: HIndex | Lines: 57 | Source: TestHFileBlockIndex.java

Example 10: readIndex

import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
private void readIndex(boolean useTags) throws IOException {
  long fileSize = fs.getFileStatus(path).getLen();
  LOG.info("Size of " + path + ": " + fileSize);

  FSDataInputStream istream = fs.open(path);
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(true)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTags)
                      .withCompression(compr)
                      .build();
  HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(istream, fs.getFileStatus(path)
      .getLen(), meta);

  BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
  HFileBlockIndex.BlockIndexReader indexReader =
      new HFileBlockIndex.CellBasedKeyBlockIndexReader(
          CellComparatorImpl.COMPARATOR, numLevels, brw);

  indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
      fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);

  long prevOffset = -1;
  int i = 0;
  int expectedHitCount = 0;
  int expectedMissCount = 0;
  LOG.info("Total number of keys: " + keys.size());
  for (byte[] key : keys) {
    assertTrue(key != null);
    assertTrue(indexReader != null);
    KeyValue.KeyOnlyKeyValue keyOnlyKey = new KeyValue.KeyOnlyKeyValue(key, 0, key.length);
    HFileBlock b =
        indexReader.seekToDataBlock(keyOnlyKey, null, true,
          true, false, null);
    if (PrivateCellUtil.compare(CellComparatorImpl.COMPARATOR, keyOnlyKey, firstKeyInFile, 0,
      firstKeyInFile.length) < 0) {
      assertTrue(b == null);
      ++i;
      continue;
    }

    String keyStr = "key #" + i + ", " + Bytes.toStringBinary(key);

    assertTrue("seekToDataBlock failed for " + keyStr, b != null);

    if (prevOffset == b.getOffset()) {
      assertEquals(++expectedHitCount, brw.hitCount);
    } else {
      LOG.info("First key in a new block: " + keyStr + ", block offset: "
          + b.getOffset() + ")");
      assertTrue(b.getOffset() > prevOffset);
      assertEquals(++expectedMissCount, brw.missCount);
      prevOffset = b.getOffset();
    }
    ++i;
  }

  istream.close();
}
 
Developer: apache | Project: hbase | Lines: 60 | Source: TestHFileBlockIndex.java

Example 11: readIndex

import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
public void readIndex(boolean useTags) throws IOException {
  long fileSize = fs.getFileStatus(path).getLen();
  LOG.info("Size of " + path + ": " + fileSize);

  FSDataInputStream istream = fs.open(path);
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(true)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTags)
                      .withCompression(compr)
                      .build();
  HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(istream, fs.getFileStatus(path)
      .getLen(), meta);

  BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
  HFileBlockIndex.BlockIndexReader indexReader =
      new HFileBlockIndex.BlockIndexReader(
          KeyValue.RAW_COMPARATOR, numLevels, brw);

  indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
      fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);

  long prevOffset = -1;
  int i = 0;
  int expectedHitCount = 0;
  int expectedMissCount = 0;
  LOG.info("Total number of keys: " + keys.size());
  for (byte[] key : keys) {
    assertTrue(key != null);
    assertTrue(indexReader != null);
    HFileBlock b = indexReader.seekToDataBlock(new KeyValue.KeyOnlyKeyValue(key, 0, key.length),
        null,
        true, true, false, null);
    if (Bytes.BYTES_RAWCOMPARATOR.compare(key, firstKeyInFile) < 0) {
      assertTrue(b == null);
      ++i;
      continue;
    }

    String keyStr = "key #" + i + ", " + Bytes.toStringBinary(key);

    assertTrue("seekToDataBlock failed for " + keyStr, b != null);

    if (prevOffset == b.getOffset()) {
      assertEquals(++expectedHitCount, brw.hitCount);
    } else {
      LOG.info("First key in a new block: " + keyStr + ", block offset: "
          + b.getOffset() + ")");
      assertTrue(b.getOffset() > prevOffset);
      assertEquals(++expectedMissCount, brw.missCount);
      prevOffset = b.getOffset();
    }
    ++i;
  }

  istream.close();
}
 
Developer: shenli-uiuc | Project: PyroDB | Lines: 58 | Source: TestHFileBlockIndex.java

Example 12: readIndex

import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader; // import the required package/class
public void readIndex() throws IOException {
  long fileSize = fs.getFileStatus(path).getLen();
  LOG.info("Size of " + path + ": " + fileSize);

  FSDataInputStream istream = fs.open(path);
  HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(istream,
      compr, fs.getFileStatus(path).getLen());

  BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
  HFileBlockIndex.BlockIndexReader indexReader =
      new HFileBlockIndex.BlockIndexReader(
          KeyValue.RAW_COMPARATOR, numLevels, brw);

  indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
      fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);

  long prevOffset = -1;
  int i = 0;
  int expectedHitCount = 0;
  int expectedMissCount = 0;
  LOG.info("Total number of keys: " + keys.size());
  for (byte[] key : keys) {
    assertTrue(key != null);
    assertTrue(indexReader != null);
    HFileBlock b = indexReader.seekToDataBlock(key, 0, key.length, null,
        true, true, false);
    if (Bytes.BYTES_RAWCOMPARATOR.compare(key, firstKeyInFile) < 0) {
      assertTrue(b == null);
      ++i;
      continue;
    }

    String keyStr = "key #" + i + ", " + Bytes.toStringBinary(key);

    assertTrue("seekToDataBlock failed for " + keyStr, b != null);

    if (prevOffset == b.getOffset()) {
      assertEquals(++expectedHitCount, brw.hitCount);
    } else {
      LOG.info("First key in a new block: " + keyStr + ", block offset: "
          + b.getOffset() + ")");
      assertTrue(b.getOffset() > prevOffset);
      assertEquals(++expectedMissCount, brw.missCount);
      prevOffset = b.getOffset();
    }
    ++i;
  }

  istream.close();
}
 
Developer: cloud-software-foundation | Project: c5 | Lines: 51 | Source: TestHFileBlockIndex.java


Note: the org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors, and copyright of the source code remains with those authors; consult each project's License before redistributing or using the code. Do not reproduce without permission.