

Java HFilePrettyPrinter Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter. If you are wondering what HFilePrettyPrinter does, or how and where to use it, the curated class examples below should help.


HFilePrettyPrinter belongs to the org.apache.hadoop.hbase.io.hfile package. Eight code examples of the class are shown below, sorted by popularity by default.
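Before the collected examples, here is a minimal standalone sketch of the usage pattern they all share: construct an HFilePrettyPrinter and hand it command-line-style flags. This is an illustrative sketch, not code from any of the projects below; the Configuration-taking constructor appears in newer HBase versions (as in Examples 1 and 6), older releases use the no-argument form (Examples 2 through 5, 7 and 8), and the HFile path is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter;

public class HFileDumpExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HFilePrettyPrinter printer = new HFilePrettyPrinter(conf);
    // -m prints the file's metadata (trailer and file info),
    // -p prints every key/value pair, -f names the HFile to inspect.
    // The path below is a placeholder, not a real file.
    int exitCode = printer.run(new String[] {
        "-m", "-p", "-f", "/hbase/data/default/mytable/r1/cf/myhfile" });
    System.exit(exitCode);
  }
}

The same dump is also available from the shell; in recent HBase releases it is typically invoked as "hbase hfile -m -p -f <path>".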

Example 1: scanColSet

import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter; // import the required package/class
private void scanColSet(int[] colSet, int[] expectedResultCols)
    throws IOException {
  LOG.info("Scanning column set: " + Arrays.toString(colSet));
  Scan scan = new Scan(ROW_BYTES, ROW_BYTES);
  addColumnSetToScan(scan, colSet);
  RegionScannerImpl scanner = (RegionScannerImpl) region.getScanner(scan);
  KeyValueHeap storeHeap = scanner.getStoreHeapForTesting();
  assertEquals(0, storeHeap.getHeap().size());
  StoreScanner storeScanner =
      (StoreScanner) storeHeap.getCurrentForTesting();
  @SuppressWarnings({ "unchecked", "rawtypes" })
  List<StoreFileScanner> scanners = (List<StoreFileScanner>)
      (List) storeScanner.getAllScannersForTesting();

  // Sort scanners by their HFile's modification time.
  Collections.sort(scanners, new Comparator<StoreFileScanner>() {
    @Override
    public int compare(StoreFileScanner s1, StoreFileScanner s2) {
      Path p1 = s1.getReader().getHFileReader().getPath();
      Path p2 = s2.getReader().getHFileReader().getPath();
      long t1, t2;
      try {
        t1 = fs.getFileStatus(p1).getModificationTime();
        t2 = fs.getFileStatus(p2).getModificationTime();
      } catch (IOException ex) {
        throw new RuntimeException(ex);
      }
      return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
    }
  });

  StoreFile.Reader lastStoreFileReader = null;
  for (StoreFileScanner sfScanner : scanners)
    lastStoreFileReader = sfScanner.getReader();

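  // Dump the last (newest) store file: -m prints its metadata, -p its key/value pairs.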
  new HFilePrettyPrinter(conf).run(new String[]{ "-m", "-p", "-f",
      lastStoreFileReader.getHFileReader().getPath().toString()});

  // Disable Bloom filter for the last store file. The disabled Bloom filter
  // will always return "true".
  LOG.info("Disabling Bloom filter for: "
      + lastStoreFileReader.getHFileReader().getName());
  lastStoreFileReader.disableBloomFilterForTesting();

  List<Cell> allResults = new ArrayList<Cell>();

  { // Limit the scope of results.
    List<Cell> results = new ArrayList<Cell>();
    while (scanner.next(results) || results.size() > 0) {
      allResults.addAll(results);
      results.clear();
    }
  }

  List<Integer> actualIds = new ArrayList<Integer>();
  for (Cell kv : allResults) {
    String qual = Bytes.toString(CellUtil.cloneQualifier(kv));
    assertTrue(qual.startsWith(QUALIFIER_PREFIX));
    actualIds.add(Integer.valueOf(qual.substring(
        QUALIFIER_PREFIX.length())));
  }
  List<Integer> expectedIds = new ArrayList<Integer>();
  for (int expectedId : expectedResultCols)
    expectedIds.add(expectedId);

  LOG.info("Column ids returned: " + actualIds + ", expected: "
      + expectedIds);
  assertEquals(expectedIds.toString(), actualIds.toString());
}
 
Developer: fengchen8086, Project: ditb, Lines: 70, Source: TestScanWithBloomError.java

Example 2: runMergeWorkload

import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter; // import the required package/class
public void runMergeWorkload() throws IOException {
  long maxKeyCount = prepareForMerge();

  List<StoreFileScanner> scanners =
      StoreFileScanner.getScannersForStoreFiles(inputStoreFiles, false,
          false);

  HColumnDescriptor columnDescriptor = new HColumnDescriptor(
      HFileReadWriteTest.class.getSimpleName());
  columnDescriptor.setBlocksize(blockSize);
  columnDescriptor.setBloomFilterType(bloomType);
  columnDescriptor.setCompressionType(compression);
  columnDescriptor.setDataBlockEncoding(dataBlockEncoding);
  HRegionInfo regionInfo = new HRegionInfo();
  HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
  HRegion region = new HRegion(outputDir, null, fs, conf, regionInfo, htd,
      null);
  Store store = new Store(outputDir, region, columnDescriptor, fs, conf);

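  // Build the output writer with the same block size, compression, encoding, and Bloom type as the column descriptor above.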
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf,
      new CacheConfig(conf), fs, blockSize)
          .withOutputDir(outputDir)
          .withCompression(compression)
          .withDataBlockEncoder(dataBlockEncoder)
          .withBloomType(bloomType)
          .withMaxKeyCount(maxKeyCount)
          .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
          .withBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
          .build();

  StatisticsPrinter statsPrinter = new StatisticsPrinter();
  statsPrinter.startThread();

  try {
    performMerge(scanners, store, writer);
    writer.close();
  } finally {
    statsPrinter.requestStop();
  }

  Path resultPath = writer.getPath();

  resultPath = tryUsingSimpleOutputPath(resultPath);

  long fileSize = fs.getFileStatus(resultPath).getLen();
  LOG.info("Created " + resultPath + ", size " + fileSize);

  System.out.println();
  System.out.println("HFile information for " + resultPath);
  System.out.println();

  HFilePrettyPrinter hfpp = new HFilePrettyPrinter();
  hfpp.run(new String[] { "-m", "-f", resultPath.toString() });
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 55, Source: HFileReadWriteTest.java

Example 3: scanColSet

import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter; // import the required package/class
private void scanColSet(int[] colSet, int[] expectedResultCols)
    throws IOException {
  LOG.info("Scanning column set: " + Arrays.toString(colSet));
  Scan scan = new Scan(ROW_BYTES, ROW_BYTES);
  addColumnSetToScan(scan, colSet);
  RegionScannerImpl scanner = (RegionScannerImpl) region.getScanner(scan);
  KeyValueHeap storeHeap = scanner.getStoreHeapForTesting();
  assertEquals(0, storeHeap.getHeap().size());
  StoreScanner storeScanner =
      (StoreScanner) storeHeap.getCurrentForTesting();
  @SuppressWarnings({ "unchecked", "rawtypes" })
  List<StoreFileScanner> scanners = (List<StoreFileScanner>)
      (List) storeScanner.getAllScannersForTesting();

  // Sort scanners by their HFile's modification time.
  Collections.sort(scanners, new Comparator<StoreFileScanner>() {
    @Override
    public int compare(StoreFileScanner s1, StoreFileScanner s2) {
      Path p1 = s1.getReaderForTesting().getHFileReader().getPath();
      Path p2 = s2.getReaderForTesting().getHFileReader().getPath();
      long t1, t2;
      try {
        t1 = fs.getFileStatus(p1).getModificationTime();
        t2 = fs.getFileStatus(p2).getModificationTime();
      } catch (IOException ex) {
        throw new RuntimeException(ex);
      }
      return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
    }
  });

  StoreFile.Reader lastStoreFileReader = null;
  for (StoreFileScanner sfScanner : scanners)
    lastStoreFileReader = sfScanner.getReaderForTesting();

  new HFilePrettyPrinter().run(new String[]{ "-m", "-p", "-f",
      lastStoreFileReader.getHFileReader().getPath().toString()});

  // Disable Bloom filter for the last store file. The disabled Bloom filter
  // will always return "true".
  LOG.info("Disabling Bloom filter for: "
      + lastStoreFileReader.getHFileReader().getName());
  lastStoreFileReader.disableBloomFilterForTesting();

  List<KeyValue> allResults = new ArrayList<KeyValue>();

  { // Limit the scope of results.
    List<KeyValue> results = new ArrayList<KeyValue>();
    while (scanner.next(results) || results.size() > 0) {
      allResults.addAll(results);
      results.clear();
    }
  }

  List<Integer> actualIds = new ArrayList<Integer>();
  for (KeyValue kv : allResults) {
    String qual = Bytes.toString(kv.getQualifier());
    assertTrue(qual.startsWith(QUALIFIER_PREFIX));
    actualIds.add(Integer.valueOf(qual.substring(
        QUALIFIER_PREFIX.length())));
  }
  List<Integer> expectedIds = new ArrayList<Integer>();
  for (int expectedId : expectedResultCols)
    expectedIds.add(expectedId);

  LOG.info("Column ids returned: " + actualIds + ", expected: "
      + expectedIds);
  assertEquals(expectedIds.toString(), actualIds.toString());
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 70, Source: TestScanWithBloomError.java

Example 4: runMergeWorkload

import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter; // import the required package/class
public void runMergeWorkload() throws IOException {
  long maxKeyCount = prepareForMerge();

  HColumnDescriptor columnDescriptor = new HColumnDescriptor(
      HFileReadWriteTest.class.getSimpleName());
  columnDescriptor.setBlocksize(blockSize);
  columnDescriptor.setBloomFilterType(bloomType);
  columnDescriptor.setCompressionType(compression);
  columnDescriptor.setDataBlockEncoding(dataBlockEncoding);
  HRegionInfo regionInfo = new HRegionInfo();
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
  HRegion region = new HRegion(outputDir, null, fs, conf, regionInfo, htd, null);
  HStore store = new HStore(region, columnDescriptor, conf);

  List<StoreFileScanner> scanners =
      StoreFileScanner.getScannersForStoreFiles(inputStoreFiles, false,
          false, region.getReadpoint(IsolationLevel.READ_COMMITTED));

  StoreFile.Writer writer = store.createWriterInTmp(maxKeyCount, compression, false, true, false);

  StatisticsPrinter statsPrinter = new StatisticsPrinter();
  statsPrinter.startThread();

  try {
    performMerge(scanners, store, writer);
    writer.close();
  } finally {
    statsPrinter.requestStop();
  }

  Path resultPath = writer.getPath();

  resultPath = tryUsingSimpleOutputPath(resultPath);

  long fileSize = fs.getFileStatus(resultPath).getLen();
  LOG.info("Created " + resultPath + ", size " + fileSize);

  System.out.println();
  System.out.println("HFile information for " + resultPath);
  System.out.println();

  HFilePrettyPrinter hfpp = new HFilePrettyPrinter();
  hfpp.run(new String[] { "-m", "-f", resultPath.toString() });
}
 
Developer: tenggyut, Project: HIndex, Lines: 45, Source: HFileReadWriteTest.java

Example 5: scanColSet

import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter; // import the required package/class
private void scanColSet(int[] colSet, int[] expectedResultCols)
    throws IOException {
  LOG.info("Scanning column set: " + Arrays.toString(colSet));
  Scan scan = new Scan(ROW_BYTES, ROW_BYTES);
  addColumnSetToScan(scan, colSet);
  RegionScannerImpl scanner = (RegionScannerImpl) region.getScanner(scan);
  KeyValueHeap storeHeap = scanner.getStoreHeapForTesting();
  assertEquals(0, storeHeap.getHeap().size());
  StoreScanner storeScanner =
      (StoreScanner) storeHeap.getCurrentForTesting();
  @SuppressWarnings({ "unchecked", "rawtypes" })
  List<StoreFileScanner> scanners = (List<StoreFileScanner>)
      (List) storeScanner.getAllScannersForTesting();

  // Sort scanners by their HFile's modification time.
  Collections.sort(scanners, new Comparator<StoreFileScanner>() {
    @Override
    public int compare(StoreFileScanner s1, StoreFileScanner s2) {
      Path p1 = s1.getReaderForTesting().getHFileReader().getPath();
      Path p2 = s2.getReaderForTesting().getHFileReader().getPath();
      long t1, t2;
      try {
        t1 = fs.getFileStatus(p1).getModificationTime();
        t2 = fs.getFileStatus(p2).getModificationTime();
      } catch (IOException ex) {
        throw new RuntimeException(ex);
      }
      return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
    }
  });

  StoreFile.Reader lastStoreFileReader = null;
  for (StoreFileScanner sfScanner : scanners)
    lastStoreFileReader = sfScanner.getReaderForTesting();

  new HFilePrettyPrinter().run(new String[]{ "-m", "-p", "-f",
      lastStoreFileReader.getHFileReader().getPath().toString()});

  // Disable Bloom filter for the last store file. The disabled Bloom filter
  // will always return "true".
  LOG.info("Disabling Bloom filter for: "
      + lastStoreFileReader.getHFileReader().getName());
  lastStoreFileReader.disableBloomFilterForTesting();

  List<Cell> allResults = new ArrayList<Cell>();

  { // Limit the scope of results.
    List<Cell> results = new ArrayList<Cell>();
    while (scanner.next(results) || results.size() > 0) {
      allResults.addAll(results);
      results.clear();
    }
  }

  List<Integer> actualIds = new ArrayList<Integer>();
  for (Cell kv : allResults) {
    String qual = Bytes.toString(CellUtil.cloneQualifier(kv));
    assertTrue(qual.startsWith(QUALIFIER_PREFIX));
    actualIds.add(Integer.valueOf(qual.substring(
        QUALIFIER_PREFIX.length())));
  }
  List<Integer> expectedIds = new ArrayList<Integer>();
  for (int expectedId : expectedResultCols)
    expectedIds.add(expectedId);

  LOG.info("Column ids returned: " + actualIds + ", expected: "
      + expectedIds);
  assertEquals(expectedIds.toString(), actualIds.toString());
}
 
Developer: tenggyut, Project: HIndex, Lines: 70, Source: TestScanWithBloomError.java

Example 6: scanColSet

import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter; // import the required package/class
private void scanColSet(int[] colSet, int[] expectedResultCols)
    throws IOException {
  LOG.info("Scanning column set: " + Arrays.toString(colSet));
  Scan scan = new Scan(ROW_BYTES, ROW_BYTES);
  addColumnSetToScan(scan, colSet);
  RegionScannerImpl scanner = region.getScanner(scan);
  KeyValueHeap storeHeap = scanner.getStoreHeapForTesting();
  assertEquals(0, storeHeap.getHeap().size());
  StoreScanner storeScanner =
      (StoreScanner) storeHeap.getCurrentForTesting();
  @SuppressWarnings({ "unchecked", "rawtypes" })
  List<StoreFileScanner> scanners = (List<StoreFileScanner>)
      (List) storeScanner.getAllScannersForTesting();

  // Sort scanners by their HFile's modification time.
  Collections.sort(scanners, new Comparator<StoreFileScanner>() {
    @Override
    public int compare(StoreFileScanner s1, StoreFileScanner s2) {
      Path p1 = s1.getReader().getHFileReader().getPath();
      Path p2 = s2.getReader().getHFileReader().getPath();
      long t1, t2;
      try {
        t1 = fs.getFileStatus(p1).getModificationTime();
        t2 = fs.getFileStatus(p2).getModificationTime();
      } catch (IOException ex) {
        throw new RuntimeException(ex);
      }
      return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
    }
  });

  StoreFileReader lastStoreFileReader = null;
  for (StoreFileScanner sfScanner : scanners)
    lastStoreFileReader = sfScanner.getReader();

  new HFilePrettyPrinter(conf).run(new String[]{ "-m", "-p", "-f",
      lastStoreFileReader.getHFileReader().getPath().toString()});

  // Disable Bloom filter for the last store file. The disabled Bloom filter
  // will always return "true".
  LOG.info("Disabling Bloom filter for: "
      + lastStoreFileReader.getHFileReader().getName());
  lastStoreFileReader.disableBloomFilterForTesting();

  List<Cell> allResults = new ArrayList<>();

  { // Limit the scope of results.
    List<Cell> results = new ArrayList<>();
    while (scanner.next(results) || results.size() > 0) {
      allResults.addAll(results);
      results.clear();
    }
  }

  List<Integer> actualIds = new ArrayList<>();
  for (Cell kv : allResults) {
    String qual = Bytes.toString(CellUtil.cloneQualifier(kv));
    assertTrue(qual.startsWith(QUALIFIER_PREFIX));
    actualIds.add(Integer.valueOf(qual.substring(
        QUALIFIER_PREFIX.length())));
  }
  List<Integer> expectedIds = new ArrayList<>();
  for (int expectedId : expectedResultCols)
    expectedIds.add(expectedId);

  LOG.info("Column ids returned: " + actualIds + ", expected: "
      + expectedIds);
  assertEquals(expectedIds.toString(), actualIds.toString());
}
 
Developer: apache, Project: hbase, Lines: 70, Source: TestScanWithBloomError.java

Example 7: runMergeWorkload

import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter; // import the required package/class
public void runMergeWorkload() throws IOException {
  long maxKeyCount = prepareForMerge();

  List<StoreFileScanner> scanners =
      StoreFileScanner.getScannersForStoreFiles(inputStoreFiles, false,
          false);

  HColumnDescriptor columnDescriptor = new HColumnDescriptor(
      HFileReadWriteTest.class.getSimpleName());
  columnDescriptor.setBlocksize(blockSize);
  columnDescriptor.setBloomFilterType(bloomType);
  columnDescriptor.setCompressionType(compression);
  columnDescriptor.setDataBlockEncoding(dataBlockEncoding);
  HRegionInfo regionInfo = new HRegionInfo();
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
  HRegion region = new HRegion(outputDir, null, fs, conf, regionInfo, htd, null);
  HStore store = new HStore(region, columnDescriptor, conf);

  StoreFile.Writer writer = store.createWriterInTmp(maxKeyCount, compression, false, true);

  StatisticsPrinter statsPrinter = new StatisticsPrinter();
  statsPrinter.startThread();

  try {
    performMerge(scanners, store, writer);
    writer.close();
  } finally {
    statsPrinter.requestStop();
  }

  Path resultPath = writer.getPath();

  resultPath = tryUsingSimpleOutputPath(resultPath);

  long fileSize = fs.getFileStatus(resultPath).getLen();
  LOG.info("Created " + resultPath + ", size " + fileSize);

  System.out.println();
  System.out.println("HFile information for " + resultPath);
  System.out.println();

  HFilePrettyPrinter hfpp = new HFilePrettyPrinter();
  hfpp.run(new String[] { "-m", "-f", resultPath.toString() });
}
 
Developer: cloud-software-foundation, Project: c5, Lines: 45, Source: HFileReadWriteTest.java

Example 8: runMergeWorkload

import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter; // import the required package/class
public void runMergeWorkload() throws IOException {
  long maxKeyCount = prepareForMerge();

  List<StoreFileScanner> scanners =
      StoreFileScanner.getScannersForStoreFiles(inputStoreFiles, false,
          false);

  HColumnDescriptor columnDescriptor = new HColumnDescriptor(
      HFileReadWriteTest.class.getSimpleName());
  columnDescriptor.setBlocksize(blockSize);
  columnDescriptor.setBloomFilterType(bloomType);
  columnDescriptor.setCompressionType(compression);
  columnDescriptor.setDataBlockEncoding(dataBlockEncoding);
  HRegionInfo regionInfo = new HRegionInfo();
  HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
  HRegion region = new HRegion(outputDir, null, fs, conf, regionInfo, htd,
      null);
  HStore store = new HStore(outputDir, region, columnDescriptor, fs, conf);

  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf,
      new CacheConfig(conf), fs, blockSize)
          .withOutputDir(outputDir)
          .withCompression(compression)
          .withDataBlockEncoder(dataBlockEncoder)
          .withBloomType(bloomType)
          .withMaxKeyCount(maxKeyCount)
          .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
          .withBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
          .build();

  StatisticsPrinter statsPrinter = new StatisticsPrinter();
  statsPrinter.startThread();

  try {
    performMerge(scanners, store, writer);
    writer.close();
  } finally {
    statsPrinter.requestStop();
  }

  Path resultPath = writer.getPath();

  resultPath = tryUsingSimpleOutputPath(resultPath);

  long fileSize = fs.getFileStatus(resultPath).getLen();
  LOG.info("Created " + resultPath + ", size " + fileSize);

  System.out.println();
  System.out.println("HFile information for " + resultPath);
  System.out.println();

  HFilePrettyPrinter hfpp = new HFilePrettyPrinter();
  hfpp.run(new String[] { "-m", "-f", resultPath.toString() });
}
 
Developer: daidong, Project: DominoHBase, Lines: 55, Source: HFileReadWriteTest.java


Note: The org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not repost without permission.