

Java CellComparator.compareRows Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.CellComparator.compareRows. If you are wondering what CellComparator.compareRows does, how to call it, or what it looks like in practice, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.CellComparator.


Five code examples of the CellComparator.compareRows method are shown below, drawn from open source projects and ordered by popularity.
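Before the examples, here is a minimal sketch of the method's contract. It assumes the HBase 1.x-era static API that the examples below use (in later HBase releases, compareRows became an instance method on the CellComparator interface); the class and variable names are illustrative. The return value follows the usual comparator convention: negative, zero, or positive as the first cell's row sorts before, equal to, or after the second cell's row.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class CompareRowsSketch {
  public static void main(String[] args) {
    // Two cells with different row keys; the values play no part in row comparison.
    Cell a = CellUtil.createCell(Bytes.toBytes("row-a"), Bytes.toBytes("v1"));
    Cell b = CellUtil.createCell(Bytes.toBytes("row-b"), Bytes.toBytes("v2"));
    System.out.println(CellComparator.compareRows(a, b) < 0);  // true: "row-a" sorts first
    System.out.println(CellComparator.compareRows(a, a) == 0); // true: same row key
  }
}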

Example 1: doSmokeTest

import org.apache.hadoop.hbase.CellComparator; // import the class the method depends on
public static void doSmokeTest(FileSystem fs, Path path, String codec)
    throws Exception {
  Configuration conf = HBaseConfiguration.create();
  HFileContext context = new HFileContextBuilder()
                         .withCompression(AbstractHFileWriter.compressionByName(codec)).build();
  HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  // Write any-old Cell...
  final byte [] rowKey = Bytes.toBytes("compressiontestkey");
  Cell c = CellUtil.createCell(rowKey, Bytes.toBytes("compressiontestval"));
  writer.append(c);
  writer.appendFileInfo(Bytes.toBytes("compressioninfokey"), Bytes.toBytes("compressioninfoval"));
  writer.close();
  Cell cc = null;
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    HFileScanner scanner = reader.getScanner(false, true);
    scanner.seekTo(); // position to the start of file
    // Scanner does not do Cells yet. Do below for now till fixed.
    cc = scanner.getKeyValue();
    if (CellComparator.compareRows(c, cc) != 0) {
      throw new Exception("Read back incorrect result: " + c.toString() + " vs " + cc.toString());
    }
  } finally {
    reader.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 31, Source: CompressionTest.java
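For context, CompressionTest drives doSmokeTest from its main method; a hypothetical invocation might look like the sketch below. The filesystem, path, and codec name are illustrative, not taken from the original.

Configuration conf = HBaseConfiguration.create();
FileSystem fs = FileSystem.get(conf);                      // local or HDFS filesystem
doSmokeTest(fs, new Path("/tmp/compression-smoke"), "gz"); // "gz" = gzip codec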

Example 2: compare

import org.apache.hadoop.hbase.CellComparator; // import the class the method depends on
/**
 * Compare two Cells considering reversed scanner.
 * ReversedScanner only reverses rows, not columns.
 */
private int compare(Cell a, Cell b) {
  int r = 0;
  if (currentRegion != null && currentRegion.isMetaRegion()) {
    r = metaComparator.compareRows(a, b);
  } else {
    r = CellComparator.compareRows(a, b);
  }
  if (r != 0) {
    return this.scan.isReversed() ? -r : r;
  }
  return CellComparator.compareWithoutRow(a, b);
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: ClientScanner.java
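The sign flip is the key detail: in a reversed scan the row order is inverted, while ordering within a row (family, qualifier, timestamp) is untouched, which is why compareWithoutRow is returned without negation. A minimal sketch of the effect, reusing the imports from the sketch above (row keys are illustrative):

Cell a = CellUtil.createCell(Bytes.toBytes("row1"), Bytes.toBytes("v"));
Cell b = CellUtil.createCell(Bytes.toBytes("row2"), Bytes.toBytes("v"));
int forward = CellComparator.compareRows(a, b); // < 0: row1 comes first in a forward scan
int reversed = -forward;                        // > 0: row2 comes first when scan.isReversed()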

Example 3: assertBulkLoadHFileOk

import org.apache.hadoop.hbase.CellComparator; // import the class the method depends on
@Override public void assertBulkLoadHFileOk(Path srcPath) throws IOException {
  HFile.Reader reader = null;
  try {
    LOG.info(
        "Validating hfile at " + srcPath + " for inclusion in " + "store " + this + " region "
            + this.getRegionInfo().getRegionNameAsString());
    reader = HFile.createReader(srcPath.getFileSystem(conf), srcPath, cacheConf, conf);
    reader.loadFileInfo();

    byte[] firstKey = reader.getFirstRowKey();
    Preconditions.checkState(firstKey != null, "First key can not be null");
    byte[] lk = reader.getLastKey();
    Preconditions.checkState(lk != null, "Last key can not be null");
    byte[] lastKey = KeyValue.createKeyValueFromKey(lk).getRow();

    LOG.debug("HFile bounds: first=" + Bytes.toStringBinary(firstKey) + " last=" + Bytes
        .toStringBinary(lastKey));
    LOG.debug(
        "Region bounds: first=" + Bytes.toStringBinary(getRegionInfo().getStartKey()) + " last="
            + Bytes.toStringBinary(getRegionInfo().getEndKey()));

    if (!this.getRegionInfo().containsRange(firstKey, lastKey)) {
      throw new WrongRegionException(
          "Bulk load file " + srcPath.toString() + " does not fit inside region " + this
              .getRegionInfo().getRegionNameAsString());
    }

    if (reader.length() > conf
        .getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE)) {
      LOG.warn(
          "Trying to bulk load hfile " + srcPath.toString() + " with size: " + reader.length()
              + " bytes can be problematic as it may lead to oversplitting.");
    }

    if (verifyBulkLoads) {
      long verificationStartTime = EnvironmentEdgeManager.currentTime();
      LOG.info("Full verification started for bulk load hfile: " + srcPath.toString());
      Cell prevCell = null;
      HFileScanner scanner = reader.getScanner(false, false, false);
      scanner.seekTo();
      do {
        Cell cell = scanner.getKeyValue();
        if (prevCell != null) {
          if (CellComparator.compareRows(prevCell, cell) > 0) {
            throw new InvalidHFileException(
                "Previous row is greater than" + " current row: path=" + srcPath + " previous="
                    + CellUtil.getCellKeyAsString(prevCell) + " current=" + CellUtil
                    .getCellKeyAsString(cell));
          }
          if (CellComparator.compareFamilies(prevCell, cell) != 0) {
            throw new InvalidHFileException(
                "Previous key had different" + " family compared to current key: path=" + srcPath
                    + " previous=" + Bytes
                    .toStringBinary(prevCell.getFamilyArray(), prevCell.getFamilyOffset(),
                        prevCell.getFamilyLength()) + " current=" + Bytes
                    .toStringBinary(cell.getFamilyArray(), cell.getFamilyOffset(),
                        cell.getFamilyLength()));
          }
        }
        prevCell = cell;
      } while (scanner.next());
      LOG.info(
          "Full verification complete for bulk load hfile: " + srcPath.toString() + " took " + (
              EnvironmentEdgeManager.currentTime() - verificationStartTime) + " ms");
    }
  } finally {
    if (reader != null) reader.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 70, Source: HStore.java
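Note that the per-cell verification only runs when verifyBulkLoads is set. In upstream HBase this flag is read in HStore's constructor from the configuration key "hbase.hstore.bulkload.verify" (assumed here; a fork such as ditb may differ), so enabling it would look roughly like:

Configuration conf = HBaseConfiguration.create();
conf.setBoolean("hbase.hstore.bulkload.verify", true); // assumed key, per upstream HStore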

Example 4: scanKeysValues

import org.apache.hadoop.hbase.CellComparator; // import the class the method depends on
private void scanKeysValues(Path file, KeyValueStatsCollector fileStats,
    HFileScanner scanner, byte[] row) throws IOException {
  Cell pCell = null;
  do {
    Cell cell = scanner.getKeyValue();
    if (row != null && row.length != 0) {
      // Limit output to the requested row: skip cells that sort before it
      // and stop once the scan has moved past it.
      int result = CellComparator.compareRows(cell.getRowArray(), cell.getRowOffset(),
          cell.getRowLength(), row, 0, row.length);
      if (result > 0) {
        break;    // past the requested row
      } else if (result < 0) {
        continue; // before the requested row; continue jumps to scanner.next()
      }
    }
    // collect stats
    if (printStats) {
      fileStats.collect(cell);
    }
    // dump key value
    if (printKey) {
      System.out.print("K: " + cell);
      if (printValue) {
        System.out.print(" V: "
            + Bytes.toStringBinary(cell.getValueArray(), cell.getValueOffset(),
                cell.getValueLength()));
        int i = 0;
        List<Tag> tags = Tag.asList(cell.getTagsArray(), cell.getTagsOffset(),
            cell.getTagsLength());
        for (Tag tag : tags) {
          System.out.print(String.format(" T[%d]: %s", i++,
              Bytes.toStringBinary(tag.getBuffer(), tag.getTagOffset(), tag.getTagLength())));
        }
      }
      System.out.println();
    }
    // check if rows are in order
    if (checkRow && pCell != null) {
      if (CellComparator.compareRows(pCell, cell) > 0) {
        System.err.println("WARNING, previous row is greater then"
            + " current row\n\tfilename -> " + file + "\n\tprevious -> "
            + CellUtil.getCellKeyAsString(pCell) + "\n\tcurrent  -> "
            + CellUtil.getCellKeyAsString(cell));
      }
    }
    // check if families are consistent
    if (checkFamily) {
      String fam = Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(),
          cell.getFamilyLength());
      if (!file.toString().contains(fam)) {
        System.err.println("WARNING, filename does not match kv family,"
            + "\n\tfilename -> " + file + "\n\tkeyvalue -> "
            + CellUtil.getCellKeyAsString(cell));
      }
      if (pCell != null && CellComparator.compareFamilies(pCell, cell) != 0) {
        System.err.println("WARNING, previous kv has different family"
            + " compared to current key\n\tfilename -> " + file
            + "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell)
            + "\n\tcurrent  -> " + CellUtil.getCellKeyAsString(cell));
      }
    }
    pCell = cell;
    ++count;
  } while (scanner.next());
}
 
Developer: fengchen8086, Project: ditb, Lines: 65, Source: HFilePrettyPrinter.java

Example 5: verifyAllEditsMadeItIn

import org.apache.hadoop.hbase.CellComparator; // import the class the method depends on
/**
 * @param fs filesystem the recovered-edits file lives on
 * @param conf configuration used to open the WAL reader
 * @param edits path to the recovered-edits file
 * @param region the region the edits should have been applied to
 * @return how many edits were seen
 * @throws IOException
 */
private int verifyAllEditsMadeItIn(final FileSystem fs, final Configuration conf,
    final Path edits, final HRegion region) throws IOException {
  int count = 0;
  // Based on HRegion#replayRecoveredEdits
  WAL.Reader reader = null;
  try {
    reader = WALFactory.createReader(fs, edits, conf);
    WAL.Entry entry;
    while ((entry = reader.next()) != null) {
      WALKey key = entry.getKey();
      WALEdit val = entry.getEdit();
      count++;
      // Check this edit is for this region.
      if (!Bytes.equals(key.getEncodedRegionName(),
          region.getRegionInfo().getEncodedNameAsBytes())) {
        continue;
      }
      Cell previous = null;
      for (Cell cell: val.getCells()) {
        if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) continue;
        if (previous != null && CellComparator.compareRows(previous, cell) == 0) continue;
        previous = cell;
        Get g = new Get(CellUtil.cloneRow(cell));
        Result r = region.get(g);
        boolean found = false;
        for (CellScanner scanner = r.cellScanner(); scanner.advance();) {
          Cell current = scanner.current();
          if (CellComparator.compare(cell, current, true) == 0) {
            found = true;
            break;
          }
        }
        assertTrue("Failed to find " + cell, found);
      }
    }
  } finally {
    if (reader != null) reader.close();
  }
  return count;
}
 
Developer: fengchen8086, Project: ditb, Lines: 50, Source: TestRecoveredEdits.java


Note: The org.apache.hadoop.hbase.CellComparator.compareRows method examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open source projects contributed by the community, and copyright remains with the original authors. For use and distribution, refer to the corresponding project's License; do not reproduce without permission.