

Java KeyValue.RAW_COMPARATOR Field Code Examples

This article collects typical usage examples of the org.apache.hadoop.hbase.KeyValue.RAW_COMPARATOR field in Java. If you are wondering what KeyValue.RAW_COMPARATOR is for and how to use it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.KeyValue.


Five code examples of the KeyValue.RAW_COMPARATOR field are shown below, sorted by popularity by default.
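Before diving into the examples, it may help to see what the field itself does: KeyValue.RAW_COMPARATOR compares serialized keys as opaque byte arrays, lexicographically, without decoding any KeyValue structure. Below is a minimal sketch, assuming an HBase 1.x-era classpath like the ditb project these examples come from; the class name RawComparatorDemo is hypothetical.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical demo class, not taken from the examples below.
public class RawComparatorDemo {
  public static void main(String[] args) {
    byte[] a = Bytes.toBytes("row-a");
    byte[] b = Bytes.toBytes("row-b");
    // RAW_COMPARATOR compares the raw key bytes lexicographically.
    int cmp = KeyValue.RAW_COMPARATOR.compareFlatKey(a, b);
    System.out.println(cmp < 0); // true: "row-a" sorts before "row-b"
  }
}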

Example 1: createDeleteBloomAtWrite

/**
 * Creates a new Delete Family Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 * @param conf the store file configuration to read Bloom filter settings from
 * @param cacheConf the cache configuration, controlling whether Bloom blocks
 *        are cached on write
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null if Bloom filters are disabled or one
 *         could not be created
 */
public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, int maxKeys, HFile.Writer writer) {
  if (!isDeleteFamilyBloomEnabled(conf)) {
    LOG.info("Delete Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  }

  float err = getErrorRate(conf);

  int maxFold = getMaxFold(conf);
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      KeyValue.RAW_COMPARATOR);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
 
Author: fengchen8086 | Project: ditb | Lines: 30 | Source: BloomFilterFactory.java
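For context on how this factory method is consumed, here is a hedged sketch of a call site. The helper name attachDeleteBloom is hypothetical; the imports assume the HBase 1.x package layout used by ditb, where BloomFilterFactory and BloomFilterWriter live in org.apache.hadoop.hbase.util.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.BloomFilterWriter;

// Hypothetical helper illustrating the contract of createDeleteBloomAtWrite.
final class DeleteBloomHelper {
  static BloomFilterWriter attachDeleteBloom(Configuration conf, CacheConfig cacheConf,
      HFile.Writer writer, int estimatedKeys) {
    BloomFilterWriter bloom =
        BloomFilterFactory.createDeleteBloomAtWrite(conf, cacheConf, estimatedKeys, writer);
    // Null means delete-family Blooms are disabled; otherwise the factory has
    // already registered the Bloom writer as an inline block writer.
    return bloom;
  }
}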

Example 2: testHeapSizeForBlockIndex

/** Checks if the HeapSize calculator is within reason */
@Test
public void testHeapSizeForBlockIndex() throws IOException {
  Class<HFileBlockIndex.BlockIndexReader> cl =
      HFileBlockIndex.BlockIndexReader.class;
  long expected = ClassSize.estimateBase(cl, false);

  HFileBlockIndex.BlockIndexReader bi =
      new HFileBlockIndex.BlockIndexReader(KeyValue.RAW_COMPARATOR, 1);
  long actual = bi.heapSize();

  // Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets,
  // int [] blockDataSizes) are all null they are not going to show up in the
  // HeapSize calculation, so need to remove those array costs from expected.
  expected -= ClassSize.align(3 * ClassSize.ARRAY);

  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
}
 
Author: fengchen8086 | Project: ditb | Lines: 21 | Source: TestHFileBlockIndex.java
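The test leans on HBase's ClassSize utility; the standalone sketch below (the class name HeapAccountingDemo is illustrative) shows the same accounting pattern, assuming org.apache.hadoop.hbase.util.ClassSize is available.

import org.apache.hadoop.hbase.util.ClassSize;

// Hypothetical demo mirroring the adjustment the test makes for null arrays.
public class HeapAccountingDemo {
  public static void main(String[] args) {
    // estimateBase sums the object header plus per-field costs via reflection.
    long base = ClassSize.estimateBase(Object.class, false);
    // align(...) rounds up to the JVM object alignment; the test subtracts
    // three aligned array costs because the index's arrays are still null.
    long nullArrays = ClassSize.align(3 * ClassSize.ARRAY);
    System.out.println("base=" + base + ", null-array adjustment=" + nullArrays);
  }
}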

Example 3: createGeneralBloomAtWrite

/**
 * Creates a new general (Row or RowCol) Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 *
 * @param conf the store file configuration to read Bloom filter settings from
 * @param cacheConf the cache configuration, controlling whether Bloom blocks
 *        are cached on write
 * @param bloomType the type of Bloom filter to create (e.g. ROW or ROWCOL)
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null in case Bloom filters are disabled
 *         or when failed to create one.
 */
public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, BloomType bloomType, int maxKeys,
    HFile.Writer writer) {
  if (!isGeneralBloomEnabled(conf)) {
    LOG.trace("Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  } else if (bloomType == BloomType.NONE) {
    LOG.trace("Bloom filter is turned off for the column family");
    return null;
  }

  float err = getErrorRate(conf);

  // In the case of row/column Bloom filter lookups, each lookup is an OR of
  // two separate lookups. Therefore, if each lookup's false positive rate is
  // p, the resulting false positive rate is err = 1 - (1 - p)^2, so
  // p = 1 - sqrt(1 - err).
  if (bloomType == BloomType.ROWCOL) {
    err = (float) (1 - Math.sqrt(1 - err));
  }

  int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD,
      MAX_ALLOWED_FOLD_FACTOR);

  // Do we support compound bloom filters?
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      bloomType == BloomType.ROWCOL ? KeyValue.COMPARATOR : KeyValue.RAW_COMPARATOR);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
 
Author: fengchen8086 | Project: ditb | Lines: 47 | Source: BloomFilterFactory.java
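The per-lookup error-rate adjustment in the ROWCOL branch is easy to sanity-check numerically. The standalone snippet below (the class name is illustrative, and no HBase dependency is needed) confirms that p = 1 - sqrt(1 - err) recombines to the configured rate under the OR-of-two-lookups model.

// Hypothetical arithmetic check of the ROWCOL error-rate derivation.
public class RowColErrorRateCheck {
  public static void main(String[] args) {
    double err = 0.01;                        // configured overall false positive rate
    double p = 1 - Math.sqrt(1 - err);        // per-lookup rate, as computed above
    double combined = 1 - (1 - p) * (1 - p);  // OR of two independent lookups
    System.out.printf("p=%.6f combined=%.6f%n", p, combined); // combined ≈ 0.01
  }
}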

Example 4: getComparator

@Override
public KVComparator getComparator() {
  // return Bytes.BYTES_RAWCOMPARATOR;
  return KeyValue.RAW_COMPARATOR;
}
 
Author: fengchen8086 | Project: ditb | Lines: 5 | Source: ByteBloomFilter.java
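A brief note on the design: ByteBloomFilter treats its keys as opaque byte sequences, so a raw lexicographic comparison suffices. The commented-out Bytes.BYTES_RAWCOMPARATOR line suggests any raw byte comparator would be semantically equivalent; KeyValue.RAW_COMPARATOR is presumably used because it satisfies the KVComparator return type.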

Example 5: readIndex

private void readIndex(boolean useTags) throws IOException {
  long fileSize = fs.getFileStatus(path).getLen();
  LOG.info("Size of " + path + ": " + fileSize);

  FSDataInputStream istream = fs.open(path);
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(true)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTags)
                      .withCompression(compr)
                      .build();
  HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(istream, fs.getFileStatus(path)
      .getLen(), meta);

  BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
  HFileBlockIndex.BlockIndexReader indexReader =
      new HFileBlockIndex.BlockIndexReader(
          KeyValue.RAW_COMPARATOR, numLevels, brw);

  indexReader.readRootIndex(blockReader.blockRange(rootIndexOffset,
      fileSize).nextBlockWithBlockType(BlockType.ROOT_INDEX), numRootEntries);

  long prevOffset = -1;
  int i = 0;
  int expectedHitCount = 0;
  int expectedMissCount = 0;
  LOG.info("Total number of keys: " + keys.size());
  for (byte[] key : keys) {
    assertTrue(key != null);
    assertTrue(indexReader != null);
    HFileBlock b =
        indexReader.seekToDataBlock(new KeyValue.KeyOnlyKeyValue(key, 0, key.length), null, true,
          true, false, null);
    if (KeyValue.COMPARATOR.compareFlatKey(key, firstKeyInFile) < 0) {
      assertTrue(b == null);
      ++i;
      continue;
    }

    String keyStr = "key #" + i + ", " + Bytes.toStringBinary(key);

    assertTrue("seekToDataBlock failed for " + keyStr, b != null);

    if (prevOffset == b.getOffset()) {
      assertEquals(++expectedHitCount, brw.hitCount);
    } else {
      LOG.info("First key in a new block: " + keyStr + ", block offset: "
          + b.getOffset() + ")");
      assertTrue(b.getOffset() > prevOffset);
      assertEquals(++expectedMissCount, brw.missCount);
      prevOffset = b.getOffset();
    }
    ++i;
  }

  istream.close();
}
 
Author: fengchen8086 | Project: ditb | Lines: 57 | Source: TestHFileBlockIndex.java
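Reading the assertion logic: BlockReaderWrapper is a wrapper defined in the test source that counts block reads. When consecutive keys resolve to the same data block (prevOffset == b.getOffset()), the wrapper is expected to serve its cached block, incrementing hitCount; a new block offset should trigger exactly one real read, incrementing missCount. Tracking prevOffset is what lets the test assert both counters precisely.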


Note: the org.apache.hadoop.hbase.KeyValue.RAW_COMPARATOR examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any use or distribution should follow the corresponding project's license. Do not reproduce without permission.