

Java BloomType.ROW Code Examples

This article collects typical usage examples of the BloomType.ROW enum constant from org.apache.hadoop.hbase.regionserver in Java. If you are wondering what BloomType.ROW is for or how it is used in practice, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.regionserver.BloomType.


The sections below present 7 code examples that use BloomType.ROW, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system surface better Java code examples.
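
To put the examples in context, here is a minimal sketch of how BloomType.ROW is typically enabled on a column family when defining a table (assuming the HBase 2.x client API; the table and family names are placeholders):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class RowBloomTableExample {
  // Builds a table descriptor whose column family uses a row-key Bloom filter.
  public static TableDescriptor buildDescriptor() {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))              // placeholder family name
        .setBloomFilterType(BloomType.ROW)            // Bloom filter keyed on the row only
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_table"))  // placeholder table name
        .setColumnFamily(cf)
        .build();
  }
}

The resulting descriptor can then be passed to Admin.createTable(...). Note that BloomType.ROW is also the value the tools below fall back to when no bloom option is supplied on the command line.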

Example 1: parseColumnFamilyOptions

private void parseColumnFamilyOptions(CommandLine cmd) {
  String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
  dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
      DataBlockEncoding.valueOf(dataBlockEncodingStr);

  String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
  compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
      Compression.Algorithm.valueOf(compressStr);

  String bloomStr = cmd.getOptionValue(OPT_BLOOM);
  bloomType = bloomStr == null ? BloomType.ROW :
      BloomType.valueOf(bloomStr);

  inMemoryCF = cmd.hasOption(OPT_INMEMORY);
  if (cmd.hasOption(OPT_ENCRYPTION)) {
    cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
  }

}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source file: LoadTestTool.java

Example 2: createDeleteBloomAtWrite

/**
 * Creates a new Delete Family Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing.
 * @param conf the configuration that holds the Bloom filter settings
 * @param cacheConf the cache configuration for the store file being written
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null in case Bloom filters are disabled
 *         or when failed to create one.
 */
public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, int maxKeys, HFile.Writer writer) {
  if (!isDeleteFamilyBloomEnabled(conf)) {
    LOG.info("Delete Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  }

  float err = getErrorRate(conf);

  int maxFold = getMaxFold(conf);
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      null, BloomType.ROW);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
 
Developer ID: apache, Project: hbase, Lines of code: 30, Source file: BloomFilterFactory.java

Example 3: add

public void add(Cell cell) {
  /*
   * For faster hashing, use combinatorial generation
   * http://www.eecs.harvard.edu/~kirsch/pubs/bbbf/esa06.pdf
   */
  int hash1;
  int hash2;
  HashKey<Cell> hashKey;
  if (this.bloomType == BloomType.ROW) {
    hashKey = new RowBloomHashKey(cell);
    hash1 = this.hash.hash(hashKey, 0);
    hash2 = this.hash.hash(hashKey, hash1);
  } else {
    hashKey = new RowColBloomHashKey(cell);
    hash1 = this.hash.hash(hashKey, 0);
    hash2 = this.hash.hash(hashKey, hash1);
  }
  setHashLoc(hash1, hash2);
}
 
Developer ID: apache, Project: hbase, Lines of code: 19, Source file: BloomFilterChunk.java
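
The "combinatorial generation" cited in the comment above refers to the Kirsch–Mitzenmacher double-hashing scheme: only two base hashes are computed per cell, and the i-th probe position is derived as hash1 + i * hash2 modulo the number of bits. The sketch below illustrates the idea; it is not the body of setHashLoc, which is not shown here:

// Illustrative only: derive hashCount bit positions from two base hashes
// and set them in a long[]-backed bit array.
static void setBits(long[] bits, long numBits, int hash1, int hash2, int hashCount) {
  for (int i = 0; i < hashCount; i++) {
    long pos = Math.floorMod((long) hash1 + (long) i * hash2, numBits);
    bits[(int) (pos >>> 6)] |= 1L << (pos & 63); // set bit number 'pos'
  }
}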

Example 4: parseColumnFamilyOptions

private void parseColumnFamilyOptions(CommandLine cmd) {
  String dataBlockEncodingStr = cmd.getOptionValue(HFileTestUtil.OPT_DATA_BLOCK_ENCODING);
  dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
      DataBlockEncoding.valueOf(dataBlockEncodingStr);

  String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
  compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
      Compression.Algorithm.valueOf(compressStr);

  String bloomStr = cmd.getOptionValue(OPT_BLOOM);
  bloomType = bloomStr == null ? BloomType.ROW :
      BloomType.valueOf(bloomStr);

  inMemoryCF = cmd.hasOption(OPT_INMEMORY);
  if (cmd.hasOption(OPT_ENCRYPTION)) {
    cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
  }

}
 
Developer ID: apache, Project: hbase, Lines of code: 19, Source file: LoadTestTool.java

Example 5: append

@Override
public void append(Cell cell) throws IOException {
  if (cell == null)
    throw new NullPointerException();

  enqueueReadyChunk(false);

  if (chunk == null) {
    if (firstKeyInChunk != null) {
      throw new IllegalStateException("First key in chunk already set: "
          + Bytes.toStringBinary(firstKeyInChunk));
    }
    // This will be done only once per chunk
    if (bloomType == BloomType.ROW) {
      firstKeyInChunk = CellUtil.copyRow(cell);
    } else {
      firstKeyInChunk =
          PrivateCellUtil
              .getCellKeySerializedAsKeyValueKey(PrivateCellUtil.createFirstOnRowCol(cell));
    }
    allocateNewChunk();
  }

  chunk.add(cell);
  this.prevCell = cell;
  ++totalKeyCount;
}
 
Developer ID: apache, Project: hbase, Lines of code: 27, Source file: CompoundBloomFilterWriter.java

Example 6: contains

public static boolean contains(Cell cell, ByteBuff bloomBuf, int bloomOffset, int bloomSize,
    Hash hash, int hashCount, BloomType type) {
  HashKey<Cell> hashKey = type == BloomType.ROW ? new RowBloomHashKey(cell)
      : new RowColBloomHashKey(cell);
  return contains(bloomBuf, bloomOffset, bloomSize, hash, hashCount, hashKey);
}
 
Developer ID: apache, Project: hbase, Lines of code: 6, Source file: BloomFilterUtil.java

Example 7: BloomFilterChunk

/**
 * Determines & initializes bloom filter meta data from user config. Call
 * {@link #allocBloom()} to allocate bloom filter data.
 *
 * @param maxKeys Maximum expected number of keys that will be stored in this
 *          bloom
 * @param errorRate Desired false positive error rate. Lower rate = more
 *          storage required
 * @param hashType Type of hash function to use
 * @param foldFactor When finished adding entries, you may be able to 'fold'
 *          this bloom to save space. Tradeoff potentially excess bytes in
 *          bloom for ability to fold if keyCount is exponentially greater
 *          than maxKeys.
 * @throws IllegalArgumentException
 */
// Used only in testcases
public BloomFilterChunk(int maxKeys, double errorRate, int hashType,
    int foldFactor) throws IllegalArgumentException {
  this(hashType, BloomType.ROW);

  long bitSize = BloomFilterUtil.computeBitSize(maxKeys, errorRate);
  hashCount = BloomFilterUtil.optimalFunctionCount(maxKeys, bitSize);
  this.maxKeys = maxKeys;

  // increase byteSize so folding is possible
  byteSize = BloomFilterUtil.computeFoldableByteSize(bitSize, foldFactor);

  sanityCheck();
}
 
Developer ID: apache, Project: hbase, Lines of code: 29, Source file: BloomFilterChunk.java
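
For reference, computeBitSize and optimalFunctionCount used above presumably follow the standard Bloom filter sizing formulas, m = -n·ln(p) / (ln 2)^2 bits and k = (m / n)·ln 2 hash functions for n keys at false-positive rate p (the exact rounding in BloomFilterUtil may differ). A direct transcription of those formulas looks like this:

// Textbook Bloom filter sizing; illustrative, not HBase's exact implementation.
static long bitSize(int maxKeys, double errorRate) {
  return (long) Math.ceil(-maxKeys * Math.log(errorRate) / (Math.log(2) * Math.log(2)));
}

static int hashCount(int maxKeys, long bitSize) {
  return Math.max(1, (int) Math.round((double) bitSize / maxKeys * Math.log(2)));
}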


Note: the org.apache.hadoop.hbase.regionserver.BloomType.ROW examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors; please consult each project's license before distributing or using the code. Do not reproduce this article without permission.