

Java BloomFilterFactory.createDeleteBloomAtWrite Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.BloomFilterFactory.createDeleteBloomAtWrite. If you are unsure what BloomFilterFactory.createDeleteBloomAtWrite does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.util.BloomFilterFactory.


The following presents 9 code examples of the BloomFilterFactory.createDeleteBloomAtWrite method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
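Before the individual examples, here is a minimal, self-contained sketch of the call shape that all nine snippets share: the factory takes the configuration, a CacheConfig, an int key-count estimate, and the HFile.Writer the Bloom data will be attached to, and it returns null when delete-family Bloom filters are disabled by configuration. It assumes an HBase version that has HFileContext (as in Examples 1, 3, 4, 6 and 7); the class name, file path, and key count are hypothetical placeholders, not taken from the projects below.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.BloomFilterWriter;

public class DeleteBloomAtWriteSketch {  // hypothetical class, for illustration only
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    CacheConfig cacheConf = new CacheConfig(conf);
    FileSystem fs = FileSystem.get(conf);

    // An HFile.Writer is required first; the factory attaches the Bloom
    // filter data to this writer. The path is a placeholder.
    HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withPath(fs, new Path("/tmp/example.hfile"))
        .withFileContext(new HFileContextBuilder().build())
        .create();

    long maxKeys = 1_000_000L; // expected number of keys, hypothetical

    // All nine examples clamp the long key count to int in exactly this way.
    BloomFilterWriter deleteFamilyBloom = BloomFilterFactory.createDeleteBloomAtWrite(
        conf, cacheConf, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);

    if (deleteFamilyBloom == null) {
      // The factory returns null when delete-family Bloom filters are
      // disabled via configuration, so callers must null-check, as every
      // example below does.
      System.out.println("Delete family Bloom filter disabled by configuration");
    }
    writer.close();
  }
}

The (int) Math.min(maxKeys, Integer.MAX_VALUE) clamp recurs in every example because StoreFile tracks the expected key count as a long, while the factory takes an int.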

Example 1: Writer

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the package/class this method depends on
/**
 * Creates an HFile.Writer that also writes helpful metadata.
 *
 * @param fs           file system to write to
 * @param path         file name to create
 * @param conf         user configuration
 * @param comparator   key comparator
 * @param bloomType    bloom filter setting
 * @param maxKeys      the expected maximum number of keys to be added. Was used for Bloom filter
 *                     size in {@link HFile} format version 1.
 * @param favoredNodes
 * @param fileContext  - The HFile context
 * @throws IOException problem writing to FS
 */
private Writer(FileSystem fs, Path path, final Configuration conf, CacheConfig cacheConf,
    final KVComparator comparator, BloomType bloomType, long maxKeys,
    InetSocketAddress[] favoredNodes, HFileContext fileContext) throws IOException {
  writer = HFile.getWriterFactory(conf, cacheConf).withPath(fs, path).withComparator(comparator)
      .withFavoredNodes(favoredNodes).withFileContext(fileContext).create();

  this.kvComparator = comparator;

  generalBloomFilterWriter = BloomFilterFactory
      .createGeneralBloomAtWrite(conf, cacheConf, bloomType,
          (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);

  if (generalBloomFilterWriter != null) {
    this.bloomType = bloomType;
    if (LOG.isTraceEnabled()) LOG.trace(
        "Bloom filter type for " + path + ": " + this.bloomType + ", "
            + generalBloomFilterWriter.getClass().getSimpleName());
  } else {
    // Not using Bloom filters.
    this.bloomType = BloomType.NONE;
  }

  // initialize delete family Bloom filter when there is NO RowCol Bloom
  // filter
  if (this.bloomType != BloomType.ROWCOL) {
    this.deleteFamilyBloomFilterWriter = BloomFilterFactory
        .createDeleteBloomAtWrite(conf, cacheConf, (int) Math.min(maxKeys, Integer.MAX_VALUE),
            writer);
  } else {
    deleteFamilyBloomFilterWriter = null;
  }
  if (deleteFamilyBloomFilterWriter != null) {
    if (LOG.isTraceEnabled()) LOG.trace(
        "Delete Family Bloom filter type for " + path + ": " + deleteFamilyBloomFilterWriter
            .getClass().getSimpleName());
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 52, Source: StoreFile.java

Example 2: Writer

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the package/class this method depends on
/**
 * Creates an HFile.Writer that also writes helpful metadata.
 * @param fs file system to write to
 * @param path file name to create
 * @param blocksize HDFS block size
 * @param compress HDFS block compression
 * @param conf user configuration
 * @param comparator key comparator
 * @param bloomType bloom filter setting
 * @param maxKeys the expected maximum number of keys to be added. Was used for Bloom filter
 *          size in {@link HFile} format version 1.
 * @param checksumType the checksum type
 * @param bytesPerChecksum the number of bytes per checksum value
 * @param includeMVCCReadpoint whether to write the mvcc readpoint to the file for each KV
 * @throws IOException problem writing to FS
 */
private Writer(FileSystem fs, Path path, int blocksize, Compression.Algorithm compress,
    HFileDataBlockEncoder dataBlockEncoder, final Configuration conf, CacheConfig cacheConf,
    final KVComparator comparator, BloomType bloomType, long maxKeys,
    final ChecksumType checksumType, final int bytesPerChecksum, boolean includeMVCCReadpoint)
    throws IOException {
  this.dataBlockEncoder =
      dataBlockEncoder != null ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  writer =
      HFile.getWriterFactory(conf, cacheConf).withPath(fs, path).withBlockSize(blocksize)
          .withCompression(compress).withDataBlockEncoder(dataBlockEncoder)
          .withComparator(comparator.getRawComparator()).withChecksumType(checksumType)
          .withBytesPerChecksum(bytesPerChecksum).includeMVCCReadpoint(includeMVCCReadpoint)
          .create();

  this.kvComparator = comparator;

  generalBloomFilterWriter =
      BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, bloomType,
        (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);

  if (generalBloomFilterWriter != null) {
    this.bloomType = bloomType;
    LOG.info("Bloom filter type for " + path + ": " + this.bloomType + ", "
        + generalBloomFilterWriter.getClass().getSimpleName());
  } else {
    // Not using Bloom filters.
    this.bloomType = BloomType.NONE;
  }

  // initialize delete family Bloom filter when there is NO RowCol Bloom
  // filter
  if (this.bloomType != BloomType.ROWCOL) {
    this.deleteFamilyBloomFilterWriter =
        BloomFilterFactory.createDeleteBloomAtWrite(conf, cacheConf,
          (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
  } else {
    deleteFamilyBloomFilterWriter = null;
  }
  if (deleteFamilyBloomFilterWriter != null) {
    WinterOptimizer.ReplaceRawCode("Delete Family Bloom filter type for " + path + ": "
        + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
    // LOG.info("Delete Family Bloom filter type for " + path + ": "
    // + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
  }
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 64, Source: StoreFile.java

Example 3: Writer

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the package/class this method depends on
/**
 * Creates an HFile.Writer that also writes helpful metadata.
 * @param fs file system to write to
 * @param path file name to create
 * @param conf user configuration
 * @param comparator key comparator
 * @param bloomType bloom filter setting
 * @param maxKeys the expected maximum number of keys to be added. Was used
 *        for Bloom filter size in {@link HFile} format version 1.
 * @param favoredNodes
 * @param fileContext - The HFile context
 * @throws IOException problem writing to FS
 */
private Writer(FileSystem fs, Path path,
               final Configuration conf,
               CacheConfig cacheConf,
               final KVComparator comparator, BloomType bloomType, long maxKeys,
               InetSocketAddress[] favoredNodes, HFileContext fileContext)
        throws IOException {
    writer = HFile.getWriterFactory(conf, cacheConf)
            .withPath(fs, path)
            .withComparator(comparator)
            .withFavoredNodes(favoredNodes)
            .withFileContext(fileContext)
            .create();

    this.kvComparator = comparator;

    generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite(
            conf, cacheConf, bloomType,
            (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);

    if (generalBloomFilterWriter != null) {
        this.bloomType = bloomType;
        if (LOG.isTraceEnabled()) LOG.trace("Bloom filter type for " + path + ": " +
                this.bloomType + ", " + generalBloomFilterWriter.getClass().getSimpleName());
    } else {
        // Not using Bloom filters.
        this.bloomType = BloomType.NONE;
    }

    // initialize delete family Bloom filter when there is NO RowCol Bloom
    // filter
    if (this.bloomType != BloomType.ROWCOL) {
        this.deleteFamilyBloomFilterWriter = BloomFilterFactory
                .createDeleteBloomAtWrite(conf, cacheConf,
                        (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
    } else {
        deleteFamilyBloomFilterWriter = null;
    }
    if (deleteFamilyBloomFilterWriter != null) {
        if (LOG.isTraceEnabled()) LOG.trace("Delete Family Bloom filter type for " + path + ": "
                + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
    }
}
 
Developer: grokcoder, Project: pbase, Lines: 56, Source: StoreFile.java

Example 4: Writer

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the package/class this method depends on
/**
 * Creates an HFile.Writer that also writes helpful metadata.
 * @param fs file system to write to
 * @param path file name to create
 * @param conf user configuration
 * @param comparator key comparator
 * @param bloomType bloom filter setting
 * @param maxKeys the expected maximum number of keys to be added. Was used
 *        for Bloom filter size in {@link HFile} format version 1.
 * @param favoredNodes
 * @param fileContext - The HFile context
 * @throws IOException problem writing to FS
 */
private Writer(FileSystem fs, Path path,
    final Configuration conf,
    CacheConfig cacheConf,
    final KVComparator comparator, BloomType bloomType, long maxKeys,
    InetSocketAddress[] favoredNodes, HFileContext fileContext) 
        throws IOException {
  writer = HFile.getWriterFactory(conf, cacheConf)
      .withPath(fs, path)
      .withComparator(comparator)
      .withFavoredNodes(favoredNodes)
      .withFileContext(fileContext)
      .create();

  this.kvComparator = comparator;

  generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite(
      conf, cacheConf, bloomType,
      (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);

  if (generalBloomFilterWriter != null) {
    this.bloomType = bloomType;
    if (LOG.isTraceEnabled()) LOG.trace("Bloom filter type for " + path + ": " +
      this.bloomType + ", " + generalBloomFilterWriter.getClass().getSimpleName());
  } else {
    // Not using Bloom filters.
    this.bloomType = BloomType.NONE;
  }

  // initialize delete family Bloom filter when there is NO RowCol Bloom
  // filter
  if (this.bloomType != BloomType.ROWCOL) {
    this.deleteFamilyBloomFilterWriter = BloomFilterFactory
        .createDeleteBloomAtWrite(conf, cacheConf,
            (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
  } else {
    deleteFamilyBloomFilterWriter = null;
  }
  if (deleteFamilyBloomFilterWriter != null) {
    if (LOG.isTraceEnabled()) LOG.trace("Delete Family Bloom filter type for " + path + ": "
        + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 56, Source: StoreFile.java

Example 5: Writer

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the package/class this method depends on
/**
 * Creates an HFile.Writer that also writes helpful metadata.
 * @param fs file system to write to
 * @param path file name to create
 * @param blocksize HDFS block size
 * @param compress HDFS block compression
 * @param conf user configuration
 * @param comparator key comparator
 * @param bloomType bloom filter setting
 * @param maxKeys the expected maximum number of keys to be added. Was used
 *        for Bloom filter size in {@link HFile} format version 1.
 * @param checksumType the checksum type
 * @param bytesPerChecksum the number of bytes per checksum value
 * @param includeMVCCReadpoint whether to write the mvcc readpoint to the file for each KV
 * @throws IOException problem writing to FS
 */
private Writer(FileSystem fs, Path path, int blocksize,
    Compression.Algorithm compress,
    HFileDataBlockEncoder dataBlockEncoder, final Configuration conf,
    CacheConfig cacheConf,
    final KVComparator comparator, BloomType bloomType, long maxKeys,
    final ChecksumType checksumType, final int bytesPerChecksum, boolean includeMVCCReadpoint)
    throws IOException {
  this.dataBlockEncoder = dataBlockEncoder != null ?
      dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  writer = HFile.getWriterFactory(conf, cacheConf)
      .withPath(fs, path)
      .withBlockSize(blocksize)
      .withCompression(compress)
      .withDataBlockEncoder(dataBlockEncoder)
      .withComparator(comparator.getRawComparator())
      .withChecksumType(checksumType)
      .withBytesPerChecksum(bytesPerChecksum)
      .includeMVCCReadpoint(includeMVCCReadpoint)
      .create();

  this.kvComparator = comparator;

  generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite(
      conf, cacheConf, bloomType,
      (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);

  if (generalBloomFilterWriter != null) {
    this.bloomType = bloomType;
    LOG.info("Bloom filter type for " + path + ": " + this.bloomType + ", "
        + generalBloomFilterWriter.getClass().getSimpleName());
  } else {
    // Not using Bloom filters.
    this.bloomType = BloomType.NONE;
  }

  // initialize delete family Bloom filter when there is NO RowCol Bloom
  // filter
  if (this.bloomType != BloomType.ROWCOL) {
    this.deleteFamilyBloomFilterWriter = BloomFilterFactory
        .createDeleteBloomAtWrite(conf, cacheConf,
            (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
  } else {
    deleteFamilyBloomFilterWriter = null;
  }
  if (deleteFamilyBloomFilterWriter != null) {
    LOG.info("Delete Family Bloom filter type for " + path + ": "
        + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
  }
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
}
 
Developer: wanhao, Project: IRIndex, Lines: 68, Source: StoreFile.java

Example 6: StoreFileWriter

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the package/class this method depends on
/**
   * Creates an HFile.Writer that also writes helpful metadata.
   * @param fs file system to write to
   * @param path file name to create
   * @param conf user configuration
   * @param comparator key comparator
   * @param bloomType bloom filter setting
   * @param maxKeys the expected maximum number of keys to be added. Was used
   *        for Bloom filter size in {@link HFile} format version 1.
   * @param favoredNodes
   * @param fileContext - The HFile context
   * @param shouldDropCacheBehind Drop pages written to page cache after writing the store file.
   * @throws IOException problem writing to FS
   */
  private StoreFileWriter(FileSystem fs, Path path,
      final Configuration conf,
      CacheConfig cacheConf,
      final CellComparator comparator, BloomType bloomType, long maxKeys,
      InetSocketAddress[] favoredNodes, HFileContext fileContext,
      boolean shouldDropCacheBehind)
          throws IOException {
  this.timeRangeTracker = TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC);
  // TODO : Change all writers to be specifically created for compaction context
  writer = HFile.getWriterFactory(conf, cacheConf)
      .withPath(fs, path)
      .withComparator(comparator)
      .withFavoredNodes(favoredNodes)
      .withFileContext(fileContext)
      .withShouldDropCacheBehind(shouldDropCacheBehind)
      .create();

  generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite(
      conf, cacheConf, bloomType,
      (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);

  if (generalBloomFilterWriter != null) {
    this.bloomType = bloomType;
    if (LOG.isTraceEnabled()) {
      LOG.trace("Bloom filter type for " + path + ": " + this.bloomType + ", " +
          generalBloomFilterWriter.getClass().getSimpleName());
    }
    // init bloom context
    switch (bloomType) {
    case ROW:
      bloomContext = new RowBloomContext(generalBloomFilterWriter, comparator);
      break;
    case ROWCOL:
      bloomContext = new RowColBloomContext(generalBloomFilterWriter, comparator);
      break;
    default:
      throw new IOException(
          "Invalid Bloom filter type: " + bloomType + " (ROW or ROWCOL expected)");
    }
  } else {
    // Not using Bloom filters.
    this.bloomType = BloomType.NONE;
  }

  // initialize delete family Bloom filter when there is NO RowCol Bloom
  // filter
  if (this.bloomType != BloomType.ROWCOL) {
    this.deleteFamilyBloomFilterWriter = BloomFilterFactory
        .createDeleteBloomAtWrite(conf, cacheConf,
            (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
    deleteFamilyBloomContext = new RowBloomContext(deleteFamilyBloomFilterWriter, comparator);
  } else {
    deleteFamilyBloomFilterWriter = null;
  }
  if (deleteFamilyBloomFilterWriter != null && LOG.isTraceEnabled()) {
    LOG.trace("Delete Family Bloom filter type for " + path + ": " +
        deleteFamilyBloomFilterWriter.getClass().getSimpleName());
  }
}
 
Developer: apache, Project: hbase, Lines: 74, Source: StoreFileWriter.java
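Example 6 (apache/hbase) is the one variant with a different shape: StoreFileWriter wraps each Bloom writer in a BloomContext (a RowBloomContext for the delete-family filter), and cells are later routed through that context instead of being hashed directly. The snippet above only shows the context being created; the sketch below suggests the matching append path, with method and field names assumed from the same StoreFileWriter class in recent HBase rather than taken from the snippet itself.

// Sketch of the append side, assuming HBase 2.x StoreFileWriter internals:
private void appendDeleteFamilyBloomFilter(final Cell cell) throws IOException {
  // Only delete-family and delete-family-version markers belong in this filter.
  if (!CellUtil.isDeleteFamily(cell) && !CellUtil.isDeleteFamilyVersion(cell)) {
    return;
  }
  deleteFamilyCnt++; // count of delete-family markers stored in this file
  if (this.deleteFamilyBloomFilterWriter != null) {
    // The RowBloomContext extracts the row key from the cell and adds it to
    // the delete-family Bloom filter created by createDeleteBloomAtWrite.
    deleteFamilyBloomContext.writeBloom(cell);
  }
}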

Example 7: Writer

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the package/class this method depends on
/**
 * Creates an HFile.Writer that also writes helpful metadata.
 * @param fs file system to write to
 * @param path file name to create
 * @param conf user configuration
 * @param comparator key comparator
 * @param bloomType bloom filter setting
 * @param maxKeys the expected maximum number of keys to be added. Was used
 *        for Bloom filter size in {@link HFile} format version 1.
 * @param favoredNodes
 * @param fileContext - The HFile context
 * @throws IOException problem writing to FS
 */
private Writer(FileSystem fs, Path path,
    final Configuration conf,
    CacheConfig cacheConf,
    final KVComparator comparator, BloomType bloomType, long maxKeys,
    InetSocketAddress[] favoredNodes, HFileContext fileContext)
        throws IOException {
  //LOG.info("Shen Li: in Writer constructor");
  writer = HFile.getWriterFactory(conf, cacheConf)
      .withPath(fs, path)
      .withComparator(comparator)
      .withFavoredNodes(favoredNodes)
      .withFileContext(fileContext)
      .create();

  this.kvComparator = comparator;

  generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite(
      conf, cacheConf, bloomType,
      (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);

  if (generalBloomFilterWriter != null) {
    this.bloomType = bloomType;
    if (LOG.isTraceEnabled()) LOG.trace("Bloom filter type for " + path + ": " +
      this.bloomType + ", " + generalBloomFilterWriter.getClass().getSimpleName());
  } else {
    // Not using Bloom filters.
    this.bloomType = BloomType.NONE;
  }

  // initialize delete family Bloom filter when there is NO RowCol Bloom
  // filter
  if (this.bloomType != BloomType.ROWCOL) {
    this.deleteFamilyBloomFilterWriter = BloomFilterFactory
        .createDeleteBloomAtWrite(conf, cacheConf,
            (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
  } else {
    deleteFamilyBloomFilterWriter = null;
  }
  if (deleteFamilyBloomFilterWriter != null) {
    if (LOG.isTraceEnabled()) LOG.trace("Delete Family Bloom filter type for " + path + ": "
        + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
  }
}
 
Developer: shenli-uiuc, Project: PyroDB, Lines: 57, Source: StoreFile.java

Example 8: Writer

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the package/class this method depends on
/**
 * Creates an HFile.Writer that also writes helpful metadata.
 * @param fs file system to write to
 * @param path file name to create
 * @param blocksize HDFS block size
 * @param compress HDFS block compression
 * @param conf user configuration
 * @param comparator key comparator
 * @param bloomType bloom filter setting
 * @param maxKeys the expected maximum number of keys to be added. Was used
 *        for Bloom filter size in {@link HFile} format version 1.
 * @param checksumType the checksum type
 * @param bytesPerChecksum the number of bytes per checksum value
 * @param includeMVCCReadpoint whether to write the mvcc readpoint to the file for each KV
 * @param favoredNodes
 * @throws IOException problem writing to FS
 */
private Writer(FileSystem fs, Path path, int blocksize,
    Compression.Algorithm compress,
    HFileDataBlockEncoder dataBlockEncoder, final Configuration conf,
    CacheConfig cacheConf,
    final KVComparator comparator, BloomType bloomType, long maxKeys,
    final ChecksumType checksumType, final int bytesPerChecksum,
    final boolean includeMVCCReadpoint, InetSocketAddress[] favoredNodes)
        throws IOException {
  this.dataBlockEncoder = dataBlockEncoder != null ?
      dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  writer = HFile.getWriterFactory(conf, cacheConf)
      .withPath(fs, path)
      .withBlockSize(blocksize)
      .withCompression(compress)
      .withDataBlockEncoder(this.dataBlockEncoder)
      .withComparator(comparator)
      .withChecksumType(checksumType)
      .withBytesPerChecksum(bytesPerChecksum)
      .withFavoredNodes(favoredNodes)
      .includeMVCCReadpoint(includeMVCCReadpoint)
      .create();

  this.kvComparator = comparator;

  generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite(
      conf, cacheConf, bloomType,
      (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);

  if (generalBloomFilterWriter != null) {
    this.bloomType = bloomType;
    if (LOG.isTraceEnabled()) LOG.trace("Bloom filter type for " + path + ": " +
      this.bloomType + ", " + generalBloomFilterWriter.getClass().getSimpleName());
  } else {
    // Not using Bloom filters.
    this.bloomType = BloomType.NONE;
  }

  // initialize delete family Bloom filter when there is NO RowCol Bloom
  // filter
  if (this.bloomType != BloomType.ROWCOL) {
    this.deleteFamilyBloomFilterWriter = BloomFilterFactory
        .createDeleteBloomAtWrite(conf, cacheConf,
            (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
  } else {
    deleteFamilyBloomFilterWriter = null;
  }
  if (deleteFamilyBloomFilterWriter != null) {
    if (LOG.isTraceEnabled()) LOG.trace("Delete Family Bloom filter type for " + path + ": "
        + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
  }
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
}
 
Developer: cloud-software-foundation, Project: c5, Lines: 71, Source: StoreFile.java

Example 9: Writer

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the package/class this method depends on
/**
 * Creates an HFile.Writer that also writes helpful metadata.
 * @param fs file system to write to
 * @param path file name to create
 * @param blocksize HDFS block size
 * @param compress HDFS block compression
 * @param conf user configuration
 * @param comparator key comparator
 * @param bloomType bloom filter setting
 * @param maxKeys the expected maximum number of keys to be added. Was used
 *        for Bloom filter size in {@link HFile} format version 1.
 * @param checksumType the checksum type
 * @param bytesPerChecksum the number of bytes per checksum value
 * @throws IOException problem writing to FS
 */
private Writer(FileSystem fs, Path path, int blocksize,
    Compression.Algorithm compress,
    HFileDataBlockEncoder dataBlockEncoder, final Configuration conf,
    CacheConfig cacheConf,
    final KVComparator comparator, BloomType bloomType, long maxKeys,
    final ChecksumType checksumType, final int bytesPerChecksum)
    throws IOException {
  this.dataBlockEncoder = dataBlockEncoder != null ?
      dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  writer = HFile.getWriterFactory(conf, cacheConf)
      .withPath(fs, path)
      .withBlockSize(blocksize)
      .withCompression(compress)
      .withDataBlockEncoder(this.dataBlockEncoder)
      .withComparator(comparator.getRawComparator())
      .withChecksumType(checksumType)
      .withBytesPerChecksum(bytesPerChecksum)
      .create();

  this.kvComparator = comparator;

  generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite(
      conf, cacheConf, bloomType,
      (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);

  if (generalBloomFilterWriter != null) {
    this.bloomType = bloomType;
    LOG.info("Bloom filter type for " + path + ": " + this.bloomType + ", "
        + generalBloomFilterWriter.getClass().getSimpleName());
  } else {
    // Not using Bloom filters.
    this.bloomType = BloomType.NONE;
  }

  // initialize delete family Bloom filter when there is NO RowCol Bloom
  // filter
  if (this.bloomType != BloomType.ROWCOL) {
    this.deleteFamilyBloomFilterWriter = BloomFilterFactory
        .createDeleteBloomAtWrite(conf, cacheConf,
            (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
  } else {
    deleteFamilyBloomFilterWriter = null;
  }
  if (deleteFamilyBloomFilterWriter != null) {
    LOG.info("Delete Family Bloom filter type for " + path + ": "
        + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
  }
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
}
 
Developer: daidong, Project: DominoHBase, Lines: 66, Source: StoreFile.java


Note: The org.apache.hadoop.hbase.util.BloomFilterFactory.createDeleteBloomAtWrite method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are governed by each project's license. Please do not reproduce without permission.