

Java BloomFilterFactory Class Code Examples

This article collects and organizes typical usage examples of the Java class org.apache.hadoop.hbase.util.BloomFilterFactory. If you are struggling with questions like what the BloomFilterFactory class does, how to use it, or where to find usage examples, the curated class code examples below may help.


The BloomFilterFactory class belongs to the org.apache.hadoop.hbase.util package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
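
As a quick orientation before the examples, here is a minimal sketch of how the tests below drive BloomFilterFactory through a Hadoop Configuration. The configuration keys and factory methods are the ones used in examples 1, 2, and 8 of this article (older snapshots, such as example 15, use slightly different names like isBloomEnabled); the concrete values are illustrative assumptions, not recommendations.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.BloomFilterFactory;

public class BloomConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Turn on general (row / row-col) Bloom filters and tune them;
    // the values here are illustrative assumptions.
    conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0.01f);
    conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, 128 * 1024);

    // Read the effective settings back through the factory:
    System.out.println(BloomFilterFactory.isGeneralBloomEnabled(conf)); // true
    System.out.println(BloomFilterFactory.getErrorRate(conf));          // ~0.01
  }
}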

Example 1: validateFalsePosRate

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
/**
 * Validates the false positive ratio by computing its z-value and comparing
 * it to the provided threshold.
 *
 * @param falsePosRate experimentally measured false positive rate
 * @param nTrials the number of Bloom filter checks
 * @param zValueBoundary z-value boundary, positive for an upper bound and
 *          negative for a lower bound
 * @param cbf the compound Bloom filter we are using
 * @param additionalMsg additional message to include in log output and
 *          assertion failures
 */
private void validateFalsePosRate(double falsePosRate, int nTrials,
    double zValueBoundary, CompoundBloomFilter cbf, String additionalMsg) {
  double p = BloomFilterFactory.getErrorRate(conf);
  double zValue = (falsePosRate - p) / Math.sqrt(p * (1 - p) / nTrials);

  String assortedStatsStr = " (targetErrorRate=" + p + ", falsePosRate="
      + falsePosRate + ", nTrials=" + nTrials + ")";
  LOG.info("z-value is " + zValue + assortedStatsStr);

  boolean isUpperBound = zValueBoundary > 0;

  if (isUpperBound && zValue > zValueBoundary ||
      !isUpperBound && zValue < zValueBoundary) {
    String errorMsg = "False positive rate z-value " + zValue + " is "
        + (isUpperBound ? "higher" : "lower") + " than " + zValueBoundary
        + assortedStatsStr + ". Per-chunk stats:\n"
        + cbf.formatTestingStats();
    fail(errorMsg + additionalMsg);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 33, Source: TestCompoundBloomFilter.java
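
The z-value computed above is a standard one-proportion z-score: if the filter truly meets its target error rate p, then over nTrials independent checks the observed false-positive rate has standard deviation sqrt(p * (1 - p) / nTrials). As a self-contained illustration, with made-up numbers (p, falsePosRate, and nTrials below are hypothetical):

public class ZValueSketch {
  public static void main(String[] args) {
    double p = 0.01;              // target error rate (what BloomFilterFactory.getErrorRate returns)
    double falsePosRate = 0.012;  // observed false-positive rate (hypothetical)
    int nTrials = 10000;          // number of Bloom filter checks (hypothetical)
    double z = (falsePosRate - p) / Math.sqrt(p * (1 - p) / nTrials);
    System.out.printf("z-value = %.2f%n", z);  // prints 2.01, just above a 1.96 upper bound
  }
}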

Example 2: testBloomFilter

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
public void testBloomFilter() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, (float) 0.01);
  conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);

  // write the file
  Path f = new Path(ROOT_DIR, getName());
  HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL)
                      .withChecksumType(CKTYPE)
                      .withBytesPerCheckSum(CKBYTES).build();
  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
          .withFilePath(f)
          .withBloomType(BloomType.ROW)
          .withMaxKeyCount(2000)
          .withFileContext(meta)
          .build();
  bloomWriteRead(writer, fs);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 20, Source: TestStoreFile.java

Example 3: setUp

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  this.conf.set("dfs.datanode.data.dir.perm", "700");
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache;
  cacheConf =
      new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
      cowType.shouldBeCached(BlockType.LEAF_INDEX),
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData,
          false, false, false);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 19, Source: TestCacheOnWrite.java

Example 4: HFileSortedOplogWriter

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
public HFileSortedOplogWriter() throws IOException {
  writer = HFile.getWriterFactory(hconf, hcache)
      .withPath(fs, path)
      .withBlockSize(sopConfig.getBlockSize())
      .withBytesPerChecksum(sopConfig.getBytesPerChecksum())
      .withChecksumType(HFileSortedOplogFactory.convertChecksum(sopConfig.getChecksum()))
//      .withComparator(sopConfig.getComparator())
      .withCompression(HFileSortedOplogFactory.convertCompression(sopConfig.getCompression()))
      .withDataBlockEncoder(HFileSortedOplogFactory.convertEncoding(sopConfig.getKeyEncoding()))
      .create();

  bfw = sopConfig.isBloomFilterEnabled() ?
//      BloomFilterFactory.createGeneralBloomAtWrite(hconf, hcache, BloomType.ROW,
//          0, writer, sopConfig.getComparator())
      BloomFilterFactory.createGeneralBloomAtWrite(hconf, hcache, BloomType.ROW,
          0, writer)
      : null;
}
 
Developer ID: gemxd, Project: gemfirexd-oss, Lines: 19, Source: HFileSortedOplog.java

Example 5: HFileSortedOplogWriter

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
public HFileSortedOplogWriter(int keys) throws IOException {
  try {
    int hfileBlockSize = Integer.getInteger(
        HoplogConfig.HFILE_BLOCK_SIZE_CONF, (1 << 16));

    Algorithm compress = Algorithm.valueOf(System.getProperty(HoplogConfig.COMPRESSION,
        HoplogConfig.COMPRESSION_DEFAULT));

//    ByteComparator bc = new ByteComparator();
    writer = HFile.getWriterFactory(conf, cacheConf)
        .withPath(fsProvider.getFS(), path)
        .withBlockSize(hfileBlockSize)
//        .withComparator(bc)
        .withCompression(compress)
        .create();
    bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
        writer);

    logger.fine("Created hoplog writer with compression " + compress);
  } catch (IOException e) {
    logger.fine("IO Error while creating writer");
    throw e;
  }
}
 
Developer ID: gemxd, Project: gemfirexd-oss, Lines: 25, Source: HFileSortedOplog.java
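
Both HFileSortedOplogWriter variants above reduce to the same factory call. Stripped of the surrounding GemFire plumbing, the shared pattern looks roughly like the sketch below; conf, cacheConf, fs, path, and expectedKeys are assumed context, not part of the factory API. Example 4 passes 0 as the key estimate, which suggests the estimate is not critical when compound Bloom filters size their chunks by block size rather than key count.

// Sketch only; all identifiers except the two factory calls are assumed context.
HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
    .withPath(fs, path)
    .create();
BloomFilterWriter bfw = BloomFilterFactory.createGeneralBloomAtWrite(
    conf, cacheConf, BloomType.ROW, expectedKeys, writer);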

Example 6: testBloomFilter

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
public void testBloomFilter() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
      (float) 0.01);
  conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);

  // write the file
  Path f = new Path(ROOT_DIR, getName());
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
      StoreFile.DEFAULT_BLOCKSIZE_SMALL)
          .withFilePath(f)
          .withBloomType(StoreFile.BloomType.ROW)
          .withMaxKeyCount(2000)
          .withChecksumType(CKTYPE)
          .withBytesPerChecksum(CKBYTES)
          .build();
  bloomWriteRead(writer, fs);
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 19, Source: TestStoreFile.java

Example 7: setUp

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.DATA));
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.LEAF_INDEX));
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  cacheConf = new CacheConfig(conf);
  blockCache = cacheConf.getBlockCache();
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 19, Source: TestCacheOnWrite.java

Example 8: StoreFile

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
/**
 * Constructor, loads a reader and its indices, etc. May allocate a
 * substantial amount of RAM depending on the underlying files (10-20MB?).
 *
 * @param fs  The current file system to use.
 * @param fileInfo  The store file information.
 * @param conf  The current configuration.
 * @param cacheConf  The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified
 *          by column family configuration. This may or may not be the same
 *          as the Bloom filter type actually present in the HFile, because
 *          column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs, final StoreFileInfo fileInfo, final Configuration conf,
                 final CacheConfig cacheConf, final BloomType cfBloomType) throws IOException {
    this.fs = fs;
    this.fileInfo = fileInfo;
    this.cacheConf = cacheConf;

    if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
        this.cfBloomType = cfBloomType;
    } else {
        LOG.info("Ignoring bloom filter check for file " + this.getPath() + ": " +
                "cfBloomType=" + cfBloomType + " (disabled in config)");
        this.cfBloomType = BloomType.NONE;
    }

    // cache the modification time stamp of this store file
    this.modificationTimeStamp = fileInfo.getModificationTime();
}
 
Developer ID: grokcoder, Project: pbase, Lines: 33, Source: StoreFile.java

Example 9: setUp

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  this.conf.set("dfs.datanode.data.dir.perm", "700");
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  cacheConf =
      new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
      cowType.shouldBeCached(BlockType.LEAF_INDEX),
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData, true, false);
}
 
Developer ID: grokcoder, Project: pbase, Lines: 17, Source: TestCacheOnWrite.java

Example 10: StoreFile

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
/**
 * Constructor, loads a reader and its indices, etc. May allocate a
 * substantial amount of RAM depending on the underlying files (10-20MB?).
 *
 * @param fs  The current file system to use.
 * @param fileInfo  The store file information.
 * @param conf  The current configuration.
 * @param cacheConf  The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified
 *          by column family configuration. This may or may not be the same
 *          as the Bloom filter type actually present in the HFile, because
 *          column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs, final StoreFileInfo fileInfo, final Configuration conf,
    final CacheConfig cacheConf,  final BloomType cfBloomType) throws IOException {
  this.fs = fs;
  this.fileInfo = fileInfo;
  this.cacheConf = cacheConf;

  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + this.getPath() + ": " +
        "cfBloomType=" + cfBloomType + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }

  // cache the modification time stamp of this store file
  this.modificationTimeStamp = fileInfo.getModificationTime();
}
 
Developer ID: tenggyut, Project: HIndex, Lines: 33, Source: StoreFile.java

Example 11: setUp

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  this.conf.set("dfs.datanode.data.dir.perm", "700");
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.DATA));
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.LEAF_INDEX));
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  cacheConf = new CacheConfig(conf);
  blockCache = cacheConf.getBlockCache();
}
 
Developer ID: tenggyut, Project: HIndex, Lines: 20, Source: TestCacheOnWrite.java

Example 12: HStoreFile

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
/**
 * Constructor, loads a reader and its indices, etc. May allocate a substantial amount of RAM
 * depending on the underlying files (10-20MB?).
 * @param fs The current file system to use.
 * @param fileInfo The store file information.
 * @param conf The current configuration.
 * @param cacheConf The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified by column
 *          family configuration. This may or may not be the same as the Bloom filter type
 *          actually present in the HFile, because column family configuration might change. If
 *          this is {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param primaryReplica true if this is a store file for primary replica, otherwise false.
 */
public HStoreFile(FileSystem fs, StoreFileInfo fileInfo, Configuration conf, CacheConfig cacheConf,
    BloomType cfBloomType, boolean primaryReplica) {
  this.fs = fs;
  this.fileInfo = fileInfo;
  this.cacheConf = cacheConf;
  this.noReadahead =
      conf.getBoolean(STORE_FILE_READER_NO_READAHEAD, DEFAULT_STORE_FILE_READER_NO_READAHEAD);
  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + this.getPath() + ": " + "cfBloomType=" +
        cfBloomType + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }
  this.primaryReplica = primaryReplica;
}
 
Developer ID: apache, Project: hbase, Lines: 30, Source: HStoreFile.java

Example 13: testBloomFilter

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
@Test
public void testBloomFilter() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, (float) 0.01);
  conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);

  // write the file
  Path f = new Path(ROOT_DIR, getName());
  HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL)
                      .withChecksumType(CKTYPE)
                      .withBytesPerCheckSum(CKBYTES).build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
          .withFilePath(f)
          .withBloomType(BloomType.ROW)
          .withMaxKeyCount(2000)
          .withFileContext(meta)
          .build();
  bloomWriteRead(writer, fs);
}
 
Developer ID: apache, Project: hbase, Lines: 21, Source: TestHStoreFile.java
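
Note, in passing, that this newer apache/hbase example uses the standalone StoreFileWriter.Builder, whereas the older snapshots in examples 2 and 6 use the nested StoreFile.Writer / StoreFile.WriterBuilder; the Bloom-related configuration keys (IO_STOREFILE_BLOOM_ERROR_RATE, IO_STOREFILE_BLOOM_ENABLED) are the same across both APIs.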

Example 14: setUp

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  this.conf.set("dfs.datanode.data.dir.perm", "700");
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache;
  cacheConf =
      new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
      cowType.shouldBeCached(BlockType.LEAF_INDEX),
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData,
          false, false);
}
 
Developer ID: apache, Project: hbase, Lines: 18, Source: TestCacheOnWrite.java

Example 15: createWriter

import org.apache.hadoop.hbase.util.BloomFilterFactory; // import the required package/class
/**
 * Create a store file writer. Client is responsible for closing file when done.
 * If metadata, add BEFORE closing using appendMetadata()
 * @param fs
 * @param dir Path to family directory. Makes the directory if it doesn't exist.
 * Creates a file with a unique name in this directory.
 * @param blocksize
 * @param algorithm Pass null to get default.
 * @param c Pass null to get default.
 * @param conf HBase system configuration. Used with bloom filters.
 * @param cacheConf Cache configuration and reference.
 * @param bloomType column family setting for bloom filters
 * @param maxKeyCount estimated maximum number of keys we expect to add
 * @return HFile.Writer
 * @throws IOException
 */
public static StoreFile.Writer createWriter(final FileSystem fs,
                                            final Path dir,
                                            final int blocksize,
                                            final Compression.Algorithm algorithm,
                                            final KeyValue.KVComparator c,
                                            final Configuration conf,
                                            final CacheConfig cacheConf,
                                            BloomType bloomType,
                                            long maxKeyCount)
    throws IOException {

  if (!fs.exists(dir)) {
    fs.mkdirs(dir);
  }
  Path path = getUniqueFile(fs, dir);
  if (!BloomFilterFactory.isBloomEnabled(conf)) {
    bloomType = BloomType.NONE;
  }

  return new Writer(fs, path, blocksize,
      algorithm == null? HFile.DEFAULT_COMPRESSION_ALGORITHM: algorithm,
      conf, cacheConf, c == null ? KeyValue.COMPARATOR: c, bloomType,
      maxKeyCount);
}
 
Developer ID: lifeng5042, Project: RStore, Lines: 41, Source: StoreFile.java
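
A hypothetical invocation of the method above; since the source file is StoreFile.java, a static call through StoreFile is assumed, and the path, block size, and key count are placeholders. Passing null for the compression algorithm and comparator selects the documented defaults.

// Hypothetical usage sketch; fs, conf, and cacheConf are assumed context.
StoreFile.Writer writer = StoreFile.createWriter(
    fs,
    new Path("/hbase/mytable/cf"),  // family directory, created if absent (placeholder path)
    64 * 1024,                      // block size in bytes (placeholder)
    null,                           // null => HFile.DEFAULT_COMPRESSION_ALGORITHM
    null,                           // null => KeyValue.COMPARATOR
    conf, cacheConf,
    BloomType.ROW,                  // downgraded to NONE if blooms are disabled in conf
    2000);                          // estimated max key count (placeholder)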


Note: The org.apache.hadoop.hbase.util.BloomFilterFactory class examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.