当前位置: 首页>>代码示例>>Java>>正文


Java NoOpDataBlockEncoder.INSTANCE属性代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder.INSTANCE属性的典型用法代码示例。如果您正苦于以下问题：Java NoOpDataBlockEncoder.INSTANCE属性的具体用法？Java NoOpDataBlockEncoder.INSTANCE怎么用？Java NoOpDataBlockEncoder.INSTANCE使用的例子？那么恭喜您，这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder的用法示例。


在下文中一共展示了NoOpDataBlockEncoder.INSTANCE属性的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: validateStoreFile

/**
 * Sanity-checks a store file by opening a reader on it and closing it
 * again. With HFileV2 this round-trip is expected to be inexpensive.
 * @param path the path to the store file
 * @throws IOException if the file cannot be opened; it is left in its tmp location
 */
private void validateStoreFile(Path path) throws IOException {
  StoreFile sf = null;
  try {
    sf = new StoreFile(this.fs, path, this.conf, this.cacheConf,
        this.family.getBloomFilterType(), NoOpDataBlockEncoder.INSTANCE);
    passSchemaMetricsTo(sf);
    sf.createReader();
  } catch (IOException ioe) {
    LOG.error("Failed to open store file : " + path + ", keeping it in tmp location", ioe);
    throw ioe;
  } finally {
    // Release the reader whether or not validation succeeded.
    if (sf != null) {
      sf.closeReader(false);
    }
  }
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:22,代码来源:Store.java

示例2: validateStoreFile

/**
 * Opens and immediately closes a reader on the given store file to confirm
 * it is readable. Under HFileV2 this check should be cheap.
 *
 * @param path the path to the store file being validated
 * @throws IOException if opening the file fails; the file stays in tmp
 */
private void validateStoreFile(Path path)
    throws IOException {
  StoreFile candidate = null;
  try {
    candidate =
        new StoreFile(this.fs, path, this.conf, this.cacheConf,
            this.family.getBloomFilterType(), NoOpDataBlockEncoder.INSTANCE);
    passSchemaMetricsTo(candidate);
    candidate.createReader();
  } catch (IOException e) {
    LOG.error("Failed to open store file : " + path
        + ", keeping it in tmp location", e);
    throw e;
  } finally {
    // Close the reader in every case so no file handle leaks.
    if (candidate != null) {
      candidate.closeReader(false);
    }
  }
}
 
开发者ID:wanhao,项目名称:IRIndex,代码行数:25,代码来源:Store.java

示例3: StoreFile

/**
 * Constructor, loads a reader and its indices, etc. May allocate a
 * substantial amount of ram depending on the underlying files (10-20MB?).
 *
 * @param fs the current file system to use
 * @param p the path of the file
 * @param conf the current configuration
 * @param cacheConf the cache configuration and block cache reference
 * @param cfBloomType the bloom type requested by the column family
 *          configuration. This may or may not match the Bloom filter type
 *          actually present in the HFile, because column family
 *          configuration might change. If this is {@link BloomType#NONE},
 *          the existing Bloom filter is ignored. Forced to NONE when general
 *          Bloom filters are disabled in the configuration.
 * @param dataBlockEncoder data block encoding algorithm; null means no-op
 * @throws IOException when opening the reader fails
 */
public StoreFile(final FileSystem fs, final Path p, final Configuration conf,
    final CacheConfig cacheConf, final BloomType cfBloomType,
    final HFileDataBlockEncoder dataBlockEncoder) throws IOException {
  this.fs = fs;
  this.path = p;
  this.cacheConf = cacheConf;
  // Fall back to the no-op encoder when none was supplied.
  this.dataBlockEncoder =
      (dataBlockEncoder != null) ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    LOG.info("Ignoring bloom filter check for file " + path + ": " + "cfBloomType=" + cfBloomType
        + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  } else {
    this.cfBloomType = cfBloomType;
  }

  // Cache the modification time stamp of this store file; 0 when the file
  // cannot be stat'ed as exactly one entry.
  FileStatus[] stats = FSUtils.listStatus(fs, p, null);
  this.modificationTimeStamp =
      (stats != null && stats.length == 1) ? stats[0].getModificationTime() : 0;
  SchemaMetrics.configureGlobally(conf);
  initPossibleIndexesAndReference(fs, p, conf);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:41,代码来源:StoreFile.java

示例4: prepareListOfTestSeeks

/**
 * Reads every KeyValue out of the store file at {@code path} and picks
 * {@code numberOfSeeks} of them (with replacement) at random to use as seek
 * targets. The block cache is cleared before returning so subsequent seeks
 * hit disk.
 *
 * @param path the store file to sample seek keys from
 * @return the randomly chosen seek targets; empty if the file held no KVs
 * @throws IOException if the store file cannot be opened or scanned
 */
private List<KeyValue> prepareListOfTestSeeks(Path path) throws IOException {
  List<KeyValue> allKeyValues = new ArrayList<KeyValue>();

  // read all of the key values
  StoreFile storeFile = new StoreFile(testingUtility.getTestFileSystem(),
      path, configuration, cacheConf, BloomType.NONE,
      NoOpDataBlockEncoder.INSTANCE);

  StoreFile.Reader reader = storeFile.createReader();
  StoreFileScanner scanner = reader.getStoreFileScanner(true, false);
  KeyValue current;

  scanner.seek(KeyValue.LOWESTKEY);
  while (null != (current = scanner.next())) {
    allKeyValues.add(current);
  }

  storeFile.closeReader(cacheConf.shouldEvictOnClose());

  // Pick seeks at random. Presize the result and guard against an empty
  // file: Random.nextInt(0) would throw IllegalArgumentException.
  List<KeyValue> seeks = new ArrayList<KeyValue>(numberOfSeeks);
  if (!allKeyValues.isEmpty()) {
    for (int i = 0; i < numberOfSeeks; ++i) {
      KeyValue keyValue = allKeyValues.get(
          randomizer.nextInt(allKeyValues.size()));
      seeks.add(keyValue);
    }
  }

  clearBlockCache();

  return seeks;
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:31,代码来源:EncodedSeekPerformanceTest.java

示例5: testCodecs

/**
 * Test a data block encoder on the given HFile. Output results to console.
 * @param kvLimit The limit of KeyValue which will be analyzed.
 * @param hfilePath an HFile path on the file system.
 * @param compressionName Compression algorithm used for comparison.
 * @param doBenchmark Run performance benchmarks.
 * @param doVerify Verify correctness.
 * @throws IOException When pathName is incorrect.
 */
public static void testCodecs(Configuration conf, int kvLimit,
    String hfilePath, String compressionName, boolean doBenchmark,
    boolean doVerify) throws IOException {
  // create environment
  Path path = new Path(hfilePath);
  CacheConfig cacheConf = new CacheConfig(conf);
  FileSystem fs = FileSystem.get(conf);
  StoreFile hsf = new StoreFile(fs, path, conf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);

  StoreFile.Reader reader = hsf.createReader();
  reader.loadFileInfo();
  KeyValueScanner scanner = reader.getStoreFileScanner(true, true);
  try {
    // run the utilities
    DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
    comp.checkStatistics(scanner, kvLimit);
    if (doVerify) {
      comp.verifyCodecs(scanner, kvLimit);
    }
    if (doBenchmark) {
      comp.benchmarkCodecs();
    }
    comp.displayStatistics();
  } finally {
    // BUGFIX: release the scanner and reader even when one of the analysis
    // steps throws; previously a failure leaked both resources.
    scanner.close();
    reader.close(cacheConf.shouldEvictOnClose());
  }
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:38,代码来源:DataBlockEncodingTool.java

示例6: MockStoreFile

/**
 * Creates a mock store file of the given reported length, optionally
 * flagged as a reference file, backed by TEST_FILE with no Bloom filter
 * and no data block encoding.
 * @param length the length this mock should report
 * @param isRef whether this mock should behave as a reference file
 * @throws IOException if the superclass fails to open the backing file
 */
MockStoreFile(long length, boolean isRef) throws IOException {
  super(TEST_UTIL.getTestFileSystem(), TEST_FILE, TEST_UTIL.getConfiguration(),
      new CacheConfig(TEST_UTIL.getConfiguration()), BloomType.NONE,
      NoOpDataBlockEncoder.INSTANCE);
  this.length = length;
  this.isRef = isRef;
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:7,代码来源:TestCompactSelection.java

示例7: testReference

/**
 * Test that our mechanism of writing store files in one region to reference
 * store files in other regions works: writes a store file, splits it at its
 * midkey, and verifies the top-half reference only yields keys from the
 * top half of the original file.
 * @throws IOException on any file system failure
 */
public void testReference()
throws IOException {
  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path storedir = new Path(new Path(this.testDir, "7e0102"), "familyname");
  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
      this.fs, 8 * 1024)
          .withOutputDir(storedir)
          .build();
  writeStoreFile(writer);
  StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
  StoreFile.Reader reader = hsf.createReader();
  // Split on a row, not in middle of row.  Midkey returned by reader
  // may be in middle of row.  Create new one with empty column and
  // timestamp.
  KeyValue kv = KeyValue.createKeyValueFromKey(reader.midkey());
  byte [] midRow = kv.getRow();
  kv = KeyValue.createKeyValueFromKey(reader.getLastKey());
  byte [] finalRow = kv.getRow();
  // Make a reference
  Path refPath = StoreFile.split(fs, storedir, hsf, midRow, Range.top);
  StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
  // Now confirm that I can read from the reference and that it only gets
  // keys from top half of the file.
  HFileScanner s = refHsf.createReader().getScanner(false, false);
  // On the first iteration check the reference starts exactly at midRow;
  // after the loop 'kv' holds the last key seen, which must be finalRow.
  for(boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) {
    ByteBuffer bb = s.getKey();
    kv = KeyValue.createKeyValueFromKey(bb);
    if (first) {
      assertTrue(Bytes.equals(kv.getRow(), midRow));
      first = false;
    }
  }
  assertTrue(Bytes.equals(kv.getRow(), finalRow));
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:42,代码来源:TestStoreFile.java

示例8: testHFileLink

/**
 * Verifies that a store file can be opened and fully scanned through an
 * HFileLink: writes a store file, creates a link to it from another
 * region directory, then reads every row back via the link.
 * @throws IOException on any file system failure
 */
public void testHFileLink() throws IOException {
  final String columnFamily = "f";

  Configuration testConf = new Configuration(this.conf);
  FSUtils.setRootDir(testConf, this.testDir);

  HRegionInfo hri = new HRegionInfo(Bytes.toBytes("table-link"));
  Path storedir = new Path(new Path(this.testDir,
    new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);

  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf,
       this.fs, 8 * 1024)
          .withOutputDir(storedir)
          .build();
  Path storeFilePath = writer.getPath();
  writeStoreFile(writer);
  writer.close();

  // Create a link in a different region directory pointing at the file.
  Path dstPath = new Path(this.testDir, new Path("test-region", columnFamily));
  HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
  Path linkFilePath = new Path(dstPath,
                HFileLink.createHFileLinkName(hri, storeFilePath.getName()));

  // Try to open store file from link
  StoreFile hsf = new StoreFile(this.fs, linkFilePath, testConf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
  assertTrue(hsf.isLink());

  // Now confirm that I can read from the link.
  // count starts at 1 because seekTo() positions on the first row and
  // next() only counts the rows after it.
  int count = 1;
  HFileScanner s = hsf.createReader().getScanner(false, false);
  s.seekTo();
  while (s.next()) {
    count++;
  }
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:38,代码来源:TestStoreFile.java

示例9: MockStoreFile

/**
 * Creates a mock store file of the given reported length, optionally
 * flagged as a reference file. Backed by TEST_FILE, with Bloom filters
 * and data block encoding disabled.
 * @param length the length this mock should report
 * @param isRef whether this mock should behave as a reference file
 * @throws IOException if the superclass fails to open the backing file
 */
MockStoreFile(long length, boolean isRef) throws IOException {
  super(TEST_UTIL.getTestFileSystem(), TEST_FILE,
        TEST_UTIL.getConfiguration(),
        new CacheConfig(TEST_UTIL.getConfiguration()), BloomType.NONE,
        NoOpDataBlockEncoder.INSTANCE);
  this.length = length;
  this.isRef  = isRef;
}
 
开发者ID:wanhao,项目名称:IRIndex,代码行数:8,代码来源:TestCompactSelection.java

示例10: testCodecs

/**
 * Test a data block encoder on the given HFile. Output results to console.
 * @param kvLimit The limit of KeyValue which will be analyzed.
 * @param hfilePath an HFile path on the file system.
 * @param compressionName Compression algorithm used for comparison.
 * @param doBenchmark Run performance benchmarks.
 * @param doVerify Verify correctness.
 * @throws IOException When pathName is incorrect.
 */
public static void testCodecs(Configuration conf, int kvLimit,
    String hfilePath, String compressionName, boolean doBenchmark,
    boolean doVerify) throws IOException {
  // create environment
  Path path = new Path(hfilePath);
  CacheConfig cacheConf = new CacheConfig(conf);
  FileSystem fs = FileSystem.get(conf);
  StoreFile hsf = new StoreFile(fs, path, conf, cacheConf,
      BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);

  StoreFile.Reader reader = hsf.createReader();
  reader.loadFileInfo();
  KeyValueScanner scanner = reader.getStoreFileScanner(true, true);
  try {
    // run the utilities
    DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
    comp.checkStatistics(scanner, kvLimit);
    if (doVerify) {
      comp.verifyCodecs(scanner, kvLimit);
    }
    if (doBenchmark) {
      comp.benchmarkCodecs();
    }
    comp.displayStatistics();
  } finally {
    // BUGFIX: close the scanner and reader even when analysis throws;
    // previously any exception above leaked both resources.
    scanner.close();
    reader.close(cacheConf.shouldEvictOnClose());
  }
}
 
开发者ID:daidong,项目名称:DominoHBase,代码行数:38,代码来源:DataBlockEncodingTool.java

示例11: convertEncoding

/**
 * Maps a {@link KeyEncoding} to its HFile data block encoder. Every
 * currently known encoding resolves to the no-op encoder. A null
 * {@code type} throws NullPointerException (switch on null).
 */
public static HFileDataBlockEncoder convertEncoding(KeyEncoding type) {
  switch (type) {
    case NONE:
    default:
      return NoOpDataBlockEncoder.INSTANCE;
  }
}
 
开发者ID:gemxd,项目名称:gemfirexd-oss,代码行数:6,代码来源:HFileSortedOplogFactory.java

示例12: Writer

/**
 * Creates an HFile.Writer that also write helpful meta data.
 * @param fs file system to write to
 * @param path file name to create
 * @param blocksize HDFS block size
 * @param compress HDFS block compression
 * @param dataBlockEncoder data block encoder; null means the no-op encoder
 * @param conf user configuration
 * @param cacheConf cache configuration and block cache reference
 * @param comparator key comparator
 * @param bloomType bloom filter setting
 * @param maxKeys the expected maximum number of keys to be added. Was used for Bloom filter
 *          size in {@link HFile} format version 1.
 * @param checksumType the checksum type
 * @param bytesPerChecksum the number of bytes per checksum value
 * @param includeMVCCReadpoint whether to write the mvcc readpoint to the file for each KV
 * @throws IOException problem writing to FS
 */
private Writer(FileSystem fs, Path path, int blocksize, Compression.Algorithm compress,
    HFileDataBlockEncoder dataBlockEncoder, final Configuration conf, CacheConfig cacheConf,
    final KVComparator comparator, BloomType bloomType, long maxKeys,
    final ChecksumType checksumType, final int bytesPerChecksum, boolean includeMVCCReadpoint)
    throws IOException {
  this.dataBlockEncoder =
      dataBlockEncoder != null ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  // BUGFIX: pass the null-defaulted encoder (this.dataBlockEncoder) to the
  // writer factory. Previously the raw, possibly-null parameter was passed,
  // bypassing the NoOpDataBlockEncoder.INSTANCE fallback computed above.
  writer =
      HFile.getWriterFactory(conf, cacheConf).withPath(fs, path).withBlockSize(blocksize)
          .withCompression(compress).withDataBlockEncoder(this.dataBlockEncoder)
          .withComparator(comparator.getRawComparator()).withChecksumType(checksumType)
          .withBytesPerChecksum(bytesPerChecksum).includeMVCCReadpoint(includeMVCCReadpoint)
          .create();

  this.kvComparator = comparator;

  // May return null when general Bloom filters are disabled in config.
  generalBloomFilterWriter =
      BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, bloomType,
        (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);

  if (generalBloomFilterWriter != null) {
    this.bloomType = bloomType;
    LOG.info("Bloom filter type for " + path + ": " + this.bloomType + ", "
        + generalBloomFilterWriter.getClass().getSimpleName());
  } else {
    // Not using Bloom filters.
    this.bloomType = BloomType.NONE;
  }

  // initialize delete family Bloom filter when there is NO RowCol Bloom
  // filter
  if (this.bloomType != BloomType.ROWCOL) {
    this.deleteFamilyBloomFilterWriter =
        BloomFilterFactory.createDeleteBloomAtWrite(conf, cacheConf,
          (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
  } else {
    deleteFamilyBloomFilterWriter = null;
  }
  if (deleteFamilyBloomFilterWriter != null) {
    WinterOptimizer.ReplaceRawCode("Delete Family Bloom filter type for " + path + ": "
        + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
    // LOG.info("Delete Family Bloom filter type for " + path + ": "
    // + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
  }
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:63,代码来源:StoreFile.java

示例13: testHFileScannerThrowsErrors

/**
 * Injects errors into the pread calls of an on-disk file, and makes
 * sure those bubble up to the HFile scanner: a store file is written
 * through a faulty file system, read successfully once, and then faults
 * are enabled so the next scan must throw an IOException whose message
 * mentions the injected fault.
 */
@Test
public void testHFileScannerThrowsErrors() throws IOException {
  Path hfilePath = new Path(new Path(
      util.getDataTestDir("internalScannerExposesErrors"),
      "regionname"), "familyname");
  HFileSystem hfs = (HFileSystem)util.getTestFileSystem();
  // Wrap the backing file system so read faults can be injected later.
  FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs());
  FileSystem fs = new HFileSystem(faultyfs);
  CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
  StoreFile.Writer writer = new StoreFile.WriterBuilder(
      util.getConfiguration(), cacheConf, hfs, 2*1024)
          .withOutputDir(hfilePath)
          .build();
  TestStoreFile.writeStoreFile(
      writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));

  StoreFile sf = new StoreFile(fs, writer.getPath(),
      util.getConfiguration(), cacheConf, StoreFile.BloomType.NONE,
      NoOpDataBlockEncoder.INSTANCE);

  StoreFile.Reader reader = sf.createReader();
  HFileScanner scanner = reader.getScanner(false, true);

  // Grab the input stream the faulty FS handed out so faults can be armed.
  FaultyInputStream inStream = faultyfs.inStreams.get(0).get();
  assertNotNull(inStream);

  scanner.seekTo();
  // Do at least one successful read
  assertTrue(scanner.next());

  faultyfs.startFaults();

  try {
    // 'scanned' only counts reads that succeed before the fault fires.
    int scanned=0;
    while (scanner.next()) {
      scanned++;
    }
    fail("Scanner didn't throw after faults injected");
  } catch (IOException ioe) {
    LOG.info("Got expected exception", ioe);
    assertTrue(ioe.getMessage().contains("Fault"));
  }
  reader.close(true); // end of test so evictOnClose
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:48,代码来源:TestFSErrorsExposed.java

示例14: testStoreFileScannerThrowsErrors

/**
 * Injects errors into the pread calls of an on-disk file, and makes
 * sure those bubble up to the StoreFileScanner: like the HFile scanner
 * variant, but exercising the StoreFileScanner wrapper, which rewraps
 * the fault as "Could not iterate".
 */
@Test
public void testStoreFileScannerThrowsErrors() throws IOException {
  Path hfilePath = new Path(new Path(
      util.getDataTestDir("internalScannerExposesErrors"),
      "regionname"), "familyname");
  HFileSystem hfs = (HFileSystem)util.getTestFileSystem();
  // Wrap the backing file system so read faults can be injected later.
  FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs());
  HFileSystem fs = new HFileSystem(faultyfs);
  CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
  StoreFile.Writer writer = new StoreFile.WriterBuilder(
      util.getConfiguration(), cacheConf, hfs, 2 * 1024)
          .withOutputDir(hfilePath)
          .build();
  TestStoreFile.writeStoreFile(
      writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));

  StoreFile sf = new StoreFile(fs, writer.getPath(), util.getConfiguration(),
      cacheConf, BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);

  List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(
      Collections.singletonList(sf), false, true, false);
  KeyValueScanner scanner = scanners.get(0);

  // Grab the input stream the faulty FS handed out so faults can be armed.
  FaultyInputStream inStream = faultyfs.inStreams.get(0).get();
  assertNotNull(inStream);

  scanner.seek(KeyValue.LOWESTKEY);
  // Do at least one successful read
  assertNotNull(scanner.next());
  faultyfs.startFaults();

  try {
    // 'scanned' only counts reads that succeed before the fault fires.
    int scanned=0;
    while (scanner.next() != null) {
      scanned++;
    }
    fail("Scanner didn't throw after faults injected");
  } catch (IOException ioe) {
    LOG.info("Got expected exception", ioe);
    assertTrue(ioe.getMessage().contains("Could not iterate"));
  }
  scanner.close();
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:47,代码来源:TestFSErrorsExposed.java

示例15: readStoreFile

/**
 * Opens the store file written for test case {@code t} and validates its
 * Bloom filter: first checks there are no false negatives for the rows
 * actually written, then estimates the false-positive rate in both "fake
 * lookup" and production modes and bounds it statistically.
 *
 * @param t index of the test case (selects NUM_KV[t] for the trial count)
 * @param bt Bloom filter type the file was written with
 * @param kvs the KeyValues that were written into the file
 * @param sfPath path of the store file under test
 * @throws IOException if the store file cannot be opened or scanned
 */
private void readStoreFile(int t, BloomType bt, List<KeyValue> kvs,
    Path sfPath) throws IOException {
  StoreFile sf = new StoreFile(fs, sfPath, conf, cacheConf, bt,
      NoOpDataBlockEncoder.INSTANCE);
  StoreFile.Reader r = sf.createReader();
  final boolean pread = true; // does not really matter
  StoreFileScanner scanner = r.getStoreFileScanner(true, pread);

  {
    // Test for false negatives (not allowed).
    int numChecked = 0;
    for (KeyValue kv : kvs) {
      byte[] row = kv.getRow();
      boolean present = isInBloom(scanner, row, kv.getQualifier());
      assertTrue(testIdMsg + " Bloom filter false negative on row "
          + Bytes.toStringBinary(row) + " after " + numChecked
          + " successful checks", present);
      ++numChecked;
    }
  }

  // Test for false positives (some percentage allowed). We test in two modes:
  // "fake lookup" which ignores the key distribution, and production mode.
  for (boolean fakeLookupEnabled : new boolean[] { true, false }) {
    ByteBloomFilter.setFakeLookupMode(fakeLookupEnabled);
    try {
      String fakeLookupModeStr = ", fake lookup is " + (fakeLookupEnabled ?
          "enabled" : "disabled");
      CompoundBloomFilter cbf = (CompoundBloomFilter) r.getGeneralBloomFilter();
      cbf.enableTestingStats();
      int numFalsePos = 0;
      // Fixed seed keeps the trial sequence deterministic across runs.
      Random rand = new Random(EVALUATION_SEED);
      int nTrials = NUM_KV[t] * 10;
      for (int i = 0; i < nTrials; ++i) {
        byte[] query = TestHFileWriterV2.randomRowOrQualifier(rand);
        if (isInBloom(scanner, query, bt, rand)) {
          numFalsePos += 1;
        }
      }
      double falsePosRate = numFalsePos * 1.0 / nTrials;
      LOG.debug(String.format(testIdMsg
          + " False positives: %d out of %d (%f)",
          numFalsePos, nTrials, falsePosRate) + fakeLookupModeStr);

      // Check for obvious Bloom filter crashes.
      assertTrue("False positive is too high: " + falsePosRate + " (greater "
          + "than " + TOO_HIGH_ERROR_RATE + ")" + fakeLookupModeStr,
          falsePosRate < TOO_HIGH_ERROR_RATE);

      // Now a more precise check to see if the false positive rate is not
      // too high. The reason we use a relaxed restriction for the real-world
      // case as opposed to the "fake lookup" case is that our hash functions
      // are not completely independent.

      double maxZValue = fakeLookupEnabled ? 1.96 : 2.5;
      validateFalsePosRate(falsePosRate, nTrials, maxZValue, cbf,
          fakeLookupModeStr);

      // For checking the lower bound we need to eliminate the last chunk,
      // because it is frequently smaller and the false positive rate in it
      // is too low. This does not help if there is only one under-sized
      // chunk, though.
      int nChunks = cbf.getNumChunks();
      if (nChunks > 1) {
        numFalsePos -= cbf.getNumPositivesForTesting(nChunks - 1);
        nTrials -= cbf.getNumQueriesForTesting(nChunks - 1);
        falsePosRate = numFalsePos * 1.0 / nTrials;
        LOG.info(testIdMsg + " False positive rate without last chunk is " +
            falsePosRate + fakeLookupModeStr);
      }

      // Negative z bound: rate must also not be suspiciously LOW.
      validateFalsePosRate(falsePosRate, nTrials, -2.58, cbf,
          fakeLookupModeStr);
    } finally {
      // Always restore production lookup mode for subsequent tests.
      ByteBloomFilter.setFakeLookupMode(false);
    }
  }

  r.close(true); // end of test so evictOnClose
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:80,代码来源:TestCompoundBloomFilter.java


注:本文中的org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder.INSTANCE属性示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。