当前位置: 首页>>代码示例>>Java>>正文


Java TestHFileWriterV2类代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2的典型用法代码示例。如果您正苦于以下问题:Java TestHFileWriterV2类的具体用法?Java TestHFileWriterV2怎么用?Java TestHFileWriterV2使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


TestHFileWriterV2类属于org.apache.hadoop.hbase.io.hfile包,在下文中一共展示了TestHFileWriterV2类的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: writeStoreFile

import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2; //导入依赖的package包/类
/**
 * Appends {@code NUM_KV} randomly generated key-values to the given store
 * file writer. Each random key is carved into a fixed 32-byte row, a
 * random-length column family, and the remaining bytes as the qualifier.
 */
private void writeStoreFile(StoreFile.Writer writer) throws IOException {
  final int rowLength = 32;
  for (int keyIndex = 0; keyIndex < NUM_KV; ++keyIndex) {
    final byte[] key = TestHFileWriterV2.randomOrderedKey(rand, keyIndex);
    final byte[] value = TestHFileWriterV2.randomValue(rand);
    // Family length is chosen in [0, key.length - rowLength].
    final int familyLength = rand.nextInt(key.length - rowLength + 1);
    final int qualifierOffset = rowLength + familyLength;
    writer.append(new KeyValue(
        key, 0, rowLength,
        key, rowLength, familyLength,
        key, qualifierOffset, key.length - qualifierOffset,
        rand.nextLong(),
        generateKeyType(rand),
        value, 0, value.length));
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:17,代码来源:TestCacheOnWriteInSchema.java

示例2: createSortedKeyValues

import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2; //导入依赖的package包/类
/**
 * Generates {@code n} random key-values and returns them sorted with
 * {@code KeyValue.COMPARATOR}.
 */
private List<KeyValue> createSortedKeyValues(Random rand, int n) {
  final List<KeyValue> sorted = new ArrayList<KeyValue>(n);
  int remaining = n;
  while (remaining-- > 0) {
    sorted.add(TestHFileWriterV2.randomKeyValue(rand));
  }
  Collections.sort(sorted, KeyValue.COMPARATOR);
  return sorted;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:8,代码来源:TestCompoundBloomFilter.java

示例3: isInBloom

import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2; //导入依赖的package包/类
/**
 * Asks the store file scanner's Bloom filter whether the (row, qualifier)
 * pair might be present: builds a single-row scan on the test column family
 * backed by a mocked store, and returns the scanner's verdict.
 */
private boolean isInBloom(StoreFileScanner scanner, byte[] row,
    byte[] qualifier) {
  final byte[] family = Bytes.toBytes(TestHFileWriterV2.COLUMN_FAMILY_NAME);
  final Scan singleRowScan = new Scan(row, row);
  singleRowScan.addColumn(family, qualifier);
  final HColumnDescriptor columnDescriptor = mock(HColumnDescriptor.class);
  when(columnDescriptor.getName()).thenReturn(family);
  final Store mockStore = mock(Store.class);
  when(mockStore.getFamily()).thenReturn(columnDescriptor);
  return scanner.shouldUseScanner(singleRowScan, mockStore, Long.MIN_VALUE);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:11,代码来源:TestCompoundBloomFilter.java

示例4: readStoreFile

import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2; //导入依赖的package包/类
/**
 * Reads back the store file written for test configuration {@code t} and
 * validates its compound Bloom filter: first checks that every written
 * key-value is reported present (no false negatives allowed), then samples
 * random queries to bound the false positive rate, in both "fake lookup"
 * and production modes.
 *
 * @param t index into NUM_KV selecting the test configuration
 * @param bt Bloom filter type the store file was written with
 * @param kvs the exact key-values that were written to the file
 * @param sfPath path of the store file to open and validate
 * @throws IOException if the store file cannot be opened or read
 */
private void readStoreFile(int t, BloomType bt, List<KeyValue> kvs,
    Path sfPath) throws IOException {
  StoreFile sf = new StoreFile(fs, sfPath, conf, cacheConf, bt);
  StoreFile.Reader r = sf.createReader();
  final boolean pread = true; // does not really matter
  StoreFileScanner scanner = r.getStoreFileScanner(true, pread);

  {
    // Test for false negatives (not allowed).
    int numChecked = 0;
    for (KeyValue kv : kvs) {
      byte[] row = kv.getRow();
      boolean present = isInBloom(scanner, row, kv.getQualifier());
      assertTrue(testIdMsg + " Bloom filter false negative on row "
          + Bytes.toStringBinary(row) + " after " + numChecked
          + " successful checks", present);
      ++numChecked;
    }
  }

  // Test for false positives (some percentage allowed). We test in two modes:
  // "fake lookup" which ignores the key distribution, and production mode.
  for (boolean fakeLookupEnabled : new boolean[] { true, false }) {
    ByteBloomFilter.setFakeLookupMode(fakeLookupEnabled);
    try {
      String fakeLookupModeStr = ", fake lookup is " + (fakeLookupEnabled ?
          "enabled" : "disabled");
      CompoundBloomFilter cbf = (CompoundBloomFilter) r.getGeneralBloomFilter();
      cbf.enableTestingStats();
      int numFalsePos = 0;
      // Fixed seed so the false-positive sample is reproducible across runs.
      Random rand = new Random(EVALUATION_SEED);
      int nTrials = NUM_KV[t] * 10;
      for (int i = 0; i < nTrials; ++i) {
        byte[] query = TestHFileWriterV2.randomRowOrQualifier(rand);
        if (isInBloom(scanner, query, bt, rand)) {
          numFalsePos += 1;
        }
      }
      double falsePosRate = numFalsePos * 1.0 / nTrials;
      LOG.debug(String.format(testIdMsg
          + " False positives: %d out of %d (%f)",
          numFalsePos, nTrials, falsePosRate) + fakeLookupModeStr);

      // Check for obvious Bloom filter crashes.
      assertTrue("False positive is too high: " + falsePosRate + " (greater "
          + "than " + TOO_HIGH_ERROR_RATE + ")" + fakeLookupModeStr,
          falsePosRate < TOO_HIGH_ERROR_RATE);

      // Now a more precise check to see if the false positive rate is not
      // too high. The reason we use a relaxed restriction for the real-world
      // case as opposed to the "fake lookup" case is that our hash functions
      // are not completely independent.

      double maxZValue = fakeLookupEnabled ? 1.96 : 2.5;
      validateFalsePosRate(falsePosRate, nTrials, maxZValue, cbf,
          fakeLookupModeStr);

      // For checking the lower bound we need to eliminate the last chunk,
      // because it is frequently smaller and the false positive rate in it
      // is too low. This does not help if there is only one under-sized
      // chunk, though.
      int nChunks = cbf.getNumChunks();
      if (nChunks > 1) {
        numFalsePos -= cbf.getNumPositivesForTesting(nChunks - 1);
        nTrials -= cbf.getNumQueriesForTesting(nChunks - 1);
        falsePosRate = numFalsePos * 1.0 / nTrials;
        LOG.info(testIdMsg + " False positive rate without last chunk is " +
            falsePosRate + fakeLookupModeStr);
      }

      // Negative Z threshold: checks the rate is not suspiciously LOW,
      // which would indicate an over-sized (wasteful) Bloom filter.
      validateFalsePosRate(falsePosRate, nTrials, -2.58, cbf,
          fakeLookupModeStr);
    } finally {
      // Always restore global state so later tests see production mode.
      ByteBloomFilter.setFakeLookupMode(false);
    }
  }

  r.close(true); // end of test so evictOnClose
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:80,代码来源:TestCompoundBloomFilter.java

示例5: readStoreFile

import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2; //导入依赖的package包/类
/**
 * Reads back the store file written for test configuration {@code t}
 * (opened with {@code NoOpDataBlockEncoder.INSTANCE}, i.e. no data block
 * encoding) and validates its compound Bloom filter: first checks that
 * every written key-value is reported present (no false negatives allowed),
 * then samples random queries to bound the false positive rate, in both
 * "fake lookup" and production modes.
 *
 * @param t index into NUM_KV selecting the test configuration
 * @param bt Bloom filter type the store file was written with
 * @param kvs the exact key-values that were written to the file
 * @param sfPath path of the store file to open and validate
 * @throws IOException if the store file cannot be opened or read
 */
private void readStoreFile(int t, BloomType bt, List<KeyValue> kvs,
    Path sfPath) throws IOException {
  StoreFile sf = new StoreFile(fs, sfPath, conf, cacheConf, bt,
      NoOpDataBlockEncoder.INSTANCE);
  StoreFile.Reader r = sf.createReader();
  final boolean pread = true; // does not really matter
  StoreFileScanner scanner = r.getStoreFileScanner(true, pread);

  {
    // Test for false negatives (not allowed).
    int numChecked = 0;
    for (KeyValue kv : kvs) {
      byte[] row = kv.getRow();
      boolean present = isInBloom(scanner, row, kv.getQualifier());
      assertTrue(testIdMsg + " Bloom filter false negative on row "
          + Bytes.toStringBinary(row) + " after " + numChecked
          + " successful checks", present);
      ++numChecked;
    }
  }

  // Test for false positives (some percentage allowed). We test in two modes:
  // "fake lookup" which ignores the key distribution, and production mode.
  for (boolean fakeLookupEnabled : new boolean[] { true, false }) {
    ByteBloomFilter.setFakeLookupMode(fakeLookupEnabled);
    try {
      String fakeLookupModeStr = ", fake lookup is " + (fakeLookupEnabled ?
          "enabled" : "disabled");
      CompoundBloomFilter cbf = (CompoundBloomFilter) r.getGeneralBloomFilter();
      cbf.enableTestingStats();
      int numFalsePos = 0;
      // Fixed seed so the false-positive sample is reproducible across runs.
      Random rand = new Random(EVALUATION_SEED);
      int nTrials = NUM_KV[t] * 10;
      for (int i = 0; i < nTrials; ++i) {
        byte[] query = TestHFileWriterV2.randomRowOrQualifier(rand);
        if (isInBloom(scanner, query, bt, rand)) {
          numFalsePos += 1;
        }
      }
      double falsePosRate = numFalsePos * 1.0 / nTrials;
      LOG.debug(String.format(testIdMsg
          + " False positives: %d out of %d (%f)",
          numFalsePos, nTrials, falsePosRate) + fakeLookupModeStr);

      // Check for obvious Bloom filter crashes.
      assertTrue("False positive is too high: " + falsePosRate + " (greater "
          + "than " + TOO_HIGH_ERROR_RATE + ")" + fakeLookupModeStr,
          falsePosRate < TOO_HIGH_ERROR_RATE);

      // Now a more precise check to see if the false positive rate is not
      // too high. The reason we use a relaxed restriction for the real-world
      // case as opposed to the "fake lookup" case is that our hash functions
      // are not completely independent.

      double maxZValue = fakeLookupEnabled ? 1.96 : 2.5;
      validateFalsePosRate(falsePosRate, nTrials, maxZValue, cbf,
          fakeLookupModeStr);

      // For checking the lower bound we need to eliminate the last chunk,
      // because it is frequently smaller and the false positive rate in it
      // is too low. This does not help if there is only one under-sized
      // chunk, though.
      int nChunks = cbf.getNumChunks();
      if (nChunks > 1) {
        numFalsePos -= cbf.getNumPositivesForTesting(nChunks - 1);
        nTrials -= cbf.getNumQueriesForTesting(nChunks - 1);
        falsePosRate = numFalsePos * 1.0 / nTrials;
        LOG.info(testIdMsg + " False positive rate without last chunk is " +
            falsePosRate + fakeLookupModeStr);
      }

      // Negative Z threshold: checks the rate is not suspiciously LOW,
      // which would indicate an over-sized (wasteful) Bloom filter.
      validateFalsePosRate(falsePosRate, nTrials, -2.58, cbf,
          fakeLookupModeStr);
    } finally {
      // Always restore global state so later tests see production mode.
      ByteBloomFilter.setFakeLookupMode(false);
    }
  }

  r.close(true); // end of test so evictOnClose
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:81,代码来源:TestCompoundBloomFilter.java

示例6: isInBloom

import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2; //导入依赖的package包/类
/**
 * Bloom-membership probe used by the false-positive sampling loop: pairs
 * the given row with a freshly generated random qualifier and delegates to
 * the (row, qualifier) overload. The {@code bt} parameter is accepted for
 * signature compatibility but is not consulted here.
 */
private boolean isInBloom(StoreFileScanner scanner, byte[] row, BloomType bt,
    Random rand) {
  final byte[] randomQualifier = TestHFileWriterV2.randomRowOrQualifier(rand);
  return isInBloom(scanner, row, randomQualifier);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:6,代码来源:TestCompoundBloomFilter.java


注:本文中的org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。