

Java HFile.Reader Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.hfile.HFile.Reader. If you are wondering what HFile.Reader does, how to use it, or are looking for concrete examples, the selected code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.io.hfile.HFile.


The sections below present 15 code examples of the HFile.Reader method, sorted by popularity. A minimal usage sketch precedes the examples.
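Before the examples, here is a minimal open-scan-close sketch of HFile.Reader, assuming the HBase 1.x-era API used in most of the snippets below (HFile.createReader(fs, path, cacheConf, conf), loadFileInfo(), getScanner(cacheBlocks, pread)). The class name HFileReaderDemo, the method countCells, and its path argument are illustrative names only, not part of any project cited here.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

public class HFileReaderDemo {

  /** Opens an HFile, counts every cell in it, and closes the reader. */
  public static long countCells(Path path) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
    try {
      reader.loadFileInfo();                                  // load the file-info block (metadata, first/last key)
      HFileScanner scanner = reader.getScanner(false, false); // cacheBlocks=false, pread=false
      long count = 0;
      if (scanner.seekTo()) {                                 // position at the first cell; false means the file is empty
        do {
          Cell cell = scanner.getKeyValue();                  // current cell; inspect or process it here
          count++;
        } while (scanner.next());
      }
      return count;
    } finally {
      reader.close();
    }
  }
}

Most of the examples that follow are variations of this pattern: create the reader, optionally call loadFileInfo(), scan cells or query metadata, and close the reader when done.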

Example 1: CompoundBloomFilter

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
/**
 * De-serialization for compound Bloom filter metadata. Must be consistent
 * with what {@link CompoundBloomFilterWriter} does.
 *
 * @param meta serialized Bloom filter metadata without any magic blocks
 * @throws IOException
 */
public CompoundBloomFilter(DataInput meta, HFile.Reader reader)
    throws IOException {
  this.reader = reader;

  totalByteSize = meta.readLong();
  hashCount = meta.readInt();
  hashType = meta.readInt();
  totalKeyCount = meta.readLong();
  totalMaxKeys = meta.readLong();
  numChunks = meta.readInt();
  comparator = FixedFileTrailer.createComparator(
      Bytes.toString(Bytes.readByteArray(meta)));

  hash = Hash.getInstance(hashType);
  if (hash == null) {
    throw new IllegalArgumentException("Invalid hash type: " + hashType);
  }

  index = new HFileBlockIndex.BlockIndexReader(comparator, 1);
  index.readRootIndex(meta, numChunks);
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 29, Source: CompoundBloomFilter.java

Example 2: createFromMeta

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
/**
 * Instantiates the correct Bloom filter class based on the version provided
 * in the meta block data.
 *
 * @param meta the byte array holding the Bloom filter's metadata, including
 *          version information
 * @param reader the {@link HFile} reader to use to lazily load Bloom filter
 *          blocks
 * @return an instance of the correct type of Bloom filter
 * @throws IllegalArgumentException
 */
public static BloomFilter
    createFromMeta(DataInput meta, HFile.Reader reader)
    throws IllegalArgumentException, IOException {
  int version = meta.readInt();
  switch (version) {
    case ByteBloomFilter.VERSION:
      // This is only possible in a version 1 HFile. We are ignoring the
      // passed comparator because raw byte comparators are always used
      // in version 1 Bloom filters.
      return new ByteBloomFilter(meta);

    case CompoundBloomFilterBase.VERSION:
      return new CompoundBloomFilter(meta, reader);

    default:
      throw new IllegalArgumentException(
        "Bad bloom filter format version " + version
      );
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 32, Source: BloomFilterFactory.java

Example 3: getOldestHfileTs

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
@Override public long getOldestHfileTs(boolean majorCompactioOnly) throws IOException {
  long result = Long.MAX_VALUE;
  for (Store store : getStores()) {
    Collection<StoreFile> storeFiles = store.getStorefiles();
    if (storeFiles == null) continue;
    for (StoreFile file : storeFiles) {
      StoreFile.Reader sfReader = file.getReader();
      if (sfReader == null) continue;
      HFile.Reader reader = sfReader.getHFileReader();
      if (reader == null) continue;
      if (majorCompactioOnly) {
        byte[] val = reader.loadFileInfo().get(StoreFile.MAJOR_COMPACTION_KEY);
        if (val == null || !Bytes.toBoolean(val)) {
          continue;
        }
      }
      result = Math.min(result, reader.getFileContext().getFileCreateTime());
    }
  }
  return result == Long.MAX_VALUE ? 0 : result;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 23, Source: HRegion.java

Example 4: testCreateWriter

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setCompressionType(Compression.Algorithm.GZ);
  hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
  init(name.getMethodName(), conf, hcd);

  // Test createWriterInTmp()
  StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false);
  Path path = writer.getPath();
  writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  writer.close();

  // Verify that compression and encoding settings are respected
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  Assert.assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
  Assert.assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
  reader.close();
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 30, Source: TestStore.java

Example 5: extractHFileKey

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
    new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    if (key == null) {
      return null;
    }
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 17, Source: TestEncryptionRandomKeying.java

Example 6: doSmokeTest

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
public static void doSmokeTest(FileSystem fs, Path path, String codec)
throws Exception {
  Configuration conf = HBaseConfiguration.create();
  HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, path)
      .withCompression(codec)
      .create();
  writer.append(Bytes.toBytes("testkey"), Bytes.toBytes("testval"));
  writer.appendFileInfo(Bytes.toBytes("infokey"), Bytes.toBytes("infoval"));
  writer.close();

  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf));
  reader.loadFileInfo();
  byte[] key = reader.getFirstKey();
  boolean rc = Bytes.toString(key).equals("testkey");
  reader.close();

  if (!rc) {
    throw new Exception("Read back incorrect result: " +
                        Bytes.toStringBinary(key));
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 23, Source: CompressionTest.java

Example 7: readHFileSeq

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
private void readHFileSeq(Path file, Compression.Algorithm compression) throws Exception
{

  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(hdfs, file, cacheConf, conf);
  HFileScanner scanner = reader.getScanner(true, true, false);

  scanner.seekTo();

  @SuppressWarnings("unused")
  KeyValue kv = null;
  while (scanner.next()) {
    kv = scanner.getKeyValue();
    //logger.debug("key: {} value: {}", new String (kv.getKey()), new String (kv.getValue()));
  }

}
 
Developer: DataTorrent, Project: Megh, Lines of code: 18, Source: HadoopFilePerformanceTest.java

Example 8: scanHFileOnLocalFS

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
private void scanHFileOnLocalFS(Path file) throws IOException {
  HColumnDescriptor family = desc.getFamily(Bytes.toBytes("f"));
  CacheConfig cacheConf = new CacheConfig(conf, family);
  HFile.Reader reader = HFile.createReader(LocalFileSystem.getLocal(conf), file, cacheConf, conf);
  HFileScanner scanner = reader.getScanner(false, false, false);
  scanner.seekTo();
  int n = 0;
  do {
    Cell cell = scanner.getKeyValue();
    printKV(cell);
    ++n;
  } while (scanner.next());
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 14, Source: HFileTest.java

Example 9: doSmokeTest

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
public static void doSmokeTest(FileSystem fs, Path path, String codec)
throws Exception {
  Configuration conf = HBaseConfiguration.create();
  HFileContext context = new HFileContextBuilder()
                         .withCompression(AbstractHFileWriter.compressionByName(codec)).build();
  HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  // Write any-old Cell...
  final byte [] rowKey = Bytes.toBytes("compressiontestkey");
  Cell c = CellUtil.createCell(rowKey, Bytes.toBytes("compressiontestval"));
  writer.append(c);
  writer.appendFileInfo(Bytes.toBytes("compressioninfokey"), Bytes.toBytes("compressioninfoval"));
  writer.close();
  Cell cc = null;
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    HFileScanner scanner = reader.getScanner(false, true);
    scanner.seekTo(); // position to the start of file
    // Scanner does not do Cells yet. Do below for now till fixed.
    cc = scanner.getKeyValue();
    if (CellComparator.compareRows(c, cc) != 0) {
      throw new Exception("Read back incorrect result: " + c.toString() + " vs " + cc.toString());
    }
  } finally {
    reader.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 31, Source: CompressionTest.java

Example 10: checkHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
/**
 * Checks a path to see if it is a valid hfile.
 *
 * @param p
 *          full Path to an HFile
 * @throws IOException
 *           This is a connectivity related exception
 */
protected void checkHFile(Path p) throws IOException {
  HFile.Reader r = null;
  try {
    r = HFile.createReader(fs, p, cacheConf, conf);
  } catch (CorruptHFileException che) {
    LOG.warn("Found corrupt HFile " + p, che);
    corrupted.add(p);
    if (inQuarantineMode) {
      Path dest = createQuarantinePath(p);
      LOG.warn("Quarantining corrupt HFile " + p + " into " + dest);
      boolean success = fs.mkdirs(dest.getParent());
      success = success ? fs.rename(p, dest): false;
      if (!success) {
        failures.add(p);
      } else {
        quarantined.add(dest);
      }
    }
    return;
  } catch (FileNotFoundException fnfe) {
    LOG.warn("HFile " + p + " was missing.  Likely removed due to compaction/split?");
    missing.add(p);
  } finally {
    hfilesChecked.addAndGet(1);
    if (r != null) {
      r.close(true);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 38, Source: HFileCorruptionChecker.java

Example 11: compactLMDIndex

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
/**
 * Do a minor/major compaction for store files' index.
 *
 * @param compactedFile if null, compact index from this file, else compact each StoreFile's index
 *                      together
 * @return Product of compaction or null if there is no index column cell
 * @throws IOException
 */
void compactLMDIndex(final Path compactedFile, HStore store, TimeRangeTracker timeRangeTracker)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug(
        "Generate intermediate index file from major compaction file=" + compactedFile + " in cf="
            + store.toString());
  }
  HFile.Reader reader =
      HFile.createReader(store.fs.getFileSystem(), compactedFile, store.cacheConf, conf);
  HFileScanner scanner = reader.getScanner(false, false, true);
  Queue<KeyValue> rawRecords = new LinkedList<>();
  int counter = 0;
  try {
    scanner.seekTo();
    do {
      KeyValue kv = (KeyValue) scanner.getKeyValue();
      if (store.indexTableRelation.isIndexColumn(kv.getFamily(), kv.getQualifier())) {
        rawRecords.add(kv);
      }
      ++counter;
    } while (scanner.next());
  } finally {
    if (reader != null) reader.close();
  }
  System.out.println("in compacted file=" + compactedFile + ", number of keyvalue=" + counter
      + ", for LMDIndex is:" + rawRecords.size());
  LMDIndexWriter lmdIndexWriter =
      new LMDIndexWriter(store, compactedFile, timeRangeTracker, "COMPACT");
  lmdIndexWriter.processKeyValueQueue(rawRecords);
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 39, Source: Compactor.java

Example 12: parseKeyValueFromReader

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
private Queue<KeyValue> parseKeyValueFromReader() throws IOException {
  HFile.Reader reader = HFile
      .createReader(store.getFileSystem(), majorMovedPath, store.getCacheConfig(), store.conf);
  HFileScanner scanner = reader.getScanner(false, false, false);
  scanner.seekTo();
  Queue<KeyValue> queue = new LinkedList<>();
  do {
    KeyValue cell = (KeyValue) scanner.getKeyValue();
    queue.offer(cell);
  } while (scanner.next());
  reader.close();
  return queue;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 14, Source: CompactJobQueue.java

Example 13: winterTestingHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
private void winterTestingHFile(Path file) throws IOException {
  HFile.Reader reader =
      HFile.createReader(store.getFileSystem(), file, store.getCacheConfig(), store.conf);
  HFileScanner scanner = reader.getScanner(false, false, false);
  scanner.seekTo();
  int n = 0;
  do {
    Cell cell = scanner.getKeyValue();
    ++n;
  } while (scanner.next());
  LOG.info("LCDBG, HFile has: " + n + " in " + file);
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 13, Source: CompactJobQueue.java

Example 14: getKVCountFromHfile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
/**
 * Method returns the total KVs in given hfile
 * @param fs File System
 * @param p HFile path
 * @return KV count in the given hfile
 * @throws IOException
 */
private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
  Configuration conf = util.getConfiguration();
  HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), conf);
  reader.loadFileInfo();
  HFileScanner scanner = reader.getScanner(false, false);
  scanner.seekTo();
  int count = 0;
  do {
    count++;
  } while (scanner.next());
  reader.close();
  return count;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 21, Source: TestImportTsv.java

Example 15: verifyHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class this method depends on
private int verifyHFile(Path p) throws IOException {
  Configuration conf = util.getConfiguration();
  HFile.Reader reader = HFile.createReader(
      p.getFileSystem(conf), p, new CacheConfig(conf), conf);
  reader.loadFileInfo();
  HFileScanner scanner = reader.getScanner(false, false);
  scanner.seekTo();
  int count = 0;
  do {
    count++;
  } while (scanner.next());
  assertTrue(count > 0);
  reader.close();
  return count;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 16, Source: TestLoadIncrementalHFiles.java


Note: The org.apache.hadoop.hbase.io.hfile.HFile.Reader examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. Refer to each project's license before using or redistributing the code; do not reproduce this article without permission.