

Java HFile.createReader Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.hfile.HFile.createReader, drawn from open-source projects. If you are wondering what HFile.createReader does, how to call it, or what it looks like in real code, the selected examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.io.hfile.HFile.


The following presents 15 code examples of the HFile.createReader method, sorted by popularity by default.
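Before the individual examples, here is a minimal end-to-end sketch of the open-scan-close pattern that most of them follow. This sketch is written for illustration and does not come from any of the projects below; it assumes the HBase 1.x-era createReader(fs, path, cacheConf, conf) overload that the examples use, and the class name HFileReadSketch plus the command-line path argument are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

public class HFileReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path(args[0]); // path to an existing HFile (hypothetical input)

    // Open a reader with the (fs, path, cacheConf, conf) overload used in the examples below.
    HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
    try {
      reader.loadFileInfo(); // load the file-info block (metadata such as first/last key)
      HFileScanner scanner = reader.getScanner(false, false); // no block caching, no pread
      if (scanner.seekTo()) { // position at the first cell; returns false for an empty file
        do {
          Cell cell = scanner.getKeyValue();
          // process the cell here
        } while (scanner.next());
      }
    } finally {
      reader.close(); // always release the reader
    }
  }
}

As the examples show, the same pattern extends naturally: pass different getScanner flags for cache and pread behavior, or read metadata such as the encryption context instead of scanning cells.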

Example 1: testCreateWriter

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setCompressionType(Compression.Algorithm.GZ);
  hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
  init(name.getMethodName(), conf, hcd);

  // Test createWriterInTmp()
  StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false);
  Path path = writer.getPath();
  writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  writer.close();

  // Verify that compression and encoding settings are respected
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  Assert.assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
  Assert.assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
  reader.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: TestStore.java

Example 2: extractHFileKey

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
    new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    if (key == null) {
      return null;
    }
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestEncryptionRandomKeying.java

Example 3: doSmokeTest

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
public static void doSmokeTest(FileSystem fs, Path path, String codec)
throws Exception {
  Configuration conf = HBaseConfiguration.create();
  HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, path)
      .withCompression(codec)
      .create();
  writer.append(Bytes.toBytes("testkey"), Bytes.toBytes("testval"));
  writer.appendFileInfo(Bytes.toBytes("infokey"), Bytes.toBytes("infoval"));
  writer.close();

  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf));
  reader.loadFileInfo();
  byte[] key = reader.getFirstKey();
  boolean rc = Bytes.toString(key).equals("testkey");
  reader.close();

  if (!rc) {
    throw new Exception("Read back incorrect result: " +
                        Bytes.toStringBinary(key));
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 23, Source: CompressionTest.java

Example 4: readHFileSeqId

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
private void readHFileSeqId(Path file, Compression.Algorithm compression) throws Exception
{
  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(hdfs, file, cacheConf, conf);
  HFileScanner scanner = reader.getScanner(true, true, false);

  @SuppressWarnings("unused")
  KeyValue kv = null;
  scanner.seekTo();

  for (int i = 0; i < testSize; i++) {
    scanner.seekTo(getKey(i).getBytes());
    kv = scanner.getKeyValue();
    //logger.debug("key: {} value: {}", new String (kv.getKey()), new String (kv.getValue()));
  }
  reader.close();
}
 
Developer: DataTorrent, Project: Megh, Lines: 17, Source: HadoopFilePerformanceTest.java

Example 5: readHFileRandom

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
private void readHFileRandom(Path file, Compression.Algorithm compression) throws Exception
{
  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(hdfs, file, cacheConf, conf);
  HFileScanner scanner = reader.getScanner(true, true, false);

  @SuppressWarnings("unused")
  KeyValue kv = null;
  scanner.seekTo();
  Random random = new Random();
  for (int i = 0; i < testSize; i++) {
    scanner.seekTo();
    scanner.seekTo(getKey(random.nextInt(testSize)).getBytes());
    kv = scanner.getKeyValue();
    //logger.debug("key: {} value: {}", new String (kv.getKey()), new String (kv.getValue()));
  }
  reader.close();
}
 
Developer: DataTorrent, Project: Megh, Lines: 18, Source: HadoopFilePerformanceTest.java

Example 6: scanHFileOnLocalFS

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
private void scanHFileOnLocalFS(Path file) throws IOException {
  HColumnDescriptor family = desc.getFamily(Bytes.toBytes("f"));
  CacheConfig cacheConf = new CacheConfig(conf, family);
  HFile.Reader reader = HFile.createReader(LocalFileSystem.getLocal(conf), file, cacheConf, conf);
  HFileScanner scanner = reader.getScanner(false, false, false);
  scanner.seekTo();
  int n = 0;
  do {
    Cell cell = scanner.getKeyValue();
    printKV(cell);
    ++n;
  } while (scanner.next());
  reader.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 14, Source: HFileTest.java

Example 7: doSmokeTest

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
public static void doSmokeTest(FileSystem fs, Path path, String codec)
throws Exception {
  Configuration conf = HBaseConfiguration.create();
  HFileContext context = new HFileContextBuilder()
                         .withCompression(AbstractHFileWriter.compressionByName(codec)).build();
  HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  // Write any-old Cell...
  final byte [] rowKey = Bytes.toBytes("compressiontestkey");
  Cell c = CellUtil.createCell(rowKey, Bytes.toBytes("compressiontestval"));
  writer.append(c);
  writer.appendFileInfo(Bytes.toBytes("compressioninfokey"), Bytes.toBytes("compressioninfoval"));
  writer.close();
  Cell cc = null;
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    HFileScanner scanner = reader.getScanner(false, true);
    scanner.seekTo(); // position to the start of file
    // Scanner does not do Cells yet. Do below for now till fixed.
    cc = scanner.getKeyValue();
    if (CellComparator.compareRows(c, cc) != 0) {
      throw new Exception("Read back incorrect result: " + c.toString() + " vs " + cc.toString());
    }
  } finally {
    reader.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 31, Source: CompressionTest.java

Example 8: checkHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
/**
 * Checks a path to see if it is a valid hfile.
 *
 * @param p
 *          full Path to an HFile
 * @throws IOException
 *           This is a connectivity related exception
 */
protected void checkHFile(Path p) throws IOException {
  HFile.Reader r = null;
  try {
    r = HFile.createReader(fs, p, cacheConf, conf);
  } catch (CorruptHFileException che) {
    LOG.warn("Found corrupt HFile " + p, che);
    corrupted.add(p);
    if (inQuarantineMode) {
      Path dest = createQuarantinePath(p);
      LOG.warn("Quarantining corrupt HFile " + p + " into " + dest);
      boolean success = fs.mkdirs(dest.getParent());
      success = success ? fs.rename(p, dest): false;
      if (!success) {
        failures.add(p);
      } else {
        quarantined.add(dest);
      }
    }
    return;
  } catch (FileNotFoundException fnfe) {
    LOG.warn("HFile " + p + " was missing.  Likely removed due to compaction/split?");
    missing.add(p);
  } finally {
    hfilesChecked.addAndGet(1);
    if (r != null) {
      r.close(true);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 38, Source: HFileCorruptionChecker.java

Example 9: compactLMDIndex

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
/**
 * Do a minor/major compaction of the store files' index.
 *
 * @param compactedFile product of the data compaction; its index-column cells are read
 *                      and rewritten as an intermediate LMDIndex file
 * @throws IOException
 */
void compactLMDIndex(final Path compactedFile, HStore store, TimeRangeTracker timeRangeTracker)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug(
        "Generate intermediate index file from major compaction file=" + compactedFile + " in cf="
            + store.toString());
  }
  HFile.Reader reader =
      HFile.createReader(store.fs.getFileSystem(), compactedFile, store.cacheConf, conf);
  HFileScanner scanner = reader.getScanner(false, false, true);
  Queue<KeyValue> rawRecords = new LinkedList<>();
  int counter = 0;
  try {
    scanner.seekTo();
    do {
      KeyValue kv = (KeyValue) scanner.getKeyValue();
      if (store.indexTableRelation.isIndexColumn(kv.getFamily(), kv.getQualifier())) {
        rawRecords.add(kv);
      }
      ++counter;
    } while (scanner.next());
  } finally {
    if (reader != null) reader.close();
  }
  System.out.println("in compacted file=" + compactedFile + ", number of keyvalue=" + counter
      + ", for LMDIndex is:" + rawRecords.size());
  LMDIndexWriter lmdIndexWriter =
      new LMDIndexWriter(store, compactedFile, timeRangeTracker, "COMPACT");
  lmdIndexWriter.processKeyValueQueue(rawRecords);
}
 
Developer: fengchen8086, Project: ditb, Lines: 39, Source: Compactor.java

Example 10: parseKeyValueFromReader

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
private Queue<KeyValue> parseKeyValueFromReader() throws IOException {
  HFile.Reader reader = HFile
      .createReader(store.getFileSystem(), majorMovedPath, store.getCacheConfig(), store.conf);
  HFileScanner scanner = reader.getScanner(false, false, false);
  scanner.seekTo();
  Queue<KeyValue> queue = new LinkedList<>();
  do {
    KeyValue cell = (KeyValue) scanner.getKeyValue();
    queue.offer(cell);
  } while (scanner.next());
  reader.close();
  return queue;
}
 
Developer: fengchen8086, Project: ditb, Lines: 14, Source: CompactJobQueue.java

Example 11: winterTestingHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
private void winterTestingHFile(Path file) throws IOException {
  HFile.Reader reader =
      HFile.createReader(store.getFileSystem(), file, store.getCacheConfig(), store.conf);
  HFileScanner scanner = reader.getScanner(false, false, false);
  scanner.seekTo();
  int n = 0;
  do {
    Cell cell = scanner.getKeyValue();
    ++n;
  } while (scanner.next());
  reader.close();
  LOG.info("LCDBG, HFile has: " + n + " in " + file);
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: CompactJobQueue.java

Example 12: getKVCountFromHfile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
/**
 * Returns the total number of KeyValues in the given HFile.
 * @param fs File System
 * @param p HFile path
 * @return KV count in the given hfile
 * @throws IOException
 */
private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
  Configuration conf = util.getConfiguration();
  HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), conf);
  reader.loadFileInfo();
  HFileScanner scanner = reader.getScanner(false, false);
  scanner.seekTo();
  int count = 0;
  do {
    count++;
  } while (scanner.next());
  reader.close();
  return count;
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: TestImportTsv.java

Example 13: verifyHFile

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
private int verifyHFile(Path p) throws IOException {
  Configuration conf = util.getConfiguration();
  HFile.Reader reader = HFile.createReader(
      p.getFileSystem(conf), p, new CacheConfig(conf), conf);
  reader.loadFileInfo();
  HFileScanner scanner = reader.getScanner(false, false);
  scanner.seekTo();
  int count = 0;
  do {
    count++;
  } while (scanner.next());
  assertTrue(count > 0);
  reader.close();
  return count;
}
 
Developer: fengchen8086, Project: ditb, Lines: 16, Source: TestLoadIncrementalHFiles.java

Example 14: extractHFileKey

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
private byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
    new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    assertNotNull("Crypto context has no key", key);
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: TestHBaseFsckEncryption.java

Example 15: extractHFileKey

import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
    new CacheConfig(conf), conf);
  try {
    reader.loadFileInfo();
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    assertNotNull("Crypto context has no key", key);
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: TestEncryptionKeyRotation.java


Note: The org.apache.hadoop.hbase.io.hfile.HFile.createReader examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Please refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.