

Java Reader Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.HFile.Reader. If you are wondering what the Reader class does, how to use it, or where to find usage examples, the curated code examples below may help.


The Reader class belongs to the org.apache.hadoop.hbase.io.hfile.HFile package. Eleven code examples of the Reader class are shown below, sorted by popularity by default.
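Before working through the examples, here is a minimal sketch of the Reader lifecycle they all follow: open, load the file info, scan, close. This sketch is illustrative only; the file path and configuration are placeholders, and it assumes the HBase 1.x createReader signature (the examples below also show the 0.94 and 2.x variants).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

public class ReaderLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();      // placeholder configuration
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.hfile");    // placeholder path
    CacheConfig cacheConf = new CacheConfig(conf);

    // HBase 1.x signature; 0.94 omits conf, 2.x adds a primaryReplicaReader flag.
    Reader reader = HFile.createReader(fs, path, cacheConf, conf);
    try {
      reader.loadFileInfo();                       // read the file-info block
      HFileScanner scanner = reader.getScanner(false, false); // no block cache, no pread
      if (scanner.seekTo()) {                      // position at the first key
        do {
          System.out.println(scanner.getKeyValue());
        } while (scanner.next());
      }
    } finally {
      reader.close();
    }
  }
}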

Example 1: testCorruptTruncatedHFile

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, getName());
  HFileContext context = new HFileContextBuilder().build();
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f)
      .withFileContext(context).create();
  writeSomeRecords(w, 0, 100, false);
  w.close();

  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);

  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf, conf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: TestHFile.java

Example 2: metablocks

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(AbstractHFileWriter.compressionByName(compress))
                      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: TestHFile.java
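The helpers someTestingWithMetaBlock and someReadingWithMetaBlock called above are not shown in this excerpt. As a rough sketch of what such helpers look like, HFile.Writer exposes appendMetaBlock(String, Writable) and HFile.Reader exposes getMetaBlock(String, boolean); the block names and contents below are made up for illustration, and the ByteBuffer return type matches the 0.94/1.x API.

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;

// Illustrative stand-ins for the helpers referenced above -- NOT the actual
// TestHFile implementations.
class MetaBlockSketch {
  static void writeMetaBlocks(HFile.Writer writer) throws IOException {
    // Any Writable can serve as a meta block payload; Text is a convenient one.
    writer.appendMetaBlock("CONTENTS", new Text("hello world"));
    writer.appendMetaBlock("SIZE", new Text("42"));
  }

  static void readMetaBlocks(HFile.Reader reader) throws IOException {
    // In HBase 0.94/1.x, getMetaBlock returns the payload as a ByteBuffer.
    ByteBuffer b = reader.getMetaBlock("CONTENTS", false /* cacheBlock */);
    System.out.println(Bytes.toStringBinary(b));
  }
}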

Example 3: testNullMetaBlocks

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo : 
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
                        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
    writer.append(kv);
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestHFile.java

Example 4: testCorruptTruncatedHFile

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, getName());
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f).create();
  writeSomeRecords(w, 0, 100);
  w.close();

  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);

  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 22, Source: TestHFile.java

Example 5: metablocks

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withCompression(compress)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 24, Source: TestHFile.java

Example 6: testNullMetaBlocks

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo : 
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withBlockSize(minBlockSize)
        .withCompression(compressAlgo)
        .create();
    writer.append("foo".getBytes(), "value".getBytes());
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 20, Source: TestHFile.java

Example 7: testNullMetaBlocks

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo : 
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
                        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    writer.append("foo".getBytes(), "value".getBytes());
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 21, Source: TestHFile.java

Example 8: testCorrupt0LengthHFile

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
/**
 * Create a 0-length hfile and show that it fails.
 */
@Test
public void testCorrupt0LengthHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, testName.getMethodName());
  FSDataOutputStream fsos = fs.create(f);
  fsos.close();

  try {
    Reader r = HFile.createReader(fs, f, cacheConf, true, conf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
 
Developer: apache, Project: hbase, Lines: 19, Source: TestHFile.java
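The try/catch-then-fail pattern in these tests predates JUnit's assertThrows. On JUnit 4.13+ the same check can be written more compactly; a sketch, assuming the same test-class fields (fs, cacheConf, conf, ROOT_DIR, testName) and imports as the example above:

import static org.junit.Assert.assertThrows;

// Same check as above, rewritten with assertThrows (JUnit 4.13+).
@Test
public void testCorrupt0LengthHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, testName.getMethodName());
  FSDataOutputStream fsos = fs.create(f);
  fsos.close();

  // Opening a zero-length file should fail with CorruptHFileException.
  assertThrows(CorruptHFileException.class,
      () -> HFile.createReader(fs, f, cacheConf, true, conf));
}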

Example 9: testCorruptTruncatedHFile

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
@Test
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, testName.getMethodName());
  HFileContext context = new HFileContextBuilder().build();
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f)
      .withFileContext(context).create();
  writeSomeRecords(w, 0, 100, false);
  w.close();

  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);

  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf, true, conf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
 
Developer: apache, Project: hbase, Lines: 25, Source: TestHFile.java
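Note that the HFile.createReader signature differs across the HBase versions these projects track, which explains the variation between examples:

// HBase 0.94 (LCIndex-HBase-0.94.16, examples 4-6): no Configuration argument.
Reader r094 = HFile.createReader(fs, path, cacheConf);

// HBase 0.98/1.x (ditb and HIndex, examples 1-3 and 7): Configuration added.
Reader r10 = HFile.createReader(fs, path, cacheConf, conf);

// HBase 2.x (apache/hbase, examples 8-11): primaryReplicaReader flag added.
Reader r20 = HFile.createReader(fs, path, cacheConf, true, conf);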

Example 10: metablocks

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(HFileWriterImpl.compressionByName(compress))
                      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
 
Developer: apache, Project: hbase, Lines: 26, Source: TestHFile.java

Example 11: testNullMetaBlocks

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
@Test
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo :
      HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
                        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
    writer.append(kv);
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, true, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
 
Developer: apache, Project: hbase, Lines: 23, Source: TestHFile.java
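A similar difference shows up on the writer side: the older examples call writer.append with raw key/value byte arrays, while the newer ones build a full KeyValue first, since later versions of HFile.Writer.append take a Cell (which KeyValue implements). The two forms, as they appear above:

// Older API (examples 6 and 7): raw byte[] key and value.
writer.append("foo".getBytes(), "value".getBytes());

// Newer API (examples 3 and 11): writer.append takes a Cell, so a
// KeyValue (row, family, qualifier, value) is constructed first.
writer.append(new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes()));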


Note: the org.apache.hadoop.hbase.io.hfile.HFile.Reader class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community; copyright belongs to the original authors, and distribution and use must follow each project's license. Do not reproduce without permission.