

Java Reader.close Method Code Examples

This article collects typical usage examples of the close method of org.apache.hadoop.hbase.io.hfile.HFile.Reader in Java. If you are wondering what exactly Reader.close does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of org.apache.hadoop.hbase.io.hfile.HFile.Reader, the class this method belongs to.


Seven code examples of the Reader.close method are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
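Before the examples, here is a minimal sketch of the lifecycle all of them follow: open a reader, load the file info, read, and close. Assumptions not taken from the examples below: an HBase 1.x-style API (the HFile.createReader signature varies between HBase versions), a command-line argument pointing at an existing HFile, and the illustrative class name ReaderCloseSketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;

public class ReaderCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path(args[0]); // path to an existing HFile (assumed to be supplied by the caller)
    Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
    try {
      reader.loadFileInfo();                       // load the file-info block before reading
      System.out.println("entries: " + reader.getEntries());
    } finally {
      reader.close();                              // always release the underlying stream
    }
  }
}

The try/finally guarantees reader.close() runs even if a read or assertion fails, which is the main pitfall the plain sequential close() calls in the test code below can miss.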

Example 1: metablocks

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class this method depends on
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(AbstractHFileWriter.compressionByName(compress))
                      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 26, Source: TestHFile.java

Example 2: metablocks

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class this method depends on
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withCompression(compress)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 24, Source: TestHFile.java

Example 3: metablocks

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class this method depends on
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(HFileWriterImpl.compressionByName(compress))
                      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
 
Developer ID: apache, Project: hbase, Lines of code: 26, Source: TestHFile.java

Example 4: testNHoplogNBlockIter

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class this method depends on
public void testNHoplogNBlockIter() throws Exception {
  Path path1 = new Path(testDataDir, "region/0/1-1-1.hop");
  Hoplog oplog = new HFileSortedOplog(hdfsStore, path1,
      blockCache, stats, storeStats);
  createHoplog(2000, oplog);
  
  FileSystem fs = hdfsStore.getFileSystem();
  Reader reader = HFile.createReader(fs, path1, new CacheConfig(fs.getConf()));
  BlockIndexReader bir = reader.getDataBlockIndexReader();
  int blockCount = bir.getRootBlockCount();
  reader.close();
  
  // make sure there are more than 1 hfile blocks in the hoplog
  assertTrue(1 < blockCount);
  
  Path path2 = new Path(testDataDir, "region/0/1-2-1.hop");
  oplog = new HFileSortedOplog(hdfsStore, path2,
      blockCache, stats, storeStats);
  createHoplog(2000, oplog);

  Path path3 = new Path(testDataDir, "region/0/1-3-1.hop");
  oplog = new HFileSortedOplog(hdfsStore, path3,
      blockCache, stats, storeStats);
  createHoplog(2000, oplog);
  
  Path[] paths = {path1, path2, path3, path1, path2, path3};
  long half = oplog.getSize()/2;
  long[] starts = {0, 0, 0, half + 1, half + 1, half + 1};
  long[] lengths = {half, half, half, oplog.getSize(), oplog.getSize(), oplog.getSize()};
  HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
      hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);
  
  int[] keyCounts = new int[2000];
  while (iter.hasNext()) {
    boolean success = iter.next();
    assertTrue(success);
    String key = new String((byte[])iter.getKey()).substring("key-".length());
    keyCounts[Integer.valueOf(key) - 100000]++;
  }
  
  for (int i : keyCounts) {
    assertEquals(3, i);
  }
}
 
Developer ID: gemxd, Project: gemfirexd-oss, Lines of code: 45, Source: HDFSSplitIteratorJUnitTest.java

Example 5: basicWithSomeCodec

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class this method depends on
/**
 * Test basic writes and reads with the given codec (including "none").
 */
void basicWithSomeCodec(String codec) throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path ncTFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString());
  FSDataOutputStream fout = createFSOutput(ncTFile);
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withCompression(codec)
      .create();
  LOG.info(writer);
  writeRecords(writer);
  fout.close();
  FSDataInputStream fin = fs.open(ncTFile);
  Reader reader = HFile.createReaderFromStream(ncTFile, fs.open(ncTFile),
    fs.getFileStatus(ncTFile).getLen(), cacheConf);
  System.out.println(cacheConf.toString());
  // Load up the index.
  reader.loadFileInfo();
  // Get a scanner that caches and that does not use pread.
  HFileScanner scanner = reader.getScanner(true, false);
  // Align scanner at start of the file.
  scanner.seekTo();
  readAllRecords(scanner);
  scanner.seekTo(getSomeKey(50));
  assertTrue("location lookup failed", scanner.seekTo(getSomeKey(50)) == 0);
  // read the key and see if it matches
  ByteBuffer readKey = scanner.getKey();
  assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50),
    Bytes.toBytes(readKey)));

  scanner.seekTo(new byte[0]);
  ByteBuffer val1 = scanner.getValue();
  scanner.seekTo(new byte[0]);
  ByteBuffer val2 = scanner.getValue();
  assertTrue(Arrays.equals(Bytes.toBytes(val1), Bytes.toBytes(val2)));

  reader.close();
  fin.close();
  fs.delete(ncTFile, true);
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 44, Source: TestHFile.java

Example 6: basicWithSomeCodec

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class this method depends on
/**
 * Test basic writes and reads with the given codec (including "none").
 * @param useTags whether to write cells with tags (requires HFile format version 3)
 */
void basicWithSomeCodec(String codec, boolean useTags) throws IOException {
  if (useTags) {
    conf.setInt("hfile.format.version", 3);
  }
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path ncTFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString() + useTags);
  FSDataOutputStream fout = createFSOutput(ncTFile);
  HFileContext meta = new HFileContextBuilder()
                      .withBlockSize(minBlockSize)
                      .withCompression(AbstractHFileWriter.compressionByName(codec))
                      .build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      // NOTE: This test is dependent on this deprecated nonstandard comparator
      .withComparator(new KeyValue.RawBytesComparator())
      .create();
  LOG.info(writer);
  writeRecords(writer, useTags);
  fout.close();
  FSDataInputStream fin = fs.open(ncTFile);
  Reader reader = HFile.createReaderFromStream(ncTFile, fs.open(ncTFile),
    fs.getFileStatus(ncTFile).getLen(), cacheConf, conf);
  System.out.println(cacheConf.toString());
  // Load up the index.
  reader.loadFileInfo();
  // Get a scanner that caches and that does not use pread.
  HFileScanner scanner = reader.getScanner(true, false);
  // Align scanner at start of the file.
  scanner.seekTo();
  readAllRecords(scanner);
  scanner.seekTo(getSomeKey(50));
  assertTrue("location lookup failed", scanner.seekTo(getSomeKey(50)) == 0);
  // read the key and see if it matches
  ByteBuffer readKey = scanner.getKey();
  assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50),
    Bytes.toBytes(readKey)));

  scanner.seekTo(new byte[0]);
  ByteBuffer val1 = scanner.getValue();
  scanner.seekTo(new byte[0]);
  ByteBuffer val2 = scanner.getValue();
  assertTrue(Arrays.equals(Bytes.toBytes(val1), Bytes.toBytes(val2)));

  reader.close();
  fin.close();
  fs.delete(ncTFile, true);
}
 
Developer ID: tenggyut, Project: HIndex, Lines of code: 53, Source: TestHFile.java

Example 7: basicWithSomeCodec

import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class this method depends on
/**
 * Test basic writes and reads with the given codec (including "none").
 */
void basicWithSomeCodec(String codec) throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path ncTFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString());
  FSDataOutputStream fout = createFSOutput(ncTFile);
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withCompression(codec)
      // NOTE: This test is dependent on this deprecated nonstandard comparator
      .withComparator(new KeyValue.RawBytesComparator())
      .create();
  LOG.info(writer);
  writeRecords(writer);
  fout.close();
  FSDataInputStream fin = fs.open(ncTFile);
  Reader reader = HFile.createReaderFromStream(ncTFile, fs.open(ncTFile),
    fs.getFileStatus(ncTFile).getLen(), cacheConf);
  System.out.println(cacheConf.toString());
  // Load up the index.
  reader.loadFileInfo();
  // Get a scanner that caches and that does not use pread.
  HFileScanner scanner = reader.getScanner(true, false);
  // Align scanner at start of the file.
  scanner.seekTo();
  readAllRecords(scanner);
  scanner.seekTo(getSomeKey(50));
  assertTrue("location lookup failed", scanner.seekTo(getSomeKey(50)) == 0);
  // read the key and see if it matches
  ByteBuffer readKey = scanner.getKey();
  assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50),
    Bytes.toBytes(readKey)));

  scanner.seekTo(new byte[0]);
  ByteBuffer val1 = scanner.getValue();
  scanner.seekTo(new byte[0]);
  ByteBuffer val2 = scanner.getValue();
  assertTrue(Arrays.equals(Bytes.toBytes(val1), Bytes.toBytes(val2)));

  reader.close();
  fin.close();
  fs.delete(ncTFile, true);
}
 
Developer ID: cloud-software-foundation, Project: c5, Lines of code: 46, Source: TestHFile.java
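
A closing note on the pattern above: in HBase versions where HFile.Reader implements java.io.Closeable (true on recent branches, but verify for the version you use), try-with-resources can replace the explicit reader.close() sequences seen in these examples. A hedged sketch, assuming the same 1.x-style createReader signature as in the earlier sketch:

// Assumes HFile.Reader implements java.io.Closeable in your HBase version,
// so the reader is closed automatically even if an exception is thrown.
try (Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf)) {
  reader.loadFileInfo();
  // ... read data and meta blocks here ...
} // reader.close() runs automatically here, even on exceptions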


Note: The org.apache.hadoop.hbase.io.hfile.HFile.Reader.close method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors; for distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.