This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.hfile.HFile.Reader.close. If you are wondering what Reader.close does, how to call it, or what it looks like in real code, the curated examples here may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.io.hfile.HFile.Reader.
Seven code examples of the Reader.close method are shown below, ordered by popularity by default.
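Before the collected examples, here is a minimal, self-contained sketch of the basic open/use/close pattern for HFile.Reader. It is not taken from any of the examples below: the path and Configuration values are placeholders, and it assumes an HBase version that still offers the three-argument HFile.createReader(FileSystem, Path, CacheConfig) overload used in Example 4. The try/finally block simply guarantees that Reader.close runs even if reading fails.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;

public class HFileReaderCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path hfilePath = new Path("/tmp/example.hfile"); // placeholder path, not from the examples
    // Same three-argument createReader overload as Example 4 below.
    Reader reader = HFile.createReader(fs, hfilePath, new CacheConfig(conf));
    try {
      reader.loadFileInfo();  // load the file-info block and index, as the examples do
      System.out.println("entries: " + reader.getEntries());
    } finally {
      reader.close();         // always release the reader's resources
    }
  }
}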
Example 1: metablocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
      .withCompression(AbstractHFileWriter.compressionByName(compress))
      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Example 2: metablocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withCompression(compress)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Example 3: metablocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
      .withCompression(HFileWriterImpl.compressionByName(compress))
      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Example 4: testNHoplogNBlockIter
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
public void testNHoplogNBlockIter() throws Exception {
  Path path1 = new Path(testDataDir, "region/0/1-1-1.hop");
  Hoplog oplog = new HFileSortedOplog(hdfsStore, path1,
      blockCache, stats, storeStats);
  createHoplog(2000, oplog);

  FileSystem fs = hdfsStore.getFileSystem();
  Reader reader = HFile.createReader(fs, path1, new CacheConfig(fs.getConf()));
  BlockIndexReader bir = reader.getDataBlockIndexReader();
  int blockCount = bir.getRootBlockCount();
  reader.close();
  // make sure there are more than 1 hfile blocks in the hoplog
  assertTrue(1 < blockCount);

  Path path2 = new Path(testDataDir, "region/0/1-2-1.hop");
  oplog = new HFileSortedOplog(hdfsStore, path2,
      blockCache, stats, storeStats);
  createHoplog(2000, oplog);

  Path path3 = new Path(testDataDir, "region/0/1-3-1.hop");
  oplog = new HFileSortedOplog(hdfsStore, path3,
      blockCache, stats, storeStats);
  createHoplog(2000, oplog);

  Path[] paths = {path1, path2, path3, path1, path2, path3};
  long half = oplog.getSize() / 2;
  long[] starts = {0, 0, 0, half + 1, half + 1, half + 1};
  long[] lengths = {half, half, half, oplog.getSize(), oplog.getSize(), oplog.getSize()};
  HDFSSplitIterator iter = HDFSSplitIterator.newInstance(
      hdfsStore.getFileSystem(), paths, starts, lengths, 0, 0);

  int[] keyCounts = new int[2000];
  while (iter.hasNext()) {
    boolean success = iter.next();
    assertTrue(success);
    String key = new String((byte[]) iter.getKey()).substring("key-".length());
    keyCounts[Integer.valueOf(key) - 100000]++;
  }

  for (int i : keyCounts) {
    assertEquals(3, i);
  }
}
Example 5: basicWithSomeCodec
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
/**
 * test none codecs
 */
void basicWithSomeCodec(String codec) throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path ncTFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString());
  FSDataOutputStream fout = createFSOutput(ncTFile);
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withCompression(codec)
      .create();
  LOG.info(writer);
  writeRecords(writer);
  fout.close();
  FSDataInputStream fin = fs.open(ncTFile);
  Reader reader = HFile.createReaderFromStream(ncTFile, fs.open(ncTFile),
      fs.getFileStatus(ncTFile).getLen(), cacheConf);
  System.out.println(cacheConf.toString());
  // Load up the index.
  reader.loadFileInfo();
  // Get a scanner that caches and that does not use pread.
  HFileScanner scanner = reader.getScanner(true, false);
  // Align scanner at start of the file.
  scanner.seekTo();
  readAllRecords(scanner);
  scanner.seekTo(getSomeKey(50));
  assertTrue("location lookup failed", scanner.seekTo(getSomeKey(50)) == 0);
  // read the key and see if it matches
  ByteBuffer readKey = scanner.getKey();
  assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50),
      Bytes.toBytes(readKey)));
  scanner.seekTo(new byte[0]);
  ByteBuffer val1 = scanner.getValue();
  scanner.seekTo(new byte[0]);
  ByteBuffer val2 = scanner.getValue();
  assertTrue(Arrays.equals(Bytes.toBytes(val1), Bytes.toBytes(val2)));
  reader.close();
  fin.close();
  fs.delete(ncTFile, true);
}
Example 6: basicWithSomeCodec
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
/**
 * test none codecs
 * @param useTags
 */
void basicWithSomeCodec(String codec, boolean useTags) throws IOException {
  if (useTags) {
    conf.setInt("hfile.format.version", 3);
  }
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path ncTFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString() + useTags);
  FSDataOutputStream fout = createFSOutput(ncTFile);
  HFileContext meta = new HFileContextBuilder()
      .withBlockSize(minBlockSize)
      .withCompression(AbstractHFileWriter.compressionByName(codec))
      .build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      // NOTE: This test is dependent on this deprecated nonstandard comparator
      .withComparator(new KeyValue.RawBytesComparator())
      .create();
  LOG.info(writer);
  writeRecords(writer, useTags);
  fout.close();
  FSDataInputStream fin = fs.open(ncTFile);
  Reader reader = HFile.createReaderFromStream(ncTFile, fs.open(ncTFile),
      fs.getFileStatus(ncTFile).getLen(), cacheConf, conf);
  System.out.println(cacheConf.toString());
  // Load up the index.
  reader.loadFileInfo();
  // Get a scanner that caches and that does not use pread.
  HFileScanner scanner = reader.getScanner(true, false);
  // Align scanner at start of the file.
  scanner.seekTo();
  readAllRecords(scanner);
  scanner.seekTo(getSomeKey(50));
  assertTrue("location lookup failed", scanner.seekTo(getSomeKey(50)) == 0);
  // read the key and see if it matches
  ByteBuffer readKey = scanner.getKey();
  assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50),
      Bytes.toBytes(readKey)));
  scanner.seekTo(new byte[0]);
  ByteBuffer val1 = scanner.getValue();
  scanner.seekTo(new byte[0]);
  ByteBuffer val2 = scanner.getValue();
  assertTrue(Arrays.equals(Bytes.toBytes(val1), Bytes.toBytes(val2)));
  reader.close();
  fin.close();
  fs.delete(ncTFile, true);
}
Example 7: basicWithSomeCodec
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
/**
 * test none codecs
 */
void basicWithSomeCodec(String codec) throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path ncTFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString());
  FSDataOutputStream fout = createFSOutput(ncTFile);
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withCompression(codec)
      // NOTE: This test is dependent on this deprecated nonstandard comparator
      .withComparator(new KeyValue.RawBytesComparator())
      .create();
  LOG.info(writer);
  writeRecords(writer);
  fout.close();
  FSDataInputStream fin = fs.open(ncTFile);
  Reader reader = HFile.createReaderFromStream(ncTFile, fs.open(ncTFile),
      fs.getFileStatus(ncTFile).getLen(), cacheConf);
  System.out.println(cacheConf.toString());
  // Load up the index.
  reader.loadFileInfo();
  // Get a scanner that caches and that does not use pread.
  HFileScanner scanner = reader.getScanner(true, false);
  // Align scanner at start of the file.
  scanner.seekTo();
  readAllRecords(scanner);
  scanner.seekTo(getSomeKey(50));
  assertTrue("location lookup failed", scanner.seekTo(getSomeKey(50)) == 0);
  // read the key and see if it matches
  ByteBuffer readKey = scanner.getKey();
  assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50),
      Bytes.toBytes(readKey)));
  scanner.seekTo(new byte[0]);
  ByteBuffer val1 = scanner.getValue();
  scanner.seekTo(new byte[0]);
  ByteBuffer val2 = scanner.getValue();
  assertTrue(Arrays.equals(Bytes.toBytes(val1), Bytes.toBytes(val2)));
  reader.close();
  fin.close();
  fs.delete(ncTFile, true);
}