This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.HFile.Reader. If you are unsure what the Reader class does, how to use it, or want to see it in real code, the curated class examples below should help.
The Reader class belongs to the org.apache.hadoop.hbase.io.hfile.HFile package. Eleven code examples are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
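Before the per-project examples, here is a minimal open-scan-close sketch for HFile.Reader. This is a sketch, not a definitive recipe: it assumes the HBase 1.x-era overload HFile.createReader(fs, path, cacheConf, conf) that several examples below use, signatures differ between HBase versions, and dump is a hypothetical method name.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

// Minimal sketch (HBase 1.x API): open an HFile, scan all cells, close the reader.
public static void dump(Configuration conf, Path path) throws Exception {
  FileSystem fs = path.getFileSystem(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  HFile.Reader reader = HFile.createReader(fs, path, cacheConf, conf);
  try {
    reader.loadFileInfo(); // parse the file-info block, as the examples below do
    // getScanner(cacheBlocks, pread): false/false scans without polluting the block cache
    HFileScanner scanner = reader.getScanner(false, false);
    if (scanner.seekTo()) { // returns false when the file has no data blocks
      do {
        Cell cell = scanner.getKeyValue();
        System.out.println(cell);
      } while (scanner.next());
    }
  } finally {
    reader.close();
  }
}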
Example 1: testCorruptTruncatedHFile
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, getName());
  HFileContext context = new HFileContextBuilder().build();
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f)
      .withFileContext(context).create();
  writeSomeRecords(w, 0, 100, false);
  w.close();
  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);
  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf, conf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
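The truncateFile helper used above is not shown on this page. A plausible reconstruction, assuming it simply copies the first half of the source file's bytes to the destination so the trailer is cut off; the real helper in the HBase test suite may differ in detail.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch: write a truncated copy of src to dst, leaving no valid trailer.
static void truncateFile(FileSystem fs, Path src, Path dst) throws IOException {
  FileStatus fst = fs.getFileStatus(src);
  long half = fst.getLen() / 2;
  byte[] buf = new byte[(int) half];
  FSDataInputStream in = fs.open(src);
  FSDataOutputStream out = fs.create(dst);
  try {
    in.readFully(buf); // first half of the file only
    out.write(buf);    // createReader on the result should throw CorruptHFileException
  } finally {
    in.close();
    out.close();
  }
}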
Example 2: metablocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
      .withCompression(AbstractHFileWriter.compressionByName(compress))
      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
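Similarly, someTestingWithMetaBlock and someReadingWithMetaBlock are helpers defined elsewhere in the test class. A hedged sketch of what they might look like, assuming the 1.x-era API in which Writer.appendMetaBlock takes a Writable payload and Reader.getMetaBlock returns a ByteBuffer (the return type changes in later HBase versions; block names and counts here are illustrative):

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
import org.apache.hadoop.io.Text;
import static org.junit.Assert.assertNotNull;

// Illustrative sketch: write a few named meta blocks, then read them back.
private void someTestingWithMetaBlock(Writer writer) {
  for (int i = 0; i < 3; i++) {
    writer.appendMetaBlock("HFileMeta" + i, new Text("meta block payload " + i));
  }
}

private void someReadingWithMetaBlock(Reader reader) throws IOException {
  for (int i = 0; i < 3; i++) {
    ByteBuffer buf = reader.getMetaBlock("HFileMeta" + i, false);
    assertNotNull(buf); // each named meta block should be present
  }
}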
Example 3: testNullMetaBlocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo :
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
    writer.append(kv);
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Example 4: testCorruptTruncatedHFile
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, getName());
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f).create();
  writeSomeRecords(w, 0, 100);
  w.close();
  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);
  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
Example 5: metablocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withCompression(compress)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Example 6: testNullMetaBlocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo :
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withBlockSize(minBlockSize)
        .withCompression(compressAlgo)
        .create();
    writer.append("foo".getBytes(), "value".getBytes());
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Example 7: testNullMetaBlocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo :
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    writer.append("foo".getBytes(), "value".getBytes());
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Example 8: testCorrupt0LengthHFile
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
/**
 * Create a 0-length hfile and show that it fails.
 */
@Test
public void testCorrupt0LengthHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, testName.getMethodName());
  FSDataOutputStream fsos = fs.create(f);
  fsos.close();
  try {
    Reader r = HFile.createReader(fs, f, cacheConf, true, conf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
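As this test shows, HFile.createReader fails fast on a zero-length or truncated file by throwing CorruptHFileException. A minimal application-side guard, assuming the HBase 2.x overload with the primaryReplicaReader boolean used in this example; openOrSkip is a hypothetical name.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.CorruptHFileException;
import org.apache.hadoop.hbase.io.hfile.HFile;

// Sketch: open a reader, or return null so the caller can skip a corrupt file.
static HFile.Reader openOrSkip(FileSystem fs, Path p, CacheConfig cc, Configuration conf)
    throws IOException {
  try {
    return HFile.createReader(fs, p, cc, true, conf); // true = primary replica reader
  } catch (CorruptHFileException e) {
    // Zero-length and truncated files fail here, at open time, not at scan time.
    return null;
  }
}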
Example 9: testCorruptTruncatedHFile
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
/**
 * Create a truncated hfile and verify that an exception is thrown.
 */
@Test
public void testCorruptTruncatedHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, testName.getMethodName());
  HFileContext context = new HFileContextBuilder().build();
  Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f)
      .withFileContext(context).create();
  writeSomeRecords(w, 0, 100, false);
  w.close();
  Path trunc = new Path(f.getParent(), "trucated");
  truncateFile(fs, w.getPath(), trunc);
  try {
    Reader r = HFile.createReader(fs, trunc, cacheConf, true, conf);
  } catch (CorruptHFileException che) {
    // Expected failure
    return;
  }
  fail("Should have thrown exception");
}
Example 10: metablocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
      .withCompression(HFileWriterImpl.compressionByName(compress))
      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Example 11: testNullMetaBlocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the required package/class
@Test
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo :
      HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
    writer.append(kv);
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, true, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}