This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.hfile.HFile.Reader.loadFileInfo. If you have been wondering what Reader.loadFileInfo does, how to call it, or what real-world uses of it look like, the curated examples below may help. You can also explore further usage of its enclosing class, org.apache.hadoop.hbase.io.hfile.HFile.Reader.
The following 15 code examples of the Reader.loadFileInfo method are shown, sorted by popularity by default.
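Before diving into the examples, here is a minimal sketch of the typical call pattern. It assumes an existing Configuration conf, FileSystem fs, and an HFile at path, and it uses the 0.98/1.x-style HFile.createReader(fs, path, cacheConf, conf) signature that several examples below also use; the helper name dumpFirstKey is illustrative and not taken from the examples.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;

// Illustrative helper: open an HFile, load its file-info metadata, then scan.
static void dumpFirstKey(Configuration conf, FileSystem fs, Path path) throws IOException {
  Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  try {
    // loadFileInfo() reads the file-info block; call it before asking
    // the reader for keys or scanners, as the examples below do.
    reader.loadFileInfo();
    // Scanner without block caching and without positional read.
    HFileScanner scanner = reader.getScanner(false, false);
    if (scanner.seekTo()) { // returns false when the file holds no entries
      System.out.println(scanner.getKeyValue());
    }
  } finally {
    reader.close();
  }
}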
Example 1: metablocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
      .withCompression(AbstractHFileWriter.compressionByName(compress))
      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Example 2: testNullMetaBlocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo :
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
    writer.append(kv);
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Example 3: metablocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withCompression(compress)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Example 4: testNullMetaBlocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo :
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withBlockSize(minBlockSize)
        .withCompression(compressAlgo)
        .create();
    writer.append("foo".getBytes(), "value".getBytes());
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Example 5: testNullMetaBlocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo :
      HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    writer.append("foo".getBytes(), "value".getBytes());
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Example 6: metablocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
private void metablocks(final String compress) throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path mFile = new Path(ROOT_DIR, "meta.hfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  HFileContext meta = new HFileContextBuilder()
      .withCompression(HFileWriterImpl.compressionByName(compress))
      .withBlockSize(minBlockSize).build();
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withFileContext(meta)
      .create();
  someTestingWithMetaBlock(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile),
      this.fs.getFileStatus(mFile).getLen(), cacheConf, conf);
  reader.loadFileInfo();
  // No data -- this should return false.
  assertFalse(reader.getScanner(false, false).seekTo());
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
Example 7: testNullMetaBlocks
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
@Test
public void testNullMetaBlocks() throws Exception {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  for (Compression.Algorithm compressAlgo :
      HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes());
    writer.append(kv);
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, true, conf);
    reader.loadFileInfo();
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
Example 8: seekTFile
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader = HFile.createReaderFromStream(path, fsdis,
      fs.getFileStatus(path).getLen(), new CacheConfig(conf), conf);
  reader.loadFileInfo();
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  HFileScanner scanner = reader.getScanner(false, USE_PREAD);
  BytesWritable key = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    byte [] k = new byte [key.getLength()];
    System.arraycopy(key.getBytes(), 0, k, 0, key.getLength());
    if (scanner.seekTo(KeyValue.createKeyValueFromKey(k)) >= 0) {
      ByteBuffer bbkey = scanner.getKey();
      ByteBuffer bbval = scanner.getValue();
      totalBytes += bbkey.limit();
      totalBytes += bbval.limit();
    } else {
      ++miss;
    }
  }
  timer.stop();
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));
}
Example 9: testEmptyHFile
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
/**
 * Test empty HFile.
 * Test all features work reasonably when hfile is empty of entries.
 * @throws IOException
 */
public void testEmptyHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, getName());
  HFileContext context = new HFileContextBuilder().withIncludesTags(false).build();
  Writer w =
      HFile.getWriterFactory(conf, cacheConf).withPath(fs, f).withFileContext(context).create();
  w.close();
  Reader r = HFile.createReader(fs, f, cacheConf, conf);
  r.loadFileInfo();
  assertNull(r.getFirstKey());
  assertNull(r.getLastKey());
}
Example 10: seekTFile
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader = HFile.createReaderFromStream(path, fsdis,
      fs.getFileStatus(path).getLen(), new CacheConfig(conf));
  reader.loadFileInfo();
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  HFileScanner scanner = reader.getScanner(false, USE_PREAD);
  BytesWritable key = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    byte [] k = new byte [key.getLength()];
    System.arraycopy(key.getBytes(), 0, k, 0, key.getLength());
    if (scanner.seekTo(k) >= 0) {
      ByteBuffer bbkey = scanner.getKey();
      ByteBuffer bbval = scanner.getValue();
      totalBytes += bbkey.limit();
      totalBytes += bbval.limit();
    } else {
      ++miss;
    }
  }
  timer.stop();
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));
}
Example 11: testEmptyHFile
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
/**
 * Test empty HFile.
 * Test all features work reasonably when hfile is empty of entries.
 * @throws IOException
 */
public void testEmptyHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, getName());
  Writer w =
      HFile.getWriterFactory(conf, cacheConf).withPath(fs, f).create();
  w.close();
  Reader r = HFile.createReader(fs, f, cacheConf);
  r.loadFileInfo();
  assertNull(r.getFirstKey());
  assertNull(r.getLastKey());
}
Example 12: seekTFile
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader = HFile.createReaderFromStream(path, fsdis,
      fs.getFileStatus(path).getLen(), new CacheConfig(conf), conf);
  reader.loadFileInfo();
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  HFileScanner scanner = reader.getScanner(false, USE_PREAD);
  BytesWritable key = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    byte [] k = new byte [key.getLength()];
    System.arraycopy(key.getBytes(), 0, k, 0, key.getLength());
    if (scanner.seekTo(k) >= 0) {
      ByteBuffer bbkey = scanner.getKey();
      ByteBuffer bbval = scanner.getValue();
      totalBytes += bbkey.limit();
      totalBytes += bbval.limit();
    } else {
      ++miss;
    }
  }
  timer.stop();
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));
}
Example 13: seekTFile
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader = HFile.createReaderFromStream(path, fsdis,
      fs.getFileStatus(path).getLen(), new CacheConfig(conf), conf);
  reader.loadFileInfo();
  KeySampler kSampler = new KeySampler(rng, ((KeyValue) reader.getFirstKey().get()).getKey(),
      ((KeyValue) reader.getLastKey().get()).getKey(), keyLenGen);
  HFileScanner scanner = reader.getScanner(false, USE_PREAD);
  BytesWritable key = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    byte [] k = new byte [key.getLength()];
    System.arraycopy(key.getBytes(), 0, k, 0, key.getLength());
    if (scanner.seekTo(KeyValueUtil.createKeyValueFromKey(k)) >= 0) {
      ByteBuffer bbkey = ByteBuffer.wrap(((KeyValue) scanner.getKey()).getKey());
      ByteBuffer bbval = scanner.getValue();
      totalBytes += bbkey.limit();
      totalBytes += bbval.limit();
    } else {
      ++miss;
    }
  }
  timer.stop();
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));
}
Example 14: testEmptyHFile
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
/**
 * Test empty HFile.
 * Test all features work reasonably when hfile is empty of entries.
 * @throws IOException
 */
@Test
public void testEmptyHFile() throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path f = new Path(ROOT_DIR, testName.getMethodName());
  HFileContext context = new HFileContextBuilder().withIncludesTags(false).build();
  Writer w =
      HFile.getWriterFactory(conf, cacheConf).withPath(fs, f).withFileContext(context).create();
  w.close();
  Reader r = HFile.createReader(fs, f, cacheConf, true, conf);
  r.loadFileInfo();
  assertFalse(r.getFirstKey().isPresent());
  assertFalse(r.getLastKey().isPresent());
}
Example 15: basicWithSomeCodec
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the package/class the method depends on
/**
 * test none codecs
 */
void basicWithSomeCodec(String codec) throws IOException {
  if (cacheConf == null) cacheConf = new CacheConfig(conf);
  Path ncTFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString());
  FSDataOutputStream fout = createFSOutput(ncTFile);
  Writer writer = HFile.getWriterFactory(conf, cacheConf)
      .withOutputStream(fout)
      .withBlockSize(minBlockSize)
      .withCompression(codec)
      .create();
  LOG.info(writer);
  writeRecords(writer);
  fout.close();
  FSDataInputStream fin = fs.open(ncTFile);
  Reader reader = HFile.createReaderFromStream(ncTFile, fs.open(ncTFile),
      fs.getFileStatus(ncTFile).getLen(), cacheConf);
  System.out.println(cacheConf.toString());
  // Load up the index.
  reader.loadFileInfo();
  // Get a scanner that caches and that does not use pread.
  HFileScanner scanner = reader.getScanner(true, false);
  // Align scanner at start of the file.
  scanner.seekTo();
  readAllRecords(scanner);
  scanner.seekTo(getSomeKey(50));
  assertTrue("location lookup failed", scanner.seekTo(getSomeKey(50)) == 0);
  // read the key and see if it matches
  ByteBuffer readKey = scanner.getKey();
  assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50),
      Bytes.toBytes(readKey)));
  scanner.seekTo(new byte[0]);
  ByteBuffer val1 = scanner.getValue();
  scanner.seekTo(new byte[0]);
  ByteBuffer val2 = scanner.getValue();
  assertTrue(Arrays.equals(Bytes.toBytes(val1), Bytes.toBytes(val2)));
  reader.close();
  fin.close();
  fs.delete(ncTFile, true);
}