This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.hfile.HFile.Reader.getLastKey. If you have been wondering what Reader.getLastKey does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.io.hfile.HFile.Reader.
Three code examples of the Reader.getLastKey method are shown below, ordered by popularity by default.
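Before the benchmark-style examples, here is a minimal, self-contained sketch that isolates the method itself: it opens an HFile the same way the examples do (createReaderFromStream followed by loadFileInfo) and prints the key range reported by getFirstKey()/getLastKey(). The class name GetLastKeyDemo and the printKeyRange helper are illustrative only, and the byte[] return type assumes the same HBase release line as the examples below; newer HBase versions changed these accessors to return Cell-based values.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.util.Bytes;

public class GetLastKeyDemo {
  // Illustrative helper (not taken from the examples below): prints the first
  // and last key of an existing HFile located at "path" on the given FileSystem.
  public static void printKeyRange(FileSystem fs, Path path, Configuration conf)
      throws IOException {
    FSDataInputStream fsdis = fs.open(path);
    Reader reader = HFile.createReaderFromStream(path, fsdis,
        fs.getFileStatus(path).getLen(), new CacheConfig(conf), conf);
    try {
      reader.loadFileInfo();
      // Assumption: in this HBase release line, getFirstKey()/getLastKey()
      // return the serialized first/last key as byte[] (null for an empty file).
      byte[] firstKey = reader.getFirstKey();
      byte[] lastKey = reader.getLastKey();
      System.out.println("first key: "
          + (firstKey == null ? "<empty file>" : Bytes.toStringBinary(firstKey)));
      System.out.println("last key:  "
          + (lastKey == null ? "<empty file>" : Bytes.toStringBinary(lastKey)));
    } finally {
      reader.close();
    }
  }
}

The benchmark methods in the examples below use the same pair of accessors, but to bound a random key sampler rather than to print the keys.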
Example 1: seekTFile
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the class/package the method depends on
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader = HFile.createReaderFromStream(path, fsdis,
      fs.getFileStatus(path).getLen(), new CacheConfig(conf), conf);
  reader.loadFileInfo();
  // Bound the random key generator by the actual key range of the file,
  // taken from getFirstKey()/getLastKey().
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  HFileScanner scanner = reader.getScanner(false, USE_PREAD);
  BytesWritable key = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    byte[] k = new byte[key.getLength()];
    System.arraycopy(key.getBytes(), 0, k, 0, key.getLength());
    // A negative return from seekTo() means the sampled key sorts before the
    // first key of the file; count that as a miss.
    if (scanner.seekTo(KeyValue.createKeyValueFromKey(k)) >= 0) {
      ByteBuffer bbkey = scanner.getKey();
      ByteBuffer bbval = scanner.getValue();
      totalBytes += bbkey.limit();
      totalBytes += bbval.limit();
    } else {
      ++miss;
    }
  }
  timer.stop();
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));
}
Example 2: seekTFile
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the class/package the method depends on
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader = HFile.createReaderFromStream(path, fsdis,
      fs.getFileStatus(path).getLen(), new CacheConfig(conf));
  reader.loadFileInfo();
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  HFileScanner scanner = reader.getScanner(false, USE_PREAD);
  BytesWritable key = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    byte[] k = new byte[key.getLength()];
    System.arraycopy(key.getBytes(), 0, k, 0, key.getLength());
    if (scanner.seekTo(k) >= 0) {
      ByteBuffer bbkey = scanner.getKey();
      ByteBuffer bbval = scanner.getValue();
      totalBytes += bbkey.limit();
      totalBytes += bbval.limit();
    } else {
      ++miss;
    }
  }
  timer.stop();
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));
}
Example 3: seekTFile
import org.apache.hadoop.hbase.io.hfile.HFile.Reader; // import the class/package the method depends on
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader = HFile.createReaderFromStream(path, fsdis,
      fs.getFileStatus(path).getLen(), new CacheConfig(conf), conf);
  reader.loadFileInfo();
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  HFileScanner scanner = reader.getScanner(false, USE_PREAD);
  BytesWritable key = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    byte[] k = new byte[key.getLength()];
    System.arraycopy(key.getBytes(), 0, k, 0, key.getLength());
    if (scanner.seekTo(k) >= 0) {
      ByteBuffer bbkey = scanner.getKey();
      ByteBuffer bbval = scanner.getValue();
      totalBytes += bbkey.limit();
      totalBytes += bbval.limit();
    } else {
      ++miss;
    }
  }
  timer.stop();
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));
}