This article collects typical usage examples of the Java class org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner.Entry. If you are wondering what the Entry class is for and how to use it, the curated examples below should help.
Entry is a nested class of org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner. Nine code examples are shown below, ordered by default according to popularity.
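All of the snippets below are methods lifted from larger classes, so they assume a pre-initialized scanner field (and, in the benchmark examples, an hdfs FileSystem handle). For orientation, here is a minimal, self-contained sketch of the canonical Entry read loop; the path is a placeholder and the surrounding class is hypothetical:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.TFile;
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner;
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner.Entry;

public class TFileEntryDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/example.tfile"); // placeholder: any existing TFile
    FileSystem fs = FileSystem.get(conf);
    long length = fs.getFileStatus(path).getLen();
    try (FSDataInputStream in = fs.open(path);
         TFile.Reader reader = new TFile.Reader(in, length, conf);
         Scanner scanner = reader.createScanner()) { // a fresh scanner starts at the first entry
      for (; !scanner.atEnd(); scanner.advance()) {
        Entry en = scanner.entry();
        byte[] key = new byte[en.getKeyLength()];
        byte[] value = new byte[en.getValueLength()];
        en.getKey(key);     // copies the key bytes into the caller's buffer
        en.getValue(value); // likewise for the value
        // process key/value here
      }
    }
  }
}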
Example 1: readFully
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner.Entry; // import the required package/class
@Override
public void readFully(TreeMap<Slice, Slice> data) throws IOException
{
  scanner.rewind(); // start from the first entry
  for (; !scanner.atEnd(); scanner.advance()) {
    Entry en = scanner.entry();
    int klen = en.getKeyLength();
    int vlen = en.getValueLength();
    byte[] key = new byte[klen];
    byte[] value = new byte[vlen];
    en.getKey(key);     // copy the key bytes out of the entry
    en.getValue(value); // copy the value bytes out of the entry
    data.put(new Slice(key, 0, key.length), new Slice(value, 0, value.length));
  }
}
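Slice is not a Hadoop type; these examples assume a simple (buffer, offset, length) view over a byte array, as in Apache Apex's com.datatorrent.netlet.util.Slice. A minimal stand-in that makes Examples 1 and 2 compile:

// Hypothetical minimal stand-in for the Slice type used above.
public class Slice {
  public byte[] buffer; // backing array
  public int offset;    // start of the slice within buffer
  public int length;    // number of valid bytes

  public Slice(byte[] buffer, int offset, int length) {
    this.buffer = buffer;
    this.offset = offset;
    this.length = length;
  }
}

Note that the TreeMap in Example 1 would additionally need a byte-wise Comparator<Slice> supplied at construction, since this stand-in is not Comparable.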
Example 2: peek
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner.Entry; // import the required package/class
@Override
public boolean peek(Slice key, Slice value) throws IOException
{
  if (scanner.atEnd()) {
    return false;
  }
  // Read the current entry without advancing the scanner.
  Entry en = scanner.entry();
  byte[] rkey = new byte[en.getKeyLength()];
  byte[] rval = new byte[en.getValueLength()];
  en.getKey(rkey);
  en.getValue(rval);
  key.buffer = rkey;
  key.offset = 0;
  key.length = en.getKeyLength();
  value.buffer = rval;
  value.offset = 0;
  value.length = en.getValueLength();
  return true;
}
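Because peek fills the caller's Slice objects from the current entry but never calls advance, repeated calls return the same pair; pairing it with a cursor-consuming method such as the next method in Example 9 yields a conventional look-ahead iterator.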
Example 3: summary
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner.Entry; // import the required package/class
@AfterClass
public static void summary() throws Exception
{
  long heapMax = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
  long nonHeapMax = ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage().getMax();
  logger.info("==============================================================================");
  logger.info("Test Size: " + String.format("%,d", testSize) + " pairs (" +
      String.format("%,d", keySizeBytes) + " key bytes / " + String.format("%,d", valueSizeBytes) + " value bytes)");
  logger.info("Memory: " + String.format("%,d", heapMax) + " Heap Max + "
      + String.format("%,d", nonHeapMax) + " Non-Heap Max = " + String.format("%,d", (heapMax + nonHeapMax)) +
      " Total Max");
  logger.info("==============================================================================");
  logger.info("KV PAIRS (" + keySizeBytes + "/" + valueSizeBytes + "), " +
      "TEST ID, ELAPSED TIME (microseconds), FILE SIZE (bytes)");
  // Dump the per-test results collected while the tests ran.
  Iterator<?> it = testSummary.entrySet().iterator();
  while (it.hasNext()) {
    Map.Entry<?, ?> kv = (Map.Entry<?, ?>)it.next();
    logger.info(kv.getKey() + "," + kv.getValue());
  }
}
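The benchmark examples reference several fixtures (logger, testSize, keySizeBytes, valueSizeBytes, testSummary, and later hdfs) that are not shown. Hypothetical declarations consistent with how they are used; the concrete values are illustrative only:

// Hypothetical test fixtures; names match the snippets, values are illustrative.
private static final Logger logger = LoggerFactory.getLogger(TFileBenchmarkTest.class); // assumed SLF4J
private static FileSystem hdfs;                   // opened in test setup, e.g. FileSystem.get(conf)
private static int testSize = 1_000_000;          // number of key/value pairs per test
private static int keySizeBytes = 100;            // bytes per key
private static int valueSizeBytes = 1_000;        // bytes per value
private static Map<String, String> testSummary = new TreeMap<>(); // test id -> "elapsed,fileSize"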
Example 4: readTFileSeqId
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner.Entry; // import the required package/class
private void readTFileSeqId(Path file) throws IOException
{
  FSDataInputStream in = hdfs.open(file);
  long size = hdfs.getContentSummary(file).getLength();
  TFile.Reader reader = new TFile.Reader(in, size, new Configuration());
  Scanner scanner = reader.createScanner();
  scanner.rewind();
  for (int i = 0; i < testSize; i++) {
    // Seek to each key in write order, then copy the entry into Writables.
    scanner.seekTo(getKey(i).getBytes());
    Entry en = scanner.entry();
    en.get(new BytesWritable(new byte[en.getKeyLength()]), new BytesWritable(new byte[en.getValueLength()]));
  }
  reader.close();
}
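The seek-based examples rely on a getKey(int) helper that is not shown; for seekTo to land on each written entry, the helper must reproduce the written key bytes exactly. A plausible, hypothetical implementation pads the index to a fixed width so byte-wise ordering matches numeric ordering. Note also that seekTo returns a boolean (true when the exact key is found), which the benchmarks deliberately ignore.

// Hypothetical helper: fixed-width, zero-padded decimal keys so that
// seekTo(getKey(i).getBytes()) finds exactly the entry written for index i.
private String getKey(int i) {
  return String.format("%0" + keySizeBytes + "d", i);
}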
Example 5: readTFileSeq
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner.Entry; // import the required package/class
private void readTFileSeq(Path file) throws IOException
{
  FSDataInputStream in = hdfs.open(file);
  long size = hdfs.getContentSummary(file).getLength();
  TFile.Reader reader = new TFile.Reader(in, size, new Configuration());
  Scanner scanner = reader.createScanner();
  scanner.rewind();
  // Check atEnd() before entry() so an empty file does not fault.
  for (; !scanner.atEnd(); scanner.advance()) {
    Entry en = scanner.entry();
    en.get(new BytesWritable(new byte[en.getKeyLength()]), new BytesWritable(new byte[en.getValueLength()]));
  }
  reader.close();
}
Example 6: readDTFileSeq
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner.Entry; // import the required package/class
private void readDTFileSeq(Path file) throws IOException
{
  FSDataInputStream in = hdfs.open(file);
  long size = hdfs.getContentSummary(file).getLength();
  org.apache.hadoop.io.file.tfile.DTFile.Reader reader =
      new org.apache.hadoop.io.file.tfile.DTFile.Reader(in, size, new Configuration());
  org.apache.hadoop.io.file.tfile.DTFile.Reader.Scanner scanner = reader.createScanner();
  scanner.rewind();
  // Check atEnd() before entry() so an empty file does not fault.
  for (; !scanner.atEnd(); scanner.advance()) {
    org.apache.hadoop.io.file.tfile.DTFile.Reader.Scanner.Entry en = scanner.entry();
    // Touch the zero-copy accessors: block buffer plus offsets/lengths, no byte[] copies.
    en.getBlockBuffer();
    en.getKeyOffset();
    en.getKeyLength();
    en.getValueLength();
    en.getValueOffset();
  }
  reader.close();
}
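DTFile (from Apache Apex) is a fork of TFile that, among other changes, caches decompressed blocks. Unlike the TFile Entry, whose getKey/getValue copy bytes into caller-supplied buffers, the DTFile Entry exposes the cached block directly via getBlockBuffer() plus key/value offsets and lengths, so Examples 6 through 8 measure access with no per-entry byte[] allocation or copy.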
Example 7: readDTFileRandom
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner.Entry; // import the required package/class
private void readDTFileRandom(Path file) throws IOException
{
  Random random = new Random();
  FSDataInputStream in = hdfs.open(file);
  long size = hdfs.getContentSummary(file).getLength();
  org.apache.hadoop.io.file.tfile.DTFile.Reader reader =
      new org.apache.hadoop.io.file.tfile.DTFile.Reader(in, size, new Configuration());
  org.apache.hadoop.io.file.tfile.DTFile.Reader.Scanner scanner = reader.createScanner();
  scanner.rewind();
  for (int i = 0; i < testSize; i++) {
    // Seek to a uniformly random key, then touch the zero-copy accessors.
    scanner.seekTo(getKey(random.nextInt(testSize)).getBytes());
    org.apache.hadoop.io.file.tfile.DTFile.Reader.Scanner.Entry en = scanner.entry();
    en.getBlockBuffer();
    en.getKeyOffset();
    en.getKeyLength();
    en.getValueLength();
    en.getValueOffset();
  }
  reader.close();
}
Example 8: readDTFileSeqId
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner.Entry; // import the required package/class
private void readDTFileSeqId(Path file) throws IOException
{
  FSDataInputStream in = hdfs.open(file);
  long size = hdfs.getContentSummary(file).getLength();
  org.apache.hadoop.io.file.tfile.DTFile.Reader reader =
      new org.apache.hadoop.io.file.tfile.DTFile.Reader(in, size, new Configuration());
  org.apache.hadoop.io.file.tfile.DTFile.Reader.Scanner scanner = reader.createScanner();
  scanner.rewind();
  for (int i = 0; i < testSize; i++) {
    // Seek to each key in write order, then touch the zero-copy accessors.
    scanner.seekTo(getKey(i).getBytes());
    org.apache.hadoop.io.file.tfile.DTFile.Reader.Scanner.Entry en = scanner.entry();
    en.getBlockBuffer();
    en.getKeyOffset();
    en.getKeyLength();
    en.getValueLength();
    en.getValueOffset();
  }
  reader.close();
}
Example 9: next
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner.Entry; // import the required package/class
@Override
public boolean next() throws IOException {
  if (scanner.atEnd()) {
    return false;
  }
  Entry entry = scanner.entry();
  keyLength = entry.getKeyLength();
  checkKeyBuffer(keyLength);     // grow the reusable key buffer if needed
  entry.getKey(keyBuffer);
  valueLength = entry.getValueLength();
  checkValueBuffer(valueLength); // grow the reusable value buffer if needed
  entry.getValue(valueBuffer);
  scanner.advance();             // consume the entry, unlike peek()
  return true;
}
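Example 9 references checkKeyBuffer/checkValueBuffer helpers and reusable keyBuffer/valueBuffer fields that are not shown. A hypothetical implementation that grows the shared buffers on demand, so next() avoids a fresh allocation per entry:

// Hypothetical buffer management matching how next() uses the fields above.
private byte[] keyBuffer = new byte[256];
private byte[] valueBuffer = new byte[256];

private void checkKeyBuffer(int size) {
  if (keyBuffer.length < size) {
    keyBuffer = new byte[Math.max(size, keyBuffer.length * 2)]; // grow geometrically
  }
}

private void checkValueBuffer(int size) {
  if (valueBuffer.length < size) {
    valueBuffer = new byte[Math.max(size, valueBuffer.length * 2)];
  }
}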