This article collects representative usage examples of the Java method BytesWritable.getSize from the class org.apache.hadoop.io.BytesWritable. If you are unsure what BytesWritable.getSize does or how to call it, the selected code samples below may help; you can also look further into usage examples of the enclosing class, org.apache.hadoop.io.BytesWritable.
Two code examples of the BytesWritable.getSize method are shown below, ordered by popularity by default.
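Before the full examples, here is a minimal, self-contained sketch (not from the original examples; the class name GetSizeDemo is made up for illustration) of what getSize() reports: the number of valid bytes currently held by the BytesWritable, which can differ from the length of the backing buffer returned by get().

import org.apache.hadoop.io.BytesWritable;

public class GetSizeDemo {
  public static void main(String[] args) {
    // getSize() returns the count of valid bytes held by the writable.
    BytesWritable bw = new BytesWritable("hello".getBytes());
    System.out.println(bw.getSize());    // 5

    // Shrinking the logical size does not shrink the backing buffer.
    bw.setSize(3);
    System.out.println(bw.getSize());    // 3
    System.out.println(bw.get().length); // still 5: get() exposes the backing array
  }
}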
Example 1: seekTFile
import org.apache.hadoop.io.BytesWritable; // import the class the method depends on
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader =
      new Reader(fsdis, fs.getFileStatus(path).getLen(), conf);
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  Scanner scanner = reader.createScanner();
  BytesWritable key = new BytesWritable();
  BytesWritable val = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    // Draw a random key and position the scanner at the first entry
    // whose key is greater than or equal to it.
    kSampler.next(key);
    scanner.lowerBound(key.get(), 0, key.getSize());
    if (!scanner.atEnd()) {
      // Count the bytes of the key/value pair that was actually read.
      scanner.entry().get(key, val);
      totalBytes += key.getSize();
      totalBytes += val.getSize();
    }
    else {
      ++miss;
    }
  }
  timer.stop();
  double duration = (double) timer.read() / 1000; // in us.
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(),
      NanoTimer.nanoTimeToString(timer.read() / options.seekCount),
      options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));
}
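Note that in more recent Hadoop releases BytesWritable.getSize() and get() are deprecated in favor of getLength() and getBytes()/copyBytes(). Assuming such a version and the same surrounding fields as in Example 1, the body of the seek loop could be written as the sketch below; the examples in this article keep the older accessors as they appear in the source.

// Same lookup as in Example 1, but using the non-deprecated accessors.
kSampler.next(key);
scanner.lowerBound(key.getBytes(), 0, key.getLength());
if (!scanner.atEnd()) {
  scanner.entry().get(key, val);
  totalBytes += key.getLength();
  totalBytes += val.getLength();
} else {
  ++miss;
}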
Example 2: createTFile
import org.apache.hadoop.io.BytesWritable; // import the class the method depends on
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer =
        new Writer(fout, options.minBlockSize, options.compress, "memcmp",
            conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        // Generate the next key/value pair and append it; getSize()
        // gives the number of valid bytes to write from each buffer.
        kvGen.next(key, val, false);
        writer.append(key.get(), 0, key.getSize(), val.get(), 0,
            val.getSize());
        totalBytes += key.getSize();
        totalBytes += val.getSize();
      }
      timer.stop();
    }
    finally {
      writer.close();
    }
  }
  finally {
    fout.close();
  }
  double duration = (double) timer.read() / 1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();
  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024,
      totalBytes / duration);
  System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}