This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataOutputStream.writeLong. If you are unsure what FSDataOutputStream.writeLong does, how to call it, or want to see it used in context, the curated examples below should help. You can also read further about the enclosing class, org.apache.hadoop.fs.FSDataOutputStream.
Seven code examples of FSDataOutputStream.writeLong are shown below, ordered by popularity.
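Before the collected examples, here is a minimal round-trip sketch of the method itself: writeLong emits exactly eight bytes in big-endian order (FSDataOutputStream implements java.io.DataOutput), and FSDataInputStream.readLong reads them back. This sketch is not one of the collected examples; the file path and the use of the local file system are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteLongRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);        // local FS, for illustration only
    Path file = new Path("/tmp/writelong-demo.bin");  // hypothetical path

    // writeLong writes exactly 8 bytes, big-endian (DataOutput contract).
    FSDataOutputStream out = fs.create(file, true);
    try {
      out.writeLong(42L);
    } finally {
      out.close();
    }

    FSDataInputStream in = fs.open(file);
    try {
      System.out.println(in.readLong()); // prints 42
    } finally {
      in.close();
    }
  }
}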
Example 1: createIndexFile
import org.apache.hadoop.fs.FSDataOutputStream; // import for the class this method depends on
private static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  // Use the raw local file system so no separate .crc checksum file is kept.
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  // Pad the record to the fixed index record length, then append its CRC
  // with writeLong so readers can validate the index.
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
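A reader of the file written above would recompute the CRC over the fixed-length record and compare it with the trailing long. The following is a minimal sketch of that check; the helper name and the use of CheckedInputStream are illustrative assumptions, not part of the example.

import java.io.DataInputStream;
import java.io.IOException;
import java.util.zip.CheckedInputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.PureJavaCrc32;

// Hypothetical helper: validate a record-plus-CRC layout like the one above.
static void verifyIndexFile(FileSystem fs, Path p, int recordLength) throws IOException {
  FSDataInputStream in = fs.open(p);
  try {
    PureJavaCrc32 crc = new PureJavaCrc32();
    CheckedInputStream chk = new CheckedInputStream(in, crc);
    byte[] record = new byte[recordLength];
    new DataInputStream(chk).readFully(record); // CRC accumulates over the record
    long expected = in.readLong();              // trailing checksum, written with writeLong
    if (crc.getValue() != expected) {
      throw new IOException("Checksum mismatch");
    }
  } finally {
    in.close();
  }
}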
Example 2: writeIndex
import org.apache.hadoop.fs.FSDataOutputStream; // import for the class this method depends on
static long writeIndex(final FSDataOutputStream o,
    final List<byte []> keys, final List<Long> offsets,
    final List<Integer> sizes)
    throws IOException {
  long pos = o.getPos();
  // Don't write an index if there is nothing to index.
  if (keys.size() > 0) {
    o.write(INDEXBLOCKMAGIC);
    // Write the index: one (offset, size, key) entry per key.
    for (int i = 0; i < keys.size(); ++i) {
      o.writeLong(offsets.get(i).longValue());
      o.writeInt(sizes.get(i).intValue());
      byte [] key = keys.get(i);
      Bytes.writeByteArray(o, key);
    }
  }
  return pos; // position where the index block starts
}
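Note that the block written above does not record how many entries it contains, so a reader must learn the count some other way (for example from file metadata, or by reading up to a known end offset). Below is a minimal counterpart sketch under that assumption; the method name is hypothetical, and INDEXBLOCKMAGIC is assumed to be the byte[] constant from the surrounding class.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical reader for the block written above; entryCount must be known.
static void readIndex(FSDataInputStream in, long indexOffset, int entryCount,
    byte[] indexBlockMagic) throws IOException {
  in.seek(indexOffset);
  byte[] magic = new byte[indexBlockMagic.length];
  in.readFully(magic);
  if (!Bytes.equals(magic, indexBlockMagic)) {
    throw new IOException("Bad index block magic");
  }
  for (int i = 0; i < entryCount; i++) {
    long offset = in.readLong();          // written with writeLong above
    int size = in.readInt();
    byte[] key = Bytes.readByteArray(in); // counterpart of Bytes.writeByteArray
    // ... use (offset, size, key) ...
  }
}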
Example 3: txt2dat
import org.apache.hadoop.fs.FSDataOutputStream; // import for the class this method depends on
public static void txt2dat(Path dir, String inputFile, String outputFile)
    throws IOException {
  FileSystem fileSystem = dir.getFileSystem(new Configuration());
  Path in = new Path(dir, inputFile);
  Path out = new Path(dir, outputFile);
  FSDataInputStream fsDataInputStream = fileSystem.open(in);
  InputStreamReader inputStreamReader = new InputStreamReader(fsDataInputStream);
  BufferedReader reader = new BufferedReader(inputStreamReader);
  FSDataOutputStream writer = fileSystem.create(out);
  try {
    // Each text line is "<key>\t<v1>,<v2>,..."; convert it to a binary
    // record: one long for the key followed by its doubles.
    String line = reader.readLine();
    while (line != null) {
      String[] keyVal = line.split("\\t");
      writer.writeLong(Long.parseLong(keyVal[0]));
      for (String aij : keyVal[1].split(",")) {
        writer.writeDouble(Double.parseDouble(aij));
      }
      line = reader.readLine();
    }
  } finally {
    reader.close();
    inputStreamReader.close();
    fsDataInputStream.close();
    writer.flush();
    writer.close();
  }
}
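The binary file produced above does not store how many doubles follow each key, so any reader needs that row width from elsewhere. A minimal sketch of the reverse conversion under that assumption follows; the method name and the fixed-width parameter are illustrative, not part of the example.

import java.io.EOFException;
import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical reverse of txt2dat: assumes every row has exactly `width` doubles.
static void printDat(FileSystem fs, Path datFile, int width) throws IOException {
  FSDataInputStream in = fs.open(datFile);
  try {
    while (true) {
      long key;
      try {
        key = in.readLong();   // matches writer.writeLong above
      } catch (EOFException eof) {
        break;                 // clean end of file
      }
      StringBuilder row = new StringBuilder(Long.toString(key));
      for (int i = 0; i < width; i++) {
        row.append(i == 0 ? "\t" : ",").append(in.readDouble());
      }
      System.out.println(row);
    }
  } finally {
    in.close();
  }
}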
Example 4: writeToStream
import org.apache.hadoop.fs.FSDataOutputStream; // import for the class this method depends on
public void writeToStream(FSDataOutputStream stream) throws IOException {
  Stopwatch watch = new Stopwatch();
  watch.start();
  available = false;
  // A random marker written ahead of the batch lets the reader verify
  // that it is aligned with this batch when reading back.
  check = ThreadLocalRandom.current().nextLong();
  start = stream.getPos();
  logger.debug("Writing check value {} at position {}", check, start);
  stream.writeLong(check);
  batch.getHeader().writeDelimitedTo(stream);
  ByteBuf buf = batch.getBody();
  if (buf != null) {
    bodyLength = buf.capacity();
  } else {
    bodyLength = 0;
  }
  if (bodyLength > 0) {
    buf.getBytes(0, stream, bodyLength);
  }
  stream.hsync(); // force the spooled batch to durable storage before acknowledging
  FileStatus status = fs.getFileStatus(path);
  long len = status.getLen();
  logger.debug("After spooling batch, stream at position {}. File length {}", stream.getPos(), len);
  batch.sendOk();
  latch.countDown();
  long t = watch.elapsed(TimeUnit.MICROSECONDS);
  logger.debug("Took {} us to spool {} to disk. Rate {} mb/s", t, bodyLength, bodyLength / t);
  if (buf != null) {
    buf.release();
  }
}
Example 5: testBadIndex
import org.apache.hadoop.fs.FSDataOutputStream; // import for the class this method depends on
public void testBadIndex() throws Exception {
  final int parts = 30;
  fs.delete(p, true);
  conf.setInt(TTConfig.TT_INDEX_CACHE, 1);
  IndexCache cache = new IndexCache(conf);
  Path f = new Path(p, "badindex");
  FSDataOutputStream out = fs.create(f, false);
  CheckedOutputStream iout = new CheckedOutputStream(out, new CRC32());
  DataOutputStream dout = new DataOutputStream(iout);
  for (int i = 0; i < parts; ++i) {
    for (int j = 0; j < MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8; ++j) {
      if (0 == (i % 3)) {
        // Writing through dout updates the CRC; writing through out
        // bypasses it, so the trailing checksum will not match the data.
        dout.writeLong(i);
      } else {
        out.writeLong(i);
      }
    }
  }
  out.writeLong(iout.getChecksum().getValue());
  dout.close();
  try {
    cache.getIndexInformation("badindex", 7, f,
        UserGroupInformation.getCurrentUser().getShortUserName());
    fail("Did not detect bad checksum");
  } catch (IOException e) {
    if (!(e.getCause() instanceof ChecksumException)) {
      throw e;
    }
  }
}
Example 6: writeFile
import org.apache.hadoop.fs.FSDataOutputStream; // import for the class this method depends on
private static void writeFile(FileSystem fs, Path f, long fill, int parts)
    throws IOException {
  FSDataOutputStream out = fs.create(f, false);
  CheckedOutputStream iout = new CheckedOutputStream(out, new CRC32());
  DataOutputStream dout = new DataOutputStream(iout);
  for (int i = 0; i < parts; ++i) {
    // One index record (MAP_OUTPUT_INDEX_RECORD_LENGTH bytes) per partition,
    // written as longs so the CRC accumulates over every byte.
    for (int j = 0; j < MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8; ++j) {
      dout.writeLong(fill);
    }
  }
  // Trailing checksum over everything written through dout.
  out.writeLong(iout.getChecksum().getValue());
  dout.close();
}
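For context, MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH is 24 bytes, i.e. three longs per reduce partition (start offset, raw length, compressed part length), which is why the inner loop above runs MAP_OUTPUT_INDEX_RECORD_LENGTH / 8 times. Below is a hedged sketch that writes one realistic record per partition instead of a constant fill; the method name and field arrays are assumptions for illustration.

import java.io.DataOutputStream;
import java.io.IOException;
import java.util.zip.CRC32;
import java.util.zip.CheckedOutputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: each 24-byte record is (startOffset, rawLength, partLength).
static void writeRealisticIndex(FileSystem fs, Path f, long[] startOffsets,
    long[] rawLengths, long[] partLengths) throws IOException {
  FSDataOutputStream out = fs.create(f, false);
  CheckedOutputStream iout = new CheckedOutputStream(out, new CRC32());
  DataOutputStream dout = new DataOutputStream(iout);
  for (int i = 0; i < startOffsets.length; ++i) {
    dout.writeLong(startOffsets[i]);
    dout.writeLong(rawLengths[i]);
    dout.writeLong(partLengths[i]);
  }
  out.writeLong(iout.getChecksum().getValue()); // trailing CRC, as above
  dout.close();
}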
Example 7: writeToStream
import org.apache.hadoop.fs.FSDataOutputStream; // import for the class this method depends on
public void writeToStream(FSDataOutputStream stream) throws IOException {
  Stopwatch watch = Stopwatch.createStarted();
  ByteBuf buf = null;
  try {
    // Random marker written ahead of the batch so a reader can verify alignment.
    check = ThreadLocalRandom.current().nextLong();
    start = stream.getPos();
    logger.debug("Writing check value {} at position {}", check, start);
    stream.writeLong(check);
    batch.getHeader().writeDelimitedTo(stream);
    buf = batch.getBody();
    if (buf != null) {
      bodyLength = buf.capacity();
    } else {
      bodyLength = 0;
    }
    if (bodyLength > 0) {
      buf.getBytes(0, stream, bodyLength);
    }
    stream.hsync(); // make the spilled batch durable before acknowledging
    FileStatus status = spillFile.getFileStatus();
    long len = status.getLen();
    logger.debug("After spooling batch, stream at position {}. File length {}", stream.getPos(), len);
    long t = watch.elapsed(TimeUnit.MICROSECONDS);
    logger.debug("Took {} us to spool {} to disk. Rate {} mb/s", t, bodyLength, bodyLength / t);
  } finally {
    // Even if the try block throws an exception we still want to send an ACK
    // and release the lock; the caller adds the exception to the deferred
    // attribute, and it is rethrown when the poll() method is called.
    try {
      batch.sendOk(); // this can also throw an exception
    } finally {
      state = BatchState.SPILLED;
      batch = null;
      if (buf != null) {
        buf.release();
      }
    }
  }
}