当前位置: 首页>>代码示例>>Java>>正文


Java FSDataOutputStream.writeLong方法代码示例

本文整理汇总了Java中org.apache.hadoop.fs.FSDataOutputStream.writeLong方法的典型用法代码示例。如果您正苦于以下问题:Java FSDataOutputStream.writeLong方法的具体用法?Java FSDataOutputStream.writeLong怎么用?Java FSDataOutputStream.writeLong使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.fs.FSDataOutputStream的用法示例。


在下文中一共展示了FSDataOutputStream.writeLong方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: createIndexFile

import org.apache.hadoop.fs.FSDataOutputStream; //导入方法依赖的package包/类
/**
 * Creates a fresh shuffle index file for testing: one index record's worth of
 * bytes (a padded ASCII message) followed by the CRC of those bytes.
 *
 * @param indexFile local file to (re)create
 * @param conf      Hadoop configuration used to obtain the raw local FS
 * @throws IOException if the old file cannot be removed, the new file cannot
 *                     be created, or writing fails
 */
private static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    // Original ignored the result; a silent delete failure would make the
    // later append() write into stale content.
    if (!indexFile.delete()) {
      throw new IOException("Failed to delete existing file " + indexFile);
    }
  }
  if (!indexFile.createNewFile()) {
    throw new IOException("Failed to create file " + indexFile);
  }
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  try {
    Checksum crc = new PureJavaCrc32();
    crc.reset();
    CheckedOutputStream chk = new CheckedOutputStream(output, crc);
    String msg = "Writing new index file. This file will be used only " +
        "for the testing.";
    // Pad (or truncate) the message to exactly one index-record length so the
    // record boundary matches what index readers expect. msg is pure ASCII,
    // so the default-charset getBytes() is deterministic here.
    chk.write(Arrays.copyOf(msg.getBytes(),
        MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
    // Trailing checksum covers only the bytes routed through `chk`.
    output.writeLong(chk.getChecksum().getValue());
  } finally {
    // Ensure the appended stream is closed even if a write above throws
    // (the original leaked `output` on any exception).
    output.close();
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:20,代码来源:TestShuffleHandler.java

示例2: writeIndex

import org.apache.hadoop.fs.FSDataOutputStream; //导入方法依赖的package包/类
/**
 * Appends an index block to the stream: the magic marker followed by one
 * (offset, size, key) record per entry, in list order. Writes nothing when
 * there are no keys.
 *
 * @return the stream position at which the index block begins (the position
 *         before anything was written), so the caller can record it
 */
static long writeIndex(final FSDataOutputStream o,
  final List<byte []> keys, final List<Long> offsets,
  final List<Integer> sizes)
throws IOException {
  final long indexStart = o.getPos();
  // Nothing to index — report the current position and leave the stream
  // untouched.
  if (keys.isEmpty()) {
    return indexStart;
  }
  o.write(INDEXBLOCKMAGIC);
  final int entryCount = keys.size();
  for (int idx = 0; idx < entryCount; ++idx) {
    o.writeLong(offsets.get(idx).longValue());
    o.writeInt(sizes.get(idx).intValue());
    Bytes.writeByteArray(o, keys.get(idx));
  }
  return indexStart;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:19,代码来源:IndexFile.java

示例3: txt2dat

import org.apache.hadoop.fs.FSDataOutputStream; //导入方法依赖的package包/类
/**
 * Converts a text matrix file into a binary ".dat" representation.
 *
 * <p>Each input line is expected to be {@code <key>\t<a1>,<a2>,...}; the key
 * is written as a long, followed by each comma-separated value as a double.
 *
 * @param dir        directory containing both files
 * @param inputFile  name of the text input, relative to {@code dir}
 * @param outputFile name of the binary output, relative to {@code dir}
 * @throws IOException           on any filesystem failure
 * @throws NumberFormatException if a token is not a valid long/double
 *                               (propagates, as in the original)
 */
public static void txt2dat(Path dir, String inputFile, String outputFile)
        throws IOException {

    FileSystem fileSystem = dir.getFileSystem(new Configuration());

    Path in = new Path(dir, inputFile);
    Path out = new Path(dir, outputFile);

    // try-with-resources closes every stream even when a read/parse/write
    // throws. The original closed the reader chain first in a finally block,
    // so an exception from reader.close() would leak `writer` entirely and
    // skip its flush. Close order here (reverse of declaration) flushes and
    // closes the writer first, then the reader chain.
    try (FSDataInputStream fsDataInputStream = fileSystem.open(in);
         BufferedReader reader =
             new BufferedReader(new InputStreamReader(fsDataInputStream));
         FSDataOutputStream writer = fileSystem.create(out)) {

        String line = reader.readLine();
        while (line != null) {

            String[] keyVal = line.split("\\t");
            writer.writeLong(Long.parseLong(keyVal[0]));

            for (String aij : keyVal[1].split(",")) {
                writer.writeDouble(Double.parseDouble(aij));
            }

            line = reader.readLine();
        }
    }
}
 
开发者ID:Romm17,项目名称:MRNMF,代码行数:37,代码来源:MatrixByteConverter.java

示例4: writeToStream

import org.apache.hadoop.fs.FSDataOutputStream; //导入方法依赖的package包/类
/**
 * Spools the current batch to the given stream: a random check value, the
 * delimited header, then the raw body bytes; hsyncs, ACKs the batch, and
 * releases the body buffer.
 *
 * <p>Mutates instance state ({@code available}, {@code check}, {@code start},
 * {@code bodyLength}) and counts down {@code latch}, so statement order is
 * significant.
 *
 * @param stream destination stream (left open for subsequent batches)
 * @throws IOException if any write or the filesystem status call fails;
 *                     note the buffer is NOT released on that path —
 *                     NOTE(review): possible ByteBuf leak on exception,
 *                     confirm against caller's error handling
 */
public void writeToStream(FSDataOutputStream stream) throws IOException {
  Stopwatch watch = new Stopwatch();
  watch.start();
  // Mark the in-memory copy unavailable before it hits disk.
  available = false;
  // Random marker written ahead of the batch; used later to verify the
  // spooled record when it is read back.
  check = ThreadLocalRandom.current().nextLong();
  start = stream.getPos();
  logger.debug("Writing check value {} at position {}", check, start);
  stream.writeLong(check);
  batch.getHeader().writeDelimitedTo(stream);
  ByteBuf buf = batch.getBody();
  // A null body is legal and spools as zero bytes.
  if (buf != null) {
    bodyLength = buf.capacity();
  } else {
    bodyLength = 0;
  }
  if (bodyLength > 0) {
    buf.getBytes(0, stream, bodyLength);
  }
  // Force data to disk before acknowledging the batch.
  stream.hsync();
  FileStatus status = fs.getFileStatus(path);
  long len = status.getLen();
  logger.debug("After spooling batch, stream at position {}. File length {}", stream.getPos(), len);
  // ACK only after a durable write, then unblock anyone awaiting the spill.
  batch.sendOk();
  latch.countDown();
  long t = watch.elapsed(TimeUnit.MICROSECONDS);
  // NOTE(review): if the spool took < 1 us, t is 0 and this long division
  // throws ArithmeticException — confirm whether that path is reachable.
  logger.debug("Took {} us to spool {} to disk. Rate {} mb/s", t, bodyLength, bodyLength / t);
  if (buf != null) {
    buf.release();
  }
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:31,代码来源:SpoolingRawBatchBuffer.java

示例5: testBadIndex

import org.apache.hadoop.fs.FSDataOutputStream; //导入方法依赖的package包/类
/**
 * Verifies that IndexCache detects a corrupt index file: the file's trailing
 * checksum must not match the record bytes, and the resulting IOException
 * must be caused by a ChecksumException.
 */
public void testBadIndex() throws Exception {
  final int parts = 30;
  fs.delete(p, true);
  conf.setInt(TTConfig.TT_INDEX_CACHE, 1);
  IndexCache cache = new IndexCache(conf);

  Path f = new Path(p, "badindex");
  FSDataOutputStream out = fs.create(f, false);
  CheckedOutputStream iout = new CheckedOutputStream(out, new CRC32());
  DataOutputStream dout = new DataOutputStream(iout);
  for (int i = 0; i < parts; ++i) {
    for (int j = 0; j < MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8; ++j) {
      // Deliberate corruption: every part not divisible by 3 is written
      // through `out`, BYPASSING the CheckedOutputStream, so those longs
      // reach the file but are excluded from the running CRC. The trailing
      // checksum therefore cannot match the file contents.
      if (0 == (i % 3)) {
        dout.writeLong(i);
      } else {
        out.writeLong(i);
      }
    }
  }
  // Checksum of only the `dout`-routed bytes — intentionally wrong for the
  // file as a whole.
  out.writeLong(iout.getChecksum().getValue());
  dout.close();
  try {
    cache.getIndexInformation("badindex", 7, f,
      UserGroupInformation.getCurrentUser().getShortUserName());
    fail("Did not detect bad checksum");
  } catch (IOException e) {
    // Only a checksum failure is acceptable; anything else is a real error.
    if (!(e.getCause() instanceof ChecksumException)) {
      throw e;
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:32,代码来源:TestIndexCache.java

示例6: writeFile

import org.apache.hadoop.fs.FSDataOutputStream; //导入方法依赖的package包/类
/**
 * Writes a well-formed index file: {@code parts} records of
 * MAP_OUTPUT_INDEX_RECORD_LENGTH bytes (each long set to {@code fill}),
 * followed by the CRC32 of all record bytes.
 *
 * @param fs    filesystem to create the file on
 * @param f     destination path (must not already exist)
 * @param fill  value repeated for every long in every record
 * @param parts number of index records to write
 * @throws IOException if creation or any write fails
 */
private static void writeFile(FileSystem fs, Path f, long fill, int parts)
    throws IOException {
  FSDataOutputStream out = fs.create(f, false);
  CheckedOutputStream iout = new CheckedOutputStream(out, new CRC32());
  // try-with-resources guarantees the stream chain is closed even if a
  // write throws (the original leaked `out` on any exception before close).
  // Closing `dout` closes the wrapped `iout` and `out` as well.
  try (DataOutputStream dout = new DataOutputStream(iout)) {
    for (int i = 0; i < parts; ++i) {
      for (int j = 0; j < MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8; ++j) {
        dout.writeLong(fill);
      }
    }
    // Checksum is written through `out` directly so it is not folded into
    // the CRC that it reports.
    out.writeLong(iout.getChecksum().getValue());
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:14,代码来源:TestIndexCache.java

示例7: writeToStream

import org.apache.hadoop.fs.FSDataOutputStream; //导入方法依赖的package包/类
/**
 * Spools the current batch to the given stream (random check value, delimited
 * header, raw body bytes), hsyncs it, and — success or failure — ACKs the
 * batch, marks it SPILLED, and releases its body buffer.
 *
 * <p>The nested finally ordering is load-bearing: the ACK must be attempted
 * even if spooling failed, and the state transition / buffer release must
 * happen even if the ACK itself throws.
 *
 * @param stream destination stream (left open for subsequent batches)
 * @throws IOException if any write or the file-status call fails
 */
public void writeToStream(FSDataOutputStream stream) throws IOException {
  Stopwatch watch = Stopwatch.createStarted();
  ByteBuf buf = null;
  try {
    // Random marker written ahead of the batch; used to verify the spooled
    // record when it is read back.
    check = ThreadLocalRandom.current().nextLong();
    start = stream.getPos();
    logger.debug("Writing check value {} at position {}", check, start);
    stream.writeLong(check);
    batch.getHeader().writeDelimitedTo(stream);
    buf = batch.getBody();
    // A null body is legal and spools as zero bytes.
    if (buf != null) {
      bodyLength = buf.capacity();
    } else {
      bodyLength = 0;
    }
    if (bodyLength > 0) {
      buf.getBytes(0, stream, bodyLength);
    }
    // Force data to disk before the batch is acknowledged below.
    stream.hsync();
    FileStatus status = spillFile.getFileStatus();
    long len = status.getLen();
    logger.debug("After spooling batch, stream at position {}. File length {}", stream.getPos(), len);
    long t = watch.elapsed(TimeUnit.MICROSECONDS);
    // NOTE(review): if t is 0 us this long division throws
    // ArithmeticException — confirm whether that path is reachable.
    logger.debug("Took {} us to spool {} to disk. Rate {} mb/s", t, bodyLength, bodyLength / t);
  } finally {
    // even if the try block throws an exception we still want to send an ACK and release the lock
    // the caller will add the exception to deferred attribute and it will be thrown when the poll() method is called
    try {
      batch.sendOk(); // this can also throw an exception
    } finally {
      // Runs unconditionally: transition state, drop the batch reference,
      // and release the Netty buffer so it is not leaked on any path.
      state = BatchState.SPILLED;
      batch = null;
      if (buf != null) {
        buf.release();
      }
    }
  }
}
 
开发者ID:dremio,项目名称:dremio-oss,代码行数:39,代码来源:SpoolingRawBatchBuffer.java


注:本文中的org.apache.hadoop.fs.FSDataOutputStream.writeLong方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。