

Java FSDataInputStream.getPos Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataInputStream.getPos, gathered from open-source projects. If you are wondering what FSDataInputStream.getPos does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FSDataInputStream.


Six code examples of the FSDataInputStream.getPos method are shown below, ordered by popularity.
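For orientation before the examples, here is a minimal, self-contained sketch of the method's contract: getPos() returns the stream's current byte offset, which is 0 right after open, jumps with seek(), and advances with sequential reads. The file path below is a hypothetical placeholder, not taken from any of the cited projects.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetPosDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/sample.txt"); // hypothetical path
    try (FSDataInputStream in = fs.open(path)) {
      System.out.println(in.getPos()); // 0 immediately after open
      in.seek(128);                    // reposition the stream
      System.out.println(in.getPos()); // 128 after the seek
      byte[] buf = new byte[64];
      int n = in.read(buf);            // a sequential read advances the position
      System.out.println(in.getPos()); // 128 + n when n > 0
      // Positional reads, e.g. in.read(pos, buf, 0, len), do NOT move getPos().
    }
  }
}

Note the last comment: positional reads (from the PositionedReadable interface) deliberately leave the sequential position untouched, a distinction Example 5 below exercises explicitly.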

Example 1: dumpFromOffset

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private long dumpFromOffset(PathData item, long offset) throws IOException {
  long fileSize = item.refreshStatus().getLen();
  if (offset > fileSize) return fileSize;
  // treat a negative offset as relative to end of the file, floor of 0
  if (offset < 0) {
    offset = Math.max(fileSize + offset, 0);
  }
  
  FSDataInputStream in = item.fs.open(item.path);
  try {
    in.seek(offset);
    // use conf so the system configured io block size is used
    IOUtils.copyBytes(in, System.out, getConf(), false);
    offset = in.getPos();
  } finally {
    in.close();
  }
  return offset;
}
 
Author: nucypher | Project: hadoop-oss | Lines: 20 | Source file: Tail.java
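Design note: dumpFromOffset returns in.getPos() rather than the requested offset, so the caller learns exactly where the dump stopped. In Hadoop's Tail command this is presumably what lets follow mode (-f) re-invoke the method and resume from the last position without re-reading bytes.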

Example 2: getSplitDetails

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
@SuppressWarnings("unchecked")
private <T> T getSplitDetails(Path file, long offset)
    throws IOException {
  FileSystem fs = file.getFileSystem(conf);
  FSDataInputStream inFile = fs.open(file);
  inFile.seek(offset);
  // the split file stores the split's class name first, then its serialized form
  String className = StringInterner.weakIntern(Text.readString(inFile));
  Class<T> cls;
  try {
    cls = (Class<T>) conf.getClassByName(className);
  } catch (ClassNotFoundException ce) {
    IOException wrap = new IOException("Split class " + className + 
                                        " not found");
    wrap.initCause(ce);
    throw wrap;
  }
  SerializationFactory factory = new SerializationFactory(conf);
  Deserializer<T> deserializer = 
    (Deserializer<T>) factory.getDeserializer(cls);
  deserializer.open(inFile);
  T split = deserializer.deserialize(null);
  // getPos() - offset = number of raw bytes the split record occupied
  long pos = inFile.getPos();
  getCounters().findCounter(
      TaskCounter.SPLIT_RAW_BYTES).increment(pos - offset);
  inFile.close();
  return split;
}
 
Author: naver | Project: hadoop | Lines: 28 | Source file: MapTask.java
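Usage note: because deserializer.open(inFile) reads directly from the stream, the difference inFile.getPos() - offset is a free measurement of the serialized split's size; MapTask credits it to the SPLIT_RAW_BYTES task counter without any manual byte counting.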

Example 3: DeprecatedSingleStreamPageReader

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
DeprecatedSingleStreamPageReader(ColumnReader<?> parentStatus, FSDataInputStream inputStream, Path path, ColumnChunkMetaData columnChunkMetaData) throws ExecutionSetupException {
  super(parentStatus, inputStream, path, columnChunkMetaData);
  try {
    lastPosition = inputStream.getPos();
  } catch (IOException e) {
    throw new ExecutionSetupException("Error in getting current position for parquet file at location: " + path, e);
  }
  this.inputStream = inputStream;
}
 
Author: dremio | Project: dremio-oss | Lines: 10 | Source file: DeprecatedSingleStreamPageReader.java
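Design note: the constructor snapshots inputStream.getPos() into lastPosition at construction time. Since this single-stream page reader shares its FSDataInputStream with other consumers, caching the last known position presumably lets it detect repositioning by others and seek back before its next read; the same idiom reappears in Example 6.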

Example 4: initInternal

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private String initInternal(FSDataInputStream stream, boolean isFirst)
    throws IOException {
  close();
  long expectedPos = PB_WAL_MAGIC.length;
  if (stream == null) {
    stream = fs.open(path);
    stream.seek(expectedPos);
  }
  if (stream.getPos() != expectedPos) {
    throw new IOException("The stream is at invalid position: " + stream.getPos());
  }
  // Initialize metadata or, when we reset, just skip the header.
  WALProtos.WALHeader.Builder builder = WALProtos.WALHeader.newBuilder();
  WALHdrContext hdrCtxt = readHeader(builder, stream);
  WALHdrResult walHdrRes = hdrCtxt.getResult();
  if (walHdrRes == WALHdrResult.EOF) {
    throw new EOFException("Couldn't read WAL PB header");
  }
  if (walHdrRes == WALHdrResult.UNKNOWN_WRITER_CLS) {
    throw new IOException("Got unknown writer class: " + builder.getWriterClsName());
  }
  if (isFirst) {
    WALProtos.WALHeader header = builder.build();
    this.hasCompression = header.hasHasCompression() && header.getHasCompression();
    this.hasTagCompression = header.hasHasTagCompression() && header.getHasTagCompression();
  }
  this.inputStream = stream;
  this.walEditsStopOffset = this.fileLength;
  long currentPosition = stream.getPos();
  trailerPresent = setTrailerIfPresent();
  this.seekOnFs(currentPosition);
  if (LOG.isTraceEnabled()) {
    LOG.trace("After reading the trailer: walEditsStopOffset: " + this.walEditsStopOffset
        + ", fileLength: " + this.fileLength + ", " + "trailerPresent: " + trailerPresent);
  }
  
  codecClsName = hdrCtxt.getCellCodecClsName();
  
  return codecClsName;
}
 
Author: fengchen8086 | Project: ditb | Lines: 41 | Source file: ProtobufLogReader.java
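Usage note: getPos() plays three distinct roles here: validating that a caller-supplied stream sits exactly past the PB_WAL_MAGIC bytes, reporting the offending offset in the IOException message, and snapshotting the position before setTrailerIfPresent() scans ahead so that seekOnFs(currentPosition) can restore it afterwards.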

Example 5: readUntilEnd

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/**
 * Read chunks into the buffer repeatedly until a total of visibleLen bytes
 * has been read. Returns the total number of bytes read.
 */
private long readUntilEnd(FSDataInputStream in, byte[] buffer, long size,
    String fname, long pos, long visibleLen, boolean positionReadOption)
    throws IOException {

  if (pos >= visibleLen || visibleLen <= 0)
    return 0;

  int chunkNumber = 0;
  long totalByteRead = 0;
  long currentPosition = pos;
  int byteRead = 0;
  long byteLeftToRead = visibleLen - pos;
  int byteToReadThisRound = 0;

  if (!positionReadOption) {
    in.seek(pos);
    currentPosition = in.getPos();
  }
  if (verboseOption)
    LOG.info("reader begin: position: " + pos + " ; currentOffset = "
        + currentPosition + " ; bufferSize =" + buffer.length
        + " ; Filename = " + fname);
  try {
    while (byteLeftToRead > 0 && currentPosition < visibleLen) {
      byteToReadThisRound = (int) (byteLeftToRead >= buffer.length 
          ? buffer.length : byteLeftToRead);
      if (positionReadOption) {
        byteRead = in.read(currentPosition, buffer, 0, byteToReadThisRound);
      } else {
        byteRead = in.read(buffer, 0, byteToReadThisRound);
      }
      if (byteRead <= 0)
        break;
      chunkNumber++;
      totalByteRead += byteRead;
      currentPosition += byteRead;
      byteLeftToRead -= byteRead;

      if (verboseOption) {
        LOG.info("reader: Number of byte read: " + byteRead
            + " ; totalByteRead = " + totalByteRead + " ; currentPosition="
            + currentPosition + " ; chunkNumber =" + chunkNumber
            + "; File name = " + fname);
      }
    }
  } catch (IOException e) {
    throw new IOException(
        "#### Exception caught in readUntilEnd: reader  currentOffset = "
            + currentPosition + " ; totalByteRead =" + totalByteRead
            + " ; latest byteRead = " + byteRead + "; visibleLen= "
            + visibleLen + " ; bufferLen = " + buffer.length
            + " ; Filename = " + fname, e);
  }

  if (verboseOption)
    LOG.info("reader end:   position: " + pos + " ; currentOffset = "
        + currentPosition + " ; totalByteRead =" + totalByteRead
        + " ; Filename = " + fname);

  return totalByteRead;
}
 
Author: naver | Project: hadoop | Lines: 66 | Source file: TestWriteRead.java
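Usage note: this test exercises the distinction shown in the sketch at the top of the page. In the sequential path the starting position is established with seek() and confirmed with getPos(), and each read(buffer, 0, n) advances it; in the positional path, read(currentPosition, buffer, 0, n) leaves the stream position untouched, which is why the test tracks currentPosition itself.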

Example 6: SingleStreamColumnChunkIncPageReader

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
public SingleStreamColumnChunkIncPageReader(ColumnChunkMetaData metaData, ColumnDescriptor columnDescriptor, FSDataInputStream in) throws IOException {
  super(metaData, columnDescriptor, in);
  lastPosition = in.getPos();
}
 
Author: dremio | Project: dremio-oss | Lines: 5 | Source file: ColumnChunkIncReadStore.java
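Design note: the same idiom as Example 3. The reader records in.getPos() as lastPosition immediately after construction, presumably so that later page reads on the shared stream can detect whether another reader has moved the file pointer and seek back before continuing.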


Note: the org.apache.hadoop.fs.FSDataInputStream.getPos examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their original authors, who retain copyright; consult each project's license before redistributing or reusing the code. Do not reproduce this page without permission.