

Java FSDataInputStream.seek Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataInputStream.seek. If you have been wondering what FSDataInputStream.seek does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FSDataInputStream.


The sections below present 15 code examples of the FSDataInputStream.seek method, sorted by popularity by default.
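
Before the examples, here is a minimal sketch of the pattern most of them follow: open a file, seek(offset) to an absolute byte position, then read sequentially from there. This is a sketch only; the HDFS URI and file path below are hypothetical placeholders, and it assumes the file holds at least 192 bytes:

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SeekSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // hypothetical cluster URI, for illustration only
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000/"), conf);
    try (FSDataInputStream in = fs.open(new Path("/tmp/data.bin"))) { // hypothetical path
      in.seek(128L);              // move the read position to absolute byte offset 128
      byte[] buf = new byte[64];
      in.readFully(buf);          // read the 64 bytes at offsets 128..191
      System.out.println("position after read: " + in.getPos()); // prints 192
    }
  }
}

Seeking past the end of the file, or on a closed stream, typically raises an IOException (Example 4 below exercises the closed-stream case).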

Example 1: verifySeek

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/** test seek */
static void verifySeek(FileSystem fs, Path p, long offset, long length,
    byte[] buf, byte[] expected) throws IOException { 
  long remaining = length - offset;
  long checked = 0;
  LOG.info("XXX SEEK: offset=" + offset + ", remaining=" + remaining);

  final Ticker t = new Ticker("SEEK", "offset=%d, remaining=%d",
      offset, remaining);
  final FSDataInputStream in = fs.open(p, 64 << 10);
  in.seek(offset);
  for(; remaining > 0; ) {
    t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
    final int n = (int)Math.min(remaining, buf.length);
    in.readFully(buf, 0, n);
    checkData(offset, remaining, n, buf, expected);

    offset += n;
    remaining -= n;
    checked += n;
  }
  in.close();
  t.end(checked);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source: TestWebHDFS.java

Example 2: tailFile

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private long tailFile(Path file, long startPos) throws IOException {
  long numRead = 0;
  FSDataInputStream inputStream = fileSystem.open(file);
  inputStream.seek(startPos);

  int len = 4 * 1024;
  byte[] buf = new byte[len];
  int read;
  while ((read = inputStream.read(buf)) > -1) {
    LOG.info(String.format("read %d bytes", read));

    if (!validateSequentialBytes(buf, (int) (startPos + numRead), read)) {
      LOG.error(String.format("invalid bytes: [%s]\n", Arrays.toString(buf)));
      throw new ChecksumException(
        String.format("unable to validate bytes"),
        startPos
      );
    }

    numRead += read;
  }

  inputStream.close();
  return numRead + startPos - 1;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source: TestFileConcurrentReader.java

Example 3: loadPartitions

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private static void loadPartitions(Model model, Path matrixPath, FileSystem fs,
    List<Integer> partitionIds, int startPos, int endPos, ModelFilesMeta meta)
    throws IOException {

  FSDataInputStream input = null;
  long offset = 0;
  String currentFileName = "";
  for (int i = startPos; i < endPos; i++) {
    ModelPartitionMeta partMeta = meta.getPartMeta(partitionIds.get(i));
    String fileName = partMeta.getFileName();
    offset = partMeta.getOffset();
    if (!fileName.equals(currentFileName)) {
      currentFileName = fileName;
      if (input != null) {
        input.close();
      }
      input = fs.open(new Path(matrixPath, currentFileName));
    }
    input.seek(offset);
    loadPartition(model, input, partMeta);
  }

  if (input != null) {
    input.close();
  }
}
 
Developer ID: Tencent, Project: angel, Lines of code: 28, Source: ModelLoader.java

Example 4: testReadClosedStream

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
@Test
public void testReadClosedStream() throws IOException {
  final Path testFile = new Path("/testfile+2");
  FSDataOutputStream os = hdfs.create(testFile, true);
  os.writeBytes("0123456789");
  os.close();

  // ByteRangeInputStream delays opens until reads. Make sure it doesn't
  // open a closed stream that has never been opened
  FSDataInputStream in = hftpFs.open(testFile);
  in.close();
  checkClosedStream(in);
  checkClosedStream(in.getWrappedStream());

  // force the stream to connect and then close it
  in = hftpFs.open(testFile);
  int ch = in.read();
  assertEquals('0', ch);
  in.close();
  checkClosedStream(in);
  checkClosedStream(in.getWrappedStream());

  // make sure seeking doesn't automagically reopen the stream
  in.seek(4);
  checkClosedStream(in);
  checkClosedStream(in.getWrappedStream());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source: TestHftpFileSystem.java

Example 5: readAndCompare

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private void readAndCompare(FSDataInputStream in, int position, int len)
    throws IOException {
  byte[] b = new byte[len];
  in.seek(position);
  IOUtils.readFully(in, b, 0, b.length);

  for (int i = 0; i < b.length; i++) {
    assertEquals(expected[position + i], b[i]);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 11, Source: TestFSInputChecker.java

Example 6: loadSparseFloatRowFromPartition

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
public static Int2FloatOpenHashMap loadSparseFloatRowFromPartition(FSDataInputStream input,
    ModelPartitionMeta partMeta, int rowId) throws IOException {
  RowOffset rowOffset = partMeta.getRowMetas().get(rowId);
  input.seek(rowOffset.getOffset());
  Preconditions.checkState(input.readInt() == rowId);
  int num = input.readInt();
  Int2FloatOpenHashMap row = new Int2FloatOpenHashMap();
  for (int i = 0; i < num; i++) {
    row.put(input.readInt(), input.readFloat());
  }
  return row;
}
 
Developer ID: Tencent, Project: angel, Lines of code: 13, Source: ModelLoader.java

Example 7: main

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    String uri = args[0];
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(uri), conf);
    FSDataInputStream in = null;

    try{
        in = fs.open(new Path(uri));
        IOUtils.copyBytes(in, System.out, 4096, false);
        in.seek(0); // seek back to the start of the file
        IOUtils.copyBytes(in, System.out, 4096, false);
    } finally {
        IOUtils.closeStream(in);
    }
}
 
Developer ID: myziyue, Project: learn-to-hadoop, Lines of code: 16, Source: FileSystemDoubleCat.java

Example 8: loadSparseIntRowFromPartition

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
public static Int2IntOpenHashMap loadSparseIntRowFromPartition(FSDataInputStream input,
    ModelPartitionMeta partMeta, int rowId) throws IOException {
  RowOffset rowOffset = partMeta.getRowMetas().get(rowId);
  input.seek(rowOffset.getOffset());
  Preconditions.checkState(input.readInt() == rowId);
  int num = input.readInt();
  Int2IntOpenHashMap row = new Int2IntOpenHashMap();
  for (int i = 0; i < num; i++) {
    row.put(input.readInt(), input.readInt());
  }
  return row;
}
 
Developer ID: Tencent, Project: angel, Lines of code: 13, Source: ModelLoader.java

Example 9: copyMapOutput

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/**
 * Retrieve the map output of a single map task
 * and send it to the merger.
 */
private boolean copyMapOutput(TaskAttemptID mapTaskId) throws IOException {
  // Figure out where the map task stored its output.
  Path mapOutputFileName = localMapFiles.get(mapTaskId).getOutputFile();
  Path indexFileName = mapOutputFileName.suffix(".index");

  // Read its index to determine the location of our split
  // and its size.
  SpillRecord sr = new SpillRecord(indexFileName, job);
  IndexRecord ir = sr.getIndex(reduce);

  long compressedLength = ir.partLength;
  long decompressedLength = ir.rawLength;

  compressedLength -= CryptoUtils.cryptoPadding(job);
  decompressedLength -= CryptoUtils.cryptoPadding(job);

  // Get the location for the map output - either in-memory or on-disk
  MapOutput<K, V> mapOutput = merger.reserve(mapTaskId, decompressedLength,
      id);

  // Check if we can shuffle *now* ...
  if (mapOutput == null) {
    LOG.info("fetcher#" + id + " - MergeManager returned Status.WAIT ...");
    return false;
  }

  // Go!
  LOG.info("localfetcher#" + id + " about to shuffle output of map " + 
           mapOutput.getMapId() + " decomp: " +
           decompressedLength + " len: " + compressedLength + " to " +
           mapOutput.getDescription());

  // now read the file, seek to the appropriate section, and send it.
  FileSystem localFs = FileSystem.getLocal(job).getRaw();
  FSDataInputStream inStream = localFs.open(mapOutputFileName);

  inStream = CryptoUtils.wrapIfNecessary(job, inStream);

  try {
    inStream.seek(ir.startOffset + CryptoUtils.cryptoPadding(job));
    mapOutput.shuffle(LOCALHOST, inStream, compressedLength, decompressedLength, metrics, reporter);
  } finally {
    try {
      inStream.close();
    } catch (IOException ioe) {
      LOG.warn("IOException closing inputstream from map output: "
          + ioe.toString());
    }
  }

  scheduler.copySucceeded(mapTaskId, LOCALHOST, compressedLength, 0, 0,
      mapOutput);
  return true; // successful fetch.
}
 
Developer ID: naver, Project: hadoop, Lines of code: 59, Source: LocalFetcher.java

Example 10: loadPartitions

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private void loadPartitions(Path matrixPath, FileSystem fs,
  List<Integer> partitionIds, int startPos, int endPos,
  PSModelFilesMeta serverMatrixMeta) throws IOException {

  ServerPartition partition = null;
  FSDataInputStream input = null;
  long offset = 0;
  String currentFileName = "";
  for(int i = startPos; i < endPos; i++) {
    partition = partitionMaps.get(partitionIds.get(i));
    ModelPartitionMeta partMeta = serverMatrixMeta.getPartitionMeta(partitionIds.get(i));
    String fileName = partMeta.getFileName();
    offset = partMeta.getOffset();
    if(!fileName.equals(currentFileName)) {
      currentFileName = fileName;
      if(input != null) {
        input.close();
      }
      input = fs.open(new Path(matrixPath, currentFileName));
    }
    input.seek(offset);
    partition.load(input);
  }

  if(input != null) {
    input.close();
  }
}
 
Developer ID: Tencent, Project: angel, Lines of code: 30, Source: ServerMatrix.java

Example 11: main

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
public static void main(String[] args) throws IOException {
    final String uri = "hdfs://master:8020/user/tom/aa.txt";
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(uri), conf);
    FSDataInputStream in = null;
    try {
        in = fs.open(new Path(uri));
        IOUtils.copyBytes(in, System.out, 4096, false);
        in.seek(0); // go back to the beginning of the file
        IOUtils.copyBytes(in, System.out, 4096, false);
    } finally {
        IOUtils.closeStream(in);
    }
}
 
Developer ID: MedusaLeee, Project: HadoopGuides, Lines of code: 15, Source: FileSystemDoubleCat.java

Example 12: readDictionary

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
public static Dictionary readDictionary(FSDataInputStream in, ColumnDescriptor column, PageHeaderWithOffset pageHeader, BytesDecompressor decompressor) throws IOException {
  in.seek(pageHeader.getOffset());
  final byte[] data = new byte[pageHeader.getPageHeader().getCompressed_page_size()];
  int read = in.read(data);
  if (read != data.length) {
    throw new IOException(format("Failed to read dictionary page, read %d bytes, expected %d", read, data.length));
  }
  final DictionaryPage dictionaryPage = new DictionaryPage(
    decompressor.decompress(BytesInput.from(data), pageHeader.getPageHeader().getUncompressed_page_size()),
    pageHeader.getPageHeader().getDictionary_page_header().getNum_values(),
    CONVERTER.getEncoding(pageHeader.getPageHeader().getDictionary_page_header().getEncoding()));
  return dictionaryPage.getEncoding().initDictionary(column, dictionaryPage);
}
 
Developer ID: dremio, Project: dremio-oss, Lines of code: 14, Source: LocalDictionariesReader.java

Example 13: Reader

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/**
 * Constructor
 * 
 * @param fin
 *          FS input stream.
 * @param fileLength
 *          Length of the corresponding file
 * @throws IOException
 */
public Reader(FSDataInputStream fin, long fileLength, Configuration conf)
    throws IOException {
  this.in = fin;
  this.conf = conf;

  // move the cursor to the beginning of the tail, containing: offset to the
  // meta block index, version and magic
  fin.seek(fileLength - Magic.size() - Version.size() - Long.SIZE
      / Byte.SIZE);
  long offsetIndexMeta = fin.readLong();
  version = new Version(fin);
  Magic.readAndVerify(fin);

  if (!version.compatibleWith(BCFile.API_VERSION)) {
    throw new RuntimeException("Incompatible BCFile fileBCFileVersion.");
  }

  // read meta index
  fin.seek(offsetIndexMeta);
  metaIndex = new MetaIndex(fin);

  // read data:BCFile.index, the data block index
  BlockReader blockR = getMetaBlock(DataIndex.BLOCK_NAME);
  try {
    dataIndex = new DataIndex(blockR);
  } finally {
    blockR.close();
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 39, Source: BCFile.java

Example 14: verifyRead

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/**
 * Verify that the read at a specific offset in a stream
 * matches that expected
 * @param stm stream
 * @param fileContents original file contents
 * @param seekOff seek offset
 * @param toRead number of bytes to read
 * @throws IOException IO problems
 */
public static void verifyRead(FSDataInputStream stm, byte[] fileContents,
                              int seekOff, int toRead) throws IOException {
  byte[] out = new byte[toRead];
  stm.seek(seekOff);
  stm.readFully(out);
  byte[] expected = Arrays.copyOfRange(fileContents, seekOff,
                                       seekOff + toRead);
  compareByteArrays(expected, out, toRead);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 19, Source: ContractTestUtils.java

Example 15: testSeek

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/**
 * Tests seek().
 */
@Test
public void testSeek() throws IOException {
  final Path testFile = new Path("/testfile+1");
  FSDataOutputStream out = hdfs.create(testFile, true);
  out.writeBytes("0123456789");
  out.close();
  FSDataInputStream in = hftpFs.open(testFile);
  in.seek(7);
  assertEquals('7', in.read());
  in.close();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 15, Source: TestHftpFileSystem.java


Note: The org.apache.hadoop.fs.FSDataInputStream.seek examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with those authors, and any distribution or use of the code should follow the corresponding project's License. Do not reproduce without permission.