

Java FSDataInputStream.readFully Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataInputStream.readFully. If you are wondering what exactly FSDataInputStream.readFully does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FSDataInputStream.


The 15 FSDataInputStream.readFully code examples below are sorted by popularity by default. You can upvote the ones you like or find useful; your ratings help the system recommend better Java code examples.
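
Before the individual examples, a minimal usage sketch may help orient you: FSDataInputStream offers two overload families of readFully. The stream-based readFully(byte[]), inherited from java.io.DataInputStream, reads from the current offset and throws EOFException if the stream ends before the buffer is filled (unlike read, which may return fewer bytes). The positional readFully(long, byte[]), from the PositionedReadable interface, reads at an absolute offset. The sketch below assumes the default Configuration resolves to a reachable FileSystem; the path /tmp/demo.bin is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadFullyDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/tmp/demo.bin"); // hypothetical test file

    try (FSDataInputStream in = fs.open(path)) {
      // Stream overload: fills the whole buffer from the current offset,
      // or throws EOFException if the file is shorter than 8 bytes.
      byte[] header = new byte[8];
      in.readFully(header);

      // Positional overload: fills the buffer from absolute offset 100
      // without moving the stream's current offset.
      byte[] chunk = new byte[16];
      in.readFully(100L, chunk);
    }
  }
}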

Example 1: verifyFile

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private void verifyFile(FileSystem dfs, Path filepath, byte[] actual,
    int size) throws IOException {
  AppendTestUtil.LOG.info("Lease for file " +  filepath + " is recovered. "
      + "Validating its contents now...");

  // verify that file-size matches
  assertTrue("File should be " + size + " bytes, but is actually " +
             " found to be " + dfs.getFileStatus(filepath).getLen() +
             " bytes",
             dfs.getFileStatus(filepath).getLen() == size);

  // verify that there is enough data to read.
  System.out.println("File size is good. Now validating sizes from datanodes...");
  FSDataInputStream stmin = dfs.open(filepath);
  stmin.readFully(0, actual, 0, size);
  stmin.close();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source file: TestLeaseRecovery2.java

Example 2: readTableDescriptor

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private static HTableDescriptor readTableDescriptor(FileSystem fs,
                                                    FileStatus status) throws IOException {
  int len = Ints.checkedCast(status.getLen());
  byte [] content = new byte[len];
  FSDataInputStream fsDataInputStream = fs.open(status.getPath());
  try {
    fsDataInputStream.readFully(content);
  } finally {
    fsDataInputStream.close();
  }
  HTableDescriptor htd = null;
  try {
    htd = HTableDescriptor.parseFrom(content);
  } catch (DeserializationException e) {
    throw new IOException("content=" + Bytes.toShort(content), e);
  }
  return htd;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source file: NamespaceUpgrade.java

Example 3: checkFiles

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/** check if the files have been copied correctly. */
public boolean checkFiles(FileSystem fs, String topdir) throws IOException {
  Path root = new Path(topdir);
  
  for (int idx = 0; idx < nFiles; idx++) {
    Path fPath = new Path(root, files[idx].getName());
    FSDataInputStream in = fs.open(fPath);
    byte[] toRead = new byte[files[idx].getSize()];
    byte[] toCompare = new byte[files[idx].getSize()];
    Random rb = new Random(files[idx].getSeed());
    rb.nextBytes(toCompare);
    in.readFully(0, toRead);
    in.close();
    for (int i = 0; i < toRead.length; i++) {
      if (toRead[i] != toCompare[i]) {
        return false;
      }
    }
    toRead = null;
    toCompare = null;
  }
  
  return true;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source file: DFSTestUtil.java

Example 4: checkFile

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source file: TestSmallBlock.java

Example 5: verifySeek

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/** test seek */
static void verifySeek(FileSystem fs, Path p, long offset, long length,
    byte[] buf, byte[] expected) throws IOException { 
  long remaining = length - offset;
  long checked = 0;
  LOG.info("XXX SEEK: offset=" + offset + ", remaining=" + remaining);

  final Ticker t = new Ticker("SEEK", "offset=%d, remaining=%d",
      offset, remaining);
  final FSDataInputStream in = fs.open(p, 64 << 10);
  in.seek(offset);
  for(; remaining > 0; ) {
    t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
    final int n = (int)Math.min(remaining, buf.length);
    in.readFully(buf, 0, n);
    checkData(offset, remaining, n, buf, expected);

    offset += n;
    remaining -= n;
    checked += n;
  }
  in.close();
  t.end(checked);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source file: TestWebHDFS.java

Example 6: getRecordReader

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
public RecordReader getRecordReader(InputSplit split, JobConf job,
  Reporter reporter) throws IOException {
  FileSplit fileSplit = (FileSplit) split;
  FileSystem fs = FileSystem.get(fileSplit.getPath().toUri(), job);
  FSDataInputStream is = fs.open(fileSplit.getPath());
  byte[] header = new byte[3];
  RecordReader reader = null;
  try {
    is.readFully(header);
  } catch (EOFException eof) {
    reader = textInputFormat.getRecordReader(split, job, reporter);
  } finally {
    is.close();
  }
  // only sniff the header if the EOF path has not already chosen a reader;
  // without this guard, a second RecordReader would be created after an EOF
  if (reader == null) {
    if (header[0] == 'S' && header[1] == 'E' && header[2] == 'Q') {
      reader = seqFileInputFormat.getRecordReader(split, job, reporter);
    } else {
      reader = textInputFormat.getRecordReader(split, job, reporter);
    }
  }
  return reader;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source file: AutoInputFormat.java

Example 7: readBytesToString

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/**
 * Read in "length" bytes, convert to an ascii string
 * @param fs filesystem
 * @param path path to read
 * @param length #of bytes to read.
 * @return the bytes read and converted to a string
 * @throws IOException IO problems
 */
public static String readBytesToString(FileSystem fs,
                                Path path,
                                int length) throws IOException {
  FSDataInputStream in = fs.open(path);
  try {
    byte[] buf = new byte[length];
    in.readFully(0, buf);
    return toChar(buf);
  } finally {
    in.close();
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 21, Source file: ContractTestUtils.java

Example 8: readFromStream

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/**
 * Reads a file trailer from the given file.
 *
 * @param istream the input stream with the ability to seek. Does not have to
 *          be buffered, as only one read operation is made.
 * @param fileSize the file size. Can be obtained using
 *          {@link org.apache.hadoop.fs.FileSystem#getFileStatus(
 *          org.apache.hadoop.fs.Path)}.
 * @return the fixed file trailer read
 * @throws IOException if failed to read from the underlying stream, or the
 *           trailer is corrupted, or the version of the trailer is
 *           unsupported
 */
public static FixedFileTrailer readFromStream(FSDataInputStream istream,
    long fileSize) throws IOException {
  int bufferSize = MAX_TRAILER_SIZE;
  long seekPoint = fileSize - bufferSize;
  if (seekPoint < 0) {
    // It is hard to imagine such a small HFile.
    seekPoint = 0;
    bufferSize = (int) fileSize;
  }

  istream.seek(seekPoint);
  ByteBuffer buf = ByteBuffer.allocate(bufferSize);
  istream.readFully(buf.array(), buf.arrayOffset(),
      buf.arrayOffset() + buf.limit());

  // Read the version from the last int of the file.
  buf.position(buf.limit() - Bytes.SIZEOF_INT);
  int version = buf.getInt();

  // Extract the major and minor versions.
  int majorVersion = extractMajorVersion(version);
  int minorVersion = extractMinorVersion(version);

  HFile.checkFormatVersion(majorVersion); // throws IAE if invalid

  int trailerSize = getTrailerSize(majorVersion);

  FixedFileTrailer fft = new FixedFileTrailer(majorVersion, minorVersion);
  fft.deserialize(new DataInputStream(new ByteArrayInputStream(buf.array(),
      buf.arrayOffset() + bufferSize - trailerSize, trailerSize)));
  return fft;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 46, Source file: FixedFileTrailer.java
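
The trailer-reading logic in Example 8 generalizes to any fixed-size footer: clamp the seek point at zero for files shorter than the footer, seek once, and readFully the rest. Below is a minimal sketch of that pattern, assuming the caller already knows the file length; the class and method names are illustrative, not part of HBase.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;

public final class FooterUtil {
  // Reads the last footerSize bytes of the stream (fewer if the file is smaller).
  static byte[] readFooter(FSDataInputStream in, long fileSize, int footerSize)
      throws IOException {
    int toRead = (int) Math.min(fileSize, footerSize); // clamp for tiny files
    in.seek(fileSize - toRead);                        // position at the footer
    byte[] footer = new byte[toRead];
    in.readFully(footer);                              // exactly one full read
    return footer;
  }
}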

Example 9: readBytesToString

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/**
 * Read in "length" bytes, convert to an ascii string
 * @param fs filesystem
 * @param path path to read
 * @param length #of bytes to read.
 * @return the bytes read and converted to a string
 * @throws IOException
 */
public static String readBytesToString(FileSystem fs,
                                Path path,
                                int length) throws IOException {
  FSDataInputStream in = fs.open(path);
  try {
    byte[] buf = new byte[length];
    in.readFully(0, buf);
    return toChar(buf);
  } finally {
    in.close();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source file: SwiftTestUtils.java

Example 10: readFile

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private byte[] readFile(Path inputPath, long len) throws Exception {
  FSDataInputStream fsIn = null;
  try {
    fsIn = fs.open(inputPath);
    // state data will not be that "long"
    byte[] data = new byte[(int) len];
    fsIn.readFully(data);
    return data;
  } finally {
    IOUtils.cleanup(LOG, fsIn);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 13, Source file: FileSystemRMStateStore.java

Example 11: readAllWithReadFully

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private static byte[] readAllWithReadFully(int totalLength, FSDataInputStream fsdis, boolean close)
    throws IOException {
  final ByteArrayOutputStream baos = new ByteArrayOutputStream();
  // Simulate reading of some data structures of known length:
  final byte[] buffer = new byte[17];
  final int times = totalLength / buffer.length;
  final int remainder = totalLength % buffer.length;
  // it would be simpler to leave the position tracking to the 
  // InputStream, but we need to check the methods #readFully(2) 
  // and #readFully(4) that receive the position as a parameter:
  int position = 0;
  try {
    // read "data structures":
    for (int i=0; i<times; i++) {
      fsdis.readFully(position, buffer);
      position += buffer.length;
      baos.write(buffer);
    }
    if (remainder > 0) {
      // read the remainder:
      fsdis.readFully(position, buffer, 0, remainder);
      position += remainder;
      baos.write(buffer, 0, remainder);
    }
    try {
      fsdis.readFully(position, buffer, 0, 1);
      assertTrue(false);
    } catch (IOException ioe) {
      // okay
    }
    assertEquals(totalLength, position);
    final byte[] result = baos.toByteArray();
    assertEquals(totalLength, result.length);
    return result;
  } finally {
    if (close) {
      fsdis.close();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 41, Source file: TestHadoopArchives.java
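
Example 11 relies on a property of the positional overloads that is worth stating explicitly: per the PositionedReadable contract, readFully(position, buffer, ...) reads at an absolute offset and does not change the stream's current offset, so positional and sequential reads can be interleaved. A small sketch demonstrating this, with a hypothetical path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PositionalOffsetDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataInputStream in = fs.open(new Path("/tmp/demo.bin"))) {
      byte[] buf = new byte[32];
      long before = in.getPos();                  // 0 right after open
      in.readFully(64L, buf);                     // positional read at offset 64
      System.out.println(in.getPos() == before);  // true: offset unchanged
    }
  }
}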

Example 12: readSplitMetaInfo

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
public static JobSplit.TaskSplitMetaInfo[] readSplitMetaInfo(
    JobID jobId, FileSystem fs, Configuration conf, Path jobSubmitDir) 
throws IOException {
  long maxMetaInfoSize = conf.getLong(MRJobConfig.SPLIT_METAINFO_MAXSIZE,
      MRJobConfig.DEFAULT_SPLIT_METAINFO_MAXSIZE);
  Path metaSplitFile = JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir);
  String jobSplitFile = JobSubmissionFiles.getJobSplitFile(jobSubmitDir).toString();
  FileStatus fStatus = fs.getFileStatus(metaSplitFile);
  if (maxMetaInfoSize > 0 && fStatus.getLen() > maxMetaInfoSize) {
    throw new IOException("Split metadata size exceeded " +
        maxMetaInfoSize +". Aborting job " + jobId);
  }
  FSDataInputStream in = fs.open(metaSplitFile);
  byte[] header = new byte[JobSplit.META_SPLIT_FILE_HEADER.length];
  in.readFully(header);
  if (!Arrays.equals(JobSplit.META_SPLIT_FILE_HEADER, header)) {
    throw new IOException("Invalid header on split file");
  }
  int vers = WritableUtils.readVInt(in);
  if (vers != JobSplit.META_SPLIT_VERSION) {
    in.close();
    throw new IOException("Unsupported split version " + vers);
  }
  int numSplits = WritableUtils.readVInt(in); //TODO: check for insane values
  JobSplit.TaskSplitMetaInfo[] allSplitMetaInfo = 
    new JobSplit.TaskSplitMetaInfo[numSplits];
  for (int i = 0; i < numSplits; i++) {
    JobSplit.SplitMetaInfo splitMetaInfo = new JobSplit.SplitMetaInfo();
    splitMetaInfo.readFields(in);
    JobSplit.TaskSplitIndex splitIndex = new JobSplit.TaskSplitIndex(
        jobSplitFile, 
        splitMetaInfo.getStartOffset());
    allSplitMetaInfo[i] = new JobSplit.TaskSplitMetaInfo(splitIndex, 
        splitMetaInfo.getLocations(), 
        splitMetaInfo.getInputDataLength());
  }
  in.close();
  return allSplitMetaInfo;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 40, Source file: SplitMetaInfoReader.java

Example 13: getFileContentsUsingDfs

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private byte[] getFileContentsUsingDfs(String fileName, int len)
    throws Exception {
  final FSDataInputStream in = hdfs.open(new Path(fileName));
  final byte[] ret = new byte[len];
  in.readFully(ret);
  try {
    in.readByte();
    Assert.fail("expected end of file");
  } catch (EOFException e) {
    // expected. Unfortunately there is no associated message to check
  }
  in.close();
  return ret;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 15, Source file: TestRpcProgramNfs3.java

Example 14: testOverwrite

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
@Test(timeout=100000)
public void testOverwrite() {
  byte[] contents1 = "contents1".getBytes();
  byte[] contents2 = "contents2".getBytes();
  Assert.assertEquals(contents1.length, contents2.length);
  
  try {
    addEntries(listFile, "srcdir");
    createWithContents("srcdir/file1", contents1);
    createWithContents("dstdir/file1", contents2);
    
    Path target = new Path(root + "/dstdir");
    runTest(listFile, target, false, false, false, true);
    
    checkResult(target, 1, "file1");
    
    // make sure dstdir/file1 has been overwritten with the contents
    // of srcdir/file1
    FSDataInputStream is = fs.open(new Path(root + "/dstdir/file1"));
    byte[] dstContents = new byte[contents1.length];
    is.readFully(dstContents);
    is.close();
    Assert.assertArrayEquals(contents1, dstContents);
  } catch (IOException e) {
    LOG.error("Exception encountered while running distcp", e);
    Assert.fail("distcp failure");
  } finally {
    TestDistCpUtils.delete(fs, root);
    TestDistCpUtils.delete(fs, "target/tmp1");
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 32, Source file: TestIntegration.java

Example 15: readFile

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private static byte [] readFile(final FileSystem fs, final Path path) throws IOException {
  FSDataInputStream tmpIn = fs.open(path);
  try {
    // available() is only an estimate of the remaining bytes; it suffices for
    // the small files this utility reads, but it is not a general pattern
    byte [] rawData = new byte[tmpIn.available()];
    tmpIn.readFully(rawData);
    return rawData;
  } finally {
    tmpIn.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 11, Source file: RegionSplitter.java


Note: The org.apache.hadoop.fs.FSDataInputStream.readFully examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by the community, and copyright of the source code remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce this compilation without permission.