

Java IOUtils.readFully Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.IOUtils.readFully. If you are wondering what IOUtils.readFully does, how to call it, or where it is used in practice, the hand-picked code samples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.io.IOUtils.


Fifteen code examples of IOUtils.readFully are shown below, sorted by popularity by default.
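Before the examples, a quick note on the method's contract, with a minimal sketch of my own (not taken from the projects below): unlike InputStream.read, which may return fewer bytes than requested, IOUtils.readFully(in, buf, off, len) keeps reading until exactly len bytes have been stored starting at buf[off], and throws an EOFException if the stream ends first.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.io.IOUtils;

public class ReadFullyDemo {
  public static void main(String[] args) throws IOException {
    InputStream in = new ByteArrayInputStream("hello world".getBytes());
    byte[] buf = new byte[5];
    // Fills buf completely or throws EOFException; a plain in.read()
    // would be allowed to return after fewer than 5 bytes.
    IOUtils.readFully(in, buf, 0, buf.length);
    System.out.println(new String(buf)); // prints "hello"
  }
}

This fill-or-fail guarantee is why every example below can size a buffer up front and trust that it is completely populated afterwards.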

Example 1: readIntoArray

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
private int readIntoArray(byte[] to, int offset, Dictionary dict) throws IOException {
  byte status = (byte)in.read();
  if (status == Dictionary.NOT_IN_DICTIONARY) {
    // status byte indicating that data to be read is not in dictionary.
    // if this isn't in the dictionary, we need to add to the dictionary.
    int length = StreamUtils.readRawVarint32(in);
    IOUtils.readFully(in, to, offset, length);
    dict.addEntry(to, offset, length);
    return length;
  } else {
    // the status byte also acts as the higher order byte of the dictionary entry.
    short dictIdx = StreamUtils.toShort(status, (byte)in.read());
    byte[] entry = dict.getEntry(dictIdx);
    if (entry == null) {
      throw new IOException("Missing dictionary entry for index " + dictIdx);
    }
    // now we write the uncompressed value.
    Bytes.putBytes(to, offset, entry, 0, entry.length);
    return entry.length;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: WALCellCodec.java
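To make the wire format above easier to follow, here is a sketch of the complementary writer logic. It is my own illustration with hypothetical helpers, not the actual HBase encoder: an entry is either a NOT_IN_DICTIONARY marker byte followed by a varint length and the raw bytes, or a two-byte big-endian index whose high-order byte doubles as the status byte.

import java.io.DataOutputStream;
import java.io.IOException;

public class DictEncodingSketch {
  static final byte NOT_IN_DICTIONARY = -1; // assumed marker value

  // Hypothetical writer mirroring readIntoArray(): either the marker plus
  // a varint length plus the raw bytes, or the two-byte dictionary index.
  static void writeEntry(DataOutputStream out, byte[] data, short dictIdx)
      throws IOException {
    if (dictIdx < 0) { // not yet in the dictionary
      out.writeByte(NOT_IN_DICTIONARY);
      writeVarint32(out, data.length);
      out.write(data);
    } else {
      out.writeShort(dictIdx); // high byte acts as the status byte
    }
  }

  // Protobuf-style varint, which is what StreamUtils.readRawVarint32
  // appears to decode.
  static void writeVarint32(DataOutputStream out, int v) throws IOException {
    while ((v & ~0x7F) != 0) {
      out.writeByte((v & 0x7F) | 0x80);
      v >>>= 7;
    }
    out.writeByte(v);
  }
}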

Example 2: wrapIfNecessary

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
 * Wraps a given FSDataInputStream with a CryptoInputStream. The size of the
 * data buffer required for the stream is specified by the
 * "mapreduce.job.encrypted-intermediate-data.buffer.kb" job configuration
 * variable.
 *
 * @param conf the job configuration
 * @param in the stream to wrap
 * @return the wrapped stream, or the original stream when encrypted
 *         intermediate data is disabled
 * @throws IOException if the header or IV cannot be read from the stream
 */
public static FSDataInputStream wrapIfNecessary(Configuration conf,
    FSDataInputStream in) throws IOException {
  if (isEncryptedSpillEnabled(conf)) {
    CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
    int bufferSize = getBufferSize(conf);
    // These 8 bytes are not used here, but they still have to be consumed,
    // since the corresponding output stream always writes them.
    IOUtils.readFully(in, new byte[8], 0, 8);
    byte[] iv = 
        new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
    IOUtils.readFully(in, iv, 0, 
        cryptoCodec.getCipherSuite().getAlgorithmBlockSize());
    if (LOG.isDebugEnabled()) {
      LOG.debug("IV read from Stream ["
          + Base64.encodeBase64URLSafeString(iv) + "]");
    }
    return new CryptoFSDataInputStream(in, cryptoCodec, bufferSize,
        getEncryptionKey(), iv);
  } else {
    return in;
  }
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: CryptoUtils.java
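The wrapping is a no-op unless encrypted intermediate data is enabled in the job configuration. A minimal usage fragment follows, a sketch of my own that assumes the standard MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA key is what isEncryptedSpillEnabled checks, and where fs and spillFile are placeholders for an existing FileSystem and Path:

Configuration conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
FSDataInputStream raw = fs.open(spillFile);
// Returns a CryptoFSDataInputStream positioned just past the stored IV;
// with encryption disabled it would return raw unchanged.
FSDataInputStream in = CryptoUtils.wrapIfNecessary(conf, raw);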

Example 3: checkFile

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/** Check that the given file exists with the given contents */
private static void checkFile(File f, String expectedContents) 
    throws IOException {
  FileInputStream fi = new FileInputStream(f);
  int len = expectedContents.length();
  byte[] b = new byte[len];
  try {
    IOUtils.readFully(fi, b, 0, len);
  } catch (IOException ie) {
    fail("Reading "+f.getName()+" failed with "+ie.getMessage());
  } finally {
    fi.close(); // NB: leaving f unclosed prevents unmount
  }
  String s = new String(b, 0, len);
  assertEquals("File content differs", expectedContents, s);
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestFuseDFS.java

Example 4: seekReadFile

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
private void seekReadFile(FileSystem fileSys, Path name) throws IOException {
  FSDataInputStream stm = fileSys.open(name, 4096);
  byte[] expected = new byte[ONEMB];
  Random rand = new Random(seed);
  rand.nextBytes(expected);
  
  // First read 128 bytes to set count in BufferedInputStream
  byte[] actual = new byte[128];
  stm.read(actual, 0, actual.length);
  // Now read a byte array that is bigger than the internal buffer
  actual = new byte[100000];
  IOUtils.readFully(stm, actual, 0, actual.length);
  checkAndEraseData(actual, 128, expected, "First Read Test");
  // now do a small seek, within the range that is already read
  stm.seek(96036); // 4 byte seek
  actual = new byte[128];
  IOUtils.readFully(stm, actual, 0, actual.length);
  checkAndEraseData(actual, 96036, expected, "Seek Bug");
  // all done
  stm.close();
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestSeekBug.java

Example 5: readFile

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
public byte[] readFile(Path path, int len) throws IOException {
  DataInputStream dis = fc.open(path);
  byte[] buffer = new byte[len];
  IOUtils.readFully(dis, buffer, 0, len);
  dis.close();
  return buffer;
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: FileContextTestWrapper.java

Example 6: readAndCompare

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
private void readAndCompare(FSDataInputStream in, int position, int len)
    throws IOException {
  byte[] b = new byte[len];
  in.seek(position);
  IOUtils.readFully(in, b, 0, b.length);

  for (int i = 0; i < b.length; i++) {
    assertEquals(expected[position + i], b[i]);
  }
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: TestFSInputChecker.java

Example 7: compareFiles

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
public static boolean compareFiles(FileStatus f1, FileStatus f2) throws Exception {
  byte[] original = new byte[(int)f1.getLen()];
  byte[] withDict = new byte[(int)f2.getLen()];

  try (FSDataInputStream in1 = localFs.open(f1.getPath());
       FSDataInputStream in2 = localFs.open(f2.getPath())) {
    IOUtils.readFully(in1, original, 0, original.length);
    IOUtils.readFully(in2, withDict, 0, withDict.length);
  }

  return Arrays.equals(original, withDict);
}
 
Developer: dremio, Project: dremio-oss, Lines: 12, Source: BaseTestQuery.java

Example 8: doRead

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
private int doRead(byte[] b, int off, int len) throws IOException {
  
  // If we are trying to read past the end of data, just read
  // the left over data
  if (currentOffset + len > dataLength) {
    len = (int) dataLength - (int)currentOffset;
  }
  
  int bytesRead = in.read(b, off, len);

  if (bytesRead < 0) {
    throw new ChecksumException("Checksum Error", 0);
  }
  
  sum.update(b, off, bytesRead);

  currentOffset += bytesRead;

  if (disableChecksumValidation) {
    return bytesRead;
  }
  
  if (currentOffset == dataLength) {
    // The trailing checksumSize bytes are the checksum. Strip them and verify.
    csum = new byte[checksumSize];
    IOUtils.readFully(in, csum, 0, checksumSize);
    if (!sum.compare(csum, 0)) {
      throw new ChecksumException("Checksum Error", 0);
    }
  }
  return bytesRead;
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: IFileInputStream.java
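For context, the stream that doRead() consumes is laid out as payload bytes followed by a trailing checksum. Below is a self-contained sketch of that layout, my own illustration using java.util.zip.CRC32 for concreteness rather than the DataChecksum object the code above delegates to:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.zip.CRC32;

public class TrailerLayoutDemo {
  public static void main(String[] args) throws IOException {
    byte[] payload = "some data".getBytes();
    CRC32 crc = new CRC32();
    crc.update(payload, 0, payload.length);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    out.write(payload);
    // Append the 4-byte checksum after the payload, as the writer does.
    out.write(ByteBuffer.allocate(4).putInt((int) crc.getValue()).array());
    byte[] stream = out.toByteArray();
    // A reader like doRead() consumes payload.length bytes, then uses
    // IOUtils.readFully to grab the trailer and compares it with the
    // checksum it computed while reading.
    System.out.println("total=" + stream.length
        + " payload=" + payload.length + " trailer=4");
  }
}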

Example 9: readFile

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
public static byte[] readFile(FileContext fc, Path path, int len)
    throws IOException {
  DataInputStream dis = fc.open(path);
  byte[] buffer = new byte[len];
  IOUtils.readFully(dis, buffer, 0, len);
  dis.close();
  return buffer;
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: FileContextTestHelper.java

Example 10: calcPartialBlockChecksum

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
private MD5Hash calcPartialBlockChecksum(ExtendedBlock block,
    long requestLength, DataChecksum checksum, DataInputStream checksumIn)
    throws IOException {
  final int bytesPerCRC = checksum.getBytesPerChecksum();
  final int csize = checksum.getChecksumSize();
  final byte[] buffer = new byte[4*1024];
  MessageDigest digester = MD5Hash.getDigester();

  long remaining = requestLength / bytesPerCRC * csize;
  for (int toDigest = 0; remaining > 0; remaining -= toDigest) {
    toDigest = checksumIn.read(buffer, 0,
        (int) Math.min(remaining, buffer.length));
    if (toDigest < 0) {
      break;
    }
    digester.update(buffer, 0, toDigest);
  }
  
  int partialLength = (int) (requestLength % bytesPerCRC);
  if (partialLength > 0) {
    byte[] buf = new byte[partialLength];
    final InputStream blockIn = datanode.data.getBlockInputStream(block,
        requestLength - partialLength);
    try {
      // Get the CRC of the partialLength.
      IOUtils.readFully(blockIn, buf, 0, partialLength);
    } finally {
      IOUtils.closeStream(blockIn);
    }
    checksum.update(buf, 0, partialLength);
    byte[] partialCrc = new byte[csize];
    checksum.writeValue(partialCrc, 0, true);
    digester.update(partialCrc);
  }
  return new MD5Hash(digester.digest());
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: DataXceiver.java

Example 11: next

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
@Override
public boolean next(GridmixKey key, GridmixRecord val) throws IOException {
  if (!factory.next(key, val)) {
    return false;
  }
  // Drain exactly as many bytes from the source as the synthesized
  // key/value pair occupies, discarding them into the scratch buffer.
  for (int len = (null == key ? 0 : key.getSize()) + val.getSize();
       len > 0; len -= buf.length) {
    IOUtils.readFully(src, buf, 0, Math.min(buf.length, len));
  }
  return true;
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: ReadRecordFactory.java

Example 12: loadINodeSection

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
private static byte[][] loadINodeSection(InputStream in)
        throws IOException {
  FsImageProto.INodeSection s = FsImageProto.INodeSection
      .parseDelimitedFrom(in);
  LOG.info("Loading " + s.getNumInodes() + " inodes.");
  final byte[][] inodes = new byte[(int) s.getNumInodes()][];

  for (int i = 0; i < s.getNumInodes(); ++i) {
    // readRawVarint32 takes the first byte as a separate argument and
    // parses the remaining varint bytes from the stream.
    int size = CodedInputStream.readRawVarint32(in.read(), in);
    byte[] bytes = new byte[size];
    IOUtils.readFully(in, bytes, 0, size);
    inodes[i] = bytes;
  }
  LOG.debug("Sorting inodes");
  Arrays.sort(inodes, INODE_BYTES_COMPARATOR);
  LOG.debug("Finished sorting inodes");
  return inodes;
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: FSImageLoader.java

Example 13: testShortCircuitTraceHooks

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
@Test
public void testShortCircuitTraceHooks() throws IOException {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
      SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
      TestTracing.SetSpanReceiver.class.getName());
  conf.setLong("dfs.blocksize", 100 * 1024);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      "testShortCircuitTraceHooks._PORT");
  conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C");
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .build();
  dfs = cluster.getFileSystem();

  try {
    DFSTestUtil.createFile(dfs, TEST_PATH, TEST_LENGTH, (short)1, 5678L);

    TraceScope ts = Trace.startSpan("testShortCircuitTraceHooks", Sampler.ALWAYS);
    FSDataInputStream stream = dfs.open(TEST_PATH);
    byte[] buf = new byte[TEST_LENGTH];
    IOUtils.readFully(stream, buf, 0, TEST_LENGTH);
    stream.close();
    ts.close();

    String[] expectedSpanNames = {
      "OpRequestShortCircuitAccessProto",
      "ShortCircuitShmRequestProto"
    };
    TestTracing.assertSpanNamesFound(expectedSpanNames);
  } finally {
    dfs.close();
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 39, Source: TestTracingShortCircuitLocalRead.java

Example 14: read

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
@Override
public int read(byte[] b, int off, int len) throws IOException {
  int kvread = 0;
  while (kvread < len) {
    // Current source is exhausted; advance to the next input file.
    if (curlen <= 0) {
      nextSource();
      continue;
    }
    // Read no more than the current source has left. Note the destination
    // offset is off + kvread, so the caller's offset is honored.
    final int srcRead = (int) Math.min(len - kvread, curlen);
    IOUtils.readFully(input, b, off + kvread, srcRead);
    curlen -= srcRead;
    kvread += srcRead;
  }
  return kvread;
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: FileQueue.java

Example 15: scan

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
 * Scan dbDirectory to read kvstore definitions and load all stores in memory.
 */
public void scan() throws Exception {
  final FileSystem fs = FileSystem.getLocal(new Configuration());
  final FileStatus[] metaDataFiles = fs.listStatus(new Path(metaDataFilesDir.getPath()), METADATA_FILES_GLOB);
  for (FileStatus fileStatus : metaDataFiles) {
    final byte[] headerBytes = new byte[(int) fileStatus.getLen()];
    // Close the opened stream deterministically via try-with-resources.
    try (FSDataInputStream in = fs.open(fileStatus.getPath())) {
      IOUtils.readFully(in, headerBytes, 0, headerBytes.length);
    }
    final KVStoreInfo metadata = new KVStoreInfo();
    ProtostuffUtil.fromJSON(headerBytes, metadata, KVStoreInfo.getSchema(), false);

    final StoreBuilderConfig storeBuilderConfig = DataStoreUtils.toBuilderConfig(metadata);
    getOrCreateStore(storeBuilderConfig);
  }
}
 
Developer: dremio, Project: dremio-oss, Lines: 17, Source: CoreStoreProviderImpl.java


Note: The org.apache.hadoop.io.IOUtils.readFully examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.