

Java IOUtils.skipFully Method Code Examples

This article collects typical usage examples of the org.apache.hadoop.io.IOUtils.skipFully method in Java. If you are wondering how IOUtils.skipFully is used in practice, how to call it, or what real-world usages look like, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.IOUtils.


A total of 12 code examples of the IOUtils.skipFully method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
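Before diving into the examples, a quick note on the method's contract: IOUtils.skipFully(InputStream in, long len) keeps skipping until exactly len bytes have been consumed and fails with an IOException if the stream ends first, unlike InputStream.skip, which is allowed to skip fewer bytes than requested. The minimal sketch below shows the typical call pattern; the input bytes and header size are invented purely for illustration.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.io.IOUtils;

public class SkipFullyDemo {
  public static void main(String[] args) throws IOException {
    // A toy stream with a 4-byte "header" followed by a payload.
    byte[] data = new byte[] {0, 0, 0, 0, 'H', 'D', 'F', 'S'};
    InputStream in = new ByteArrayInputStream(data);

    // Unlike in.skip(4), skipFully either skips all 4 bytes or throws an
    // IOException if the stream ends before that many bytes are available.
    IOUtils.skipFully(in, 4);

    byte[] payload = new byte[4];
    int n = in.read(payload);
    System.out.println(new String(payload, 0, n));  // prints "HDFS"
  }
}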

Example 1: write

import org.apache.hadoop.io.IOUtils; // import the package/class the method depends on
@Override
public void write(OutputStream os) throws IOException {
  IOUtils.skipFully(is, offset);
  if (len == -1) {
    IOUtils.copyBytes(is, os, 4096, true);
  } else {
    IOUtils.copyBytes(is, os, len, true);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 10, Source file: InputStreamEntity.java
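As a self-contained variant of example 1, the sketch below wraps the same "skip to the offset, then copy a range" pattern into a runnable method; the data, offset, and length are invented, and the two IOUtils.copyBytes overloads are used as in the example (buffer-size overload for copy-to-EOF, count overload for a fixed length).

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.io.IOUtils;

public class ByteRangeCopyDemo {
  /** Copies len bytes starting at offset, or copies to EOF if len == -1. */
  static void copyRange(InputStream is, OutputStream os, long offset, long len)
      throws IOException {
    IOUtils.skipFully(is, offset);           // position the stream at the range start
    if (len == -1) {
      IOUtils.copyBytes(is, os, 4096, true); // copy to EOF with a 4 KB buffer
    } else {
      IOUtils.copyBytes(is, os, len, true);  // copy exactly len bytes
    }
  }

  public static void main(String[] args) throws IOException {
    byte[] data = "0123456789".getBytes();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    copyRange(new ByteArrayInputStream(data), out, 2, 3);
    System.out.println(out.toString());      // prints "234"
  }
}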

Example 2: verifyTerminator

import org.apache.hadoop.io.IOUtils; // import the package/class the method depends on
private void verifyTerminator() throws IOException {
  /** The end of the edit log should contain only 0x00 or 0xff bytes.
   * If it contains other bytes, the log itself may be corrupt.
   * It is important to check this; if we don't, a stray OP_INVALID byte 
   * could make us stop reading the edit log halfway through, and we'd never
   * know that we had lost data.
   */
  byte[] buf = new byte[4096];
  limiter.clearLimit();
  int numRead = -1, idx = 0;
  while (true) {
    try {
      numRead = -1;
      idx = 0;
      numRead = in.read(buf);
      if (numRead == -1) {
        return;
      }
      while (idx < numRead) {
        if ((buf[idx] != (byte)0) && (buf[idx] != (byte)-1)) {
          throw new IOException("Read extra bytes after " +
            "the terminator!");
        }
        idx++;
      }
    } finally {
      // After reading each group of bytes, we reposition the mark one
      // byte before the next group.  Similarly, if there is an error, we
      // want to reposition the mark one byte before the error
      if (numRead != -1) { 
        in.reset();
        IOUtils.skipFully(in, idx);
        in.mark(buf.length + 1);
        IOUtils.skipFully(in, 1);
      }
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 39, Source file: FSEditLogOp.java
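Example 2 depends on the mark/reset support of the underlying stream: after each group of bytes is checked, the reader rewinds to the mark and uses skipFully to land on an exact byte position, so that the mark ends up just before the byte that stopped the scan. A minimal sketch of that reposition-by-skipping idiom on a plain BufferedInputStream follows; the input bytes and offsets are invented for illustration.

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;

import org.apache.hadoop.io.IOUtils;

public class MarkSkipDemo {
  public static void main(String[] args) throws IOException {
    byte[] data = new byte[] {0, 0, 0, 7, 0, 0};            // toy input
    BufferedInputStream in =
        new BufferedInputStream(new ByteArrayInputStream(data));

    in.mark(data.length);            // remember the current position
    byte[] buf = new byte[4];
    int numRead = in.read(buf);      // inspect a group of bytes
    int idx = 0;
    while (idx < numRead && buf[idx] == 0) {
      idx++;                         // idx now points at the first non-zero byte
    }

    // Rewind to the mark and advance to the exact byte that stopped the scan,
    // so a later reader starts right at the suspicious position.
    in.reset();
    IOUtils.skipFully(in, idx);
    System.out.println("next byte: " + in.read());  // prints 7
  }
}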

Example 3: scanOp

import org.apache.hadoop.io.IOUtils; // import the package/class the method depends on
/**
 * Similar to decodeOp(), but instead of doing the real decoding, we skip
 * over the op's content when the edit log layout records each op's length.
 * @return the last txid of the segment, or INVALID_TXID on exception
 */
public long scanOp() throws IOException {
  if (supportEditLogLength) {
    limiter.setLimit(maxOpSize);
    in.mark(maxOpSize);

    final byte opCodeByte;
    try {
      opCodeByte = in.readByte(); // op code
    } catch (EOFException e) {
      return HdfsConstants.INVALID_TXID;
    }

    FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
    if (opCode == OP_INVALID) {
      verifyTerminator();
      return HdfsConstants.INVALID_TXID;
    }

    int length = in.readInt(); // read the length of the op
    long txid = in.readLong(); // read the txid

    // skip the remaining content
    IOUtils.skipFully(in, length - 8); 
    // TODO: do we want to verify checksum for JN? For now we don't.
    return txid;
  } else {
    FSEditLogOp op = decodeOp();
    return op == null ? HdfsConstants.INVALID_TXID : op.getTransactionId();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 36, Source file: FSEditLogOp.java
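The arithmetic worth noting in scanOp is that the length field counts the bytes that follow it, and the 8-byte transaction id has already been read, so only length - 8 bytes remain to skip. The sketch below applies the same "read the length, read the fields you need, skip the rest" pattern to a hypothetical length-prefixed record format (not the real edit-log layout).

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.IOUtils;

public class ScanRecordDemo {
  // Returns the id of a record while skipping the rest of its payload.
  static long scanRecord(DataInputStream in) throws IOException {
    int length = in.readInt();          // bytes that follow the length field
    long id = in.readLong();            // 8 of those bytes are the id
    IOUtils.skipFully(in, length - 8);  // skip the remaining payload
    return id;
  }

  public static void main(String[] args) throws IOException {
    // Build a toy record: [length][id][payload...]
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    byte[] payload = new byte[] {1, 2, 3, 4, 5};
    out.writeInt(8 + payload.length);   // length = id (8 bytes) + payload
    out.writeLong(42L);
    out.write(payload);

    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    System.out.println("record id: " + scanRecord(in));  // prints 42
  }
}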

Example 4: checkUnsupportedMethod

import org.apache.hadoop.io.IOUtils; // import the package/class the method depends on
private boolean checkUnsupportedMethod(FileSystem fs, Path file,
                                         byte[] expected, int readOffset) throws IOException {
  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(file);
  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
  IOUtils.skipFully(stm, readOffset);
  try {
    stm.read(actual);
  } catch(UnsupportedOperationException unex) {
    return true;
  }
  return false;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 13, Source file: TestShortCircuitLocalRead.java

Example 5: getBlockInputStream

import org.apache.hadoop.io.IOUtils; // import the package/class the method depends on
@Override // FsDatasetSpi
public synchronized InputStream getBlockInputStream(ExtendedBlock b,
    long seekOffset) throws IOException {
  InputStream result = getBlockInputStream(b);
  IOUtils.skipFully(result, seekOffset);
  return result;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source file: SimulatedFSDataset.java

Example 6: testSkip1

import org.apache.hadoop.io.IOUtils; // import the package/class the method depends on
private void testSkip1(int skippedBytes) 
throws Exception {
  long oldPos = stm.getPos();
  IOUtils.skipFully(stm, skippedBytes);
  long newPos = oldPos + skippedBytes;
  assertEquals(stm.getPos(), newPos);
  stm.readFully(actual);
  checkAndEraseData(actual, (int)newPos, expected, "Read Sanity Test");
}
 
Developer ID: naver, Project: hadoop, Lines of code: 10, Source file: TestFSInputChecker.java

Example 7: BlockReaderLocalLegacy

import org.apache.hadoop.io.IOUtils; // import the package/class the method depends on
private BlockReaderLocalLegacy(DFSClient.Conf conf, String hdfsfile,
    ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
    long length, BlockLocalPathInfo pathinfo, DataChecksum checksum,
    boolean verifyChecksum, FileInputStream dataIn, long firstChunkOffset,
    FileInputStream checksumIn) throws IOException {
  this.filename = hdfsfile;
  this.checksum = checksum;
  this.verifyChecksum = verifyChecksum;
  this.startOffset = Math.max(startOffset, 0);
  this.blockId = block.getBlockId();

  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();

  this.dataIn = dataIn;
  this.checksumIn = checksumIn;
  this.offsetFromChunkBoundary = (int) (startOffset-firstChunkOffset);

  int chunksPerChecksumRead = getSlowReadBufferNumChunks(
      conf.shortCircuitBufferSize, bytesPerChecksum);
  slowReadBuff = bufferPool.getBuffer(bytesPerChecksum * chunksPerChecksumRead);
  checksumBuff = bufferPool.getBuffer(checksumSize * chunksPerChecksumRead);
  // Initially the buffers have nothing to read.
  slowReadBuff.flip();
  checksumBuff.flip();
  boolean success = false;
  try {
    // Skip both input streams to beginning of the chunk containing startOffset
    IOUtils.skipFully(dataIn, firstChunkOffset);
    if (checksumIn != null) {
      long checkSumOffset = (firstChunkOffset / bytesPerChecksum) * checksumSize;
      IOUtils.skipFully(checksumIn, checkSumOffset);
    }
    success = true;
  } finally {
    if (!success) {
      bufferPool.returnBuffer(slowReadBuff);
      bufferPool.returnBuffer(checksumBuff);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 42, Source file: BlockReaderLocalLegacy.java
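The checksum-stream offset in example 7 follows from each chunk of bytesPerChecksum data bytes carrying exactly one checksum of checksumSize bytes: firstChunkOffset / bytesPerChecksum gives the index of the chunk containing the start offset, and multiplying by checksumSize turns that index into a byte offset in the checksum stream. A worked example with invented numbers:

public class ChecksumOffsetDemo {
  public static void main(String[] args) {
    // Invented values, purely to illustrate the arithmetic used in example 7.
    long firstChunkOffset = 1536;   // data offset of the first chunk to read
    int bytesPerChecksum  = 512;    // one checksum covers 512 data bytes
    int checksumSize      = 4;      // e.g. a CRC32 is 4 bytes

    long chunkIndex     = firstChunkOffset / bytesPerChecksum;   // 3
    long checkSumOffset = chunkIndex * checksumSize;             // 12

    System.out.println("skip " + checkSumOffset + " bytes in the checksum stream");
  }
}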

Example 8: nextOpImpl

import org.apache.hadoop.io.IOUtils; // import the package/class the method depends on
private FSEditLogOp nextOpImpl(boolean skipBrokenEdits) throws IOException {
  FSEditLogOp op = null;
  switch (state) {
  case UNINIT:
    try {
      init(true);
    } catch (Throwable e) {
      LOG.error("caught exception initializing " + this, e);
      if (skipBrokenEdits) {
        return null;
      }
      Throwables.propagateIfPossible(e, IOException.class);
    }
    Preconditions.checkState(state != State.UNINIT);
    return nextOpImpl(skipBrokenEdits);
  case OPEN:
    op = reader.readOp(skipBrokenEdits);
    if ((op != null) && (op.hasTransactionId())) {
      long txId = op.getTransactionId();
      if ((txId >= lastTxId) &&
          (lastTxId != HdfsConstants.INVALID_TXID)) {
        //
        // Sometimes, the NameNode crashes while it's writing to the
        // edit log.  In that case, you can end up with an unfinalized edit log
        // which has some garbage at the end.
        // JournalManager#recoverUnfinalizedSegments will finalize these
        // unfinished edit logs, giving them a defined final transaction 
        // ID.  Then they will be renamed, so that any subsequent
        // readers will have this information.
        //
        // Since there may be garbage at the end of these "cleaned up"
        // logs, we want to be sure to skip it here if we've read everything
        // we were supposed to read out of the stream.
        // So we force an EOF on all subsequent reads.
        //
        long skipAmt = log.length() - tracker.getPos();
        if (skipAmt > 0) {
          if (LOG.isDebugEnabled()) {
              LOG.debug("skipping " + skipAmt + " bytes at the end " +
                "of edit log  '" + getName() + "': reached txid " + txId +
                " out of " + lastTxId);
          }
          tracker.clearLimit();
          IOUtils.skipFully(tracker, skipAmt);
        }
      }
    }
    break;
  case CLOSED:
    break; // return null
  }
  return op;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 54, Source file: EditLogFileInputStream.java

Example 9: testClientMmapDisable

import org.apache.hadoop.io.IOUtils; // import the package/class the method depends on
@Test
public void testClientMmapDisable() throws Exception {
  HdfsConfiguration conf = initZeroCopyTest();
  conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, false);
  MiniDFSCluster cluster = null;
  final Path TEST_PATH = new Path("/a");
  final int TEST_FILE_LENGTH = 16385;
  final int RANDOM_SEED = 23453;
  final String CONTEXT = "testClientMmapDisable";
  FSDataInputStream fsIn = null;
  DistributedFileSystem fs = null;
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);

  try {
    // With DFS_CLIENT_MMAP_ENABLED set to false, we should not do memory
    // mapped reads.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH,
        TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    fsIn = fs.open(TEST_PATH);
    try {
      fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.fail("expected zero-copy read to fail when client mmaps " +
          "were disabled.");
    } catch (UnsupportedOperationException e) {
    }
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }

  fsIn = null;
  fs = null;
  cluster = null;
  try {
    // Now try again with DFS_CLIENT_MMAP_CACHE_SIZE == 0.  It should work.
    conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, true);
    conf.setInt(DFS_CLIENT_MMAP_CACHE_SIZE, 0);
    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH,
        TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    fsIn = fs.open(TEST_PATH);
    ByteBuffer buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    fsIn.releaseBuffer(buf);
    // Test EOF behavior
    IOUtils.skipFully(fsIn, TEST_FILE_LENGTH - 1);
    buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(null, buf);
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 63, Source file: TestEnhancedByteBufferAccess.java

Example 10: checkFileContent

import org.apache.hadoop.io.IOUtils; // import the package/class the method depends on
/** Check file content, reading as user {@code readingUser} */
static void checkFileContent(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext getClientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertFalse(getClientContext.getDisableLegacyBlockReaderLocal());
  }
  
  FSDataInputStream stm = fs.open(name);
  byte[] actual = new byte[expected.length-readOffset];
  stm.readFully(readOffset, actual);
  checkData(actual, readOffset, expected, "Read 2");
  stm.close();
  // Now read using a different API.
  actual = new byte[expected.length-readOffset];
  stm = fs.open(name);
  IOUtils.skipFully(stm, readOffset);
  //Read a small number of bytes first.
  int nread = stm.read(actual, 0, 3);
  nread += stm.read(actual, nread, 2);
  //Read across chunk boundary
  nread += stm.read(actual, nread, 517);
  checkData(actual, readOffset, expected, nread, "A few bytes");
  //Now read rest of it
  while (nread < actual.length) {
    int nbytes = stm.read(actual, nread, actual.length - nread);
    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(actual, readOffset, expected, "Read 3");
  
  if (legacyShortCircuitFails) {
    assertTrue(getClientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 43, Source file: TestShortCircuitLocalRead.java

Example 11: checkFileContentDirect

import org.apache.hadoop.io.IOUtils; // import the package/class the method depends on
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
    int readOffset, String readingUser, Configuration conf,
    boolean legacyShortCircuitFails)
    throws IOException, InterruptedException {
  // Ensure short circuit is enabled
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext clientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  
  HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);

  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

  IOUtils.skipFully(stm, readOffset);

  actual.limit(3);

  //Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);

  // Read across chunk boundary
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
      "A few bytes");
  //Now read rest of it
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);

    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 47, Source file: TestShortCircuitLocalRead.java

Example 12: readResponse

import org.apache.hadoop.io.IOUtils; // import the package/class the method depends on
protected void readResponse() {
  if (shouldCloseConnection.get()) return;
  Call call = null;
  boolean expectedCall = false;
  try {
    // See HBaseServer.Call.setResponse for where we write out the response.
    // Total size of the response.  Unused.  But have to read it in anyways.
    int totalSize = in.readInt();

    // Read the header
    ResponseHeader responseHeader = ResponseHeader.parseDelimitedFrom(in);
    int id = responseHeader.getCallId();
    call = calls.remove(id); // call.done have to be set before leaving this method
    expectedCall = (call != null && !call.done);
    if (!expectedCall) {
      // So we got a response for which we have no corresponding 'call' here on the client-side.
      // We probably timed out waiting, cleaned up all references, and now the server decides
      // to return a response.  There is nothing we can do w/ the response at this stage. Clean
      // out the wire of the response so its out of the way and we can get other responses on
      // this connection.
      int readSoFar = IPCUtil.getTotalSizeWhenWrittenDelimited(responseHeader);
      int whatIsLeftToRead = totalSize - readSoFar;
      IOUtils.skipFully(in, whatIsLeftToRead);
      if (call != null) {
        call.callStats.setResponseSizeBytes(totalSize);
        call.callStats.setCallTimeMs(
            EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime());
      }
      return;
    }
    if (responseHeader.hasException()) {
      ExceptionResponse exceptionResponse = responseHeader.getException();
      RemoteException re = createRemoteException(exceptionResponse);
      call.setException(re);
      call.callStats.setResponseSizeBytes(totalSize);
      call.callStats.setCallTimeMs(
          EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime());
      if (isFatalConnectionException(exceptionResponse)) {
        markClosed(re);
      }
    } else {
      Message value = null;
      if (call.responseDefaultType != null) {
        Builder builder = call.responseDefaultType.newBuilderForType();
        ProtobufUtil.mergeDelimitedFrom(builder, in);
        value = builder.build();
      }
      CellScanner cellBlockScanner = null;
      if (responseHeader.hasCellBlockMeta()) {
        int size = responseHeader.getCellBlockMeta().getLength();
        byte [] cellBlock = new byte[size];
        IOUtils.readFully(this.in, cellBlock, 0, cellBlock.length);
        cellBlockScanner = ipcUtil.createCellScanner(this.codec, this.compressor, cellBlock);
      }
      call.setResponse(value, cellBlockScanner);
      call.callStats.setResponseSizeBytes(totalSize);
      call.callStats.setCallTimeMs(
          EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime());
    }
  } catch (IOException e) {
    if (expectedCall) call.setException(e);
    if (e instanceof SocketTimeoutException) {
      // Clean up open calls but don't treat this as a fatal condition,
      // since we expect certain responses to not make it by the specified
      // {@link ConnectionId#rpcTimeout}.
      if (LOG.isTraceEnabled()) LOG.trace("ignored", e);
    } else {
      // Treat this as a fatal condition and close this connection
      markClosed(e);
    }
  } finally {
    cleanupCalls(false);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 75, Source file: RpcClientImpl.java
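The skipFully call in example 12 exists to keep a multiplexed connection usable: when a response arrives for a call that is no longer tracked, the remaining totalSize - readSoFar bytes are drained off the wire so the next response starts at a clean message boundary. The sketch below reproduces that draining step for a toy length-prefixed wire format; the framing and call id are invented and are not the actual HBase protocol.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.IOUtils;

public class DrainUnexpectedResponseDemo {
  public static void main(String[] args) throws IOException {
    // Toy wire format: [totalSize][callId][body...], followed by the next message.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    byte[] body = new byte[] {9, 9, 9, 9, 9, 9};
    out.writeInt(4 + body.length);     // totalSize counts callId + body
    out.writeInt(7);                   // callId of a call we no longer track
    out.write(body);
    out.writeInt(0);                   // start of the *next* message

    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    int totalSize = in.readInt();
    int callId = in.readInt();         // 4 bytes of the message read so far
    boolean expectedCall = false;      // pretend we have no matching call
    if (!expectedCall) {
      System.out.println("unexpected response for call " + callId);
      // Drain the rest of the message so the next response starts cleanly.
      IOUtils.skipFully(in, totalSize - 4);
    }
    System.out.println("next message size: " + in.readInt());  // prints 0
  }
}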


Note: The org.apache.hadoop.io.IOUtils.skipFully method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by many developers, and the copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or reusing the code; do not reproduce this article without permission.