

Java PacketHeader.PKT_MAX_HEADER_LEN Field Code Examples

This article collects typical usage examples of the Java field org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader.PKT_MAX_HEADER_LEN. If you have been wondering what PacketHeader.PKT_MAX_HEADER_LEN is for and how to use it, the examples selected below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader.


The sections below present 15 code examples that use the PacketHeader.PKT_MAX_HEADER_LEN field, sorted by popularity by default.
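
Before diving in, a quick orientation: PKT_MAX_HEADER_LEN is a public static final int on PacketHeader that gives the largest possible serialized length of a data-transfer packet header. Callers reserve that many bytes at the front of a packet buffer so the header always fits, whichever optional header fields end up being set. A minimal sketch (the class and field are real Hadoop types; the demo program itself is illustrative):

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class HeaderLenDemo {
  public static void main(String[] args) {
    // Maximum on-the-wire size of a packet header, in bytes. Reserving this
    // much at the start of a packet buffer guarantees the serialized header
    // fits regardless of which optional header fields are present.
    System.out.println("PKT_MAX_HEADER_LEN = " + PacketHeader.PKT_MAX_HEADER_LEN);
  }
}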

Example 1: createPacket

/** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets. */
private DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
    long seqno, boolean lastPacketInBlock) throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }

  return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
                       getChecksumSize(), lastPacketInBlock);
}
 
Developer: naver | Project: hadoop | Lines: 18 | Source: DFSOutputStream.java
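
The pattern worth noting: the caller asks ByteArrayManager for PKT_MAX_HEADER_LEN + packetSize bytes, so the serialized header and the packet body (checksums plus data) share a single allocation. A hedged illustration of the sizing, where the 64 KiB payload budget is an assumed value rather than a constant from the source:

// Assumed payload budget for illustration only.
final int packetSize = 64 * 1024;                                    // data + checksums
final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;
// One allocation laid out as: [ header slack | checksums | data ]
final byte[] buf = new byte[bufferSize];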

Example 2: DFSPacket

/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of checksum
 * @param lastPacketInBlock if this is the last packet
 */
DFSPacket(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
                 int checksumSize, boolean lastPacketInBlock) {
  this.lastPacketInBlock = lastPacketInBlock;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;

  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
 
Developer: naver | Project: hadoop | Lines: 25 | Source: DFSPacket.java
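
The constructor above fixes the internal layout of buf: the first PKT_MAX_HEADER_LEN bytes are reserved for the header (serialized later, once the final payload length is known), the per-chunk checksums follow, and data begins after the checksum region. A sketch of the offsets, assuming 4-byte checksums such as CRC32C (a common HDFS default, but an assumption here):

// Assumed parameters for illustration.
int chunksPerPkt = 126;
int checksumSize = 4;   // e.g. CRC32C

int checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;          // header region ends here
int dataStart = checksumStart + chunksPerPkt * checksumSize;  // 126 * 4 = 504 bytes of checksums
// Layout: [0, checksumStart) header | [checksumStart, dataStart) checksums | [dataStart, ..) data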

Example 3: testPacket

@Test
public void testPacket() throws Exception {
  Random r = new Random(12345L);
byte[] data = new byte[chunkSize];
  r.nextBytes(data);
  byte[] checksum = new byte[checksumSize];
  r.nextBytes(checksum);

DataOutputBuffer os = new DataOutputBuffer(data.length * 2);

  byte[] packetBuf = new byte[data.length * 2];
  DFSPacket p = new DFSPacket(packetBuf, maxChunksPerPacket,
                              0, 0, checksumSize, false);
  p.setSyncBlock(true);
  p.writeData(data, 0, data.length);
  p.writeChecksum(checksum, 0, checksum.length);
  p.writeTo(os);

// We have set syncBlock to true, so the header has the maximum length.
  int headerLen = PacketHeader.PKT_MAX_HEADER_LEN;
  byte[] readBuf = os.getData();

  assertArrayRegionsEqual(readBuf, headerLen, checksum, 0, checksum.length);
  assertArrayRegionsEqual(readBuf, headerLen + checksum.length, data, 0, data.length);

}
 
Developer: naver | Project: hadoop | Lines: 26 | Source: TestDFSPacket.java

Example 4: createPacket

/** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets. */
protected DFSPacket createPacket(int packetSize, int chunksPerPkt,
    long offsetInBlock, long seqno, boolean lastPacketInBlock)
    throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }

  return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
      getChecksumSize(), lastPacketInBlock);
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 19 | Source: DFSOutputStream.java

Example 5: DFSPacket

/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of checksum
 * @param lastPacketInBlock if this is the last packet
 */
public DFSPacket(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
                 int checksumSize, boolean lastPacketInBlock) {
  this.lastPacketInBlock = lastPacketInBlock;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;

  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 25 | Source: DFSPacket.java

Example 6: createPacket

/** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets. */
private Packet createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
    long seqno) throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }

  return new Packet(buf, chunksPerPkt, offsetInBlock, seqno, getChecksumSize());
}
 
Developer: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines: 17 | Source: DFSOutputStream.java

Example 7: Packet

/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of each checksum, in bytes
 */
private Packet(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
    int checksumSize) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;

  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
 
Developer: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines: 23 | Source: DFSOutputStream.java

Example 8: Packet

/**
 * Create a new packet.
 * 
 * @param pktSize maximum size of the packet, including checksum data and actual data.
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 */
Packet(int pktSize, int chunksPerPkt, long offsetInBlock) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = currentSeqno;
  currentSeqno++;
  
  buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN + pktSize];
  
  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksum.getChecksumSize());
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
 
Developer: ict-carch | Project: hadoop-plus | Lines: 22 | Source: DFSOutputStream.java

Example 9: Packet

/**
 * Create a new packet.
 *
 * @param pktSize
 *     maximum size of the packet, including checksum data and actual data.
 * @param chunksPerPkt
 *     maximum number of chunks per packet.
 * @param offsetInBlock
 *     offset in bytes into the HDFS block.
 */
Packet(int pktSize, int chunksPerPkt, long offsetInBlock) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = currentSeqno;
  currentSeqno++;

  buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN + pktSize];

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksum.getChecksumSize());
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
 
Developer: hopshadoop | Project: hops | Lines: 25 | Source: DFSOutputStream.java

Example 10: computePacketChunkSize

private void computePacketChunkSize(int psize, int csize) {
  final int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
  final int chunkSize = csize + getChecksumSize();
  chunksPerPacket = Math.max(bodySize/chunkSize, 1);
  packetSize = chunkSize*chunksPerPacket;
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("computePacketChunkSize: src=" + src +
              ", chunkSize=" + chunkSize +
              ", chunksPerPacket=" + chunksPerPacket +
              ", packetSize=" + packetSize);
  }
}
 
Developer: naver | Project: hadoop | Lines: 12 | Source: DFSOutputStream.java
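
To make the arithmetic concrete, here is a hedged worked example. The inputs below are assumptions (a 64 KiB target packet, 512-byte chunks, 4-byte checksums); only the formulas come from the method above:

// Assumed inputs for illustration.
int psize = 64 * 1024;  // target packet size, bytes
int csize = 512;        // bytes of data per chunk
int checksumLen = 4;    // bytes of checksum per chunk (assumed CRC32C)

int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;  // room left after the header
int chunkSize = csize + checksumLen;                     // 516 bytes per chunk incl. checksum
int chunksPerPacket = Math.max(bodySize / chunkSize, 1); // roughly 126 with these numbers
int packetSize = chunkSize * chunksPerPacket;            // actual body size, <= bodySize

Note that packetSize here counts only the body; the allocation in createPacket (Example 1) adds PKT_MAX_HEADER_LEN back on top.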

Example 11: computePacketChunkSize

protected void computePacketChunkSize(int psize, int csize) {
  final int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
  final int chunkSize = csize + getChecksumSize();
  chunksPerPacket = Math.max(bodySize/chunkSize, 1);
  packetSize = chunkSize*chunksPerPacket;
  DFSClient.LOG.debug("computePacketChunkSize: src={}, chunkSize={}, "
          + "chunksPerPacket={}, packetSize={}",
      src, chunkSize, chunksPerPacket, packetSize);
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 9 | Source: DFSOutputStream.java

Example 12: createHeartbeatPacket

/**
 * For heartbeat packets, create buffer directly by new byte[]
 * since heartbeats should not be blocked.
 */
private DFSPacket createHeartbeatPacket() throws InterruptedIOException {
  final byte[] buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN];
  return new DFSPacket(buf, 0, 0, DFSPacket.HEART_BEAT_SEQNO,
                       getChecksumSize(), false);
}
 
Developer: naver | Project: hadoop | Lines: 9 | Source: DFSOutputStream.java
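
Worth spelling out why this buffer is exactly PKT_MAX_HEADER_LEN bytes: a heartbeat packet carries no chunks at all (zero chunks, offset 0, the sentinel sequence number HEART_BEAT_SEQNO), so only the header region is needed. It is also allocated with a plain new byte[] rather than through ByteArrayManager, since, as the comment notes, a heartbeat must never block waiting on buffer quota.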

Example 13: doSendBlock

private long doSendBlock(DataOutputStream out, OutputStream baseStream,
      DataTransferThrottler throttler) throws IOException {
  if (out == null) {
    throw new IOException( "out stream is null" );
  }
  initialOffset = offset;
  long totalRead = 0;
  OutputStream streamForSendChunks = out;
  
  lastCacheDropOffset = initialOffset;

  if (isLongRead() && blockInFd != null) {
    // Advise that this file descriptor will be accessed sequentially.
    NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
        block.getBlockName(), blockInFd, 0, 0,
        NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
  }
  
  // Trigger readahead of beginning of file if configured.
  manageOsCache();

  final long startTime = ClientTraceLog.isDebugEnabled() ? System.nanoTime() : 0;
  try {
    int maxChunksPerPacket;
    int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN;
    boolean transferTo = transferToAllowed && !verifyChecksum
        && baseStream instanceof SocketOutputStream
        && blockIn instanceof FileInputStream;
    if (transferTo) {
      FileChannel fileChannel = ((FileInputStream)blockIn).getChannel();
      blockInPosition = fileChannel.position();
      streamForSendChunks = baseStream;
      maxChunksPerPacket = numberOfChunks(TRANSFERTO_BUFFER_SIZE);
      
      // Smaller packet size to only hold checksum when doing transferTo
      pktBufSize += checksumSize * maxChunksPerPacket;
    } else {
      maxChunksPerPacket = Math.max(1,
          numberOfChunks(HdfsConstants.IO_FILE_BUFFER_SIZE));
      // Packet size includes both checksum and data
      pktBufSize += (chunkSize + checksumSize) * maxChunksPerPacket;
    }

    ByteBuffer pktBuf = ByteBuffer.allocate(pktBufSize);

    while (endOffset > offset && !Thread.currentThread().isInterrupted()) {
      manageOsCache();
      long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks,
          transferTo, throttler);
      offset += len;
      totalRead += len + (numberOfChunks(len) * checksumSize);
      seqno++;
    }
    // If this thread was interrupted, then it did not send the full block.
    if (!Thread.currentThread().isInterrupted()) {
      try {
        // send an empty packet to mark the end of the block
        sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo,
            throttler);
        out.flush();
      } catch (IOException e) { //socket error
        throw ioeToSocketException(e);
      }

      sentEntireByteRange = true;
    }
  } finally {
    if ((clientTraceFmt != null) && ClientTraceLog.isDebugEnabled()) {
      final long endTime = System.nanoTime();
      ClientTraceLog.debug(String.format(clientTraceFmt, totalRead,
          initialOffset, endTime - startTime));
    }
    close();
  }
  return totalRead;
}
 
Developer: naver | Project: hadoop | Lines: 76 | Source: BlockSender.java
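
The two pktBufSize branches deserve a closer look. On the transferTo path, data bytes go straight from the block file to the socket via zero-copy, so the staging buffer only needs room for the header plus checksums; on the normal path, both checksums and data are copied through the buffer. A hedged recap of the sizing, where the chunk count and sizes are assumed values:

// Assumed values for illustration; in the real BlockSender these derive from
// TRANSFERTO_BUFFER_SIZE / IO_FILE_BUFFER_SIZE and the block's checksum type.
int chunkSize = 512, checksumSize = 4, maxChunksPerPacket = 128;

// transferTo: data is sent with zero-copy, so the buffer stages only
// the header and the checksums.
int transferToBufSize = PacketHeader.PKT_MAX_HEADER_LEN
    + checksumSize * maxChunksPerPacket;

// Normal path: checksums AND data are both copied through the buffer.
int normalBufSize = PacketHeader.PKT_MAX_HEADER_LEN
    + (chunkSize + checksumSize) * maxChunksPerPacket;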

Example 14: createHeartbeatPacket

/**
 * For heartbeat packets, create buffer directly by new byte[]
 * since heartbeats should not be blocked.
 */
private DFSPacket createHeartbeatPacket() {
  final byte[] buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN];
  return new DFSPacket(buf, 0, 0, DFSPacket.HEART_BEAT_SEQNO, 0, false);
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 8 | Source: DataStreamer.java

Example 15: doSendBlock

private long doSendBlock(DataOutputStream out, OutputStream baseStream,
      DataTransferThrottler throttler) throws IOException {
  if (out == null) {
    throw new IOException( "out stream is null" );
  }
  initialOffset = offset;
  long totalRead = 0;
  OutputStream streamForSendChunks = out;
  
  lastCacheDropOffset = initialOffset;

  if (isLongRead() && blockInFd != null) {
    // Advise that this file descriptor will be accessed sequentially.
    NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
        block.getBlockName(), blockInFd, 0, 0, POSIX_FADV_SEQUENTIAL);
  }
  
  // Trigger readahead of beginning of file if configured.
  manageOsCache();

  final long startTime = ClientTraceLog.isDebugEnabled() ? System.nanoTime() : 0;
  try {
    int maxChunksPerPacket;
    int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN;
    boolean transferTo = transferToAllowed && !verifyChecksum
        && baseStream instanceof SocketOutputStream
        && blockIn instanceof FileInputStream;
    if (transferTo) {
      FileChannel fileChannel = ((FileInputStream)blockIn).getChannel();
      blockInPosition = fileChannel.position();
      streamForSendChunks = baseStream;
      maxChunksPerPacket = numberOfChunks(TRANSFERTO_BUFFER_SIZE);
      
      // Smaller packet size to only hold checksum when doing transferTo
      pktBufSize += checksumSize * maxChunksPerPacket;
    } else {
      maxChunksPerPacket = Math.max(1,
          numberOfChunks(IO_FILE_BUFFER_SIZE));
      // Packet size includes both checksum and data
      pktBufSize += (chunkSize + checksumSize) * maxChunksPerPacket;
    }

    ByteBuffer pktBuf = ByteBuffer.allocate(pktBufSize);

    while (endOffset > offset && !Thread.currentThread().isInterrupted()) {
      manageOsCache();
      long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks,
          transferTo, throttler);
      offset += len;
      totalRead += len + (numberOfChunks(len) * checksumSize);
      seqno++;
    }
    // If this thread was interrupted, then it did not send the full block.
    if (!Thread.currentThread().isInterrupted()) {
      try {
        // send an empty packet to mark the end of the block
        sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo,
            throttler);
        out.flush();
      } catch (IOException e) { //socket error
        throw ioeToSocketException(e);
      }

      sentEntireByteRange = true;
    }
  } finally {
    if ((clientTraceFmt != null) && ClientTraceLog.isDebugEnabled()) {
      final long endTime = System.nanoTime();
      ClientTraceLog.debug(String.format(clientTraceFmt, totalRead,
          initialOffset, endTime - startTime));
    }
    close();
  }
  return totalRead;
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 75 | Source: BlockSender.java


Note: The org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader.PKT_MAX_HEADER_LEN examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and redistribution or use should follow each project's license. Do not republish without permission.