

Java PacketHeader Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader. If you have been asking yourself what exactly PacketHeader does, how to use it, or where to find real examples of it, the curated class code examples below may help.


The PacketHeader class belongs to the org.apache.hadoop.hdfs.protocol.datatransfer package. Fifteen code examples of the class are shown below, sorted by popularity by default.
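
Before the individual examples, a minimal sketch may help orient. It builds a header with the six-argument constructor seen in several examples below and serializes it to a byte stream; the class name PacketHeaderSketch and the argument values are illustrative, and the packetLen of 4 follows the heartbeat convention in Example 13 (the 4-byte length field with no checksum or data).

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class PacketHeaderSketch {
  public static void main(String[] args) throws IOException {
    PacketHeader header = new PacketHeader(
        4,      // packetLen: on-wire payload length, including the 4-byte length field
        0L,     // offsetInBlock
        1L,     // seqno
        false,  // lastPacketInBlock
        0,      // dataLen
        false); // syncBlock

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    header.write(new DataOutputStream(bytes));
    System.out.println("serialized header: " + bytes.size() + " bytes (upper bound: "
        + "PacketHeader.PKT_MAX_HEADER_LEN = " + PacketHeader.PKT_MAX_HEADER_LEN + ")");
  }
}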

Example 1: createPacket

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/** Use {@link ByteArrayManager} to create a buffer for non-heartbeat packets. */
private DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
    long seqno, boolean lastPacketInBlock) throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }

  return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
                       getChecksumSize(), lastPacketInBlock);
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: DFSOutputStream.java

Example 2: DFSPacket

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of each checksum
 * @param lastPacketInBlock if this is the last packet
 */
DFSPacket(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
                 int checksumSize, boolean lastPacketInBlock) {
  this.lastPacketInBlock = lastPacketInBlock;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;

  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: DFSPacket.java
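
The constructor above reserves PKT_MAX_HEADER_LEN bytes at the head of buf, then places all checksums, then all chunk data. A small sketch of the resulting offsets, assuming typical HDFS values of 512-byte chunks and 4-byte checksums (both values are assumptions, not taken from the example):

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class PacketLayoutSketch {
  public static void main(String[] args) {
    int chunksPerPkt = 126; // illustrative
    int chunkSize = 512;    // data bytes per chunk (assumed HDFS default)
    int checksumSize = 4;   // e.g. CRC32C (assumed)

    // Mirrors the offset arithmetic in the DFSPacket constructor above.
    int checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
    int dataStart = checksumStart + chunksPerPkt * checksumSize;
    int bufEnd = dataStart + chunksPerPkt * chunkSize;

    System.out.println("header reserve: [0, " + checksumStart + ")");
    System.out.println("checksums:      [" + checksumStart + ", " + dataStart + ")");
    System.out.println("chunk data:     [" + dataStart + ", " + bufEnd + ")");
  }
}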

Example 3: writeZeroLengthPacket

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // offset in block
    100,                 // sequence number
    true,                // last packet in block
    0,                   // chunk length
    false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  // finally, write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new int[] {PipelineAck.combineHeader
    (PipelineAck.ECN.DISABLED, Status.SUCCESS)}).write
    (recvOut);
  sendRecvData(description, false);
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestDataTransferProtocol.java
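
A note on the magic number 8 above: the on-wire packet length counts the 4-byte length field itself plus all checksum and data bytes, so a packet carrying one 4-byte zero checksum and no data has length 8. A hypothetical helper making that arithmetic explicit (packetLen and PacketLenSketch are illustrative names, not Hadoop API):

public class PacketLenSketch {
  // Assumption: the packet length field includes its own 4 bytes,
  // then checksum bytes, then data bytes.
  static int packetLen(int checksumBytes, int dataBytes) {
    return 4 + checksumBytes + dataBytes;
  }

  public static void main(String[] args) {
    System.out.println(packetLen(4, 0)); // 8: Example 3's zero-length packet
    System.out.println(packetLen(0, 0)); // 4: a heartbeat packet (Example 13)
  }
}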

Example 4: testPacket

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
@Test
public void testPacket() throws Exception {
  Random r = new Random(12345L);
  byte[] data = new byte[chunkSize];
  r.nextBytes(data);
  byte[] checksum = new byte[checksumSize];
  r.nextBytes(checksum);

  DataOutputBuffer os = new DataOutputBuffer(data.length * 2);

  byte[] packetBuf = new byte[data.length * 2];
  DFSPacket p = new DFSPacket(packetBuf, maxChunksPerPacket,
                              0, 0, checksumSize, false);
  p.setSyncBlock(true);
  p.writeData(data, 0, data.length);
  p.writeChecksum(checksum, 0, checksum.length);
  p.writeTo(os);

  // we have set syncBlock to true, so the header has the maximum length
  int headerLen = PacketHeader.PKT_MAX_HEADER_LEN;
  byte[] readBuf = os.getData();

  assertArrayRegionsEqual(readBuf, headerLen, checksum, 0, checksum.length);
  assertArrayRegionsEqual(readBuf, headerLen + checksum.length, data, 0, data.length);
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestDFSPacket.java

Example 5: createPacket

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/** Use {@link ByteArrayManager} to create a buffer for non-heartbeat packets. */
protected DFSPacket createPacket(int packetSize, int chunksPerPkt,
    long offsetInBlock, long seqno, boolean lastPacketInBlock)
    throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }

  return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
      getChecksumSize(), lastPacketInBlock);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 20, Source: DFSOutputStream.java

Example 6: DFSPacket

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of each checksum
 * @param lastPacketInBlock if this is the last packet
 */
public DFSPacket(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
                 int checksumSize, boolean lastPacketInBlock) {
  this.lastPacketInBlock = lastPacketInBlock;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;

  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 26, Source: DFSPacket.java

Example 7: createPacket

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/** Use {@link ByteArrayManager} to create a buffer for non-heartbeat packets. */
private Packet createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
    long seqno) throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }

  return new Packet(buf, chunksPerPkt, offsetInBlock, seqno, getChecksumSize());
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 18, Source: DFSOutputStream.java

Example 8: Packet

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of each checksum
 */
private Packet(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
    int checksumSize) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;

  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 24, Source: DFSOutputStream.java

Example 9: writeZeroLengthPacket

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // offset in block
    100,                 // sequence number
    true,                // last packet in block
    0,                   // chunk length
    false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  // finally, write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 18, Source: TestDataTransferProtocol.java

Example 10: Packet

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/**
 * Create a new packet.
 * 
 * @param pktSize maximum size of the packet, including checksum data and actual data.
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 */
Packet(int pktSize, int chunksPerPkt, long offsetInBlock) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = currentSeqno;
  currentSeqno++;
  
  buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN + pktSize];
  
  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksum.getChecksumSize());
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 23, Source: DFSOutputStream.java

Example 11: Packet

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/**
 * Create a new packet.
 *
 * @param pktSize
 *     maximum size of the packet, including checksum data and actual data.
 * @param chunksPerPkt
 *     maximum number of chunks per packet.
 * @param offsetInBlock
 *     offset in bytes into the HDFS block.
 */
Packet(int pktSize, int chunksPerPkt, long offsetInBlock) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = currentSeqno;
  currentSeqno++;

  buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN + pktSize];

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksum.getChecksumSize());
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
 
Developer: hopshadoop, Project: hops, Lines: 26, Source: DFSOutputStream.java

Example 12: writeZeroLengthPacket

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
private void writeZeroLengthPacket(ExtendedBlock block, String description)
    throws IOException {
  PacketHeader hdr = new PacketHeader(
      8,                   // size of packet
      block.getNumBytes(), // offset in block
      100,                 // sequence number
      true,                // last packet in block
      0,                   // chunk length
      false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  // finally, write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
 
Developer: hopshadoop, Project: hops, Lines: 17, Source: TestDataTransferProtocol.java

Example 13: userEventTriggered

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
  if (evt instanceof IdleStateEvent) {
    IdleStateEvent e = (IdleStateEvent) evt;
    if (e.state() == READER_IDLE) {
      failed(ctx.channel(),
        () -> new IOException("Timeout(" + timeoutMs + "ms) waiting for response"));
    } else if (e.state() == WRITER_IDLE) {
      PacketHeader heartbeat = new PacketHeader(4, 0, HEART_BEAT_SEQNO, false, 0, false);
      int len = heartbeat.getSerializedSize();
      ByteBuf buf = alloc.buffer(len);
      heartbeat.putInBuffer(buf.nioBuffer(0, len));
      buf.writerIndex(len);
      ctx.channel().writeAndFlush(buf);
    }
    return;
  }
  super.userEventTriggered(ctx, evt);
}
 
Developer: apache, Project: hbase, Lines: 20, Source: FanOutOneBlockAsyncDFSOutput.java
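
A standalone sketch of the heartbeat branch above, without the netty plumbing. HEART_BEAT_SEQNO is assumed to be -1, which is how the HDFS client marks heartbeat packets; treat that value, and the class name HeartbeatSketch, as assumptions rather than guarantees.

import java.nio.ByteBuffer;

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class HeartbeatSketch {
  private static final long HEART_BEAT_SEQNO = -1L; // assumed heartbeat marker

  public static void main(String[] args) {
    // Same arguments as the netty handler above: 4-byte payload, no data.
    PacketHeader heartbeat = new PacketHeader(4, 0, HEART_BEAT_SEQNO, false, 0, false);
    int len = heartbeat.getSerializedSize();
    ByteBuffer buf = ByteBuffer.allocate(len);
    heartbeat.putInBuffer(buf); // same serialization call as in Example 13
    System.out.println("heartbeat header: " + len + " bytes");
  }
}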

Example 14: computePacketChunkSize

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
private void computePacketChunkSize(int psize, int csize) {
  final int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
  final int chunkSize = csize + getChecksumSize();
  chunksPerPacket = Math.max(bodySize/chunkSize, 1);
  packetSize = chunkSize*chunksPerPacket;
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("computePacketChunkSize: src=" + src +
              ", chunkSize=" + chunkSize +
              ", chunksPerPacket=" + chunksPerPacket +
              ", packetSize=" + packetSize);
  }
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: DFSOutputStream.java
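
Plugging in common defaults makes the arithmetic concrete. The sketch below assumes a 64 KB write packet (psize), 512 bytes per checksum chunk (csize), and 4-byte checksums; all three are assumptions about typical configurations, not values taken from the example.

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class ChunkSizeSketch {
  public static void main(String[] args) {
    int psize = 64 * 1024; // assumed dfs.client-write-packet-size
    int csize = 512;       // assumed bytes per checksum chunk
    int checksumSize = 4;  // assumed checksum width, e.g. CRC32C

    // Mirrors computePacketChunkSize above.
    int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
    int chunkSize = csize + checksumSize; // 516 bytes per data+checksum pair
    int chunksPerPacket = Math.max(bodySize / chunkSize, 1);
    int packetSize = chunkSize * chunksPerPacket;

    System.out.println("chunksPerPacket = " + chunksPerPacket); // ~126
    System.out.println("packetSize = " + packetSize + " bytes");
  }
}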

Example 15: readTrailingEmptyPacket

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
private void readTrailingEmptyPacket() throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("Reading empty packet at end of read");
  }
  
  packetReceiver.receiveNextPacket(in);

  PacketHeader trailer = packetReceiver.getHeader();
  if (!trailer.isLastPacketInBlock() ||
     trailer.getDataLen() != 0) {
    throw new IOException("Expected empty end-of-read packet! Header: " +
                          trailer);
  }
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: RemoteBlockReader2.java


Note: The org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader class examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.