This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader. If you have been wondering what PacketHeader is for, how to use it, or where to find examples of it in real code, the curated samples below may help.
The PacketHeader class belongs to the org.apache.hadoop.hdfs.protocol.datatransfer package. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
Example 1: createPacket
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets. */
private DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
    long seqno, boolean lastPacketInBlock) throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;
  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }
  return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
      getChecksumSize(), lastPacketInBlock);
}
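The catch block is the standard idiom for surfacing a thread interrupt from a blocking call as an InterruptedIOException that still carries the original cause, so callers see an IOException while the interrupt information is preserved. A minimal self-contained sketch of the idiom (blockingAllocate is a hypothetical stand-in for byteArrayManager.newByteArray):

import java.io.InterruptedIOException;

public class InterruptIdiom {
  // Hypothetical blocking allocation; stands in for ByteArrayManager.newByteArray.
  static byte[] blockingAllocate(int size) throws InterruptedException {
    return new byte[size];
  }

  static byte[] allocate(int size, long seqno) throws InterruptedIOException {
    try {
      return blockingAllocate(size);
    } catch (InterruptedException ie) {
      InterruptedIOException iioe = new InterruptedIOException("seqno=" + seqno);
      iioe.initCause(ie); // keep the original interrupt as the cause
      throw iioe;
    }
  }
}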
Example 2: DFSPacket
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet
 * @param offsetInBlock offset in bytes into the HDFS block
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of each checksum
 * @param lastPacketInBlock whether this is the last packet in the block
 */
DFSPacket(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
    int checksumSize, boolean lastPacketInBlock) {
  this.lastPacketInBlock = lastPacketInBlock;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;
  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
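The constructor lays the buffer out as [header gap | checksums | data]: the first PKT_MAX_HEADER_LEN bytes stay empty so the real header, whose encoded length varies, can later be written directly in front of the checksums without copying. A minimal sketch that just prints the resulting offsets (the chunk count and checksum width are illustrative values, and the exact PKT_MAX_HEADER_LEN depends on the Hadoop version):

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class PacketLayoutDemo {
  public static void main(String[] args) {
    int chunksPerPkt = 126; // illustrative: chunks per packet
    int checksumSize = 4;   // illustrative: e.g. a 4-byte CRC32C per chunk
    int checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
    int dataStart = checksumStart + chunksPerPkt * checksumSize;
    // [0, checksumStart)        : reserved for the packet header
    // [checksumStart, dataStart): one checksum per chunk
    // [dataStart, ...)          : the actual data
    System.out.println("checksums start at " + checksumStart
        + ", data starts at " + dataStart);
  }
}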
Example 3: writeZeroLengthPacket
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
private void writeZeroLengthPacket(ExtendedBlock block, String description)
    throws IOException {
  PacketHeader hdr = new PacketHeader(
      8,                    // size of packet
      block.getNumBytes(),  // offset in block
      100,                  // sequence number
      true,                 // lastPacketInBlock
      0,                    // chunk length
      false);               // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0); // zero checksum

  // ok, finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new int[] { PipelineAck.combineHeader(
      PipelineAck.ECN.DISABLED, Status.SUCCESS) }).write(recvOut);
  sendRecvData(description, false);
}
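hdr.write(sendOut) emits the wire preamble (the 4-byte packet length, a 2-byte header length, then the protobuf-encoded header fields), and the writeInt(0) that follows is the empty checksum section. A standalone, hedged sketch that serializes the same kind of zero-length trailer into a byte array (the offset 1024 is an illustrative stand-in for block.getNumBytes()):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class TrailerWireSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    PacketHeader trailer = new PacketHeader(
        8,      // packet length: 4-byte length word + 4-byte empty checksum section
        1024,   // offset in block (illustrative)
        100,    // sequence number
        true,   // last packet in block
        0,      // no payload
        false); // no sync
    trailer.write(out); // preamble + protobuf-encoded header
    out.writeInt(0);    // empty checksum section, as in the test above
    System.out.println("trailer packet is " + baos.size() + " bytes on the wire");
  }
}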
Example 4: testPacket
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
@Test
public void testPacket() throws Exception {
  Random r = new Random(12345L);
  byte[] data = new byte[chunkSize];
  r.nextBytes(data);
  byte[] checksum = new byte[checksumSize];
  r.nextBytes(checksum);

  DataOutputBuffer os = new DataOutputBuffer(data.length * 2);
  byte[] packetBuf = new byte[data.length * 2];
  DFSPacket p = new DFSPacket(packetBuf, maxChunksPerPacket,
      0, 0, checksumSize, false);
  p.setSyncBlock(true);
  p.writeData(data, 0, data.length);
  p.writeChecksum(checksum, 0, checksum.length);
  p.writeTo(os);

  // we have set syncBlock to true, so the header has the maximum length
  int headerLen = PacketHeader.PKT_MAX_HEADER_LEN;
  byte[] readBuf = os.getData();

  assertArrayRegionsEqual(readBuf, headerLen, checksum, 0, checksum.length);
  assertArrayRegionsEqual(readBuf, headerLen + checksum.length, data, 0,
      data.length);
}
Example 5: createPacket
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets. */
protected DFSPacket createPacket(int packetSize, int chunksPerPkt,
    long offsetInBlock, long seqno, boolean lastPacketInBlock)
    throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;
  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }
  return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
      getChecksumSize(), lastPacketInBlock);
}
Example 6: DFSPacket
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet
 * @param offsetInBlock offset in bytes into the HDFS block
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of each checksum
 * @param lastPacketInBlock whether this is the last packet in the block
 */
public DFSPacket(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
    int checksumSize, boolean lastPacketInBlock) {
  this.lastPacketInBlock = lastPacketInBlock;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;
  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
Example 7: createPacket
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets. */
private Packet createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
    long seqno) throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;
  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }
  return new Packet(buf, chunksPerPkt, offsetInBlock, seqno, getChecksumSize());
}
Example 8: Packet
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet
 * @param offsetInBlock offset in bytes into the HDFS block
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of each checksum
 */
private Packet(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
    int checksumSize) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;
  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
Example 9: writeZeroLengthPacket
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
private void writeZeroLengthPacket(ExtendedBlock block, String description)
    throws IOException {
  PacketHeader hdr = new PacketHeader(
      8,                    // size of packet
      block.getNumBytes(),  // offset in block
      100,                  // sequence number
      true,                 // lastPacketInBlock
      0,                    // chunk length
      false);               // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0); // zero checksum

  // ok, finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[] { Status.SUCCESS }).write(recvOut);
  sendRecvData(description, false);
}
Example 10: Packet
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/**
 * Create a new packet.
 *
 * @param pktSize maximum size of the packet, including checksum data and actual data.
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 */
Packet(int pktSize, int chunksPerPkt, long offsetInBlock) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = currentSeqno;
  currentSeqno++;

  buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN + pktSize];

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksum.getChecksumSize());
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
Example 11: Packet
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
/**
 * Create a new packet.
 *
 * @param pktSize maximum size of the packet, including checksum data and actual data.
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 */
Packet(int pktSize, int chunksPerPkt, long offsetInBlock) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = currentSeqno;
  currentSeqno++;

  buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN + pktSize];

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksum.getChecksumSize());
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
Example 12: writeZeroLengthPacket
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
private void writeZeroLengthPacket(ExtendedBlock block, String description)
    throws IOException {
  PacketHeader hdr = new PacketHeader(
      8,                    // size of packet
      block.getNumBytes(),  // offset in block
      100,                  // sequence number
      true,                 // lastPacketInBlock
      0,                    // chunk length
      false);               // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0); // zero checksum

  // ok, finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[] { Status.SUCCESS }).write(recvOut);
  sendRecvData(description, false);
}
Example 13: userEventTriggered
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
  if (evt instanceof IdleStateEvent) {
    IdleStateEvent e = (IdleStateEvent) evt;
    if (e.state() == READER_IDLE) {
      failed(ctx.channel(),
          () -> new IOException("Timeout(" + timeoutMs + "ms) waiting for response"));
    } else if (e.state() == WRITER_IDLE) {
      PacketHeader heartbeat = new PacketHeader(4, 0, HEART_BEAT_SEQNO, false, 0, false);
      int len = heartbeat.getSerializedSize();
      ByteBuf buf = alloc.buffer(len);
      heartbeat.putInBuffer(buf.nioBuffer(0, len));
      buf.writerIndex(len);
      ctx.channel().writeAndFlush(buf);
    }
    return;
  }
  super.userEventTriggered(ctx, evt);
}
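WRITER_IDLE means nothing has been written to the datanode for a while, so the handler keeps the pipeline alive with a heartbeat: a header-only packet whose sequence number is the reserved heartbeat value (HEART_BEAT_SEQNO, which is -1 in the HDFS client and inlined below). A hedged sketch of the same construction using a plain NIO buffer instead of Netty:

import java.nio.ByteBuffer;

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class HeartbeatSketch {
  public static void main(String[] args) {
    // packetLen 4 covers only the empty checksum-length word: no data, no sync.
    PacketHeader heartbeat = new PacketHeader(4, 0, -1L, false, 0, false);
    ByteBuffer buf = ByteBuffer.allocate(heartbeat.getSerializedSize());
    heartbeat.putInBuffer(buf);
    buf.flip(); // ready to be written to the datanode socket
    System.out.println("heartbeat header: " + buf.remaining() + " bytes");
  }
}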
Example 14: computePacketChunkSize
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
private void computePacketChunkSize(int psize, int csize) {
  final int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
  final int chunkSize = csize + getChecksumSize();
  chunksPerPacket = Math.max(bodySize / chunkSize, 1);
  packetSize = chunkSize * chunksPerPacket;
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("computePacketChunkSize: src=" + src +
        ", chunkSize=" + chunkSize +
        ", chunksPerPacket=" + chunksPerPacket +
        ", packetSize=" + packetSize);
  }
}
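For concreteness: with the default 64 KB write-packet size, 512-byte checksum chunks, and 4-byte CRCs, the method first subtracts the header reservation, then packs as many whole 516-byte chunk-plus-checksum units as fit. A runnable sketch of the same arithmetic (the three defaults are assumptions about the configuration, not values taken from this class):

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class ChunkMathDemo {
  public static void main(String[] args) {
    int psize = 64 * 1024; // assumed dfs.client-write-packet-size default
    int csize = 512;       // assumed bytes-per-checksum default
    int checksumSize = 4;  // assumed CRC32/CRC32C width
    int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
    int chunkSize = csize + checksumSize;          // 516 bytes per chunk unit
    int chunksPerPacket = Math.max(bodySize / chunkSize, 1);
    int packetSize = chunkSize * chunksPerPacket;  // body size actually used
    System.out.println("chunksPerPacket=" + chunksPerPacket
        + ", packetSize=" + packetSize);
  }
}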
Example 15: readTrailingEmptyPacket
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the required package/class
private void readTrailingEmptyPacket() throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("Reading empty packet at end of read");
  }
  packetReceiver.receiveNextPacket(in);

  PacketHeader trailer = packetReceiver.getHeader();
  if (!trailer.isLastPacketInBlock() || trailer.getDataLen() != 0) {
    throw new IOException("Expected empty end-of-read packet! Header: " +
        trailer);
  }
}
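The trailer validated here is exactly the zero-length, last-in-block packet that Examples 3, 9, and 12 write from the sender side. A minimal hypothetical helper expressing the same check:

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class TrailerCheck {
  // Hypothetical helper mirroring the validation above: a valid end-of-read
  // trailer must be flagged last-in-block and must carry no data.
  static boolean isValidTrailer(PacketHeader trailer) {
    return trailer.isLastPacketInBlock() && trailer.getDataLen() == 0;
  }
}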