This page collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader.putInBuffer. If you are unsure what PacketHeader.putInBuffer does or how to call it, the curated examples below should help. You can also read more about the enclosing class, org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader.
Three code examples of PacketHeader.putInBuffer are shown below, sorted by popularity by default.
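Before the examples, a minimal sketch of the call itself. putInBuffer writes the serialized header (its length fields followed by the protobuf-encoded header) into a ByteBuffer at the buffer's current position, advancing the position by getSerializedSize() bytes. All field values below are illustrative assumptions, not taken from any particular project:

// Minimal sketch: serialize a header for a 1024-byte data packet.
// All field values are illustrative assumptions.
int dataLen = 1024;
int checksumLen = 8; // e.g. two 4-byte CRCs covering two 512-byte chunks
PacketHeader header = new PacketHeader(
    4 + checksumLen + dataLen, // packetLen
    0L,                        // offsetInBlock
    0L,                        // seqno
    false,                     // lastPacketInBlock
    dataLen,
    false);                    // syncBlock
ByteBuffer buf = ByteBuffer.allocate(header.getSerializedSize());
header.putInBuffer(buf); // buf's position now equals getSerializedSize()
buf.flip();              // ready to be written to the wire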
Example 1: userEventTriggered
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the class this method depends on

@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
  if (evt instanceof IdleStateEvent) {
    IdleStateEvent e = (IdleStateEvent) evt;
    if (e.state() == READER_IDLE) {
      failed(ctx.channel(),
        () -> new IOException("Timeout(" + timeoutMs + "ms) waiting for response"));
    } else if (e.state() == WRITER_IDLE) {
      // The write side has gone idle: send an empty heartbeat packet so the
      // datanode pipeline does not time the connection out.
      PacketHeader heartbeat = new PacketHeader(4, 0, HEART_BEAT_SEQNO, false, 0, false);
      int len = heartbeat.getSerializedSize();
      ByteBuf buf = alloc.buffer(len);
      heartbeat.putInBuffer(buf.nioBuffer(0, len));
      buf.writerIndex(len);
      ctx.channel().writeAndFlush(buf);
    }
    return;
  }
  super.userEventTriggered(ctx, evt);
}
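Since a heartbeat carries no data, the header is the entire packet body here. A minimal round-trip sketch with plain NIO buffers (HEART_BEAT_SEQNO is assumed to be -1, its value in HDFS; error handling omitted) shows that readFields recovers what putInBuffer wrote:

PacketHeader heartbeat = new PacketHeader(4, 0, -1 /* assumed HEART_BEAT_SEQNO */, false, 0, false);
ByteBuffer buf = ByteBuffer.allocate(heartbeat.getSerializedSize());
heartbeat.putInBuffer(buf); // advances buf's position by getSerializedSize()
buf.flip();

PacketHeader parsed = new PacketHeader();
parsed.readFields(buf); // declared to throw IOException on malformed input
assert parsed.getSeqno() == -1 && parsed.getDataLen() == 0;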
Example 2: writePacketHeader
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the class this method depends on

/**
 * Write packet header into {@code pkt},
 * return the length of the header written.
 */
private int writePacketHeader(ByteBuffer pkt, int dataLen, int packetLen) {
  pkt.clear();
  // both syncBlock and syncPacket are false
  PacketHeader header = new PacketHeader(packetLen, offset, seqno,
      (dataLen == 0), dataLen, false);
  int size = header.getSerializedSize();
  // Right-align the header within the maximum header area so that it ends
  // exactly where the packet payload begins.
  pkt.position(PacketHeader.PKT_MAX_HEADER_LEN - size);
  header.putInBuffer(pkt);
  return size;
}
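The pkt.position(PacketHeader.PKT_MAX_HEADER_LEN - size) call right-aligns the header inside a fixed-size header area: the serialized header always ends at PKT_MAX_HEADER_LEN, where the checksum and data bytes start, regardless of how long the header actually is. A hypothetical caller sketch (the buffer sizing and the chunk sizes are assumptions, not from the original code):

int dataLen = 512, checksumLen = 4;
int packetLen = 4 + checksumLen + dataLen; // same formula as Example 3 below
ByteBuffer pkt = ByteBuffer.allocate(PacketHeader.PKT_MAX_HEADER_LEN + checksumLen + dataLen);
int headerLen = writePacketHeader(pkt, dataLen, packetLen);
// After the call, pkt's position is PKT_MAX_HEADER_LEN; checksums and data
// are appended there. The packet on the wire then starts at headerStart:
int headerStart = PacketHeader.PKT_MAX_HEADER_LEN - headerLen;
pkt.position(headerStart);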
Example 3: flushBuffer
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; // import the class this method depends on

private void flushBuffer(CompletableFuture<Long> future, ByteBuf dataBuf,
    long nextPacketOffsetInBlock, boolean syncBlock) {
  int dataLen = dataBuf.readableBytes();
  int chunkLen = summer.getBytesPerChecksum();
  int trailingPartialChunkLen = dataLen % chunkLen;
  int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0);
  int checksumLen = numChecks * summer.getChecksumSize();
  ByteBuf checksumBuf = alloc.directBuffer(checksumLen);
  summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
  checksumBuf.writerIndex(checksumLen);
  PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock,
      nextPacketSeqno, false, dataLen, syncBlock);
  int headerLen = header.getSerializedSize();
  ByteBuf headerBuf = alloc.buffer(headerLen);
  header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
  headerBuf.writerIndex(headerLen);
  Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeList);
  waitingAckQueue.addLast(c);
  // recheck again after we pushed the callback to queue
  if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) {
    future.completeExceptionally(new IOException("stream already broken"));
    // it's the one we have just pushed or just a no-op
    waitingAckQueue.removeFirst();
    return;
  }
  // Fan the packet out to every datanode: header, then checksums, then data.
  datanodeList.forEach(ch -> {
    ch.write(headerBuf.retainedDuplicate());
    ch.write(checksumBuf.retainedDuplicate());
    ch.writeAndFlush(dataBuf.retainedDuplicate());
  });
  checksumBuf.release();
  headerBuf.release();
  dataBuf.release();
  nextPacketSeqno++;
}
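The forEach above fixes the wire order of a packet: header first, then checksums, then data. For reference, a standalone sketch that produces the same three pieces with plain NIO buffers; the CRC32C checksum type, the 512-byte chunk size, and the payload are assumptions for illustration:

import java.nio.ByteBuffer;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.util.DataChecksum;

DataChecksum summer = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
byte[] payload = new byte[1000]; // assumed payload
int numChunks = (payload.length + 512 - 1) / 512;
int checksumLen = numChunks * summer.getChecksumSize();

ByteBuffer dataBuf = ByteBuffer.wrap(payload);
ByteBuffer checksumBuf = ByteBuffer.allocate(checksumLen);
summer.calculateChunkedSums(dataBuf, checksumBuf); // one CRC per 512-byte chunk

PacketHeader header = new PacketHeader(4 + checksumLen + payload.length,
    0L /* offsetInBlock */, 0L /* seqno */, false, payload.length, false);
ByteBuffer headerBuf = ByteBuffer.allocate(header.getSerializedSize());
header.putInBuffer(headerBuf);
headerBuf.flip();
// Wire order, matching flushBuffer: headerBuf, then checksumBuf, then dataBuf.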