本文整理汇总了Java中org.apache.hadoop.hdfs.protocol.DataTransferProtocol类的典型用法代码示例。如果您正苦于以下问题:Java DataTransferProtocol类的具体用法?Java DataTransferProtocol怎么用?Java DataTransferProtocol使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
DataTransferProtocol类属于org.apache.hadoop.hdfs.protocol包,在下文中一共展示了DataTransferProtocol类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: replaceBlock
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
 * Sends an OP_REPLACE_BLOCK request for {@code block} to {@code destination},
 * asking it to copy the replica from {@code sourceProxy} and to delete the
 * replica identified by {@code source}'s storage ID.
 *
 * @param block       block to replace
 * @param source      datanode whose replica should be removed after the copy
 * @param sourceProxy datanode to stream the replica from
 * @param destination datanode that receives the request and the new replica
 * @param namespaceId namespace the block belongs to (federation-era protocol)
 * @return true iff the datanode replied with OP_STATUS_SUCCESS
 * @throws IOException on connection or protocol errors
 */
private boolean replaceBlock( Block block, DatanodeInfo source,
DatanodeInfo sourceProxy, DatanodeInfo destination, int namespaceId) throws IOException {
Socket sock = new Socket();
try {
  sock.connect(NetUtils.createSocketAddr(
      destination.getName()), HdfsConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // sendRequest: version, opcode, then the replace-block payload
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
  out.writeByte(DataTransferProtocol.OP_REPLACE_BLOCK);
  out.writeInt(namespaceId);
  out.writeLong(block.getBlockId());
  out.writeLong(block.getGenerationStamp());
  Text.writeString(out, source.getStorageID());
  sourceProxy.write(out);
  out.flush();
  // receiveResponse: a single status short
  DataInputStream reply = new DataInputStream(sock.getInputStream());
  return reply.readShort() == DataTransferProtocol.OP_STATUS_SUCCESS;
} finally {
  // The original version leaked the socket; always release it.
  sock.close();
}
}
示例2: closeBlockReader
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
* Close the given BlockReader and cache its socket.
*/
/**
 * Releases a finished {@link BlockReader}, either caching its socket for
 * later reuse or closing it, then closes the reader itself.
 *
 * The socket is cached only when the caller requested reuse AND the peer
 * speaks a protocol version that supports read-connection reuse; the reader
 * must also have sent its status code, otherwise no socket is taken at all.
 */
private void closeBlockReader(BlockReader reader, boolean reuseConnection)
throws IOException {
if (reader.hasSentStatusCode()) {
  Socket sock = reader.takeSocket();
  boolean cacheable = reuseConnection
      && dfsClient.getDataTransferProtocolVersion()
          >= DataTransferProtocol.READ_REUSE_CONNECTION_VERSION;
  if (cacheable) {
    socketCache.put(sock);
  } else if (sock != null) {
    // Old datanode (or caller opted out): drop the connection.
    IOUtils.closeSocket(sock);
  }
}
reader.close();
}
示例3: readBlockSizeInfo
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
* Read the block length information from data stream
*
* @throws IOException
*/
/**
 * Reads the block-length update (finalized flag followed by the current
 * length) from the data stream, and — on protocol versions that support it —
 * the datanode profiling payload that follows.
 *
 * No-op when the peer does not transfer block-size info at all.
 *
 * @throws IOException if the stream cannot be read
 */
private synchronized void readBlockSizeInfo() throws IOException {
if (transferBlockSize) {
  blkLenInfoUpdated = true;
  // Read order matters: boolean flag first, then the 8-byte length.
  isBlockFinalized = in.readBoolean();
  updatedBlockLength = in.readLong();
  if (dataTransferVersion >= DataTransferProtocol.READ_PROFILING_VERSION) {
    readDataNodeProfilingData();
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("ifBlockComplete? " + isBlockFinalized + " block size: "
        + updatedBlockLength);
  }
}
}
示例4: createLocatedBlocks
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
 * Wraps the given block list into the {@link LocatedBlocks} flavor requested
 * by {@code type}: with protocol version + namespace id, with version only,
 * or the plain legacy form.
 */
LocatedBlocks createLocatedBlocks(List<LocatedBlock> blocks,
BlockMetaInfoType type,int namespaceid, int methodsFingerprint) {
// Exactly one branch runs, so hoisting these preserves call counts.
final long length = computeContentSummary().getLength();
final boolean underConstruction = isUnderConstruction();
if (type == BlockMetaInfoType.VERSION_AND_NAMESPACEID) {
  return new LocatedBlocksWithMetaInfo(length, blocks, underConstruction,
      DataTransferProtocol.DATA_TRANSFER_VERSION, namespaceid,
      methodsFingerprint);
}
if (type == BlockMetaInfoType.VERSION) {
  return new VersionedLocatedBlocks(length, blocks, underConstruction,
      DataTransferProtocol.DATA_TRANSFER_VERSION);
}
return new LocatedBlocks(length, blocks, underConstruction);
}
示例5: replaceBlock
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
 * Sends an OP_REPLACE_BLOCK request (with a dummy block access token) for
 * {@code block} to {@code destination}, naming {@code sourceProxy} as the
 * copy source and {@code source}'s storage ID as the replica to delete.
 *
 * @return true iff the datanode replied with OP_STATUS_SUCCESS
 * @throws IOException on connection or protocol errors
 */
private boolean replaceBlock( Block block, DatanodeInfo source,
DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
Socket sock = new Socket();
try {
  sock.connect(NetUtils.createSocketAddr(
      destination.getName()), HdfsConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // sendRequest: version, opcode, block id/genstamp, source id, proxy, token
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
  out.writeByte(DataTransferProtocol.OP_REPLACE_BLOCK);
  out.writeLong(block.getBlockId());
  out.writeLong(block.getGenerationStamp());
  Text.writeString(out, source.getStorageID());
  sourceProxy.write(out);
  BlockTokenSecretManager.DUMMY_TOKEN.write(out);
  out.flush();
  // receiveResponse: a single status short
  DataInputStream reply = new DataInputStream(sock.getInputStream());
  return reply.readShort() == DataTransferProtocol.OP_STATUS_SUCCESS;
} finally {
  // The original version leaked the socket; always release it.
  sock.close();
}
}
示例6: testWrite
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
 * Drives one opWriteBlock request through the send/receive harness and
 * checks the exchange against the expected outcome.
 *
 * @param block       block to write
 * @param stage       pipeline construction stage for the request
 * @param newGS       new generation stamp to send
 * @param description label used by sendRecvData when reporting
 * @param eofExcepted whether the datanode is expected to reject the request
 */
private void testWrite(Block block, BlockConstructionStage stage, long newGS,
String description, Boolean eofExcepted) throws IOException {
sendBuf.reset();
recvBuf.reset();
DataTransferProtocol.Sender.opWriteBlock(sendOut, block, 0, stage, newGS,
    block.getNumBytes(), block.getNumBytes(), "cl", null,
    new DatanodeInfo[1], BlockTokenSecretManager.DUMMY_TOKEN);
if (eofExcepted) {
  // Failure expected: datanode answers ERROR and the exchange should EOF.
  ERROR.write(recvOut);
  sendRecvData(description, true);
  return;
}
if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
  // Close-recovery succeeds immediately with a zero-length block.
  SUCCESS.write(recvOut);
  Text.writeString(recvOut, ""); // first bad node: none
  sendRecvData(description, false);
  return;
}
// Normal path: complete the pipeline with an empty packet.
writeZeroLengthPacket(block, description);
}
示例7: replaceBlock
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
 * Sends a REPLACE_BLOCK request (enum-op protocol variant, with a dummy
 * block access token) for {@code block} to {@code destination} and reads
 * back the status.
 *
 * @return true iff the datanode replied with {@code SUCCESS}
 * @throws IOException on connection or protocol errors
 */
private boolean replaceBlock( Block block, DatanodeInfo source,
DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
Socket sock = new Socket();
try {
  sock.connect(NetUtils.createSocketAddr(
      destination.getName()), HdfsConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // sendRequest: version, opcode, block id/genstamp, source id, proxy, token
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
  REPLACE_BLOCK.write(out);
  out.writeLong(block.getBlockId());
  out.writeLong(block.getGenerationStamp());
  Text.writeString(out, source.getStorageID());
  sourceProxy.write(out);
  BlockTokenSecretManager.DUMMY_TOKEN.write(out);
  out.flush();
  // receiveResponse: a single Status value
  DataInputStream reply = new DataInputStream(sock.getInputStream());
  return DataTransferProtocol.Status.read(reply) == SUCCESS;
} finally {
  // The original version leaked the socket; always release it.
  sock.close();
}
}
示例8: replaceBlock
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
 * Sends an OP_REPLACE_BLOCK request (pre-token protocol variant) for
 * {@code block} to {@code destination}, naming {@code sourceProxy} as the
 * copy source and {@code source}'s storage ID as the replica to delete.
 *
 * @return true iff the datanode replied with OP_STATUS_SUCCESS
 * @throws IOException on connection or protocol errors
 */
private boolean replaceBlock( Block block, DatanodeInfo source,
DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
Socket sock = new Socket();
try {
  sock.connect(NetUtils.createSocketAddr(
      destination.getName()), HdfsConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // sendRequest: version, opcode, block id/genstamp, source id, proxy
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
  out.writeByte(DataTransferProtocol.OP_REPLACE_BLOCK);
  out.writeLong(block.getBlockId());
  out.writeLong(block.getGenerationStamp());
  Text.writeString(out, source.getStorageID());
  sourceProxy.write(out);
  out.flush();
  // receiveResponse: a single status short
  DataInputStream reply = new DataInputStream(sock.getInputStream());
  return reply.readShort() == DataTransferProtocol.OP_STATUS_SUCCESS;
} finally {
  // The original version leaked the socket; always release it.
  sock.close();
}
}
示例9: register
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
 * Registers this (simulated) datanode with the namenode: fetches the
 * namespace info, seeds the registration with fresh storage info and a
 * newly generated storage ID, then performs the register RPC.
 *
 * @throws IOException if any namenode RPC fails
 */
void register() throws IOException {
// Fetch version/namespace info from the namenode first.
nsInfo = nameNode.versionRequest();
dnRegistration.setStorageInfo(new DataStorage(nsInfo, "", null), "");
dnRegistration.setStorageID(
    DataNode.createNewStorageId(dnRegistration.getPort()));
// Register; the namenode may hand back an updated registration object.
dnRegistration = nameNode.register(dnRegistration,
    DataTransferProtocol.DATA_TRANSFER_VERSION);
}
示例10: sendRequest
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
* Send a block replace request to the output stream
*/
/**
 * Writes a block-replace request (version+opcode header followed by the
 * replace-block payload) to the given output stream and flushes it.
 */
private void sendRequest(DataOutputStream out) throws IOException {
VersionAndOpcode versionAndOp = new VersionAndOpcode(
    dataTransferProtocolVersion, DataTransferProtocol.OP_REPLACE_BLOCK);
ReplaceBlockHeader header = new ReplaceBlockHeader(versionAndOp);
Block b = block.getBlock();
header.set(namespaceId, b.getBlockId(), b.getGenerationStamp(),
    source.getStorageID(), proxySource);
// Header wire format: version+opcode first, then the payload fields.
header.writeVersionAndOpCode(out);
header.write(out);
out.flush();
}
示例11: receiveResponse
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
* Receive a block copy response from the input stream
*/
/**
 * Reads the block-move status from the input stream.
 *
 * @param in stream positioned at the datanode's status reply
 * @throws IOException if the reply is anything but OP_STATUS_SUCCESS
 *                     (or if the stream cannot be read)
 */
private void receiveResponse(DataInputStream in) throws IOException {
short status = in.readShort();
if (status != DataTransferProtocol.OP_STATUS_SUCCESS) {
  // Message fixed from the ungrammatical "block move is failed".
  throw new IOException("block move failed");
}
}
示例12: register
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
* Register standby with this primary
*/
@Override
public int register() throws IOException {
enforceActive("Standby can only register with active namenode");
verifyCheckpointerAddress();
return DataTransferProtocol.DATA_TRANSFER_VERSION;
}
示例13: updateDataTransferProtocolVersionIfNeeded
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
 * Negotiates the effective data transfer protocol version down to the
 * lower of the remote peer's version and our own, updating the cached
 * {@code dataTransferVersion} if it changed.
 *
 * @param remoteDataTransferVersion protocol version reported by the peer
 */
void updateDataTransferProtocolVersionIfNeeded(int remoteDataTransferVersion) {
// If the client is newer than the server, fall back to the server's
// version; otherwise keep ours. Equivalent to taking the minimum.
final int newDataTransferVersion = Math.min(
    remoteDataTransferVersion, DataTransferProtocol.DATA_TRANSFER_VERSION);
// Lock on a stable monitor. The previous code synchronized on the boxed
// dataTransferVersion field itself, which is reassigned inside the block,
// so two threads could hold "the lock" on different Integer objects and
// race on the update.
synchronized (this) {
  if (dataTransferVersion != newDataTransferVersion) {
    dataTransferVersion = newDataTransferVersion;
  }
}
}
示例14: getOutPacketVersion
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
 * Chooses the packet version for outgoing packets: the preferred version
 * when the peer understands packet-version headers, otherwise the legacy
 * checksum-first layout that pre-packet-version servers expect.
 */
int getOutPacketVersion() throws IOException {
return ifPacketIncludeVersion()
    ? this.preferredPacketVersion
    : DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST;
}
示例15: getHeartbeatPacket
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol; //导入依赖的package包/类
/**
 * Builds a heartbeat packet in the requested wire format.
 *
 * Checksum-first packets are always legal; inline-checksum packets are
 * only valid when the peer supports packet-version headers, otherwise an
 * IOException is thrown.
 */
static DFSOutputStreamPacket getHeartbeatPacket(
DFSOutputStream dfsOutputStream, boolean includePktVersion,
int packetVersion) throws IOException {
// Check the packet version FIRST: checksum-first is fine even when the
// peer doesn't send packet versions, so this must not fall through to
// the includePktVersion check below.
if (packetVersion == DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST) {
  return new DFSOutputStreamPacketNonInlineChecksum(dfsOutputStream);
}
if (!includePktVersion) {
  throw new IOException(
      "Older version doesn't support inline checksum packet format.");
}
return new DFSOutputStreamPacketInlineChecksum(dfsOutputStream);
}