This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.RegisterCommand. If you are wondering what RegisterCommand is for and how to use it, the curated examples below may help.
The RegisterCommand class belongs to the org.apache.hadoop.hdfs.server.protocol package. Five code examples are shown below, ordered by popularity.
Example 1: convert

import org.apache.hadoop.hdfs.server.protocol.RegisterCommand; // import of the required package/class

public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
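As a usage illustration (not part of the original page), here is a minimal round-trip sketch. It assumes the builder and field names generated from HDFS's DatanodeProtocol.proto (DatanodeCommandProto.Type, setRegisterCmd, RegisterCommandProto), so treat the exact names as assumptions rather than a verified API:

// Build a register-type command proto and convert it back. RegisterCommand
// carries no payload, which is why convert can return the shared REG_CMD
// instance instead of allocating a new object.
DatanodeCommandProto proto = DatanodeCommandProto.newBuilder()
    .setCmdType(DatanodeCommandProto.Type.RegisterCommand)
    .setRegisterCmd(RegisterCommandProto.newBuilder().build())
    .build();
DatanodeCommand cmd = PBHelper.convert(proto);
// The resulting command is identified by its action code.
assert cmd.getAction() == DatanodeProtocol.DNA_REGISTER;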
Example 2: convert

import org.apache.hadoop.hdfs.server.protocol.RegisterCommand; // import of the required package/class

public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  case BlockECRecoveryCommand:
    return PBHelper.convert(proto.getBlkECRecoveryCmd());
  default:
    return null;
  }
}
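Example 2 is the same conversion routine as Example 1 with one extra case, BlockECRecoveryCommand, which dispatches erasure-coding block recovery work; it appears in Hadoop lines that include the HDFS erasure-coding feature (an assumption based on the command's name). The RegisterCommand handling is unchanged.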
Example 3: convert

import org.apache.hadoop.hdfs.server.protocol.RegisterCommand; // import of the required package/class

public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  }
  return null;
}
Example 4: testDeadDatanode

import org.apache.hadoop.hdfs.server.protocol.RegisterCommand; // import of the required package/class

/**
 * Test to ensure the namenode rejects requests from a dead datanode:
 * - Start a cluster.
 * - Shut down the datanode and wait for it to be marked dead at the namenode.
 * - Send datanode requests to the namenode and make sure they are rejected
 *   appropriately.
 */
@Test
public void testDeadDatanode() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();

  String poolId = cluster.getNamesystem().getBlockPoolId();
  // Wait for the datanode to be marked live.
  DataNode dn = cluster.getDataNodes().get(0);
  DatanodeRegistration reg = DataNodeTestUtils.getDNRegistrationForBP(
      cluster.getDataNodes().get(0), poolId);
  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);

  // Shut down and wait for the datanode to be marked dead.
  dn.shutdown();
  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);

  DatanodeProtocol dnp = cluster.getNameNodeRpc();
  ReceivedDeletedBlockInfo[] blocks = { new ReceivedDeletedBlockInfo(
      new Block(0),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
      null) };
  StorageReceivedDeletedBlocks[] storageBlocks = {
      new StorageReceivedDeletedBlocks(reg.getDatanodeUuid(), blocks) };

  // The blockReceived call from the dead datanode is not rejected with an
  // IOException, since it is async, but the node remains unregistered.
  dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
  BlockManager bm = cluster.getNamesystem().getBlockManager();
  // IBRs are async; make sure the NN processes all of them.
  bm.flushBlockOps();
  assertFalse(bm.getDatanodeManager().getDatanode(reg).isRegistered());

  // Ensure a blockReport from the dead datanode is rejected with an IOException.
  StorageBlockReport[] report = { new StorageBlockReport(
      new DatanodeStorage(reg.getDatanodeUuid()),
      BlockListAsLongs.EMPTY) };
  try {
    dnp.blockReport(reg, poolId, report,
        new BlockReportContext(1, 0, System.nanoTime(), 0L));
    fail("Expected IOException is not thrown");
  } catch (IOException ex) {
    // Expected.
  }

  // Ensure a heartbeat from the dead datanode is answered with a command
  // that asks the datanode to register again.
  StorageReport[] rep = { new StorageReport(
      new DatanodeStorage(reg.getDatanodeUuid()),
      false, 0, 0, 0, 0) };
  DatanodeCommand[] cmd =
      dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
  assertEquals(1, cmd.length);
  assertEquals(RegisterCommand.REGISTER.getAction(), cmd[0].getAction());
}
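One detail worth spelling out from the final assertion: RegisterCommand is stateless, so HDFS exposes it as the shared RegisterCommand.REGISTER instance, and commands are compared by their action code rather than by identity. A minimal sketch, relying only on names the test itself uses plus the DatanodeProtocol.DNA_REGISTER constant:

// RegisterCommand.REGISTER is a shared, stateless instance; its action code
// is what identifies it in a heartbeat response.
DatanodeCommand expected = RegisterCommand.REGISTER;
assert expected.getAction() == DatanodeProtocol.DNA_REGISTER;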
Example 5: handleHeartbeat

import org.apache.hadoop.hdfs.server.protocol.RegisterCommand; // import of the required package/class

/**
 * Handle a heartbeat from a datanode.
 */
public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
    final String blockPoolId, long capacity, long dfsUsed, long remaining,
    long blockPoolUsed, int xceiverCount, int maxTransfers, int failedVolumes)
    throws IOException {
  synchronized (heartbeatManager) {
    synchronized (datanodeMap) {
      DatanodeDescriptor nodeinfo = null;
      try {
        nodeinfo = getDatanode(nodeReg);
      } catch (UnregisteredNodeException e) {
        return new DatanodeCommand[]{RegisterCommand.REGISTER};
      }

      // Check if this datanode should actually be shut down instead.
      if (nodeinfo != null && nodeinfo.isDisallowed()) {
        setDatanodeDead(nodeinfo);
        throw new DisallowedDatanodeException(nodeinfo);
      }

      if (nodeinfo == null || !nodeinfo.isAlive) {
        return new DatanodeCommand[]{RegisterCommand.REGISTER};
      }

      heartbeatManager.updateHeartbeat(nodeinfo, capacity, dfsUsed, remaining,
          blockPoolUsed, xceiverCount, failedVolumes);

      // Check lease recovery.
      BlockInfoUnderConstruction[] blocks =
          nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
      if (blocks != null) {
        BlockRecoveryCommand brCommand =
            new BlockRecoveryCommand(blocks.length);
        for (BlockInfoUnderConstruction b : blocks) {
          brCommand.add(new RecoveringBlock(new ExtendedBlock(blockPoolId, b),
              getDataNodeDescriptorsTx(b), b.getBlockRecoveryId()));
        }
        return new DatanodeCommand[]{brCommand};
      }

      final List<DatanodeCommand> cmds = new ArrayList<>();
      // Check pending replication.
      List<BlockTargetPair> pendingList =
          nodeinfo.getReplicationCommand(maxTransfers);
      if (pendingList != null) {
        cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
            pendingList));
      }
      // Check block invalidation.
      Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
      if (blks != null) {
        cmds.add(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, blockPoolId,
            blks));
      }
      blockManager.addKeyUpdateCommand(cmds, nodeinfo);

      // Check for a balancer bandwidth update.
      if (nodeinfo.getBalancerBandwidth() > 0) {
        cmds.add(new BalancerBandwidthCommand(nodeinfo.getBalancerBandwidth()));
        // Set back to 0 to indicate that the datanode has been sent the new value.
        nodeinfo.setBalancerBandwidth(0);
      }

      if (!cmds.isEmpty()) {
        return cmds.toArray(new DatanodeCommand[cmds.size()]);
      }
    }
  }
  return new DatanodeCommand[0];
}
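To close the loop, a hedged sketch of the datanode side (not shown on this page): when a heartbeat response contains a RegisterCommand, the datanode is expected to re-register before sending anything else. The reRegister() helper below is an assumed name for illustration, not the actual BPServiceActor API:

// Hypothetical caller-side handling of heartbeat commands.
void processCommands(DatanodeCommand[] cmds) throws IOException {
  for (DatanodeCommand cmd : cmds) {
    if (cmd != null && cmd.getAction() == DatanodeProtocol.DNA_REGISTER) {
      // The namenode no longer recognizes this node: register again
      // before sending further heartbeats or block reports.
      reRegister(); // assumed helper, for illustration only
    }
  }
}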