本文整理汇总了Java中org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates.DECOMMISSION_INPROGRESS属性的典型用法代码示例。如果您正苦于以下问题:Java AdminStates.DECOMMISSION_INPROGRESS属性的具体用法?Java AdminStates.DECOMMISSION_INPROGRESS怎么用?Java AdminStates.DECOMMISSION_INPROGRESS使用的例子?那么,这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates的用法示例。
在下文中一共展示了AdminStates.DECOMMISSION_INPROGRESS属性的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: convert
/**
 * Translates a protobuf {@code AdminState} value into the corresponding
 * {@code DatanodeInfo.AdminStates} enum constant.
 *
 * @param adminState the protobuf admin state to translate
 * @return the matching {@code AdminStates} constant; both {@code NORMAL}
 *         and any unrecognized value map to {@code AdminStates.NORMAL}
 */
public static AdminStates convert(AdminState adminState) {
  // Enum identity comparison is safe and idiomatic for protobuf enums.
  if (adminState == AdminState.DECOMMISSION_INPROGRESS) {
    return AdminStates.DECOMMISSION_INPROGRESS;
  }
  if (adminState == AdminState.DECOMMISSIONED) {
    return AdminStates.DECOMMISSIONED;
  }
  // NORMAL and any future/unknown state both fall back to NORMAL,
  // matching the original switch's default branch.
  return AdminStates.NORMAL;
}
示例2: convert
/**
 * Converts a protobuf {@code AdminState} into the equivalent
 * {@code DatanodeInfo.AdminStates} value.
 *
 * @param adminState the protobuf admin state received over the wire
 * @return the corresponding {@code AdminStates} constant; unmatched
 *         inputs (including {@code NORMAL}) yield {@code AdminStates.NORMAL}
 */
public static AdminStates convert(AdminState adminState) {
  final AdminStates mapped;
  switch (adminState) {
    case DECOMMISSIONED:
      mapped = AdminStates.DECOMMISSIONED;
      break;
    case DECOMMISSION_INPROGRESS:
      mapped = AdminStates.DECOMMISSION_INPROGRESS;
      break;
    default:
      // Covers NORMAL plus any state added later on the protobuf side.
      mapped = AdminStates.NORMAL;
      break;
  }
  return mapped;
}
示例3: nodeUsageVerification
/**
 * Verifies the NameNode's reported minimum node usage (the "min" entry of the
 * "nodeUsage" map in {@code FSNamesystem#getNodeUsage()} JSON) across datanode
 * decommission and recommission transitions.
 *
 * Flow: start a simulated MiniDFSCluster, optionally decommission a node
 * (either before writing, for DECOMMISSIONED, or after, for
 * DECOMMISSION_INPROGRESS), write a single-replica file, check the reported
 * min usage, then recommission and re-check.
 *
 * @param numDatanodes      number of datanodes to start in the cluster
 * @param nodesCapacity     simulated capacity for each datanode
 * @param decommissionState the admin state to drive the node through; only
 *                          DECOMMISSIONED and DECOMMISSION_INPROGRESS take the
 *                          state-specific branches below
 * @throws IOException          on cluster/file-system failures
 * @throws InterruptedException if the heartbeat wait sleep is interrupted
 */
// "unchecked" is needed for the raw Map returned by JSON.parse below.
@SuppressWarnings({ "unchecked" })
public void nodeUsageVerification(int numDatanodes, long[] nodesCapacity,
AdminStates decommissionState) throws IOException, InterruptedException {
Map<String, Map<String, String>> usage = null;
DatanodeInfo decommissionedNodeInfo = null;
String zeroNodeUsage = "0.00%";
// Replication 1 so the file lands on exactly one live node; short heartbeat
// and recheck intervals make decommission state changes visible quickly.
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
FileSystem fileSys = null;
Path file1 = new Path("testNodeUsage.dat");
try {
// Simulated dataset avoids real disk I/O; capacities come from the caller.
SimulatedFSDataset.setFactory(conf);
cluster =
new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1))
.numDataNodes(numDatanodes)
.simulatedCapacities(nodesCapacity).build();
cluster.waitActive();
DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
validateCluster(client, numDatanodes);
ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
new ArrayList<ArrayList<DatanodeInfo>>(1);
namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(numDatanodes));
if (decommissionState == AdminStates.DECOMMISSIONED) {
// Move datanode1 to Decommissioned state
// Decommission BEFORE writing so the file is placed on a remaining node.
ArrayList<DatanodeInfo> decommissionedNode = namenodeDecomList.get(0);
decommissionedNodeInfo = decommissionNode(0, null,
decommissionedNode, decommissionState);
}
// Write a file(replica 1).Hence will be written to only one live node.
fileSys = cluster.getFileSystem(0);
FSNamesystem ns = cluster.getNamesystem(0);
writeFile(fileSys, file1, 1);
// Allow heartbeats to propagate updated usage to the NameNode.
// NOTE(review): fixed 2s sleep — presumably long enough given the 1s
// heartbeat interval configured above, but inherently timing-sensitive.
Thread.sleep(2000);
// min NodeUsage should not be 0.00%
usage = (Map<String, Map<String, String>>) JSON.parse(ns.getNodeUsage());
String minUsageBeforeDecom = usage.get("nodeUsage").get("min");
assertTrue(!minUsageBeforeDecom.equalsIgnoreCase(zeroNodeUsage));
if (decommissionState == AdminStates.DECOMMISSION_INPROGRESS) {
// Start decommissioning datanode
// Decommission AFTER writing: the node holding the replica goes into
// DECOMMISSION_INPROGRESS and should drop out of the usage report.
ArrayList<DatanodeInfo> decommissioningNodes = namenodeDecomList.
get(0);
decommissionedNodeInfo = decommissionNode(0, null,
decommissioningNodes, decommissionState);
// NodeUsage should not include DECOMMISSION_INPROGRESS node
// (minUsage should be 0.00%)
usage = (Map<String, Map<String, String>>)
JSON.parse(ns.getNodeUsage());
assertTrue(usage.get("nodeUsage").get("min").
equalsIgnoreCase(zeroNodeUsage));
}
// Recommission node
recommissionNode(0, decommissionedNodeInfo);
usage = (Map<String, Map<String, String>>) JSON.parse(ns.getNodeUsage());
// After recommission: the in-progress case recovers its pre-decommission
// usage; the decommissioned case's node re-enters the report empty (0.00%).
String nodeusageAfterRecommi =
decommissionState == AdminStates.DECOMMISSION_INPROGRESS
? minUsageBeforeDecom
: zeroNodeUsage;
assertTrue(usage.get("nodeUsage").get("min").
equalsIgnoreCase(nodeusageAfterRecommi));
} finally {
// Always clean up the test file and tear down the mini cluster.
cleanupFile(fileSys, file1);
cluster.shutdown();
}
}