This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates. If you are unsure what AdminStates is for or how to use it, the examples curated below may help.
AdminStates is an enum nested inside the org.apache.hadoop.hdfs.protocol.DatanodeInfo class. Fifteen code examples are shown below, sorted by popularity by default.
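Before the examples, a quick orientation: AdminStates describes a datanode's administrative lifecycle as seen by the namenode. The sketch below is illustrative only; it uses the three enum constants exercised on this page (NORMAL, DECOMMISSION_INPROGRESS, DECOMMISSIONED) and assumes the standard DatanodeInfo accessors getAdminState() and getHostName() (newer Hadoop releases add further, maintenance-related states).

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;

public class AdminStatesSketch {
  // Classify a datanode by its administrative state.
  static String describe(DatanodeInfo dn) {
    AdminStates state = dn.getAdminState();
    switch (state) {
      case DECOMMISSIONED:
        return dn.getHostName() + " has been fully decommissioned";
      case DECOMMISSION_INPROGRESS:
        return dn.getHostName() + " is being decommissioned";
      default:
        return dn.getHostName() + " is in service (" + state + ")";
    }
  }
}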
Example 1: getLiveNodeCount
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
/**
 * Get the number of live datanodes.
 *
 * @param json JSON string that contains live node status.
 * @param nn namenode status to return information in
 */
private static void getLiveNodeCount(String json, NamenodeStatus nn)
    throws IOException {
  // Map of datanode host to (map of attribute name to value)
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  nn.liveDatanodeCount = nodeMap.size();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    // Inner map of attribute name to value
    Map<String, Object> innerMap = entry.getValue();
    if (innerMap != null) {
      // Constant-first comparison avoids an NPE if "adminState" is missing
      if (AdminStates.DECOMMISSIONED.toString()
          .equals(innerMap.get("adminState"))) {
        nn.liveDecomCount++;
      }
    }
  }
}
Example 2: getDecommissionNodeStatus
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
/**
 * Get the decommissioning datanode information.
 *
 * @param dataNodeStatusMap map with key being datanode, value being an
 *          inner map (key:namenode, value:decommissioning state).
 * @param host namenode host, used as the key of the inner status map
 * @param json JSON string returned from the namenode
 */
private static void getDecommissionNodeStatus(
    Map<String, Map<String, String>> dataNodeStatusMap, String host,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> decomming = new ArrayList<String>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    String dn = entry.getKey();
    decomming.add(dn);
    // nn-status: decommissioning state of this datanode per namenode
    Map<String, String> nnStatus = new HashMap<String, String>();
    if (dataNodeStatusMap.containsKey(dn)) {
      nnStatus = dataNodeStatusMap.get(dn);
    }
    nnStatus.put(host, AdminStates.DECOMMISSION_INPROGRESS.toString());
    // dn-nn-status
    dataNodeStatusMap.put(dn, nnStatus);
  }
}
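For illustration, after this helper has processed reports from two namenodes, an entry of dataNodeStatusMap would look like the following (all host:port values here are hypothetical):

// dataNodeStatusMap entry after processing two namenode reports:
//   "dn1.example.com:50010" -> {
//       "nn1.example.com:50070" -> "Decommission In Progress",
//       "nn2.example.com:50070" -> "Decommission In Progress" }
// assuming AdminStates.DECOMMISSION_INPROGRESS.toString() renders as
// "Decommission In Progress", the human-readable form used by HDFS.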
Example 3: createLocatedBlock
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
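As a usage sketch (the assertion style is assumed, not taken from the original test class), the block returned above could be verified like this:

// JUnit-style checks mirroring the constructor arguments above
// (assumes: import static org.junit.Assert.*;)
LocatedBlock lb = createLocatedBlock();
assertEquals(4, lb.getLocations().length);
assertEquals(AdminStates.DECOMMISSIONED, lb.getLocations()[1].getAdminState());
assertEquals(StorageType.SSD, lb.getStorageTypes()[1]);
assertEquals(5, lb.getStartOffset());
assertFalse(lb.isCorrupt());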
Example 4: createLocatedBlockNoStorageMedia
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Example 5: createLocatedBlockNoStorageMedia
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  lb.setStartOffset(5);
  return lb;
}
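Note the difference from Example 4: there the start offset (5) is passed directly to the LocatedBlock constructor, while this variant constructs the block first and then sets the offset via setStartOffset(5); the resulting blocks are equivalent for the purposes of these tests.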
Example 6: testClusterStats
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
public void testClusterStats(int numNameNodes) throws IOException,
    InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf);
  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);
    FSNamesystem fsn = cluster.getNamesystem(i);
    NameNode namenode = cluster.getNameNode(i);
    DatanodeInfo downnode = decommissionNode(i, null, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, downnode, true);
    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    refreshNodes(fsn, conf);
    DatanodeInfo ret = NameNodeAdapter.getDatanode(fsn, downnode);
    waitNodeState(ret, AdminStates.NORMAL);
    verifyStats(namenode, fsn, ret, false);
  }
}
Example 7: testClusterStats
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
public void testClusterStats(int numNameNodes, boolean federation)
    throws IOException, InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf, federation);
  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);
    NameNode namenode = cluster.getNameNode(i);
    FSNamesystem fsn = namenode.namesystem;
    DatanodeInfo downnode = decommissionNode(i, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, downnode, true);
    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    fsn.refreshNodes(conf);
    DatanodeInfo ret = fsn.getDatanode(downnode);
    waitNodeState(ret, AdminStates.NORMAL);
    verifyStats(namenode, fsn, ret, false);
  }
}
Example 8: getLiveNodeCount
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
/**
 * Process the JSON string returned from the connection to get the number
 * of live datanodes.
 *
 * @param json JSON output that contains live node status.
 * @param nn namenode status to return information in
 */
private static void getLiveNodeCount(String json, NamenodeStatus nn)
    throws IOException {
  // Map of datanode host to (map of attribute name to value)
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  nn.liveDatanodeCount = nodeMap.size();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    // Inner map of attribute name to value
    Map<String, Object> innerMap = entry.getValue();
    if (innerMap != null) {
      // Constant-first comparison avoids an NPE if "adminState" is missing
      if (AdminStates.DECOMMISSIONED.toString()
          .equals(innerMap.get("adminState"))) {
        nn.liveDecomCount++;
      }
      // Boolean.TRUE.equals(...) is null-safe and drops the redundant
      // "== true" of the original
      if (Boolean.TRUE.equals(innerMap.get("excluded"))) {
        nn.liveExcludeCount++;
      }
    }
  }
}
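Unlike Example 1, this variant also tracks nodes flagged "excluded" (typically, nodes listed in the namenode's exclude file) separately from decommissioned ones, incrementing liveExcludeCount alongside liveDecomCount.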
Example 9: getDecommissionNodeStatus
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
/**
 * Process the JSON string returned from HTTP or the local fsnamesystem
 * to get the decommissioning datanode information.
 *
 * @param dataNodeStatusMap map with key being datanode, value being an
 *          inner map (key:namenode, value:decommissioning state).
 * @param address namenode address, used as the key of the inner map
 * @param json JSON string returned
 */
private static void getDecommissionNodeStatus(
    Map<String, Map<String, String>> dataNodeStatusMap, String address,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> decomming = new ArrayList<String>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    String dn = entry.getKey();
    decomming.add(dn);
    // nn-status: decommissioning state of this datanode per namenode
    Map<String, String> nnStatus = new HashMap<String, String>();
    if (dataNodeStatusMap.containsKey(dn)) {
      nnStatus = dataNodeStatusMap.get(dn);
    }
    nnStatus.put(address, AdminStates.DECOMMISSION_INPROGRESS.toString());
    // dn-nn-status
    dataNodeStatusMap.put(dn, nnStatus);
  }
}
Example 10: toDatanodeInfo
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
/** Convert a JSON map to a DatanodeInfo object. */
private static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
  if (m == null) {
    return null;
  }
  return new DatanodeInfo(
      (String)m.get("ipAddr"),
      (String)m.get("hostName"),
      (String)m.get("storageID"),
      (int)(long)(Long)m.get("xferPort"),
      (int)(long)(Long)m.get("infoPort"),
      (int)(long)(Long)m.get("ipcPort"),
      (Long)m.get("capacity"),
      (Long)m.get("dfsUsed"),
      (Long)m.get("remaining"),
      (Long)m.get("blockPoolUsed"),
      (Long)m.get("lastUpdate"),
      (int)(long)(Long)m.get("xceiverCount"),
      (String)m.get("networkLocation"),
      AdminStates.valueOf((String)m.get("adminState")));
}
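Since toDatanodeInfo is private, the sketch below only illustrates the shape of the map it expects, as a JSON parser would typically produce it (integral fields arrive as Long and are narrowed to int where needed); all concrete values are hypothetical:

// Hypothetical input map for toDatanodeInfo; keys mirror the lookups above.
Map<String, Object> m = new HashMap<String, Object>();
m.put("ipAddr", "127.0.0.1");
m.put("hostName", "h1");
m.put("storageID", "s1");
m.put("xferPort", 50010L);   // ports parsed as Long, narrowed to int above
m.put("infoPort", 50075L);
m.put("ipcPort", 50020L);
m.put("capacity", 1024L);    // byte counters stay as Long
m.put("dfsUsed", 512L);
m.put("remaining", 512L);
m.put("blockPoolUsed", 256L);
m.put("lastUpdate", 0L);
m.put("xceiverCount", 1L);
m.put("networkLocation", "/default-rack");
// valueOf expects the enum constant name (e.g. "NORMAL"), not its
// human-readable toString() form
m.put("adminState", AdminStates.NORMAL.name());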
Example 11: getLiveNodeCount
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
/**
 * Get the number of live datanodes.
 *
 * @param json JSON string that contains live node status.
 * @param nn namenode status to return information in
 */
private static void getLiveNodeCount(String json, NamenodeStatus nn)
    throws IOException {
  // Map of datanode host to (map of attribute name to value)
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  nn.liveDatanodeCount = nodeMap.size();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    // Inner map of attribute name to value
    Map<String, Object> innerMap = entry.getValue();
    if (innerMap != null) {
      // Constant-first comparison avoids an NPE if "adminState" is missing
      if (AdminStates.DECOMMISSIONED.toString()
          .equals(innerMap.get("adminState"))) {
        nn.liveDecomCount++;
      }
    }
  }
}
Example 12: getDecommissionNodeStatus
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
/**
 * Get the decommissioning datanode information.
 *
 * @param dataNodeStatusMap map with key being datanode, value being an
 *          inner map (key:namenode, value:decommissioning state).
 * @param host namenode host, used as the key of the inner status map
 * @param json JSON string returned from the namenode
 */
private static void getDecommissionNodeStatus(
    Map<String, Map<String, String>> dataNodeStatusMap, String host,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> decomming = new ArrayList<String>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    String dn = entry.getKey();
    decomming.add(dn);
    // nn-status: decommissioning state of this datanode per namenode
    Map<String, String> nnStatus = new HashMap<String, String>();
    if (dataNodeStatusMap.containsKey(dn)) {
      nnStatus = dataNodeStatusMap.get(dn);
    }
    nnStatus.put(host, AdminStates.DECOMMISSION_INPROGRESS.toString());
    // dn-nn-status
    dataNodeStatusMap.put(dn, nnStatus);
  }
}
Example 13: createLocatedBlock
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Example 14: testClusterStats
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
public void testClusterStats(int numNameNodes) throws IOException,
    InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf);
  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);
    FSNamesystem fsn = cluster.getNamesystem(i);
    NameNode namenode = cluster.getNameNode(i);
    DatanodeInfo downnode = decommissionNode(i, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, downnode, true);
    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    refreshNodes(fsn, conf);
    DatanodeInfo ret = NameNodeAdapter.getDatanode(fsn, downnode);
    waitNodeState(ret, AdminStates.NORMAL);
    verifyStats(namenode, fsn, ret, false);
  }
}
Example 15: toDatanodeInfo
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; // import the required package/class
/**
 * Convert a JSON map to a DatanodeInfo object.
 */
private static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
  if (m == null) {
    return null;
  }
  return new DatanodeInfo((String) m.get("ipAddr"),
      (String) m.get("hostName"), (String) m.get("storageID"),
      (int) (long) (Long) m.get("xferPort"),
      (int) (long) (Long) m.get("infoPort"),
      (int) (long) (Long) m.get("ipcPort"),
      (Long) m.get("capacity"), (Long) m.get("dfsUsed"),
      (Long) m.get("remaining"), (Long) m.get("blockPoolUsed"),
      (Long) m.get("lastUpdate"), (int) (long) (Long) m.get("xceiverCount"),
      (String) m.get("networkLocation"),
      AdminStates.valueOf((String) m.get("adminState")));
}