This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.getCapacity. If you are wondering what DatanodeDescriptor.getCapacity does and how to use it in practice, the selected code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.
The following shows three code examples of the DatanodeDescriptor.getCapacity method, ordered by popularity by default.
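Before the examples, a minimal sketch of what the method reports: getCapacity() returns a datanode's total raw capacity in bytes. The helper below is hypothetical (it does not appear in the examples) and assumes a DatanodeDescriptor obtained elsewhere, e.g. via FSNamesystem in a MiniDFSCluster test.

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;

// Hypothetical helper: the DFS-used percentage of one datanode.
private static double dfsUsedPercent(DatanodeDescriptor dn) {
  long capacity = dn.getCapacity();  // total raw capacity in bytes
  if (capacity == 0) {
    return 0.0;  // node has not reported any capacity yet
  }
  return 100.0 * dn.getDfsUsed() / capacity;
}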
Example 1: updateDatanodeMap
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the package/class the method depends on
/**
 * Hacks namenode and datanode data structures so that datanodes are
 * identified by machine name rather than by IP address. This is done
 * because a unit test can give each datanode a different hostname, but
 * not a different IP address.
 *
 * @param cluster
 *          the {@link MiniDFSCluster} to operate on
 * @throws Exception
 */
private static void updateDatanodeMap(MiniDFSCluster cluster)
    throws Exception {
  FSNamesystem namesystem = cluster.getNameNode().namesystem;
  for (DataNode node : cluster.getDataNodes()) {
    // Get old descriptor.
    DatanodeID dnId = createDataNodeID(node);
    DatanodeDescriptor dnDs = namesystem.getDatanode(dnId);
    // Create new id and descriptor.
    DatanodeID newId = new DatanodeID(node.getMachineName(),
        dnDs.getStorageID(), dnDs.getInfoPort(), dnDs.getIpcPort());
    DatanodeDescriptor newDS = new DatanodeDescriptor(newId,
        dnDs.getNetworkLocation(), dnDs.getHostName(), dnDs.getCapacity(),
        dnDs.getDfsUsed(), dnDs.getRemaining(), dnDs.getNamespaceUsed(),
        dnDs.getXceiverCount());
    newDS.isAlive = true;
    // Overwrite NN maps with the new descriptor.
    namesystem.writeLock();
    namesystem.clusterMap.remove(dnDs);
    namesystem.resolveNetworkLocation(newDS);
    namesystem.unprotectedAddDatanode(newDS);
    namesystem.clusterMap.add(newDS);
    namesystem.writeUnlock();
    // Overwrite DN map with the new registration.
    node.setRegistrationName(node.getMachineName());
  }
}
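A sketch of how this utility might be driven from a test, assuming the classic MiniDFSCluster constructor; the node count and the surrounding test body are illustrative assumptions.

// Hypothetical usage of updateDatanodeMap in a unit test.
MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
try {
  cluster.waitActive();
  updateDatanodeMap(cluster); // re-key NN maps by machine name
  // ... assertions that rely on hostname-based datanode identity ...
} finally {
  cluster.shutdown();
}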
Example 2: getLiveDatanodeCapacity
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the package/class the method depends on
/** Sums the raw capacity, in bytes, of all live datanodes known to the namenode. */
public static long getLiveDatanodeCapacity(FSNamesystem ns) {
  ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
  ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
  ns.DFSNodesStatus(live, dead);  // fills the live and dead node lists
  long capacity = 0;
  for (final DatanodeDescriptor dn : live) {
    capacity += dn.getCapacity();
  }
  return capacity;
}
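A sketch of a typical call site for this helper, e.g. checking that the reported cluster capacity drops after a datanode is stopped; the setup and the assertion are illustrative assumptions.

// Hypothetical usage: capacity before and after removing a datanode.
FSNamesystem ns = cluster.getNameNode().namesystem;
long origCapacity = getLiveDatanodeCapacity(ns);
cluster.stopDataNode(0);  // take one datanode away
// ... wait for the namenode to mark the node dead ...
long newCapacity = getLiveDatanodeCapacity(ns);
assertTrue("capacity should shrink", newCapacity < origCapacity);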
Example 3: waitForDatanodeStatus
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the package/class the method depends on
/**
 * Polls the namenode until the live/dead node counts, volume-failure count,
 * and total live capacity match the expected values, sleeping {@code timeout}
 * milliseconds between attempts. Throws {@link TimeoutException} after
 * {@code ATTEMPTS} unsuccessful attempts.
 */
public static void waitForDatanodeStatus(FSNamesystem ns, int expectedLive,
    int expectedDead, long expectedVolFails, long expectedTotalCapacity,
    long timeout) throws InterruptedException, TimeoutException {
  ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
  ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
  final int ATTEMPTS = 10;
  int count = 0;
  long currTotalCapacity = 0;
  int volFails = 0;
  do {
    Thread.sleep(timeout);
    live.clear();
    dead.clear();
    ns.DFSNodesStatus(live, dead);
    currTotalCapacity = 0;
    volFails = 0;
    for (final DatanodeDescriptor dd : live) {
      currTotalCapacity += dd.getCapacity();
      volFails += dd.getVolumeFailures();
    }
    count++;
  } while ((expectedLive != live.size() ||
            expectedDead != dead.size() ||
            expectedTotalCapacity != currTotalCapacity ||
            expectedVolFails != volFails)
           && count < ATTEMPTS);
  if (count == ATTEMPTS) {
    throw new TimeoutException("Timed out waiting for capacity."
        + " Live = " + live.size() + " Expected = " + expectedLive
        + " Dead = " + dead.size() + " Expected = " + expectedDead
        + " Total capacity = " + currTotalCapacity
        + " Expected = " + expectedTotalCapacity
        + " Vol Fails = " + volFails + " Expected = " + expectedVolFails);
  }
}
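A sketch of how the two helpers combine, assuming a hypothetical volumeCapacity value for the capacity lost with one failed volume; all concrete numbers are illustrative assumptions.

// Hypothetical usage: poll until one volume failure is reflected
// on a 3-datanode cluster, sleeping 1000 ms between attempts.
long origCapacity = getLiveDatanodeCapacity(ns);
// ... make one data directory unwritable so a volume fails ...
waitForDatanodeStatus(ns, 3 /* live */, 0 /* dead */, 1 /* vol fails */,
    origCapacity - volumeCapacity /* hypothetical lost capacity */, 1000);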