This article collects typical usage examples of the Java method org.apache.hadoop.yarn.api.records.NodeReport.getCapability. If you are wondering what NodeReport.getCapability does, how to call it, or are looking for concrete examples, the curated code samples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.yarn.api.records.NodeReport.
Two code examples of NodeReport.getCapability are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
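Before the examples, here is a minimal, self-contained sketch (not taken from either example below; the class name NodeCapabilityExample and the printed output are illustrative assumptions) showing how NodeReport objects are typically obtained from a YarnClient and how getCapability exposes each node's total memory and vCores:

import java.util.List;

import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class NodeCapabilityExample {
    public static void main(String[] args) throws Exception {
        // Connect to the ResourceManager configured in yarn-site.xml on the classpath.
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(new YarnConfiguration());
        yarnClient.start();
        try {
            // Ask only for NodeManagers that are currently RUNNING.
            List<NodeReport> reports = yarnClient.getNodeReports(NodeState.RUNNING);
            for (NodeReport report : reports) {
                // getCapability() returns the total resources configured on the node.
                Resource capability = report.getCapability();
                System.out.printf("%s: %d MB, %d vCores%n",
                    report.getNodeId(), capability.getMemory(), capability.getVirtualCores());
            }
        } finally {
            yarnClient.stop();
        }
    }
}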
Example 1: getClusterDescription
import org.apache.hadoop.yarn.api.records.NodeReport; // import the class the method depends on

@Override
public String getClusterDescription() {
    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        PrintStream ps = new PrintStream(baos);

        YarnClusterMetrics metrics = yarnClient.getYarnClusterMetrics();
        ps.append("NodeManagers in the ClusterClient " + metrics.getNumNodeManagers());

        List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
        final String format = "|%-16s |%-16s %n";
        ps.printf("|Property         |Value          %n");
        ps.println("+---------------------------------------+");

        // Sum up the total capability (memory and vCores) reported by all running NodeManagers.
        int totalMemory = 0;
        int totalCores = 0;
        for (NodeReport rep : nodes) {
            final Resource res = rep.getCapability();
            totalMemory += res.getMemory();
            totalCores += res.getVirtualCores();
            ps.format(format, "NodeID", rep.getNodeId());
            ps.format(format, "Memory", res.getMemory() + " MB");
            ps.format(format, "vCores", res.getVirtualCores());
            ps.format(format, "HealthReport", rep.getHealthReport());
            ps.format(format, "Containers", rep.getNumContainers());
            ps.println("+---------------------------------------+");
        }
        ps.println("Summary: totalMemory " + totalMemory + " totalCores " + totalCores);

        // Append per-queue capacity information.
        List<QueueInfo> qInfo = yarnClient.getAllQueues();
        for (QueueInfo q : qInfo) {
            ps.println("Queue: " + q.getQueueName() + ", Current Capacity: " + q.getCurrentCapacity()
                + " Max Capacity: " + q.getMaximumCapacity() + " Applications: " + q.getApplications().size());
        }
        return baos.toString();
    } catch (Exception e) {
        throw new RuntimeException("Couldn't get cluster description", e);
    }
}
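Note that getCapability() reports the total resources configured on a NodeManager, not what is currently free; to estimate the remaining headroom you subtract NodeReport.getUsed(), as Example 2 below does when checking that each node can still host two runnables. A minimal sketch (the variable names are illustrative):

    Resource capability = report.getCapability();
    Resource used = report.getUsed(); // getUsed() may return null, so guard against it as Example 2 does
    int freeMemoryMb = capability.getMemory() - (used != null ? used.getMemory() : 0);
    int freeVCores = capability.getVirtualCores() - (used != null ? used.getVirtualCores() : 0);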
Example 2: verifyClusterCapability
import org.apache.hadoop.yarn.api.records.NodeReport; // import the class the method depends on

/**
 * Verify the cluster configuration (number and capability of node managers) required for the tests.
 */
@BeforeClass
public static void verifyClusterCapability() throws InterruptedException {
    // Skip the verification when running against older Hadoop versions that do not support blacklists.
    Assume.assumeTrue(YarnUtils.getHadoopVersion().equals(YarnUtils.HadoopVersions.HADOOP_22));

    // All runnables in this test class use the same resource specification for the sake of convenience.
    resource = ResourceSpecification.Builder.with()
        .setVirtualCores(RUNNABLE_CORES)
        .setMemory(RUNNABLE_MEMORY, ResourceSpecification.SizeUnit.MEGA)
        .build();
    twoInstancesResource = ResourceSpecification.Builder.with()
        .setVirtualCores(RUNNABLE_CORES)
        .setMemory(RUNNABLE_MEMORY, ResourceSpecification.SizeUnit.MEGA)
        .setInstances(2)
        .build();

    // The tests need exactly three NodeManagers in the cluster; poll for up to 20 seconds.
    int trials = 0;
    while (trials++ < 20) {
        try {
            nodeReports = TWILL_TESTER.getNodeReports();
            if (nodeReports != null && nodeReports.size() == 3) {
                break;
            }
        } catch (Exception e) {
            LOG.error("Failed to get node reports", e);
        }
        LOG.warn("NodeManagers != 3. {}", nodeReports);
        TimeUnit.SECONDS.sleep(1);
    }

    // All NodeManagers should have enough capacity available to accommodate at least two runnables.
    for (NodeReport nodeReport : nodeReports) {
        Resource capability = nodeReport.getCapability();
        Resource used = nodeReport.getUsed();
        Assert.assertNotNull(capability);
        if (used != null) {
            Assert.assertTrue(2 * resource.getMemorySize() < capability.getMemory() - used.getMemory());
        } else {
            Assert.assertTrue(2 * resource.getMemorySize() < capability.getMemory());
        }
    }
}