当前位置: 首页>>代码示例>>Java>>正文


Java NodeReport.getUsed方法代码示例

本文整理汇总了Java中org.apache.hadoop.yarn.api.records.NodeReport.getUsed方法的典型用法代码示例。如果您正苦于以下问题:Java NodeReport.getUsed方法的具体用法?Java NodeReport.getUsed怎么用?Java NodeReport.getUsed使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.yarn.api.records.NodeReport的用法示例。


在下文中一共展示了NodeReport.getUsed方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: getCurrentFreeClusterResources

import org.apache.hadoop.yarn.api.records.NodeReport; //导入方法依赖的package包/类
/**
 * Queries the YARN ResourceManager for all RUNNING NodeManagers and summarizes
 * how much memory is still free on the cluster.
 *
 * @param yarnClient client used to fetch the node reports
 * @return description holding the total free memory, the largest single-node
 *         free memory (upper bound for one container), and per-node free memory
 * @throws YarnException if the ResourceManager call fails
 * @throws IOException on communication errors
 */
private ClusterResourceDescription getCurrentFreeClusterResources(YarnClient yarnClient) throws YarnException, IOException {
	final List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);

	final int[] nodeManagersFree = new int[nodes.size()];
	int totalFreeMemory = 0;
	int containerLimit = 0;

	int idx = 0;
	for (NodeReport report : nodes) {
		// Free = configured capability minus currently allocated memory.
		// getUsed() may be null when nothing has been allocated on the node yet.
		final int freeOnNode = report.getCapability().getMemory()
			- (report.getUsed() == null ? 0 : report.getUsed().getMemory());
		nodeManagersFree[idx++] = freeOnNode;
		totalFreeMemory += freeOnNode;
		// The biggest per-node free amount bounds the largest container we can request.
		containerLimit = Math.max(containerLimit, freeOnNode);
	}
	return new ClusterResourceDescription(totalFreeMemory, containerLimit, nodeManagersFree);
}
 
开发者ID:axbaretto,项目名称:flink,代码行数:19,代码来源:AbstractYarnClusterDescriptor.java

示例2: getProvisionedNodeManagerCount

import org.apache.hadoop.yarn.api.records.NodeReport; //导入方法依赖的package包/类
/**
 * Helper function to verify DISTRIBUTED placement policies.
 * Returns the number of NodeManagers on which runnables got provisioned.
 * @return number of NodeManagers on which runnables got provisioned.
 */
/**
 * Helper function to verify DISTRIBUTED placement policies.
 * Counts the NodeManagers that currently have memory allocated, i.e. the nodes
 * on which at least one runnable got provisioned.
 *
 * @return number of NodeManagers on which runnables got provisioned
 * @throws Exception if fetching the node reports fails
 */
private int getProvisionedNodeManagerCount() throws Exception {
  int count = 0;
  for (NodeReport report : getNodeReports()) {
    // A node counts as provisioned when some memory is allocated on it;
    // getUsed() may be null for a node with no allocations at all.
    final Resource usedResources = report.getUsed();
    final boolean hasAllocation = usedResources != null && usedResources.getMemory() > 0;
    if (hasAllocation) {
      count++;
    }
  }
  return count;
}
 
开发者ID:apache,项目名称:twill,代码行数:16,代码来源:PlacementPolicyTestRun.java

示例3: listDetailedClusterNodes

import org.apache.hadoop.yarn.api.records.NodeReport; //导入方法依赖的package包/类
/**
 * Lists the nodes which are matching the given node states along with
 * detailed node informations such as resource usage etc.
 *
 * @param nodeStates
 * @throws YarnException
 * @throws IOException
 */
/**
 * Lists the nodes which are matching the given node states along with
 * detailed node information such as configured/allocated resources,
 * node and container utilization, and node labels.
 *
 * @param nodeStates the set of node states to filter the node reports by
 * @throws YarnException if the ResourceManager call fails
 * @throws IOException on communication errors
 */
private void listDetailedClusterNodes(Set<NodeState> nodeStates)
    throws YarnException, IOException {
  // Explicit UTF-8 writer so output is stable regardless of platform charset.
  PrintWriter writer = new PrintWriter(new OutputStreamWriter(sysout,
      Charset.forName("UTF-8")));
  List<NodeReport> nodesReport = client.getNodeReports(nodeStates
      .toArray(new NodeState[0]));
  writer.println("Total Nodes:" + nodesReport.size());
  writer.printf(NODES_PATTERN, "Node-Id", "Node-State", "Node-Http-Address",
      "Number-of-Running-Containers");
  for (NodeReport nodeReport : nodesReport) {
    writer.printf(NODES_PATTERN, nodeReport.getNodeId(),
        nodeReport.getNodeState(), nodeReport.getHttpAddress(),
        nodeReport.getNumContainers());
    writer.println("Detailed Node Information :");
    writer.print("\tConfigured Resources : ");
    writer.println(nodeReport.getCapability());
    writer.print("\tAllocated Resources : ");
    // getUsed() may be null when nothing is allocated on the node;
    // in that case the value is simply omitted and only a newline is printed.
    if (nodeReport.getUsed() != null) {
      writer.print(nodeReport.getUsed());
    }
    writer.println();

    writer.print("\tResource Utilization by Node : ");
    // Node-level utilization may be unavailable (e.g. older NodeManagers).
    if (nodeReport.getNodeUtilization() != null) {
      writer.print("PMem:"
          + nodeReport.getNodeUtilization().getPhysicalMemory()
          + " MB, VMem:" + nodeReport.getNodeUtilization().getVirtualMemory()
          + " MB, VCores:" + nodeReport.getNodeUtilization().getCPU());
    }
    writer.println();

    writer.print("\tResource Utilization by Containers : ");
    // Aggregated container utilization is likewise optional.
    if (nodeReport.getAggregatedContainersUtilization() != null) {
      writer.print("PMem:"
          + nodeReport.getAggregatedContainersUtilization()
              .getPhysicalMemory()
          + " MB, VMem:"
          + nodeReport.getAggregatedContainersUtilization()
              .getVirtualMemory() + " MB, VCores:"
          + nodeReport.getAggregatedContainersUtilization().getCPU());
    }
    writer.println();

    writer.print("\tNode-Labels : ");
    // Create a List for node labels since we need it get sorted
    List<String> nodeLabelsList = new ArrayList<String>(
        nodeReport.getNodeLabels());
    Collections.sort(nodeLabelsList);
    writer.println(StringUtils.join(nodeLabelsList.iterator(), ','));
  }
  writer.flush();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:61,代码来源:NodeCLI.java

示例4: verifyClusterCapability

import org.apache.hadoop.yarn.api.records.NodeReport; //导入方法依赖的package包/类
/**
 * Verify the cluster configuration (number and capability of node managers) required for the tests.
 */
@BeforeClass
public static void verifyClusterCapability() throws InterruptedException {
  // Ignore verifications if it is running against older Hadoop versions which does not support blacklists.
  Assume.assumeTrue(YarnUtils.getHadoopVersion().equals(YarnUtils.HadoopVersions.HADOOP_22));

  // All runnables in this test class use same resource specification for the sake of convenience.
  resource = ResourceSpecification.Builder.with()
    .setVirtualCores(RUNNABLE_CORES)
    .setMemory(RUNNABLE_MEMORY, ResourceSpecification.SizeUnit.MEGA)
    .build();
  twoInstancesResource = ResourceSpecification.Builder.with()
    .setVirtualCores(RUNNABLE_CORES)
    .setMemory(RUNNABLE_MEMORY, ResourceSpecification.SizeUnit.MEGA)
    .setInstances(2)
    .build();

  // The tests need exactly three NodeManagers in the cluster.
  int trials = 0;
  while (trials++ < 20) {
    try {
      nodeReports = TWILL_TESTER.getNodeReports();
      if (nodeReports != null && nodeReports.size() == 3) {
        break;
      }
    } catch (Exception e) {
      LOG.error("Failed to get node reports", e);
    }
    LOG.warn("NodeManagers != 3. {}", nodeReports);
    TimeUnit.SECONDS.sleep(1);
  }

  // All NodeManagers should have enough capacity available to accommodate at least two runnables.
  for (NodeReport nodeReport : nodeReports) {
    Resource capability = nodeReport.getCapability();
    Resource used = nodeReport.getUsed();
    Assert.assertNotNull(capability);
    if (used != null) {
      Assert.assertTrue(2 * resource.getMemorySize() < capability.getMemory() - used.getMemory());
    } else {
      Assert.assertTrue(2 * resource.getMemorySize() < capability.getMemory());
    }
  }
}
 
开发者ID:apache,项目名称:twill,代码行数:47,代码来源:PlacementPolicyTestRun.java


注:本文中的org.apache.hadoop.yarn.api.records.NodeReport.getUsed方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。