

Java NodeReport.getUsed Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.yarn.api.records.NodeReport.getUsed. If you are wondering what NodeReport.getUsed does, how to call it, or what real-world usages look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.yarn.api.records.NodeReport.


The following presents 4 code examples of the NodeReport.getUsed method, ordered by popularity by default.
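
Before the project examples, here is a minimal, hedged sketch of the typical call pattern: fetch NodeReports from a YarnClient and read each node's allocated resources via getUsed(). The class name NodeUsageExample and the use of the default YarnConfiguration are illustrative assumptions rather than code from any of the projects below; note that getUsed() can return null for a node with no allocations, which every example on this page guards against.

import java.util.List;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class NodeUsageExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical standalone example: connect using the default YARN
    // configuration (yarn-site.xml on the classpath).
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    try {
      // Ask the ResourceManager for a report of every RUNNING NodeManager.
      List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
      for (NodeReport node : nodes) {
        // getUsed() may be null when nothing is allocated on the node.
        Resource used = node.getUsed();
        int usedMemory = used != null ? used.getMemory() : 0;
        int usedVCores = used != null ? used.getVirtualCores() : 0;
        System.out.printf("%s used %d MB / %d vcores%n",
            node.getNodeId(), usedMemory, usedVCores);
      }
    } finally {
      yarnClient.stop();
    }
  }
}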

Example 1: getCurrentFreeClusterResources

import org.apache.hadoop.yarn.api.records.NodeReport; // import the package/class the method depends on
private ClusterResourceDescription getCurrentFreeClusterResources(YarnClient yarnClient) throws YarnException, IOException {
	List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);

	int totalFreeMemory = 0;
	int containerLimit = 0;
	int[] nodeManagersFree = new int[nodes.size()];

	for (int i = 0; i < nodes.size(); i++) {
		NodeReport rep = nodes.get(i);
		// getUsed() can be null when nothing is allocated on the node yet.
		int free = rep.getCapability().getMemory() - (rep.getUsed() != null ? rep.getUsed().getMemory() : 0);
		nodeManagersFree[i] = free;
		totalFreeMemory += free;
		if (free > containerLimit) {
			containerLimit = free;
		}
	}
	return new ClusterResourceDescription(totalFreeMemory, containerLimit, nodeManagersFree);
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 19, Source file: AbstractYarnClusterDescriptor.java

Example 2: getProvisionedNodeManagerCount

import org.apache.hadoop.yarn.api.records.NodeReport; // import the package/class the method depends on
/**
 * Helper function to verify DISTRIBUTED placement policies.
 * Returns the number of NodeManagers on which runnables got provisioned.
 * @return number of NodeManagers on which runnables got provisioned.
 */
private int getProvisionedNodeManagerCount() throws Exception {
  int provisionedNodeManagerCount = 0;
  for (NodeReport nodeReport : getNodeReports()) {
    Resource used = nodeReport.getUsed();
    if (used != null && used.getMemory() > 0) {
      provisionedNodeManagerCount++;
    }
  }
  return provisionedNodeManagerCount;
}
 
Developer ID: apache, Project: twill, Lines of code: 16, Source file: PlacementPolicyTestRun.java

Example 3: listDetailedClusterNodes

import org.apache.hadoop.yarn.api.records.NodeReport; // import the package/class the method depends on
/**
 * Lists the nodes matching the given node states, along with detailed
 * node information such as resource usage.
 *
 * @param nodeStates the node states to filter on
 * @throws YarnException
 * @throws IOException
 */
private void listDetailedClusterNodes(Set<NodeState> nodeStates)
    throws YarnException, IOException {
  PrintWriter writer = new PrintWriter(new OutputStreamWriter(sysout,
      Charset.forName("UTF-8")));
  List<NodeReport> nodesReport = client.getNodeReports(nodeStates
      .toArray(new NodeState[0]));
  writer.println("Total Nodes:" + nodesReport.size());
  writer.printf(NODES_PATTERN, "Node-Id", "Node-State", "Node-Http-Address",
      "Number-of-Running-Containers");
  for (NodeReport nodeReport : nodesReport) {
    writer.printf(NODES_PATTERN, nodeReport.getNodeId(),
        nodeReport.getNodeState(), nodeReport.getHttpAddress(),
        nodeReport.getNumContainers());
    writer.println("Detailed Node Information :");
    writer.print("\tConfigured Resources : ");
    writer.println(nodeReport.getCapability());
    writer.print("\tAllocated Resources : ");
    if (nodeReport.getUsed() != null) {
      writer.print(nodeReport.getUsed());
    }
    writer.println();

    writer.print("\tResource Utilization by Node : ");
    if (nodeReport.getNodeUtilization() != null) {
      writer.print("PMem:"
          + nodeReport.getNodeUtilization().getPhysicalMemory()
          + " MB, VMem:" + nodeReport.getNodeUtilization().getVirtualMemory()
          + " MB, VCores:" + nodeReport.getNodeUtilization().getCPU());
    }
    writer.println();

    writer.print("\tResource Utilization by Containers : ");
    if (nodeReport.getAggregatedContainersUtilization() != null) {
      writer.print("PMem:"
          + nodeReport.getAggregatedContainersUtilization()
              .getPhysicalMemory()
          + " MB, VMem:"
          + nodeReport.getAggregatedContainersUtilization()
              .getVirtualMemory() + " MB, VCores:"
          + nodeReport.getAggregatedContainersUtilization().getCPU());
    }
    writer.println();

    writer.print("\tNode-Labels : ");
    // Create a List for node labels since we need it get sorted
    List<String> nodeLabelsList = new ArrayList<String>(
        nodeReport.getNodeLabels());
    Collections.sort(nodeLabelsList);
    writer.println(StringUtils.join(nodeLabelsList.iterator(), ','));
  }
  writer.flush();
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 61, Source file: NodeCLI.java

Example 4: verifyClusterCapability

import org.apache.hadoop.yarn.api.records.NodeReport; // import the package/class the method depends on
/**
 * Verify the cluster configuration (number and capability of node managers) required for the tests.
 */
@BeforeClass
public static void verifyClusterCapability() throws InterruptedException {
  // Skip verification when running against an older Hadoop version that does not support blacklists.
  Assume.assumeTrue(YarnUtils.getHadoopVersion().equals(YarnUtils.HadoopVersions.HADOOP_22));

  // All runnables in this test class use the same resource specification for convenience.
  resource = ResourceSpecification.Builder.with()
    .setVirtualCores(RUNNABLE_CORES)
    .setMemory(RUNNABLE_MEMORY, ResourceSpecification.SizeUnit.MEGA)
    .build();
  twoInstancesResource = ResourceSpecification.Builder.with()
    .setVirtualCores(RUNNABLE_CORES)
    .setMemory(RUNNABLE_MEMORY, ResourceSpecification.SizeUnit.MEGA)
    .setInstances(2)
    .build();

  // The tests need exactly three NodeManagers in the cluster.
  int trials = 0;
  while (trials++ < 20) {
    try {
      nodeReports = TWILL_TESTER.getNodeReports();
      if (nodeReports != null && nodeReports.size() == 3) {
        break;
      }
    } catch (Exception e) {
      LOG.error("Failed to get node reports", e);
    }
    LOG.warn("NodeManagers != 3. {}", nodeReports);
    TimeUnit.SECONDS.sleep(1);
  }

  // All NodeManagers should have enough capacity available to accommodate at least two runnables.
  for (NodeReport nodeReport : nodeReports) {
    Resource capability = nodeReport.getCapability();
    Resource used = nodeReport.getUsed();
    Assert.assertNotNull(capability);
    if (used != null) {
      Assert.assertTrue(2 * resource.getMemorySize() < capability.getMemory() - used.getMemory());
    } else {
      Assert.assertTrue(2 * resource.getMemorySize() < capability.getMemory());
    }
  }
}
 
Developer ID: apache, Project: twill, Lines of code: 47, Source file: PlacementPolicyTestRun.java


Note: The org.apache.hadoop.yarn.api.records.NodeReport.getUsed examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects, and copyright remains with their original authors; consult the corresponding project's License before distributing or reusing the code. Do not reproduce this article without permission.