当前位置: 首页>>代码示例>>Java>>正文


Java MockRM.sendAMLaunched方法代码示例

本文整理汇总了Java中org.apache.hadoop.yarn.server.resourcemanager.MockRM.sendAMLaunched方法的典型用法代码示例。如果您正苦于以下问题:Java MockRM.sendAMLaunched方法的具体用法?Java MockRM.sendAMLaunched怎么用?Java MockRM.sendAMLaunched使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.yarn.server.resourcemanager.MockRM的用法示例。


在下文中一共展示了MockRM.sendAMLaunched方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testMinimumAllocation

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
/**
 * Submits an app requesting {@code testAlloc} MB and asserts the scheduler
 * actually hands the AM the configured minimum allocation.
 *
 * @param conf      RM configuration carrying the minimum-allocation setting
 * @param testAlloc memory (MB) requested at app submission
 */
private void testMinimumAllocation(YarnConfiguration conf, int testAlloc)
    throws Exception {
  // Spin up a ResourceManager with the supplied configuration.
  MockRM resourceManager = new MockRM(conf);
  resourceManager.start();

  // Bring one 6 GB node manager online.
  MockNM nodeManager = resourceManager.registerNode("127.0.0.1:1234", 6 * GB);

  // Submit an application asking for testAlloc MB.
  RMApp application = resourceManager.submitApp(testAlloc);

  // A heartbeat triggers scheduling so the AM container gets placed.
  nodeManager.nodeHeartbeat(true);
  RMAppAttempt appAttempt = application.getCurrentAppAttempt();
  MockAM applicationMaster =
      resourceManager.sendAMLaunched(appAttempt.getAppAttemptId());
  applicationMaster.registerAppAttempt();

  SchedulerNodeReport nodeReport =
      resourceManager.getResourceScheduler().getNodeReport(
          nodeManager.getNodeId());

  // The used resource on the node must equal the configured minimum.
  int expectedAlloc =
      conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
  Assert.assertEquals(expectedAlloc, nodeReport.getUsedResource().getMemory());

  resourceManager.stop();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:27,代码来源:TestFifoScheduler.java

示例2: launchAM

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
/**
 * Launches and registers the AM for the given app, then waits for the app
 * to reach RUNNING.
 *
 * @param app the submitted application whose current attempt is launched
 * @param rm  the ResourceManager under test
 * @param nm  a registered node manager whose heartbeat kicks scheduling
 * @return the registered MockAM for the app's current attempt
 */
private MockAM launchAM(RMApp app, MockRM rm, MockNM nm)
    throws Exception {
  // Heartbeat first so the scheduler can allocate the AM container.
  RMAppAttempt currentAttempt = app.getCurrentAppAttempt();
  nm.nodeHeartbeat(true);

  // Tell the RM the AM container launched, then register the AM with it.
  MockAM applicationMaster = rm.sendAMLaunched(currentAttempt.getAppAttemptId());
  applicationMaster.registerAppAttempt();

  // Block until the application transitions to RUNNING.
  rm.waitForState(app.getApplicationId(), RMAppState.RUNNING);
  return applicationMaster;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:10,代码来源:TestCapacityScheduler.java

示例3: testAllocateContainerOnNodeWithoutOffSwitchSpecified

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
/**
 * Regression test: requesting node-local and rack-local containers without
 * the usual off-switch (ANY) request must not crash the scheduler with an
 * NPE on the next node heartbeat.
 */
@Test(timeout = 60000)
public void testAllocateContainerOnNodeWithoutOffSwitchSpecified()
    throws Exception {
  // Verbose logging helps diagnose a regression here.
  Logger log = LogManager.getRootLogger();
  log.setLevel(Level.DEBUG);

  MockRM resourceManager = new MockRM(conf);
  resourceManager.start();
  MockNM node = resourceManager.registerNode("127.0.0.1:1234", 6 * GB);

  RMApp application = resourceManager.submitApp(2048);
  // Heartbeat kicks scheduling: 2 GB goes to the AM, 4 GB remains on the node.
  node.nodeHeartbeat(true);
  RMAppAttempt appAttempt = application.getCurrentAppAttempt();
  MockAM applicationMaster =
      resourceManager.sendAMLaunched(appAttempt.getAppAttemptId());
  applicationMaster.registerAppAttempt();

  // Build requests that deliberately omit the off-switch (ResourceRequest.ANY)
  // entry, which historically triggered a scheduler NPE.
  List<ResourceRequest> resourceRequests = new ArrayList<ResourceRequest>();
  resourceRequests.add(
      applicationMaster.createResourceReq("127.0.0.1", 1 * GB, 1, 1));
  resourceRequests.add(
      applicationMaster.createResourceReq("/default-rack", 1 * GB, 1, 1));
  applicationMaster.allocate(resourceRequests, null); // send the request

  try {
    // Kick the scheduler; must not throw.
    node.nodeHeartbeat(true);
  } catch (NullPointerException e) {
    Assert.fail("NPE when allocating container on node but "
        + "forget to set off-switch request should be handled");
  }
  resourceManager.stop();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:33,代码来源:TestFifoScheduler.java

示例4: testRMWritingMassiveHistory

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
/**
 * Drives a massive workload (10000 containers) through the supplied RM so
 * the application-history writer is exercised under heavy event volume:
 * allocate everything, finish the app, then wait until the NM has been told
 * to clean up every allocated container.
 *
 * @param rm a MockRM configured by the caller; started and stopped here
 */
private void testRMWritingMassiveHistory(MockRM rm) throws Exception {
  rm.start();
  // One NM with enough memory (10100 * 1024 MB) to host all containers.
  MockNM nm = rm.registerNode("127.0.0.1:1234", 1024 * 10100);

  RMApp app = rm.submitApp(1024);
  nm.nodeHeartbeat(true);
  RMAppAttempt attempt = app.getCurrentAppAttempt();
  MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
  am.registerAppAttempt();

  // Ask for 10000 one-GB containers in a single allocate call.
  int request = 10000;
  am.allocate("127.0.0.1", 1024, request, new ArrayList<ContainerId>());
  nm.nodeHeartbeat(true);
  List<Container> allocated =
      am.allocate(new ArrayList<ResourceRequest>(),
        new ArrayList<ContainerId>()).getAllocatedContainers();
  // Poll (up to 200 * 300 ms = 60 s) until every requested container has
  // been handed to the AM; each heartbeat lets the scheduler make progress.
  int waitCount = 0;
  int allocatedSize = allocated.size();
  while (allocatedSize < request && waitCount++ < 200) {
    Thread.sleep(300);
    allocated =
        am.allocate(new ArrayList<ResourceRequest>(),
          new ArrayList<ContainerId>()).getAllocatedContainers();
    allocatedSize += allocated.size();
    nm.nodeHeartbeat(true);
  }
  Assert.assertEquals(request, allocatedSize);

  // Finish the application: unregister the AM and report its container done.
  am.unregisterAppAttempt();
  am.waitForState(RMAppAttemptState.FINISHING);
  nm.nodeHeartbeat(am.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
  am.waitForState(RMAppAttemptState.FINISHED);

  // After the app finishes, the RM should tell the NM to clean up all
  // allocated containers; poll heartbeats until the counts match.
  NodeHeartbeatResponse resp = nm.nodeHeartbeat(true);
  List<ContainerId> cleaned = resp.getContainersToCleanup();
  int cleanedSize = cleaned.size();
  waitCount = 0;
  while (cleanedSize < allocatedSize && waitCount++ < 200) {
    Thread.sleep(300);
    resp = nm.nodeHeartbeat(true);
    cleaned = resp.getContainersToCleanup();
    cleanedSize += cleaned.size();
  }
  Assert.assertEquals(allocatedSize, cleanedSize);
  rm.waitForState(app.getApplicationId(), RMAppState.FINISHED);

  rm.stop();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:49,代码来源:TestRMApplicationHistoryWriter.java

示例5: testAppReservationWithDominantResourceCalculator

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
/**
 * With the DominantResourceCalculator, a container request that cannot fit
 * on a vcore-starved node must NOT be reserved there; it should instead be
 * allocated on another node that has capacity.
 */
@Test(timeout = 30000)
public void testAppReservationWithDominantResourceCalculator() throws Exception {
  CapacitySchedulerConfiguration csconf =
      new CapacitySchedulerConfiguration();
  csconf.setResourceComparator(DominantResourceCalculator.class);

  YarnConfiguration conf = new YarnConfiguration(csconf);
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
    ResourceScheduler.class);

  MockRM rm = new MockRM(conf);
  rm.start();

  // nm1 has plenty of memory but only 1 vcore — the AM will exhaust it.
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 10 * GB, 1, 1);

  // register extra nodes to bump up cluster resource
  MockNM nm2 = rm.registerNode("127.0.0.1:1235", 10 * GB, 4, 4);
  rm.registerNode("127.0.0.1:1236", 10 * GB, 4, 4);

  RMApp app1 = rm.submitApp(1024);
  // kick the scheduling
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 =
      rm.getResourceScheduler().getNodeReport(nm1.getNodeId());

  // check node report: only the 1 GB AM container is using nm1
  Assert.assertEquals(1 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(9 * GB, report_nm1.getAvailableResource().getMemory());

  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 1 * GB, 1, 1);
  am1.schedule(); // send the request

  // kick the scheduler, container reservation should not happen
  // (dominant resource view: nm1 is out of vcores, so nothing to reserve)
  nm1.nodeHeartbeat(true);
  Thread.sleep(1000);
  AllocateResponse allocResponse = am1.schedule();
  ApplicationResourceUsageReport report =
      rm.getResourceScheduler().getAppResourceUsageReport(
        attempt1.getAppAttemptId());
  Assert.assertEquals(0, allocResponse.getAllocatedContainers().size());
  Assert.assertEquals(0, report.getNumReservedContainers());

  // container should get allocated on this node
  nm2.nodeHeartbeat(true);

  // poll until the allocated container is handed back to the AM
  // (bounded overall by the @Test timeout)
  while (allocResponse.getAllocatedContainers().size() == 0) {
    Thread.sleep(100);
    allocResponse = am1.schedule();
  }
  report =
      rm.getResourceScheduler().getAppResourceUsageReport(
        attempt1.getAppAttemptId());
  Assert.assertEquals(1, allocResponse.getAllocatedContainers().size());
  Assert.assertEquals(0, report.getNumReservedContainers());
  rm.stop();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:61,代码来源:TestCapacityScheduler.java

示例6: testExcessReservationThanNodeManagerCapacity

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
/**
 * A container request larger than nm1's whole capacity (2 GB request on a
 * 2 GB node already hosting the AM) must not be reserved on nm1; it should
 * instead be satisfied by the larger nm2 on its next heartbeat.
 */
@Test(timeout = 3000000)
public void testExcessReservationThanNodeManagerCapacity() throws Exception {
  @SuppressWarnings("resource")
  MockRM rm = new MockRM(conf);
  rm.start();

  // Register node1
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 2 * GB, 4, 4);
  MockNM nm2 = rm.registerNode("127.0.0.1:2234", 3 * GB, 4, 4);

  nm1.nodeHeartbeat(true);
  nm2.nodeHeartbeat(true);

  // wait.. (up to 20 * 100 ms) for both nodes to show up in the RM context
  int waitCount = 20;
  int size = rm.getRMContext().getRMNodes().size();
  while ((size = rm.getRMContext().getRMNodes().size()) != 2
      && waitCount-- > 0) {
    LOG.info("Waiting for node managers to register : " + size);
    Thread.sleep(100);
  }
  Assert.assertEquals(2, rm.getRMContext().getRMNodes().size());
  // Submit an application with a small 128 MB AM
  RMApp app1 = rm.submitApp(128);

  // kick the scheduling
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();

  LOG.info("sending container requests ");
  // 2 GB request: exceeds what nm1 has left after hosting the AM
  am1.addRequests(new String[] {"*"}, 2 * GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request

  // kick the scheduler
  nm1.nodeHeartbeat(true);
  int waitCounter = 20;
  LOG.info("heartbeating nm1");
  while (alloc1Response.getAllocatedContainers().size() < 1
      && waitCounter-- > 0) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(500);
    alloc1Response = am1.schedule();
  }
  LOG.info("received container : "
      + alloc1Response.getAllocatedContainers().size());

  // No container should be allocated.
  // Internally it should not been reserved.
  Assert.assertTrue(alloc1Response.getAllocatedContainers().size() == 0);

  // nm2 has room for the 2 GB container, so heartbeating it should
  // produce the allocation.
  LOG.info("heartbeating nm2");
  waitCounter = 20;
  nm2.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1
      && waitCounter-- > 0) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(500);
    alloc1Response = am1.schedule();
  }
  LOG.info("received container : "
      + alloc1Response.getAllocatedContainers().size());
  Assert.assertTrue(alloc1Response.getAllocatedContainers().size() == 1);

  rm.stop();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:68,代码来源:TestContainerAllocation.java

示例7: testNodeUsableEvent

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
/**
 * Verifies that NODE_USABLE / NODE_UNUSABLE events fired through the
 * NodesListManager produce notifications only for apps that are still
 * active (here: a freshly submitted app), never for finished or killed
 * apps. Events are captured into the {@code applist} field by the custom
 * dispatcher returned from {@code getDispatcher()}.
 */
@Test(timeout = 300000)
public void testNodeUsableEvent() throws Exception {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  // Custom dispatcher records per-app events into the shared applist field.
  final Dispatcher dispatcher = getDispatcher();
  YarnConfiguration conf = new YarnConfiguration();
  MockRM rm = new MockRM(conf) {
    @Override
    protected Dispatcher createDispatcher() {
      return dispatcher;
    }
  };
  rm.start();
  MockNM nm1 = rm.registerNode("h1:1234", 28000);
  NodesListManager nodesListManager = rm.getNodesListManager();
  Resource clusterResource = Resource.newInstance(28000, 8);
  RMNode rmnode = MockNodes.newNodeInfo(1, clusterResource);

  // App 1: submitted then killed — must not be notified.
  RMApp killrmApp = rm.submitApp(200);
  rm.killApp(killrmApp.getApplicationId());
  rm.waitForState(killrmApp.getApplicationId(), RMAppState.KILLED);

  // App 2: run to completion — must not be notified.
  RMApp finshrmApp = rm.submitApp(2000);
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt = finshrmApp.getCurrentAppAttempt();
  MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
  am.registerAppAttempt();
  am.unregisterAppAttempt();
  nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
  am.waitForState(RMAppAttemptState.FINISHED);

  // App 3: left in submitted state — the only one that should be notified.
  RMApp subrmApp = rm.submitApp(200);

  // Fire Event for NODE_USABLE
  nodesListManager.handle(new NodesListManagerEvent(
      NodesListManagerEventType.NODE_USABLE, rmnode));
  assertOnlyActiveAppNotified(subrmApp, finshrmApp, killrmApp);
  applist.clear();

  // Fire Event for NODE_UNUSABLE
  nodesListManager.handle(new NodesListManagerEvent(
      NodesListManagerEventType.NODE_UNUSABLE, rmnode));
  assertOnlyActiveAppNotified(subrmApp, finshrmApp, killrmApp);

  // Original test leaked the RM; stop it to release its services.
  rm.stop();
}

/**
 * Asserts that the events captured in {@code applist} reference the still
 * active app but neither the finished nor the killed app; fails outright
 * when no events were captured at all.
 *
 * @param activeApp   the submitted/running app expected in the event list
 * @param finishedApp a completed app that must NOT appear
 * @param killedApp   a killed app that must NOT appear
 */
private void assertOnlyActiveAppNotified(RMApp activeApp, RMApp finishedApp,
    RMApp killedApp) {
  if (applist.size() > 0) {
    Assert.assertTrue(
        "Event based on running app expected " + activeApp.getApplicationId(),
        applist.contains(activeApp.getApplicationId()));
    Assert.assertFalse(
        "Event based on finish app not expected "
            + finishedApp.getApplicationId(),
        applist.contains(finishedApp.getApplicationId()));
    Assert.assertFalse(
        "Event based on killed app not expected "
            + killedApp.getApplicationId(),
        applist.contains(killedApp.getApplicationId()));
  } else {
    // Fixed message: original said "should have beeen more than 1" (typo,
    // and the guard actually only requires at least one event).
    Assert.fail("Events received should have been at least 1");
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:77,代码来源:TestNodesListManager.java

示例8: testResourceOverCommit

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
/**
 * Exercises over-commit handling: after shrinking a node's resource below
 * its current usage via the admin API, the node report must show a negative
 * available resource, and a running container must still be able to
 * complete and release its memory.
 */
@Test(timeout = 60000)
public void testResourceOverCommit() throws Exception {
  MockRM rm = new MockRM(conf);
  rm.start();

  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB);

  RMApp app1 = rm.submitApp(2048);
  // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 =
      rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report, 2 GB used and 2 GB available
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory());

  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request

  // kick the scheduler, 2 GB given to AM1, resource remaining 0
  // NOTE(review): unbounded poll — relies on the @Test timeout to abort
  // if the allocation never arrives.
  nm1.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(1000);
    alloc1Response = am1.schedule();
  }

  List<Container> allocated1 = alloc1Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated1.size());
  Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());

  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report, 4 GB used and 0 GB available
  Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());

  // check container is assigned with 2 GB.
  Container c1 = allocated1.get(0);
  Assert.assertEquals(2 * GB, c1.getResource().getMemory());

  // update node resource to 2 GB, so resource is over-consumed.
  Map<NodeId, ResourceOption> nodeResourceMap =
      new HashMap<NodeId, ResourceOption>();
  nodeResourceMap.put(nm1.getNodeId(),
      ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1));
  UpdateNodeResourceRequest request =
      UpdateNodeResourceRequest.newInstance(nodeResourceMap);
  rm.getAdminService().updateNodeResource(request);

  // Now, the used resource is still 4 GB, and available resource is minus
  // value.
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory());

  // Check container can complete successfully in case of resource
  // over-commitment.
  ContainerStatus containerStatus =
      BuilderUtils.newContainerStatus(c1.getId(), ContainerState.COMPLETE,
          "", 0, c1.getResource());
  nm1.containerStatus(containerStatus);
  // bounded poll (20 * 100 ms) for the completion to be recorded
  int waitCount = 0;
  while (attempt1.getJustFinishedContainers().size() < 1 && waitCount++ != 20) {
    LOG.info("Waiting for containers to be finished for app 1... Tried "
        + waitCount + " times already..");
    Thread.sleep(100);
  }
  Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
  Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses()
      .size());
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  // As container return 2 GB back, the available resource becomes 0 again.
  Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
  rm.stop();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:82,代码来源:TestFifoScheduler.java

示例9: testAppReservationWithDominantResourceCalculator

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
/**
 * With the DominantResourceCalculator, a container request that cannot fit
 * on a vcore-starved node must NOT be reserved there; it should instead be
 * allocated on another node that has capacity.
 */
@Test(timeout = 30000)
public void testAppReservationWithDominantResourceCalculator() throws Exception {
  CapacitySchedulerConfiguration csconf =
      new CapacitySchedulerConfiguration();
  csconf.setResourceComparator(DominantResourceCalculator.class);

  YarnConfiguration conf = new YarnConfiguration(csconf);
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
    ResourceScheduler.class);

  MockRM rm = new MockRM(conf);
  rm.start();

  // nm1 has plenty of memory but only 1 vcore — the AM will exhaust it.
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 10 * GB, 1);

  // register extra nodes to bump up cluster resource
  MockNM nm2 = rm.registerNode("127.0.0.1:1235", 10 * GB, 4);
  rm.registerNode("127.0.0.1:1236", 10 * GB, 4);

  RMApp app1 = rm.submitApp(1024);
  // kick the scheduling
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 =
      rm.getResourceScheduler().getNodeReport(nm1.getNodeId());

  // check node report: only the 1 GB AM container is using nm1
  Assert.assertEquals(1 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(9 * GB, report_nm1.getAvailableResource().getMemory());

  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 1 * GB, 1, 1);
  am1.schedule(); // send the request

  // kick the scheduler, container reservation should not happen
  // (dominant resource view: nm1 is out of vcores, so nothing to reserve)
  nm1.nodeHeartbeat(true);
  Thread.sleep(1000);
  AllocateResponse allocResponse = am1.schedule();
  ApplicationResourceUsageReport report =
      rm.getResourceScheduler().getAppResourceUsageReport(
        attempt1.getAppAttemptId());
  Assert.assertEquals(0, allocResponse.getAllocatedContainers().size());
  Assert.assertEquals(0, report.getNumReservedContainers());

  // container should get allocated on this node
  nm2.nodeHeartbeat(true);

  // poll until the allocated container is handed back to the AM
  // (bounded overall by the @Test timeout)
  while (allocResponse.getAllocatedContainers().size() == 0) {
    Thread.sleep(100);
    allocResponse = am1.schedule();
  }
  report =
      rm.getResourceScheduler().getAppResourceUsageReport(
        attempt1.getAppAttemptId());
  Assert.assertEquals(1, allocResponse.getAllocatedContainers().size());
  Assert.assertEquals(0, report.getNumReservedContainers());
  rm.stop();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:61,代码来源:TestCapacityScheduler.java

示例10: testExcessReservationThanNodeManagerCapacity

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
/**
 * A container request larger than nm1's whole capacity (2 GB request on a
 * 2 GB node already hosting the AM) must not be reserved on nm1; it should
 * instead be satisfied by the larger nm2 on its next heartbeat.
 */
@Test(timeout = 3000000)
public void testExcessReservationThanNodeManagerCapacity() throws Exception {
  @SuppressWarnings("resource")
  MockRM rm = new MockRM(conf);
  rm.start();

  // Register node1
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 2 * GB, 4);
  MockNM nm2 = rm.registerNode("127.0.0.1:2234", 3 * GB, 4);

  nm1.nodeHeartbeat(true);
  nm2.nodeHeartbeat(true);

  // wait.. (up to 20 * 100 ms) for both nodes to show up in the RM context
  int waitCount = 20;
  int size = rm.getRMContext().getRMNodes().size();
  while ((size = rm.getRMContext().getRMNodes().size()) != 2
      && waitCount-- > 0) {
    LOG.info("Waiting for node managers to register : " + size);
    Thread.sleep(100);
  }
  Assert.assertEquals(2, rm.getRMContext().getRMNodes().size());
  // Submit an application with a small 128 MB AM
  RMApp app1 = rm.submitApp(128);

  // kick the scheduling
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();

  LOG.info("sending container requests ");
  // 2 GB request: exceeds what nm1 has left after hosting the AM
  am1.addRequests(new String[] {"*"}, 2 * GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request

  // kick the scheduler
  nm1.nodeHeartbeat(true);
  int waitCounter = 20;
  LOG.info("heartbeating nm1");
  while (alloc1Response.getAllocatedContainers().size() < 1
      && waitCounter-- > 0) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(500);
    alloc1Response = am1.schedule();
  }
  LOG.info("received container : "
      + alloc1Response.getAllocatedContainers().size());

  // No container should be allocated.
  // Internally it should not been reserved.
  Assert.assertTrue(alloc1Response.getAllocatedContainers().size() == 0);

  // nm2 has room for the 2 GB container, so heartbeating it should
  // produce the allocation.
  LOG.info("heartbeating nm2");
  waitCounter = 20;
  nm2.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1
      && waitCounter-- > 0) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(500);
    alloc1Response = am1.schedule();
  }
  LOG.info("received container : "
      + alloc1Response.getAllocatedContainers().size());
  Assert.assertTrue(alloc1Response.getAllocatedContainers().size() == 1);

  rm.stop();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:68,代码来源:TestContainerAllocation.java


注:本文中的org.apache.hadoop.yarn.server.resourcemanager.MockRM.sendAMLaunched方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。