

Java AllocateResponse.getAllocatedContainers Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse.getAllocatedContainers, gathered from open-source projects. If you are wondering what AllocateResponse.getAllocatedContainers does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse.


A total of 9 code examples of the AllocateResponse.getAllocatedContainers method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
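Before the examples, here is a minimal, self-contained sketch of the pattern the snippets below share: an ApplicationMaster heartbeats the ResourceManager via allocate() and picks up newly granted containers from AllocateResponse.getAllocatedContainers(). The AMRMClient calls used are standard YARN client API, but the class name AllocateLoopSketch and the specific resource/priority values are illustrative assumptions, not taken from any of the examples that follow.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;

public class AllocateLoopSketch {
  // Illustrative only: request a single 1 GB / 1 vcore container and wait for it.
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient();
    rmClient.init(conf);
    rmClient.start();
    rmClient.registerApplicationMaster("", 0, ""); // host/port/tracking URL omitted

    rmClient.addContainerRequest(new ContainerRequest(
        Resource.newInstance(1024, 1), null, null, Priority.newInstance(0)));

    int granted = 0;
    while (granted < 1) {
      // Each allocate() call doubles as the AM heartbeat; the response carries
      // any containers the ResourceManager has granted since the previous call.
      AllocateResponse response = rmClient.allocate(0.0f);
      List<Container> containers = response.getAllocatedContainers();
      granted += containers.size();
      for (Container c : containers) {
        System.out.println("Got container " + c.getId() + " on " + c.getNodeId());
      }
      Thread.sleep(100); // back off between heartbeats
    }

    rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "", "");
    rmClient.stop();
  }
}

Note that the allocated containers would normally be launched through an NMClient, as Example 1 and Example 8 below demonstrate.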

Example 1: launch

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; // import the package/class that the method depends on
@Override
public void launch(int n, List<String> command, List<String> hosts,
    boolean verbose) throws Exception {
  final List<String> chmod = new ArrayList<String>();
  chmod.add("chmod");
  chmod.add("a+rx");
  chmod.add(System.getenv(Environment.LOG_DIRS.name()));
  final ProcessBuilder pb = new ProcessBuilder(chmod);
  pb.redirectOutput(Redirect.INHERIT);
  pb.redirectError(Redirect.INHERIT);
  pb.start();
  redirect(command); // TODO clone before mutating the command
  rmClient.init(conf);
  rmClient.start();
  final NMClient nmClient = NMClient.createNMClient();
  nmClient.init(conf);
  nmClient.start();
  rmClient.registerApplicationMaster("", 0, "");
  for (int i = 0; i < n; ++i) {
    final ContainerRequest request = new ContainerRequest(
        Resource.newInstance(256, 1), null, null, Priority.newInstance(0));
    rmClient.addContainerRequest(request);
  }
  int responseId = 0;
  for (int containers = 0; containers < n;) {
    final AllocateResponse response = rmClient.allocate(responseId++);
    for (final Container container : response.getAllocatedContainers()) {
      final ContainerLaunchContext ctx = ContainerLaunchContext
          .newInstance(null, null, command, null, null, null);
      nmClient.startContainer(container, ctx);
    }
    containers += response.getAllocatedContainers().size();
    try {
      Thread.sleep(100);
    } catch (final InterruptedException e) {
      // interruption between polls is benign here; keep waiting for containers
    }
  }
}
 
Developer: x10-lang, Project: apgas, Lines of code: 39, Source file: Launcher.java

Example 2: doAllocate

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; // import the package/class that the method depends on
@Override
protected AllocateResult doAllocate(float progress) throws Exception {
  AllocateResponse allocateResponse = amrmClient.allocate(progress);
  List<RunnableProcessLauncher> launchers
    = Lists.newArrayListWithCapacity(allocateResponse.getAllocatedContainers().size());

  for (Container container : allocateResponse.getAllocatedContainers()) {
    launchers.add(new RunnableProcessLauncher(new Hadoop21YarnContainerInfo(container), nmClient));
  }

  List<YarnContainerStatus> completed = ImmutableList.copyOf(
    Iterables.transform(allocateResponse.getCompletedContainersStatuses(), STATUS_TRANSFORM));

  return new AllocateResult(launchers, completed);
}
 
Developer: apache, Project: twill, Lines of code: 16, Source file: Hadoop21YarnAMClient.java

Example 3: testResourceOverCommit

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; // import the package/class that the method depends on
@Test
public void testResourceOverCommit() throws Exception {
  MockRM rm = new MockRM(conf);
  rm.start();
  
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB);
  
  RMApp app1 = rm.submitApp(2048);
  // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
      nm1.getNodeId());
  // check node report, 2 GB used and 2 GB available
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory());

  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request

  // kick the scheduler, 2 GB given to AM1, resource remaining 0
  nm1.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(1000);
    alloc1Response = am1.schedule();
  }

  List<Container> allocated1 = alloc1Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated1.size());
  Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
  
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report, 4 GB used and 0 GB available
  Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());

  // check container is assigned with 2 GB.
  Container c1 = allocated1.get(0);
  Assert.assertEquals(2 * GB, c1.getResource().getMemory());
  
  // update node resource to 2 GB, so resource is over-consumed.
  Map<NodeId, ResourceOption> nodeResourceMap = 
      new HashMap<NodeId, ResourceOption>();
  nodeResourceMap.put(nm1.getNodeId(), 
      ResourceOption.newInstance(Resource.newInstance(2 * GB, 1, 1), -1));
  UpdateNodeResourceRequest request = 
      UpdateNodeResourceRequest.newInstance(nodeResourceMap);
  AdminService as = rm.adminService;
  as.updateNodeResource(request);
  
  // Now, the used resource is still 4 GB, and available resource is minus value.
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory());
  
  // Check container can complete successfully in case of resource over-commitment.
  ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
      c1.getId(), ContainerState.COMPLETE, "", 0);
  nm1.containerStatus(containerStatus);
  int waitCount = 0;
  while (attempt1.getJustFinishedContainers().size() < 1
      && waitCount++ != 20) {
    LOG.info("Waiting for containers to be finished for app 1... Tried "
        + waitCount + " times already..");
    Thread.sleep(100);
  }
  Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
  Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  // As container return 2 GB back, the available resource becomes 0 again.
  Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
  rm.stop();
}
 
Developer: naver, Project: hadoop, Lines of code: 80, Source file: TestFifoScheduler.java

Example 4: testAppCleanupWhenRMRestartedBeforeAppFinished

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; // import the package/class that the method depends on
@SuppressWarnings("resource")
@Test(timeout = 60000)
public void testAppCleanupWhenRMRestartedBeforeAppFinished() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);

  // start RM
  MockRM rm1 = new MockRM(conf, memStore);
  rm1.start();
  MockNM nm1 =
      new MockNM("127.0.0.1:1234", 1024, rm1.getResourceTrackerService());
  nm1.registerNode();
  MockNM nm2 =
      new MockNM("127.0.0.1:5678", 1024, rm1.getResourceTrackerService());
  nm2.registerNode();

  // create app and launch the AM
  RMApp app0 = rm1.submitApp(200);
  MockAM am0 = launchAM(app0, rm1, nm1);

  // alloc another container on nm2
  AllocateResponse allocResponse =
      am0.allocate(Arrays.asList(ResourceRequest.newInstance(
          Priority.newInstance(1), "*", Resource.newInstance(1024, 0, 0), 1)),
          null);
  while (null == allocResponse.getAllocatedContainers()
      || allocResponse.getAllocatedContainers().isEmpty()) {
    nm2.nodeHeartbeat(true);
    allocResponse = am0.allocate(null, null);
    Thread.sleep(1000);
  }

  // start new RM
  MockRM rm2 = new MockRM(conf, memStore);
  rm2.start();

  // nm1/nm2 register to rm2, and do a heartbeat
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  nm1.registerNode(Arrays.asList(NMContainerStatus.newInstance(
    ContainerId.newContainerId(am0.getApplicationAttemptId(), 1),
    ContainerState.COMPLETE, Resource.newInstance(1024, 1, 1), "", 0,
    Priority.newInstance(0), 1234)), Arrays.asList(app0.getApplicationId()));
  nm2.setResourceTrackerService(rm2.getResourceTrackerService());
  nm2.registerNode(Arrays.asList(app0.getApplicationId()));

  // assert app state has been saved.
  rm2.waitForState(app0.getApplicationId(), RMAppState.FAILED);

  // wait for application cleanup message received on NM1
  waitForAppCleanupMessageRecved(nm1, app0.getApplicationId());

  // wait for application cleanup message received on NM2
  waitForAppCleanupMessageRecved(nm2, app0.getApplicationId());

  rm1.stop();
  rm2.stop();
}
 
Developer: naver, Project: hadoop, Lines of code: 59, Source file: TestApplicationCleanup.java

Example 5: allocateContainers

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; // import the package/class that the method depends on
private Set<Container> allocateContainers(
    AMRMClientImpl<ContainerRequest> rmClient, int num)
    throws YarnException, IOException {
  // setup container request
  Resource capability = Resource.newInstance(1024, 0);
  Priority priority = Priority.newInstance(0);
  String node = nodeReports.get(0).getNodeId().getHost();
  String rack = nodeReports.get(0).getRackName();
  String[] nodes = new String[] {node};
  String[] racks = new String[] {rack};

  for (int i = 0; i < num; ++i) {
    rmClient.addContainerRequest(new ContainerRequest(capability, nodes,
        racks, priority));
  }

  int containersRequestedAny = rmClient.remoteRequestsTable.get(priority)
      .get(ResourceRequest.ANY).get(capability).remoteRequest
      .getNumContainers();

  // RM should allocate container within 2 calls to allocate()
  int allocatedContainerCount = 0;
  int iterationsLeft = 2;
  Set<Container> containers = new TreeSet<Container>();
  while (allocatedContainerCount < containersRequestedAny
      && iterationsLeft > 0) {
    AllocateResponse allocResponse = rmClient.allocate(0.1f);

    allocatedContainerCount += allocResponse.getAllocatedContainers().size();
    for(Container container : allocResponse.getAllocatedContainers()) {
      containers.add(container);
    }
    if (!allocResponse.getNMTokens().isEmpty()) {
      for (NMToken token : allocResponse.getNMTokens()) {
        rmClient.getNMTokenCache().setToken(token.getNodeId().toString(),
            token.getToken());
      }
    }
    if(allocatedContainerCount < containersRequestedAny) {
      // sleep to let NM's heartbeat to RM and trigger allocations
      sleep(1000);
    }

    --iterationsLeft;
  }
  return containers;
}
 
Developer: naver, Project: hadoop, Lines of code: 48, Source file: TestNMClient.java

Example 6: testAppCleanupWhenRMRestartedBeforeAppFinished

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; // import the package/class that the method depends on
@SuppressWarnings("resource")
@Test(timeout = 60000)
public void testAppCleanupWhenRMRestartedBeforeAppFinished() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);

  // start RM
  MockRM rm1 = new MockRM(conf, memStore);
  rm1.start();
  MockNM nm1 =
      new MockNM("127.0.0.1:1234", 1024, rm1.getResourceTrackerService());
  nm1.registerNode();
  MockNM nm2 =
      new MockNM("127.0.0.1:5678", 1024, rm1.getResourceTrackerService());
  nm2.registerNode();

  // create app and launch the AM
  RMApp app0 = rm1.submitApp(200);
  MockAM am0 = launchAM(app0, rm1, nm1);

  // alloc another container on nm2
  AllocateResponse allocResponse =
      am0.allocate(Arrays.asList(ResourceRequest.newInstance(
          Priority.newInstance(1), "*", Resource.newInstance(1024, 0), 1)),
          null);
  while (null == allocResponse.getAllocatedContainers()
      || allocResponse.getAllocatedContainers().isEmpty()) {
    nm2.nodeHeartbeat(true);
    allocResponse = am0.allocate(null, null);
    Thread.sleep(1000);
  }

  // start new RM
  MockRM rm2 = new MockRM(conf, memStore);
  rm2.start();

  // nm1/nm2 register to rm2, and do a heartbeat
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  nm1.registerNode(Arrays.asList(NMContainerStatus.newInstance(
    ContainerId.newContainerId(am0.getApplicationAttemptId(), 1),
    ContainerState.COMPLETE, Resource.newInstance(1024, 1), "", 0,
    Priority.newInstance(0), 1234)), Arrays.asList(app0.getApplicationId()));
  nm2.setResourceTrackerService(rm2.getResourceTrackerService());
  nm2.registerNode(Arrays.asList(app0.getApplicationId()));

  // assert app state has been saved.
  rm2.waitForState(app0.getApplicationId(), RMAppState.FAILED);

  // wait for application cleanup message received on NM1
  waitForAppCleanupMessageRecved(nm1, app0.getApplicationId());

  // wait for application cleanup message received on NM2
  waitForAppCleanupMessageRecved(nm2, app0.getApplicationId());

  rm1.stop();
  rm2.stop();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 59, Source file: TestApplicationCleanup.java

Example 7: testResourceOverCommit

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; // import the package/class that the method depends on
@Test(timeout = 60000)
public void testResourceOverCommit() throws Exception {
  MockRM rm = new MockRM(conf);
  rm.start();

  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB);

  RMApp app1 = rm.submitApp(2048);
  // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 =
      rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report, 2 GB used and 2 GB available
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory());

  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request

  // kick the scheduler, 2 GB given to AM1, resource remaining 0
  nm1.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(1000);
    alloc1Response = am1.schedule();
  }

  List<Container> allocated1 = alloc1Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated1.size());
  Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());

  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report, 4 GB used and 0 GB available
  Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());

  // check container is assigned with 2 GB.
  Container c1 = allocated1.get(0);
  Assert.assertEquals(2 * GB, c1.getResource().getMemory());

  // update node resource to 2 GB, so resource is over-consumed.
  Map<NodeId, ResourceOption> nodeResourceMap =
      new HashMap<NodeId, ResourceOption>();
  nodeResourceMap.put(nm1.getNodeId(),
      ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1));
  UpdateNodeResourceRequest request =
      UpdateNodeResourceRequest.newInstance(nodeResourceMap);
  rm.getAdminService().updateNodeResource(request);

  // Now, the used resource is still 4 GB, and available resource is minus
  // value.
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory());

  // Check container can complete successfully in case of resource
  // over-commitment.
  ContainerStatus containerStatus =
      BuilderUtils.newContainerStatus(c1.getId(), ContainerState.COMPLETE,
          "", 0, c1.getResource());
  nm1.containerStatus(containerStatus);
  int waitCount = 0;
  while (attempt1.getJustFinishedContainers().size() < 1 && waitCount++ != 20) {
    LOG.info("Waiting for containers to be finished for app 1... Tried "
        + waitCount + " times already..");
    Thread.sleep(100);
  }
  Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
  Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses()
      .size());
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  // As container return 2 GB back, the available resource becomes 0 again.
  Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
  rm.stop();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 82, Source file: TestFifoScheduler.java

Example 8: allocateAndStartContainers

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; // import the package/class that the method depends on
private List<Container> allocateAndStartContainers(
    final AMRMClient<ContainerRequest> amClient, final NMClient nmClient,
    int num) throws YarnException, IOException {
  // set up allocation requests
  for (int i = 0; i < num; ++i) {
    amClient.addContainerRequest(
        new ContainerRequest(capability, nodes, racks, priority));
  }
  // send allocation requests
  amClient.allocate(0.1f);
  // sleep to let NM's heartbeat to RM and trigger allocations
  sleep(150);
  // get allocations
  AllocateResponse allocResponse = amClient.allocate(0.1f);
  List<Container> containers = allocResponse.getAllocatedContainers();
  Assert.assertEquals(num, containers.size());
  // build container launch context
  Credentials ts = new Credentials();
  DataOutputBuffer dob = new DataOutputBuffer();
  ts.writeTokenStorageToStream(dob);
  ByteBuffer securityTokens =
      ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  // start a process long enough for increase/decrease action to take effect
  ContainerLaunchContext clc = BuilderUtils.newContainerLaunchContext(
      Collections.<String, LocalResource>emptyMap(),
      new HashMap<String, String>(), Arrays.asList("sleep", "100"),
      new HashMap<String, ByteBuffer>(), securityTokens,
      new HashMap<ApplicationAccessType, String>());
  // start the containers and make sure they are in RUNNING state
  try {
    for (int i = 0; i < num; i++) {
      Container container = containers.get(i);
      nmClient.startContainer(container, clc);
      // NodeManager may still need some time to get the stable
      // container status
      while (true) {
        ContainerStatus status = nmClient.getContainerStatus(
            container.getId(), container.getNodeId());
        if (status.getState() == ContainerState.RUNNING) {
          break;
        }
        sleep(100);
      }
    }
  } catch (YarnException e) {
    throw new AssertionError("Exception is not expected: " + e);
  }
  // sleep to let NM's heartbeat to RM to confirm container launch
  sleep(200);
  return containers;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 52, Source file: TestAMRMClient.java

Example 9: testResourceOverCommit

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; // import the package/class that the method depends on
@Test
public void testResourceOverCommit() throws Exception {
  MockRM rm = new MockRM(conf);
  rm.start();
  
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB);
  
  RMApp app1 = rm.submitApp(2048);
  // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
      nm1.getNodeId());
  // check node report, 2 GB used and 2 GB available
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory());

  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request

  // kick the scheduler, 2 GB given to AM1, resource remaining 0
  nm1.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(1000);
    alloc1Response = am1.schedule();
  }

  List<Container> allocated1 = alloc1Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated1.size());
  Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
  
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report, 4 GB used and 0 GB available
  Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());

  // check container is assigned with 2 GB.
  Container c1 = allocated1.get(0);
  Assert.assertEquals(2 * GB, c1.getResource().getMemory());
  
  // update node resource to 2 GB, so resource is over-consumed.
  Map<NodeId, ResourceOption> nodeResourceMap = 
      new HashMap<NodeId, ResourceOption>();
  nodeResourceMap.put(nm1.getNodeId(), 
      ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1));
  UpdateNodeResourceRequest request = 
      UpdateNodeResourceRequest.newInstance(nodeResourceMap);
  AdminService as = rm.adminService;
  as.updateNodeResource(request);
  
  // Now, the used resource is still 4 GB, and available resource is minus value.
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory());
  
  // Check container can complete successfully in case of resource over-commitment.
  ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
      c1.getId(), ContainerState.COMPLETE, "", 0);
  nm1.containerStatus(containerStatus);
  int waitCount = 0;
  while (attempt1.getJustFinishedContainers().size() < 1
      && waitCount++ != 20) {
    LOG.info("Waiting for containers to be finished for app 1... Tried "
        + waitCount + " times already..");
    Thread.sleep(100);
  }
  Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
  Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  // As container return 2 GB back, the available resource becomes 0 again.
  Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
  rm.stop();
}
 
Developer: yncxcw, Project: big-c, Lines of code: 80, Source file: TestFifoScheduler.java


Note: The org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse.getAllocatedContainers method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and the source code copyright belongs to the original authors. Please consult each project's license before distributing or using the code, and do not republish without permission.