当前位置: 首页>>代码示例>>Java>>正文


Java MockRM.close方法代码示例

本文整理汇总了Java中org.apache.hadoop.yarn.server.resourcemanager.MockRM.close方法的典型用法代码示例。如果您正苦于以下问题:Java MockRM.close方法的具体用法?Java MockRM.close怎么用?Java MockRM.close使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.yarn.server.resourcemanager.MockRM的用法示例。


在下文中一共展示了MockRM.close方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testContainerAllocationWithSingleUserLimits

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
// Verifies user-limit behavior when a queue spans the default (empty)
// partition and an exclusive node-label partition: queue a1 holds only 10%
// of the unlabeled partition but 100% of label "x", so allocations in the
// empty partition are capped while label=x requests can use the whole node.
@Test (timeout = 300000)
public void testContainerAllocationWithSingleUserLimits() throws Exception {
  final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
  mgr.init(conf);

  // set node -> label: h1 carries "x", h2 carries "y", h3 stays unlabeled
  mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y"));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"),
      NodeId.newInstance("h2", 0), toSet("y")));

  // inject node label manager into the RM via the factory override
  MockRM rm1 = new MockRM(TestUtils.getConfigurationWithDefaultQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };

  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x
  rm1.registerNode("h2:1234", 8000); // label = y
  MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty>

  // launch an app to queue a1 (label = x), and check all container will
  // be allocated in h1
  RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);

  // A has only 10% of x, so it can only allocate one container in label=empty
  ContainerId containerId =
      ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
  am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "");
  Assert.assertTrue(rm1.waitForState(nm3, containerId,
        RMContainerState.ALLOCATED, 10 * 1000));
  // Cannot allocate 2nd label=empty container
  containerId =
      ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
  am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "");
  Assert.assertFalse(rm1.waitForState(nm3, containerId,
        RMContainerState.ALLOCATED, 10 * 1000));

  // A has default user limit = 100, so it can use all resource in label = x
  // We can allocate floor(8000 / 1024) = 7 containers
  // NOTE(review): the loop below only requests container ids 3..8, i.e. 6
  // task containers — confirm whether "7" above counts the AM container.
  for (int id = 3; id <= 8; id++) {
    containerId =
        ContainerId.newContainerId(am1.getApplicationAttemptId(), id);
    am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x");
    Assert.assertTrue(rm1.waitForState(nm1, containerId,
        RMContainerState.ALLOCATED, 10 * 1000));
  }
  rm1.close();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:54,代码来源:TestContainerAllocation.java

示例2: testContainerAllocateWithLabels

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
// Verifies that an explicit label expression on a ResourceRequest routes the
// container to a node carrying that label: x -> h1, y -> h2, no label -> h3.
// Each AM is launched on the unlabeled node (nm3) so that only the task
// container's placement is under test.
@Test (timeout = 120000)
public void testContainerAllocateWithLabels() throws Exception {
  // set node -> label: h1 carries "x", h2 carries "y", h3 stays unlabeled
  mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y"));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"),
      NodeId.newInstance("h2", 0), toSet("y")));

  // inject node label manager into the RM via the factory override
  MockRM rm1 = new MockRM(getConfigurationWithQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };

  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x
  MockNM nm2 = rm1.registerNode("h2:1234", 8000); // label = y
  MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty>

  ContainerId containerId;

  // launch an app to queue a1 (label = x), and check all container will
  // be allocated in h1
  RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm3);

  // request a container with label expression "x"; it must land on h1
  // (the assertFalse below confirms it was NOT placed on the y-node first)
  am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x");
  containerId =
      ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
  Assert.assertFalse(rm1.waitForState(nm2, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  Assert.assertTrue(rm1.waitForState(nm1, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
      "h1");

  // launch an app to queue b1 (label = y), and check all container will
  // be allocated in h2
  RMApp app2 = rm1.submitApp(200, "app", "user", null, "b1");
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm3);

  // request a container with label expression "y"; it must land on h2
  am2.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y");
  containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2);
  Assert.assertFalse(rm1.waitForState(nm1, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  Assert.assertTrue(rm1.waitForState(nm2, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  checkTaskContainersHost(am2.getApplicationAttemptId(), containerId, rm1,
      "h2");

  // launch an app to queue c1 (label = ""), and check all container will
  // be allocated in h3
  RMApp app3 = rm1.submitApp(200, "app", "user", null, "c1");
  MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm3);

  // request a container with no label expression; it must land on h3
  am3.allocate("*", 1024, 1, new ArrayList<ContainerId>());
  containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2);
  Assert.assertFalse(rm1.waitForState(nm2, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  Assert.assertTrue(rm1.waitForState(nm3, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1,
      "h3");

  rm1.close();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:72,代码来源:TestContainerAllocation.java

示例3: testContainerAllocateWithDefaultQueueLabels

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
// Verifies container placement driven by each queue's DEFAULT label
// expression rather than a per-request label: apps in a1/b1/c1 make
// unlabeled requests, and the queue default routes them to h1/h2/h3.
@Test (timeout = 120000)
public void testContainerAllocateWithDefaultQueueLabels() throws Exception {
  // This test is pretty much similar to testContainerAllocateWithLabel.
  // Difference is, this test doesn't specify label expression in ResourceRequest,
  // instead, it uses default queue label expression

  // set node -> label: h1 carries "x", h2 carries "y", h3 stays unlabeled
  mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y"));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"),
      NodeId.newInstance("h2", 0), toSet("y")));

  // inject node label manager into the RM via the factory override
  MockRM rm1 = new MockRM(TestUtils.getConfigurationWithDefaultQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };

  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x
  MockNM nm2 = rm1.registerNode("h2:1234", 8000); // label = y
  MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty>

  ContainerId containerId;

  // launch an app to queue a1 (label = x), and check all container will
  // be allocated in h1 (AM launched on h1 — the queue's own partition)
  RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);

  // request a container with no label; queue default expression routes to h1
  am1.allocate("*", 1024, 1, new ArrayList<ContainerId>());
  containerId =
      ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
  Assert.assertFalse(rm1.waitForState(nm3, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  Assert.assertTrue(rm1.waitForState(nm1, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
      "h1");

  // launch an app to queue b1 (label = y), and check all container will
  // be allocated in h2
  RMApp app2 = rm1.submitApp(200, "app", "user", null, "b1");
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2);

  // request a container with no label; queue default expression routes to h2
  am2.allocate("*", 1024, 1, new ArrayList<ContainerId>());
  containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2);
  Assert.assertFalse(rm1.waitForState(nm3, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  Assert.assertTrue(rm1.waitForState(nm2, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  checkTaskContainersHost(am2.getApplicationAttemptId(), containerId, rm1,
      "h2");

  // launch an app to queue c1 (label = ""), and check all container will
  // be allocated in h3
  RMApp app3 = rm1.submitApp(200, "app", "user", null, "c1");
  MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm3);

  // request a container with no label; c1 has no label, so it stays on h3
  am3.allocate("*", 1024, 1, new ArrayList<ContainerId>());
  containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2);
  Assert.assertFalse(rm1.waitForState(nm2, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  Assert.assertTrue(rm1.waitForState(nm3, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1,
      "h3");

  rm1.close();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:76,代码来源:TestContainerAllocation.java

示例4: testExcessReservationWillBeUnreserved

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
@Test(timeout = 60000)
public void testExcessReservationWillBeUnreserved() throws Exception {
  /**
   * Test case: Submit two applications (app1/app2) to a queue, backed by a
   * node with 8G resource. App1 allocates a 4G container, then app2 asks
   * for a 4G container; app2's request gets reserved on the node.
   *
   * Before the next node heartbeat, app2 cancels its ask; we should find
   * the reserved resource is cancelled as well.
   */
  MockRM rm1 = new MockRM();

  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB);
  // Register a second node; its MockNM handle is never used below.
  rm1.registerNode("h2:1234", 8 * GB);

  // launch an app to queue, AM container should be launched in nm1
  RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "default");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);

  // launch another app to queue, AM container should be launched in nm1
  RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "default");
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);

  // Both apps ask for one 4G container each.
  am1.allocate("*", 4 * GB, 1, new ArrayList<ContainerId>());
  am2.allocate("*", 4 * GB, 1, new ArrayList<ContainerId>());

  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());

  // Do node heartbeats 2 times:
  // first heartbeat allocates the container for app1, second heartbeat
  // reserves the container for app2 (node lacks 4G free at that point)
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));

  FiCaSchedulerApp schedulerApp1 =
      cs.getApplicationAttempt(am1.getApplicationAttemptId());
  FiCaSchedulerApp schedulerApp2 =
      cs.getApplicationAttempt(am2.getApplicationAttemptId());

  // A 4G container was allocated for app1 (AM + task = 2 live containers),
  // while app2 still has only its AM container plus a reservation.
  Assert.assertEquals(2, schedulerApp1.getLiveContainers().size());
  Assert.assertEquals(1, schedulerApp2.getLiveContainers().size());
  Assert.assertTrue(schedulerApp2.getReservedContainers().size() > 0);

  // NM1 has available resource = 2G (8G - 2 * 1G AMs - 4G task)
  Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId())
      .getAvailableResource().getMemory());
  Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
  // Usage of queue = 4G + 2 * 1G + 4G (reserved)
  Assert.assertEquals(10 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getUsed().getMemory());

  // Cancel asks of app2 (numContainers = 0) and re-kick the scheduler
  am2.allocate("*", 4 * GB, 0, new ArrayList<ContainerId>());
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));

  // App2's reservation must be cancelled, releasing the reserved 4G
  Assert.assertTrue(schedulerApp2.getReservedContainers().size() == 0);
  Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId())
      .getAvailableResource().getMemory());
  Assert.assertNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
  Assert.assertEquals(6 * GB, cs.getRootQueue().getQueueResourceUsage()
      .getUsed().getMemory());

  rm1.close();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:74,代码来源:TestContainerAllocation.java

示例5: testAtleastOneAMRunPerPartition

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
// Verifies that at least one AM is activated per queue/partition even when
// that single AM exceeds the configured am-resource-limit, while a second
// AM in the same queue/partition stays pending.
@Test(timeout = 120000)
public void testAtleastOneAMRunPerPartition() throws Exception {
  /*
   * Test Case:
   * Even though am-resource-limit per queue/partition may cross if we
   * activate an app (high am resource demand), we have to activate it
   * since no other apps are running in that Queue/Partition. Here also
   * we run one test case for partition level and one in queue level to
   * ensure no breakage in existing functionality.
   *
   * Queue a1 supports labels (x,y). Configure am-resource-limit as 0.15 (x)
   * Queue c1 supports default label. Configure am-resource-limit as 0.15
   *
   * Queue A1 for label X can only support 1.5Gb AM resource.
   * Queue C1 (empty label) can support 1.5Gb AM resource.
   *
   * Verify at least one AM is launched in each Queue.
   */
  simpleNodeLabelMappingToManager();
  CapacitySchedulerConfiguration config = (CapacitySchedulerConfiguration)
      TestUtils.getConfigurationWithQueueLabels(conf);

  // After getting queue conf, configure AM resource percent for Queue A1
  // as 0.15 (Label X) and for Queue C1 as 0.15 (Empty Label)
  final String A1 = CapacitySchedulerConfiguration.ROOT + ".a" + ".a1";
  final String C1 = CapacitySchedulerConfiguration.ROOT + ".c" + ".c1";
  config.setMaximumAMResourcePercentPerPartition(A1, "x", 0.15f);
  config.setMaximumApplicationMasterResourcePerQueuePercent(C1, 0.15f);
  // inject node label manager into the RM via the factory override
  MockRM rm1 = new MockRM(config) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };

  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB); // label = x
  rm1.registerNode("h2:1234", 10 * GB); // label = y
  MockNM nm3 = rm1.registerNode("h3:1234", 10 * GB); // label = <empty>

  // Submit app1 (2 GB) to Queue A1 and label X
  RMApp app1 = rm1.submitApp(2 * GB, "app", "user", null, "a1", "x");
  // This app must be activated even though the am-resource per-partition
  // limit is only 1.5GB (first app in the partition is always activated).
  MockRM.launchAndRegisterAM(app1, rm1, nm1);

  // Submit 2nd app to label "X" with one GB and it must be pending since
  // am-resource per-partition limit is crossed (1.5 GB was the limit).
  rm1.submitApp(GB, "app", "user", null, "a1", "x");

  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1");
  Assert.assertNotNull(leafQueue);

  // Only 1 app will be activated as am-limit for partition "x" is 0.15
  Assert.assertEquals(1, leafQueue.getNumActiveApplications());
  Assert.assertEquals(1, leafQueue.getNumPendingApplications());

  // Now verify the same test case in Queue C1 which takes default label
  // to see queue level am-resource-limit is still working as expected.

  // Submit an app to Queue C1 with empty label (2 GB)
  RMApp app3 = rm1.submitApp(2 * GB, "app", "user", null, "c1");
  // This app must be activated even though the am-resource per-queue
  // limit is only for 1.5GB
  MockRM.launchAndRegisterAM(app3, rm1, nm3);

  // Submit 2nd app to C1 (Default label, hence am-limit per-queue will be
  // considered).
  rm1.submitApp(GB, "app", "user", null, "c1");

  leafQueue = (LeafQueue) cs.getQueue("c1");
  Assert.assertNotNull(leafQueue);

  // 1 app will be activated (and it has AM resource more than queue limit)
  Assert.assertEquals(1, leafQueue.getNumActiveApplications());
  Assert.assertEquals(1, leafQueue.getNumPendingApplications());
  rm1.close();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:82,代码来源:TestApplicationLimitsByPartition.java

示例6: testDefaultAMLimitFromQueueForPartition

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
@Test(timeout = 120000)
public void testDefaultAMLimitFromQueueForPartition() throws Exception {
  /*
   * Test Case:
   * Configure AM resource limit per queue level. If partition level config
   * is not found, we will be considering per-queue level am-limit. Ensure
   * this is working as expected.
   *
   * Queue A1 am-resource limit to be configured as 0.2 (not for partition x)
   *
   * Even though per-partition level config is not done, CS should consider
   * the configuration done for queue level.
   */
  simpleNodeLabelMappingToManager();
  CapacitySchedulerConfiguration config = (CapacitySchedulerConfiguration)
      TestUtils.getConfigurationWithQueueLabels(conf);

  // After getting queue conf, configure AM resource percent for Queue A1
  // as 0.2 (not for partition, rather in queue level)
  final String A1 = CapacitySchedulerConfiguration.ROOT + ".a" + ".a1";
  config.setMaximumApplicationMasterResourcePerQueuePercent(A1, 0.2f);
  // inject node label manager into the RM via the factory override
  MockRM rm1 = new MockRM(config) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };

  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  // Register the nodes; the MockNM handles are not needed in this test.
  rm1.registerNode("h1:1234", 10 * GB); // label = x
  rm1.registerNode("h2:1234", 10 * GB); // label = y
  rm1.registerNode("h3:1234", 10 * GB); // label = <empty>

  // Submit app1 (2 GB) to Queue A1 and label X
  RMApp app1 = rm1.submitApp(2 * GB, "app", "user", null, "a1", "x");

  // Submit 2nd app to label "X" with one GB. Since queue am-limit is 2GB,
  // 2nd app will be pending and first one will get activated.
  RMApp pendingApp = rm1.submitApp(GB, "app", "user", null, "a1", "x");

  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1");
  Assert.assertNotNull(leafQueue);

  // Only 1 app will be activated as am-limit for queue is 0.2 and same is
  // used for partition "x" also.
  Assert.assertEquals(1, leafQueue.getNumActiveApplications());
  Assert.assertEquals(1, leafQueue.getNumPendingApplications());
  // The activated/pending state must also be reflected in AM diagnostics.
  Assert.assertTrue("AM diagnostics not set properly", app1.getDiagnostics()
      .toString().contains(AMState.ACTIVATED.getDiagnosticMessage()));
  Assert.assertTrue("AM diagnostics not set properly",
      pendingApp.getDiagnostics().toString()
          .contains(AMState.INACTIVATED.getDiagnosticMessage()));
  Assert.assertTrue("AM diagnostics not set properly",
      pendingApp.getDiagnostics().toString()
          .contains(CSAMContainerLaunchDiagnosticsConstants.QUEUE_AM_RESOURCE_LIMIT_EXCEED));
  rm1.close();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:61,代码来源:TestApplicationLimitsByPartition.java

示例7: testContainerAllocationWithSingleUserLimits

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
// Variant of the user-limit test using the default-exclusivity label API:
// queue a1 holds only 10% of the unlabeled partition but 100% of label "x",
// so empty-partition allocations are capped while label=x requests can use
// the whole labeled node.
@Test (timeout = 300000)
public void testContainerAllocationWithSingleUserLimits() throws Exception {
  final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
  mgr.init(conf);

  // set node -> label: h1 carries "x", h2 carries "y", h3 stays unlabeled
  mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"),
      NodeId.newInstance("h2", 0), toSet("y")));

  // inject node label manager into the RM via the factory override
  MockRM rm1 = new MockRM(TestUtils.getConfigurationWithDefaultQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };

  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x
  rm1.registerNode("h2:1234", 8000); // label = y
  MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty>

  // launch an app to queue a1 (label = x), and check all container will
  // be allocated in h1
  RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);

  // A has only 10% of x, so it can only allocate one container in label=empty
  ContainerId containerId =
      ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
  am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "");
  Assert.assertTrue(rm1.waitForState(nm3, containerId,
        RMContainerState.ALLOCATED, 10 * 1000));
  // Cannot allocate 2nd label=empty container
  containerId =
      ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
  am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "");
  Assert.assertFalse(rm1.waitForState(nm3, containerId,
        RMContainerState.ALLOCATED, 10 * 1000));

  // A has default user limit = 100, so it can use all resource in label = x
  // We can allocate floor(8000 / 1024) = 7 containers
  // NOTE(review): the loop below only requests container ids 3..8, i.e. 6
  // task containers — confirm whether "7" above counts the AM container.
  for (int id = 3; id <= 8; id++) {
    containerId =
        ContainerId.newContainerId(am1.getApplicationAttemptId(), id);
    am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x");
    Assert.assertTrue(rm1.waitForState(nm1, containerId,
        RMContainerState.ALLOCATED, 10 * 1000));
  }
  rm1.close();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:54,代码来源:TestNodeLabelContainerAllocation.java

示例8: testContainerAllocateWithLabels

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
// Variant using the default-exclusivity label API: an explicit label
// expression on each ResourceRequest routes the container to the node
// carrying that label (x -> h1, y -> h2, no label -> h3). AMs launch on
// the unlabeled node so only task-container placement is exercised.
@Test (timeout = 120000)
public void testContainerAllocateWithLabels() throws Exception {
  // set node -> label: h1 carries "x", h2 carries "y", h3 stays unlabeled
  mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"),
      NodeId.newInstance("h2", 0), toSet("y")));

  // inject node label manager into the RM via the factory override
  MockRM rm1 = new MockRM(getConfigurationWithQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };

  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x
  MockNM nm2 = rm1.registerNode("h2:1234", 8000); // label = y
  MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty>

  ContainerId containerId;

  // launch an app to queue a1 (label = x), and check all container will
  // be allocated in h1
  RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm3);

  // request a container with label expression "x"; it must land on h1
  am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x");
  containerId =
      ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
  Assert.assertFalse(rm1.waitForState(nm2, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  Assert.assertTrue(rm1.waitForState(nm1, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
      "h1");

  // launch an app to queue b1 (label = y), and check all container will
  // be allocated in h2
  RMApp app2 = rm1.submitApp(200, "app", "user", null, "b1");
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm3);

  // request a container with label expression "y"; it must land on h2
  am2.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y");
  containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2);
  Assert.assertFalse(rm1.waitForState(nm1, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  Assert.assertTrue(rm1.waitForState(nm2, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  checkTaskContainersHost(am2.getApplicationAttemptId(), containerId, rm1,
      "h2");

  // launch an app to queue c1 (label = ""), and check all container will
  // be allocated in h3
  RMApp app3 = rm1.submitApp(200, "app", "user", null, "c1");
  MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm3);

  // request a container with no label expression; it must land on h3
  am3.allocate("*", 1024, 1, new ArrayList<ContainerId>());
  containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2);
  Assert.assertFalse(rm1.waitForState(nm2, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  Assert.assertTrue(rm1.waitForState(nm3, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1,
      "h3");

  rm1.close();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:72,代码来源:TestNodeLabelContainerAllocation.java

示例9: testContainerAllocateWithDefaultQueueLabels

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
// Variant using the default-exclusivity label API: apps make unlabeled
// requests, and each queue's DEFAULT label expression routes containers to
// the matching partition node (a1 -> h1, b1 -> h2, c1 -> h3).
@Test (timeout = 120000)
public void testContainerAllocateWithDefaultQueueLabels() throws Exception {
  // This test is pretty much similar to testContainerAllocateWithLabel.
  // Difference is, this test doesn't specify label expression in ResourceRequest,
  // instead, it uses default queue label expression

  // set node -> label: h1 carries "x", h2 carries "y", h3 stays unlabeled
  mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"),
      NodeId.newInstance("h2", 0), toSet("y")));

  // inject node label manager into the RM via the factory override
  MockRM rm1 = new MockRM(TestUtils.getConfigurationWithDefaultQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };

  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x
  MockNM nm2 = rm1.registerNode("h2:1234", 8000); // label = y
  MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = <empty>

  ContainerId containerId;

  // launch an app to queue a1 (label = x), and check all container will
  // be allocated in h1 (AM launched on h1 — the queue's own partition)
  RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);

  // request a container with no label; queue default expression routes to h1
  am1.allocate("*", 1024, 1, new ArrayList<ContainerId>());
  containerId =
      ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
  Assert.assertFalse(rm1.waitForState(nm3, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  Assert.assertTrue(rm1.waitForState(nm1, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1,
      "h1");

  // launch an app to queue b1 (label = y), and check all container will
  // be allocated in h2
  RMApp app2 = rm1.submitApp(200, "app", "user", null, "b1");
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2);

  // request a container with no label; queue default expression routes to h2
  am2.allocate("*", 1024, 1, new ArrayList<ContainerId>());
  containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 2);
  Assert.assertFalse(rm1.waitForState(nm3, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  Assert.assertTrue(rm1.waitForState(nm2, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  checkTaskContainersHost(am2.getApplicationAttemptId(), containerId, rm1,
      "h2");

  // launch an app to queue c1 (label = ""), and check all container will
  // be allocated in h3
  RMApp app3 = rm1.submitApp(200, "app", "user", null, "c1");
  MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm3);

  // request a container with no label; c1 has no label, so it stays on h3
  am3.allocate("*", 1024, 1, new ArrayList<ContainerId>());
  containerId = ContainerId.newContainerId(am3.getApplicationAttemptId(), 2);
  Assert.assertFalse(rm1.waitForState(nm2, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  Assert.assertTrue(rm1.waitForState(nm3, containerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1,
      "h3");

  rm1.close();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:76,代码来源:TestNodeLabelContainerAllocation.java

示例10: testPreferenceOfNeedyAppsTowardsNodePartitions

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
@Test
public void testPreferenceOfNeedyAppsTowardsNodePartitions() throws Exception {
  /**
   * Test case: Submit two applications to a queue (app1 first, then app2).
   * app1 asks for no-label containers; app2 asks for label=y containers,
   * where "y" is a non-exclusive partition hosted on node1. When node1
   * heartbeats, app2 must get its allocations first, even though app2 was
   * submitted later than app1.
   */

  // set node -> label: "x" is exclusive, "y" is non-exclusive; h1 gets "y"
  mgr.addToCluserNodeLabels(ImmutableSet.of(
      NodeLabel.newInstance("x"), NodeLabel.newInstance("y", false)));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("y")));

  // inject node label manager into the RM via the factory override
  MockRM rm1 = new MockRM(TestUtils.getConfigurationWithQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };

  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = y
  MockNM nm2 = rm1.registerNode("h2:1234", 100 * GB); // label = <empty>

  // launch an app to queue b1 (label = y), AM container should be launched in nm2
  RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b1");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2);

  // launch another app to queue b1 (label = y), AM container should be launched in nm2
  RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "b1");
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2);

  // request containers; note that app1 requests non-labeled containers and
  // app2 requests label=y containers (nm1 carries label y), so app2 should
  // be allocated on nm1 first even though app1 was submitted earlier
  am1.allocate("*", 1 * GB, 8, new ArrayList<ContainerId>());
  am2.allocate("*", 1 * GB, 8, new ArrayList<ContainerId>(), "y");

  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());

  // Do node heartbeats many times so all pending asks can be satisfied
  for (int i = 0; i < 50; i++) {
    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
  }

  // App2 will get preference to be allocated on node1, and node1 will be all
  // used by App2.
  FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1.getApplicationAttemptId());
  FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(am2.getApplicationAttemptId());
  // app1 gets nothing in nm1 (partition=y); its 8 tasks + AM land on nm2
  checkNumOfContainersInAnAppOnGivenNode(0, nm1.getNodeId(), schedulerApp1);
  checkNumOfContainersInAnAppOnGivenNode(9, nm2.getNodeId(), schedulerApp1);
  // app2 gets all 8 task containers in nm1 (partition=y); its AM is on nm2
  checkNumOfContainersInAnAppOnGivenNode(8, nm1.getNodeId(), schedulerApp2);
  checkNumOfContainersInAnAppOnGivenNode(1, nm2.getNodeId(), schedulerApp2);

  rm1.close();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:65,代码来源:TestNodeLabelContainerAllocation.java

示例11: testPreferenceOfNeedyPrioritiesUnderSameAppTowardsNodePartitions

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
@Test
public void
    testPreferenceOfNeedyPrioritiesUnderSameAppTowardsNodePartitions()
        throws Exception {
  /**
   * Test case: Submit one application, it asks label="" in priority=1 and
   * label="y" in priority=2. When a node with label=y heartbeats, priority=2
   * will get allocation first even if there's pending resource in priority=1.
   */
  
  // Register labels: x (exclusive) and y (non-exclusive), then map node h1
  // to the non-exclusive label y.
  mgr.addToCluserNodeLabels(ImmutableSet.of(
      NodeLabel.newInstance("x"), NodeLabel.newInstance("y", false)));
  // Makes y to be non-exclusive node labels
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("y")));

  // inject node label manager so the RM uses the labels configured above
  MockRM rm1 = new MockRM(TestUtils.getConfigurationWithQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };

  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = y
  MockNM nm2 = rm1.registerNode("h2:1234", 100 * GB); // label = <empty>
  
  ContainerId nextContainerId;

  // launch an app to queue b1; AM container is launched on the unlabeled nm2
  RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b1");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2);
  
  // request containers from am1: priority=1 asks for "" and priority=2 asks
  // for "y"; the "y" container should be allocated first (on nm1)
  nextContainerId =
      ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
  am1.allocate("*", 1 * GB, 1, 1, new ArrayList<ContainerId>(), "");
  am1.allocate("*", 1 * GB, 1, 2, new ArrayList<ContainerId>(), "y");
  Assert.assertTrue(rm1.waitForState(nm1, nextContainerId,
      RMContainerState.ALLOCATED, 10 * 1000));
  
  // Check pending resource for am1: priority=1 is still pending (1 GB) while
  // priority=2 has been fully allocated (0 GB pending)
  checkPendingResource(rm1, 1, am1.getApplicationAttemptId(), 1 * GB);
  checkPendingResource(rm1, 2, am1.getApplicationAttemptId(), 0 * GB);
  
  rm1.close();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:52,代码来源:TestNodeLabelContainerAllocation.java

示例12: testNonLabeledResourceRequestGetPreferrenceToNonLabeledNode

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
@Test
public void testNonLabeledResourceRequestGetPreferrenceToNonLabeledNode()
    throws Exception {
  /**
   * Test case: Submit one application asking for 6 label="" containers. NM1
   * has label=x (non-exclusive) and NM2 has no label; NM1/NM2 heartbeat
   * together. Even if NM1 has idle resource, containers are all allocated to
   * NM2 since non-labeled requests should get allocation on non-labeled
   * nodes first.
   */
  
  // Register labels: x (non-exclusive) and y (exclusive); map node h1 -> x.
  mgr.addToCluserNodeLabels(ImmutableSet.of(
      NodeLabel.newInstance("x", false), NodeLabel.newInstance("y")));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));

  // inject node label manager so the RM uses the labels configured above
  MockRM rm1 = new MockRM(TestUtils.getConfigurationWithQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };

  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // label = x
  MockNM nm2 = rm1.registerNode("h2:1234", 100 * GB); // label = <empty>
  
  ContainerId nextContainerId;

  // launch an app to queue b1; AM container is launched on the unlabeled nm2
  RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b1");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2);
  
  // request 6 label="" containers from am1 (container ids 2 to 7); nm1/nm2
  // heartbeat at the same time; check containers are always allocated to nm2.
  // This verifies that when there's resource available in the non-labeled
  // partition, non-labeled requests are satisfied by the non-labeled
  // partition first.
  am1.allocate("*", 1 * GB, 6, 1, new ArrayList<ContainerId>(), "");
  for (int i = 2; i < 2 + 6; i++) {
    nextContainerId =
        ContainerId.newContainerId(am1.getApplicationAttemptId(), i);
    Assert.assertTrue(rm1.waitForState(Arrays.asList(nm1, nm2),
        nextContainerId, RMContainerState.ALLOCATED, 10 * 1000));
  }
  // no more container allocated on nm1
  checkLaunchedContainerNumOnNode(rm1, nm1.getNodeId(), 0);
  // all 7 (1 AM container + 6 task container) containers allocated on nm2
  checkLaunchedContainerNumOnNode(rm1, nm2.getNodeId(), 7);   
  
  rm1.close();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:54,代码来源:TestNodeLabelContainerAllocation.java

示例13: testAMContainerAllocationWillAlwaysBeExclusive

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
@Test
public void testAMContainerAllocationWillAlwaysBeExclusive()
    throws Exception {
  /**
   * Test case: Submit one application without a partition, trying to allocate
   * on a node that has partition=x; it should fail to allocate since the AM
   * container will always respect exclusivity for partitions
   */
  
  // Register labels: x (non-exclusive) and y (exclusive); map node h1 -> x.
  mgr.addToCluserNodeLabels(ImmutableSet.of(
      NodeLabel.newInstance("x", false), NodeLabel.newInstance("y")));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));

  // inject node label manager so the RM uses the labels configured above
  MockRM rm1 = new MockRM(TestUtils.getConfigurationWithQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };

  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  String nodeIdStr = "h1:1234";
  MockNM nm1 = rm1.registerNode(nodeIdStr, 8 * GB); // label = x

  // submit an app without a label to queue b1; the only node has partition=x,
  // so the AM container must never be allocated there
  RMApp app = rm1.submitApp(1 * GB, "app", "user", null, "b1");
 
  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  
  // Heartbeat for many times, the app should get nothing
  for (int i = 0; i < 50; i++) {
    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
  }

  // The app's diagnostics should explain why the AM was not placed
  Assert.assertTrue(
      "Scheduler diagnostics should have reason for not assigning the node",
      app.getDiagnostics().toString().contains(
          CSAMContainerLaunchDiagnosticsConstants.SKIP_AM_ALLOCATION_IN_IGNORE_EXCLUSIVE_MODE));

  Assert.assertTrue(
      "Scheduler diagnostics should have last processed node information",
      app.getDiagnostics().toString().contains(
          CSAMContainerLaunchDiagnosticsConstants.LAST_NODE_PROCESSED_MSG
              + nodeIdStr + " ( Partition : [x]"));
  // no containers at all (not even the AM) should have launched on nm1
  Assert.assertEquals(0, cs.getSchedulerNode(nm1.getNodeId())
      .getNumContainers());
  
  rm1.close();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:54,代码来源:TestNodeLabelContainerAllocation.java

示例14: testSimpleIncreaseContainer

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
@Test
public void testSimpleIncreaseContainer() throws Exception {
  /**
   * Application has a container running, and the node has enough available
   * resource. Add an increase request to see if the container will be
   * increased.
   */
  MockRM rm1 = new MockRM() {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 20 * GB);

  // app1 -> a1
  RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "default");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);

  ContainerId containerId1 =
      ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
  sentRMContainerLaunched(rm1, containerId1);
  // am1 asks to change its AM container from 1GB to 3GB
  am1.sendContainerResizingRequest(Arrays.asList(
          ContainerResourceChangeRequest
              .newInstance(containerId1, Resources.createResource(3 * GB))),
      null);

  FiCaSchedulerApp app = getFiCaSchedulerApp(rm1, app1.getApplicationId());

  // The 2 GB delta (3 GB target - 1 GB current) shows up as pending resource
  checkPendingResource(rm1, "default", 2 * GB, null);
  Assert.assertEquals(2 * GB,
      app.getAppAttemptResourceUsage().getPending().getMemory());

  // NM1 does 1 heartbeat, which lets the scheduler satisfy the increase
  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));

  // Pending resource should be deducted once the increase is allocated
  checkPendingResource(rm1, "default", 0 * GB, null);
  Assert.assertEquals(0 * GB,
      app.getAppAttemptResourceUsage().getPending().getMemory());

  // The AM is notified of the increased container; node keeps 20 - 3 = 17 GB
  verifyContainerIncreased(am1.allocate(null, null), containerId1, 3 * GB);
  verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 17 * GB);

  rm1.close();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:50,代码来源:TestContainerResizing.java

示例15: testSimpleDecreaseContainer

import org.apache.hadoop.yarn.server.resourcemanager.MockRM; //导入方法依赖的package包/类
@Test
public void testSimpleDecreaseContainer() throws Exception {
  /**
   * Application has a container running; try to decrease the container and
   * check that the queue's usage and container resource are updated.
   */
  MockRM rm1 = new MockRM() {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 20 * GB);

  // app1 -> a1
  RMApp app1 = rm1.submitApp(3 * GB, "app", "user", null, "default");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  FiCaSchedulerApp app = getFiCaSchedulerApp(rm1, app1.getApplicationId());

  // AM container occupies 3 GB initially
  checkUsedResource(rm1, "default", 3 * GB, null);
  Assert.assertEquals(3 * GB,
      app.getAppAttemptResourceUsage().getUsed().getMemory());

  ContainerId containerId1 =
      ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
  sentRMContainerLaunched(rm1, containerId1);

  // am1 asks to change its AM container from 3GB down to 1GB
  AllocateResponse response = am1.sendContainerResizingRequest(null, Arrays
      .asList(ContainerResourceChangeRequest
          .newInstance(containerId1, Resources.createResource(1 * GB))));

  // Decrease takes effect immediately: queue usage drops to 1 GB
  verifyContainerDecreased(response, containerId1, 1 * GB);
  checkUsedResource(rm1, "default", 1 * GB, null);
  Assert.assertEquals(1 * GB,
      app.getAppAttemptResourceUsage().getUsed().getMemory());

  // Check if decreased containers were added to the RMNode, so the NM is
  // told to shrink the container on its next heartbeat
  RMNodeImpl rmNode =
      (RMNodeImpl) rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  Collection<Container> decreasedContainers =
      rmNode.getToBeDecreasedContainers();
  boolean rmNodeReceivedDecreaseContainer = false;
  for (Container c : decreasedContainers) {
    if (c.getId().equals(containerId1)
        && c.getResource().equals(Resources.createResource(1 * GB))) {
      rmNodeReceivedDecreaseContainer = true;
    }
  }
  Assert.assertTrue(rmNodeReceivedDecreaseContainer);

  rm1.close();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:55,代码来源:TestContainerResizing.java


注:本文中的org.apache.hadoop.yarn.server.resourcemanager.MockRM.close方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。