

Java DominantResourceFairnessPolicy Class Code Examples

This article compiles typical usage examples of the Java class org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy. If you are wondering what the DominantResourceFairnessPolicy class does, how to use it, or where to find usage examples, the curated examples below should help.

The DominantResourceFairnessPolicy class belongs to the org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies package. Fifteen code examples of the class are shown below, sorted by popularity by default.

Example 1: parse

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
/**
 * Returns {@link SchedulingPolicy} instance corresponding to the
 * {@link SchedulingPolicy} passed as a string. The policy can be "fair" for
 * FairSharePolicy, "fifo" for FifoPolicy, or "drf" for
 * DominantResourceFairnessPolicy. For a custom
 * {@link SchedulingPolicy} in the RM classpath, the policy should be the
 * canonical class name of the {@link SchedulingPolicy}.
 * 
 * @param policy canonical class name or "drf" or "fair" or "fifo"
 * @throws AllocationConfigurationException
 */
@SuppressWarnings("unchecked")
public static SchedulingPolicy parse(String policy)
    throws AllocationConfigurationException {
  @SuppressWarnings("rawtypes")
  Class clazz;
  String text = StringUtils.toLowerCase(policy);
  if (text.equalsIgnoreCase(FairSharePolicy.NAME)) {
    clazz = FairSharePolicy.class;
  } else if (text.equalsIgnoreCase(FifoPolicy.NAME)) {
    clazz = FifoPolicy.class;
  } else if (text.equalsIgnoreCase(DominantResourceFairnessPolicy.NAME)) {
    clazz = DominantResourceFairnessPolicy.class;
  } else {
    try {
      clazz = Class.forName(policy);
    } catch (ClassNotFoundException cnfe) {
      throw new AllocationConfigurationException(policy
          + " SchedulingPolicy class not found!");
    }
  }
  if (!SchedulingPolicy.class.isAssignableFrom(clazz)) {
    throw new AllocationConfigurationException(policy
        + " does not extend SchedulingPolicy");
  }
  return getInstance(clazz);
}
 
Developer: naver, Project: hadoop, Lines of code: 38, Source file: SchedulingPolicy.java
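
For reference, here is a minimal, hypothetical sketch of calling this parse method directly. The wrapper class and main method are illustrative assumptions; only SchedulingPolicy and DominantResourceFairnessPolicy come from the example above.

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SchedulingPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy;

public class ParsePolicySketch {
  public static void main(String[] args) throws Exception {
    // The short name "drf" resolves to the built-in DominantResourceFairnessPolicy.
    SchedulingPolicy drf = SchedulingPolicy.parse("drf");
    System.out.println(drf.getName());

    // A fully qualified class name is loaded from the classpath instead.
    SchedulingPolicy byClassName =
        SchedulingPolicy.parse(DominantResourceFairnessPolicy.class.getCanonicalName());
    System.out.println(byClassName.getName());
  }
}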

Example 2: testBasicDRFAssignment

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
@Test
public void testBasicDRFAssignment() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 5, 5));
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  scheduler.handle(nodeEvent);

  ApplicationAttemptId appAttId1 = createSchedulingRequest(2048, 1, 1, "queue1",
      "user1", 2);
  FSAppAttempt app1 = scheduler.getSchedulerApp(appAttId1);
  ApplicationAttemptId appAttId2 = createSchedulingRequest(1024, 2, 2, "queue1",
      "user1", 2);
  FSAppAttempt app2 = scheduler.getSchedulerApp(appAttId2);

  DominantResourceFairnessPolicy drfPolicy = new DominantResourceFairnessPolicy();
  drfPolicy.initialize(scheduler.getClusterResource());
  scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
  scheduler.update();

  // First both apps get a container
  // Then the first gets another container because its dominant share of
  // 2048/8192 is less than the other's of 2/5
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app1.getLiveContainers().size());
  Assert.assertEquals(0, app2.getLiveContainers().size());

  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app1.getLiveContainers().size());
  Assert.assertEquals(1, app2.getLiveContainers().size());

  scheduler.handle(updateEvent);
  Assert.assertEquals(2, app1.getLiveContainers().size());
  Assert.assertEquals(1, app2.getLiveContainers().size());
}
 
Developer: naver, Project: hadoop, Lines of code: 39, Source file: TestFairScheduler.java
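
As a side note on the comment in the test above: under DRF an application's dominant share is the larger of its memory fraction and its vCore fraction of the cluster, and the app with the smaller dominant share is scheduled next. The following standalone sketch (not Hadoop code, just illustrative arithmetic) reproduces the 2048/8192 vs. 2/5 comparison:

public class DominantShareSketch {
  // Dominant share = max(memory fraction, vcore fraction) of the cluster.
  static double dominantShare(int memMb, int vcores, int clusterMemMb, int clusterVcores) {
    return Math.max((double) memMb / clusterMemMb, (double) vcores / clusterVcores);
  }

  public static void main(String[] args) {
    // Cluster from the test: 8192 MB, 5 vcores; each app holds one container.
    double app1 = dominantShare(2048, 1, 8192, 5); // max(0.25, 0.2)  = 0.25
    double app2 = dominantShare(1024, 2, 8192, 5); // max(0.125, 0.4) = 0.40
    // app1's dominant share is smaller, so it is offered the next container.
    System.out.printf("app1=%.2f, app2=%.2f%n", app1, app2);
  }
}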

Example 3: testBasicDRFWithQueues

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
/**
 * Two apps on one queue, one app on another
 */
@Test
public void testBasicDRFWithQueues() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 7, 7),
      1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  scheduler.handle(nodeEvent);

  ApplicationAttemptId appAttId1 = createSchedulingRequest(3072, 1, 1, "queue1",
      "user1", 2);
  FSAppAttempt app1 = scheduler.getSchedulerApp(appAttId1);
  ApplicationAttemptId appAttId2 = createSchedulingRequest(2048, 2, 2, "queue1",
      "user1", 2);
  FSAppAttempt app2 = scheduler.getSchedulerApp(appAttId2);
  ApplicationAttemptId appAttId3 = createSchedulingRequest(1024, 2, 2, "queue2",
      "user1", 2);
  FSAppAttempt app3 = scheduler.getSchedulerApp(appAttId3);
  
  DominantResourceFairnessPolicy drfPolicy = new DominantResourceFairnessPolicy();
  drfPolicy.initialize(scheduler.getClusterResource());
  scheduler.getQueueManager().getQueue("root").setPolicy(drfPolicy);
  scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
  scheduler.update();

  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app1.getLiveContainers().size());
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app3.getLiveContainers().size());
  scheduler.handle(updateEvent);
  Assert.assertEquals(2, app3.getLiveContainers().size());
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app2.getLiveContainers().size());
}
 
Developer: naver, Project: hadoop, Lines of code: 41, Source file: TestFairScheduler.java
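
The allocation order asserted above can be traced by comparing dominant shares, first between the two queues and then between the apps inside queue1. A rough walk-through, under the assumption that one container is assigned per node update (cluster: 8192 MB, 7 vcores):

Update 1: both queues are at zero share; queue1 is served and app1 gets a container (3072 MB, 1 vcore), raising queue1's dominant share to 3072/8192 ≈ 0.375.
Update 2: queue2 (0) < queue1 (0.375), so app3 gets a container (1024 MB, 2 vcores); queue2's dominant share becomes 2/7 ≈ 0.286.
Update 3: queue2 (0.286) < queue1 (0.375), so app3 gets its second container; queue2 rises to 4/7 ≈ 0.571.
Update 4: queue1 (0.375) < queue2 (0.571), and within queue1 app2 (no containers yet) trails app1, so app2 gets a container.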

Example 4: testParseSchedulingPolicy

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
@Test(timeout = 1000)
public void testParseSchedulingPolicy()
    throws AllocationConfigurationException {

  // Class name
  SchedulingPolicy sm = SchedulingPolicy
      .parse(FairSharePolicy.class.getName());
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Canonical name
  sm = SchedulingPolicy.parse(FairSharePolicy.class
      .getCanonicalName());
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Class
  sm = SchedulingPolicy.getInstance(FairSharePolicy.class);
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Shortname - drf
  sm = SchedulingPolicy.parse("drf");
  assertTrue("Invalid scheduler name",
      sm.getName().equals(DominantResourceFairnessPolicy.NAME));
  
  // Shortname - fair
  sm = SchedulingPolicy.parse("fair");
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Shortname - fifo
  sm = SchedulingPolicy.parse("fifo");
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FifoPolicy.NAME));
}
 
Developer: naver, Project: hadoop, Lines of code: 37, Source file: TestSchedulingPolicy.java

Example 5: checkFSQueue

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
private void checkFSQueue(ResourceManager rm,
    SchedulerApplication  schedulerApp, Resource usedResources,
    Resource availableResources) throws Exception {
  // waiting for RM's scheduling apps
  int retry = 0;
  Resource assumedFairShare = Resource.newInstance(8192, 8);
  while (true) {
    Thread.sleep(100);
    if (assumedFairShare.equals(((FairScheduler)rm.getResourceScheduler())
        .getQueueManager().getRootQueue().getFairShare())) {
      break;
    }
    retry++;
    if (retry > 30) {
      Assert.fail("Apps are not scheduled within assumed timeout");
    }
  }

  FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
  FSParentQueue root = scheduler.getQueueManager().getRootQueue();
  // ************ check cluster used Resources ********
  assertTrue(root.getPolicy() instanceof DominantResourceFairnessPolicy);
  assertEquals(usedResources,root.getResourceUsage());

  // ************ check app headroom ****************
  FSAppAttempt schedulerAttempt =
      (FSAppAttempt) schedulerApp.getCurrentAppAttempt();
  assertEquals(availableResources, schedulerAttempt.getHeadroom());

  // ************ check queue metrics ****************
  QueueMetrics queueMetrics = scheduler.getRootQueueMetrics();
  assertMetrics(queueMetrics, 1, 0, 1, 0, 2, availableResources.getMemory(),
      availableResources.getVirtualCores(), usedResources.getMemory(),
      usedResources.getVirtualCores());
}
 
Developer: naver, Project: hadoop, Lines of code: 36, Source file: TestWorkPreservingRMRestart.java

Example 6: testBasicDRFAssignment

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
@Test
public void testBasicDRFAssignment() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 5));
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  scheduler.handle(nodeEvent);

  ApplicationAttemptId appAttId1 = createSchedulingRequest(2048, 1, "queue1",
      "user1", 2);
  FSAppAttempt app1 = scheduler.getSchedulerApp(appAttId1);
  ApplicationAttemptId appAttId2 = createSchedulingRequest(1024, 2, "queue1",
      "user1", 2);
  FSAppAttempt app2 = scheduler.getSchedulerApp(appAttId2);

  DominantResourceFairnessPolicy drfPolicy = new DominantResourceFairnessPolicy();
  drfPolicy.initialize(scheduler.getClusterResource());
  scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
  scheduler.update();

  // First both apps get a container
  // Then the first gets another container because its dominant share of
  // 2048/8192 is less than the other's of 2/5
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app1.getLiveContainers().size());
  Assert.assertEquals(0, app2.getLiveContainers().size());

  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app1.getLiveContainers().size());
  Assert.assertEquals(1, app2.getLiveContainers().size());

  scheduler.handle(updateEvent);
  Assert.assertEquals(2, app1.getLiveContainers().size());
  Assert.assertEquals(1, app2.getLiveContainers().size());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 39, Source file: TestFairScheduler.java

Example 7: testBasicDRFWithQueues

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
/**
 * Two apps on one queue, one app on another
 */
@Test
public void testBasicDRFWithQueues() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 7),
      1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  scheduler.handle(nodeEvent);

  ApplicationAttemptId appAttId1 = createSchedulingRequest(3072, 1, "queue1",
      "user1", 2);
  FSAppAttempt app1 = scheduler.getSchedulerApp(appAttId1);
  ApplicationAttemptId appAttId2 = createSchedulingRequest(2048, 2, "queue1",
      "user1", 2);
  FSAppAttempt app2 = scheduler.getSchedulerApp(appAttId2);
  ApplicationAttemptId appAttId3 = createSchedulingRequest(1024, 2, "queue2",
      "user1", 2);
  FSAppAttempt app3 = scheduler.getSchedulerApp(appAttId3);
  
  DominantResourceFairnessPolicy drfPolicy = new DominantResourceFairnessPolicy();
  drfPolicy.initialize(scheduler.getClusterResource());
  scheduler.getQueueManager().getQueue("root").setPolicy(drfPolicy);
  scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
  scheduler.update();

  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app1.getLiveContainers().size());
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app3.getLiveContainers().size());
  scheduler.handle(updateEvent);
  Assert.assertEquals(2, app3.getLiveContainers().size());
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app2.getLiveContainers().size());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 41, Source file: TestFairScheduler.java

Example 8: parse

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
/**
 * Returns {@link SchedulingPolicy} instance corresponding to the
 * {@link SchedulingPolicy} passed as a string. The policy can be "fair" for
 * FairSharePolicy, "fifo" for FifoPolicy, or "drf" for
 * DominantResourceFairnessPolicy. For a custom
 * {@link SchedulingPolicy} in the RM classpath, the policy should be the
 * canonical class name of the {@link SchedulingPolicy}.
 * 
 * @param policy canonical class name or "drf" or "fair" or "fifo"
 * @throws AllocationConfigurationException
 */
@SuppressWarnings("unchecked")
public static SchedulingPolicy parse(String policy)
    throws AllocationConfigurationException {
  @SuppressWarnings("rawtypes")
  Class clazz;
  String text = policy.toLowerCase();
  if (text.equalsIgnoreCase(FairSharePolicy.NAME)) {
    clazz = FairSharePolicy.class;
  } else if (text.equalsIgnoreCase(FifoPolicy.NAME)) {
    clazz = FifoPolicy.class;
  } else if (text.equalsIgnoreCase(DominantResourceFairnessPolicy.NAME)) {
    clazz = DominantResourceFairnessPolicy.class;
  } else {
    try {
      clazz = Class.forName(policy);
    } catch (ClassNotFoundException cnfe) {
      throw new AllocationConfigurationException(policy
          + " SchedulingPolicy class not found!");
    }
  }
  if (!SchedulingPolicy.class.isAssignableFrom(clazz)) {
    throw new AllocationConfigurationException(policy
        + " does not extend SchedulingPolicy");
  }
  return getInstance(clazz);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 38, Source file: SchedulingPolicy.java

Example 9: testBasicDRFAssignment

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
public void testBasicDRFAssignment() throws Exception {
  RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 5));
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  scheduler.handle(nodeEvent);

  ApplicationAttemptId appAttId1 = createSchedulingRequest(2048, 1, "queue1",
      "user1", 2);
  FSSchedulerApp app1 = scheduler.applications.get(appAttId1);
  ApplicationAttemptId appAttId2 = createSchedulingRequest(1024, 2, "queue1",
      "user1", 2);
  FSSchedulerApp app2 = scheduler.applications.get(appAttId2);

  DominantResourceFairnessPolicy drfPolicy = new DominantResourceFairnessPolicy();
  drfPolicy.initialize(scheduler.getClusterCapacity());
  scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
  scheduler.update();

  // First both apps get a container
  // Then the first gets another container because its dominant share of
  // 2048/8192 is less than the other's of 2/5
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app1.getLiveContainers().size());
  Assert.assertEquals(0, app2.getLiveContainers().size());

  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app1.getLiveContainers().size());
  Assert.assertEquals(1, app2.getLiveContainers().size());

  scheduler.handle(updateEvent);
  Assert.assertEquals(2, app1.getLiveContainers().size());
  Assert.assertEquals(1, app2.getLiveContainers().size());
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 34, Source file: TestFairScheduler.java

Example 10: testBasicDRFWithQueues

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
/**
 * Two apps on one queue, one app on another
 */
@Test
public void testBasicDRFWithQueues() throws Exception {
  RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 7),
      1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  scheduler.handle(nodeEvent);

  ApplicationAttemptId appAttId1 = createSchedulingRequest(3072, 1, "queue1",
      "user1", 2);
  FSSchedulerApp app1 = scheduler.applications.get(appAttId1);
  ApplicationAttemptId appAttId2 = createSchedulingRequest(2048, 2, "queue1",
      "user1", 2);
  FSSchedulerApp app2 = scheduler.applications.get(appAttId2);
  ApplicationAttemptId appAttId3 = createSchedulingRequest(1024, 2, "queue2",
      "user1", 2);
  FSSchedulerApp app3 = scheduler.applications.get(appAttId3);
  
  DominantResourceFairnessPolicy drfPolicy = new DominantResourceFairnessPolicy();
  drfPolicy.initialize(scheduler.getClusterCapacity());
  scheduler.getQueueManager().getQueue("root").setPolicy(drfPolicy);
  scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
  scheduler.update();

  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app1.getLiveContainers().size());
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app3.getLiveContainers().size());
  scheduler.handle(updateEvent);
  Assert.assertEquals(2, app3.getLiveContainers().size());
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app2.getLiveContainers().size());
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 37, Source file: TestFairScheduler.java

Example 11: testBasicDRFAssignment

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
@Ignore
@Test
public void testBasicDRFAssignment() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 5));
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  scheduler.handle(nodeEvent);

  ApplicationAttemptId appAttId1 = createSchedulingRequest(2048, 1, "queue1",
      "user1", 2);
  FSAppAttempt app1 = scheduler.getSchedulerApp(appAttId1);
  ApplicationAttemptId appAttId2 = createSchedulingRequest(1024, 2, "queue1",
      "user1", 2);
  FSAppAttempt app2 = scheduler.getSchedulerApp(appAttId2);

  DominantResourceFairnessPolicy drfPolicy = new DominantResourceFairnessPolicy();
  drfPolicy.initialize(scheduler.getClusterResource());
  scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
  scheduler.update();

  // First both apps get a container
  // Then the first gets another container because its dominant share of
  // 2048/8192 is less than the other's of 2/5
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app1.getLiveContainers().size());
  Assert.assertEquals(0, app2.getLiveContainers().size());

  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app1.getLiveContainers().size());
  Assert.assertEquals(1, app2.getLiveContainers().size());

  scheduler.handle(updateEvent);
  Assert.assertEquals(2, app1.getLiveContainers().size());
  Assert.assertEquals(1, app2.getLiveContainers().size());
}
 
Developer: hopshadoop, Project: hops, Lines of code: 40, Source file: TestFairScheduler.java

Example 12: testBasicDRFWithQueues

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
/**
 * Two apps on one queue, one app on another
 */
@Ignore
@Test
public void testBasicDRFWithQueues() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 7),
      1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  scheduler.handle(nodeEvent);

  ApplicationAttemptId appAttId1 = createSchedulingRequest(3072, 1, "queue1",
      "user1", 2);
  FSAppAttempt app1 = scheduler.getSchedulerApp(appAttId1);
  ApplicationAttemptId appAttId2 = createSchedulingRequest(2048, 2, "queue1",
      "user1", 2);
  FSAppAttempt app2 = scheduler.getSchedulerApp(appAttId2);
  ApplicationAttemptId appAttId3 = createSchedulingRequest(1024, 2, "queue2",
      "user1", 2);
  FSAppAttempt app3 = scheduler.getSchedulerApp(appAttId3);
  
  DominantResourceFairnessPolicy drfPolicy = new DominantResourceFairnessPolicy();
  drfPolicy.initialize(scheduler.getClusterResource());
  scheduler.getQueueManager().getQueue("root").setPolicy(drfPolicy);
  scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
  scheduler.update();

  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app1.getLiveContainers().size());
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app3.getLiveContainers().size());
  scheduler.handle(updateEvent);
  Assert.assertEquals(2, app3.getLiveContainers().size());
  scheduler.handle(updateEvent);
  Assert.assertEquals(1, app2.getLiveContainers().size());
}
 
Developer: hopshadoop, Project: hops, Lines of code: 42, Source file: TestFairScheduler.java

Example 13: testParseSchedulingPolicy

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
public void testParseSchedulingPolicy()
    throws AllocationConfigurationException {

  // Class name
  SchedulingPolicy sm = SchedulingPolicy
      .parse(FairSharePolicy.class.getName());
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Canonical name
  sm = SchedulingPolicy.parse(FairSharePolicy.class
      .getCanonicalName());
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Class
  sm = SchedulingPolicy.getInstance(FairSharePolicy.class);
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Shortname - drf
  sm = SchedulingPolicy.parse("drf");
  assertTrue("Invalid scheduler name",
      sm.getName().equals(DominantResourceFairnessPolicy.NAME));
  
  // Shortname - fair
  sm = SchedulingPolicy.parse("fair");
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Shortname - fifo
  sm = SchedulingPolicy.parse("fifo");
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FifoPolicy.NAME));
}
 
Developer: hopshadoop, Project: hops, Lines of code: 36, Source file: TestSchedulingPolicy.java

Example 14: checkFSQueue

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
private void checkFSQueue(ResourceManager rm,
    SchedulerApplication  schedulerApp, Resource usedResources,
    Resource availableResources) throws Exception {
  // waiting for RM's scheduling apps
  int retry = 0;
  Resource assumedFairShare = Resource.newInstance(8192, 8);
  while (true) {
    Thread.sleep(100);
    if (assumedFairShare.equals(((FairScheduler)rm.getResourceScheduler())
        .getQueueManager().getRootQueue().getFairShare())) {
      break;
    }
    retry++;
    if (retry > 30) {
      Assert.fail("Apps are not scheduled within assumed timeout");
    }
  }

  FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
  FSParentQueue root = scheduler.getQueueManager().getRootQueue();
  // ************ check cluster used Resources ********
  assertTrue(root.getPolicy() instanceof DominantResourceFairnessPolicy);
  assertEquals(usedResources,root.getResourceUsage());

  // ************ check app headroom ****************
  FSAppAttempt schedulerAttempt =
      (FSAppAttempt) schedulerApp.getCurrentAppAttempt();
  assertEquals(availableResources, schedulerAttempt.getHeadroom());

  // ************ check queue metrics ****************
  QueueMetrics queueMetrics = scheduler.getRootQueueMetrics();
  assertMetrics(queueMetrics, 1, 0, 1, 0, 2, availableResources.getMemorySize(),
      availableResources.getVirtualCores(), usedResources.getMemorySize(),
      usedResources.getVirtualCores());
}
 
Developer: hopshadoop, Project: hops, Lines of code: 36, Source file: TestWorkPreservingRMRestart.java

Example 15: testConfigureRootQueue

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; // import the required package/class
@Test
public void testConfigureRootQueue() throws Exception {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);

  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<defaultQueueSchedulingPolicy>fair</defaultQueueSchedulingPolicy>");
  out.println("<queue name=\"root\">");
  out.println("  <schedulingPolicy>drf</schedulingPolicy>");
  out.println("  <queue name=\"child1\">");
  out.println("    <minResources>1024mb,1vcores</minResources>");
  out.println("  </queue>");
  out.println("  <queue name=\"child2\">");
  out.println("    <minResources>1024mb,4vcores</minResources>");
  out.println("  </queue>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();
  
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  QueueManager queueManager = scheduler.getQueueManager();
  
  FSQueue root = queueManager.getRootQueue();
  assertTrue(root.getPolicy() instanceof DominantResourceFairnessPolicy);
  
  assertNotNull(queueManager.getLeafQueue("child1", false));
  assertNotNull(queueManager.getLeafQueue("child2", false));
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines of code: 30, Source file: TestFairScheduler.java


Note: The org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Refer to the corresponding project's license before distributing or reusing the code; do not republish without permission.