

Java FifoPolicy Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy. If you are wondering what the FifoPolicy class does, how to use it, or where to find usage examples, the curated code examples below should help.


The FifoPolicy class belongs to the org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies package. A total of 12 code examples of the class are shown below, ordered by popularity by default.
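
FifoPolicy is one of the three built-in scheduling policies of the YARN Fair Scheduler (alongside FairSharePolicy and DominantResourceFairnessPolicy); it orders the applications within a single leaf queue by submission time rather than by fair share. The examples below either resolve the policy from its short name "fifo" or set an instance directly on a queue. A minimal sketch of both patterns, assuming an already initialized FairScheduler instance (the helper name useFifo is ours, not part of Hadoop):

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SchedulingPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy;

// Hypothetical helper: switch one leaf queue of a running FairScheduler to FIFO ordering.
static void useFifo(FairScheduler scheduler, String queueName) throws Exception {
  // Resolve the built-in policy from its short name, as SchedulingPolicy.parse() in Example 1 does.
  SchedulingPolicy fifo = SchedulingPolicy.parse("fifo");

  // Apply it to the queue; the test examples below use queue.setPolicy(new FifoPolicy()) directly.
  FSLeafQueue queue = scheduler.getQueueManager().getLeafQueue(queueName, true);
  queue.setPolicy(fifo);
}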

Example 1: parse

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; // import the required package/class
/**
 * Returns {@link SchedulingPolicy} instance corresponding to the
 * {@link SchedulingPolicy} passed as a string. The policy can be "fair" for
 * FairSharePolicy, "fifo" for FifoPolicy, or "drf" for
 * DominantResourceFairnessPolicy. For a custom
 * {@link SchedulingPolicy} in the RM classpath, the policy should be the
 * canonical class name of the {@link SchedulingPolicy}.
 * 
 * @param policy canonical class name or "drf" or "fair" or "fifo"
 * @throws AllocationConfigurationException
 */
@SuppressWarnings("unchecked")
public static SchedulingPolicy parse(String policy)
    throws AllocationConfigurationException {
  @SuppressWarnings("rawtypes")
  Class clazz;
  String text = StringUtils.toLowerCase(policy);
  if (text.equalsIgnoreCase(FairSharePolicy.NAME)) {
    clazz = FairSharePolicy.class;
  } else if (text.equalsIgnoreCase(FifoPolicy.NAME)) {
    clazz = FifoPolicy.class;
  } else if (text.equalsIgnoreCase(DominantResourceFairnessPolicy.NAME)) {
    clazz = DominantResourceFairnessPolicy.class;
  } else {
    try {
      clazz = Class.forName(policy);
    } catch (ClassNotFoundException cnfe) {
      throw new AllocationConfigurationException(policy
          + " SchedulingPolicy class not found!");
    }
  }
  if (!SchedulingPolicy.class.isAssignableFrom(clazz)) {
    throw new AllocationConfigurationException(policy
        + " does not extend SchedulingPolicy");
  }
  return getInstance(clazz);
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: SchedulingPolicy.java
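
The fallback branch in parse() above treats anything that is not "fair", "fifo" or "drf" as a class name, which must both load on the RM classpath and extend SchedulingPolicy. A small hedged illustration of that behavior (the custom class name and the demonstration method are hypothetical, not part of Hadoop):

// Hypothetical demonstration method (not part of Hadoop).
static void parseExamples() throws AllocationConfigurationException {
  // "com.example.MyCustomPolicy" is an assumed custom policy on the RM classpath.
  SchedulingPolicy custom = SchedulingPolicy.parse("com.example.MyCustomPolicy");

  try {
    // Loads via Class.forName, but fails the isAssignableFrom check.
    SchedulingPolicy.parse("java.lang.String");
  } catch (AllocationConfigurationException e) {
    // e.getMessage(): "java.lang.String does not extend SchedulingPolicy"
  }
}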

Example 2: testParseSchedulingPolicy

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; // import the required package/class
@Test(timeout = 1000)
public void testParseSchedulingPolicy()
    throws AllocationConfigurationException {

  // Class name
  SchedulingPolicy sm = SchedulingPolicy
      .parse(FairSharePolicy.class.getName());
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Canonical name
  sm = SchedulingPolicy.parse(FairSharePolicy.class
      .getCanonicalName());
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Class
  sm = SchedulingPolicy.getInstance(FairSharePolicy.class);
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Shortname - drf
  sm = SchedulingPolicy.parse("drf");
  assertTrue("Invalid scheduler name",
      sm.getName().equals(DominantResourceFairnessPolicy.NAME));
  
  // Shortname - fair
  sm = SchedulingPolicy.parse("fair");
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Shortname - fifo
  sm = SchedulingPolicy.parse("fifo");
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FifoPolicy.NAME));
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: TestSchedulingPolicy.java

Example 3: parse

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; // import the required package/class
/**
 * Returns {@link SchedulingPolicy} instance corresponding to the
 * {@link SchedulingPolicy} passed as a string. The policy can be "fair" for
 * FairSharePolicy, "fifo" for FifoPolicy, or "drf" for
 * DominantResourceFairnessPolicy. For a custom
 * {@link SchedulingPolicy} in the RM classpath, the policy should be the
 * canonical class name of the {@link SchedulingPolicy}.
 * 
 * @param policy canonical class name or "drf" or "fair" or "fifo"
 * @throws AllocationConfigurationException
 */
@SuppressWarnings("unchecked")
public static SchedulingPolicy parse(String policy)
    throws AllocationConfigurationException {
  @SuppressWarnings("rawtypes")
  Class clazz;
  String text = policy.toLowerCase();
  if (text.equalsIgnoreCase(FairSharePolicy.NAME)) {
    clazz = FairSharePolicy.class;
  } else if (text.equalsIgnoreCase(FifoPolicy.NAME)) {
    clazz = FifoPolicy.class;
  } else if (text.equalsIgnoreCase(DominantResourceFairnessPolicy.NAME)) {
    clazz = DominantResourceFairnessPolicy.class;
  } else {
    try {
      clazz = Class.forName(policy);
    } catch (ClassNotFoundException cnfe) {
      throw new AllocationConfigurationException(policy
          + " SchedulingPolicy class not found!");
    }
  }
  if (!SchedulingPolicy.class.isAssignableFrom(clazz)) {
    throw new AllocationConfigurationException(policy
        + " does not extend SchedulingPolicy");
  }
  return getInstance(clazz);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 38, Source: SchedulingPolicy.java

Example 4: testFifoWithinQueue

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; // import the required package/class
@Test (timeout = 5000)
public void testFifoWithinQueue() throws Exception {
  RMNode node1 =
      MockNodes
          .newNodeInfo(1, Resources.createResource(3072, 3), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  
  // Even if submitted at exact same time, apps will be deterministically
  // ordered by name.
  ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1",
      "user1", 2);
  ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1",
      "user1", 2);
  FSSchedulerApp app1 = scheduler.applications.get(attId1);
  FSSchedulerApp app2 = scheduler.applications.get(attId2);
  
  FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1");
  queue1.setPolicy(new FifoPolicy());
  
  scheduler.update();

  // First two containers should go to app 1, third should go to app 2.
  // Because tests set assignmultiple to false, each heartbeat assigns a single
  // container.
  
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);

  scheduler.handle(updateEvent);
  assertEquals(1, app1.getLiveContainers().size());
  assertEquals(0, app2.getLiveContainers().size());
  
  scheduler.handle(updateEvent);
  assertEquals(2, app1.getLiveContainers().size());
  assertEquals(0, app2.getLiveContainers().size());
  
  scheduler.handle(updateEvent);
  assertEquals(2, app1.getLiveContainers().size());
  assertEquals(1, app2.getLiveContainers().size());
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 41, Source: TestFairScheduler.java
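
The comments in testFifoWithinQueue rely on the Fair Scheduler assigning at most one container per node heartbeat, which is controlled by the yarn.scheduler.fair.assignmultiple property. A minimal sketch of how a test setup might disable it (the helper name is ours; the test classes configure this in setup code not shown in the excerpt):

import org.apache.hadoop.conf.Configuration;

// Hypothetical helper: build a configuration where each heartbeat assigns a single container.
static Configuration singleAssignmentConf() {
  Configuration conf = new Configuration();
  conf.setBoolean("yarn.scheduler.fair.assignmultiple", false);
  return conf;
}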

Example 5: testParseSchedulingPolicy

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; // import the required package/class
@Test(timeout = 1000)
public void testParseSchedulingPolicy()
    throws AllocationConfigurationException {

  // Class name
  SchedulingPolicy sm = SchedulingPolicy
      .parse(FairSharePolicy.class.getName());
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Canonical name
  sm = SchedulingPolicy.parse(FairSharePolicy.class
      .getCanonicalName());
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Class
  sm = SchedulingPolicy.getInstance(FairSharePolicy.class);
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Shortname - drf
  sm = SchedulingPolicy.parse("drf");
  assertTrue("Invalid scheduler name",
      sm.getName().equals(DominantResourceFairnessPolicy.NAME));
  
  // Shortname - fair
  sm = SchedulingPolicy.parse("fair");
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FairSharePolicy.NAME));

  // Shortname - fifo
  sm = SchedulingPolicy.parse("fifo");
  assertTrue("Invalid scheduler name",
      sm.getName().equals(FifoPolicy.NAME));
}
 
Developer: hopshadoop, Project: hops, Lines: 36, Source: TestSchedulingPolicy.java

Example 6: testFifoWithinQueue

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; // import the required package/class
@Test (timeout = 5000)
public void testFifoWithinQueue() throws Exception {
  RMNode node1 =
      MockNodes
          .newNodeInfo(1, Resources.createResource(3072, 3), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  
  // Even if submitted at exact same time, apps will be deterministically
  // ordered by name.
  ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1",
      "user1", 2);
  ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1",
      "user1", 2);
  FSSchedulerApp app1 = scheduler.applications.get(attId1);
  FSSchedulerApp app2 = scheduler.applications.get(attId2);
  
  FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true);
  queue1.setPolicy(new FifoPolicy());
  
  scheduler.update();

  // First two containers should go to app 1, third should go to app 2.
  // Because tests set assignmultiple to false, each heartbeat assigns a single
  // container.
  
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);

  scheduler.handle(updateEvent);
  assertEquals(1, app1.getLiveContainers().size());
  assertEquals(0, app2.getLiveContainers().size());
  
  scheduler.handle(updateEvent);
  assertEquals(2, app1.getLiveContainers().size());
  assertEquals(0, app2.getLiveContainers().size());
  
  scheduler.handle(updateEvent);
  assertEquals(2, app1.getLiveContainers().size());
  assertEquals(1, app2.getLiveContainers().size());
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 41, Source: TestFairScheduler.java

Example 7: testFifoWithinQueue

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; // import the required package/class
@Test (timeout = 5000)
public void testFifoWithinQueue() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node1 =
      MockNodes
          .newNodeInfo(1, Resources.createResource(3072, 3, 3), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  
  // Even if submitted at exact same time, apps will be deterministically
  // ordered by name.
  ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1",
      "user1", 2);
  ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1",
      "user1", 2);
  FSAppAttempt app1 = scheduler.getSchedulerApp(attId1);
  FSAppAttempt app2 = scheduler.getSchedulerApp(attId2);
  
  FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true);
  queue1.setPolicy(new FifoPolicy());
  
  scheduler.update();

  // First two containers should go to app 1, third should go to app 2.
  // Because tests set assignmultiple to false, each heartbeat assigns a single
  // container.
  
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);

  scheduler.handle(updateEvent);
  assertEquals(1, app1.getLiveContainers().size());
  assertEquals(0, app2.getLiveContainers().size());
  
  scheduler.handle(updateEvent);
  assertEquals(2, app1.getLiveContainers().size());
  assertEquals(0, app2.getLiveContainers().size());
  
  scheduler.handle(updateEvent);
  assertEquals(2, app1.getLiveContainers().size());
  assertEquals(1, app2.getLiveContainers().size());
}
 
Developer: naver, Project: hadoop, Lines: 45, Source: TestFairScheduler.java

Example 8: testHeadroom

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; // import the required package/class
@Test
public void testHeadroom() {
  final FairScheduler mockScheduler = Mockito.mock(FairScheduler.class);
  Mockito.when(mockScheduler.getClock()).thenReturn(scheduler.getClock());

  final FSLeafQueue mockQueue = Mockito.mock(FSLeafQueue.class);

  final Resource queueMaxResources = Resource.newInstance(5 * 1024, 3, 3);
  final Resource queueFairShare = Resources.createResource(4096, 2, 2);
  final Resource queueUsage = Resource.newInstance(2048, 2, 2);

  final Resource queueStarvation =
      Resources.subtract(queueFairShare, queueUsage);
  final Resource queueMaxResourcesAvailable =
      Resources.subtract(queueMaxResources, queueUsage);

  final Resource clusterResource = Resources.createResource(8192, 8, 8);
  final Resource clusterUsage = Resources.createResource(2048, 2, 2);
  final Resource clusterAvailable =
      Resources.subtract(clusterResource, clusterUsage);

  final QueueMetrics fakeRootQueueMetrics = Mockito.mock(QueueMetrics.class);

  Mockito.when(mockQueue.getMaxShare()).thenReturn(queueMaxResources);
  Mockito.when(mockQueue.getFairShare()).thenReturn(queueFairShare);
  Mockito.when(mockQueue.getResourceUsage()).thenReturn(queueUsage);
  Mockito.when(mockScheduler.getClusterResource()).thenReturn
      (clusterResource);
  Mockito.when(fakeRootQueueMetrics.getAllocatedResources()).thenReturn
      (clusterUsage);
  Mockito.when(mockScheduler.getRootQueueMetrics()).thenReturn
      (fakeRootQueueMetrics);

  ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1);
  RMContext rmContext = resourceManager.getRMContext();
  FSAppAttempt schedulerApp =
      new FSAppAttempt(mockScheduler, applicationAttemptId, "user1", mockQueue ,
          null, rmContext);

  // Min of Memory and CPU across cluster and queue is used in
  // DominantResourceFairnessPolicy
  Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy
      .getInstance(DominantResourceFairnessPolicy.class));
  verifyHeadroom(schedulerApp,
      min(queueStarvation.getMemory(),
          clusterAvailable.getMemory(),
          queueMaxResourcesAvailable.getMemory()),
      min(queueStarvation.getVirtualCores(),
          clusterAvailable.getVirtualCores(),
          queueMaxResourcesAvailable.getVirtualCores())
  );

  // Fair and Fifo ignore CPU of queue, so use cluster available CPU
  Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy
      .getInstance(FairSharePolicy.class));
  verifyHeadroom(schedulerApp,
      min(queueStarvation.getMemory(),
          clusterAvailable.getMemory(),
          queueMaxResourcesAvailable.getMemory()),
      Math.min(
          clusterAvailable.getVirtualCores(),
          queueMaxResourcesAvailable.getVirtualCores())
  );

  Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy
      .getInstance(FifoPolicy.class));
  verifyHeadroom(schedulerApp,
      min(queueStarvation.getMemory(),
          clusterAvailable.getMemory(),
          queueMaxResourcesAvailable.getMemory()),
      Math.min(
          clusterAvailable.getVirtualCores(),
          queueMaxResourcesAvailable.getVirtualCores())
  );
}
 
Developer: naver, Project: hadoop, Lines: 76, Source: TestFSAppAttempt.java
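
testHeadroom calls two helpers that are not part of the excerpt: a three-argument min and verifyHeadroom. Their actual implementations live in TestFSAppAttempt; the sketch below is only a guess at their shape based on how the test calls them (in Example 11 the memory values come from getMemorySize(), so the helpers there would take long instead of int):

// Assumed shape of the helpers used above (not copied from the source).
private static int min(int a, int b, int c) {
  // Three-way minimum over queue starvation, cluster availability and remaining queue max share.
  return Math.min(Math.min(a, b), c);
}

private void verifyHeadroom(FSAppAttempt schedulerApp, int expectedMemory, int expectedCPU) {
  // The application's headroom should reflect the smaller of the queue and cluster limits.
  Resource headroom = schedulerApp.getHeadroom();
  assertEquals(expectedMemory, headroom.getMemory());
  assertEquals(expectedCPU, headroom.getVirtualCores());
}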

Example 9: testFifoWithinQueue

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; // import the required package/class
@Test (timeout = 5000)
public void testFifoWithinQueue() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node1 =
      MockNodes
          .newNodeInfo(1, Resources.createResource(3072, 3), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  
  // Even if submitted at exact same time, apps will be deterministically
  // ordered by name.
  ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1",
      "user1", 2);
  ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1",
      "user1", 2);
  FSAppAttempt app1 = scheduler.getSchedulerApp(attId1);
  FSAppAttempt app2 = scheduler.getSchedulerApp(attId2);
  
  FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true);
  queue1.setPolicy(new FifoPolicy());
  
  scheduler.update();

  // First two containers should go to app 1, third should go to app 2.
  // Because tests set assignmultiple to false, each heartbeat assigns a single
  // container.
  
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);

  scheduler.handle(updateEvent);
  assertEquals(1, app1.getLiveContainers().size());
  assertEquals(0, app2.getLiveContainers().size());
  
  scheduler.handle(updateEvent);
  assertEquals(2, app1.getLiveContainers().size());
  assertEquals(0, app2.getLiveContainers().size());
  
  scheduler.handle(updateEvent);
  assertEquals(2, app1.getLiveContainers().size());
  assertEquals(1, app2.getLiveContainers().size());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 45, Source: TestFairScheduler.java

Example 10: testHeadroom

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; // import the required package/class
@Test
public void testHeadroom() {
  final FairScheduler mockScheduler = Mockito.mock(FairScheduler.class);
  Mockito.when(mockScheduler.getClock()).thenReturn(scheduler.getClock());

  final FSLeafQueue mockQueue = Mockito.mock(FSLeafQueue.class);

  final Resource queueMaxResources = Resource.newInstance(5 * 1024, 3);
  final Resource queueFairShare = Resources.createResource(4096, 2);
  final Resource queueUsage = Resource.newInstance(2048, 2);

  final Resource queueStarvation =
      Resources.subtract(queueFairShare, queueUsage);
  final Resource queueMaxResourcesAvailable =
      Resources.subtract(queueMaxResources, queueUsage);

  final Resource clusterResource = Resources.createResource(8192, 8);
  final Resource clusterUsage = Resources.createResource(2048, 2);
  final Resource clusterAvailable =
      Resources.subtract(clusterResource, clusterUsage);

  final QueueMetrics fakeRootQueueMetrics = Mockito.mock(QueueMetrics.class);

  Mockito.when(mockQueue.getMaxShare()).thenReturn(queueMaxResources);
  Mockito.when(mockQueue.getFairShare()).thenReturn(queueFairShare);
  Mockito.when(mockQueue.getResourceUsage()).thenReturn(queueUsage);
  Mockito.when(mockScheduler.getClusterResource()).thenReturn
      (clusterResource);
  Mockito.when(fakeRootQueueMetrics.getAllocatedResources()).thenReturn
      (clusterUsage);
  Mockito.when(mockScheduler.getRootQueueMetrics()).thenReturn
      (fakeRootQueueMetrics);

  ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1);
  RMContext rmContext = resourceManager.getRMContext();
  FSAppAttempt schedulerApp =
      new FSAppAttempt(mockScheduler, applicationAttemptId, "user1", mockQueue ,
          null, rmContext);

  // Min of Memory and CPU across cluster and queue is used in
  // DominantResourceFairnessPolicy
  Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy
      .getInstance(DominantResourceFairnessPolicy.class));
  verifyHeadroom(schedulerApp,
      min(queueStarvation.getMemory(),
          clusterAvailable.getMemory(),
          queueMaxResourcesAvailable.getMemory()),
      min(queueStarvation.getVirtualCores(),
          clusterAvailable.getVirtualCores(),
          queueMaxResourcesAvailable.getVirtualCores())
  );

  // Fair and Fifo ignore CPU of queue, so use cluster available CPU
  Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy
      .getInstance(FairSharePolicy.class));
  verifyHeadroom(schedulerApp,
      min(queueStarvation.getMemory(),
          clusterAvailable.getMemory(),
          queueMaxResourcesAvailable.getMemory()),
      Math.min(
          clusterAvailable.getVirtualCores(),
          queueMaxResourcesAvailable.getVirtualCores())
  );

  Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy
      .getInstance(FifoPolicy.class));
  verifyHeadroom(schedulerApp,
      min(queueStarvation.getMemory(),
          clusterAvailable.getMemory(),
          queueMaxResourcesAvailable.getMemory()),
      Math.min(
          clusterAvailable.getVirtualCores(),
          queueMaxResourcesAvailable.getVirtualCores())
  );
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 76, Source: TestFSAppAttempt.java

Example 11: testHeadroom

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; // import the required package/class
@Test
public void testHeadroom() {
  final FairScheduler mockScheduler = Mockito.mock(FairScheduler.class);
  Mockito.when(mockScheduler.getClock()).thenReturn(scheduler.getClock());

  final FSLeafQueue mockQueue = Mockito.mock(FSLeafQueue.class);

  final Resource queueMaxResources = Resource.newInstance(5 * 1024, 3);
  final Resource queueFairShare = Resources.createResource(4096, 2);
  final Resource queueUsage = Resource.newInstance(2048, 2);

  final Resource queueStarvation =
      Resources.subtract(queueFairShare, queueUsage);
  final Resource queueMaxResourcesAvailable =
      Resources.subtract(queueMaxResources, queueUsage);

  final Resource clusterResource = Resources.createResource(8192, 8);
  final Resource clusterUsage = Resources.createResource(2048, 2);
  final Resource clusterAvailable =
      Resources.subtract(clusterResource, clusterUsage);

  final QueueMetrics fakeRootQueueMetrics = Mockito.mock(QueueMetrics.class);

  Mockito.when(mockQueue.getMaxShare()).thenReturn(queueMaxResources);
  Mockito.when(mockQueue.getFairShare()).thenReturn(queueFairShare);
  Mockito.when(mockQueue.getResourceUsage()).thenReturn(queueUsage);
  Mockito.when(mockScheduler.getClusterResource()).thenReturn
      (clusterResource);
  Mockito.when(fakeRootQueueMetrics.getAllocatedResources()).thenReturn
      (clusterUsage);
  Mockito.when(mockScheduler.getRootQueueMetrics()).thenReturn
      (fakeRootQueueMetrics);

  ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1);
  RMContext rmContext = resourceManager.getRMContext();
  FSAppAttempt schedulerApp =
      new FSAppAttempt(mockScheduler, applicationAttemptId, "user1", mockQueue ,
          null, rmContext);

  // Min of Memory and CPU across cluster and queue is used in
  // DominantResourceFairnessPolicy
  Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy
      .getInstance(DominantResourceFairnessPolicy.class));
  verifyHeadroom(schedulerApp,
      min(queueStarvation.getMemorySize(),
          clusterAvailable.getMemorySize(),
          queueMaxResourcesAvailable.getMemorySize()),
      min(queueStarvation.getVirtualCores(),
          clusterAvailable.getVirtualCores(),
          queueMaxResourcesAvailable.getVirtualCores())
  );

  // Fair and Fifo ignore CPU of queue, so use cluster available CPU
  Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy
      .getInstance(FairSharePolicy.class));
  verifyHeadroom(schedulerApp,
      min(queueStarvation.getMemorySize(),
          clusterAvailable.getMemorySize(),
          queueMaxResourcesAvailable.getMemorySize()),
      Math.min(
          clusterAvailable.getVirtualCores(),
          queueMaxResourcesAvailable.getVirtualCores())
  );

  Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy
      .getInstance(FifoPolicy.class));
  verifyHeadroom(schedulerApp,
      min(queueStarvation.getMemorySize(),
          clusterAvailable.getMemorySize(),
          queueMaxResourcesAvailable.getMemorySize()),
      Math.min(
          clusterAvailable.getVirtualCores(),
          queueMaxResourcesAvailable.getVirtualCores())
  );
}
 
Developer: hopshadoop, Project: hops, Lines: 76, Source: TestFSAppAttempt.java

Example 12: testFifoWithinQueue

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; // import the required package/class
@Test (timeout = 5000)
public void testFifoWithinQueue() throws Exception {
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node1 =
      MockNodes
          .newNodeInfo(1, Resources.createResource(3072, 3), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  
  // Even if submitted at exact same time, apps will be deterministically
  // ordered by name.
  ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1",
      "user1", 2);
  ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1",
      "user1", 2);
  FSSchedulerApp app1 = scheduler.getSchedulerApp(attId1);
  FSSchedulerApp app2 = scheduler.getSchedulerApp(attId2);
  
  FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true);
  queue1.setPolicy(new FifoPolicy());
  
  scheduler.update();

  // First two containers should go to app 1, third should go to app 2.
  // Because tests set assignmultiple to false, each heartbeat assigns a single
  // container.
  
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);

  scheduler.handle(updateEvent);
  assertEquals(1, app1.getLiveContainers().size());
  assertEquals(0, app2.getLiveContainers().size());
  
  scheduler.handle(updateEvent);
  assertEquals(2, app1.getLiveContainers().size());
  assertEquals(0, app2.getLiveContainers().size());
  
  scheduler.handle(updateEvent);
  assertEquals(2, app1.getLiveContainers().size());
  assertEquals(1, app2.getLiveContainers().size());
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 43, Source: TestFairScheduler.java


Note: The org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy class examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's license before distributing or using the code, and do not reproduce it without permission.