This page collects typical usage examples of the Java class org.apache.hadoop.yarn.util.resource.DominantResourceCalculator. If you are unsure what DominantResourceCalculator is for or how to use it, the curated class examples below should help.
DominantResourceCalculator belongs to the org.apache.hadoop.yarn.util.resource package. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java examples.
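Before the examples, a quick illustration of what the class itself does. DominantResourceCalculator implements Dominant Resource Fairness (DRF)-style comparison: two allocations are ordered by whichever resource forms each one's largest share of the cluster. The minimal sketch below is not drawn from the examples on this page; the demo class name and the sample Resource values are illustrative assumptions.
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;

public class DominantResourceDemo {
  public static void main(String[] args) {
    ResourceCalculator calc = new DominantResourceCalculator();
    // Hypothetical cluster: 100 GB of memory and 100 vcores.
    Resource cluster = Resources.createResource(100 * 1024, 100);
    Resource a = Resources.createResource(4 * 1024, 1); // dominant share: memory, 4%
    Resource b = Resources.createResource(2 * 1024, 6); // dominant share: vcores, 6%
    // compare() orders by dominant share of the cluster, so b ranks
    // above a even though a asks for twice the memory.
    System.out.println(calc.compare(cluster, a, b) < 0); // prints: true
  }
}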
Example 1: buildEnv
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
private void buildEnv(String labelsConfig, String nodesConfig,
    String queuesConfig, String appsConfig,
    boolean useDominantResourceCalculator) throws IOException {
  if (useDominantResourceCalculator) {
    when(cs.getResourceCalculator()).thenReturn(
        new DominantResourceCalculator());
  }
  mockNodeLabelsManager(labelsConfig);
  mockSchedulerNodes(nodesConfig);
  for (NodeId nodeId : nodeIdToSchedulerNodes.keySet()) {
    when(cs.getSchedulerNode(nodeId)).thenReturn(
        nodeIdToSchedulerNodes.get(nodeId));
  }
  ParentQueue root = mockQueueHierarchy(queuesConfig);
  when(cs.getRootQueue()).thenReturn(root);
  when(cs.getClusterResource()).thenReturn(clusterResource);
  mockApplications(appsConfig);
  policy = new ProportionalCapacityPreemptionPolicy(conf, rmContext, cs,
      mClock);
}
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 22 | Source: TestProportionalCapacityPreemptionPolicyForNodePartitions.java
Example 2: buildPolicy
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
ProportionalCapacityPreemptionPolicy buildPolicy(int[][] qData,
    String[][] resData, boolean useDominantResourceCalculator) {
  if (useDominantResourceCalculator) {
    when(mCS.getResourceCalculator()).thenReturn(
        new DominantResourceCalculator());
  }
  ProportionalCapacityPreemptionPolicy policy =
      new ProportionalCapacityPreemptionPolicy(conf, rmContext, mCS, mClock);
  clusterResources = leafAbsCapacities(parseResourceDetails(resData[0]),
      qData[2]);
  ParentQueue mRoot = buildMockRootQueue(rand, resData, qData);
  when(mCS.getRootQueue()).thenReturn(mRoot);
  setResourceAndNodeDetails();
  return policy;
}
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 17 | Source: TestProportionalCapacityPreemptionPolicy.java
Example 3: buildEnv
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
public void buildEnv(String labelsConfig, String nodesConfig,
    String queuesConfig, String appsConfig,
    boolean useDominantResourceCalculator) throws IOException {
  if (useDominantResourceCalculator) {
    when(cs.getResourceCalculator()).thenReturn(
        new DominantResourceCalculator());
  }
  mockNodeLabelsManager(labelsConfig);
  mockSchedulerNodes(nodesConfig);
  for (NodeId nodeId : nodeIdToSchedulerNodes.keySet()) {
    when(cs.getSchedulerNode(nodeId)).thenReturn(
        nodeIdToSchedulerNodes.get(nodeId));
  }
  when(cs.getAllNodes()).thenReturn(nodeIdToSchedulerNodes);
  ParentQueue root = mockQueueHierarchy(queuesConfig);
  when(cs.getRootQueue()).thenReturn(root);
  when(cs.getClusterResource()).thenReturn(clusterResource);
  mockApplications(appsConfig);
  policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs,
      mClock);
}
Example 4: testNormalizeRequestWithDominantResourceCalculator
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
@Test (timeout = 30000)
public void testNormalizeRequestWithDominantResourceCalculator() {
  ResourceCalculator resourceCalculator = new DominantResourceCalculator();
  Resource minResource = Resources.createResource(1024, 1, 0);
  Resource maxResource = Resources.createResource(10240, 10, 10);
  Resource clusterResource = Resources.createResource(10 * 1024, 10, 10);
  ResourceRequest ask = new ResourceRequestPBImpl();
  // case: negative memory/vcores/gcores
  ask.setCapability(Resources.createResource(-1024, -1, -1));
  SchedulerUtils.normalizeRequest(
      ask, resourceCalculator, clusterResource, minResource, maxResource);
  assertEquals(minResource, ask.getCapability());
  // case: zero memory/vcores/gcores
  ask.setCapability(Resources.createResource(0, 0, 0));
  SchedulerUtils.normalizeRequest(
      ask, resourceCalculator, clusterResource, minResource, maxResource);
  assertEquals(minResource, ask.getCapability());
  assertEquals(1, ask.getCapability().getVirtualCores());
  assertEquals(1024, ask.getCapability().getMemory());
  assertEquals(0, ask.getCapability().getGpuCores());
  // case: non-zero memory & zero cores
  ask.setCapability(Resources.createResource(1536, 0, 0));
  SchedulerUtils.normalizeRequest(
      ask, resourceCalculator, clusterResource, minResource, maxResource);
  assertEquals(Resources.createResource(2048, 1, 0), ask.getCapability());
  assertEquals(1, ask.getCapability().getVirtualCores());
  assertEquals(2048, ask.getCapability().getMemory());
  assertEquals(0, ask.getCapability().getGpuCores());
}
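A note on the assertions above: SchedulerUtils.normalizeRequest clamps a non-positive ask up to the minimum allocation and rounds every other ask up to the nearest multiple of the minimum in each dimension. That is why the 1536 MB request becomes 2048 MB (the next multiple of 1024 MB) and the zero-vcore ask becomes 1 vcore, while the GPU count stays at 0 (the minimum here is 0 GPUs). The three-argument Resources.createResource and getGpuCores() come from the GPU-enabled fork this example was taken from.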
Example 5: testAMLimitUsage
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
@Test(timeout = 30000)
public void testAMLimitUsage() throws Exception {
  CapacitySchedulerConfiguration config =
      new CapacitySchedulerConfiguration();
  config.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
      DefaultResourceCalculator.class.getName());
  verifyAMLimitForLeafQueue(config);
  config.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
      DominantResourceCalculator.class.getName());
  verifyAMLimitForLeafQueue(config);
}
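This test runs the same AM-limit verification twice: first with DefaultResourceCalculator, which sizes limits by memory alone, then with DominantResourceCalculator, which factors in every resource dimension. On a live cluster the same switch is normally made via the yarn.scheduler.capacity.resource-calculator property in capacity-scheduler.xml, which is the key behind CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS.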
Example 6: testNormalizeRequestWithDominantResourceCalculator
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
@Test (timeout = 30000)
public void testNormalizeRequestWithDominantResourceCalculator() {
  ResourceCalculator resourceCalculator = new DominantResourceCalculator();
  Resource minResource = Resources.createResource(1024, 1);
  Resource maxResource = Resources.createResource(10240, 10);
  Resource clusterResource = Resources.createResource(10 * 1024, 10);
  ResourceRequest ask = new ResourceRequestPBImpl();
  // case: negative memory/vcores
  ask.setCapability(Resources.createResource(-1024, -1));
  SchedulerUtils.normalizeRequest(
      ask, resourceCalculator, clusterResource, minResource, maxResource);
  assertEquals(minResource, ask.getCapability());
  // case: zero memory/vcores
  ask.setCapability(Resources.createResource(0, 0));
  SchedulerUtils.normalizeRequest(
      ask, resourceCalculator, clusterResource, minResource, maxResource);
  assertEquals(minResource, ask.getCapability());
  assertEquals(1, ask.getCapability().getVirtualCores());
  assertEquals(1024, ask.getCapability().getMemory());
  // case: non-zero memory & zero cores
  ask.setCapability(Resources.createResource(1536, 0));
  SchedulerUtils.normalizeRequest(
      ask, resourceCalculator, clusterResource, minResource, maxResource);
  assertEquals(Resources.createResource(2048, 1), ask.getCapability());
  assertEquals(1, ask.getCapability().getVirtualCores());
  assertEquals(2048, ask.getCapability().getMemory());
}
Example 7: testAMLimitUsage
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
@Test(timeout = 30000)
public void testAMLimitUsage() throws Exception {
  CapacitySchedulerConfiguration config =
      new CapacitySchedulerConfiguration();
  config.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
      DefaultResourceCalculator.class.getName());
  verifyAMLimitForLeafQueue(config);
  config.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
      DominantResourceCalculator.class.getName());
  verifyAMLimitForLeafQueue(config);
}
Example 8: testAMLimitUsage
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
@Test(timeout = 60000)
public void testAMLimitUsage() throws Exception {
  CapacitySchedulerConfiguration config =
      new CapacitySchedulerConfiguration();
  config.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
      DefaultResourceCalculator.class.getName());
  verifyAMLimitForLeafQueue(config);
  config.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
      DominantResourceCalculator.class.getName());
  verifyAMLimitForLeafQueue(config);
}
Example 9: testResourceTypes
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
@Test(timeout = 3000000)
public void testResourceTypes() throws Exception {
  HashMap<YarnConfiguration, EnumSet<SchedulerResourceTypes>> driver =
      new HashMap<YarnConfiguration, EnumSet<SchedulerResourceTypes>>();
  CapacitySchedulerConfiguration csconf =
      new CapacitySchedulerConfiguration();
  csconf.setResourceComparator(DominantResourceCalculator.class);
  YarnConfiguration testCapacityDRConf = new YarnConfiguration(csconf);
  testCapacityDRConf.setClass(YarnConfiguration.RM_SCHEDULER,
      CapacityScheduler.class, ResourceScheduler.class);
  YarnConfiguration testCapacityDefConf = new YarnConfiguration();
  testCapacityDefConf.setClass(YarnConfiguration.RM_SCHEDULER,
      CapacityScheduler.class, ResourceScheduler.class);
  YarnConfiguration testFairDefConf = new YarnConfiguration();
  testFairDefConf.setClass(YarnConfiguration.RM_SCHEDULER,
      FairScheduler.class, ResourceScheduler.class);
  driver.put(conf, EnumSet.of(SchedulerResourceTypes.MEMORY));
  driver.put(testCapacityDRConf,
      EnumSet.of(SchedulerResourceTypes.CPU, SchedulerResourceTypes.MEMORY,
          SchedulerResourceTypes.GPU));
  driver.put(testCapacityDefConf, EnumSet.of(SchedulerResourceTypes.MEMORY));
  driver.put(testFairDefConf,
      EnumSet.of(SchedulerResourceTypes.MEMORY, SchedulerResourceTypes.CPU,
          SchedulerResourceTypes.GPU));
  for (Map.Entry<YarnConfiguration, EnumSet<SchedulerResourceTypes>> entry : driver
      .entrySet()) {
    EnumSet<SchedulerResourceTypes> expectedValue = entry.getValue();
    MockRM rm = new MockRM(entry.getKey());
    rm.start();
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
    RMApp app1 = rm.submitApp(2048);
    nm1.nodeHeartbeat(true);
    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
    RegisterApplicationMasterResponse resp = am1.registerAppAttempt();
    EnumSet<SchedulerResourceTypes> types = resp.getSchedulerResourceTypes();
    LOG.info("types = " + types.toString());
    Assert.assertEquals(expectedValue, types);
    rm.stop();
  }
}
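Note that SchedulerResourceTypes.GPU appears here because this example comes from a GPU-enabled Hadoop fork; stock Hadoop of this vintage only advertises MEMORY and CPU, as Example 11 below shows. Either way, the RegisterApplicationMasterResponse is how an application master discovers which resource dimensions the configured scheduler actually enforces.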
Example 10: testAppReservationWithDominantResourceCalculator
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
@Test(timeout = 30000)
public void testAppReservationWithDominantResourceCalculator() throws Exception {
  CapacitySchedulerConfiguration csconf =
      new CapacitySchedulerConfiguration();
  csconf.setResourceComparator(DominantResourceCalculator.class);
  YarnConfiguration conf = new YarnConfiguration(csconf);
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 10 * GB, 1, 1);
  // register extra nodes to bump up cluster resource
  MockNM nm2 = rm.registerNode("127.0.0.1:1235", 10 * GB, 4, 4);
  rm.registerNode("127.0.0.1:1236", 10 * GB, 4, 4);
  RMApp app1 = rm.submitApp(1024);
  // kick the scheduling
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 =
      rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report
  Assert.assertEquals(1 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(9 * GB, report_nm1.getAvailableResource().getMemory());
  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 1 * GB, 1, 1);
  am1.schedule(); // send the request
  // kick the scheduler, container reservation should not happen
  nm1.nodeHeartbeat(true);
  Thread.sleep(1000);
  AllocateResponse allocResponse = am1.schedule();
  ApplicationResourceUsageReport report =
      rm.getResourceScheduler().getAppResourceUsageReport(
          attempt1.getAppAttemptId());
  Assert.assertEquals(0, allocResponse.getAllocatedContainers().size());
  Assert.assertEquals(0, report.getNumReservedContainers());
  // container should get allocated on this node
  nm2.nodeHeartbeat(true);
  while (allocResponse.getAllocatedContainers().size() == 0) {
    Thread.sleep(100);
    allocResponse = am1.schedule();
  }
  report =
      rm.getResourceScheduler().getAppResourceUsageReport(
          attempt1.getAppAttemptId());
  Assert.assertEquals(1, allocResponse.getAllocatedContainers().size());
  Assert.assertEquals(0, report.getNumReservedContainers());
  rm.stop();
}
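Why no reservation happens on nm1: that node registered with a single vcore, which the running AM container already occupies. Because DominantResourceCalculator accounts for vcores (and here gcores) as well as memory, the request cannot fit on nm1 even though 9 GB of memory remains free, so the scheduler neither allocates nor reserves there; the container lands on nm2 once it heartbeats.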
Example 11: testResourceTypes
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
@Test(timeout = 3000000)
public void testResourceTypes() throws Exception {
  HashMap<YarnConfiguration, EnumSet<SchedulerResourceTypes>> driver =
      new HashMap<YarnConfiguration, EnumSet<SchedulerResourceTypes>>();
  CapacitySchedulerConfiguration csconf =
      new CapacitySchedulerConfiguration();
  csconf.setResourceComparator(DominantResourceCalculator.class);
  YarnConfiguration testCapacityDRConf = new YarnConfiguration(csconf);
  testCapacityDRConf.setClass(YarnConfiguration.RM_SCHEDULER,
      CapacityScheduler.class, ResourceScheduler.class);
  YarnConfiguration testCapacityDefConf = new YarnConfiguration();
  testCapacityDefConf.setClass(YarnConfiguration.RM_SCHEDULER,
      CapacityScheduler.class, ResourceScheduler.class);
  YarnConfiguration testFairDefConf = new YarnConfiguration();
  testFairDefConf.setClass(YarnConfiguration.RM_SCHEDULER,
      FairScheduler.class, ResourceScheduler.class);
  driver.put(conf, EnumSet.of(SchedulerResourceTypes.MEMORY));
  driver.put(testCapacityDRConf,
      EnumSet.of(SchedulerResourceTypes.CPU, SchedulerResourceTypes.MEMORY));
  driver.put(testCapacityDefConf, EnumSet.of(SchedulerResourceTypes.MEMORY));
  driver.put(testFairDefConf,
      EnumSet.of(SchedulerResourceTypes.MEMORY, SchedulerResourceTypes.CPU));
  for (Map.Entry<YarnConfiguration, EnumSet<SchedulerResourceTypes>> entry : driver
      .entrySet()) {
    EnumSet<SchedulerResourceTypes> expectedValue = entry.getValue();
    MockRM rm = new MockRM(entry.getKey());
    rm.start();
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
    RMApp app1 = rm.submitApp(2048);
    nm1.nodeHeartbeat(true);
    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
    RegisterApplicationMasterResponse resp = am1.registerAppAttempt();
    EnumSet<SchedulerResourceTypes> types = resp.getSchedulerResourceTypes();
    LOG.info("types = " + types.toString());
    Assert.assertEquals(expectedValue, types);
    rm.stop();
  }
}
Example 12: testAppReservationWithDominantResourceCalculator
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
@Test(timeout = 30000)
public void testAppReservationWithDominantResourceCalculator() throws Exception {
  CapacitySchedulerConfiguration csconf =
      new CapacitySchedulerConfiguration();
  csconf.setResourceComparator(DominantResourceCalculator.class);
  YarnConfiguration conf = new YarnConfiguration(csconf);
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 10 * GB, 1);
  // register extra nodes to bump up cluster resource
  MockNM nm2 = rm.registerNode("127.0.0.1:1235", 10 * GB, 4);
  rm.registerNode("127.0.0.1:1236", 10 * GB, 4);
  RMApp app1 = rm.submitApp(1024);
  // kick the scheduling
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 =
      rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report
  Assert.assertEquals(1 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(9 * GB, report_nm1.getAvailableResource().getMemory());
  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 1 * GB, 1, 1);
  am1.schedule(); // send the request
  // kick the scheduler, container reservation should not happen
  nm1.nodeHeartbeat(true);
  Thread.sleep(1000);
  AllocateResponse allocResponse = am1.schedule();
  ApplicationResourceUsageReport report =
      rm.getResourceScheduler().getAppResourceUsageReport(
          attempt1.getAppAttemptId());
  Assert.assertEquals(0, allocResponse.getAllocatedContainers().size());
  Assert.assertEquals(0, report.getNumReservedContainers());
  // container should get allocated on this node
  nm2.nodeHeartbeat(true);
  while (allocResponse.getAllocatedContainers().size() == 0) {
    Thread.sleep(100);
    allocResponse = am1.schedule();
  }
  report =
      rm.getResourceScheduler().getAppResourceUsageReport(
          attempt1.getAppAttemptId());
  Assert.assertEquals(1, allocResponse.getAllocatedContainers().size());
  Assert.assertEquals(0, report.getNumReservedContainers());
  rm.stop();
}
Example 13: allocate
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
@Override
public Allocation allocate(ApplicationAttemptId appAttemptId,
    List<ResourceRequest> ask, List<ContainerId> release,
    List<String> blacklistAdditions, List<String> blacklistRemovals) {
  // Make sure this application exists
  FSAppAttempt application = getSchedulerApp(appAttemptId);
  if (application == null) {
    LOG.info("Calling allocate on removed " +
        "or non existant application " + appAttemptId);
    return EMPTY_ALLOCATION;
  }
  // Sanity check
  SchedulerUtils.normalizeRequests(ask, new DominantResourceCalculator(),
      clusterResource, minimumAllocation, getMaximumResourceCapability(),
      incrAllocation);
  // Record container allocation start time
  application.recordContainerRequestTime(getClock().getTime());
  // Set amResource for this app
  if (!application.getUnmanagedAM() && ask.size() == 1
      && application.getLiveContainers().isEmpty()) {
    application.setAMResource(ask.get(0).getCapability());
  }
  // Release containers
  releaseContainers(release, application);
  synchronized (application) {
    if (!ask.isEmpty()) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("allocate: pre-update" +
            " applicationAttemptId=" + appAttemptId +
            " application=" + application.getApplicationId());
      }
      application.showRequests();
      // Update application requests
      application.updateResourceRequests(ask);
      application.showRequests();
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("allocate: post-update" +
          " applicationAttemptId=" + appAttemptId +
          " #ask=" + ask.size() +
          " reservation= " + application.getCurrentReservation());
      LOG.debug("Preempting " + application.getPreemptionContainers().size()
          + " container(s)");
    }
    Set<ContainerId> preemptionContainerIds = new HashSet<ContainerId>();
    for (RMContainer container : application.getPreemptionContainers()) {
      preemptionContainerIds.add(container.getContainerId());
    }
    application.updateBlacklist(blacklistAdditions, blacklistRemovals);
    ContainersAndNMTokensAllocation allocation =
        application.pullNewlyAllocatedContainersAndNMTokens();
    // Record container allocation time
    if (!(allocation.getContainerList().isEmpty())) {
      application.recordContainerAllocationTime(getClock().getTime());
    }
    return new Allocation(allocation.getContainerList(),
        application.getHeadroom(), preemptionContainerIds, null, null,
        allocation.getNMTokenList());
  }
}
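A design note on this FairScheduler allocate() path: unlike the CapacityScheduler examples above, which pick the resource calculator up from configuration, it hard-codes a DominantResourceCalculator when normalizing the ask list, so every request is clamped to the minimum/maximum allocation and rounded to the increment across all resource dimensions before any scheduler state is touched.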
Example 14: allocate
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
@Override
public Allocation allocate(ApplicationAttemptId appAttemptId,
    List<ResourceRequest> ask, List<ContainerId> release,
    List<String> blacklistAdditions, List<String> blacklistRemovals) {
  // Make sure this application exists
  FSSchedulerApp application = applications.get(appAttemptId);
  if (application == null) {
    LOG.info("Calling allocate on removed " +
        "or non existant application " + appAttemptId);
    return EMPTY_ALLOCATION;
  }
  // leftover debug logging from the original author ("correct!"):
  // for (ResourceRequest a : ask) {
  //   LOG.info("resource before normalized: " + a.getCapability());
  // }
  // Sanity check
  SchedulerUtils.normalizeRequests(ask, new DominantResourceCalculator(),
      clusterCapacity, minimumAllocation, maximumAllocation, incrAllocation);
  // for (ResourceRequest a : ask) {
  //   LOG.info("resource after normalized: " + a.getCapability());
  // }
  // Release containers
  for (ContainerId releasedContainerId : release) {
    RMContainer rmContainer = getRMContainer(releasedContainerId);
    if (rmContainer == null) {
      RMAuditLogger.logFailure(application.getUser(),
          AuditConstants.RELEASE_CONTAINER,
          "Unauthorized access or invalid container", "FairScheduler",
          "Trying to release container not owned by app or with invalid id",
          application.getApplicationId(), releasedContainerId);
    }
    completedContainer(rmContainer,
        SchedulerUtils.createAbnormalContainerStatus(
            releasedContainerId,
            SchedulerUtils.RELEASED_CONTAINER),
        RMContainerEventType.RELEASED);
  }
  synchronized (application) {
    if (!ask.isEmpty()) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("allocate: pre-update" +
            " applicationAttemptId=" + appAttemptId +
            " application=" + application.getApplicationId());
      }
      application.showRequests();
      // Update application requests
      application.updateResourceRequests(ask);
      LOG.debug("allocate: post-update");
      application.showRequests();
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("allocate:" +
          " applicationAttemptId=" + appAttemptId +
          " #ask=" + ask.size());
      LOG.debug("Preempting " + application.getPreemptionContainers().size()
          + " container(s)");
    }
    Set<ContainerId> preemptionContainerIds = new HashSet<ContainerId>();
    for (RMContainer container : application.getPreemptionContainers()) {
      preemptionContainerIds.add(container.getContainerId());
    }
    return new Allocation(application.pullNewlyAllocatedContainers(),
        application.getHeadroom(), preemptionContainerIds);
  }
}
Example 15: testResourceTypes
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; // import the required package/class
@Test(timeout = 3000000)
public void testResourceTypes() throws Exception {
  HashMap<YarnConfiguration, EnumSet<SchedulerResourceTypes>> driver =
      new HashMap<YarnConfiguration, EnumSet<SchedulerResourceTypes>>();
  CapacitySchedulerConfiguration csconf =
      new CapacitySchedulerConfiguration();
  csconf.setResourceComparator(DominantResourceCalculator.class);
  YarnConfiguration testCapacityDRConf = new YarnConfiguration(csconf);
  testCapacityDRConf.setClass(YarnConfiguration.RM_SCHEDULER,
      CapacityScheduler.class, ResourceScheduler.class);
  YarnConfiguration testCapacityDefConf = new YarnConfiguration();
  testCapacityDefConf.setClass(YarnConfiguration.RM_SCHEDULER,
      CapacityScheduler.class, ResourceScheduler.class);
  YarnConfiguration testFairDefConf = new YarnConfiguration();
  testFairDefConf.setClass(YarnConfiguration.RM_SCHEDULER,
      FairScheduler.class, ResourceScheduler.class);
  driver.put(conf, EnumSet.of(SchedulerResourceTypes.MEMORY));
  driver.put(testCapacityDRConf,
      EnumSet.of(SchedulerResourceTypes.CPU, SchedulerResourceTypes.MEMORY,
          SchedulerResourceTypes.GPU));
  driver.put(testCapacityDefConf, EnumSet.of(SchedulerResourceTypes.MEMORY));
  driver.put(testFairDefConf,
      EnumSet.of(SchedulerResourceTypes.CPU, SchedulerResourceTypes.MEMORY));
  for (Map.Entry<YarnConfiguration, EnumSet<SchedulerResourceTypes>> entry : driver
      .entrySet()) {
    EnumSet<SchedulerResourceTypes> expectedValue = entry.getValue();
    MockRM rm = new MockRM(entry.getKey());
    rm.start();
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
    RMApp app1 = rm.submitApp(2048);
    nm1.nodeHeartbeat(true);
    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
    RegisterApplicationMasterResponse resp = am1.registerAppAttempt();
    EnumSet<SchedulerResourceTypes> types = resp.getSchedulerResourceTypes();
    LOG.info("types = " + types.toString());
    Assert.assertEquals(expectedValue, types);
    rm.stop();
  }
}