This article collects typical usage examples of the Java method org.apache.hadoop.yarn.api.records.ContainerId.newContainerId. If you have been wondering what exactly ContainerId.newContainerId does, how to call it, or what it looks like in real code, the curated method examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.yarn.api.records.ContainerId.
The following shows 15 code examples of the ContainerId.newContainerId method, sorted by popularity by default.
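Before the collected examples, here is a minimal, self-contained sketch of the pattern they all share (this sketch is not taken from the Hadoop code base; the class name ContainerIdSketch and the sample id values are assumptions for illustration): a ContainerId is always built from an ApplicationAttemptId, which is in turn built from an ApplicationId.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;

public class ContainerIdSketch {
  public static void main(String[] args) {
    // Arbitrary sample values: cluster timestamp 0, application id 1.
    ApplicationId appId = ApplicationId.newInstance(0L, 1);
    // First attempt of that application.
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    // Container #1 of that attempt; the second argument is the container's sequence number.
    ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1L);
    System.out.println(containerId); // e.g. container_0_0001_01_000001
  }
}

All 15 examples below follow this ApplicationId -> ApplicationAttemptId -> ContainerId chain; they differ only in the surrounding test or helper logic.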
Example 1: mockApplicationHistoryClientService
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
ApplicationHistoryClientService mockApplicationHistoryClientService(int numApps,
int numAppAttempts, int numContainers) throws Exception {
ApplicationHistoryManager ahManager =
new MockApplicationHistoryManagerImpl(store);
ApplicationHistoryClientService historyClientService =
new ApplicationHistoryClientService(ahManager);
for (int i = 1; i <= numApps; ++i) {
ApplicationId appId = ApplicationId.newInstance(0, i);
writeApplicationStartData(appId);
for (int j = 1; j <= numAppAttempts; ++j) {
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, j);
writeApplicationAttemptStartData(appAttemptId);
for (int k = 1; k <= numContainers; ++k) {
ContainerId containerId = ContainerId.newContainerId(appAttemptId, k);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
writeApplicationAttemptFinishData(appAttemptId);
}
writeApplicationFinishData(appId);
}
return historyClientService;
}
Example 2: getContainerId
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
private static ContainerId getContainerId(ApplicationId applicationId,
int startCount) {
ApplicationAttemptId appAttemptId =
getApplicationAttemptId(applicationId, startCount);
ContainerId containerId =
ContainerId.newContainerId(appAttemptId, startCount);
return containerId;
}
Example 3: testWriteHistoryData
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
private void testWriteHistoryData(
int num, boolean missingContainer, boolean missingApplicationAttempt)
throws IOException {
// write application history data
for (int i = 1; i <= num; ++i) {
ApplicationId appId = ApplicationId.newInstance(0, i);
writeApplicationStartData(appId);
// write application attempt history data
for (int j = 1; j <= num; ++j) {
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, j);
writeApplicationAttemptStartData(appAttemptId);
if (missingApplicationAttempt && j == num) {
continue;
}
// write container history data
for (int k = 1; k <= num; ++k) {
ContainerId containerId = ContainerId.newContainerId(appAttemptId, k);
writeContainerStartData(containerId);
if (missingContainer && k == num) {
continue;
}
writeContainerFinishData(containerId);
}
writeApplicationAttemptFinishData(appAttemptId);
}
writeApplicationFinishData(appId);
}
}
Example 4: testContainersHelpCommand
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
@Test (timeout = 10000)
public void testContainersHelpCommand() throws Exception {
ApplicationCLI cli = createAndGetAppCLI();
ApplicationCLI spyCli = spy(cli);
int result = spyCli.run(new String[] { "container", "-help" });
Assert.assertTrue(result == 0);
verify(spyCli).printUsage(any(String.class), any(Options.class));
Assert.assertEquals(createContainerCLIHelpMessage(),
sysOutStream.toString());
sysOutStream.reset();
ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(applicationId, 6);
result = cli.run(
new String[] {"container", "-list", appAttemptId.toString(), "args" });
verify(spyCli).printUsage(any(String.class), any(Options.class));
Assert.assertEquals(createContainerCLIHelpMessage(),
sysOutStream.toString());
sysOutStream.reset();
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 7);
result = cli.run(
new String[] { "container", "-status", containerId.toString(), "args" });
verify(spyCli).printUsage(any(String.class), any(Options.class));
Assert.assertEquals(createContainerCLIHelpMessage(),
sysOutStream.toString());
}
Example 5: testExistenceOfResourceRequestInRMContainer
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
@Test
public void testExistenceOfResourceRequestInRMContainer() throws Exception {
Configuration conf = new Configuration();
MockRM rm1 = new MockRM(conf);
rm1.start();
MockNM nm1 = rm1.registerNode("unknownhost:1234", 8000);
RMApp app1 = rm1.submitApp(1024);
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
ResourceScheduler scheduler = rm1.getResourceScheduler();
// request a container.
am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>());
ContainerId containerId2 = ContainerId.newContainerId(
am1.getApplicationAttemptId(), 2);
rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
// Verify whether list of ResourceRequest is present in RMContainer
// while moving to ALLOCATED state
Assert.assertNotNull(scheduler.getRMContainer(containerId2)
.getResourceRequests());
// Allocate container
am1.allocate(new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>())
.getAllocatedContainers();
rm1.waitForState(nm1, containerId2, RMContainerState.ACQUIRED);
// After the RMContainer moves to the ACQUIRED state, the list of
// ResourceRequests will be empty
Assert.assertNull(scheduler.getRMContainer(containerId2)
.getResourceRequests());
}
Example 6: createAMInfo
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
private static AMInfo createAMInfo(int attempt) {
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(100, 1), attempt);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
return MRBuilderUtils.newAMInfo(appAttemptId, System.currentTimeMillis(),
containerId, NM_HOST, NM_PORT, NM_HTTP_PORT);
}
Example 7: testContainerTokenGeneratedOnPullRequest
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
@Test
public void testContainerTokenGeneratedOnPullRequest() throws Exception {
MockRM rm1 = new MockRM(conf);
rm1.start();
MockNM nm1 = rm1.registerNode("127.0.0.1:1234", 8000);
RMApp app1 = rm1.submitApp(200);
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
// request a container.
am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>());
ContainerId containerId2 =
ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
RMContainer container =
rm1.getResourceScheduler().getRMContainer(containerId2);
// No container token has been generated yet.
Assert.assertEquals(containerId2, container.getContainerId());
Assert.assertNull(container.getContainer().getContainerToken());
// acquire the container.
List<Container> containers =
am1.allocate(new ArrayList<ResourceRequest>(),
new ArrayList<ContainerId>()).getAllocatedContainers();
Assert.assertEquals(containerId2, containers.get(0).getId());
// The container token is now generated.
Assert.assertNotNull(containers.get(0).getContainerToken());
rm1.stop();
}
Example 8: createNMContainerStatus
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
public static NMContainerStatus createNMContainerStatus(int id,
ContainerState containerState) {
ApplicationId applicationId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 1);
ContainerId containerId = ContainerId.newContainerId(applicationAttemptId, id);
NMContainerStatus containerReport =
NMContainerStatus.newInstance(containerId, containerState,
Resource.newInstance(1024, 1), "recover container", 0,
Priority.newInstance(10), 0);
return containerReport;
}
Example 9: handle
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
@SuppressWarnings("unchecked")
@Override
public void handle(ContainerAllocatorEvent event) {
if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
LOG.info("Processing the event " + event.toString());
// Assign the same container ID as the AM
ContainerId cID =
ContainerId.newContainerId(getContext().getApplicationAttemptId(),
this.containerId.getContainerId());
Container container = recordFactory.newRecordInstance(Container.class);
container.setId(cID);
NodeId nodeId = NodeId.newInstance(this.nmHost, this.nmPort);
container.setNodeId(nodeId);
container.setContainerToken(null);
container.setNodeHttpAddress(this.nmHost + ":" + this.nmHttpPort);
// send the container-assigned event to task attempt
if (event.getAttemptID().getTaskId().getTaskType() == TaskType.MAP) {
JobCounterUpdateEvent jce =
new JobCounterUpdateEvent(event.getAttemptID().getTaskId()
.getJobId());
// TODO Setting OTHER_LOCAL_MAP for now.
jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
eventHandler.handle(jce);
}
eventHandler.handle(new TaskAttemptContainerAssignedEvent(
event.getAttemptID(), container, applicationACLs));
}
}
Example 10: testAppDiognosticEventOnUnassignedTask
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
@Test
public void testAppDiognosticEventOnUnassignedTask() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
appId, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(
new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
jobFile, 1, splits, jobConf, taListener,
new Token(), new Credentials(), new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptId,
"Task got killed"));
assertFalse(
"InternalError occurred trying to handle TA_DIAGNOSTICS_UPDATE on assigned task",
eventHandler.internalError);
}
Example 11: testRecoverRequestAfterPreemption
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
@Test(timeout=5000)
public void testRecoverRequestAfterPreemption() throws Exception {
conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL, 10);
MockClock clock = new MockClock();
scheduler.setClock(clock);
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf, resourceManager.getRMContext());
Priority priority = Priority.newInstance(20);
String host = "127.0.0.1";
int GB = 1024;
// Create a node and raise a node-added scheduler event
RMNode node = MockNodes.newNodeInfo(1,
Resources.createResource(16 * 1024, 4, 4), 0, host);
NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
scheduler.handle(nodeEvent);
// Create 3 container requests and place them in the ask list
List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
ResourceRequest nodeLocalRequest = createResourceRequest(GB, 1, 1, host,
priority.getPriority(), 1, true);
ResourceRequest rackLocalRequest = createResourceRequest(GB, 1, 1,
node.getRackName(), priority.getPriority(), 1, true);
ResourceRequest offRackRequest = createResourceRequest(GB, 1, 1,
ResourceRequest.ANY, priority.getPriority(), 1, true);
ask.add(nodeLocalRequest);
ask.add(rackLocalRequest);
ask.add(offRackRequest);
// Create Request and update
ApplicationAttemptId appAttemptId = createSchedulingRequest("queueA",
"user1", ask);
scheduler.update();
// Sufficient node check-ins to fully schedule containers
NodeUpdateSchedulerEvent nodeUpdate = new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeUpdate);
assertEquals(1, scheduler.getSchedulerApp(appAttemptId).getLiveContainers()
.size());
FSAppAttempt app = scheduler.getSchedulerApp(appAttemptId);
// The ResourceRequest is removed from the app once the NodeUpdate has been processed
Assert.assertNull(app.getResourceRequest(priority, host));
ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 1);
RMContainer rmContainer = app.getRMContainer(containerId1);
// Create a preempt event and register for preemption
scheduler.warnOrKillContainer(rmContainer);
// Wait for a few clock ticks
clock.tick(5);
// preempt now
scheduler.warnOrKillContainer(rmContainer);
List<ResourceRequest> requests = rmContainer.getResourceRequests();
// Once recovered, resource request will be present again in app
Assert.assertEquals(3, requests.size());
for (ResourceRequest request : requests) {
Assert.assertEquals(1,
app.getResourceRequest(priority, request.getResourceName())
.getNumContainers());
}
// Send node heartbeat
scheduler.update();
scheduler.handle(nodeUpdate);
List<Container> containers = scheduler.allocate(appAttemptId,
Collections.<ResourceRequest> emptyList(),
Collections.<ContainerId> emptyList(), null, null).getContainers();
// Now with updated ResourceRequest, a container is allocated for AM.
Assert.assertTrue(containers.size() == 1);
}
Example 12: testContainerKillWhileRunning
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
@Test
public void testContainerKillWhileRunning() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId,
0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(
new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
jobFile, 1, splits, jobConf, taListener, new Token(),
new Credentials(), new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.2", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container,
mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
assertEquals("Task attempt is not in running state", taImpl.getState(),
TaskAttemptState.RUNNING);
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_KILL));
assertFalse("InternalError occurred trying to handle TA_KILL",
eventHandler.internalError);
assertEquals("Task should be in KILLED state",
TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
taImpl.getInternalState());
}
Example 13: testLaunchFailedWhileKilling
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
@Test
public void testLaunchFailedWhileKilling() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
splits, jobConf, taListener,
new Token(), new Credentials(),
new SystemClock(), null);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
container, mock(Map.class)));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_KILL));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_CONTAINER_CLEANED));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
assertFalse(eventHandler.internalError);
assertEquals("Task attempt is not assigned on the local node",
Locality.NODE_LOCAL, taImpl.getLocality());
}
Example 14: testPoolLimits
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
@Test(timeout = 5000)
public void testPoolLimits() throws InterruptedException {
ApplicationId appId = ApplicationId.newInstance(12345, 67);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
appId, 3);
JobId jobId = MRBuilderUtils.newJobId(appId, 8);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 10);
AppContext context = mock(AppContext.class);
CustomContainerLauncher containerLauncher = new CustomContainerLauncher(
context);
Configuration conf = new Configuration();
conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, 12);
containerLauncher.init(conf);
containerLauncher.start();
ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();
// 10 different hosts
containerLauncher.expectedCorePoolSize = containerLauncher.initialPoolSize;
for (int i = 0; i < 10; i++) {
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
containerId, "host" + i + ":1234", null,
ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher, 10);
Assert.assertEquals(10, threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
// 4 more different hosts, but thread pool size should be capped at 12
containerLauncher.expectedCorePoolSize = 12;
for (int i = 1; i <= 4; i++) {
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
containerId, "host1" + i + ":1234", null,
ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher, 12);
Assert.assertEquals(12, threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
// Make some threads idle so that the remaining events are also processed.
containerLauncher.finishEventHandling = true;
waitForEvents(containerLauncher, 14);
Assert.assertEquals(12, threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
containerLauncher.stop();
}
Example 15: testGetContainerReportException
import org.apache.hadoop.yarn.api.records.ContainerId; // import the package/class the method depends on
@Test
public void testGetContainerReportException() throws Exception {
ApplicationCLI cli = createAndGetAppCLI();
ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
applicationId, 1);
long cntId = 1;
ContainerId containerId1 = ContainerId.newContainerId(attemptId, cntId++);
when(client.getContainerReport(containerId1)).thenThrow(
new ApplicationNotFoundException("History file for application"
+ applicationId + " is not found"));
int exitCode = cli.run(new String[] { "container", "-status",
containerId1.toString() });
verify(sysOut).println(
"Application for Container with id '" + containerId1
+ "' doesn't exist in RM or Timeline Server.");
Assert.assertNotSame("should return non-zero exit code.", 0, exitCode);
ContainerId containerId2 = ContainerId.newContainerId(attemptId, cntId++);
when(client.getContainerReport(containerId2)).thenThrow(
new ApplicationAttemptNotFoundException(
"History file for application attempt" + attemptId
+ " is not found"));
exitCode = cli.run(new String[] { "container", "-status",
containerId2.toString() });
verify(sysOut).println(
"Application Attempt for Container with id '" + containerId2
+ "' doesn't exist in RM or Timeline Server.");
Assert.assertNotSame("should return non-zero exit code.", 0, exitCode);
ContainerId containerId3 = ContainerId.newContainerId(attemptId, cntId++);
when(client.getContainerReport(containerId3)).thenThrow(
new ContainerNotFoundException("History file for container"
+ containerId3 + " is not found"));
exitCode = cli.run(new String[] { "container", "-status",
containerId3.toString() });
verify(sysOut).println(
"Container with id '" + containerId3
+ "' doesn't exist in RM or Timeline Server.");
Assert.assertNotSame("should return non-zero exit code.", 0, exitCode);
}