This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils.newTaskId. If you have been wondering what exactly MRBuilderUtils.newTaskId does, how to call it, or where to find working examples, the curated snippets below should help. You can also read more about the declaring class, org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils.
The following shows 15 code examples of MRBuilderUtils.newTaskId, sorted by popularity by default.
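Before the individual examples, here is a minimal, self-contained sketch of the call chain nearly all of the snippets below share: an ApplicationId becomes a JobId, newTaskId turns the JobId plus a partition number and TaskType into a TaskId, and that TaskId yields a TaskAttemptId. This is not taken from any one project; the timestamp and id values are made-up placeholders.

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class NewTaskIdSketch {
  public static void main(String[] args) {
    // placeholder cluster timestamp and application number
    ApplicationId appId = ApplicationId.newInstance(1234L, 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    // newTaskId(jobId, partition, taskType) -> TaskId
    TaskId mapTask = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
    TaskAttemptId attempt = MRBuilderUtils.newTaskAttemptId(mapTask, 0);
    System.out.println(mapTask + " / " + attempt);
  }
}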
Example 1: createTce
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
private static TaskAttemptCompletionEvent createTce(int eventId,
    boolean isMap, TaskAttemptCompletionEventStatus status) {
  JobId jid = MRBuilderUtils.newJobId(12345, 1, 1);
  TaskId tid = MRBuilderUtils.newTaskId(jid, 0,
      isMap ? org.apache.hadoop.mapreduce.v2.api.records.TaskType.MAP
          : org.apache.hadoop.mapreduce.v2.api.records.TaskType.REDUCE);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(tid, 0);
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
  TaskAttemptCompletionEvent tce = recordFactory
      .newRecordInstance(TaskAttemptCompletionEvent.class);
  tce.setEventId(eventId);
  tce.setAttemptId(attemptId);
  tce.setStatus(status);
  return tce;
}
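A hypothetical call to the helper above (SUCCEEDED is one of the TaskAttemptCompletionEventStatus constants; the event id 0 is arbitrary):

// sketch: build a completion event for a successful map attempt
TaskAttemptCompletionEvent mapTce =
    createTce(0, true, TaskAttemptCompletionEventStatus.SUCCEEDED);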
Example 2: testTimeout
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testTimeout() throws InterruptedException {
  EventHandler mockHandler = mock(EventHandler.class);
  Clock clock = new SystemClock();
  TaskHeartbeatHandler hb = new TaskHeartbeatHandler(mockHandler, clock, 1);
  Configuration conf = new Configuration();
  conf.setInt(MRJobConfig.TASK_TIMEOUT, 10); // 10 ms
  conf.setInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 10); // 10 ms
  hb.init(conf);
  hb.start();
  try {
    ApplicationId appId = ApplicationId.newInstance(0L, 5);
    JobId jobId = MRBuilderUtils.newJobId(appId, 4);
    TaskId tid = MRBuilderUtils.newTaskId(jobId, 3, TaskType.MAP);
    TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 2);
    hb.register(taid);
    Thread.sleep(100);
    // events only happen when the task is canceled
    verify(mockHandler, times(2)).handle(any(Event.class));
  } finally {
    hb.stop();
  }
}
Example 3: createDeallocateEvent
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
private ContainerAllocatorEvent createDeallocateEvent(JobId jobId,
    int taskAttemptId, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId =
      MRBuilderUtils.newTaskAttemptId(taskId, taskAttemptId);
  return new ContainerAllocatorEvent(attemptId,
      ContainerAllocator.EventType.CONTAINER_DEALLOCATE);
}
Example 4: createFailEvent
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
private ContainerFailedEvent createFailEvent(JobId jobId, int taskAttemptId,
    String host, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
      taskAttemptId);
  return new ContainerFailedEvent(attemptId, host);
}
Example 5: testCompletedTask
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
@Test (timeout=10000)
public void testCompletedTask() throws Exception {
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob =
      new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user",
          info, jobAclsManager);
  TaskId mt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  TaskId rt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  Map<TaskId, Task> mapTasks = completedJob.getTasks(TaskType.MAP);
  Map<TaskId, Task> reduceTasks = completedJob.getTasks(TaskType.REDUCE);
  assertEquals(10, mapTasks.size());
  assertEquals(2, reduceTasks.size());
  Task mt1 = mapTasks.get(mt1Id);
  assertEquals(1, mt1.getAttempts().size());
  assertEquals(TaskState.SUCCEEDED, mt1.getState());
  TaskReport mt1Report = mt1.getReport();
  assertEquals(TaskState.SUCCEEDED, mt1Report.getTaskState());
  assertEquals(mt1Id, mt1Report.getTaskId());
  Task rt1 = reduceTasks.get(rt1Id);
  assertEquals(1, rt1.getAttempts().size());
  assertEquals(TaskState.SUCCEEDED, rt1.getState());
  TaskReport rt1Report = rt1.getReport();
  assertEquals(TaskState.SUCCEEDED, rt1Report.getTaskState());
  assertEquals(rt1Id, rt1Report.getTaskId());
}
Example 6: makeTaskAttemptId
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
public static TaskAttemptId makeTaskAttemptId(long ts, int appId, int taskId,
    TaskType taskType, int id) {
  ApplicationId aID = ApplicationId.newInstance(ts, appId);
  JobId jID = MRBuilderUtils.newJobId(aID, id);
  TaskId tID = MRBuilderUtils.newTaskId(jID, taskId, taskType);
  return MRBuilderUtils.newTaskAttemptId(tID, id);
}
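Note that this helper reuses its id parameter both as the job number and as the attempt number. A hypothetical call, with made-up argument values:

// sketch: map task 3 of job 7, attempt 7, on an application "started now"
TaskAttemptId taid = makeTaskAttemptId(
    System.currentTimeMillis(), 1, 3, TaskType.MAP, 7);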
Example 7: testRenameMapOutputForReduce
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
@Test
public void testRenameMapOutputForReduce() throws Exception {
  final JobConf conf = new JobConf();
  final MROutputFiles mrOutputFiles = new MROutputFiles();
  mrOutputFiles.setConf(conf);
  // make sure both dirs are distinct
  conf.set(MRConfig.LOCAL_DIR, localDirs[0].toString());
  final Path mapOut = mrOutputFiles.getOutputFileForWrite(1);
  conf.set(MRConfig.LOCAL_DIR, localDirs[1].toString());
  final Path mapOutIdx = mrOutputFiles.getOutputIndexFileForWrite(1);
  Assert.assertNotEquals("Paths must be different!",
      mapOut.getParent(), mapOutIdx.getParent());
  // make both dirs part of LOCAL_DIR
  conf.setStrings(MRConfig.LOCAL_DIR, localDirs);
  final FileContext lfc = FileContext.getLocalFSFileContext(conf);
  lfc.create(mapOut, EnumSet.of(CREATE)).close();
  lfc.create(mapOutIdx, EnumSet.of(CREATE)).close();
  final JobId jobId = MRBuilderUtils.newJobId(12345L, 1, 2);
  final TaskId tid = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  final TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 0);
  LocalContainerLauncher.renameMapOutputForReduce(conf, taid, mrOutputFiles);
}
Example 8: createMapTaskAttemptImplForTest
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
private TaskAttemptImpl createMapTaskAttemptImplForTest(
    EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo, Clock clock) {
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  Path jobFile = mock(Path.class);
  JobConf jobConf = new JobConf();
  TaskAttemptImpl taImpl =
      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
          taskSplitMetaInfo, jobConf, taListener, null,
          null, clock, null);
  return taImpl;
}
Example 9: testCompletedTaskAttempt
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
@Test (timeout=10000)
public void testCompletedTaskAttempt() throws Exception {
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob =
      new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user",
          info, jobAclsManager);
  TaskId mt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  TaskId rt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  TaskAttemptId mta1Id = MRBuilderUtils.newTaskAttemptId(mt1Id, 0);
  TaskAttemptId rta1Id = MRBuilderUtils.newTaskAttemptId(rt1Id, 0);
  Task mt1 = completedJob.getTask(mt1Id);
  Task rt1 = completedJob.getTask(rt1Id);
  TaskAttempt mta1 = mt1.getAttempt(mta1Id);
  assertEquals(TaskAttemptState.SUCCEEDED, mta1.getState());
  assertEquals("localhost:45454", mta1.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999", mta1.getNodeHttpAddress());
  TaskAttemptReport mta1Report = mta1.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED, mta1Report.getTaskAttemptState());
  assertEquals("localhost", mta1Report.getNodeManagerHost());
  assertEquals(45454, mta1Report.getNodeManagerPort());
  assertEquals(9999, mta1Report.getNodeManagerHttpPort());
  TaskAttempt rta1 = rt1.getAttempt(rta1Id);
  assertEquals(TaskAttemptState.SUCCEEDED, rta1.getState());
  assertEquals("localhost:45454", rta1.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999", rta1.getNodeHttpAddress());
  TaskAttemptReport rta1Report = rta1.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED, rta1Report.getTaskAttemptState());
  assertEquals("localhost", rta1Report.getNodeManagerHost());
  assertEquals(45454, rta1Report.getNodeManagerPort());
  assertEquals(9999, rta1Report.getNodeManagerHttpPort());
}
Example 10: TaskImpl
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
public TaskImpl(JobId jobId, TaskType taskType, int partition,
    EventHandler eventHandler, Path remoteJobConfFile, JobConf conf,
    TaskAttemptListener taskAttemptListener,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
  this.conf = conf;
  this.clock = clock;
  this.jobFile = remoteJobConfFile;
  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  readLock = readWriteLock.readLock();
  writeLock = readWriteLock.writeLock();
  this.attempts = Collections.emptyMap();
  this.finishedAttempts = new HashSet<TaskAttemptId>(2);
  this.failedAttempts = new HashSet<TaskAttemptId>(2);
  this.inProgressAttempts = new HashSet<TaskAttemptId>(2);
  // This overridable method call is okay in a constructor because we
  // have a convention that none of the overrides depends on any
  // fields that need initialization.
  maxAttempts = getMaxAttempts();
  taskId = MRBuilderUtils.newTaskId(jobId, partition, taskType);
  this.partition = partition;
  this.taskAttemptListener = taskAttemptListener;
  this.eventHandler = eventHandler;
  this.credentials = credentials;
  this.jobToken = jobToken;
  this.metrics = metrics;
  this.appContext = appContext;
  this.encryptedShuffle = conf.getBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY,
      MRConfig.SHUFFLE_SSL_ENABLED_DEFAULT);
  // This "this leak" is okay because the retained pointer is in an
  // instance variable.
  stateMachine = stateMachineFactory.make(this);
  // All the new TaskAttemptIDs are generated based on the MR
  // ApplicationAttemptID so that attempts from previous lives don't
  // over-step the current one. This assumes that a task won't have more
  // than 1000 attempts in its single generation, which is very reasonable.
  nextAttemptNumber = (appAttemptId - 1) * 1000;
}
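The numbering scheme in the last line above can be illustrated with a small standalone sketch. The class and main method are scaffolding added here for illustration; only the (appAttemptId - 1) * 1000 formula comes from the constructor.

// Illustration of the per-generation attempt-number stride used above.
public class AttemptNumberingSketch {
  public static void main(String[] args) {
    for (int appAttemptId = 1; appAttemptId <= 3; appAttemptId++) {
      int nextAttemptNumber = (appAttemptId - 1) * 1000;
      // AM attempt 1 numbers its task attempts from 0, AM attempt 2
      // from 1000, AM attempt 3 from 2000, so attempt IDs from a
      // previous AM generation never collide with the current one
      // (assuming fewer than 1000 attempts per generation).
      System.out.println("AM attempt " + appAttemptId
          + " -> first task attempt number " + nextAttemptNumber);
    }
  }
}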
Example 11: testAverageReduceTime
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
@Test
public void testAverageReduceTime() {
  Job job = mock(CompletedJob.class);
  final Task task1 = mock(Task.class);
  final Task task2 = mock(Task.class);
  JobId jobId = MRBuilderUtils.newJobId(1L, 1, 1);
  final TaskId taskId1 = MRBuilderUtils.newTaskId(jobId, 1, TaskType.REDUCE);
  final TaskId taskId2 = MRBuilderUtils.newTaskId(jobId, 2, TaskType.REDUCE);
  final TaskAttemptId taskAttemptId1 = MRBuilderUtils
      .newTaskAttemptId(taskId1, 1);
  final TaskAttemptId taskAttemptId2 = MRBuilderUtils
      .newTaskAttemptId(taskId2, 2);
  final TaskAttempt taskAttempt1 = mock(TaskAttempt.class);
  final TaskAttempt taskAttempt2 = mock(TaskAttempt.class);
  JobReport jobReport = mock(JobReport.class);
  when(taskAttempt1.getState()).thenReturn(TaskAttemptState.SUCCEEDED);
  when(taskAttempt1.getLaunchTime()).thenReturn(0L);
  when(taskAttempt1.getShuffleFinishTime()).thenReturn(4L);
  when(taskAttempt1.getSortFinishTime()).thenReturn(6L);
  when(taskAttempt1.getFinishTime()).thenReturn(8L);
  when(taskAttempt2.getState()).thenReturn(TaskAttemptState.SUCCEEDED);
  when(taskAttempt2.getLaunchTime()).thenReturn(5L);
  when(taskAttempt2.getShuffleFinishTime()).thenReturn(10L);
  when(taskAttempt2.getSortFinishTime()).thenReturn(22L);
  when(taskAttempt2.getFinishTime()).thenReturn(42L);
  when(task1.getType()).thenReturn(TaskType.REDUCE);
  when(task2.getType()).thenReturn(TaskType.REDUCE);
  when(task1.getAttempts()).thenReturn(
      new HashMap<TaskAttemptId, TaskAttempt>() {{
        put(taskAttemptId1, taskAttempt1);
      }});
  when(task2.getAttempts()).thenReturn(
      new HashMap<TaskAttemptId, TaskAttempt>() {{
        put(taskAttemptId2, taskAttempt2);
      }});
  when(job.getTasks()).thenReturn(
      new HashMap<TaskId, Task>() {{
        put(taskId1, task1);
        put(taskId2, task2);
      }});
  when(job.getID()).thenReturn(jobId);
  when(job.getReport()).thenReturn(jobReport);
  when(job.getName()).thenReturn("TestJobInfo");
  when(job.getState()).thenReturn(JobState.SUCCEEDED);
  JobInfo jobInfo = new JobInfo(job);
  // expected average of (finishTime - sortFinishTime):
  // ((8 - 6) + (42 - 22)) / 2 = 11
  Assert.assertEquals(11L, jobInfo.getAvgReduceTime().longValue());
}
Example 12: testContainerCleanedWhileRunning
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
@Test
public void testContainerCleanedWhileRunning() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(
      new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);
  TaskAttemptImpl taImpl =
      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
          splits, jobConf, taListener,
          new Token(), new Credentials(),
          new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.2", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
      container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  assertEquals("Task attempt is not in running state", taImpl.getState(),
      TaskAttemptState.RUNNING);
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
  assertEquals("Task attempt is not assigned on the local rack",
      Locality.RACK_LOCAL, taImpl.getLocality());
}
Example 13: testContainerCleanedWhileCommitting
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
@Test
public void testContainerCleanedWhileCommitting() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(
      new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] {});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);
  TaskAttemptImpl taImpl =
      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
          splits, jobConf, taListener,
          new Token(), new Credentials(),
          new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
      container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_COMMIT_PENDING));
  assertEquals("Task attempt is not in commit pending state", taImpl.getState(),
      TaskAttemptState.COMMIT_PENDING);
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
  assertEquals("Task attempt is assigned locally", Locality.OFF_SWITCH,
      taImpl.getLocality());
}
Example 14: createTaskId
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
private TaskId createTaskId(long clusterTimestamp, int jobIdInt,
    int taskIdInt, TaskType taskType) {
  return MRBuilderUtils.newTaskId(createJobId(clusterTimestamp, jobIdInt),
      taskIdInt, taskType);
}
Example 15: testTooManyFetchFailureAfterKill
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; // import the package/class this method depends on
@Test
public void testTooManyFetchFailureAfterKill() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(
      new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);
  TaskAttemptImpl taImpl =
      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
          splits, jobConf, taListener,
          mock(Token.class), new Credentials(),
          new SystemClock(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
      container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_DONE));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertEquals("Task attempt is not in succeeded state", taImpl.getState(),
      TaskAttemptState.SUCCEEDED);
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_KILL));
  assertEquals("Task attempt is not in KILLED state", taImpl.getState(),
      TaskAttemptState.KILLED);
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
  assertEquals("Task attempt is not in KILLED state, still", taImpl.getState(),
      TaskAttemptState.KILLED);
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
}