

Java TaskState Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.v2.api.records.TaskState. If you are wondering what the TaskState class does, how to use it, or where to find usage examples, the curated code samples below may help.


The TaskState class belongs to the org.apache.hadoop.mapreduce.v2.api.records package. The sections below present 15 code examples of the TaskState class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
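Before the numbered examples, here is a minimal orientation sketch, not taken from any of the projects below: it reads a task's TaskState from its TaskReport and checks whether the state is terminal. The class name TaskStateSketch and the helper isFinished are hypothetical names introduced only for illustration, and the imports assume the same Task and TaskReport types used throughout the examples that follow.

import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.app.job.Task;

public class TaskStateSketch {
  // Hypothetical helper: returns true once the task's externally visible state
  // has reached a terminal value (SUCCEEDED, FAILED, or KILLED).
  static boolean isFinished(Task task) {
    TaskReport report = task.getReport();      // same report type used in Example 1
    TaskState state = report.getTaskState();
    return state == TaskState.SUCCEEDED
        || state == TaskState.FAILED
        || state == TaskState.KILLED;
  }
}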

Example 1: TaskInfo

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
public TaskInfo(Task task) {
  TaskType ttype = task.getType();
  this.type = ttype.toString();
  TaskReport report = task.getReport();
  this.startTime = report.getStartTime();
  this.finishTime = report.getFinishTime();
  this.state = report.getTaskState();
  this.elapsedTime = Times.elapsed(this.startTime, this.finishTime,
    this.state == TaskState.RUNNING);
  if (this.elapsedTime == -1) {
    this.elapsedTime = 0;
  }
  this.progress = report.getProgress() * 100;
  this.status =  report.getStatus();
  this.id = MRApps.toString(task.getID());
  this.taskNum = task.getID().getId();
  this.successful = getSuccessfulAttempt(task);
  if (successful != null) {
    this.successfulAttempt = MRApps.toString(successful.getID());
  } else {
    this.successfulAttempt = "";
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source file: TaskInfo.java

Example 2: transition

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
@Override
public JobStateInternal transition(JobImpl job, JobEvent event) {
  job.completedTaskCount++;
  LOG.info("Num completed Tasks: " + job.completedTaskCount);
  JobTaskEvent taskEvent = (JobTaskEvent) event;
  Task task = job.tasks.get(taskEvent.getTaskID());
  if (taskEvent.getState() == TaskState.SUCCEEDED) {
    taskSucceeded(job, task);
  } else if (taskEvent.getState() == TaskState.FAILED) {
    taskFailed(job, task);
  } else if (taskEvent.getState() == TaskState.KILLED) {
    taskKilled(job, task);
  }

  return checkJobAfterTaskCompletion(job);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: JobImpl.java

Example 3: testJobError

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
@Test
public void testJobError() throws Exception {
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);

  //send an invalid event on task at current state
  app.getContext().getEventHandler().handle(
      new TaskEvent(
          task.getID(), TaskEventType.T_SCHEDULE));

  //this must lead to job error
  app.waitForState(job, JobState.ERROR);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source file: TestMRApp.java

Example 4: testJobRebootNotLastRetryOnUnregistrationFailure

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
@Test
public void testJobRebootNotLastRetryOnUnregistrationFailure()
    throws Exception {
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);

  //send a reboot event
  app.getContext().getEventHandler().handle(new JobEvent(job.getID(),
    JobEventType.JOB_AM_REBOOT));

  // return external state as RUNNING since otherwise the JobClient will
  // prematurely exit.
  app.waitForState(job, JobState.RUNNING);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source file: TestMRApp.java

Example 5: testJobRebootOnLastRetryOnUnregistrationFailure

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
@Test
public void testJobRebootOnLastRetryOnUnregistrationFailure()
    throws Exception {
  // set startCount to 2 since this is the last retry, which equals
  // DEFAULT_MAX_AM_RETRY
  // The last param mocks the unregistration failure
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true, 2, false);

  Configuration conf = new Configuration();
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);

  //send a reboot event
  app.getContext().getEventHandler().handle(new JobEvent(job.getID(),
    JobEventType.JOB_AM_REBOOT));

  app.waitForInternalState((JobImpl) job, JobStateInternal.REBOOT);
  // return external state as RUNNING if this is the last retry while
  // unregistration fails
  app.waitForState(job, JobState.RUNNING);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source file: TestMRApp.java

Example 6: testMRAppHistory

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
private void testMRAppHistory(MRApp app) throws Exception {
  Configuration conf = new Configuration();
  Job job = app.submit(conf);
  app.waitForState(job, JobState.FAILED);
  Map<TaskId, Task> tasks = job.getTasks();

  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.FAILED, task
      .getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next()
      .getAttempts();
  Assert.assertEquals("Num attempts is not correct", 4, attempts.size());

  Iterator<TaskAttempt> it = attempts.values().iterator();
  TaskAttemptReport report = it.next().getReport();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
      report.getTaskAttemptState());
  Assert.assertEquals("Diagnostic Information is not Correct",
      "Test Diagnostic Event", report.getDiagnosticInfo());
  report = it.next().getReport();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
      report.getTaskAttemptState());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source file: TestTaskAttempt.java

Example 7: completeJobTasks

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
private static void completeJobTasks(JobImpl job) {
  // complete the map tasks and the reduce tasks so we start committing
  int numMaps = job.getTotalMaps();
  for (int i = 0; i < numMaps; ++i) {
    job.handle(new JobTaskEvent(
        MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
        TaskState.SUCCEEDED));
    Assert.assertEquals(JobState.RUNNING, job.getState());
  }
  int numReduces = job.getTotalReduces();
  for (int i = 0; i < numReduces; ++i) {
    job.handle(new JobTaskEvent(
        MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
        TaskState.SUCCEEDED));
    Assert.assertEquals(JobState.RUNNING, job.getState());
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source file: TestJobImpl.java

Example 8: testFailTask

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
@Test
//The first attempt fails and the second attempt passes;
//the job succeeds.
public void testFailTask() throws Exception {
  MRApp app = new MockFirstFailingAttemptMRApp(1, 0);
  Configuration conf = new Configuration();
  // this test requires two task attempts, but uberization overrides max to 1
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.SUCCEEDED);
  Map<TaskId,Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
      task.getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts =
      tasks.values().iterator().next().getAttempts();
  Assert.assertEquals("Num attempts is not correct", 2, attempts.size());
  //one attempt must have failed
  //and the other must have succeeded
  Iterator<TaskAttempt> it = attempts.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
      it.next().getReport().getTaskAttemptState());
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
      it.next().getReport().getTaskAttemptState());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source file: TestFail.java

Example 9: testTimedOutTask

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
@Test
//All Task attempts are timed out, leading to Job failure
public void testTimedOutTask() throws Exception {
  MRApp app = new TimeOutTaskMRApp(1, 0);
  Configuration conf = new Configuration();
  int maxAttempts = 2;
  conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
  // disable uberization (requires entire job to be reattempted, so max for
  // subtask attempts is overridden to 1)
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.FAILED);
  Map<TaskId,Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.FAILED,
      task.getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts =
      tasks.values().iterator().next().getAttempts();
  Assert.assertEquals("Num attempts is not correct", maxAttempts,
      attempts.size());
  for (TaskAttempt attempt : attempts.values()) {
    Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
        attempt.getReport().getTaskAttemptState());
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source file: TestFail.java

Example 10: fromYarn

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
public static org.apache.hadoop.mapred.TIPStatus fromYarn(
    TaskState state) {
  switch (state) {
  case NEW:
  case SCHEDULED:
    return org.apache.hadoop.mapred.TIPStatus.PENDING;
  case RUNNING:
    return org.apache.hadoop.mapred.TIPStatus.RUNNING;
  case KILLED:
    return org.apache.hadoop.mapred.TIPStatus.KILLED;
  case SUCCEEDED:
    return org.apache.hadoop.mapred.TIPStatus.COMPLETE;
  case FAILED:
    return org.apache.hadoop.mapred.TIPStatus.FAILED;
  }
  throw new YarnRuntimeException("Unrecognized task state: " + state);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source file: TypeConverter.java

Example 11: testEnums

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));
  
  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }
  
  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }
  
  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }
  
  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source file: TestTypeConverter.java

Example 12: computeFinishedMaps

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
private long computeFinishedMaps(JobInfo jobInfo, int numMaps,
    int numSuccessfulMaps) {
  if (numMaps == numSuccessfulMaps) {
    return jobInfo.getFinishedMaps();
  }

  long numFinishedMaps = 0;
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      ++numFinishedMaps;
    }
  }
  return numFinishedMaps;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: TestJobHistoryParsing.java

Example 13: getTask

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
private Task getTask(long timestamp) {
  
  JobId jobId = new JobIdPBImpl();
  jobId.setId(0);
  jobId.setAppId(ApplicationIdPBImpl.newInstance(timestamp,1));

  TaskId taskId = new TaskIdPBImpl();
  taskId.setId(0);
  taskId.setTaskType(TaskType.REDUCE);
  taskId.setJobId(jobId);
  Task task = mock(Task.class);
  when(task.getID()).thenReturn(taskId);
  TaskReport report = mock(TaskReport.class);
  when(report.getProgress()).thenReturn(0.7f);
  when(report.getTaskState()).thenReturn(TaskState.SUCCEEDED);
  when(report.getStartTime()).thenReturn(100001L);
  when(report.getFinishTime()).thenReturn(100011L);

  when(task.getReport()).thenReturn(report);
  when(task.getType()).thenReturn(TaskType.REDUCE);
  return task;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source file: TestBlocks.java

Example 14: getState

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
@Override
public TaskState getState() {
  readLock.lock();
  try {
    return getExternalState(getInternalState());
  } finally {
    readLock.unlock();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 10, Source file: TaskImpl.java

Example 15: getExternalState

import org.apache.hadoop.mapreduce.v2.api.records.TaskState; // import the required package/class
private static TaskState getExternalState(TaskStateInternal smState) {
  if (smState == TaskStateInternal.KILL_WAIT) {
    return TaskState.KILLED;
  } else {
    return TaskState.valueOf(smState.name());
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source file: TaskImpl.java


Note: The org.apache.hadoop.mapreduce.v2.api.records.TaskState class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.