

Java TaskStateInternal Class Code Examples

This page collects typical usage examples of the Java class org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal. If you are wondering what the TaskStateInternal class is for and how it is used in practice, the curated examples below should help.


The TaskStateInternal class belongs to the org.apache.hadoop.mapreduce.v2.app.job package. Eight code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
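
For orientation before the examples: TaskStateInternal is an enum, and the terminal states you will see the examples pass around are SUCCEEDED, FAILED, and KILLED. The sketch below is a small, hypothetical helper (not part of Hadoop) that checks whether a state is terminal; the constant names used here match the Hadoop 2.x source as far as I know, but treat the exact set as an assumption and check your Hadoop version. It needs the hadoop-mapreduce-client-app jar on the classpath.

import org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal;
import java.util.EnumSet;

// Illustrative helper, not part of Hadoop: tests whether a task state is terminal.
// The constants referenced below are what the Hadoop 2.x enum defines, to my knowledge.
public class TaskStateUtil {
  private static final EnumSet<TaskStateInternal> TERMINAL =
      EnumSet.of(TaskStateInternal.SUCCEEDED,
                 TaskStateInternal.FAILED,
                 TaskStateInternal.KILLED);

  public static boolean isTerminal(TaskStateInternal state) {
    return TERMINAL.contains(state);
  }

  public static void main(String[] args) {
    for (TaskStateInternal state : TaskStateInternal.values()) {
      System.out.println(state + " terminal? " + isTerminal(state));
    }
  }
}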

Example 1: handle

import org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal; // import the required package/class
@Override
public void handle(TaskEvent event) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Processing " + event.getTaskID() + " of type "
        + event.getType());
  }
  try {
    writeLock.lock();
    TaskStateInternal oldState = getInternalState();
    try {
      stateMachine.doTransition(event.getType(), event);
    } catch (InvalidStateTransitonException e) {
      LOG.error("Can't handle this event at current state for "
          + this.taskId, e);
      internalError(event.getType());
    }
    if (oldState != getInternalState()) {
      LOG.info(taskId + " Task Transitioned from " + oldState + " to "
          + getInternalState());
    }

  } finally {
    writeLock.unlock();
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 26, Source file: TaskImpl.java
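
The handle() method above follows a common pattern: take the write lock, remember the state before the transition, let the state machine attempt the transition, log an error (and raise an internal error) on an illegal transition, and log the change only if the state actually moved. Below is a minimal, self-contained sketch of that pattern using a plain Java enum and ReentrantReadWriteLock instead of Hadoop's StateMachine; every name in it is hypothetical and only illustrates the locking and logging structure, not Hadoop's API.

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical stand-in for the lock-protected state-machine dispatch in TaskImpl.handle().
public class SimpleTaskStateMachine {
  enum State { NEW, RUNNING, SUCCEEDED, FAILED }
  enum EventType { T_SCHEDULE, T_ATTEMPT_SUCCEEDED, T_ATTEMPT_FAILED }

  private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
  private final Lock writeLock = rwLock.writeLock();
  private State state = State.NEW;

  public void handle(EventType event) {
    writeLock.lock();
    try {
      State oldState = state;
      try {
        state = transition(oldState, event);      // may throw on an illegal transition
      } catch (IllegalStateException e) {
        System.err.println("Can't handle " + event + " in state " + oldState + ": " + e.getMessage());
      }
      if (oldState != state) {
        System.out.println("Task transitioned from " + oldState + " to " + state);
      }
    } finally {
      writeLock.unlock();                         // always release, mirroring the finally block above
    }
  }

  private State transition(State from, EventType event) {
    switch (event) {
      case T_SCHEDULE:          if (from == State.NEW)     return State.RUNNING;   break;
      case T_ATTEMPT_SUCCEEDED: if (from == State.RUNNING) return State.SUCCEEDED; break;
      case T_ATTEMPT_FAILED:    if (from == State.RUNNING) return State.FAILED;    break;
    }
    throw new IllegalStateException("no transition for " + event + " from " + from);
  }

  public static void main(String[] args) {
    SimpleTaskStateMachine task = new SimpleTaskStateMachine();
    task.handle(EventType.T_SCHEDULE);            // NEW -> RUNNING
    task.handle(EventType.T_ATTEMPT_SUCCEEDED);   // RUNNING -> SUCCEEDED
    task.handle(EventType.T_ATTEMPT_FAILED);      // illegal here; logged, state unchanged
  }
}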

Example 2: createTaskFailedEvent

import org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal; // import the required package/class
private static TaskFailedEvent createTaskFailedEvent(TaskImpl task,
    List<String> diag, TaskStateInternal taskState, TaskAttemptId taId) {
  StringBuilder errorSb = new StringBuilder();
  if (diag != null) {
    for (String d : diag) {
      errorSb.append(", ").append(d);
    }
  }
  TaskFailedEvent taskFailedEvent = new TaskFailedEvent(
      TypeConverter.fromYarn(task.taskId),
      // Hack since getFinishTime needs isFinished to be true and that
      // doesn't happen till after the transition.
      task.getFinishTime(taId),
      TypeConverter.fromYarn(task.getType()),
      errorSb.toString(),
      taskState.toString(),
      taId == null ? null : TypeConverter.fromYarn(taId),
      task.getCounters());
  return taskFailedEvent;
}
 
Developer: naver, Project: hadoop, Lines of code: 19, Source file: TaskImpl.java
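
A note on the StringBuilder loop above: because every diagnostic is appended with a leading ", ", the resulting string starts with a separator too, which faithfully mirrors the original source. If you only need the joining behaviour in your own code, a hypothetical, self-contained alternative (not what Hadoop does) is String.join; note that it drops the leading separator the original loop produces.

import java.util.Arrays;
import java.util.List;

// Illustrative only: joining diagnostics with String.join instead of a StringBuilder loop.
public class DiagnosticsJoinExample {
  public static void main(String[] args) {
    List<String> diag = Arrays.asList("Container killed", "Exit code 137");
    String error = (diag == null) ? "" : String.join(", ", diag);
    System.out.println(error);   // prints: Container killed, Exit code 137
  }
}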

Example 3: transition

import org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal; // import the required package/class
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent taskTAttemptEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = taskTAttemptEvent.getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId, 
      TaskAttemptCompletionEventStatus.SUCCEEDED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  task.successfulAttempt = taskAttemptId;
  task.sendTaskSucceededEvents();
  for (TaskAttempt attempt : task.attempts.values()) {
    if (attempt.getID() != task.successfulAttempt &&
        // This is okay because it can only talk us out of sending a
        //  TA_KILL message to an attempt that doesn't need one for
        //  other reasons.
        !attempt.isFinished()) {
      LOG.info("Issuing kill to other attempt " + attempt.getID());
      task.eventHandler.handle(new TaskAttemptKillEvent(attempt.getID(),
          SPECULATION + task.successfulAttempt + " succeeded first!"));
    }
  }
  task.finished(TaskStateInternal.SUCCEEDED);
}
 
Developer: naver, Project: hadoop, Lines of code: 25, Source file: TaskImpl.java

Example 4: waitForInternalState

import org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal; // import the required package/class
public void waitForInternalState(TaskImpl task,
    TaskStateInternal finalState) throws Exception {
  int timeoutSecs = 0;
  TaskReport report = task.getReport();
  TaskStateInternal iState = task.getInternalState();
  while (!finalState.equals(iState) && timeoutSecs++ < 20) {
    System.out.println("Task Internal State is : " + iState
        + " Waiting for Internal state : " + finalState + "   progress : "
        + report.getProgress());
    Thread.sleep(500);
    report = task.getReport();
    iState = task.getInternalState();
  }
  System.out.println("Task Internal State is : " + iState);
  Assert.assertEquals("Task Internal state is not correct (timedout)",
      finalState, iState);
}
 
Developer: naver, Project: hadoop, Lines of code: 18, Source file: MRApp.java
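
The test helper above polls the task every 500 ms, gives up after roughly 10 seconds (20 polls), and then asserts on the state so a hang fails loudly. The same wait-then-assert pattern, extracted into a generic form with no Hadoop types, might look like the purely illustrative sketch below (all names are hypothetical, not part of MRApp):

import java.util.function.Supplier;

// Generic version of the polling loop in waitForInternalState: poll a condition
// every 500 ms for up to ~10 seconds, then fail loudly if it never became true.
public class WaitUtil {
  public static void waitFor(Supplier<Boolean> condition, String description)
      throws InterruptedException {
    int polls = 0;
    while (!condition.get() && polls++ < 20) {
      System.out.println("Waiting for: " + description);
      Thread.sleep(500);
    }
    if (!condition.get()) {
      throw new AssertionError("Timed out waiting for: " + description);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    long start = System.currentTimeMillis();
    waitFor(() -> System.currentTimeMillis() - start > 1500, "1.5 seconds to pass");
    System.out.println("Condition met.");
  }
}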

Example 5: handle

import org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal; // import the required package/class
@Override
public void handle(TaskEvent event) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Processing " + event.getTaskID() + " of type "
        + event.getType());
  }
  try {
    writeLock.lock();
    TaskStateInternal oldState = getInternalState();
    try {
      stateMachine.doTransition(event.getType(), event);
    } catch (InvalidStateTransitionException e) {
      LOG.error("Can't handle this event at current state for "
          + this.taskId, e);
      internalError(event.getType());
    }
    if (oldState != getInternalState()) {
      LOG.info(taskId + " Task Transitioned from " + oldState + " to "
          + getInternalState());
    }

  } finally {
    writeLock.unlock();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 26, Source file: TaskImpl.java

Example 6: transition

import org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal; // import the required package/class
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent taskTAttemptEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = taskTAttemptEvent.getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId, 
      TaskAttemptCompletionEventStatus.SUCCEEDED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  task.successfulAttempt = taskAttemptId;
  task.sendTaskSucceededEvents();
  for (TaskAttempt attempt : task.attempts.values()) {
    if (attempt.getID() != task.successfulAttempt &&
        // This is okay because it can only talk us out of sending a
        //  TA_KILL message to an attempt that doesn't need one for
        //  other reasons.
        !attempt.isFinished()) {
      LOG.info("Issuing kill to other attempt " + attempt.getID());
      task.eventHandler.handle(
          new TaskAttemptEvent(attempt.getID(), 
              TaskAttemptEventType.TA_KILL));
    }
  }
  task.finished(TaskStateInternal.SUCCEEDED);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 26, Source file: TaskImpl.java

Example 7: sendTaskSucceededEvents

import org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal; // import the required package/class
private void sendTaskSucceededEvents() {
  JobTaskEvent jobTaskEvent = new JobTaskEvent(taskId, TaskState.SUCCEEDED);
  long totalTime = this.getFinishTime() - this.getLaunchTime();
  long hdfsRecords = this.getSuccessfulAttempt().getCounters()
      .findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue();
  long executionTime = this.getSuccessfulAttempt().getEndExecutionTime()
      - this.getSuccessfulAttempt().getBeginExecutionTime();
  double executionSpeed = hdfsRecords * 1.0 / executionTime;   // records processed per unit of execution time
  double executionRatio = 1.0 * executionTime / totalTime;     // share of the task's lifetime spent executing
  LOG.info("inform");
  LOG.info("hdfsRecords:" + hdfsRecords);
  LOG.info("executionTime:" + executionTime);
  LOG.info("totalTime:" + totalTime);
  LOG.info("executionSpeed:" + executionSpeed);
  LOG.info("executionRatio:" + executionRatio);
  LOG.info("host:" + this.getSuccessfulAttempt().getNodeId().getHost());
  LOG.info("/inform");
  jobTaskEvent.setTaskExecutionTime((long) executionSpeed);
  jobTaskEvent.setTaskExecutionRatio(executionRatio);
  jobTaskEvent.setAttemptId(successfulAttempt);
  eventHandler.handle(jobTaskEvent);
  if (historyTaskStartGenerated) {
    TaskFinishedEvent tfe = createTaskFinishedEvent(this,
        TaskStateInternal.SUCCEEDED);
    eventHandler.handle(new JobHistoryEvent(taskId.getJobId(), tfe));
  }
}
 
Developer: yncxcw, Project: FlexMap, Lines of code: 26, Source file: TaskImpl.java
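
To make the two metrics computed above concrete: executionSpeed divides the map input record count by the successful attempt's execution time, and executionRatio divides that execution time by the task's total wall-clock time. The sketch below reruns the arithmetic with made-up numbers; the time unit is whatever getBeginExecutionTime/getEndExecutionTime report in FlexMap (presumably milliseconds, but that is an assumption).

// Hypothetical numbers, just to illustrate the arithmetic in sendTaskSucceededEvents().
public class TaskMetricsExample {
  public static void main(String[] args) {
    long hdfsRecords   = 1_000_000L;            // MAP_INPUT_RECORDS counter value
    long executionTime = 40_000L;               // end - begin of actual execution
    long totalTime     = 50_000L;               // finish - launch of the whole task
    double executionSpeed = hdfsRecords * 1.0 / executionTime;   // 25.0 records per time unit
    double executionRatio = 1.0 * executionTime / totalTime;     // 0.8 of the task's lifetime
    System.out.println("executionSpeed = " + executionSpeed);
    System.out.println("executionRatio = " + executionRatio);
  }
}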

Example 8: transition

import org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal; // import the required package/class
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent taskTAttemptEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = taskTAttemptEvent.getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId, 
      TaskAttemptCompletionEventStatus.SUCCEEDED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  task.successfulAttempt = taskAttemptId;
  for (TaskAttempt attempt : task.attempts.values()) {
    if (attempt.getID() != task.successfulAttempt &&
        // This is okay because it can only talk us out of sending a
        //  TA_KILL message to an attempt that doesn't need one for
        //  other reasons.
        !attempt.isFinished()) {
      LOG.info("Issuing kill to other attempt " + attempt.getID());
      task.eventHandler.handle(new TaskAttemptKillEvent(attempt.getID(),
          SPECULATION + task.successfulAttempt + " succeeded first!"));
    }
  }
  task.finished(TaskStateInternal.SUCCEEDED);
  task.sendTaskSucceededEvents();
}
 
Developer: yncxcw, Project: FlexMap, Lines of code: 25, Source file: TaskImpl.java


Note: The org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal class examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs, with snippets selected from open-source projects contributed by their respective developers. Copyright of the source code remains with the original authors; consult each project's license before distributing or using the code, and do not reproduce this page without permission.