

Java TaskAttemptEvent Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent. If you have been wondering what the TaskAttemptEvent class is for, how to use it, or where to find usage examples, the curated code samples below may help.


The TaskAttemptEvent class belongs to the org.apache.hadoop.mapreduce.v2.app.job.event package. The sections below show 15 code examples of the TaskAttemptEvent class, sorted by popularity by default.
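Before the individual examples, here is a minimal sketch of the usage pattern they all share: build a TaskAttemptEvent from a TaskAttemptId plus a TaskAttemptEventType and hand it to an EventHandler (inside the MRAppMaster this is usually context.getEventHandler()). The handler class, the logging, and the null placeholder id below are illustrative assumptions only, not code from the examples.

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.yarn.event.EventHandler;

// Illustrative handler: reacts to the event type carried by each TaskAttemptEvent.
public class LoggingTaskAttemptEventHandler
    implements EventHandler<TaskAttemptEvent> {

  @Override
  public void handle(TaskAttemptEvent event) {
    TaskAttemptId attemptId = event.getTaskAttemptID();
    // Every TaskAttemptEvent carries the target attempt id and an event type
    // (TA_DONE, TA_KILL, TA_FAILMSG, ...); TaskAttemptImpl's state machine
    // keys its transitions on that type.
    System.out.println("Attempt " + attemptId + " received " + event.getType());
  }

  public static void main(String[] args) {
    EventHandler<TaskAttemptEvent> handler = new LoggingTaskAttemptEventHandler();
    TaskAttemptId attemptId = null; // placeholder; the AM obtains it via TypeConverter.toYarn(...)
    handler.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_DONE));
  }
}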

Example 1: commitPending

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
/**
 * TaskAttempt is reporting that it is in commit_pending and is waiting for
 * the commit response.
 * 
 * <br>
 * Commit is a two-phase protocol. First the attempt informs the
 * ApplicationMaster that it is
 * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
 * the ApplicationMaster whether it {@link #canCommit(TaskAttemptID)}. This is
 * a legacy from the centralized commit protocol handling by the JobTracker.
 */
@Override
public void commitPending(TaskAttemptID taskAttemptID, TaskStatus taskStatus)
        throws IOException, InterruptedException {
  LOG.info("Commit-pending state update from " + taskAttemptID.toString());
  // An attempt is asking if it can commit its output. This can be decided
  // only by the task which is managing the multiple attempts. So redirect the
  // request there.
  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);

  taskHeartbeatHandler.progressing(attemptID);
  //Ignorable TaskStatus? - since a task will send a LastStatusUpdate
  context.getEventHandler().handle(
      new TaskAttemptEvent(attemptID, 
          TaskAttemptEventType.TA_COMMIT_PENDING));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source file: TaskAttemptListenerImpl.java
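The Javadoc above describes the commit as a two-phase protocol: the attempt first reports commitPending to the ApplicationMaster, then polls canCommit until the task that manages all attempts grants permission. As a hedged illustration of the task-side half, the helper below sketches that loop against TaskUmbilicalProtocol; the class name, method name, and one-second poll interval are assumptions for illustration, not code from the examples.

import java.io.IOException;

import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;

// Illustrative helper for the task-side half of the two-phase commit.
public class CommitPendingSketch {

  static void waitForCommitApproval(TaskUmbilicalProtocol umbilical,
      TaskAttemptID attemptId, TaskStatus status)
      throws IOException, InterruptedException {
    // Phase 1: report commit_pending; the listener above turns this into a
    // TaskAttemptEvent of type TA_COMMIT_PENDING for the attempt's state machine.
    umbilical.commitPending(attemptId, status);

    // Phase 2: keep polling until the managing task allows this attempt to commit.
    while (!umbilical.canCommit(attemptId)) {
      Thread.sleep(1000); // poll interval chosen only for illustration
    }
    // The caller may now commit the attempt's output.
  }
}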

Example 2: transition

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, 
    TaskAttemptEvent event) {
  // Tell any speculator that we're requesting a container
  taskAttempt.eventHandler.handle
      (new SpeculatorEvent(taskAttempt.getID().getTaskId(), +1));
  //request for container
  if (rescheduled) {
    taskAttempt.eventHandler.handle(
        ContainerRequestEvent.createContainerRequestEventForFailedContainer(
            taskAttempt.attemptId, 
            taskAttempt.resourceCapability));
  } else {
    taskAttempt.eventHandler.handle(new ContainerRequestEvent(
        taskAttempt.attemptId, taskAttempt.resourceCapability,
        taskAttempt.dataLocalHosts.toArray(
            new String[taskAttempt.dataLocalHosts.size()]),
        taskAttempt.dataLocalRacks.toArray(
            new String[taskAttempt.dataLocalRacks.size()])));
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source file: TaskAttemptImpl.java

Example 3: killTaskAttempt

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
@SuppressWarnings("unchecked")
@Override
public KillTaskAttemptResponse killTaskAttempt(
    KillTaskAttemptRequest request) throws IOException {
  TaskAttemptId taskAttemptId = request.getTaskAttemptId();
  UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
  String message = "Kill task attempt " + taskAttemptId
      + " received from " + callerUGI + " at "
      + Server.getRemoteAddress();
  LOG.info(message);
  verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB);
  appContext.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message));
  appContext.getEventHandler().handle(
      new TaskAttemptEvent(taskAttemptId, 
          TaskAttemptEventType.TA_KILL));
  KillTaskAttemptResponse response = 
    recordFactory.newRecordInstance(KillTaskAttemptResponse.class);
  return response;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source file: MRClientService.java

Example 4: failTaskAttempt

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
@SuppressWarnings("unchecked")
@Override
public FailTaskAttemptResponse failTaskAttempt(
    FailTaskAttemptRequest request) throws IOException {
  TaskAttemptId taskAttemptId = request.getTaskAttemptId();
  UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
  String message = "Fail task attempt " + taskAttemptId
      + " received from " + callerUGI + " at "
      + Server.getRemoteAddress();
  LOG.info(message);
  verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB);
  appContext.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message));
  appContext.getEventHandler().handle(
      new TaskAttemptEvent(taskAttemptId, 
          TaskAttemptEventType.TA_FAILMSG));
  FailTaskAttemptResponse response = recordFactory.
    newRecordInstance(FailTaskAttemptResponse.class);
  return response;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source file: MRClientService.java

Example 5: dispatch

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
@Override
protected void dispatch(Event event) {
  if (event instanceof TaskAttemptEvent) {
    TaskAttemptEvent attemptEvent = (TaskAttemptEvent) event;
    TaskAttemptId attemptID = ((TaskAttemptEvent) event).getTaskAttemptID();
    if (attemptEvent.getType() == this.attemptEventTypeToWait
        && attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0 ) {
      try {
        latch.await();
      } catch (InterruptedException e) {
        e.printStackTrace();
      }
    }
  }
  super.dispatch(event);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: TestKill.java

Example 6: attemptLaunched

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
@Override
protected void attemptLaunched(TaskAttemptId attemptID) {
  if (attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0) {
    //this blocks the first task's first attempt
    //the subsequent ones are completed
    try {
      latch.await();
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
  } else {
    getContext().getEventHandler().handle(
        new TaskAttemptEvent(attemptID,
            TaskAttemptEventType.TA_DONE));
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: TestKill.java

Example 7: handle

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
@Override
public void handle(ContainerLauncherEvent event) {
  switch (event.getType()) {
  case CONTAINER_REMOTE_LAUNCH:
    getContext().getEventHandler().handle(
        new TaskAttemptContainerLaunchedEvent(event.getTaskAttemptID(),
            shufflePort));
    
    attemptLaunched(event.getTaskAttemptID());
    break;
  case CONTAINER_REMOTE_CLEANUP:
    getContext().getEventHandler().handle(
        new TaskAttemptEvent(event.getTaskAttemptID(),
            TaskAttemptEventType.TA_CONTAINER_CLEANED));
    break;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source file: MRApp.java

Example 8: fatalError

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
@Override
public void fatalError(TaskAttemptID taskAttemptID, String msg)
    throws IOException {
  // This happens only in Child and in the Task.
  LOG.fatal("Task: " + taskAttemptID + " - exited : " + msg);
  reportDiagnosticInfo(taskAttemptID, "Error: " + msg);

  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);

  // handling checkpoints
  preemptionPolicy.handleFailedContainer(attemptID);

  context.getEventHandler().handle(
      new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 17, Source file: TaskAttemptListenerImpl.java

Example 9: transition

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, 
    TaskAttemptEvent event) {
  // unregister it to TaskAttemptListener so that it stops listening
  // for it
  taskAttempt.taskAttemptListener.unregister(
      taskAttempt.attemptId, taskAttempt.jvmID);

  if (event instanceof TaskAttemptKillEvent) {
    taskAttempt.addDiagnosticInfo(
        ((TaskAttemptKillEvent) event).getMessage());
  }

  taskAttempt.reportedStatus.progress = 1.0f;
  taskAttempt.updateProgressSplits();
  //send the cleanup event to containerLauncher
  taskAttempt.eventHandler.handle(new ContainerLauncherEvent(
      taskAttempt.attemptId, 
      taskAttempt.container.getId(), StringInterner
          .weakIntern(taskAttempt.container.getNodeId().toString()),
      taskAttempt.container.getContainerToken(),
      ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP));
}
 
Developer ID: yncxcw, Project: big-c, Lines of code: 25, Source file: TaskAttemptImpl.java

Example 10: failTaskAttempt

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
@SuppressWarnings("unchecked")
@Override
public FailTaskAttemptResponse failTaskAttempt(
    FailTaskAttemptRequest request) throws IOException {
  TaskAttemptId taskAttemptId = request.getTaskAttemptId();
  UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
  String message = "Fail task attempt " + taskAttemptId
      + " received from " + callerUGI + " at "
      + Server.getRemoteAddress();
  LOG.info(message);
  verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB);
  appContext.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message));
  appContext.getEventHandler().handle(
      new TaskAttemptEvent(taskAttemptId, 
          TaskAttemptEventType.TA_FAILMSG_BY_CLIENT));
  FailTaskAttemptResponse response = recordFactory.
    newRecordInstance(FailTaskAttemptResponse.class);
  return response;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 21, Source file: MRClientService.java

Example 11: testTaskAttemptDiagnosticEventOnFinishing

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
@Test
public void testTaskAttemptDiagnosticEventOnFinishing() throws Exception {
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);

  taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
      TaskAttemptEventType.TA_DONE));

  assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
      TaskAttemptState.SUCCEEDED);
  assertEquals("Task attempt's internal state is not " +
      "SUCCESS_FINISHING_CONTAINER", taImpl.getInternalState(),
      TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER);

  // TA_DIAGNOSTICS_UPDATE doesn't change state
  taImpl.handle(new TaskAttemptDiagnosticsUpdateEvent(taImpl.getID(),
      "Task got updated"));
  assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
      TaskAttemptState.SUCCEEDED);
  assertEquals("Task attempt's internal state is not " +
      "SUCCESS_FINISHING_CONTAINER", taImpl.getInternalState(),
      TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER);

  assertFalse("InternalError occurred", eventHandler.internalError);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 26, Source file: TestTaskAttempt.java

Example 12: testTimeoutWhileSuccessFinishing

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
@Test
public void testTimeoutWhileSuccessFinishing() throws Exception {
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);

  taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
      TaskAttemptEventType.TA_DONE));

  assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
      TaskAttemptState.SUCCEEDED);
  assertEquals("Task attempt's internal state is not " +
      "SUCCESS_FINISHING_CONTAINER", taImpl.getInternalState(),
      TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER);

  // If the task stays in SUCCESS_FINISHING_CONTAINER for too long,
  // TaskAttemptListenerImpl will time out the attempt.
  taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
      TaskAttemptEventType.TA_TIMED_OUT));
  assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
      TaskAttemptState.SUCCEEDED);
  assertEquals("Task attempt's internal state is not " +
      "SUCCESS_CONTAINER_CLEANUP", taImpl.getInternalState(),
      TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP);

  assertFalse("InternalError occurred", eventHandler.internalError);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 27, Source file: TestTaskAttempt.java

Example 13: handle

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
@Override
public void handle(ContainerLauncherEvent event) {
  switch (event.getType()) {
  case CONTAINER_REMOTE_LAUNCH:
    containerLaunched(event.getTaskAttemptID(), shufflePort);
    attemptLaunched(event.getTaskAttemptID());
    break;
  case CONTAINER_REMOTE_CLEANUP:
    getContext().getEventHandler().handle(
        new TaskAttemptEvent(event.getTaskAttemptID(),
            TaskAttemptEventType.TA_CONTAINER_CLEANED));
    break;
  case CONTAINER_COMPLETED:
    break;
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 17, Source file: MRApp.java

Example 14: done

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
@Override
public void done(TaskAttemptID taskAttemptID) throws IOException {
  LOG.info("Done acknowledgement from " + taskAttemptID.toString());

  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);

  taskHeartbeatHandler.progressing(attemptID);

  context.getEventHandler().handle(
      new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 13, Source file: TaskAttemptListenerImpl.java

Example 15: fatalError

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; // import the dependent package/class
@Override
public void fatalError(TaskAttemptID taskAttemptID, String msg)
    throws IOException {
  // This happens only in Child and in the Task.
  LOG.fatal("Task: " + taskAttemptID + " - exited : " + msg);
  reportDiagnosticInfo(taskAttemptID, "Error: " + msg);

  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  context.getEventHandler().handle(
      new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 13, Source file: TaskAttemptListenerImpl.java


Note: The org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to the license of the corresponding project before distributing or using the code, and do not reproduce this article without permission.