

Java TaskAttemptImpl Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl. If you are wondering what the TaskAttemptImpl class does, how to use it, or where to find usage examples, the curated code examples below may help.


The TaskAttemptImpl class belongs to the org.apache.hadoop.mapreduce.v2.app.job.impl package. Four code examples of the class are presented below, ordered by popularity.
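
All four examples follow the same pattern: a test built on the MRApp harness submits a job, looks up a TaskAttempt through the Job and Task interfaces, and casts it to TaskAttemptImpl to reach implementation-level state. A minimal sketch of that pattern (assuming a job already submitted through MRApp, as in the examples below):

import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;

// Sketch only: `job` is assumed to come from MRApp#submit, as in the examples below.
Task task = job.getTasks().values().iterator().next();
TaskAttempt attempt = task.getAttempts().values().iterator().next();
// The public API exposes the TaskAttempt interface; tests cast to
// TaskAttemptImpl to inspect internal details such as getInternalState().
TaskAttemptImpl taskAttemptImpl = (TaskAttemptImpl) attempt;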

Example 1: waitForInternalState

import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl; // import the required package/class
// Polls the attempt's internal state every 500 ms, up to 20 times (about
// 10 seconds), until it reaches finalState, then asserts on the result.
public void waitForInternalState(TaskAttemptImpl attempt,
    TaskAttemptStateInternal finalState) throws Exception {
  int timeoutSecs = 0;
  TaskAttemptReport report = attempt.getReport();
  TaskAttemptStateInternal iState = attempt.getInternalState();
  while (!finalState.equals(iState) && timeoutSecs++ < 20) {
    System.out.println("TaskAttempt Internal State is : " + iState
        + " Waiting for Internal state : " + finalState + "   progress : "
        + report.getProgress());
    Thread.sleep(500);
    report = attempt.getReport();
    iState = attempt.getInternalState();
  }
  System.out.println("TaskAttempt Internal State is : " + iState);
  Assert.assertEquals("TaskAttempt Internal state is not correct (timedout)",
      finalState, iState);
}
 
Developer: naver, Project: hadoop, Lines of code: 18, Source file: MRApp.java
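
In the tests below (Examples 3 and 4), this helper is invoked from an MRApp-based test once an attempt exists. A minimal usage sketch (assuming `app` is the MRApp instance and `attempt` a TaskAttempt obtained from the job, as in those examples):

// Usage sketch, mirroring Examples 3 and 4 below:
app.waitForInternalState((TaskAttemptImpl) attempt,
    TaskAttemptStateInternal.ASSIGNED);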

Example 2: testContainerPassThrough

import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl; // import the required package/class
@Test
public void testContainerPassThrough() throws Exception {
  MRApp app = new MRApp(0, 1, true, this.getClass().getName(), true) {
    @Override
    protected ContainerLauncher createContainerLauncher(AppContext context) {
      return new MockContainerLauncher() {
        @Override
        public void handle(ContainerLauncherEvent event) {
          if (event instanceof ContainerRemoteLaunchEvent) {
            containerObtainedByContainerLauncher =
                ((ContainerRemoteLaunchEvent) event).getAllocatedContainer();
          }
          super.handle(event);
        }
      };
    }
  };
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.SUCCEEDED);
  app.verifyCompleted();

  Collection<Task> tasks = job.getTasks().values();
  Collection<TaskAttempt> taskAttempts =
      tasks.iterator().next().getAttempts().values();
  TaskAttemptImpl taskAttempt =
      (TaskAttemptImpl) taskAttempts.iterator().next();
  // Container from RM should pass through to the launcher. Container object
  // should be the same.
  Assert.assertTrue(taskAttempt.container
      == containerObtainedByContainerLauncher);
}
 
Developer: naver, Project: hadoop, Lines of code: 32, Source file: TestMRApp.java

Example 3: testTaskFailWithUnusedContainer

import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl; // import the required package/class
@Test
public void testTaskFailWithUnusedContainer() throws Exception {
  MRApp app = new MRAppWithFailingTaskAndUnusedContainer();
  Configuration conf = new Configuration();
  int maxAttempts = 1;
  conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
  // disable uberization (requires entire job to be reattempted, so max for
  // subtask attempts is overridden to 1)
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Map<TaskId, Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  app.waitForState(task, TaskState.SCHEDULED);
  Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator()
      .next().getAttempts();
  Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts
      .size());
  TaskAttempt attempt = attempts.values().iterator().next();
  app.waitForInternalState((TaskAttemptImpl) attempt,
      TaskAttemptStateInternal.ASSIGNED);
  app.getDispatcher().getEventHandler().handle(
      new TaskAttemptEvent(attempt.getID(),
          TaskAttemptEventType.TA_CONTAINER_COMPLETED));
  app.waitForState(job, JobState.FAILED);
}
 
Developer: naver, Project: hadoop, Lines of code: 28, Source file: TestFail.java

Example 4: testSlowNM

import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl; // import the required package/class
@Test(timeout = 15000)
public void testSlowNM() throws Exception {

  conf = new Configuration();
  int maxAttempts = 1;
  conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  // set timeout low for the test
  conf.setInt("yarn.rpc.nm-command-timeout", 3000);
  conf.set(YarnConfiguration.IPC_RPC_IMPL, HadoopYarnProtoRPC.class.getName());
  YarnRPC rpc = YarnRPC.create(conf);
  String bindAddr = "localhost:0";
  InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
  NMTokenSecretManagerInNM tokenSecretManager =
      new NMTokenSecretManagerInNM();
  MasterKey masterKey = Records.newRecord(MasterKey.class);
  masterKey.setBytes(ByteBuffer.wrap("key".getBytes()));
  tokenSecretManager.setMasterKey(masterKey);
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
    "token");
  server =
      rpc.getServer(ContainerManagementProtocol.class,
        new DummyContainerManager(), addr, conf, tokenSecretManager, 1);
  server.start();

  MRApp app = new MRAppWithSlowNM(tokenSecretManager);

  try {
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);

    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());

    Task task = tasks.values().iterator().next();
    app.waitForState(task, TaskState.SCHEDULED);

    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator()
        .next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts,
        attempts.size());

    TaskAttempt attempt = attempts.values().iterator().next();
    app.waitForInternalState((TaskAttemptImpl) attempt,
        TaskAttemptStateInternal.ASSIGNED);

    app.waitForState(job, JobState.FAILED);

    String diagnostics = attempt.getDiagnostics().toString();
    LOG.info("attempt.getDiagnostics: " + diagnostics);

    Assert.assertTrue(diagnostics.contains("Container launch failed for "
        + "container_0_0000_01_000000 : "));
    Assert.assertTrue(diagnostics
        .contains("java.net.SocketTimeoutException: 3000 millis timeout while waiting for channel"));
  } finally {
    server.stop();
    app.stop();
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 63, Source file: TestContainerLauncher.java


Note: The org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl class examples in this article were compiled by 纯净天空 from open source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are selected from open source projects contributed by their respective authors, and copyright remains with the original authors; for distribution and use, please follow the corresponding project's license. Do not reproduce without permission.