

Java NetworkedJob.killTask Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.JobClient.NetworkedJob.killTask. If you have been wrestling with questions such as: what exactly does NetworkedJob.killTask do? How is NetworkedJob.killTask called? Where can I find examples of NetworkedJob.killTask in use? Then the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapred.JobClient.NetworkedJob.


The following presents 5 code examples of the NetworkedJob.killTask method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
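
Before the examples, here is a minimal standalone sketch of the call pattern they all share: locate a running job through a JobClient, build the TaskAttemptID of the attempt to act on, and call killTask. The JobConf setup, the job id, and the attempt id string below are illustrative assumptions only; the tests below obtain their job and task handles from a distributed test harness instead, and build the NetworkedJob directly from a JobStatus.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskAttemptID;

public class KillTaskSketch {
  public static void main(String[] args) throws IOException {
    // Assumption: the default JobConf points at a live JobTracker.
    JobClient jobClient = new JobClient(new JobConf());

    // Hypothetical id of an already-running job.
    JobID jobId = JobID.forName("job_201201010000_0001");

    // getJob() returns the job handle (a JobClient.NetworkedJob under the hood);
    // the examples below build the NetworkedJob directly from a JobStatus.
    // Note that NetworkedJob's constructors and visibility vary across Hadoop
    // versions (compare example 5 with the others).
    RunningJob job = jobClient.getJob(jobId);

    // Hypothetical attempt id of the task attempt to kill or fail.
    TaskAttemptID attemptId =
        TaskAttemptID.forName("attempt_201201010000_0001_m_000000_0");

    // shouldFail = true marks the attempt FAILED (it counts toward
    // mapred.map.max.attempts); shouldFail = false only kills it, and the
    // framework schedules another attempt.
    job.killTask(attemptId, true);
  }
}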

Example 1: testFailedTaskJobStatus

import org.apache.hadoop.mapred.JobClient.NetworkedJob; // import the package/class the method depends on
/**
 * Verifies whether the running job still succeeds
 * after some of its task attempts are failed.
 */
@Test
public void testFailedTaskJobStatus() throws IOException, 
        InterruptedException {
  conf = remoteJTClient.getDaemonConf();
  TaskInfo taskInfo = null;
  SleepJob job = new SleepJob();
  job.setConf(conf);
  JobConf jobConf = job.setupJobConf(1, 1, 10000, 4000, 100, 100);
  RunningJob runJob = jobClient.submitJob(jobConf);
  JobID jobId = runJob.getID();
  JobInfo jInfo = remoteJTClient.getJobInfo(jobId);
  Assert.assertTrue("Job has not been started for 1 min.", 
      jtClient.isJobStarted(jobId));
  TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(jobId);
  for (TaskInfo taskinfo : taskInfos) {
    if (!taskinfo.isSetupOrCleanup() && taskinfo.getTaskID().isMap()) {
      taskInfo = taskinfo;
      break;
    }
  }
  Assert.assertTrue("Task has not been started for 1 min.", 
      jtClient.isTaskStarted(taskInfo));

  // Fail the running task.
  NetworkedJob networkJob = jobClient.new NetworkedJob(jInfo.getStatus());
  TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
  TaskAttemptID taskAttID = new TaskAttemptID(tID , 0);
  networkJob.killTask(taskAttID, true);

  LOG.info("Waiting till the job is completed...");
  while (!jInfo.getStatus().isJobComplete()) {
    UtilsForTests.waitFor(100);
    jInfo = remoteJTClient.getJobInfo(jobId);
  }
  Assert.assertEquals("JobStatus", JobStatus.SUCCEEDED, 
     jInfo.getStatus().getRunState());
}
 
Contributor: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 42, Source: TestTaskKilling.java

Example 2: testJobCleanupAfterJobFail

import org.apache.hadoop.mapred.JobClient.NetworkedJob; // import the package/class the method depends on
/**
 * Submit a job and, under the task attempt id folder, create folders
 * and files with non-writable permissions in the work folder.
 * Fail the job and verify whether those files and folders
 * are cleaned up.
 * @throws IOException
 */
@Test
public void testJobCleanupAfterJobFail() throws IOException {
  HashMap<TTClient,ArrayList<String>> map = 
      new HashMap<TTClient,ArrayList<String>>();
  conf = rtClient.getDaemonConf();
  SleepJob job = new SleepJob();
  job.setConf(conf);
  JobConf jobConf = job.setupJobConf(1, 0, 10000,0, 10, 10);
  JobClient client = jtClient.getClient();
  RunningJob runJob = client.submitJob(jobConf);
  JobID jobId = runJob.getID();
  JobInfo jobInfo = rtClient.getJobInfo(jobId);
  Assert.assertTrue("Job has not been started for 1 min", 
      jtClient.isJobStarted(jobId));
  TaskInfo [] taskInfos = rtClient.getTaskInfo(jobId);
  boolean isFailTask = false;
  for (TaskInfo taskinfo : taskInfos) {
    if (!taskinfo.isSetupOrCleanup()) {        
      Assert.assertTrue("Task has not been started for 1 min ",
          jtClient.isTaskStarted(taskinfo));
      String tasktracker = getTaskTracker(taskinfo);
      Assert.assertNotNull("TaskTracker has not been found", tasktracker);
      TTClient ttClient = getTTClient(tasktracker);
      map.put(ttClient, getTTClientMapRedLocalDirs(ttClient, 
          taskinfo, jobId));
      if (!isFailTask) {
        Assert.assertNotNull("TaskInfo is null.", taskinfo);
        TaskID taskId = TaskID.downgrade(taskinfo.getTaskID());
        TaskAttemptID taskAttID = new TaskAttemptID(taskId, 
            taskinfo.numFailedAttempts());
        int MAX_MAP_TASK_ATTEMPTS = Integer.
             parseInt(jobConf.get("mapred.map.max.attempts"));
        while(taskinfo.numFailedAttempts() < MAX_MAP_TASK_ATTEMPTS) {
          NetworkedJob networkJob = jtClient.getClient().
             new NetworkedJob(jobInfo.getStatus());
          networkJob.killTask(taskAttID, true);
          taskinfo = rtClient.getTaskInfo(taskinfo.getTaskID());
          taskAttID = new TaskAttemptID(taskId, taskinfo.numFailedAttempts());
          jobInfo = rtClient.getJobInfo(jobId);
        }
        isFailTask=true;
      }
    }
  }
  LOG.info("Waiting till the job is completed...");
  Assert.assertTrue("Job has not been completed for 1 min",
      jtClient.isJobStopped(jobId));
  jobInfo = rtClient.getJobInfo(jobId);
  Assert.assertEquals("Job has not been failed", 
          jobInfo.getStatus().getRunState(), JobStatus.FAILED);
  UtilsForTests.waitFor(3000); 
  Assert.assertTrue("Directories have not been cleaned up " + 
      "after completion of job", verifyJobDirectoryCleanup(map));
}
 
Contributor: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 62, Source: TestJobCacheDirectoriesCleanUp.java

Example 3: testStatusOfKilledTaskWithSignalSleepTime

import org.apache.hadoop.mapred.JobClient.NetworkedJob; // import the package/class the method depends on
/**
 * Set the tasks' sleep time before SIGKILL to 3 seconds and kill a task attempt.
 * Verify that the task is not actually killed until those 3 seconds have elapsed.
 */
@Test
public void testStatusOfKilledTaskWithSignalSleepTime() 
    throws IOException, Exception {
  String runtimeArgs [] = {
      "-D", "mapred.job.name=Numbers Sum",
      "-D", "mapred.map.tasks=1",
      "-D", "mapred.reduce.tasks=1",
      "-D", "mapred.tasktracker.tasks.sleeptime-before-sigkill=3000" };

  JobID jobId = getJobIdOfRunningStreamJob(runtimeArgs);    
  Assert.assertNotNull("Job ID not found for 1 min", jobId);
  Assert.assertTrue("Job has not been started for 1 min.", 
      jtClient.isJobStarted(jobId));
  
  TaskInfo taskInfo = getTaskInfoOfRunningStreamJob(jobId);
  Assert.assertNotNull("TaskInfo is null",taskInfo);
  Assert.assertTrue("Task has not been started for 1 min.", 
      jtClient.isTaskStarted(taskInfo));

  JobInfo jInfo = wovenClient.getJobInfo(jobId); 
  NetworkedJob networkJob = client.new NetworkedJob(jInfo.getStatus());
  TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
  TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
  networkJob.killTask(taskAttID, false);

  int counter = 0;
  while (counter++ < 60) {
    if (taskInfo.getTaskStatus().length == 0) {
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else if (taskInfo.getTaskStatus()[0].getRunState() == 
        TaskStatus.State.RUNNING) {
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else if (taskInfo.getTaskStatus()[0].getRunState() == 
        TaskStatus.State.KILLED_UNCLEAN) {
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else {
      break;
    }
  }
  Assert.assertTrue("Task has been killed before sigkill " + 
      "sleep time of 3 secs.", counter > 3 && TaskStatus.State.KILLED == 
      taskInfo.getTaskStatus()[0].getRunState());

  LOG.info("Waiting till the job is completed...");
  while (!jInfo.getStatus().isJobComplete()) {
    UtilsForTests.waitFor(100);
    jInfo = wovenClient.getJobInfo(jobId);
  }
  Assert.assertEquals("Job has not been succeeded.", 
          jInfo.getStatus().getRunState(), JobStatus.SUCCEEDED);
}
 
Contributor: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 59, Source: TestTaskKillingOfStreamingJob.java

Example 4: testStreamingJobStatusForFailedTask

import org.apache.hadoop.mapred.JobClient.NetworkedJob; // import the package/class the method depends on
/**
 * Set the maximum attempts for maps and reducers to one.
 * Fail the task and verify whether the streaming job fails.
 */
@Test
public void testStreamingJobStatusForFailedTask() throws IOException {
  String runtimeArgs [] = {
      "-D", "mapred.job.name=Numbers Sum",
      "-D", "mapred.map.tasks=1",
      "-D", "mapred.reduce.tasks=1",
      "-D", "mapred.map.max.attempts=1",
      "-D", "mapred.reduce.max.attempts=1"};

  JobID jobId = getJobIdOfRunningStreamJob(runtimeArgs);
  Assert.assertNotNull("Job ID not found for 1 min", jobId);
  Assert.assertTrue("Job has not been started for 1 min.", 
      jtClient.isJobStarted(jobId));

  TaskInfo taskInfo = getTaskInfoOfRunningStreamJob(jobId);
  Assert.assertNotNull("TaskInfo is null",taskInfo);
  Assert.assertTrue("Task has not been started for 1 min.", 
      jtClient.isTaskStarted(taskInfo));
  
  JobInfo jInfo = wovenClient.getJobInfo(jobId);
  NetworkedJob networkJob = client.new NetworkedJob(jInfo.getStatus());
  TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
  TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
  networkJob.killTask(taskAttID, true);

  int counter = 0;
  while (counter++ < 60) {
    if (taskInfo.getTaskStatus().length == 0) {
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else if (taskInfo.getTaskStatus()[0].getRunState() == 
        TaskStatus.State.RUNNING) {
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else if (taskInfo.getTaskStatus()[0].getRunState() == 
        TaskStatus.State.FAILED_UNCLEAN) {
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    } else {
      break;
    }
  }
  Assert.assertTrue("Task has not been Failed" , TaskStatus.State.FAILED == 
      taskInfo.getTaskStatus()[0].getRunState());

  LOG.info("Waiting till the job is completed...");
  while (!jInfo.getStatus().isJobComplete()) {
    UtilsForTests.waitFor(100);
    jInfo = wovenClient.getJobInfo(jobId);
  }
  Assert.assertEquals("Job has not been failed", 
      jInfo.getStatus().getRunState(), JobStatus.FAILED);
}
 
Contributor: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 58, Source: TestTaskKillingOfStreamingJob.java

Example 5: testFailedTaskJobStatus

import org.apache.hadoop.mapred.JobClient.NetworkedJob; // import the package/class the method depends on
/**
 * Verifies whether the running job still succeeds after some of its
 * task attempts are failed.
 * 
 * @throws ClassNotFoundException
 */
@Test
public void testFailedTaskJobStatus()
    throws IOException, InterruptedException, ClassNotFoundException {
  Configuration conf = new Configuration(cluster.getConf());
  TaskInfo taskInfo = null;
  SleepJob job = new SleepJob();
  job.setConf(conf);
  Job slpJob = job.createJob(3, 1, 4000, 4000, 100, 100);
  JobConf jobConf = new JobConf(conf);
  jobConf.setMaxMapAttempts(20);
  jobConf.setMaxReduceAttempts(20);
  slpJob.submit();
  RunningJob runJob =
      jobClient.getJob(org.apache.hadoop.mapred.JobID.downgrade(slpJob
          .getJobID()));
  JobID id = runJob.getID();
  JobInfo jInfo = remoteJTClient.getJobInfo(id);
  int counter = 0;
  while (counter < 60) {
    if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
      break;
    } else {
      UtilsForTests.waitFor(1000);
      jInfo = remoteJTClient.getJobInfo(id);
    }
    counter++;
  }
  Assert.assertTrue("Job has not been started for 1 min.", counter != 60);

  TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
  for (TaskInfo taskinfo : taskInfos) {
    if (!taskinfo.isSetupOrCleanup()) {
      taskInfo = taskinfo;
    }
  }

  counter = 0;
  taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
  while (counter < 60) {
    if (taskInfo.getTaskStatus().length > 0) {
      if (taskInfo.getTaskStatus()[0].getRunState() == TaskStatus.State.RUNNING) {
        break;
      }
    }
    UtilsForTests.waitFor(1000);
    taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
    counter++;
  }
  Assert.assertTrue("Task has not been started for 1 min.", counter != 60);

  NetworkedJob networkJob =
      new JobClient.NetworkedJob(jInfo.getStatus(), jobClient.cluster);
  TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
  TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
  networkJob.killTask(taskAttID, false);

  LOG.info("Waiting till the job is completed...");
  while (!jInfo.getStatus().isJobComplete()) {
    UtilsForTests.waitFor(100);
    jInfo = remoteJTClient.getJobInfo(id);
  }

  Assert.assertEquals(
      "JobStatus", JobStatus.SUCCEEDED, jInfo.getStatus().getRunState());
}
 
Contributor: rekhajoshm, Project: mapreduce-fork, Lines of code: 71, Source: TestTaskKilling.java


Note: the org.apache.hadoop.mapred.JobClient.NetworkedJob.killTask method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use should follow the corresponding project's License. Please do not republish without permission.