

Java MapReduceTestUtil.createKillJob Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.MapReduceTestUtil.createKillJob. If you are unsure what MapReduceTestUtil.createKillJob does, how to call it, or when to use it, the curated examples below may help. You can also explore further usage examples of org.apache.hadoop.mapreduce.MapReduceTestUtil, the class that defines this method.


Five code examples of the MapReduceTestUtil.createKillJob method are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Java code examples.
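The examples all follow the same pattern: createKillJob builds a Job over the given input and output directories whose tasks run long enough for the test to kill the job externally. Below is a minimal sketch of that pattern, distilled from the examples on this page; the class name, directory paths, and Configuration setup are illustrative placeholders, not part of any of the quoted projects.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;

public class CreateKillJobSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // hypothetical directories; the tests below obtain these from their harness
    Path inDir = new Path("/tmp/kill-job-in");
    Path outDir = new Path("/tmp/kill-job-out");

    // build a job over inDir/outDir that is meant to be killed mid-run
    Job job = MapReduceTestUtil.createKillJob(conf, outDir, inDir);
    job.submit();

    // let the setup phase finish before killing, as the examples below do
    while (job.setupProgress() != 1.0f) {
      Thread.sleep(100);
    }

    job.killJob();
    // waitForCompletion(true) returns false for a job that was killed
    boolean succeeded = job.waitForCompletion(true);
    System.out.println("completed successfully: " + succeeded);
  }
}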

Example 1: testKilledJob

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the class that provides the method
private void testKilledJob(String fileName,
    Class<? extends OutputFormat> output, String[] exclude) throws Exception {
  Path outDir = getNewOutputDir();
  Job job = MapReduceTestUtil.createKillJob(conf, outDir, inDir);
  job.setOutputFormatClass(output);

  job.submit();

  // wait for the setup to be completed
  while (job.setupProgress() != 1.0f) {
    UtilsForTests.waitFor(100);
  }

  job.killJob(); // kill the job

  assertFalse("Job did not get kill", job.waitForCompletion(true));

  if (fileName != null) {
    Path testFile = new Path(outDir, fileName);
    assertTrue("File " + testFile + " missing for job " + job.getJobID(), fs
        .exists(testFile));
  }

  // verify that files in the exclude set do not exist
  for (String ex : exclude) {
    Path file = new Path(outDir, ex);
    assertFalse("File " + file + " should not be present for killed job "
        + job.getJobID(), fs.exists(file));
  }
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: TestJobOutputCommitter.java

Example 2: testJobControlWithKillJob

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the class that provides the method
public void testJobControlWithKillJob() throws Exception {
  LOG.info("Starting testJobControlWithKillJob");

  Configuration conf = createJobConf();
  cleanupData(conf);
  Job job1 = MapReduceTestUtil.createKillJob(conf, outdir_1, indir);
  JobControl theControl = createDependencies(conf, job1);

  while (cjob1.getJobState() != ControlledJob.State.RUNNING) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      break;
    }
  }
  // verify that adding a depending job to a RUNNING job fails
  assertFalse(cjob1.addDependingJob(cjob2));

  // suspend jobcontrol and resume it again
  theControl.suspend();
  assertTrue(
    theControl.getThreadState() == JobControl.ThreadState.SUSPENDED);
  theControl.resume();
  
  // kill the first job.
  cjob1.killJob();

  // wait till all the jobs complete
  waitTillAllFinished(theControl);
  
  assertTrue(cjob1.getJobState() == ControlledJob.State.FAILED);
  assertTrue(cjob2.getJobState() == ControlledJob.State.SUCCESS);
  assertTrue(cjob3.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
  assertTrue(cjob4.getJobState() == ControlledJob.State.DEPENDENT_FAILED);

  theControl.stop();
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: TestMapReduceJobControl.java
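Examples 2, 4, and 5 rely on fixtures (cjob1 through cjob4, createDependencies, waitTillAllFinished) that are defined elsewhere in TestMapReduceJobControl and not shown here. For readers unfamiliar with that harness, here is a hedged sketch of the standard JobControl wiring such fixtures build on: wrapping Jobs in ControlledJob instances, declaring dependencies, and running the controller on its own thread. All names and paths are illustrative, and the use of MapReduceTestUtil.createCopyJob for the dependent job is an assumption, not taken from the quoted tests.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
import org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl;

public class JobControlKillSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // hypothetical directories standing in for indir / outdir_1 in the tests
    Path indir = new Path("/tmp/jobctl-in");
    Path outdir1 = new Path("/tmp/jobctl-out1");

    // job1 will be killed; job2 depends on job1 and should therefore not run
    Job job1 = MapReduceTestUtil.createKillJob(conf, outdir1, indir);
    ControlledJob cjob1 = new ControlledJob(job1, null);
    Job job2 = MapReduceTestUtil.createCopyJob(conf, new Path("/tmp/jobctl-out2"), outdir1);
    ControlledJob cjob2 = new ControlledJob(job2, Arrays.asList(cjob1));

    JobControl theControl = new JobControl("killJobGroup");
    theControl.addJob(cjob1);
    theControl.addJob(cjob2);

    // JobControl implements Runnable; run the controller on its own thread
    new Thread(theControl).start();

    // wait until cjob1 is actually running, then kill it
    while (cjob1.getJobState() != ControlledJob.State.RUNNING) {
      Thread.sleep(100);
    }
    cjob1.killJob();

    // wait for the controller to settle, then stop it
    while (!theControl.allFinished()) {
      Thread.sleep(100);
    }
    theControl.stop();
    System.out.println("cjob1: " + cjob1.getJobState()); // expected FAILED
    System.out.println("cjob2: " + cjob2.getJobState()); // expected DEPENDENT_FAILED
  }
}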

Example 3: testKilledJob

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the class that provides the method
private void testKilledJob(String fileName,
    Class<? extends OutputFormat> output, String[] exclude) throws Exception {
  Path outDir = getNewOutputDir();
  Job job = MapReduceTestUtil.createKillJob(conf, outDir, inDir);
  job.setOutputFormatClass(output);

  job.submit();

  // wait for the setup to be completed
  while (job.setupProgress() != 1.0f) {
    UtilsForTests.waitFor(100);
  }

  job.killJob(); // kill the job

  assertFalse("Job did not get kill", job.waitForCompletion(true));

  if (fileName != null) {
    Path testFile = new Path(outDir, fileName);
    assertTrue("File " + testFile + " missing for job ", fs.exists(testFile));
  }

  // verify that files in the exclude set do not exist
  for (String ex : exclude) {
    Path file = new Path(outDir, ex);
    assertFalse("File " + file + " should not be present for killed job ", fs
        .exists(file));
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 30, Source: TestJobOutputCommitter.java

Example 4: testJobControlWithKillJob

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the class that provides the method
public void testJobControlWithKillJob() throws Exception {
  Configuration conf = createJobConf();
  cleanupData(conf);
  Job job1 = MapReduceTestUtil.createKillJob(conf, outdir_1, indir);
  JobControl theControl = createDependencies(conf, job1);

  while (cjob1.getJobState() != ControlledJob.State.RUNNING) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      break;
    }
  }
  // verify that adding a depending job to a RUNNING job fails
  assertFalse(cjob1.addDependingJob(cjob2));

  // suspend jobcontrol and resume it again
  theControl.suspend();
  assertTrue(
    theControl.getThreadState() == JobControl.ThreadState.SUSPENDED);
  theControl.resume();
  
  // kill the first job.
  cjob1.killJob();

  // wait till all the jobs complete
  waitTillAllFinished(theControl);
  
  assertTrue(cjob1.getJobState() == ControlledJob.State.FAILED);
  assertTrue(cjob2.getJobState() == ControlledJob.State.SUCCESS);
  assertTrue(cjob3.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
  assertTrue(cjob4.getJobState() == ControlledJob.State.DEPENDENT_FAILED);

  theControl.stop();
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 36, Source: TestMapReduceJobControl.java

Example 5: testJobControlWithKillJob

import org.apache.hadoop.mapreduce.MapReduceTestUtil; // import the class that provides the method
@Test
public void testJobControlWithKillJob() throws Exception {
  LOG.info("Starting testJobControlWithKillJob");

  Configuration conf = createJobConf();
  cleanupData(conf);
  Job job1 = MapReduceTestUtil.createKillJob(conf, outdir_1, indir);
  JobControl theControl = createDependencies(conf, job1);

  while (cjob1.getJobState() != ControlledJob.State.RUNNING) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      break;
    }
  }
  // verify that adding a depending job to a RUNNING job fails
  assertFalse(cjob1.addDependingJob(cjob2));

  // suspend jobcontrol and resume it again
  theControl.suspend();
  assertTrue(
    theControl.getThreadState() == JobControl.ThreadState.SUSPENDED);
  theControl.resume();
  
  // kill the first job.
  cjob1.killJob();

  // wait till all the jobs complete
  waitTillAllFinished(theControl);
  
  assertTrue(cjob1.getJobState() == ControlledJob.State.FAILED);
  assertTrue(cjob2.getJobState() == ControlledJob.State.SUCCESS);
  assertTrue(cjob3.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
  assertTrue(cjob4.getJobState() == ControlledJob.State.DEPENDENT_FAILED);

  theControl.stop();
}
 
Developer: hopshadoop, Project: hops, Lines: 39, Source: TestMapReduceJobControl.java


Note: The org.apache.hadoop.mapreduce.MapReduceTestUtil.createKillJob examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not republish without permission.