本文整理汇总了Java中org.apache.hadoop.mapreduce.MapReduceTestUtil.createKillJob方法的典型用法代码示例。如果您正苦于以下问题:Java MapReduceTestUtil.createKillJob方法的具体用法?Java MapReduceTestUtil.createKillJob怎么用?Java MapReduceTestUtil.createKillJob使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.mapreduce.MapReduceTestUtil
的用法示例。
在下文中一共展示了MapReduceTestUtil.createKillJob方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testKilledJob
import org.apache.hadoop.mapreduce.MapReduceTestUtil; //导入方法依赖的package包/类
/**
 * Submits a kill-job built by {@code MapReduceTestUtil.createKillJob}, kills it
 * once setup has completed, and verifies which output files survive the kill.
 *
 * @param fileName output file expected to exist after the kill, or {@code null}
 *                 to skip that check
 * @param output   the {@code OutputFormat} class under test
 * @param exclude  file names that must NOT be present after the job is killed
 * @throws Exception if submission, waiting, or file-system checks fail
 */
private void testKilledJob(String fileName,
    Class<? extends OutputFormat> output, String[] exclude) throws Exception {
  Path outDir = getNewOutputDir();
  Job job = MapReduceTestUtil.createKillJob(conf, outDir, inDir);
  job.setOutputFormatClass(output);

  job.submit();
  // Busy-wait until setup finishes so the kill hits a genuinely running job.
  while (job.setupProgress() != 1.0f) {
    UtilsForTests.waitFor(100);
  }
  job.killJob();
  // waitForCompletion returns false for a killed job.
  // Fixed assertion message: was the ungrammatical "Job did not get kill".
  assertFalse("Job did not get killed", job.waitForCompletion(true));

  if (fileName != null) {
    Path testFile = new Path(outDir, fileName);
    assertTrue("File " + testFile + " missing for job " + job.getJobID(),
        fs.exists(testFile));
  }
  // None of the excluded files may be left behind by the killed job.
  for (String ex : exclude) {
    Path file = new Path(outDir, ex);
    assertFalse("File " + file + " should not be present for killed job "
        + job.getJobID(), fs.exists(file));
  }
}
示例2: testJobControlWithKillJob
import org.apache.hadoop.mapreduce.MapReduceTestUtil; //导入方法依赖的package包/类
/**
 * Exercises JobControl when the first job of a dependency chain is killed:
 * the killed job must end FAILED and its dependents DEPENDENT_FAILED, while
 * an independent job still succeeds.
 */
public void testJobControlWithKillJob() throws Exception {
  LOG.info("Starting testJobControlWithKillJob");

  Configuration jobConf = createJobConf();
  cleanupData(jobConf);
  Job killableJob = MapReduceTestUtil.createKillJob(jobConf, outdir_1, indir);
  JobControl control = createDependencies(jobConf, killableJob);

  // Poll until the first controlled job reaches the RUNNING state; give up
  // early if the polling thread is interrupted.
  for (;;) {
    if (cjob1.getJobState() == ControlledJob.State.RUNNING) {
      break;
    }
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      break;
    }
  }

  // Adding a depending job to an already-RUNNING job must be rejected.
  assertFalse(cjob1.addDependingJob(cjob2));

  // Suspend the control thread, confirm the state, then resume it.
  control.suspend();
  assertTrue(
      control.getThreadState() == JobControl.ThreadState.SUSPENDED);
  control.resume();

  // Kill the first job and wait for the whole chain to settle.
  cjob1.killJob();
  waitTillAllFinished(control);

  assertTrue(cjob1.getJobState() == ControlledJob.State.FAILED);
  assertTrue(cjob2.getJobState() == ControlledJob.State.SUCCESS);
  assertTrue(cjob3.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
  assertTrue(cjob4.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
  control.stop();
}
示例3: testKilledJob
import org.apache.hadoop.mapreduce.MapReduceTestUtil; //导入方法依赖的package包/类
/**
 * Submits a kill-job, kills it after setup completes, and checks the expected
 * presence/absence of output files in the job's output directory.
 *
 * @param fileName output file expected to exist after the kill, or {@code null}
 * @param output   the {@code OutputFormat} class under test
 * @param exclude  file names that must be absent after the kill
 * @throws Exception on submission or file-system failures
 */
private void testKilledJob(String fileName,
    Class<? extends OutputFormat> output, String[] exclude) throws Exception {
  Path outputDir = getNewOutputDir();
  Job killJob = MapReduceTestUtil.createKillJob(conf, outputDir, inDir);
  killJob.setOutputFormatClass(output);
  killJob.submit();

  // Spin until setup is done so the job is actually running when we kill it.
  for (; killJob.setupProgress() != 1.0f; ) {
    UtilsForTests.waitFor(100);
  }

  killJob.killJob();
  assertFalse("Job did not get kill", killJob.waitForCompletion(true));

  if (fileName != null) {
    Path expected = new Path(outputDir, fileName);
    assertTrue("File " + expected + " missing for job ", fs.exists(expected));
  }
  // Verify that none of the excluded files survived the kill.
  for (String name : exclude) {
    Path excluded = new Path(outputDir, name);
    assertFalse("File " + excluded + " should not be present for killed job ",
        fs.exists(excluded));
  }
}
示例4: testJobControlWithKillJob
import org.apache.hadoop.mapreduce.MapReduceTestUtil; //导入方法依赖的package包/类
/**
 * Verifies JobControl behaviour when the head of a dependency chain is
 * killed: the killed job fails, dependents become DEPENDENT_FAILED, and the
 * independent job still succeeds. Also checks suspend/resume of the control
 * thread and that depending jobs cannot be added to a RUNNING job.
 */
public void testJobControlWithKillJob() throws Exception {
  Configuration jobConf = createJobConf();
  cleanupData(jobConf);
  Job headJob = MapReduceTestUtil.createKillJob(jobConf, outdir_1, indir);
  JobControl control = createDependencies(jobConf, headJob);

  // Wait for the first controlled job to start running; stop waiting if the
  // polling thread gets interrupted.
  boolean interrupted = false;
  while (!interrupted && cjob1.getJobState() != ControlledJob.State.RUNNING) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      interrupted = true;
    }
  }

  // A RUNNING job must refuse new depending jobs.
  assertFalse(cjob1.addDependingJob(cjob2));

  // Suspend the job-control thread and bring it back.
  control.suspend();
  assertTrue(
      control.getThreadState() == JobControl.ThreadState.SUSPENDED);
  control.resume();

  // Kill the head job, then let everything run to completion.
  cjob1.killJob();
  waitTillAllFinished(control);

  assertTrue(cjob1.getJobState() == ControlledJob.State.FAILED);
  assertTrue(cjob2.getJobState() == ControlledJob.State.SUCCESS);
  assertTrue(cjob3.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
  assertTrue(cjob4.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
  control.stop();
}
示例5: testJobControlWithKillJob
import org.apache.hadoop.mapreduce.MapReduceTestUtil; //导入方法依赖的package包/类
/**
 * Exercises JobControl with a job that is killed mid-run: verifies that a
 * depending job cannot be added to a RUNNING job, that the control thread can
 * be suspended and resumed, and that after killing cjob1 the final states are
 * FAILED / SUCCESS / DEPENDENT_FAILED / DEPENDENT_FAILED for cjob1..cjob4.
 * NOTE(review): cjob3/cjob4 presumably depend on cjob1 via createDependencies
 * — confirm against that helper, which is not visible here.
 */
@Test
public void testJobControlWithKillJob() throws Exception {
LOG.info("Starting testJobControlWithKillJob");
Configuration conf = createJobConf();
cleanupData(conf);
Job job1 = MapReduceTestUtil.createKillJob(conf, outdir_1, indir);
JobControl theControl = createDependencies(conf, job1);
// Poll until the first controlled job reaches the RUNNING state; give up if
// the polling thread is interrupted.
while (cjob1.getJobState() != ControlledJob.State.RUNNING) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
break;
}
}
// Verify that adding a depending job to a RUNNING job fails.
assertFalse(cjob1.addDependingJob(cjob2));
// Suspend the job-control thread and resume it again.
theControl.suspend();
assertTrue(
theControl.getThreadState() == JobControl.ThreadState.SUSPENDED);
theControl.resume();
// Kill the first job.
cjob1.killJob();
// Wait till all the jobs complete.
waitTillAllFinished(theControl);
// Killed job fails; dependents are marked DEPENDENT_FAILED; the independent
// job (cjob2) still succeeds.
assertTrue(cjob1.getJobState() == ControlledJob.State.FAILED);
assertTrue(cjob2.getJobState() == ControlledJob.State.SUCCESS);
assertTrue(cjob3.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
assertTrue(cjob4.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
theControl.stop();
}