This article collects typical usage examples of the Java method org.apache.hadoop.mapred.UtilsForTests.waitTillDone. If you have been wondering what UtilsForTests.waitTillDone does, how to call it, or where to find working examples, the curated code samples below should help. You can also read further about the enclosing class, org.apache.hadoop.mapred.UtilsForTests.
Two code examples of UtilsForTests.waitTillDone are shown below, ordered by popularity by default.
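Before diving into the full examples, here is a minimal sketch of the typical call pattern: submit a job through a JobClient, then block until the cluster reports no unfinished jobs. This is a sketch, not code from the article; the JobConf setup is assumed to point at a live (mini) MapReduce cluster, and UtilsForTests is a test-only helper that ships with the Hadoop test sources.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.UtilsForTests;

public class WaitTillDoneSketch {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf();  // assumed: configured for a running (Mini)MR cluster
    JobClient jobClient = new JobClient(conf);
    RunningJob job = jobClient.submitJob(conf);
    // Block until every job known to the JobTracker has finished.
    UtilsForTests.waitTillDone(jobClient);
    System.out.println("Job " + job.getID() + " succeeded: " + job.isSuccessful());
  }
}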
Example 1: testRestartWithoutRecovery
import org.apache.hadoop.mapred.UtilsForTests; // import the package/class this method depends on
/**
 * Tests the jobtracker with restart-recovery turned off.
 * Submit a job with normal priority, maps = 2, reducers = 0.
 *
 * Wait for the job to reach 50% map completion.
 *
 * Restart the jobtracker with recovery turned off.
 *
 * Check that the job is missing after the restart.
 */
@Test
public void testRestartWithoutRecovery(MiniDFSCluster dfs,
                                       MiniMRCluster mr)
    throws Exception {
  // Test a job with a waiting mapper and recovery turned off
  FileSystem fileSys = dfs.getFileSystem();
  cleanUp(fileSys, shareDir);
  JobConf newConf = getJobs(mr.createJobConf(),
                            new JobPriority[] {JobPriority.NORMAL},
                            new int[] {2}, new int[] {0},
                            outputDir, inDir,
                            getMapSignalFile(shareDir),
                            getReduceSignalFile(shareDir))[0];
  JobClient jobClient = new JobClient(newConf);
  RunningJob job = jobClient.submitJob(newConf);
  JobID id = job.getID();
  // Make sure that the job is at least 50% map-complete
  while (UtilsForTests.getJobStatus(jobClient, id).mapProgress() < 0.5f) {
    UtilsForTests.waitFor(100);
  }
  mr.stopJobTracker();
  // Turn off the recovery
  mr.getJobTrackerConf().setBoolean("mapred.jobtracker.restart.recover",
                                    false);
  // Wait for a minute before restarting the jobtracker
  UtilsForTests.waitFor(60 * 1000);
  mr.startJobTracker();
  // Signal the tasks
  UtilsForTests.signalTasks(dfs, fileSys, true, getMapSignalFile(shareDir),
                            getReduceSignalFile(shareDir));
  // Wait for the JT to be ready
  UtilsForTests.waitForJobTracker(jobClient);
  UtilsForTests.waitTillDone(jobClient);
  // The submitted job should not exist
  assertTrue("Submitted job was detected with recovery disabled",
             UtilsForTests.getJobStatus(jobClient, id) == null);
}
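A note on how this example would be driven: as written, the method takes the mini clusters as parameters, so in the originating repository it is presumably invoked from a harness that sets them up first. A minimal hypothetical driver (the class name and setup values are assumptions, not taken from the article) might look like:

// Hypothetical driver: start the mini clusters, run the test, tear down.
Configuration conf = new Configuration();
MiniDFSCluster dfs = new MiniDFSCluster(conf, 1, true, null);
MiniMRCluster mr = new MiniMRCluster(1, dfs.getFileSystem().getUri().toString(), 1);
try {
  new TestJobTrackerRestart().testRestartWithoutRecovery(dfs, mr);  // class name assumed
} finally {
  mr.shutdown();
  dfs.shutdown();
}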
Example 2: testRunningTaskCount
import org.apache.hadoop.mapred.UtilsForTests; // import the package/class this method depends on
/**
 * Test that running-task counts are correctly maintained for various
 * types of jobs.
 */
private void testRunningTaskCount(boolean speculation, boolean locality)
    throws Exception {
  LOG.info("Testing running jobs with speculation : " + speculation
           + ", locality : " + locality);
  // cleanup
  dfsCluster.getFileSystem().delete(TEST_DIR, true);
  final Path mapSignalFile = new Path(TEST_DIR, "map-signal");
  final Path redSignalFile = new Path(TEST_DIR, "reduce-signal");
  // configure a waiting job with 1 map and 1 reducer
  JobConf job =
      configure(UtilsForTests.WaitingMapper.class, IdentityReducer.class, 1, 1,
                locality);
  job.set(UtilsForTests.getTaskSignalParameter(true), mapSignalFile.toString());
  job.set(UtilsForTests.getTaskSignalParameter(false), redSignalFile.toString());
  // Disable slow-start for reduces since the maps don't complete
  // on their own in these test cases
  job.setFloat("mapred.reduce.slowstart.completed.maps", 0.0f);
  // test jobs with speculation
  job.setSpeculativeExecution(speculation);
  JobClient jc = new JobClient(job);
  RunningJob running = jc.submitJob(job);
  JobTracker jobtracker = mrCluster.getJobTrackerRunner().getJobTracker();
  JobInProgress jip = jobtracker.getJob(running.getID());
  LOG.info("Running job " + jip.getJobID());
  // wait
  LOG.info("Waiting for job " + jip.getJobID() + " to be ready");
  waitTillReady(jip, job);
  // check if the running structures are populated
  Set<TaskInProgress> uniqueTasks = new HashSet<TaskInProgress>();
  for (Map.Entry<Node, Set<TaskInProgress>> s :
       jip.getRunningMapCache().entrySet()) {
    uniqueTasks.addAll(s.getValue());
  }
  // add non-local map tasks
  uniqueTasks.addAll(jip.getNonLocalRunningMaps());
  assertEquals("Running map count doesn't match for jobs with speculation "
               + speculation + ", and locality " + locality,
               jip.runningMaps(), uniqueTasks.size());
  assertEquals("Running reducer count doesn't match for jobs with speculation "
               + speculation + ", and locality " + locality,
               jip.runningReduces(), jip.getRunningReduces().size());
  // signal the tasks
  LOG.info("Signaling the tasks");
  UtilsForTests.signalTasks(dfsCluster, dfsCluster.getFileSystem(),
                            mapSignalFile.toString(),
                            redSignalFile.toString(), numSlaves);
  // wait for the job to complete
  LOG.info("Waiting for job " + jip.getJobID() + " to be complete");
  UtilsForTests.waitTillDone(jc);
  // cleanup
  dfsCluster.getFileSystem().delete(TEST_DIR, true);
}
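For context on what both examples ultimately block on: to the best of my reading of the Hadoop 1.x test sources, waitTillDone is a simple polling loop over every job the JobTracker knows about. The sketch below paraphrases that behavior and may differ from the exact code in your Hadoop version.

// Paraphrased sketch of UtilsForTests.waitTillDone (Hadoop 1.x era);
// the actual implementation may differ between versions.
static void waitTillDone(JobClient jobClient) throws IOException {
  while (true) {
    boolean shouldWait = false;
    // getAllJobs() returns the status of every job the JobTracker tracks.
    for (JobStatus status : jobClient.getAllJobs()) {
      int state = status.getRunState();
      if (state != JobStatus.SUCCEEDED && state != JobStatus.FAILED
          && state != JobStatus.KILLED) {
        shouldWait = true;  // at least one job is still pending or running
        break;
      }
    }
    if (!shouldWait) {
      return;
    }
    UtilsForTests.waitFor(1000);  // sleep a second between polls
  }
}

Because it waits on all jobs rather than one specific JobID, a single call covers everything submitted to the test cluster, which is why both examples can call it without passing the job they submitted.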