This article collects typical usage examples of the Java method org.apache.hadoop.mapred.RunningJob.waitForCompletion. If you are unsure what RunningJob.waitForCompletion does or how to call it, the curated examples below should help; they also illustrate the enclosing class org.apache.hadoop.mapred.RunningJob.
The following presents 8 code examples of the RunningJob.waitForCompletion method, sorted by popularity by default.
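All eight examples share the same skeleton: build a JobConf, submit it through a JobClient, block on RunningJob.waitForCompletion(), then inspect the outcome with isComplete()/isSuccessful(). Here is a minimal, self-contained sketch of that skeleton; the job name and input/output paths are placeholders, not values taken from any of the examples (the default identity mapper/reducer and text input/output formats apply, since no job-specific classes are set).

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class WaitForCompletionSketch {
  public static void main(String[] args) throws Exception {
    JobConf jobConf = new JobConf();
    jobConf.setJobName("wait-for-completion-demo");     // placeholder name
    jobConf.set("mapred.input.dir", "/tmp/demo/input"); // placeholder paths
    jobConf.set("mapred.output.dir", "/tmp/demo/output");
    JobClient jobClient = new JobClient(jobConf);
    // submitJob() returns immediately with a RunningJob handle;
    // waitForCompletion() then blocks until the job finishes
    RunningJob runJob = jobClient.submitJob(jobConf);
    runJob.waitForCompletion();
    System.out.println("complete=" + runJob.isComplete()
        + " successful=" + runJob.isSuccessful());
  }
}

Note that JobClient.runJob(jobConf), used in Examples 2 and 7 below, performs both steps itself: it submits the job and polls until completion, throwing an IOException if the job fails. An extra waitForCompletion() call after runJob() therefore returns immediately.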
Example 1: mrRun
import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
private void mrRun() throws Exception {
  FileSystem fs = FileSystem.get(getJobConf());
  Path inputDir = new Path("input");
  fs.mkdirs(inputDir);
  Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
  writer.write("hello");
  writer.close();
  Path outputDir = new Path("output", "output");
  JobConf jobConf = new JobConf(getJobConf());
  jobConf.setInt("mapred.map.tasks", 1);
  jobConf.setInt("mapred.map.max.attempts", 1);
  jobConf.setInt("mapred.reduce.max.attempts", 1);
  jobConf.set("mapred.input.dir", inputDir.toString());
  jobConf.set("mapred.output.dir", outputDir.toString());
  JobClient jobClient = new JobClient(jobConf);
  RunningJob runJob = jobClient.submitJob(jobConf);
  runJob.waitForCompletion();
  assertTrue(runJob.isComplete());
  assertTrue(runJob.isSuccessful());
}
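waitForCompletion() gives no feedback while it blocks. When progress reporting is wanted, the RunningJob handle can instead be polled; below is a hedged sketch of such a loop (the helper name and the one-second interval are arbitrary choices for illustration, not something the test above does).

import java.io.IOException;
import org.apache.hadoop.mapred.RunningJob;

public final class JobProgressPoller {
  /** Polls instead of blocking; progress values are fractions in [0, 1]. */
  public static void waitWithProgress(RunningJob runJob)
      throws IOException, InterruptedException {
    while (!runJob.isComplete()) {
      System.out.printf("map %.0f%%, reduce %.0f%%%n",
          runJob.mapProgress() * 100, runJob.reduceProgress() * 100);
      Thread.sleep(1000); // arbitrary poll interval
    }
  }
}

Either way, the final isSuccessful() check is what distinguishes a failed job from a successful one; isComplete() is true for both.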
Example 2: runJob
import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
@Override
protected void runJob(String jobName, Configuration c, List<Scan> scans)
    throws IOException, InterruptedException, ClassNotFoundException {
  JobConf job = new JobConf(TEST_UTIL.getConfiguration());
  job.setJobName(jobName);
  job.setMapperClass(Mapper.class);
  job.setReducerClass(Reducer.class);
  TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), Mapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir);
  TableMapReduceUtil.addDependencyJars(job);
  job.setNumReduceTasks(1); // one to get final "first" and "last" key
  FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
  LOG.info("Started " + job.getJobName());
  // JobClient.runJob() already blocks until the job finishes,
  // so this waitForCompletion() returns immediately
  RunningJob runningJob = JobClient.runJob(job);
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
  LOG.info("After map/reduce completion - job " + jobName);
}
Example 3: encryptedShuffleWithCerts
import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
private void encryptedShuffleWithCerts(boolean useClientCerts)
    throws Exception {
  try {
    Configuration conf = new Configuration();
    String keystoresDir = new File(BASEDIR).getAbsolutePath();
    String sslConfsDir =
        KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf,
        useClientCerts);
    conf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true);
    startCluster(conf);
    FileSystem fs = FileSystem.get(getJobConf());
    Path inputDir = new Path("input");
    fs.mkdirs(inputDir);
    Writer writer =
        new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
    writer.write("hello");
    writer.close();
    Path outputDir = new Path("output", "output");
    JobConf jobConf = new JobConf(getJobConf());
    jobConf.setInt("mapred.map.tasks", 1);
    jobConf.setInt("mapred.map.max.attempts", 1);
    jobConf.setInt("mapred.reduce.max.attempts", 1);
    jobConf.set("mapred.input.dir", inputDir.toString());
    jobConf.set("mapred.output.dir", outputDir.toString());
    JobClient jobClient = new JobClient(jobConf);
    RunningJob runJob = jobClient.submitJob(jobConf);
    runJob.waitForCompletion();
    Assert.assertTrue(runJob.isComplete());
    Assert.assertTrue(runJob.isSuccessful());
  } finally {
    stopCluster();
  }
}
Example 4: encryptedShuffleWithCerts
This variant differs from Example 3 only in how it obtains the job configuration (getJobConf(conf)) and in the SSL flag it sets (JobTracker.SHUFFLE_SSL_ENABLED_KEY instead of MRConfig.SHUFFLE_SSL_ENABLED_KEY), reflecting an older Hadoop API.
import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
private void encryptedShuffleWithCerts(boolean useClientCerts)
    throws Exception {
  try {
    Configuration conf = new Configuration();
    String keystoresDir = new File(BASEDIR).getAbsolutePath();
    String sslConfsDir =
        KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf,
        useClientCerts);
    conf.setBoolean(JobTracker.SHUFFLE_SSL_ENABLED_KEY, true);
    startCluster(conf);
    FileSystem fs = FileSystem.get(getJobConf(conf));
    Path inputDir = new Path("input");
    fs.mkdirs(inputDir);
    Writer writer =
        new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
    writer.write("hello");
    writer.close();
    Path outputDir = new Path("output", "output");
    JobConf jobConf = new JobConf(getJobConf(conf));
    jobConf.setInt("mapred.map.tasks", 1);
    jobConf.setInt("mapred.map.max.attempts", 1);
    jobConf.setInt("mapred.reduce.max.attempts", 1);
    jobConf.set("mapred.input.dir", inputDir.toString());
    jobConf.set("mapred.output.dir", outputDir.toString());
    JobClient jobClient = new JobClient(jobConf);
    RunningJob runJob = jobClient.submitJob(jobConf);
    runJob.waitForCompletion();
    Assert.assertTrue(runJob.isComplete());
    Assert.assertTrue(runJob.isSuccessful());
  } finally {
    stopCluster();
  }
}
Example 5: start
import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
public void start(String[] args, int startIndex) throws IOException {
  try {
    while (startIndex < args.length) {
      String cmd = args[startIndex++];
      if (cmd.equals(NUM_MAPPERS_KEY)) {
        numMappers = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(NUM_SUBDIRS_KEY)) {
        numSubDirs = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(NUM_FILES_SUB_DIR_KEY)) {
        numFilesEachSubDirs = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(NUM_ROUND_KEY)) {
        round = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(WORKPLACE_KEY)) {
        workplace = args[startIndex++];
      }
    }
  } catch (Exception e) {
    printUsage();
    System.exit(-1);
  }
  if (workplace.trim().isEmpty()) {
    printUsage();
    System.exit(-1);
  }
  if (!workplace.endsWith(Path.SEPARATOR)) {
    workplace += Path.SEPARATOR;
  }
  JobConf jobConf = createJobConf(conf);
  JobClient client = new JobClient(jobConf);
  RunningJob runningJob = client.submitJob(jobConf);
  runningJob.waitForCompletion();
}
Example 6: testRunningJobLocally
import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
/**
 * This method creates a sample MR job and submits that JobConf object to the
 * static MiniClusterController method to be executed.
 */
@Test
public void testRunningJobLocally() throws IOException, InterruptedException {
  JobConf sampleJob = createWordCountMRJobConf();
  RunningJob runningJob = miniCluster_.runJob(sampleJob);
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
}
Example 7: testGetConfigForMiniCluster
import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
/**
 * This method gets a JobConf object from the static MiniClusterController
 * method, fills it with a sample MR job and then executes the job.
 */
@Test
public void testGetConfigForMiniCluster() throws IOException {
  JobConf sampleJob = miniCluster_.getJobConf(WordCount.class);
  fillInWordCountMRJobConf(sampleJob);
  RunningJob runningJob = JobClient.runJob(sampleJob);
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
}
Example 8: start
import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
public void start(String[] args, int startIndex) throws IOException {
  try {
    while (startIndex < args.length) {
      String cmd = args[startIndex++];
      if (cmd.equals(NUM_MAPPERS_KEY)) {
        numMappers = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(NUM_SUBDIRS_KEY)) {
        numSubDirs = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(NUM_FILES_SUB_DIR_KEY)) {
        numFilesEachSubDirs = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(NUM_ROUND_KEY)) {
        round = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(WORKPLACE_KEY)) {
        workplace = args[startIndex++];
      } else if (cmd.equals(NOTIFIER_SERVER_ADDR_KEY)) {
        notifierServerAddrStr = args[startIndex++];
      } else if (cmd.equals(NOTIFIER_SERVER_PORT_KEY)) {
        notifierServerPortStr = args[startIndex++];
      } else {
        printUsage();
        System.exit(-1);
      }
    }
  } catch (Exception e) {
    printUsage();
    System.exit(-1);
  }
  if (workplace.trim().isEmpty() ||
      notifierServerAddrStr.trim().isEmpty()) {
    printUsage();
    System.exit(-1);
  }
  if (!workplace.endsWith(Path.SEPARATOR)) {
    workplace += Path.SEPARATOR;
  }
  JobConf jobConf = createJobConf(conf);
  JobClient client = new JobClient(jobConf);
  RunningJob runningJob = client.submitJob(jobConf);
  runningJob.waitForCompletion();
}
}