This article collects typical usage examples of the Java method org.apache.hadoop.mapred.RunningJob.waitForCompletion. If you are unsure what RunningJob.waitForCompletion does or how to use it, the curated code samples below should help. You can also browse further usage examples of its enclosing class, org.apache.hadoop.mapred.RunningJob.
The sections below present 8 code examples of RunningJob.waitForCompletion, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
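Before the individual examples, here is a minimal sketch of the pattern they all share: build a JobConf, submit it through JobClient, block on RunningJob.waitForCompletion(), then inspect the terminal state. The helper name runAndWait, and the assumption that the JobConf arrives fully configured, are illustrative and not taken from any example below.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class WaitForCompletionSketch {
  // Hypothetical helper: jobConf is assumed to already carry input/output
  // paths, mapper/reducer classes, and cluster settings.
  public static void runAndWait(JobConf jobConf) throws IOException {
    JobClient jobClient = new JobClient(jobConf);         // connect to the cluster described by jobConf
    RunningJob runningJob = jobClient.submitJob(jobConf); // asynchronous submission
    runningJob.waitForCompletion();                       // block until the job reaches a terminal state
    if (!runningJob.isSuccessful()) {                     // completed does not imply succeeded
      throw new IOException("Job " + runningJob.getID() + " failed");
    }
  }
}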
Example 1: mrRun

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on

private void mrRun() throws Exception {
  FileSystem fs = FileSystem.get(getJobConf());
  // Write a one-line input file for the job to consume.
  Path inputDir = new Path("input");
  fs.mkdirs(inputDir);
  Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
  writer.write("hello");
  writer.close();
  Path outputDir = new Path("output", "output");
  // One map task and no retries, so any failure surfaces immediately.
  JobConf jobConf = new JobConf(getJobConf());
  jobConf.setInt("mapred.map.tasks", 1);
  jobConf.setInt("mapred.map.max.attempts", 1);
  jobConf.setInt("mapred.reduce.max.attempts", 1);
  jobConf.set("mapred.input.dir", inputDir.toString());
  jobConf.set("mapred.output.dir", outputDir.toString());
  // submitJob() returns immediately; waitForCompletion() blocks until the job ends.
  JobClient jobClient = new JobClient(jobConf);
  RunningJob runJob = jobClient.submitJob(jobConf);
  runJob.waitForCompletion();
  assertTrue(runJob.isComplete());
  assertTrue(runJob.isSuccessful());
}
Example 2: runJob

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on

@Override
protected void runJob(String jobName, Configuration c, List<Scan> scans)
    throws IOException, InterruptedException, ClassNotFoundException {
  JobConf job = new JobConf(TEST_UTIL.getConfiguration());
  job.setJobName(jobName);
  job.setMapperClass(Mapper.class);
  job.setReducerClass(Reducer.class);
  TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), Mapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir);
  TableMapReduceUtil.addDependencyJars(job);
  job.setNumReduceTasks(1); // one to get final "first" and "last" key
  FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
  LOG.info("Started " + job.getJobName());
  // JobClient.runJob() already blocks until the job finishes, so this
  // waitForCompletion() call returns immediately.
  RunningJob runningJob = JobClient.runJob(job);
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
  LOG.info("After map/reduce completion - job " + jobName);
}
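A note on the two launch styles seen so far: JobClient.submitJob(jobConf) (Example 1) returns as soon as the job is accepted, so the subsequent waitForCompletion() call is what actually blocks, while JobClient.runJob(job) (Example 2) submits the job, reports progress, and only returns once the job has finished, throwing IOException on failure. Roughly, runJob behaves like the two-line sketch below, minus the progress reporting and failure check:

RunningJob runningJob = new JobClient(jobConf).submitJob(jobConf);
runningJob.waitForCompletion(); // what the extra call in Example 2 repeats, as a no-op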
Example 3: encryptedShuffleWithCerts

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on

private void encryptedShuffleWithCerts(boolean useClientCerts)
    throws Exception {
  try {
    // Set up SSL keystores and enable encrypted shuffle before starting the cluster.
    Configuration conf = new Configuration();
    String keystoresDir = new File(BASEDIR).getAbsolutePath();
    String sslConfsDir =
        KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf,
        useClientCerts);
    conf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true);
    startCluster(conf);
    FileSystem fs = FileSystem.get(getJobConf());
    Path inputDir = new Path("input");
    fs.mkdirs(inputDir);
    Writer writer =
        new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
    writer.write("hello");
    writer.close();
    Path outputDir = new Path("output", "output");
    JobConf jobConf = new JobConf(getJobConf());
    jobConf.setInt("mapred.map.tasks", 1);
    jobConf.setInt("mapred.map.max.attempts", 1);
    jobConf.setInt("mapred.reduce.max.attempts", 1);
    jobConf.set("mapred.input.dir", inputDir.toString());
    jobConf.set("mapred.output.dir", outputDir.toString());
    // Submit and wait; the assertions verify the job ran over the encrypted shuffle.
    JobClient jobClient = new JobClient(jobConf);
    RunningJob runJob = jobClient.submitJob(jobConf);
    runJob.waitForCompletion();
    Assert.assertTrue(runJob.isComplete());
    Assert.assertTrue(runJob.isSuccessful());
  } finally {
    stopCluster();
  }
}
Example 4: encryptedShuffleWithCerts

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on

private void encryptedShuffleWithCerts(boolean useClientCerts)
    throws Exception {
  try {
    // Variant of Example 3 for an older Hadoop line, where the SSL flag
    // is defined on JobTracker rather than MRConfig.
    Configuration conf = new Configuration();
    String keystoresDir = new File(BASEDIR).getAbsolutePath();
    String sslConfsDir =
        KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf,
        useClientCerts);
    conf.setBoolean(JobTracker.SHUFFLE_SSL_ENABLED_KEY, true);
    startCluster(conf);
    FileSystem fs = FileSystem.get(getJobConf(conf));
    Path inputDir = new Path("input");
    fs.mkdirs(inputDir);
    Writer writer =
        new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
    writer.write("hello");
    writer.close();
    Path outputDir = new Path("output", "output");
    JobConf jobConf = new JobConf(getJobConf(conf));
    jobConf.setInt("mapred.map.tasks", 1);
    jobConf.setInt("mapred.map.max.attempts", 1);
    jobConf.setInt("mapred.reduce.max.attempts", 1);
    jobConf.set("mapred.input.dir", inputDir.toString());
    jobConf.set("mapred.output.dir", outputDir.toString());
    JobClient jobClient = new JobClient(jobConf);
    RunningJob runJob = jobClient.submitJob(jobConf);
    runJob.waitForCompletion();
    Assert.assertTrue(runJob.isComplete());
    Assert.assertTrue(runJob.isSuccessful());
  } finally {
    stopCluster();
  }
}
Example 5: start

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on

public void start(String[] args, int startIndex) throws IOException {
  try {
    // Parse command-line options; unrecognized flags are silently skipped.
    while (startIndex < args.length) {
      String cmd = args[startIndex++];
      if (cmd.equals(NUM_MAPPERS_KEY)) {
        numMappers = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(NUM_SUBDIRS_KEY)) {
        numSubDirs = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(NUM_FILES_SUB_DIR_KEY)) {
        numFilesEachSubDirs = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(NUM_ROUND_KEY)) {
        round = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(WORKPLACE_KEY)) {
        workplace = args[startIndex++];
      }
    }
  } catch (Exception e) {
    printUsage();
    System.exit(-1);
  }
  if (workplace.trim().isEmpty()) {
    printUsage();
    System.exit(-1);
  }
  if (!workplace.endsWith(Path.SEPARATOR)) {
    workplace += Path.SEPARATOR;
  }
  // Submit the job and block until it completes.
  JobConf jobConf = createJobConf(conf);
  JobClient client = new JobClient(jobConf);
  RunningJob runningJob = client.submitJob(jobConf);
  runningJob.waitForCompletion();
}
Example 6: testRunningJobLocally

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on

/**
 * This method creates a sample MR job and submits that JobConf object to the
 * static MiniClusterController method to be executed.
 */
@Test
public void testRunningJobLocally() throws IOException, InterruptedException {
  JobConf sampleJob = createWordCountMRJobConf();
  RunningJob runningJob = miniCluster_.runJob(sampleJob);
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
}
Example 7: testGetConfigForMiniCluster

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on

/**
 * This method gets a JobConf object from the static MiniClusterController
 * method, fills it with a sample MR job and then executes the job.
 */
@Test
public void testGetConfigForMiniCluster() throws IOException {
  JobConf sampleJob = miniCluster_.getJobConf(WordCount.class);
  fillInWordCountMRJobConf(sampleJob);
  // JobClient.runJob() blocks until completion, so waitForCompletion()
  // returns immediately here.
  RunningJob runningJob = JobClient.runJob(sampleJob);
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
}
Example 8: start

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on

public void start(String[] args, int startIndex) throws IOException {
  try {
    // Parse command-line options; unlike Example 5, unknown flags abort with usage help.
    while (startIndex < args.length) {
      String cmd = args[startIndex++];
      if (cmd.equals(NUM_MAPPERS_KEY)) {
        numMappers = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(NUM_SUBDIRS_KEY)) {
        numSubDirs = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(NUM_FILES_SUB_DIR_KEY)) {
        numFilesEachSubDirs = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(NUM_ROUND_KEY)) {
        round = Integer.valueOf(args[startIndex++]);
      } else if (cmd.equals(WORKPLACE_KEY)) {
        workplace = args[startIndex++];
      } else if (cmd.equals(NOTIFIER_SERVER_ADDR_KEY)) {
        notifierServerAddrStr = args[startIndex++];
      } else if (cmd.equals(NOTIFIER_SERVER_PORT_KEY)) {
        notifierServerPortStr = args[startIndex++];
      } else {
        printUsage();
        System.exit(-1);
      }
    }
  } catch (Exception e) {
    printUsage();
    System.exit(-1);
  }
  if (workplace.trim().isEmpty() ||
      notifierServerAddrStr.trim().isEmpty()) {
    printUsage();
    System.exit(-1);
  }
  if (!workplace.endsWith(Path.SEPARATOR)) {
    workplace += Path.SEPARATOR;
  }
  // Submit the job and block until it completes.
  JobConf jobConf = createJobConf(conf);
  JobClient client = new JobClient(jobConf);
  RunningJob runningJob = client.submitJob(jobConf);
  runningJob.waitForCompletion();
}