This article collects typical usage examples of the Java class org.apache.hadoop.mapred.RunningJob. If you are wondering what the RunningJob class is for, how to use it, or where to find it in real code, the curated samples below should help.
The RunningJob class belongs to the org.apache.hadoop.mapred package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
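Before diving in, here is the lifecycle that almost every example below shares: build a JobConf, submit it through a JobClient, and use the returned RunningJob handle to wait on, inspect, or kill the job. This is only a minimal sketch; the class name and the input/output paths are placeholders, not taken from any example below.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class RunningJobSketch {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf(RunningJobSketch.class);
    conf.setJobName("running-job-sketch");
    FileInputFormat.setInputPaths(conf, new Path("in"));   // placeholder input path
    FileOutputFormat.setOutputPath(conf, new Path("out")); // placeholder output path

    JobClient client = new JobClient(conf);
    RunningJob running = client.submitJob(conf); // non-blocking submit
    running.waitForCompletion();                 // block until the job finishes
    System.out.println("Job " + running.getID()
        + (running.isSuccessful() ? " succeeded" : " failed"));
    client.close();
  }
}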
Example 1: testInputFormat
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
void testInputFormat(Class<? extends InputFormat> clazz) throws IOException {
final JobConf job = MapreduceTestingShim.getJobConf(mrCluster);
job.setInputFormat(clazz);
job.setOutputFormat(NullOutputFormat.class);
job.setMapperClass(ExampleVerifier.class);
job.setNumReduceTasks(0);
LOG.debug("submitting job.");
final RunningJob run = JobClient.runJob(job);
assertTrue("job failed!", run.isSuccessful());
assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, run.getCounters()
.findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter());
assertEquals("Saw any instances of the filtered out row.", 0, run.getCounters()
.findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter());
assertEquals("Saw the wrong number of instances of columnA.", 1, run.getCounters()
.findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter());
assertEquals("Saw the wrong number of instances of columnB.", 1, run.getCounters()
.findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter());
assertEquals("Saw the wrong count of values for the filtered-for row.", 2, run.getCounters()
.findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter());
assertEquals("Saw the wrong count of values for the filtered-out row.", 0, run.getCounters()
.findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter());
}
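The counters asserted above are presumably incremented inside the ExampleVerifier mapper, which is not shown here. With the old org.apache.hadoop.mapred API that is done through the Reporter handle passed to map(); a rough sketch, not the actual ExampleVerifier source:

// inside map(...) of an old-API Mapper, bump the counters the test reads back:
reporter.incrCounter(TestTableInputFormat.class.getName() + ":row", "aaa", 1);
reporter.incrCounter(TestTableInputFormat.class.getName() + ":family", "columnA", 1);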
Example 2: configure
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
public void configure(String keySpec, int expect) throws Exception {
Path testdir = new Path(TEST_DIR.getAbsolutePath());
Path inDir = new Path(testdir, "in");
Path outDir = new Path(testdir, "out");
FileSystem fs = getFileSystem();
fs.delete(testdir, true);
conf.setInputFormat(TextInputFormat.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(LongWritable.class);
conf.setNumMapTasks(1);
conf.setNumReduceTasks(1);
conf.setOutputFormat(TextOutputFormat.class);
conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
conf.setKeyFieldComparatorOptions(keySpec);
conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
conf.setMapperClass(InverseMapper.class);
conf.setReducerClass(IdentityReducer.class);
if (!fs.mkdirs(testdir)) {
throw new IOException("Mkdirs failed to create " + testdir.toString());
}
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
// set up two lines of input data in a single file
Path inFile = new Path(inDir, "part0");
FileOutputStream fos = new FileOutputStream(inFile.toString());
fos.write((line1 + "\n").getBytes());
fos.write((line2 + "\n").getBytes());
fos.close();
JobClient jc = new JobClient(conf);
RunningJob r_job = jc.submitJob(conf);
while (!r_job.isComplete()) {
Thread.sleep(1000);
}
if (!r_job.isSuccessful()) {
fail("Oops! The job broke due to an unexpected error");
}
Path[] outputFiles = FileUtil.stat2Paths(
getFileSystem().listStatus(outDir,
new Utils.OutputFileUtils.OutputFilesFilter()));
if (outputFiles.length > 0) {
InputStream is = getFileSystem().open(outputFiles[0]);
BufferedReader reader = new BufferedReader(new InputStreamReader(is));
String line = reader.readLine();
// make sure we get what we expect as the first line, and also
// that we have two lines
if (expect == 1) {
assertTrue(line.startsWith(line1));
} else if (expect == 2) {
assertTrue(line.startsWith(line2));
}
line = reader.readLine();
if (expect == 1) {
assertTrue(line.startsWith(line2));
} else if (expect == 2) {
assertTrue(line.startsWith(line1));
}
reader.close();
}
}
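The keySpec argument feeds setKeyFieldComparatorOptions and follows Unix sort(1)-style syntax: -k pos1[,pos2] picks the key fields, with n for numeric and r for reverse ordering. The specs the surrounding test actually passes are not shown here, so the following option strings are purely illustrative:

// hypothetical keySpec values for the configure() harness above:
conf.setKeyFieldComparatorOptions("-k2,2");   // second field, text order
conf.setKeyFieldComparatorOptions("-k2,2nr"); // second field, numeric, reversed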
Example 3: mrRun
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
private void mrRun() throws Exception {
FileSystem fs = FileSystem.get(getJobConf());
Path inputDir = new Path("input");
fs.mkdirs(inputDir);
Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
writer.write("hello");
writer.close();
Path outputDir = new Path("output", "output");
JobConf jobConf = new JobConf(getJobConf());
jobConf.setInt("mapred.map.tasks", 1);
jobConf.setInt("mapred.map.max.attempts", 1);
jobConf.setInt("mapred.reduce.max.attempts", 1);
jobConf.set("mapred.input.dir", inputDir.toString());
jobConf.set("mapred.output.dir", outputDir.toString());
JobClient jobClient = new JobClient(jobConf);
RunningJob runJob = jobClient.submitJob(jobConf);
runJob.waitForCompletion();
assertTrue(runJob.isComplete());
assertTrue(runJob.isSuccessful());
}
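The mapred.* keys set above are the legacy Hadoop 1 property names; Hadoop 2+ still honors them through its deprecation mapping, but the current names can also be used directly. A sketch of the equivalent configuration, assuming Hadoop 2.x property names:

// same settings under the Hadoop 2 property names:
jobConf.setInt("mapreduce.job.maps", 1);
jobConf.setInt("mapreduce.map.maxattempts", 1);
jobConf.setInt("mapreduce.reduce.maxattempts", 1);
jobConf.set("mapreduce.input.fileinputformat.inputdir", inputDir.toString());
jobConf.set("mapreduce.output.fileoutputformat.outputdir", outputDir.toString());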
Example 4: runJob
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
/**
* Submit/run a map/reduce job.
*
* @param job the configured map/reduce job to submit
* @return true for success
* @throws IOException if job submission or monitoring fails
*/
public static boolean runJob(JobConf job) throws IOException {
JobClient jc = new JobClient(job);
boolean success = true;
RunningJob running = null;
try {
running = jc.submitJob(job);
JobID jobId = running.getID();
System.out.println("Job " + jobId + " is submitted");
while (!running.isComplete()) {
System.out.println("Job " + jobId + " is still running.");
try {
Thread.sleep(60000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt(); // preserve the interrupt status instead of swallowing it
}
running = jc.getJob(jobId);
}
success = running.isSuccessful();
} finally {
if (!success && (running != null)) {
running.killJob();
}
jc.close();
}
return success;
}
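The polling loop above predates the convenience helpers on these classes; the same result can be had with blocking calls. A minimal alternative sketch, assuming the same JobConf job as above:

JobClient jc = new JobClient(job);
RunningJob running = jc.submitJob(job);
running.waitForCompletion(); // blocks until the job finishes, no manual sleep loop
boolean success = running.isSuccessful();
jc.close();

JobClient.runJob(job) goes one step further still: it submits the job, prints progress as it runs, and throws an IOException if the job fails.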
Example 5: shoudBeValidMapReduceEvaluation
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
@Test
@SuppressWarnings("deprecation")
public void shoudBeValidMapReduceEvaluation() throws Exception {
Configuration cfg = UTIL.getConfiguration();
JobConf jobConf = new JobConf(cfg);
try {
jobConf.setJobName("process row task");
jobConf.setNumReduceTasks(1);
TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
jobConf);
TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
ClassificatorRowReduce.class, jobConf);
RunningJob job = JobClient.runJob(jobConf);
assertTrue(job.isSuccessful());
} finally {
if (jobConf != null)
FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
}
}
Example 6: shoudBeValidMapReduceWithPartitionerEvaluation
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
@Test
@SuppressWarnings("deprecation")
public void shoudBeValidMapReduceWithPartitionerEvaluation()
throws IOException {
Configuration cfg = UTIL.getConfiguration();
JobConf jobConf = new JobConf(cfg);
try {
jobConf.setJobName("process row task");
jobConf.setNumReduceTasks(2);
TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
jobConf);
TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
ClassificatorRowReduce.class, jobConf, HRegionPartitioner.class);
RunningJob job = JobClient.runJob(jobConf);
assertTrue(job.isSuccessful());
} finally {
if (jobConf != null)
FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
}
}
Example 7: runJob
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
@Override
protected void runJob(String jobName, Configuration c, List<Scan> scans)
throws IOException, InterruptedException, ClassNotFoundException {
JobConf job = new JobConf(TEST_UTIL.getConfiguration());
job.setJobName(jobName);
job.setMapperClass(Mapper.class);
job.setReducerClass(Reducer.class);
TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), Mapper.class,
ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir);
TableMapReduceUtil.addDependencyJars(job);
job.setNumReduceTasks(1); // one to get final "first" and "last" key
FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
LOG.info("Started " + job.getJobName());
RunningJob runningJob = JobClient.runJob(job);
runningJob.waitForCompletion();
assertTrue(runningJob.isSuccessful());
LOG.info("After map/reduce completion - job " + jobName);
}
Example 8: runTestOnTable
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
private void runTestOnTable(HTable table) throws IOException {
JobConf jobConf = null;
try {
LOG.info("Before map/reduce startup");
jobConf = new JobConf(UTIL.getConfiguration(), TestTableMapReduce.class);
jobConf.setJobName("process column contents");
jobConf.setNumReduceTasks(1);
TableMapReduceUtil.initTableMapJob(Bytes.toString(table.getTableName()),
Bytes.toString(INPUT_FAMILY), ProcessContentsMapper.class,
ImmutableBytesWritable.class, Put.class, jobConf);
TableMapReduceUtil.initTableReduceJob(Bytes.toString(table.getTableName()),
IdentityTableReduce.class, jobConf);
LOG.info("Started " + Bytes.toString(table.getTableName()));
RunningJob job = JobClient.runJob(jobConf);
assertTrue(job.isSuccessful());
LOG.info("After map/reduce completion");
// verify map-reduce results
verify(Bytes.toString(table.getTableName()));
} finally {
if (jobConf != null) {
FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
}
}
}
Example 9: verifyACLViewJob
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
/**
* Verify JobContext.JOB_ACL_VIEW_JOB
*
* @throws IOException
* @throws InterruptedException
*/
private void verifyACLViewJob() throws IOException, InterruptedException {
// Set the job up.
final JobConf myConf = mr.createJobConf();
myConf.set(JobContext.JOB_ACL_VIEW_JOB, viewColleague);
// Submit the job as user1
RunningJob job = submitJobAsUser(myConf, jobSubmitter);
final JobID jobId = job.getID();
// Try operations as an unauthorized user.
verifyViewJobAsUnauthorizedUser(myConf, jobId, modifyColleague);
// Try operations as an authorized user, who is part of view-job-acl.
verifyViewJobAsAuthorizedUser(myConf, jobId, viewColleague);
// Try operations as an authorized user, who is a queue administrator.
verifyViewJobAsAuthorizedUser(myConf, jobId, qAdmin);
// Clean up the job
job.killJob();
}
Example 10: submitJobAsUser
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
/**
* Submits a sleep job with 1 map task that runs for a long time (2000 sec).
* @param clusterConf the cluster configuration used to build and submit the job
* @param user the job owner
* @return RunningJob that is started
* @throws IOException
* @throws InterruptedException
*/
private RunningJob submitJobAsUser(final JobConf clusterConf, String user)
throws IOException, InterruptedException {
UserGroupInformation ugi =
UserGroupInformation.createUserForTesting(user, new String[] {});
RunningJob job = ugi.doAs(new PrivilegedExceptionAction<RunningJob>() {
@Override
public RunningJob run() throws Exception {
JobClient jobClient = new JobClient(clusterConf);
SleepJob sleepJob = new SleepJob();
sleepJob.setConf(clusterConf);
JobConf jobConf = sleepJob.setupJobConf(1, 0, 2000, 1000, 1000, 1000);
return jobClient.submitJob(jobConf); // typing the action avoids the unchecked cast
}
});
return job;
}
Example 11: abortJob
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
@Override
public void abortJob(JobContext context, JobStatus.State runState) throws java.io.IOException {
super.abortJob(context, runState);
final JobClient jobClient = new JobClient(new JobConf(context.getConfiguration()));
final RunningJob job = jobClient.getJob((org.apache.hadoop.mapred.JobID) JobID.forName(context.getConfiguration().get("mapred.job.id")));
String diag = "";
for (final TaskCompletionEvent event : job.getTaskCompletionEvents(0)) {
switch (event.getTaskStatus()) {
case SUCCEEDED:
break;
default:
diag += "Diagnostics for: " + event.getTaskTrackerHttp() + "\n";
for (final String s : job.getTaskDiagnostics(event.getTaskAttemptId())) {
diag += s + "\n";
}
diag += "\n";
break;
}
}
updateStatus(diag, context.getConfiguration().getInt("boa.hadoop.jobid", 0));
}
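Note that getTaskCompletionEvents(0) returns only the first batch of events; the old API pages them in fixed-size chunks, so a job with many tasks can have failures beyond this loop. A sketch of draining every batch, reusing the job handle from above:

int from = 0;
TaskCompletionEvent[] events;
// an empty array signals that all batches have been consumed
while ((events = job.getTaskCompletionEvents(from)).length > 0) {
  for (TaskCompletionEvent event : events) {
    // inspect event.getTaskStatus() exactly as in abortJob() above
  }
  from += events.length;
}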
Example 12: initialize
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
private void initialize() {
if (initialized) {
return;
}
try {
JobClient jobClient = new JobClient(getConf());
for (RepairJob job : recoverActiveJobs()) {
org.apache.hadoop.mapred.JobID jobId =
new org.apache.hadoop.mapred.JobID(job.getJtIdentifier(),
job.getJobId());
RunningJob runningJob = jobClient.getJob(jobId);
if (runningJob == null) {
throw new IOException("Failed to recover");
}
ActiveRepair recovered = new ActiveRepair(jobId, runningJob,
new Path(job.getInDir()), new Path(job.getOutDir()));
currentRepairs.put(job.getPath(), recovered);
}
} catch (IOException e) {
LOG.error("Encoding job recovery failed", e);
throw new RuntimeException(e);
}
initialized = true;
}
Example 13: encryptedShuffleWithCerts
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
private void encryptedShuffleWithCerts(boolean useClientCerts)
throws Exception {
try {
Configuration conf = new Configuration();
String keystoresDir = new File(BASEDIR).getAbsolutePath();
String sslConfsDir =
KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf,
useClientCerts);
conf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true);
startCluster(conf);
FileSystem fs = FileSystem.get(getJobConf());
Path inputDir = new Path("input");
fs.mkdirs(inputDir);
Writer writer =
new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
writer.write("hello");
writer.close();
Path outputDir = new Path("output", "output");
JobConf jobConf = new JobConf(getJobConf());
jobConf.setInt("mapred.map.tasks", 1);
jobConf.setInt("mapred.map.max.attempts", 1);
jobConf.setInt("mapred.reduce.max.attempts", 1);
jobConf.set("mapred.input.dir", inputDir.toString());
jobConf.set("mapred.output.dir", outputDir.toString());
JobClient jobClient = new JobClient(jobConf);
RunningJob runJob = jobClient.submitJob(jobConf);
runJob.waitForCompletion();
Assert.assertTrue(runJob.isComplete());
Assert.assertTrue(runJob.isSuccessful());
} finally {
stopCluster();
}
}
Example 14: runJob
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
static boolean runJob(JobConf conf, Path inDir, Path outDir, int numMaps,
int numReds) throws IOException, InterruptedException {
FileSystem fs = FileSystem.get(conf);
if (fs.exists(outDir)) {
fs.delete(outDir, true);
}
if (!fs.exists(inDir)) {
fs.mkdirs(inDir);
}
String input = "The quick brown fox\n" + "has many silly\n"
+ "red fox sox\n";
for (int i = 0; i < numMaps; ++i) {
DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
file.writeBytes(input);
file.close();
}
DistributedCache.addFileToClassPath(TestMRJobs.APP_JAR, conf, fs);
conf.setOutputCommitter(CustomOutputCommitter.class);
conf.setInputFormat(TextInputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setNumMapTasks(numMaps);
conf.setNumReduceTasks(numReds);
JobClient jobClient = new JobClient(conf);
RunningJob job = jobClient.submitJob(conf);
return jobClient.monitorAndPrintJob(conf, job);
}
Example 15: doTestWithMapReduce
import org.apache.hadoop.mapred.RunningJob; // import the required package/class
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
int expectedNumSplits, boolean shutdownCluster) throws Exception {
//create the table and snapshot
createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);
if (shutdownCluster) {
util.shutdownMiniHBaseCluster();
}
try {
// create the job
JobConf jobConf = new JobConf(util.getConfiguration());
jobConf.setJarByClass(util.getClass());
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(jobConf,
TestTableSnapshotInputFormat.class);
TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS,
TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
NullWritable.class, jobConf, true, tableDir);
jobConf.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
jobConf.setNumReduceTasks(1);
jobConf.setOutputFormat(NullOutputFormat.class);
RunningJob job = JobClient.runJob(jobConf);
Assert.assertTrue(job.isSuccessful());
} finally {
if (!shutdownCluster) {
util.getHBaseAdmin().deleteSnapshot(snapshotName);
util.deleteTable(tableName);
}
}
}