

Java RunningJob Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapred.RunningJob. If you are wondering what the RunningJob class does, how to use it, or what working examples look like, the curated class code examples below may help.


The RunningJob class belongs to the org.apache.hadoop.mapred package. A total of 15 RunningJob code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
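Before the examples, here is a minimal sketch of the typical RunningJob lifecycle: submit a configured job through JobClient, poll for completion, then inspect the outcome. The JobConf below is a bare placeholder; a real job would also set input/output paths, mapper/reducer classes, and so on.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;

public class RunningJobLifecycle {
  public static void main(String[] args) throws IOException, InterruptedException {
    JobConf conf = new JobConf(); // placeholder: a real job needs paths, mapper, etc.
    JobClient client = new JobClient(conf);

    // submitJob() returns immediately with a RunningJob handle to the live job.
    RunningJob job = client.submitJob(conf);
    JobID id = job.getID();
    System.out.println("Submitted job " + id);

    // Poll until done; job.waitForCompletion() is the blocking equivalent.
    while (!job.isComplete()) {
      Thread.sleep(1000);
    }

    if (job.isSuccessful()) {
      System.out.println("Job " + id + " succeeded; counters:\n" + job.getCounters());
    } else {
      System.out.println("Job " + id + " failed");
      // job.killJob() would be the way to abort a job that is still running.
    }
    client.close();
  }
}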

Example 1: testInputFormat

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
void testInputFormat(Class<? extends InputFormat> clazz) throws IOException {
  final JobConf job = MapreduceTestingShim.getJobConf(mrCluster);
  job.setInputFormat(clazz);
  job.setOutputFormat(NullOutputFormat.class);
  job.setMapperClass(ExampleVerifier.class);
  job.setNumReduceTasks(0);
  LOG.debug("submitting job.");
  final RunningJob run = JobClient.runJob(job);
  assertTrue("job failed!", run.isSuccessful());
  assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter());
  assertEquals("Saw any instances of the filtered out row.", 0, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter());
  assertEquals("Saw the wrong number of instances of columnA.", 1, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter());
  assertEquals("Saw the wrong number of instances of columnB.", 1, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter());
  assertEquals("Saw the wrong count of values for the filtered-for row.", 2, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter());
  assertEquals("Saw the wrong count of values for the filtered-out row.", 0, run.getCounters()
      .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 23, Source: TestTableInputFormat.java
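The assertions above rely on custom counters: Counters.findCounter(String group, String counter) looks a counter up by its group and name strings, which the ExampleVerifier mapper presumably increments through the Reporter it is handed for each row and cell it sees.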

Example 2: configure

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(1);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data: a single file containing two lines
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 67, Source: TestKeyFieldBasedComparator.java

Example 3: mrRun

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
private void mrRun() throws Exception {
  FileSystem fs = FileSystem.get(getJobConf());
  Path inputDir = new Path("input");
  fs.mkdirs(inputDir);
  Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
  writer.write("hello");
  writer.close();

  Path outputDir = new Path("output", "output");

  JobConf jobConf = new JobConf(getJobConf());
  jobConf.setInt("mapred.map.tasks", 1);
  jobConf.setInt("mapred.map.max.attempts", 1);
  jobConf.setInt("mapred.reduce.max.attempts", 1);
  jobConf.set("mapred.input.dir", inputDir.toString());
  jobConf.set("mapred.output.dir", outputDir.toString());

  JobClient jobClient = new JobClient(jobConf);
  RunningJob runJob = jobClient.submitJob(jobConf);
  runJob.waitForCompletion();
  assertTrue(runJob.isComplete());
  assertTrue(runJob.isSuccessful());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source: TestMiniMRProxyUser.java

Example 4: runJob

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
/**
 * Submit and run a map/reduce job.
 * 
 * @param job the job configuration to submit
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // ignore the interrupt and keep polling
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 33, Source: DataJoinJob.java
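The manual isComplete() polling loop above is one option; RunningJob.waitForCompletion(), used in Example 3 and Example 7, is the simpler blocking alternative when per-interval status printing is not needed.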

Example 5: shoudBeValidMapReduceEvaluation

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
@Test
@SuppressWarnings("deprecation")
public void shoudBeValidMapReduceEvaluation() throws Exception {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(1);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
        jobConf);
    TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
        ClassificatorRowReduce.class, jobConf);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 21, Source: TestTableMapReduceUtil.java

Example 6: shoudBeValidMapReduceWithPartitionerEvaluation

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
@Test
@SuppressWarnings("deprecation")
public void shoudBeValidMapReduceWithPartitionerEvaluation()
    throws IOException {
  Configuration cfg = UTIL.getConfiguration();
  JobConf jobConf = new JobConf(cfg);
  try {
    jobConf.setJobName("process row task");
    jobConf.setNumReduceTasks(2);
    TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY),
        ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class,
        jobConf);

    TableMapReduceUtil.initTableReduceJob(TABLE_NAME,
        ClassificatorRowReduce.class, jobConf, HRegionPartitioner.class);
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
  } finally {
    if (jobConf != null)
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 23, Source: TestTableMapReduceUtil.java

Example 7: runJob

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
@Override
protected void runJob(String jobName, Configuration c, List<Scan> scans)
    throws IOException, InterruptedException, ClassNotFoundException {
  JobConf job = new JobConf(TEST_UTIL.getConfiguration());

  job.setJobName(jobName);
  job.setMapperClass(Mapper.class);
  job.setReducerClass(Reducer.class);

  TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), Mapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir);

  TableMapReduceUtil.addDependencyJars(job);

  job.setNumReduceTasks(1); // one to get final "first" and "last" key
  FileOutputFormat.setOutputPath(job, new Path(job.getJobName()));
  LOG.info("Started " + job.getJobName());

  RunningJob runningJob = JobClient.runJob(job);
  runningJob.waitForCompletion();
  assertTrue(runningJob.isSuccessful());
  LOG.info("After map/reduce completion - job " + jobName);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 25, Source: TestMultiTableSnapshotInputFormat.java

Example 8: runTestOnTable

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
private void runTestOnTable(HTable table) throws IOException {
  JobConf jobConf = null;
  try {
    LOG.info("Before map/reduce startup");
    jobConf = new JobConf(UTIL.getConfiguration(), TestTableMapReduce.class);
    jobConf.setJobName("process column contents");
    jobConf.setNumReduceTasks(1);
    TableMapReduceUtil.initTableMapJob(Bytes.toString(table.getTableName()),
      Bytes.toString(INPUT_FAMILY), ProcessContentsMapper.class,
      ImmutableBytesWritable.class, Put.class, jobConf);
    TableMapReduceUtil.initTableReduceJob(Bytes.toString(table.getTableName()),
      IdentityTableReduce.class, jobConf);

    LOG.info("Started " + Bytes.toString(table.getTableName()));
    RunningJob job = JobClient.runJob(jobConf);
    assertTrue(job.isSuccessful());
    LOG.info("After map/reduce completion");

    // verify map-reduce results
    verify(Bytes.toString(table.getTableName()));
  } finally {
    if (jobConf != null) {
      FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
    }
  }
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 27, Source: TestTableMapReduce.java

Example 9: verifyACLViewJob

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
/**
 * Verify JobContext.JOB_ACL_VIEW_JOB
 * 
 * @throws IOException
 * @throws InterruptedException
 */
private void verifyACLViewJob() throws IOException, InterruptedException {

  // Set the job up.
  final JobConf myConf = mr.createJobConf();
  myConf.set(JobContext.JOB_ACL_VIEW_JOB, viewColleague);

  // Submit the job as user1
  RunningJob job = submitJobAsUser(myConf, jobSubmitter);

  final JobID jobId = job.getID();

  // Try operations as an unauthorized user.
  verifyViewJobAsUnauthorizedUser(myConf, jobId, modifyColleague);

  // Try operations as an authorized user, who is part of view-job-acl.
  verifyViewJobAsAuthorizedUser(myConf, jobId, viewColleague);

  // Try operations as an authorized user, who is a queue administrator.
  verifyViewJobAsAuthorizedUser(myConf, jobId, qAdmin);

  // Clean up the job
  job.killJob();
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 30, Source: TestJobACLs.java

Example 10: submitJobAsUser

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
/**
 * Submits a sleep job with 1 map task that runs for a long time (2000 seconds).
 * @param clusterConf the cluster configuration to submit against
 * @param user the job owner
 * @return the RunningJob that was started
 * @throws IOException
 * @throws InterruptedException
 */
private RunningJob submitJobAsUser(final JobConf clusterConf, String user)
    throws IOException, InterruptedException {
  UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting(user, new String[] {});
  // Typing the action with RunningJob avoids the unchecked cast from Object.
  return ugi.doAs(new PrivilegedExceptionAction<RunningJob>() {
    @Override
    public RunningJob run() throws Exception {
      JobClient jobClient = new JobClient(clusterConf);
      SleepJob sleepJob = new SleepJob();
      sleepJob.setConf(clusterConf);
      JobConf jobConf = sleepJob.setupJobConf(1, 0, 2000, 1000, 1000, 1000);
      return jobClient.submitJob(jobConf);
    }
  });
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 26, Source: TestJobACLs.java
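The UserGroupInformation.doAs(...) wrapper is what makes this submission count as coming from the given user rather than from the JVM's login user; the ACL checks in the previous example depend on that distinction.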

Example 11: abortJob

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
@Override
public void abortJob(JobContext context, JobStatus.State runState) throws java.io.IOException {
  super.abortJob(context, runState);

  final JobClient jobClient = new JobClient(new JobConf(context.getConfiguration()));
  final RunningJob job = jobClient.getJob(
      (org.apache.hadoop.mapred.JobID) JobID.forName(context.getConfiguration().get("mapred.job.id")));
  String diag = "";
  for (final TaskCompletionEvent event : job.getTaskCompletionEvents(0)) {
    switch (event.getTaskStatus()) {
      case SUCCEEDED:
        break;
      default:
        diag += "Diagnostics for: " + event.getTaskTrackerHttp() + "\n";
        for (final String s : job.getTaskDiagnostics(event.getTaskAttemptId())) {
          diag += s + "\n";
        }
        diag += "\n";
        break;
    }
  }
  updateStatus(diag, context.getConfiguration().getInt("boa.hadoop.jobid", 0));
}
 
Developer ID: boalang, Project: compiler, Lines of code: 21, Source: BoaOutputCommitter.java
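One caveat with this committer: getTaskCompletionEvents(0) returns only the first batch of events (the old API caps each call at a fixed batch size), so a large job may need the offset advanced in a loop to see every failed task. A minimal sketch of that pattern, assuming only the RunningJob#getTaskCompletionEvents(int) overload shown above:

int from = 0;
TaskCompletionEvent[] events;
// Keep fetching until a call returns an empty batch.
while ((events = job.getTaskCompletionEvents(from)).length > 0) {
  for (TaskCompletionEvent event : events) {
    if (event.getTaskStatus() != TaskCompletionEvent.Status.SUCCEEDED) {
      // collect diagnostics as in abortJob() above
    }
  }
  from += events.length;
}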

Example 12: initialize

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
private void initialize() {
  if (initialized) {
    return;
  }

  try {
    JobClient jobClient = new JobClient(getConf());
    for (RepairJob job : recoverActiveJobs()) {
      org.apache.hadoop.mapred.JobID jobId =
          new org.apache.hadoop.mapred.JobID(job.getJtIdentifier(),
              job.getJobId());
      RunningJob runningJob = jobClient.getJob(jobId);
      if (runningJob == null) {
        throw new IOException("Failed to recover");
      }
      ActiveRepair recovered = new ActiveRepair(jobId, runningJob,
          new Path(job.getInDir()), new Path(job.getOutDir()));
      currentRepairs.put(job.getPath(), recovered);
    }
  } catch (IOException e) {
    LOG.error("Encoding job recovery failed", e);
    throw new RuntimeException(e);
  }

  initialized = true;
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 27, Source: MapReduceBlockRepairManager.java

Example 13: encryptedShuffleWithCerts

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
private void encryptedShuffleWithCerts(boolean useClientCerts)
  throws Exception {
  try {
    Configuration conf = new Configuration();
    String keystoresDir = new File(BASEDIR).getAbsolutePath();
    String sslConfsDir =
      KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf,
                                    useClientCerts);
    conf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true);
    startCluster(conf);
    FileSystem fs = FileSystem.get(getJobConf());
    Path inputDir = new Path("input");
    fs.mkdirs(inputDir);
    Writer writer =
      new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
    writer.write("hello");
    writer.close();

    Path outputDir = new Path("output", "output");

    JobConf jobConf = new JobConf(getJobConf());
    jobConf.setInt("mapred.map.tasks", 1);
    jobConf.setInt("mapred.map.max.attempts", 1);
    jobConf.setInt("mapred.reduce.max.attempts", 1);
    jobConf.set("mapred.input.dir", inputDir.toString());
    jobConf.set("mapred.output.dir", outputDir.toString());
    JobClient jobClient = new JobClient(jobConf);
    RunningJob runJob = jobClient.submitJob(jobConf);
    runJob.waitForCompletion();
    Assert.assertTrue(runJob.isComplete());
    Assert.assertTrue(runJob.isSuccessful());
  } finally {
    stopCluster();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 37, Source: TestEncryptedShuffle.java

Example 14: runJob

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
static boolean runJob(JobConf conf, Path inDir, Path outDir, int numMaps, 
                         int numReds) throws IOException, InterruptedException {

  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }
  if (!fs.exists(inDir)) {
    fs.mkdirs(inDir);
  }
  String input = "The quick brown fox\n" + "has many silly\n"
      + "red fox sox\n";
  for (int i = 0; i < numMaps; ++i) {
    DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
    file.writeBytes(input);
    file.close();
  }

  DistributedCache.addFileToClassPath(TestMRJobs.APP_JAR, conf, fs);
  conf.setOutputCommitter(CustomOutputCommitter.class);
  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReds);

  JobClient jobClient = new JobClient(conf);
  
  RunningJob job = jobClient.submitJob(conf);
  return jobClient.monitorAndPrintJob(conf, job);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 35, Source: TestMROldApiJobs.java
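Unlike the polling loops in the earlier examples, this helper hands monitoring to JobClient.monitorAndPrintJob(JobConf, RunningJob), which blocks until the job finishes, logs progress along the way, and returns true only if the job succeeded.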

Example 15: doTestWithMapReduce

import org.apache.hadoop.mapred.RunningJob; // import the required package/class
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
    int expectedNumSplits, boolean shutdownCluster) throws Exception {

  //create the table and snapshot
  createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);

  if (shutdownCluster) {
    util.shutdownMiniHBaseCluster();
  }

  try {
    // create the job
    JobConf jobConf = new JobConf(util.getConfiguration());

    jobConf.setJarByClass(util.getClass());
    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJars(jobConf,
      TestTableSnapshotInputFormat.class);

    TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS,
      TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
      NullWritable.class, jobConf, true, tableDir);

    jobConf.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
    jobConf.setNumReduceTasks(1);
    jobConf.setOutputFormat(NullOutputFormat.class);

    RunningJob job = JobClient.runJob(jobConf);
    Assert.assertTrue(job.isSuccessful());
  } finally {
    if (!shutdownCluster) {
      util.getHBaseAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 37, Source: TestTableSnapshotInputFormat.java


Note: The org.apache.hadoop.mapred.RunningJob class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For redistribution and use, please refer to the corresponding project's license; do not reproduce without permission.