

Java RunningJob.isComplete Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.RunningJob.isComplete. If you are wondering what exactly RunningJob.isComplete does, how to call it, or what working code that uses it looks like, the curated examples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.mapred.RunningJob.


The sections below show 11 code examples of the RunningJob.isComplete method, sorted by popularity by default.
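All of these examples share the same core pattern: submit a job, poll RunningJob.isComplete() in a sleep loop until it returns true, then check RunningJob.isSuccessful(), because isComplete() also returns true for jobs that failed or were killed. Here is a minimal sketch of that pattern; the submitAndWait helper name and the one-second poll interval are illustrative choices, not part of the Hadoop API:

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class IsCompleteSketch {
  /** Submits the job described by conf and blocks until it finishes. */
  public static boolean submitAndWait(JobConf conf)
      throws IOException, InterruptedException {
    JobClient client = new JobClient(conf);
    RunningJob running = client.submitJob(conf);
    // isComplete() stays false while map and reduce tasks are still running.
    while (!running.isComplete()) {
      Thread.sleep(1000); // poll once per second, as most examples below do
    }
    // A completed job is not necessarily a successful one.
    return running.isSuccessful();
  }
}

For non-test code, Example 2 below adds a useful refinement: it kills the job in a finally block when it did not succeed, so a failed run does not leave a stray job behind.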

Example 1: configure

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(1);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data: two input lines written to a single file
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Contributor: naver, Project: hadoop, Lines: 67, Source: TestKeyFieldBasedComparator.java

Example 2: runJob

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
/**
 * Submit/run a map/reduce job.
 * 
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
 
Contributor: naver, Project: hadoop, Lines: 33, Source: DataJoinJob.java

Example 3: testFailoverWhileRunningJob

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
@Test(timeout=60000)
public void testFailoverWhileRunningJob() throws Exception {
  LOG.info("Running job failover test");

  // Inspired by TestRecoveryManager#testJobResubmission
  
  FileUtil.fullyDelete(new File("/tmp/tst"));
  
  // start a job on jt1
  JobConf job1 = new JobConf(conf);
  String signalFile = new Path(TEST_DIR, "signal").toString();
  UtilsForTests.configureWaitingJobConf(job1, new Path(TEST_DIR, "input"),
      new Path(TEST_DIR, "output3"), 2, 0, "test-resubmission", signalFile,
      signalFile);
  JobClient jc = new JobClient(job1);
  RunningJob rJob1 = jc.submitJob(job1);
  while (rJob1.mapProgress() < 0.5f) {
    LOG.info("Waiting for job " + rJob1.getID() + " to be 50% done: " +
        rJob1.mapProgress());
    UtilsForTests.waitFor(500);
  }
  LOG.info("Waiting for job " + rJob1.getID() + " to be 50% done: " +
      rJob1.mapProgress());
  
  // Shut the first JT down, causing automatic failover
  LOG.info("Shutting down jt1");
  cluster.shutdownJobTracker(0);
  
  // allow job to complete
  FileSystem fs = FileSystem.getLocal(conf);
  fs.create(new Path(TEST_DIR, "signal"));
  while (!rJob1.isComplete()) {
    LOG.info("Waiting for job " + rJob1.getID() + " to be successful: " +
        rJob1.mapProgress());
    UtilsForTests.waitFor(500);
  }
  assertTrue("Job should be successful", rJob1.isSuccessful());
}
 
Contributor: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 39, Source: TestMRZKFailoverController.java

Example 4: verifyCompletedJob

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
/**
 * Verification API to check if the job completion state is correct. <br/>
 * 
 * @param id id of the job to be verified.
 */

public void verifyCompletedJob(JobID id) throws Exception{
  RunningJob rJob = getClient().getJob(
      org.apache.hadoop.mapred.JobID.downgrade(id));
  while(!rJob.isComplete()) {
    LOG.info("waiting for job :" + id + " to retire");
    Thread.sleep(1000);
    rJob = getClient().getJob(
        org.apache.hadoop.mapred.JobID.downgrade(id));
  }
  verifyJobDetails(id);
  verifyJobHistory(id);
}
 
Contributor: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 19, Source: JTClient.java

Example 5: runProgram

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
static void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, 
                        Path program, Path inputPath, Path outputPath,
                        int numMaps, int numReduces, String[] expectedResults,
                        JobConf conf
                       ) throws IOException {
  Path wordExec = new Path("testing/bin/application");
  JobConf job = null;
  if(conf == null) {
    job = mr.createJobConf();
  }else {
    job = new JobConf(conf);
  } 
  job.setNumMapTasks(numMaps);
  job.setNumReduceTasks(numReduces);
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);                                         
    Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
    Submitter.setIsJavaRecordReader(job, true);
    Submitter.setIsJavaRecordWriter(job, true);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    RunningJob rJob = null;
    if (numReduces == 0) {
      rJob = Submitter.jobSubmit(job);
      
      while (!rJob.isComplete()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    } else {
      rJob = Submitter.runJob(job);
    }
    assertTrue("pipes job failed", rJob.isSuccessful());
    
    Counters counters = rJob.getCounters();
    Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
    int numCounters = 0;
    for (Counter c : wordCountCounters) {
      System.out.println(c);
      ++numCounters;
    }
    assertTrue("No counters found!", (numCounters > 0));
  }

  List<String> results = new ArrayList<String>();
  for (Path p : FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
      new Utils.OutputFileUtils.OutputFilesFilter()))) {
    results.add(MapReduceTestUtil.readOutput(p, job));
  }
  assertEquals("number of reduces is wrong", 
               expectedResults.length, results.size());
  for(int i=0; i < results.size(); i++) {
    assertEquals("pipes program " + program + " output " + i + " wrong",
                 expectedResults[i], results.get(i));
  }
}
 
Contributor: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 63, Source: TestPipes.java

Example 6: runProgram

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
static void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, 
                        Path program, Path inputPath, Path outputPath,
                        int numMaps, int numReduces, String[] expectedResults,
                        JobConf conf
                       ) throws IOException {
  Path wordExec = new Path("testing/bin/application");
  JobConf job = null;
  if(conf == null) {
    job = mr.createJobConf();
  }else {
    job = new JobConf(conf);
  } 
  job.setNumMapTasks(numMaps);
  job.setNumReduceTasks(numReduces);
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);                                         
    Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
    Submitter.setIsJavaRecordReader(job, true);
    Submitter.setIsJavaRecordWriter(job, true);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    RunningJob rJob = null;
    if (numReduces == 0) {
      rJob = Submitter.jobSubmit(job);
      
      while (!rJob.isComplete()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    } else {
      rJob = Submitter.runJob(job);
    }
    assertTrue("pipes job failed", rJob.isSuccessful());
    
    Counters counters = rJob.getCounters();
    Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
    int numCounters = 0;
    for (Counter c : wordCountCounters) {
      System.out.println(c);
      ++numCounters;
    }
    assertTrue("No counters found!", (numCounters > 0));
  }

  List<String> results = new ArrayList<String>();
  for (Path p:FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
      new Utils.OutputFileUtils.OutputFilesFilter()))) {
    results.add(MapReduceTestUtil.readOutput(p, job));
  }
  assertEquals("number of reduces is wrong", 
               expectedResults.length, results.size());
  for(int i=0; i < results.size(); i++) {
    assertEquals("pipes program " + program + " output " + i + " wrong",
                 expectedResults[i], results.get(i));
  }
}
 
Contributor: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 62, Source: TestPipes.java

Example 7: configure

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path("build/test/test.mapred.spill");
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(2);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set("map.output.key.field.separator", " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data: two input lines written to a single file
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines (both the lines must end up in the same
    //reducer since the partitioner takes the same key spec for all
    //lines)
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Contributor: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 69, Source: TestKeyFieldBasedComparator.java

Example 8: runProgram

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
static void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, 
                        Path program, Path inputPath, Path outputPath,
                        int numMaps, int numReduces, String[] expectedResults,
                        JobConf conf
                       ) throws IOException {
  Path wordExec = new Path("/testing/bin/application");
  JobConf job = null;
  if(conf == null) {
    job = mr.createJobConf();
  }else {
    job = new JobConf(conf);
  } 
  job.setNumMapTasks(numMaps);
  job.setNumReduceTasks(numReduces);
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);                                         
    Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
    Submitter.setIsJavaRecordReader(job, true);
    Submitter.setIsJavaRecordWriter(job, true);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    RunningJob rJob = null;
    if (numReduces == 0) {
      rJob = Submitter.jobSubmit(job);
      
      while (!rJob.isComplete()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    } else {
      rJob = Submitter.runJob(job);
    }
    assertTrue("pipes job failed", rJob.isSuccessful());
    
    Counters counters = rJob.getCounters();
    Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
    int numCounters = 0;
    for (Counter c : wordCountCounters) {
      System.out.println(c);
      ++numCounters;
    }
    assertTrue("No counters found!", (numCounters > 0));
  }

  List<String> results = new ArrayList<String>();
  for (Path p:FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
      new Utils.OutputFileUtils.OutputFilesFilter()))) {
    results.add(TestMiniMRWithDFS.readOutput(p, job));
  }
  assertEquals("number of reduces is wrong", 
               expectedResults.length, results.size());
  for(int i=0; i < results.size(); i++) {
    assertEquals("pipes program " + program + " output " + i + " wrong",
                 expectedResults[i], results.get(i));
  }
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 62, Source: TestPipes.java

Example 9: configure

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path("build/test/test.mapred.spill");
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(2);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data: two input lines written to a single file
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines (both the lines must end up in the same
    //reducer since the partitioner takes the same key spec for all
    //lines)
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Contributor: ict-carch, Project: hadoop-plus, Lines: 69, Source: TestKeyFieldBasedComparator.java

Example 10: run

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
private void run(boolean ioEx, boolean rtEx) throws Exception {
  Path inDir = new Path("testing/mt/input");
  Path outDir = new Path("testing/mt/output");

  // Hack for local FS that does not have the concept of a 'mounting point'
  if (isLocalFS()) {
    String localPathRoot = System.getProperty("test.build.data", "/tmp")
            .replace(' ', '+');
    inDir = new Path(localPathRoot, inDir);
    outDir = new Path(localPathRoot, outDir);
  }


  JobConf conf = createJobConf();
  FileSystem fs = FileSystem.get(conf);

  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  {
    DataOutputStream file = fs.create(new Path(inDir, "part-0"));
    file.writeBytes("a\nb\n\nc\nd\ne");
    file.close();
  }

  conf.setJobName("mt");
  conf.setInputFormat(TextInputFormat.class);

  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  conf.setMapOutputKeyClass(LongWritable.class);
  conf.setMapOutputValueClass(Text.class);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  conf.setMapperClass(IDMap.class);
  conf.setReducerClass(IDReduce.class);

  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);

  conf.setMapRunnerClass(MultithreadedMapRunner.class);
  
  conf.setInt(MultithreadedMapper.NUM_THREADS, 2);

  if (ioEx) {
    conf.setBoolean("multithreaded.ioException", true);
  }
  if (rtEx) {
    conf.setBoolean("multithreaded.runtimeException", true);
  }
  JobClient jc = new JobClient(conf);
  RunningJob job = jc.submitJob(conf);
  while (!job.isComplete()) {
    Thread.sleep(100);
  }

  if (job.isSuccessful()) {
    assertFalse(ioEx || rtEx);
  }
  else {
    assertTrue(ioEx || rtEx);
  }

}
 
Contributor: hopshadoop, Project: hops, Lines: 70, Source: TestMultithreadedMapRunner.java

Example 11: updateProgress

import org.apache.hadoop.mapred.RunningJob; // import the package/class the method depends on
@Override
public void updateProgress() {
  for (HadoopJob job : jobs.values()) {
    int state = WAIT;
    if (job != null) {
      String hadoopJobId = job.getJobId();
      if (hadoopJobId != null) {
        RunningJob hadoopJob = HadoopUtil.getInstance().getJob(hadoopJobId);
        try {
          if (hadoopJob != null) {
            if (hadoopJob.isComplete()) {
              state = hadoopJob.isSuccessful() ? OK : FAILED;
            } else if (hadoopJob.getJobStatus().mapProgress() > 0) {
              state = RUNNING;
            } else {
              state = WAIT;
            }
          } else {
            state = WAIT;
          }
        } catch (IOException e) {
          // treat transient job-lookup failures as still waiting
          state = WAIT;
        }
      }
    }
    states.put(job, state);
  }
}
 
Contributor: genepi, Project: imputationserver, Lines: 58, Source: ParallelHadoopJobStep.java


Note: the org.apache.hadoop.mapred.RunningJob.isComplete examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors, who retain copyright; consult each project's license before distributing or reusing the code, and do not reproduce this article without permission.