

Java RunningJob.isSuccessful Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.RunningJob.isSuccessful. If you are looking for concrete, working uses of RunningJob.isSuccessful, the selected code examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.mapred.RunningJob.


The following presents 8 code examples of the RunningJob.isSuccessful method, sorted by popularity by default.
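Before the individual examples, here is a minimal sketch of the call pattern they all share: submit the job, poll RunningJob.isComplete(), and only then read isSuccessful(). The class name IsSuccessfulSketch and the helper method submitAndCheck are illustrative, not part of Hadoop; a fully configured JobConf (input/output paths, mapper, reducer, formats) is assumed.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class IsSuccessfulSketch {

  // Submits the given (already configured) job, waits for it to finish,
  // and returns the value of RunningJob.isSuccessful().
  public static boolean submitAndCheck(JobConf conf)
      throws IOException, InterruptedException {
    JobClient jc = new JobClient(conf);
    RunningJob job = jc.submitJob(conf);
    // isSuccessful() is only meaningful once the job has completed,
    // so poll isComplete() first.
    while (!job.isComplete()) {
      Thread.sleep(1000);
    }
    return job.isSuccessful();
  }
}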

Example 1: configure

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(1);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data (two lines written to a single input file)
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 67, Source: TestKeyFieldBasedComparator.java

Example 2: runJob

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on
/**
 * Submit/run a map/reduce job.
 * 
 * @param job
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: DataJoinJob.java

Example 3: configure

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path("build/test/test.mapred.spill");
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(2);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set("map.output.key.field.separator", " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data (two lines written to a single input file)
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines (both the lines must end up in the same
    //reducer since the partitioner takes the same key spec for all
    //lines)
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 69, Source: TestKeyFieldBasedComparator.java

Example 4: configure

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path("build/test/test.mapred.spill");
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(2);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data (two lines written to a single input file)
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines (both the lines must end up in the same
    //reducer since the partitioner takes the same key spec for all
    //lines)
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 69, Source: TestKeyFieldBasedComparator.java

Example 5: runJob

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on
public static boolean runJob( DMLConfig conf ) 
	throws Exception
{
	boolean ret = false;
	
	try
	{
		JobConf job;
		job = new JobConf(CleanupMR.class);
		job.setJobName("Cleanup-MR");
		
		//set up SystemML local tmp dir
		String dir = conf.getTextValue(DMLConfig.LOCAL_TMP_DIR);
		MRJobConfiguration.setSystemMLLocalTmpDir(job, dir); 
		
		//set mappers, reducers 
		int numNodes = InfrastructureAnalyzer.getRemoteParallelNodes();
		job.setMapperClass(CleanupMapper.class); //map-only
		job.setNumMapTasks(numNodes); //numMappers
		job.setNumReduceTasks( 0 );			
		
		//set input/output format, input path
		String inFileName = conf.getTextValue(DMLConfig.SCRATCH_SPACE)+"/cleanup_tasks";
		job.setInputFormat(NLineInputFormat.class);
	    job.setOutputFormat(NullOutputFormat.class);
	    
		Path path = new Path( inFileName );
	    FileInputFormat.setInputPaths(job, path);
	    writeCleanupTasksToFile(path, numNodes);
	    
		//disable automatic task timeouts and speculative task execution
		job.setInt(MRConfigurationNames.MR_TASK_TIMEOUT, 0);
		job.setMapSpeculativeExecution(false);
		
		/////
		// execute the MR job			
		RunningJob runjob = JobClient.runJob(job);
		
		ret = runjob.isSuccessful();
	}
	catch(Exception ex)
	{
		//don't raise an exception, just log an error message gracefully.
		LOG.error("Failed to run cleanup MR job. ",ex);
	}
	
	return ret;
}
 
Developer: apache, Project: systemml, Lines: 49, Source: CleanupMR.java

Example 6: runStitchupJob

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on
private static boolean runStitchupJob(String input, long rlen, long clen, int brlen, int bclen, long[] counts,
		int numReducers, int replication, String output) 
	throws Exception 
{
	JobConf job = new JobConf(SortMR.class);
	job.setJobName("SortIndexesMR");

	//setup input/output paths
	Path inpath = new Path(input);
	Path outpath = new Path(output);
	FileInputFormat.setInputPaths(job, inpath);
	FileOutputFormat.setOutputPath(job, outpath);	    
	MapReduceTool.deleteFileIfExistOnHDFS(outpath, job);
	
	//set number of reducers (1 if local mode)
	if( InfrastructureAnalyzer.isLocalMode(job) )
		job.setNumReduceTasks(1);
	else
		MRJobConfiguration.setNumReducers(job, numReducers, numReducers);
	
	//setup input/output format
	InputInfo iinfo = InputInfo.BinaryBlockInputInfo;
	OutputInfo oinfo = OutputInfo.BinaryBlockOutputInfo;
	job.setInputFormat(iinfo.inputFormatClass);
	job.setOutputFormat(oinfo.outputFormatClass);
	CompactInputFormat.setKeyValueClasses(job, MatrixIndexes.class, MatrixBlock.class);
	
	//setup mapper/reducer/output classes
	MRJobConfiguration.setInputInfo(job, (byte)0, InputInfo.BinaryBlockInputInfo, brlen, bclen, ConvertTarget.BLOCK);
	job.setMapperClass(IndexSortStitchupMapper.class);
	job.setReducerClass(IndexSortStitchupReducer.class);	
	job.setOutputKeyClass(oinfo.outputKeyClass);
	job.setOutputValueClass(oinfo.outputValueClass); 
	MRJobConfiguration.setBlockSize(job, (byte)0, brlen, bclen);
	MRJobConfiguration.setMatricesDimensions(job, new byte[]{0}, new long[]{rlen}, new long[]{clen});
	
	//compute shifted prefix sum of offsets and put into configuration
	long[] cumsumCounts = new long[counts.length];
	long sum = 0;
	for( int i=0; i<counts.length; i++ ) {
		cumsumCounts[i] = sum;
		sum += counts[i];
	}
	job.set(SORT_INDEXES_OFFSETS, Arrays.toString(cumsumCounts));
	
	//setup replication factor
	job.setInt(MRConfigurationNames.DFS_REPLICATION, replication);
	
	//set unique working dir
	MRJobConfiguration.setUniqueWorkingDir(job);
	
	//run mr job
	RunningJob runJob = JobClient.runJob(job);
	
	return runJob.isSuccessful();
}
 
Developer: apache, Project: systemml, Lines: 57, Source: SortMR.java

Example 7: run

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on
private void run(boolean ioEx, boolean rtEx) throws Exception {
  Path inDir = new Path("testing/mt/input");
  Path outDir = new Path("testing/mt/output");

  // Hack for local FS that does not have the concept of a 'mounting point'
  if (isLocalFS()) {
    String localPathRoot = System.getProperty("test.build.data", "/tmp")
            .replace(' ', '+');
    inDir = new Path(localPathRoot, inDir);
    outDir = new Path(localPathRoot, outDir);
  }


  JobConf conf = createJobConf();
  FileSystem fs = FileSystem.get(conf);

  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  {
    DataOutputStream file = fs.create(new Path(inDir, "part-0"));
    file.writeBytes("a\nb\n\nc\nd\ne");
    file.close();
  }

  conf.setJobName("mt");
  conf.setInputFormat(TextInputFormat.class);

  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  conf.setMapOutputKeyClass(LongWritable.class);
  conf.setMapOutputValueClass(Text.class);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  conf.setMapperClass(IDMap.class);
  conf.setReducerClass(IDReduce.class);

  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);

  conf.setMapRunnerClass(MultithreadedMapRunner.class);
  
  conf.setInt(MultithreadedMapper.NUM_THREADS, 2);

  if (ioEx) {
    conf.setBoolean("multithreaded.ioException", true);
  }
  if (rtEx) {
    conf.setBoolean("multithreaded.runtimeException", true);
  }
  JobClient jc = new JobClient(conf);
  RunningJob job = jc.submitJob(conf);
  while (!job.isComplete()) {
    Thread.sleep(100);
  }

  if (job.isSuccessful()) {
    assertFalse(ioEx || rtEx);
  }
  else {
    assertTrue(ioEx || rtEx);
  }

}
 
Developer: hopshadoop, Project: hops, Lines: 70, Source: TestMultithreadedMapRunner.java

Example 8: updateProgress

import org.apache.hadoop.mapred.RunningJob; // import the package/class this method depends on
@Override
public void updateProgress() {

	for (HadoopJob job : jobs.values()) {

		int state = WAIT;

		if (job != null) {

			String hadoopJobId = job.getJobId();

			if (hadoopJobId != null) {

				RunningJob hadoopJob = HadoopUtil.getInstance().getJob(
						hadoopJobId);
				try {

					if (hadoopJob != null) {

						if (hadoopJob.isComplete()) {
							// finished: isSuccessful() separates OK from FAILED
							if (hadoopJob.isSuccessful()) {
								state = OK;
							} else {
								state = FAILED;
							}
						} else {
							// still running: report RUNNING once map tasks show progress
							if (hadoopJob.getJobStatus().mapProgress() > 0) {
								state = RUNNING;
							} else {
								state = WAIT;
							}
						}

					} else {
						state = WAIT;
					}
				} catch (IOException e) {
					state = WAIT;
				}

			}

		}

		states.put(job, state);

	}

}
 
Developer: genepi, Project: imputationserver, Lines: 58, Source: ParallelHadoopJobStep.java


Note: The org.apache.hadoop.mapred.RunningJob.isSuccessful method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For redistribution and use, refer to the license of the corresponding project; do not reproduce without permission.