

Java RunningJob.isSuccessful Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapred.RunningJob.isSuccessful from open-source projects. If you are wondering what RunningJob.isSuccessful does, how to call it, or where to find working examples, the curated code samples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.mapred.RunningJob.


The 8 code examples of RunningJob.isSuccessful shown below are ordered by popularity.
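All eight examples share the same basic pattern: submit a job, wait until RunningJob.isComplete() returns true, then call RunningJob.isSuccessful() to distinguish success from failure. The following minimal sketch illustrates that pattern in isolation; it assumes the caller supplies a fully configured JobConf and is an illustration only, not code from any of the projects below.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class IsSuccessfulSketch {
  // Submit a job, poll until it finishes, and report whether it succeeded.
  static boolean submitAndWait(JobConf conf) throws Exception {
    JobClient jc = new JobClient(conf);
    RunningJob job = jc.submitJob(conf);
    while (!job.isComplete()) {   // completion says nothing about success...
      Thread.sleep(1000);
    }
    return job.isSuccessful();    // ...so check the final job state explicitly
  }
}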

Example 1: configure

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(1);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data: two lines in a single file
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Developer: naver · Project: hadoop · Lines: 67 · Source: TestKeyFieldBasedComparator.java

Example 2: runJob

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
/**
 * Submit/run a map/reduce job.
 * 
 * @param job the configured map/reduce job to submit
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // ignore the interrupt and keep polling
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
 
Developer: naver · Project: hadoop · Lines: 33 · Source: DataJoinJob.java
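A hedged sketch of how the helper above might be invoked from a driver. The DataJoinDriver class, the command-line argument handling, and the job settings are illustrative assumptions rather than part of the original example, and the import of DataJoinJob from its own package is assumed as well.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class DataJoinDriver {
  public static void main(String[] args) throws Exception {
    // Hypothetical driver: input and output paths come from the command line.
    JobConf job = new JobConf(DataJoinDriver.class);
    job.setJobName("data-join");
    FileInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    boolean ok = DataJoinJob.runJob(job); // helper from Example 2
    System.exit(ok ? 0 : 1);
  }
}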

Example 3: configure

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path("build/test/test.mapred.spill");
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(2);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set("map.output.key.field.separator", " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data: two lines in a single file
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines (both the lines must end up in the same
    //reducer since the partitioner takes the same key spec for all
    //lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Developer: Nextzero · Project: hadoop-2.6.0-cdh5.4.3 · Lines: 69 · Source: TestKeyFieldBasedComparator.java

Example 4: configure

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path("build/test/test.mapred.spill");
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(2);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data: two lines in a single file
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines (both the lines must end up in the same
    //reducer since the partitioner takes the same key spec for all
    //lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Developer: ict-carch · Project: hadoop-plus · Lines: 69 · Source: TestKeyFieldBasedComparator.java

Example 5: runJob

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
public static boolean runJob( DMLConfig conf ) 
	throws Exception
{
	boolean ret = false;
	
	try
	{
		JobConf job;
		job = new JobConf(CleanupMR.class);
		job.setJobName("Cleanup-MR");
		
		//set up SystemML local tmp dir
		String dir = conf.getTextValue(DMLConfig.LOCAL_TMP_DIR);
		MRJobConfiguration.setSystemMLLocalTmpDir(job, dir); 
		
		//set mappers, reducers 
		int numNodes = InfrastructureAnalyzer.getRemoteParallelNodes();
		job.setMapperClass(CleanupMapper.class); //map-only
		job.setNumMapTasks(numNodes); //numMappers
		job.setNumReduceTasks( 0 );			
		
		//set input/output format, input path
		String inFileName = conf.getTextValue(DMLConfig.SCRATCH_SPACE)+"/cleanup_tasks";
		job.setInputFormat(NLineInputFormat.class);
	    job.setOutputFormat(NullOutputFormat.class);
	    
		Path path = new Path( inFileName );
	    FileInputFormat.setInputPaths(job, path);
	    writeCleanupTasksToFile(path, numNodes);
	    
		//disable automatic tasks timeouts and speculative task exec
		job.setInt(MRConfigurationNames.MR_TASK_TIMEOUT, 0);
		job.setMapSpeculativeExecution(false);
		
		/////
		// execute the MR job			
		RunningJob runjob = JobClient.runJob(job);
		
		ret = runjob.isSuccessful();
	}
	catch(Exception ex)
	{
		//don't raise an exception, just log an error message gracefully.
		LOG.error("Failed to run cleanup MR job. ",ex);
	}
	
	return ret;
}
 
Developer: apache · Project: systemml · Lines: 49 · Source: CleanupMR.java

Example 6: runStitchupJob

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
private static boolean runStitchupJob(String input, long rlen, long clen, int brlen, int bclen, long[] counts,
		int numReducers, int replication, String output) 
	throws Exception 
{
	JobConf job = new JobConf(SortMR.class);
	job.setJobName("SortIndexesMR");

	//setup input/output paths
	Path inpath = new Path(input);
	Path outpath = new Path(output);
	FileInputFormat.setInputPaths(job, inpath);
	FileOutputFormat.setOutputPath(job, outpath);	    
	MapReduceTool.deleteFileIfExistOnHDFS(outpath, job);
	
	//set number of reducers (1 if local mode)
	if( InfrastructureAnalyzer.isLocalMode(job) )
		job.setNumReduceTasks(1);
	else
		MRJobConfiguration.setNumReducers(job, numReducers, numReducers);
	
	//setup input/output format
	InputInfo iinfo = InputInfo.BinaryBlockInputInfo;
	OutputInfo oinfo = OutputInfo.BinaryBlockOutputInfo;
	job.setInputFormat(iinfo.inputFormatClass);
	job.setOutputFormat(oinfo.outputFormatClass);
	CompactInputFormat.setKeyValueClasses(job, MatrixIndexes.class, MatrixBlock.class);
	
	//setup mapper/reducer/output classes
	MRJobConfiguration.setInputInfo(job, (byte)0, InputInfo.BinaryBlockInputInfo, brlen, bclen, ConvertTarget.BLOCK);
	job.setMapperClass(IndexSortStitchupMapper.class);
	job.setReducerClass(IndexSortStitchupReducer.class);	
	job.setOutputKeyClass(oinfo.outputKeyClass);
	job.setOutputValueClass(oinfo.outputValueClass); 
	MRJobConfiguration.setBlockSize(job, (byte)0, brlen, bclen);
	MRJobConfiguration.setMatricesDimensions(job, new byte[]{0}, new long[]{rlen}, new long[]{clen});
	
	//compute shifted prefix sum of offsets and put into configuration
	long[] cumsumCounts = new long[counts.length];
	long sum = 0;
	for( int i=0; i<counts.length; i++ ) {
		cumsumCounts[i] = sum;
		sum += counts[i];
	}
	job.set(SORT_INDEXES_OFFSETS, Arrays.toString(cumsumCounts));
	
	//setup replication factor
	job.setInt(MRConfigurationNames.DFS_REPLICATION, replication);
	
	//set unique working dir
	MRJobConfiguration.setUniqueWorkingDir(job);
	
	//run mr job
	RunningJob runJob = JobClient.runJob(job);
	
	return runJob.isSuccessful();
}
 
Developer: apache · Project: systemml · Lines: 57 · Source: SortMR.java
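Examples 5 and 6 use a different submission style from the earlier examples: the static, blocking JobClient.runJob(JobConf) call waits for the job internally, so isSuccessful() can be checked immediately without a polling loop. Below is a minimal hedged sketch of that variant; note that in most Hadoop versions runJob also throws an exception when the job fails, which is why Example 5 wraps the call in a try/catch.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class BlockingSubmitSketch {
  // Submit via the blocking API and report success as a boolean.
  static boolean runBlocking(JobConf job) {
    try {
      RunningJob rj = JobClient.runJob(job); // blocks until the job finishes
      return rj.isSuccessful();              // largely defensive: runJob throws on failure
    } catch (Exception ex) {
      return false;
    }
  }
}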

Example 7: run

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
private void run(boolean ioEx, boolean rtEx) throws Exception {
  Path inDir = new Path("testing/mt/input");
  Path outDir = new Path("testing/mt/output");

  // Hack for local FS that does not have the concept of a 'mounting point'
  if (isLocalFS()) {
    String localPathRoot = System.getProperty("test.build.data", "/tmp")
            .replace(' ', '+');
    inDir = new Path(localPathRoot, inDir);
    outDir = new Path(localPathRoot, outDir);
  }


  JobConf conf = createJobConf();
  FileSystem fs = FileSystem.get(conf);

  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  {
    DataOutputStream file = fs.create(new Path(inDir, "part-0"));
    file.writeBytes("a\nb\n\nc\nd\ne");
    file.close();
  }

  conf.setJobName("mt");
  conf.setInputFormat(TextInputFormat.class);

  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);

  conf.setMapOutputKeyClass(LongWritable.class);
  conf.setMapOutputValueClass(Text.class);

  conf.setOutputFormat(TextOutputFormat.class);

  conf.setMapperClass(IDMap.class);
  conf.setReducerClass(IDReduce.class);

  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);

  conf.setMapRunnerClass(MultithreadedMapRunner.class);
  
  conf.setInt(MultithreadedMapper.NUM_THREADS, 2);

  if (ioEx) {
    conf.setBoolean("multithreaded.ioException", true);
  }
  if (rtEx) {
    conf.setBoolean("multithreaded.runtimeException", true);
  }
  JobClient jc = new JobClient(conf);
  RunningJob job = jc.submitJob(conf);
  while (!job.isComplete()) {
    Thread.sleep(100);
  }

  if (job.isSuccessful()) {
    assertFalse(ioEx || rtEx);
  } else {
    assertTrue(ioEx || rtEx);
  }
}
 
Developer: hopshadoop · Project: hops · Lines: 70 · Source: TestMultithreadedMapRunner.java

Example 8: updateProgress

import org.apache.hadoop.mapred.RunningJob; // import the class the method depends on
@Override
public void updateProgress() {
	for (HadoopJob job : jobs.values()) {
		int state = WAIT;
		if (job != null) {
			String hadoopJobId = job.getJobId();
			if (hadoopJobId != null) {
				RunningJob hadoopJob = HadoopUtil.getInstance().getJob(hadoopJobId);
				try {
					if (hadoopJob != null) {
						if (hadoopJob.isComplete()) {
							// finished: distinguish success from failure
							state = hadoopJob.isSuccessful() ? OK : FAILED;
						} else {
							// not complete: count as running once map tasks have made progress
							state = hadoopJob.getJobStatus().mapProgress() > 0 ? RUNNING : WAIT;
						}
					}
				} catch (IOException e) {
					// status lookup failed; leave the job in the waiting state
					state = WAIT;
				}
			}
		}
		states.put(job, state);
	}
}
 
Developer: genepi · Project: imputationserver · Lines: 58 · Source: ParallelHadoopJobStep.java


Note: the org.apache.hadoop.mapred.RunningJob.isSuccessful examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from projects contributed by the open-source community, and copyright of the source code remains with the original authors. Consult each project's license before using or redistributing the code; do not republish without permission.