

Java Job.waitForCompletion Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.Job.waitForCompletion. If you are wondering what Job.waitForCompletion does, how to use it, or where to find examples, the curated code samples below should help. You can also explore further usage examples of the containing class, org.apache.hadoop.mapreduce.Job.


The following presents 15 code examples of Job.waitForCompletion, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
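Before the individual examples, a minimal, self-contained driver skeleton may help orient readers. It is a sketch: MinimalDriver is a hypothetical class name, and it relies on the default identity Mapper and Reducer so no extra classes are needed. waitForCompletion(boolean verbose) submits the job if it has not been submitted yet, blocks until it finishes, and returns true only on success.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MinimalDriver {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "minimal example");
        job.setJarByClass(MinimalDriver.class);
        // no mapper/reducer set: the identity classes pass (offset, line)
        // records straight through, so declare those types as the output
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // Blocking style: wait for the job, printing progress (verbose = true),
        // and turn the success flag into a process exit code.
        System.exit(job.waitForCompletion(true) ? 0 : 1);

        // Non-blocking alternative used by some examples below:
        //   job.submit();                  // returns immediately
        //   job.waitForCompletion(false);  // wait quietly, no progress output
        //   boolean ok = job.isSuccessful();
    }
}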

Example 1: main

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        Configuration configuration = new Configuration();
        configuration.set("dictionary", args[2]);  // make the dictionary path available to all tasks

        Job job = Job.getInstance(configuration);
        job.setJarByClass(SentimentAnalysis.class);
        job.setMapperClass(SentimentSplit.class);
        job.setReducerClass(SentimentCollection.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
 
Developer ID: yogykwan, Project: mapreduce-samples, Lines: 17, Source: SentimentAnalysis.java
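The driver makes the dictionary path available to its tasks through the job Configuration. The SentimentSplit mapper itself is not reproduced on this page; as a sketch of the usual pattern (DictionaryAwareMapper is a hypothetical class, not the project's actual code), a mapper reads the value back in setup():

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class DictionaryAwareMapper extends Mapper<Object, Text, Text, IntWritable> {
    private String dictionaryPath;

    @Override
    protected void setup(Context context) {
        // values set on the driver's Configuration are visible in every task
        dictionaryPath = context.getConfiguration().get("dictionary");
    }
}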

Example 2: run

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
@Override
public int run(String[] args) throws Exception {
  if (args.length != 2) {
    System.err.println("Usage: wordstddev <in> <out>");
    return 0;
  }

  Configuration conf = getConf();

  Job job = Job.getInstance(conf, "word stddev");
  job.setJarByClass(WordStandardDeviation.class);
  job.setMapperClass(WordStandardDeviationMapper.class);
  job.setCombinerClass(WordStandardDeviationReducer.class);
  job.setReducerClass(WordStandardDeviationReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(LongWritable.class);
  FileInputFormat.addInputPath(job, new Path(args[0]));
  Path outputpath = new Path(args[1]);
  FileOutputFormat.setOutputPath(job, outputpath);
  boolean result = job.waitForCompletion(true);

  // read output and calculate standard deviation
  stddev = readAndCalcStdDev(outputpath, conf);

  return (result ? 0 : 1);
}
 
Developer ID: naver, Project: hadoop, Lines: 27, Source: WordStandardDeviation.java
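The @Override and the getConf() call indicate that WordStandardDeviation implements Hadoop's Tool interface, so it is meant to be launched through ToolRunner, which strips generic options (-D key=value, -files, and so on) from the command line before run() sees the remaining arguments. A typical companion main, sketched under that assumption:

import org.apache.hadoop.util.ToolRunner;

public static void main(String[] args) throws Exception {
    // ToolRunner parses the generic options into the Tool's Configuration
    // and then invokes run() with whatever arguments are left over
    System.exit(ToolRunner.run(new WordStandardDeviation(), args));
}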

Example 3: testChainFail

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
/**
 * Tests one of the mappers throwing an exception.
 * 
 * @throws Exception
 */
public void testChainFail() throws Exception {

  Configuration conf = createJobConf();

  Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 1, 0, input);
  job.setJobName("chain");

  ChainMapper.addMapper(job, Mapper.class, LongWritable.class, Text.class,
      LongWritable.class, Text.class, null);

  ChainMapper.addMapper(job, FailMap.class, LongWritable.class, Text.class,
      IntWritable.class, Text.class, null);

  ChainMapper.addMapper(job, Mapper.class, IntWritable.class, Text.class,
      LongWritable.class, Text.class, null);

  job.waitForCompletion(true);
  assertTrue("Job Not failed", !job.isSuccessful());
}
 
Developer ID: naver, Project: hadoop, Lines: 25, Source: TestChainErrors.java
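FailMap is not reproduced on this page. A minimal failing mapper compatible with the types registered above might look like the following sketch (not the actual test code):

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public static class FailMap extends Mapper<LongWritable, Text, IntWritable, Text> {
  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException {
    // an exception thrown from map() fails the task attempt; once the
    // retry limit is exhausted, the whole job is marked unsuccessful
    throw new IOException("deliberate failure for the chain test");
  }
}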

Example 4: run

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public int run(String[] args) throws Exception {
  Job job = Job.getInstance(getConf());
  if (args.length != 2) {
    usage();
    return 1;
  }
  TeraInputFormat.setInputPaths(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1]));
  job.setJobName("TeraValidate");
  job.setJarByClass(TeraValidate.class);
  job.setMapperClass(ValidateMapper.class);
  job.setReducerClass(ValidateReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  // force a single reducer
  job.setNumReduceTasks(1);
  // force a single split 
  FileInputFormat.setMinInputSplitSize(job, Long.MAX_VALUE);
  job.setInputFormatClass(TeraInputFormat.class);
  return job.waitForCompletion(true) ? 0 : 1;
}
 
Developer ID: naver, Project: hadoop, Lines: 22, Source: TeraValidate.java

Example 5: jobListFriends

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
private Job jobListFriends(String inputPath, String outputPath) throws IOException, InterruptedException, ClassNotFoundException{      
    Job job = new Job();   // note: this constructor is deprecated; Job.getInstance() is the preferred factory
    job.setJarByClass(WordCount.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(Map.class);
    job.setReducerClass(Reduce.class);

    job.setInputFormatClass(KeyValueTextInputFormat.class);   // note: make sure the matching KeyValueTextInputFormat is imported
    job.setOutputFormatClass(TextOutputFormat.class);

    FileInputFormat.addInputPath(job, new Path(inputPath));
    FileOutputFormat.setOutputPath(job, new Path(outputPath));

    job.waitForCompletion(true);

    return job;
}
 
Developer ID: dhruvmalik007, Project: Deep_learning_using_Java, Lines: 20, Source: Recommendation_program.java
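Unlike the default TextInputFormat, the KeyValueTextInputFormat used here splits each input line at the first separator byte (a tab by default), so Map.class receives Text keys rather than LongWritable byte offsets. The separator can be changed through a configuration property; the property name below is the Hadoop 2 one and should be checked against the version in use:

// an input line such as
//   alice<TAB>bob carol
// reaches the mapper as key = "alice", value = "bob carol"
job.setInputFormatClass(KeyValueTextInputFormat.class);
job.getConfiguration().set(
    "mapreduce.input.keyvaluelinerecordreader.key.value.separator", ",");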

Example 6: run

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public int run(String[] args) throws Exception {
  Job job = Job.getInstance(getConf());
  if (args.length != 2) {
    usage();
    return 2;
  }
  TeraInputFormat.setInputPaths(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1]));
  job.setJobName("TeraSum");
  job.setJarByClass(TeraChecksum.class);
  job.setMapperClass(ChecksumMapper.class);
  job.setReducerClass(ChecksumReducer.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(Unsigned16.class);
  // force a single reducer
  job.setNumReduceTasks(1);
  job.setInputFormatClass(TeraInputFormat.class);
  return job.waitForCompletion(true) ? 0 : 1;
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: TeraChecksum.java

Example 7: main

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
/**
 * The main driver for word count map/reduce program. Invoke this method to
 * submit the map/reduce job.
 * 
 * @throws IOException
 *           When there are communication problems with the job tracker.
 */
@SuppressWarnings("unchecked")
public static void main(String[] args) 
  throws IOException, InterruptedException, ClassNotFoundException  {
  Job job = ValueAggregatorJob.createValueAggregatorJob(args
      , new Class[] {WordCountPlugInClass.class});
  job.setJarByClass(AggregateWordCount.class);
  int ret = job.waitForCompletion(true) ? 0 : 1;
  System.exit(ret);
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: AggregateWordCount.java

Example 8: runImport

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
/**
 * Runs an import job with the specified command line args
 * @param args
 * @return true if job completed successfully
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
boolean runImport(String[] args)
throws IOException, InterruptedException, ClassNotFoundException {
  // need to make a copy of the configuration to make sure different temp dirs are used.
  GenericOptionsParser opts =
    new GenericOptionsParser(new Configuration(UTIL.getConfiguration()), args);
  Configuration conf = opts.getConfiguration();
  args = opts.getRemainingArgs();
  Job job = Import.createSubmittableJob(conf, args);
  job.waitForCompletion(false);
  return job.isSuccessful();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 20, Source: TestImportExport.java
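Note the false passed to waitForCompletion here: the boolean argument is purely a verbosity switch. With true the call prints progress and counters while it blocks; with false it blocks silently. In both cases the return value, mirrored by job.isSuccessful(), reports whether the job succeeded, which is exactly what this helper returns.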

Example 9: main

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public static void main(String[] args) {
    if (args.length != 2) {
        System.err.println("Usage: Year Traffic Statistics <input path> <output path>");
        System.exit(-1);
    }
    String nginxLogInput = args[0];
    String nginxLogOutput = args[1];

    Configuration configuration = new Configuration();
    try {
        Job job = Job.getInstance(configuration);
        job.setJobName("YearTrafficStatistics");

        job.setJarByClass(YearTrafficStatisticsMapReduce.class);

        FileInputFormat.addInputPath(job, new Path(nginxLogInput));
        FileOutputFormat.setOutputPath(job, new Path(nginxLogOutput));

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        job.setMapperClass(YearTrafficStatisticsMapper.class);
        job.setReducerClass(YearTrafficStatisticsReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.waitForCompletion(true);
    } catch (IOException | InterruptedException | ClassNotFoundException e) {
        e.printStackTrace();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-mapreduce, Lines: 35, Source: YearTrafficStatisticsMapReduce.java
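Two details in this driver generalize. Because the mapper emits (Text, Text) while the reducer's final output is (Text, IntWritable), the map-side types must be declared separately with setMapOutputKeyClass/setMapOutputValueClass; the setOutputKeyClass/setOutputValueClass pair describes only the job's final output. Also, this main logs failures but always exits normally; a driver that reports the outcome to its caller could end instead with the following sketch:

        boolean ok = job.waitForCompletion(true);
        System.exit(ok ? 0 : 1);   // propagate success or failure to the shell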

Example 10: runExport

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
/**
 * Runs an export job with the specified command line args
 * @param args
 * @return true if job completed successfully
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
boolean runExport(String[] args)
throws IOException, InterruptedException, ClassNotFoundException {
  // need to make a copy of the configuration to make sure different temp dirs are used.
  GenericOptionsParser opts =
    new GenericOptionsParser(new Configuration(UTIL.getConfiguration()), args);
  Configuration conf = opts.getConfiguration();
  args = opts.getRemainingArgs();
  Job job = Export.createSubmittableJob(conf, args);
  job.waitForCompletion(false);
  return job.isSuccessful();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 20, Source: TestImportExport.java

Example 11: run

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
@Override
public int run(String[] args) throws Exception {
  String[] otherArgs = new GenericOptionsParser(getConf(), args).getRemainingArgs();
  Job job = createSubmittableJob(otherArgs);
  if (job == null) return 1;
  if (!job.waitForCompletion(true)) {
    LOG.info("Map-reduce job failed!");
    if (bulkload) {
      LOG.info("Files are not bulkloaded!");
    }
    return 1;
  }
  int code = 0;
  if (bulkload) {
    code = new LoadIncrementalHFiles(this.getConf()).run(new String[]{this.bulkloadDir.toString(),
        this.dstTableName});
    if (code == 0) {
      // bulkloadDir is deleted only if LoadIncrementalHFiles was successful, so that one can
      // rerun LoadIncrementalHFiles.
      FileSystem fs = FileSystem.get(this.getConf());
      if (!fs.delete(this.bulkloadDir, true)) {
        LOG.error("Deleting folder " + bulkloadDir + " failed!");
        code = 1;
      }
    }
  }
  return code;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 29, Source: CopyTable.java

Example 12: run

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
@Override
public int run(String[] args) throws Exception {
  if (args.length != 2) {
    System.err.println("Usage: wordmedian <in> <out>");
    return 0;
  }

  setConf(new Configuration());
  Configuration conf = getConf();

  Job job = Job.getInstance(conf, "word median");
  job.setJarByClass(WordMedian.class);
  job.setMapperClass(WordMedianMapper.class);
  job.setCombinerClass(WordMedianReducer.class);
  job.setReducerClass(WordMedianReducer.class);
  job.setOutputKeyClass(IntWritable.class);
  job.setOutputValueClass(IntWritable.class);
  FileInputFormat.addInputPath(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1]));
  boolean result = job.waitForCompletion(true);

  // Wait for JOB 1 -- get middle value to check for Median

  long totalWords = job.getCounters()
      .getGroup(TaskCounter.class.getCanonicalName())
      .findCounter("MAP_OUTPUT_RECORDS", "Map output records").getValue();
  int medianIndex1 = (int) Math.ceil((totalWords / 2.0));
  int medianIndex2 = (int) Math.floor((totalWords / 2.0));

  median = readAndFindMedian(args[1], medianIndex1, medianIndex2, conf);

  return (result ? 0 : 1);
}
 
Developer ID: naver, Project: hadoop, Lines: 34, Source: WordMedian.java
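The counter lookup above goes through the counter group and the counter's string names. Because TaskCounter is already imported, the same value can be fetched more directly with the enum overload of findCounter; a sketch with identical semantics:

long totalWords = job.getCounters()
    .findCounter(TaskCounter.MAP_OUTPUT_RECORDS)
    .getValue();   // records emitted by all map tasks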

Example 13: main

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public static void main(String[] args) {
    if (args.length != 2) {
        System.err.println("Usage: Month Traffic Statistics <input path> <output path>");
        System.exit(-1);
    }
    String nginxLogInput = args[0];
    String nginxLogOutput = args[1];

    Configuration configuration = new Configuration();
    try {
        Job job = Job.getInstance(configuration);
        job.setJobName("MonthTrafficStatistics");

        job.setJarByClass(MonthTrafficStatisticsMapReduce.class);

        FileInputFormat.addInputPath(job, new Path(nginxLogInput));
        FileOutputFormat.setOutputPath(job, new Path(nginxLogOutput));

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        job.setMapperClass(MonthTrafficStatisticsMapper.class);
        job.setReducerClass(MonthTrafficStatisticsReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.waitForCompletion(true);
    } catch (IOException | InterruptedException | ClassNotFoundException e) {
        e.printStackTrace();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-mapreduce, Lines: 35, Source: MonthTrafficStatisticsMapReduce.java

Example 14: run

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
@Override
public int run(String[] arg0) throws Exception {
  List<Scan> mainSiteScan = new ArrayList<Scan>();

  // scan of the table holding the website data
  Scan siteScan = new Scan();
  siteScan.setAttribute("scan.attributes.table.name", Bytes.toBytes("myTest07WebSite"));
  System.out.println(siteScan.getAttribute("scan.attributes.table.name"));
  mainSiteScan.add(siteScan);

  // scan of the table we created earlier that holds the site-hit data
  Scan webSitehitScan = new Scan();
  webSitehitScan.setAttribute("scan.attributes.table.name", Bytes.toBytes("myTest07SiteHits"));
  System.out.println(webSitehitScan.getAttribute("scan.attributes.table.name"));
  mainSiteScan.add(webSitehitScan);

  // the Configuration picks up the HBase/Hadoop server details
  Configuration conf = new Configuration();
  Job job = new Job(conf);
  job.setJarByClass(TableWebsiteJob.class);

  TableMapReduceUtil.initTableMapperJob(
      mainSiteScan,               // tables to read from
      TableWebsiteMapper.class,
      Text.class,
      IntWritable.class,
      job);
  // myTest07SiteHitsPlusWebSite is the third table, which receives the combined data
  TableMapReduceUtil.initTableReducerJob(
      "myTest07SiteHitsPlusWebSite",
      TableWebsiteReducer.class,
      job);
  job.waitForCompletion(true);  // note: the boolean result is ignored, so run() always reports success
  return 0;
}
 
Developer ID: PacktPublishing, Project: HBase-High-Performance-Cookbook, Lines: 33, Source: TableWebsiteJob.java
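The raw string "scan.attributes.table.name" set on each Scan is how TableMapReduceUtil tells a multi-table mapper which table a given Scan targets. The HBase client API exposes this key as the constant Scan.SCAN_ATTRIBUTES_TABLE_NAME, so, assuming that constant is available in the version in use, the attribute can be set without the magic string:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

Scan siteScan = new Scan();
siteScan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME,
    Bytes.toBytes("myTest07WebSite"));  // same effect as the string literal above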

Example 15: main

import org.apache.hadoop.mapreduce.Job; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(UnitSum.class);
        job.setMapperClass(PassMapper.class);
        job.setReducerClass(SumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.waitForCompletion(true);
    }
 
Developer ID: yogykwan, Project: mapreduce-samples, Lines: 14, Source: UnitSum.java


Note: The org.apache.hadoop.mapreduce.Job.waitForCompletion method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please follow the corresponding project's license. Do not reproduce without permission.