

Java Job.waitForCompletion Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.Job.waitForCompletion. If you are unsure what Job.waitForCompletion does or how to call it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapreduce.Job.


Fifteen code examples of Job.waitForCompletion are shown below, ordered by popularity by default.
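Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share (the MinimalDriver class name is a placeholder introduced for illustration, not taken from any project below): configure a Job, set the mapper/reducer and the input/output paths, then call waitForCompletion, which submits the job and blocks until it finishes. The boolean argument controls whether progress is printed to the console, and the boolean return value reports whether the job succeeded.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MinimalDriver {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "minimal example");
        job.setJarByClass(MinimalDriver.class);
        job.setMapperClass(Mapper.class);           // identity mapper
        job.setReducerClass(Reducer.class);         // identity reducer
        job.setOutputKeyClass(LongWritable.class);  // key type emitted by the identity mapper
        job.setOutputValueClass(Text.class);        // value type emitted by the identity mapper
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // submit and block; true = print progress, return value = success
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}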

Example 1: main

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
public static void main(String[] args) throws Exception {

        Configuration configuration = new Configuration();
        configuration.set("dictionary", args[2]);

        Job job = Job.getInstance(configuration);
        job.setJarByClass(SentimentAnalysis.class);
        job.setMapperClass(SentimentSplit.class);
        job.setReducerClass(SentimentCollection.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
 
Developer: yogykwan; Project: mapreduce-samples; Lines: 17; Source: SentimentAnalysis.java

Example 2: run

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
@Override
public int run(String[] args) throws Exception {
  if (args.length != 2) {
    System.err.println("Usage: wordstddev <in> <out>");
    return 0;
  }

  Configuration conf = getConf();

  Job job = Job.getInstance(conf, "word stddev");
  job.setJarByClass(WordStandardDeviation.class);
  job.setMapperClass(WordStandardDeviationMapper.class);
  job.setCombinerClass(WordStandardDeviationReducer.class);
  job.setReducerClass(WordStandardDeviationReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(LongWritable.class);
  FileInputFormat.addInputPath(job, new Path(args[0]));
  Path outputpath = new Path(args[1]);
  FileOutputFormat.setOutputPath(job, outputpath);
  boolean result = job.waitForCompletion(true);

  // read output and calculate standard deviation
  stddev = readAndCalcStdDev(outputpath, conf);

  return (result ? 0 : 1);
}
 
Developer: naver; Project: hadoop; Lines: 27; Source: WordStandardDeviation.java

Example 3: testChainFail

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
/**
 * Tests one of the mappers throwing an exception.
 * 
 * @throws Exception
 */
public void testChainFail() throws Exception {

  Configuration conf = createJobConf();

  Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 1, 0, input);
  job.setJobName("chain");

  ChainMapper.addMapper(job, Mapper.class, LongWritable.class, Text.class,
      LongWritable.class, Text.class, null);

  ChainMapper.addMapper(job, FailMap.class, LongWritable.class, Text.class,
      IntWritable.class, Text.class, null);

  ChainMapper.addMapper(job, Mapper.class, IntWritable.class, Text.class,
      LongWritable.class, Text.class, null);

  job.waitForCompletion(true);
  assertTrue("Job did not fail", !job.isSuccessful());
}
 
Developer: naver; Project: hadoop; Lines: 25; Source: TestChainErrors.java

Example 4: run

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
public int run(String[] args) throws Exception {
  Job job = Job.getInstance(getConf());
  if (args.length != 2) {
    usage();
    return 1;
  }
  TeraInputFormat.setInputPaths(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1]));
  job.setJobName("TeraValidate");
  job.setJarByClass(TeraValidate.class);
  job.setMapperClass(ValidateMapper.class);
  job.setReducerClass(ValidateReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  // force a single reducer
  job.setNumReduceTasks(1);
  // force a single split 
  FileInputFormat.setMinInputSplitSize(job, Long.MAX_VALUE);
  job.setInputFormatClass(TeraInputFormat.class);
  return job.waitForCompletion(true) ? 0 : 1;
}
 
Developer: naver; Project: hadoop; Lines: 22; Source: TeraValidate.java

Example 5: jobListFriends

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
private Job jobListFriends(String inputPath, String outputPath) throws IOException, InterruptedException, ClassNotFoundException{      
    Job job = new Job();
    job.setJarByClass(WordCount.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(Map.class);
    job.setReducerClass(Reduce.class);

    job.setInputFormatClass(KeyValueTextInputFormat.class);   // note: requires the new-API import, org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat
    job.setOutputFormatClass(TextOutputFormat.class);

    FileInputFormat.addInputPath(job, new Path(inputPath));
    FileOutputFormat.setOutputPath(job, new Path(outputPath));

    job.waitForCompletion(true);

    return job;
}
 
Developer: dhruvmalik007; Project: Deep_learning_using_Java; Lines: 20; Source: Recommendation_program.java

Example 6: run

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
public int run(String[] args) throws Exception {
  Job job = Job.getInstance(getConf());
  if (args.length != 2) {
    usage();
    return 2;
  }
  TeraInputFormat.setInputPaths(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1]));
  job.setJobName("TeraSum");
  job.setJarByClass(TeraChecksum.class);
  job.setMapperClass(ChecksumMapper.class);
  job.setReducerClass(ChecksumReducer.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(Unsigned16.class);
  // force a single reducer
  job.setNumReduceTasks(1);
  job.setInputFormatClass(TeraInputFormat.class);
  return job.waitForCompletion(true) ? 0 : 1;
}
 
Developer: naver; Project: hadoop; Lines: 20; Source: TeraChecksum.java

Example 7: main

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
/**
 * The main driver for word count map/reduce program. Invoke this method to
 * submit the map/reduce job.
 * 
 * @throws IOException
 *           When there are communication problems with the job tracker.
 */
@SuppressWarnings("unchecked")
public static void main(String[] args) 
  throws IOException, InterruptedException, ClassNotFoundException  {
  Job job = ValueAggregatorJob.createValueAggregatorJob(args
      , new Class[] {WordCountPlugInClass.class});
  job.setJarByClass(AggregateWordCount.class);
  int ret = job.waitForCompletion(true) ? 0 : 1;
  System.exit(ret);
}
 
Developer: naver; Project: hadoop; Lines: 17; Source: AggregateWordCount.java

Example 8: runImport

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
/**
 * Runs an import job with the specified command line args
 * @param args
 * @return true if job completed successfully
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
boolean runImport(String[] args)
throws IOException, InterruptedException, ClassNotFoundException {
  // make a copy of the configuration to make sure different temp dirs are used
  GenericOptionsParser opts =
    new GenericOptionsParser(new Configuration(UTIL.getConfiguration()), args);
  Configuration conf = opts.getConfiguration();
  args = opts.getRemainingArgs();
  Job job = Import.createSubmittableJob(conf, args);
  job.waitForCompletion(false);
  return job.isSuccessful();
}
 
Developer: fengchen8086; Project: ditb; Lines: 20; Source: TestImportExport.java

Example 9: main

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
public static void main(String[] args) {
    if (args.length != 2) {
        System.err.println("Usage: Year Traffic Statistics <input path> <output path>");
        System.exit(-1);
    }
    String nginxLogInput = args[0];
    String nginxLogOutput = args[1];

    Configuration configuration = new Configuration();
    try {
        Job job = Job.getInstance(configuration);
        job.setJobName("YearTrafficStatistics");

        job.setJarByClass(YearTrafficStatisticsMapReduce.class);

        FileInputFormat.addInputPath(job, new Path(nginxLogInput));
        FileOutputFormat.setOutputPath(job, new Path(nginxLogOutput));

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        job.setMapperClass(YearTrafficStatisticsMapper.class);
        job.setReducerClass(YearTrafficStatisticsReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.waitForCompletion(true);
    } catch (IOException | InterruptedException | ClassNotFoundException e) {
        e.printStackTrace();
    }
}
 
Developer: mumuhadoop; Project: mumu-mapreduce; Lines: 35; Source: YearTrafficStatisticsMapReduce.java

Example 10: runExport

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
/**
 * Runs an export job with the specified command line args
 * @param args
 * @return true if job completed successfully
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
boolean runExport(String[] args)
throws IOException, InterruptedException, ClassNotFoundException {
  // make a copy of the configuration to make sure different temp dirs are used
  GenericOptionsParser opts =
    new GenericOptionsParser(new Configuration(UTIL.getConfiguration()), args);
  Configuration conf = opts.getConfiguration();
  args = opts.getRemainingArgs();
  Job job = Export.createSubmittableJob(conf, args);
  job.waitForCompletion(false);
  return job.isSuccessful();
}
 
Developer: fengchen8086; Project: ditb; Lines: 20; Source: TestImportExport.java
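A note on examples 8 and 10: both pass false to waitForCompletion, which blocks just the same but suppresses progress output, and then query job.isSuccessful() separately. That separation is useful in tests where a failed job may be the expected outcome. A minimal sketch of this quiet-submission pattern (the QuietSubmit class and runQuietly helper names are hypothetical):

import java.io.IOException;
import org.apache.hadoop.mapreduce.Job;

public class QuietSubmit {
    // Block until the job finishes without printing progress, then
    // report success separately -- the pattern used by runImport/runExport above.
    static boolean runQuietly(Job job)
            throws IOException, InterruptedException, ClassNotFoundException {
        job.waitForCompletion(false); // false = no progress output on the console
        return job.isSuccessful();    // inspect the final job state
    }
}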

Example 11: run

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
@Override
public int run(String[] args) throws Exception {
  String[] otherArgs = new GenericOptionsParser(getConf(), args).getRemainingArgs();
  Job job = createSubmittableJob(otherArgs);
  if (job == null) return 1;
  if (!job.waitForCompletion(true)) {
    LOG.info("Map-reduce job failed!");
    if (bulkload) {
      LOG.info("Files are not bulkloaded!");
    }
    return 1;
  }
  int code = 0;
  if (bulkload) {
    code = new LoadIncrementalHFiles(this.getConf()).run(new String[]{this.bulkloadDir.toString(),
        this.dstTableName});
    if (code == 0) {
      // bulkloadDir is deleted only if LoadIncrementalHFiles was successful,
      // so that one can rerun LoadIncrementalHFiles.
      FileSystem fs = FileSystem.get(this.getConf());
      if (!fs.delete(this.bulkloadDir, true)) {
        LOG.error("Deleting folder " + bulkloadDir + " failed!");
        code = 1;
      }
    }
  }
  return code;
}
 
Developer: fengchen8086; Project: ditb; Lines: 29; Source: CopyTable.java

Example 12: run

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
@Override
public int run(String[] args) throws Exception {
  if (args.length != 2) {
    System.err.println("Usage: wordmedian <in> <out>");
    return 0;
  }

  setConf(new Configuration());
  Configuration conf = getConf();

  Job job = Job.getInstance(conf, "word median");
  job.setJarByClass(WordMedian.class);
  job.setMapperClass(WordMedianMapper.class);
  job.setCombinerClass(WordMedianReducer.class);
  job.setReducerClass(WordMedianReducer.class);
  job.setOutputKeyClass(IntWritable.class);
  job.setOutputValueClass(IntWritable.class);
  FileInputFormat.addInputPath(job, new Path(args[0]));
  FileOutputFormat.setOutputPath(job, new Path(args[1]));
  boolean result = job.waitForCompletion(true);

  // Job 1 has finished; compute the middle position(s) used to locate the median

  long totalWords = job.getCounters()
      .getGroup(TaskCounter.class.getCanonicalName())
      .findCounter("MAP_OUTPUT_RECORDS", "Map output records").getValue();
  int medianIndex1 = (int) Math.ceil((totalWords / 2.0));
  int medianIndex2 = (int) Math.floor((totalWords / 2.0));

  median = readAndFindMedian(args[1], medianIndex1, medianIndex2, conf);

  return (result ? 0 : 1);
}
 
Developer: naver; Project: hadoop; Lines: 34; Source: WordMedian.java
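As example 12 shows, once waitForCompletion returns, the job's counters can be read from the same Job handle. A small sketch of an equivalent lookup, assuming the new-API org.apache.hadoop.mapreduce.TaskCounter enum, which names the same "Map output records" counter used above (the CounterLookup class name is hypothetical):

import java.io.IOException;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskCounter;

public class CounterLookup {
    // Enum-based counter lookup, equivalent to the group/name lookup in example 12.
    static long mapOutputRecords(Job job) throws IOException {
        return job.getCounters()
                  .findCounter(TaskCounter.MAP_OUTPUT_RECORDS)
                  .getValue();
    }
}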

Example 13: main

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
public static void main(String[] args) {
    if (args.length != 2) {
        System.err.println("Usage: Month Traffic Statistics <input path> <output path>");
        System.exit(-1);
    }
    String nginxLogInput = args[0];
    String nginxLogOutput = args[1];

    Configuration configuration = new Configuration();
    try {
        Job job = Job.getInstance(configuration);
        job.setJobName("MonthTrafficStatistics");

        job.setJarByClass(MonthTrafficStatisticsMapReduce.class);

        FileInputFormat.addInputPath(job, new Path(nginxLogInput));
        FileOutputFormat.setOutputPath(job, new Path(nginxLogOutput));

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        job.setMapperClass(MonthTrafficStatisticsMapper.class);
        job.setReducerClass(MonthTrafficStatisticsReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.waitForCompletion(true);
    } catch (IOException | InterruptedException | ClassNotFoundException e) {
        e.printStackTrace();
    }
}
 
Developer: mumuhadoop; Project: mumu-mapreduce; Lines: 35; Source: MonthTrafficStatisticsMapReduce.java

Example 14: run

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
@Override
public int run(String[] arg0) throws Exception {
	List<Scan> mainSiteScan = new ArrayList<Scan>();
	Scan siteScan = new Scan();
	siteScan.setAttribute("scan.attributes.table.name", Bytes.toBytes("myTest07WebSite"));
	System.out.println(siteScan.getAttribute("scan.attributes.table.name"));
	mainSiteScan.add(siteScan);

	// look up the table we created earlier that holds the site-hit data
	Scan webSitehitScan = new Scan();
	webSitehitScan.setAttribute("scan.attributes.table.name", Bytes.toBytes("myTest07SiteHits"));
	System.out.println(webSitehitScan.getAttribute("scan.attributes.table.name"));
	mainSiteScan.add(webSitehitScan);

	// the configuration carries the HBase/Hadoop server details
	Configuration conf = new Configuration();
	Job job = new Job(conf);
	job.setJarByClass(TableWebsiteJob.class); // set the jar containing this job's classes

	TableMapReduceUtil.initTableMapperJob(
			mainSiteScan, // tables to read from
			TableWebsiteMapper.class,
			Text.class,
			IntWritable.class,
			job);
	// myTest07SiteHitsPlusWebSite is the third table, which receives the combined data
	TableMapReduceUtil.initTableReducerJob(
			"myTest07SiteHitsPlusWebSite",
			TableWebsiteReducer.class,
			job);
	job.waitForCompletion(true);
	return 0;
}
 
Developer: PacktPublishing; Project: HBase-High-Performance-Cookbook; Lines: 33; Source: TableWebsiteJob.java

Example 15: main

import org.apache.hadoop.mapreduce.Job; // import for the package/class this method depends on
public static void main(String[] args) throws Exception {

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(UnitSum.class);
        job.setMapperClass(PassMapper.class);
        job.setReducerClass(SumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.waitForCompletion(true);
    }
 
Developer: yogykwan; Project: mapreduce-samples; Lines: 14; Source: UnitSum.java


Note: The org.apache.hadoop.mapreduce.Job.waitForCompletion examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their developers, and copyright remains with the original authors; consult each project's License before redistributing or reusing the code. Do not reproduce without permission.