Java LongSumReducer Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer. If you are wondering what the LongSumReducer class is for, how to use it, or what real-world usage looks like, the curated class code examples below should help.


The LongSumReducer class belongs to the org.apache.hadoop.mapreduce.lib.reduce package. A total of 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
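For context, LongSumReducer&lt;KEY&gt; extends Reducer&lt;KEY, LongWritable, KEY, LongWritable&gt; and simply emits, for each key, the sum of all incoming LongWritable values. Because addition is associative and commutative, the same class is safe to register as a combiner, a pattern several examples below use. The following simplified sketch of its behavior (an illustration, not the verbatim Hadoop source) makes this concrete:

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Reducer;

// Simplified sketch of LongSumReducer: for each key, sum all incoming
// LongWritable values and emit a single (key, sum) pair.
public class LongSumReducerSketch<KEY>
    extends Reducer<KEY, LongWritable, KEY, LongWritable> {

  private final LongWritable result = new LongWritable();

  @Override
  protected void reduce(KEY key, Iterable<LongWritable> values, Context context)
      throws IOException, InterruptedException {
    long sum = 0;
    for (LongWritable value : values) {
      sum += value.get(); // accumulate the partial counts for this key
    }
    result.set(sum);
    context.write(key, result);
  }
}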

Example 1: doMapReduce

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
private void doMapReduce(final Class<? extends Test> cmd) throws IOException,
      InterruptedException, ClassNotFoundException {
  Configuration conf = getConf();
  Path inputDir = writeInputFile(conf);
  conf.set(EvaluationMapTask.CMD_KEY, cmd.getName());
  conf.set(EvaluationMapTask.PE_KEY, getClass().getName());
  Job job = Job.getInstance(conf);
  job.setJarByClass(PerformanceEvaluation.class);
  job.setJobName("HBase Performance Evaluation");

  job.setInputFormatClass(PeInputFormat.class);
  PeInputFormat.setInputPaths(job, inputDir);

  job.setOutputKeyClass(LongWritable.class);
  job.setOutputValueClass(LongWritable.class);

  job.setMapperClass(EvaluationMapTask.class);
  job.setReducerClass(LongSumReducer.class);
  job.setNumReduceTasks(1);

  job.setOutputFormatClass(TextOutputFormat.class);
  TextOutputFormat.setOutputPath(job, new Path(inputDir.getParent(), "outputs"));
  TableMapReduceUtil.addDependencyJars(job);
  TableMapReduceUtil.initCredentials(job);
  job.waitForCompletion(true);
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: PerformanceEvaluation.java

Example 2: doMapReduce

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
private void doMapReduce(final Class<? extends Test> cmd) throws IOException,
      InterruptedException, ClassNotFoundException {
  Path inputDir = writeInputFile(this.conf);
  this.conf.set(EvaluationMapTask.CMD_KEY, cmd.getName());
  this.conf.set(EvaluationMapTask.PE_KEY, getClass().getName());
  Job job = new Job(this.conf);
  job.setJarByClass(PerformanceEvaluation.class);
  job.setJobName("HBase Performance Evaluation");

  job.setInputFormatClass(PeInputFormat.class);
  PeInputFormat.setInputPaths(job, inputDir);

  job.setOutputKeyClass(LongWritable.class);
  job.setOutputValueClass(LongWritable.class);

  job.setMapperClass(EvaluationMapTask.class);
  job.setReducerClass(LongSumReducer.class);
  job.setNumReduceTasks(1);

  job.setOutputFormatClass(TextOutputFormat.class);
  TextOutputFormat.setOutputPath(job, new Path(inputDir, "outputs"));

  job.waitForCompletion(true);
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 26, Source: PerformanceEvaluation.java

Example 3: doMapReduce

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
private void doMapReduce(final Class<? extends Test> cmd) throws IOException,
      InterruptedException, ClassNotFoundException {
  Configuration conf = getConf();
  Path inputDir = writeInputFile(conf);
  conf.set(EvaluationMapTask.CMD_KEY, cmd.getName());
  conf.set(EvaluationMapTask.PE_KEY, getClass().getName());
  Job job = new Job(conf);
  job.setJarByClass(PerformanceEvaluation.class);
  job.setJobName("HBase Performance Evaluation");

  job.setInputFormatClass(PeInputFormat.class);
  PeInputFormat.setInputPaths(job, inputDir);

  job.setOutputKeyClass(LongWritable.class);
  job.setOutputValueClass(LongWritable.class);

  job.setMapperClass(EvaluationMapTask.class);
  job.setReducerClass(LongSumReducer.class);
  job.setNumReduceTasks(1);

  job.setOutputFormatClass(TextOutputFormat.class);
  TextOutputFormat.setOutputPath(job, new Path(inputDir.getParent(), "outputs"));
  TableMapReduceUtil.addDependencyJars(job);
  TableMapReduceUtil.initCredentials(job);
  job.waitForCompletion(true);
}
 
Developer: tenggyut, Project: HIndex, Lines: 27, Source: PerformanceEvaluation.java

Example 4: run

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
public int run(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
    Configuration conf = super.getConf();
    // TODO: Add to the configuration the postcode index in customer csv file
    // TODO: Add to the configuration the referential file name
    
    Job job = Job.getInstance(conf, JOB_NAME);
    // TODO: Add the cache file URI to the job
    job.setJarByClass(Main.class);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    job.setMapperClass(CsvFieldCountMapper.class);
    job.setCombinerClass(LongSumReducer.class);
    job.setReducerClass(LongSumReducer.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(LongWritable.class);

    FileInputFormat.setInputPaths(job, new Path(args[1]));
    FileOutputFormat.setOutputPath(job, new Path(args[2]));

    boolean success = job.waitForCompletion(true);

    return success ? 0 : 1;
}
 
Developer: ch4mpy, Project: hadoop2, Lines: 27, Source: Main.java
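Example 5 below shows the same run method with these TODO items filled in: the postcode field index and the referential file name are set on the configuration, and the cache file URI is added to the job.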

Example 5: run

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
public int run(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
    Configuration conf = super.getConf();
    conf.setInt(CsvFieldCountMapper.CSV_FIELD_IDX, 2);
    conf.set(CsvFieldCountMapper.FILTER_CACHE_FILE_NAME, "fr_urban_postcodes.txt");
    
    Job job = Job.getInstance(conf, JOB_NAME);
    job.addCacheFile(new Path(args[0]).toUri());
    job.setJarByClass(Main.class);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    job.setMapperClass(CsvFieldCountMapper.class);
    job.setCombinerClass(LongSumReducer.class);
    job.setReducerClass(LongSumReducer.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(LongWritable.class);

    FileInputFormat.setInputPaths(job, new Path(args[1]));
    FileOutputFormat.setOutputPath(job, new Path(args[2]));

    boolean success = job.waitForCompletion(true);

    return success ? 0 : 1;
}
 
Developer: ch4mpy, Project: hadoop2, Lines: 27, Source: Main.java

Example 6: run

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
@Override
public int run(String[] args) throws Exception {
    Configuration conf = this.getConf();
    Job job = Job.getInstance(conf, "word count");
    job.setJarByClass(WordCountImproved.class);
 
    job.setInputFormatClass(TextInputFormat.class);
 
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(LongSumReducer.class);
    job.setReducerClass(LongSumReducer.class);
 
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(LongWritable.class);
    job.setOutputFormatClass(TextOutputFormat.class);
    TextInputFormat.addInputPath(job, new Path(args[0]));
    TextOutputFormat.setOutputPath(job, new Path(args[1]));
 
    return job.waitForCompletion(true) ? 0 : 1;
}
 
Developer: hanhanwu, Project: Hanhan-Hadoop-MapReduce, Lines: 21, Source: WordCountImproved.java
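The TokenizerMapper referenced in Example 6 is not part of the excerpt. A minimal sketch of such a mapper (hypothetical; the project's actual implementation may differ) emits each token with a count of 1, so that LongSumReducer, registered as both combiner and reducer, can aggregate the totals; running it as a combiner computes partial sums map-side and cuts shuffle traffic:

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Hypothetical TokenizerMapper: split each input line into tokens and
// emit (token, 1); LongSumReducer then sums the counts per token.
public class TokenizerMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

  private static final LongWritable ONE = new LongWritable(1);
  private final Text word = new Text();

  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    StringTokenizer tokenizer = new StringTokenizer(value.toString());
    while (tokenizer.hasMoreTokens()) {
      word.set(tokenizer.nextToken());
      context.write(word, ONE);
    }
  }
}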

Example 7: run

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
@Override
public int run(String[] args) throws Exception {

	Configuration conf = this.getConf();

	Job job = Job.getInstance(conf, "Extract server type");
	job.setJarByClass(ServerType.class);

	FileInputFormat.addInputPath(job, new Path(args[0]));
	FileOutputFormat.setOutputPath(job, new Path(args[1]));

	job.setMapperClass(ServerTypeExtracter.class);
	job.setReducerClass(LongSumReducer.class);
	job.setInputFormatClass(WarcInputFormat.class);
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(LongWritable.class);

	// Execute job and return status
	return job.waitForCompletion(true) ? 0 : 1;

}
 
Developer: norvigaward, Project: warcexamples, Lines: 22, Source: ServerType.java

Example 8: main

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {

        Configuration conf = new Configuration();

        Job job = new Job(conf, "invwordcount");
        job.setJarByClass(InverseWCJob.class);
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(LongWritable.class);

        job.setMapperClass(InverseWordCountMapper.class);
        job.setReducerClass(LongSumReducer.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
 
Developer: ScrapCodes, Project: MapReduceJobs, Lines: 21, Source: InverseWCJob.java

Example 9: run

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
/**
 * Builds and runs the Hadoop job.
 * @return	0 if the Hadoop job completes successfully and 1 otherwise.
 */
@Override
public int run(String[] arg0) throws Exception {
	Configuration conf = getConf();
	//
	Job job = new Job(conf);
	job.setJarByClass(WATServerType.class);
	job.setNumReduceTasks(1);
	
	String inputPath = "data/*.warc.wat.gz";
	//inputPath = "s3n://aws-publicdatasets/common-crawl/crawl-data/CC-MAIN-2013-48/segments/1386163035819/wet/CC-MAIN-20131204131715-00000-ip-10-33-133-15.ec2.internal.warc.wet.gz";
	//inputPath = "s3n://aws-publicdatasets/common-crawl/crawl-data/CC-MAIN-2013-48/segments/1386163035819/wet/*.warc.wet.gz";
	LOG.info("Input path: " + inputPath);
	FileInputFormat.addInputPath(job, new Path(inputPath));
	
	String outputPath = "/tmp/cc/";
	FileSystem fs = FileSystem.newInstance(conf);
	if (fs.exists(new Path(outputPath))) {
		fs.delete(new Path(outputPath), true);
	}
	FileOutputFormat.setOutputPath(job, new Path(outputPath));
	
	job.setInputFormatClass(WARCFileInputFormat.class);
	job.setOutputFormatClass(TextOutputFormat.class);
	
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(LongWritable.class);

	job.setMapperClass(ServerTypeMap.ServerMapper.class);
	job.setReducerClass(LongSumReducer.class);

	if (job.waitForCompletion(true)) {
		return 0;
	} else {
		return 1;
	}
}
 
Developer: TeamHG-Memex, Project: common-crawl-mapreduce, Lines: 41, Source: WATServerType.java

Example 10: run

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
/**
 * Builds and runs the Hadoop job.
 * @return	0 if the Hadoop job completes successfully and 1 otherwise.
 */
@Override
public int run(String[] arg0) throws Exception {
	Configuration conf = getConf();
	//
	Job job = new Job(conf);
	job.setJarByClass(WETWordCount.class);
	job.setNumReduceTasks(1);
	
	String inputPath = "data/*.warc.wet.gz";
	//inputPath = "s3n://aws-publicdatasets/common-crawl/crawl-data/CC-MAIN-2013-48/segments/1386163035819/wet/CC-MAIN-20131204131715-00000-ip-10-33-133-15.ec2.internal.warc.wet.gz";
	//inputPath = "s3n://aws-publicdatasets/common-crawl/crawl-data/CC-MAIN-2013-48/segments/1386163035819/wet/*.warc.wet.gz";
	LOG.info("Input path: " + inputPath);
	FileInputFormat.addInputPath(job, new Path(inputPath));
	
	String outputPath = "/tmp/cc/";
	FileSystem fs = FileSystem.newInstance(conf);
	if (fs.exists(new Path(outputPath))) {
		fs.delete(new Path(outputPath), true);
	}
	FileOutputFormat.setOutputPath(job, new Path(outputPath));
	
	job.setInputFormatClass(WARCFileInputFormat.class);
	job.setOutputFormatClass(TextOutputFormat.class);
	
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(LongWritable.class);

	job.setMapperClass(org.commoncrawl.examples.mapreduce.WordCounterMap.WordCountMapper.class);
	// LongSumReducer does the actual counting in this word-frequency task
	job.setReducerClass(LongSumReducer.class);

	if (job.waitForCompletion(true)) {
		return 0;
	} else {
		return 1;
	}
}
 
Developer: TeamHG-Memex, Project: common-crawl-mapreduce, Lines: 42, Source: WETWordCount.java

Example 11: run

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
/**
 * Builds and runs the Hadoop job.
 * @return	0 if the Hadoop job completes successfully and -1 otherwise.
 */
@Override
public int run(String[] arg0) throws Exception {
	Configuration conf = getConf();
	//
	Job job = new Job(conf);
	job.setJarByClass(WARCTagCounter.class);
	job.setNumReduceTasks(1);
	
	String inputPath = "data/*.warc.gz";
	//inputPath = "s3n://aws-publicdatasets/common-crawl/crawl-data/CC-MAIN-2013-48/segments/1386163035819/wet/CC-MAIN-20131204131715-00000-ip-10-33-133-15.ec2.internal.warc.wet.gz";
	//inputPath = "s3n://aws-publicdatasets/common-crawl/crawl-data/CC-MAIN-2013-48/segments/1386163035819/wet/*.warc.wet.gz";
	LOG.info("Input path: " + inputPath);
	FileInputFormat.addInputPath(job, new Path(inputPath));
	
	String outputPath = "/tmp/cc/";
	FileSystem fs = FileSystem.newInstance(conf);
	if (fs.exists(new Path(outputPath))) {
		fs.delete(new Path(outputPath), true);
	}
	FileOutputFormat.setOutputPath(job, new Path(outputPath));

	job.setInputFormatClass(WARCFileInputFormat.class);
	job.setOutputFormatClass(TextOutputFormat.class);

	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(LongWritable.class);

	job.setMapperClass(TagCounterMap.TagCounterMapper.class);
	job.setReducerClass(LongSumReducer.class);

	return job.waitForCompletion(true) ? 0 : -1;
}
 
Developer: TeamHG-Memex, Project: common-crawl-mapreduce, Lines: 37, Source: WARCTagCounter.java

Example 12: doMapReduce

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
private void doMapReduce(final Class<? extends Test> cmd) throws IOException,
      InterruptedException, ClassNotFoundException {
  Path inputDir = writeInputFile(this.conf);
  this.conf.set(EvaluationMapTask.CMD_KEY, cmd.getName());
  this.conf.set(EvaluationMapTask.PE_KEY, getClass().getName());
  Job job = new Job(this.conf);
  job.setJarByClass(PerformanceEvaluation.class);
  job.setJobName("HBase Performance Evaluation");

  job.setInputFormatClass(PeInputFormat.class);
  PeInputFormat.setInputPaths(job, inputDir);

  job.setOutputKeyClass(LongWritable.class);
  job.setOutputValueClass(LongWritable.class);

  job.setMapperClass(EvaluationMapTask.class);
  job.setReducerClass(LongSumReducer.class);

  job.setNumReduceTasks(1);

  job.setOutputFormatClass(TextOutputFormat.class);
  TextOutputFormat.setOutputPath(job, new Path(inputDir, "outputs"));

  TableMapReduceUtil.addDependencyJars(job);
  // Add a Class from the hbase.jar so it gets registered too.
  TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
    org.apache.hadoop.hbase.util.Bytes.class);

  TableMapReduceUtil.initCredentials(job);

  job.waitForCompletion(true);
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 33, Source: PerformanceEvaluation.java

Example 13: doMapReduce

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
private void doMapReduce(final Class<? extends Test> cmd, TestOptions opts) throws IOException,
      InterruptedException, ClassNotFoundException {
  Configuration conf = getConf();
  Path inputDir = writeInputFile(conf, opts);
  conf.set(EvaluationMapTask.CMD_KEY, cmd.getName());
  conf.set(EvaluationMapTask.PE_KEY, getClass().getName());
  Job job = new Job(conf);
  job.setJarByClass(PerformanceEvaluation.class);
  job.setJobName("HBase Performance Evaluation");

  job.setInputFormatClass(NLineInputFormat.class);
  NLineInputFormat.setInputPaths(job, inputDir);
  // this is default, but be explicit about it just in case.
  NLineInputFormat.setNumLinesPerSplit(job, 1);

  job.setOutputKeyClass(LongWritable.class);
  job.setOutputValueClass(LongWritable.class);

  job.setMapperClass(EvaluationMapTask.class);
  job.setReducerClass(LongSumReducer.class);

  job.setNumReduceTasks(1);

  job.setOutputFormatClass(TextOutputFormat.class);
  TextOutputFormat.setOutputPath(job, new Path(inputDir.getParent(), "outputs"));

  TableMapReduceUtil.addDependencyJars(job);
  TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
    DescriptiveStatistics.class, // commons-math
    ObjectMapper.class);         // jackson-mapper-asl

  TableMapReduceUtil.initCredentials(job);

  job.waitForCompletion(true);
}
 
Developer: tenggyut, Project: HIndex, Lines: 36, Source: PerformanceEvaluation.java

Example 14: before

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
@Before
public void before() throws URISyntaxException {
    CsvFieldCountMapper mapper = new CsvFieldCountMapper();
    LongSumReducer<Text> combiner = new LongSumReducer<Text>();
    LongSumReducer<Text> reducer = new LongSumReducer<Text>();
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer, combiner);
    Configuration conf = mapReduceDriver.getConfiguration();
    conf.setInt(CsvFieldCountMapper.CSV_FIELD_IDX, 2);
    conf.set(CsvFieldCountMapper.FILTER_CACHE_FILE_NAME, "fr_urban_postcodes.txt");
    mapReduceDriver.addCacheFile(new File("target/test-classes/referential/fr_urban_postcodes.txt").toURI());
}
 
Developer: ch4mpy, Project: hadoop2, Lines: 13, Source: PostcodeMRTest.java

Example 15: before

import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; // import the required package/class
@Before
public void before() throws URISyntaxException {
    CsvFieldCountMapper mapper = new CsvFieldCountMapper();
    LongSumReducer<Text> combiner = new LongSumReducer<Text>();
    LongSumReducer<Text> reducer = new LongSumReducer<Text>();
    mapReduceDriver = MapReduceDriver.newMapReduceDriver(mapper, reducer, combiner);
    Configuration conf = mapReduceDriver.getConfiguration();
    conf.setInt(CsvFieldCountMapper.CSV_FIELD_IDX, 2);
}
 
Developer: ch4mpy, Project: hadoop2, Lines: 10, Source: PostcodeMRTest.java
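Examples 14 and 15 configure an MRUnit MapReduceDriver with LongSumReducer acting as both combiner and reducer. A test method built on the driver from Example 15 might look like the following (a hypothetical sketch: the semicolon-separated line format and the expected output are illustrative, since CsvFieldCountMapper's parsing logic is not shown; assumes org.junit.Test and the Hadoop Writable classes are imported):

// Hypothetical test added to the same class as before(): one CSV line
// whose field at index 2 is "75001" should yield the count (75001, 1).
@Test
public void countsThirdCsvField() throws IOException {
    mapReduceDriver
        .withInput(new LongWritable(0), new Text("id-1;Jane Doe;75001"))
        .withOutput(new Text("75001"), new LongWritable(1L))
        .runTest();
}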


Note: The org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Consult the corresponding project's license before distributing or using the code. Do not reproduce this article without permission.