本文整理汇总了Java中org.apache.hadoop.mapreduce.lib.chain.ChainReducer.setReducer方法的典型用法代码示例。如果您正苦于以下问题:Java ChainReducer.setReducer方法的具体用法?Java ChainReducer.setReducer怎么用?Java ChainReducer.setReducer使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.mapreduce.lib.chain.ChainReducer
的用法示例。
在下文中一共展示了ChainReducer.setReducer方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: run
import org.apache.hadoop.mapreduce.lib.chain.ChainReducer; //导入方法依赖的package包/类
@Override
public int run(String[] args) throws Exception {
    // Builds a two-stage chained job: an HBase table mapper feeding a
    // ChainReducer (HBaseReducer) whose output is post-processed by a
    // chained AggregateMapper before being written as text.
    //
    // args[0] — HBase table name to scan; args[1] — HDFS output directory.
    Configuration conf = this.getConf();
    // Separate, empty Configurations so the chained reducer/mapper each get
    // an isolated conf overlay (per ChainReducer/ChainMapper contract).
    Configuration reduceConf = new Configuration(false);
    Configuration mapConf = new Configuration(false);

    Job job = Job.getInstance(conf, "correlate logs");
    job.setJarByClass(CorrelateLogs.class);

    // Full-table scan; caching/cacheBlocks tuned per HBase MR guidelines.
    Scan scan = new Scan();
    scan.setCaching(500);
    scan.setCacheBlocks(false);
    scan.addFamily(Bytes.toBytes("struct"));

    // Sets TableInputFormat and wires HBaseMapper — the table is the job's
    // sole input source.
    TableMapReduceUtil.initTableMapperJob(args[0], scan, HBaseMapper.class, Text.class, LongWritable.class, job);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(LongWritable.class);
    job.setNumReduceTasks(1);

    // Reducer emits (Text, LongPairWritable); the chained AggregateMapper
    // then converts each pair to a final (Text, DoubleWritable) record.
    ChainReducer.setReducer(job, HBaseReducer.class, Text.class, LongWritable.class,
            Text.class, LongPairWritable.class, reduceConf);
    ChainReducer.addMapper(job, AggregateMapper.class, Text.class, LongPairWritable.class, Text.class, DoubleWritable.class, mapConf);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(DoubleWritable.class);
    job.setOutputFormatClass(TextOutputFormat.class);
    // BUG FIX: removed TextInputFormat.addInputPath(job, new Path(args[0])) —
    // the input is the HBase table configured above, and args[0] is a table
    // name, not a filesystem path. The call only set a dead, misleading
    // input-dir property.
    TextOutputFormat.setOutputPath(job, new Path(args[1]));
    return job.waitForCompletion(true) ? 0 : 1;
}
示例2: addMapReduce
import org.apache.hadoop.mapreduce.lib.chain.ChainReducer; //导入方法依赖的package包/类
@Override
public void addMapReduce(final Class<? extends Mapper> mapper,
                         final Class<? extends Reducer> combiner,
                         final Class<? extends Reducer> reducer,
                         final Class<? extends WritableComparator> comparator,
                         final Class<? extends WritableComparable> mapOutputKey,
                         final Class<? extends WritableComparable> mapOutputValue,
                         final Class<? extends WritableComparable> reduceOutputKey,
                         final Class<? extends WritableComparable> reduceOutputValue,
                         final Configuration configuration) {
    // Appends a mapper+reducer pair to the compiled job chain. A new Job is
    // started when the chain is empty or the previous stage already ended in
    // a reducer; otherwise the pair is folded into the current (open) job.
    final Configuration chainConf = overlayConfiguration(getConf(), configuration);
    try {
        final boolean startNewJob = State.NONE == this.state || State.REDUCER == this.state;
        final Job job;
        if (startNewJob) {
            // Fresh job backed by the merged configuration.
            job = Job.getInstance(chainConf);
            job.setJobName(makeClassName(mapper) + ARROW + makeClassName(reducer));
            HBaseAuthHelper.setHBaseAuthToken(chainConf, job);
            this.jobs.add(job);
        } else {
            // Extend the most recently created job and its display name.
            job = this.jobs.get(this.jobs.size() - 1);
            job.setJobName(job.getJobName() + ARROW + makeClassName(mapper) + ARROW + makeClassName(reducer));
        }

        // Reduce-task count: explicit setting first, legacy maximum as fallback.
        final int reduceTasks = this.getConf().getInt("mapreduce.job.reduces",
                this.getConf().getInt("mapreduce.tasktracker.reduce.tasks.maximum", 1));
        job.setNumReduceTasks(reduceTasks);

        // Chain the stage: mapper consumes (NullWritable, FaunusVertex);
        // reducer closes the job's map phase.
        ChainMapper.addMapper(job, mapper, NullWritable.class, FaunusVertex.class, mapOutputKey, mapOutputValue, chainConf);
        ChainReducer.setReducer(job, reducer, mapOutputKey, mapOutputValue, reduceOutputKey, reduceOutputValue, chainConf);

        if (null != comparator) {
            job.setSortComparatorClass(comparator);
        }
        if (null != combiner) {
            job.setCombinerClass(combiner);
        }

        // Default to compressed map output unless the caller already decided.
        final Configuration jobConf = job.getConfiguration();
        if (null == jobConf.get(MAPREDUCE_MAP_OUTPUT_COMPRESS, null)) {
            jobConf.setBoolean(MAPREDUCE_MAP_OUTPUT_COMPRESS, true);
        }
        if (null == jobConf.get(MAPREDUCE_MAP_OUTPUT_COMPRESS_CODEC, null)) {
            jobConf.setClass(MAPREDUCE_MAP_OUTPUT_COMPRESS_CODEC, DefaultCodec.class, CompressionCodec.class);
        }

        // A reducer was just installed, so the chain is closed for this job.
        this.state = State.REDUCER;
    } catch (IOException e) {
        throw new RuntimeException(e.getMessage(), e);
    }
}