Java Reducer Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapred.Reducer. If you are wondering what the Reducer class does, how it is used, or what real-world usage looks like, the curated examples below may help.


The Reducer class belongs to the org.apache.hadoop.mapred package. The following 13 code examples show the class in use, ordered by popularity.

Example 1: combineAndSpill

import org.apache.hadoop.mapred.Reducer; // import the required package/class
private void combineAndSpill(
    RawKeyValueIterator kvIter,
    Counters.Counter inCounter) throws IOException {
  JobConf job = jobConf;
  // Instantiate the user-supplied combiner class via reflection.
  Reducer combiner = ReflectionUtils.newInstance(combinerClass, job);
  Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
  Class<V> valClass = (Class<V>) job.getMapOutputValueClass();
  RawComparator<K> comparator =
      (RawComparator<K>) job.getCombinerKeyGroupingComparator();
  try {
    CombineValuesIterator values = new CombineValuesIterator(
        kvIter, comparator, keyClass, valClass, job, Reporter.NULL,
        inCounter);
    // Feed each key group to the combiner; its output is spilled
    // through combineCollector.
    while (values.more()) {
      combiner.reduce(values.getKey(), values, combineCollector,
                      Reporter.NULL);
      values.nextKey();
    }
  } finally {
    combiner.close();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 23, Source: MergeManagerImpl.java
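
For context, here is a minimal sketch of what a combinerClass instantiated by the code above could look like: an old-API (mapred) Reducer that sums IntWritable counts per key. The class name SumCombiner and the Text/IntWritable types are illustrative assumptions, not part of the original source.

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

// Hypothetical combiner: MapReduceBase supplies no-op configure()/close(),
// which combineAndSpill relies on when it calls combiner.close().
public class SumCombiner extends MapReduceBase
    implements Reducer<Text, IntWritable, Text, IntWritable> {

  @Override
  public void reduce(Text key, Iterator<IntWritable> values,
                     OutputCollector<Text, IntWritable> output,
                     Reporter reporter) throws IOException {
    int sum = 0;
    while (values.hasNext()) {
      sum += values.next().get();  // accumulate partial counts for this key
    }
    output.collect(key, new IntWritable(sum));
  }
}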

Example 2: HadoopReduceCombineFunction

import org.apache.hadoop.mapred.Reducer; // import the required package/class
/**
 * Maps two Hadoop Reducers (mapred API) to a combinable Flink GroupReduceFunction.
 *
 * @param hadoopReducer The Hadoop Reducer that is mapped to a GroupReduceFunction.
 * @param hadoopCombiner The Hadoop Reducer that is mapped to the combiner function.
 * @param conf The JobConf that is used to configure both Hadoop Reducers.
 */
public HadoopReduceCombineFunction(Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> hadoopReducer,
							Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN> hadoopCombiner, JobConf conf) {
	if (hadoopReducer == null) {
		throw new NullPointerException("Reducer may not be null.");
	}
	if (hadoopCombiner == null) {
		throw new NullPointerException("Combiner may not be null.");
	}
	if (conf == null) {
		throw new NullPointerException("JobConf may not be null.");
	}

	this.reducer = hadoopReducer;
	this.combiner = hadoopCombiner;
	this.jobConf = conf;
}
 
Developer: axbaretto, Project: flink, Lines: 24, Source: HadoopReduceCombineFunction.java
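
A hedged usage sketch of this constructor, assuming the flink-hadoop-compatibility module is on the classpath; the sample data is made up. Hadoop's LongSumReducer can serve as both reducer and combiner because its input and output types are identical:

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.hadoopcompatibility.mapred.HadoopReduceCombineFunction;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.LongSumReducer;

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<Tuple2<Text, LongWritable>> words = env.fromElements(
    new Tuple2<>(new Text("flink"), new LongWritable(1)),
    new Tuple2<>(new Text("hadoop"), new LongWritable(1)),
    new Tuple2<>(new Text("flink"), new LongWritable(1)));

// Group by the Text key and apply the wrapped Hadoop reducer/combiner pair.
DataSet<Tuple2<Text, LongWritable>> counts = words
    .groupBy(0)
    .reduceGroup(new HadoopReduceCombineFunction<Text, LongWritable, Text, LongWritable>(
        new LongSumReducer<Text>(), new LongSumReducer<Text>(), new JobConf()));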

Example 3: open

import org.apache.hadoop.mapred.Reducer; // import the required package/class
@SuppressWarnings("unchecked")
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	// Pass the deserialized JobConf on to the wrapped Hadoop Reducer.
	this.reducer.configure(jobConf);

	this.reporter = new HadoopDummyReporter();
	this.reduceCollector = new HadoopOutputCollector<KEYOUT, VALUEOUT>();
	// Resolve the reducer's input key type (type parameter 0 of Reducer)
	// to build the key serializer for the tuple-unwrapping iterator.
	Class<KEYIN> inKeyClass = (Class<KEYIN>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 0);
	TypeSerializer<KEYIN> keySerializer = TypeExtractor.getForClass(inKeyClass).createSerializer(getRuntimeContext().getExecutionConfig());
	this.valueIterator = new HadoopTupleUnwrappingIterator<KEYIN, VALUEIN>(keySerializer);
}
 
Developer: axbaretto, Project: flink, Lines: 13, Source: HadoopReduceFunction.java

Example 4: configure

import org.apache.hadoop.mapred.Reducer; // import the required package/class
public void configure(JobConf job) {
    super.configure(job);
    Class<?> c = job.getClass("stream.reduce.posthook", null, Mapper.class);
    if (c != null) {
        postMapper = (Mapper) ReflectionUtils.newInstance(c, job);
        LOG.info("PostHook=" + c.getName());
    }

    c = job.getClass("stream.reduce.prehook", null, Reducer.class);
    if (c != null) {
        preReducer = (Reducer) ReflectionUtils.newInstance(c, job);
        oc = new InmemBufferingOutputCollector();
        LOG.info("PreHook=" + c.getName());
    }
    this.ignoreKey = job.getBoolean("stream.reduce.ignoreKey", false);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 17, Source: PipeReducer.java
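
A hypothetical configuration sketch for the hooks this configure() method looks up. MyPostMapper and MyPreReducer are assumed user classes implementing the old (mapred) Mapper and Reducer interfaces; the property keys are exactly the ones read above:

JobConf job = new JobConf();
// Checked against Mapper.class, matching job.getClass(..., Mapper.class) above.
job.setClass("stream.reduce.posthook", MyPostMapper.class, Mapper.class);
// Checked against Reducer.class; runs before the streaming reducer.
job.setClass("stream.reduce.prehook", MyPreReducer.class, Reducer.class);
// Optionally drop keys from the reducer's streaming output.
job.setBoolean("stream.reduce.ignoreKey", true);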

Example 5: combineAndSpill

import org.apache.hadoop.mapred.Reducer; // import the required package/class
private void combineAndSpill(
    RawKeyValueIterator kvIter,
    Counters.Counter inCounter) throws IOException {
  JobConf job = jobConf;
  Reducer combiner = ReflectionUtils.newInstance(combinerClass, job);
  Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
  Class<V> valClass = (Class<V>) job.getMapOutputValueClass();
  RawComparator<K> comparator = 
    (RawComparator<K>)job.getOutputKeyComparator();
  try {
    CombineValuesIterator values = new CombineValuesIterator(
        kvIter, comparator, keyClass, valClass, job, Reporter.NULL,
        inCounter);
    while (values.more()) {
      combiner.reduce(values.getKey(), values, combineCollector,
                      Reporter.NULL);
      values.nextKey();
    }
  } finally {
    combiner.close();
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 23, Source: MergeManagerImpl.java

Example 6: runOldCombiner

import org.apache.hadoop.mapred.Reducer; // import the required package/class
private void runOldCombiner(final TezRawKeyValueIterator rawIter, final Writer writer) throws IOException {
  Class<? extends Reducer> reducerClazz = (Class<? extends Reducer>) conf.getClass("mapred.combiner.class", null, Reducer.class);
  
  Reducer combiner = ReflectionUtils.newInstance(reducerClazz, conf);
  
  OutputCollector collector = new OutputCollector() {
    @Override
    public void collect(Object key, Object value) throws IOException {
      writer.append(key, value);
    }
  };
  
  CombinerValuesIterator values = new CombinerValuesIterator(rawIter, keyClass, valClass, comparator);
  
  while (values.moveToNext()) {
    combiner.reduce(values.getKey(), values.getValues().iterator(), collector, reporter);
  }
}
 
Developer: apache, Project: incubator-tez, Lines: 19, Source: MRCombiner.java
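
For reference, a sketch of how the "mapred.combiner.class" entry read above is typically populated on the MapReduce side. WordCountReducer is an assumed user class implementing the old (mapred) Reducer interface; on newer Hadoop versions the value is stored under the successor key "mapreduce.job.combine.class", with Hadoop's key-deprecation mapping keeping the two names in sync:

JobConf job = new JobConf();
job.setCombinerClass(WordCountReducer.class);
// Roughly equivalent to setting the property directly:
job.setClass("mapred.combiner.class", WordCountReducer.class, Reducer.class);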

Example 7: runOldCombiner

import org.apache.hadoop.mapred.Reducer; // import the required package/class
private void runOldCombiner(final TezRawKeyValueIterator rawIter, final Writer writer) throws IOException {
  Class<? extends Reducer> reducerClazz = (Class<? extends Reducer>) conf.getClass("mapred.combiner.class", null, Reducer.class);
  
  Reducer combiner = ReflectionUtils.newInstance(reducerClazz, conf);
  
  OutputCollector collector = new OutputCollector() {
    @Override
    public void collect(Object key, Object value) throws IOException {
      writer.append(key, value);
      combineOutputRecordsCounter.increment(1);
    }
  };
  
  CombinerValuesIterator values = new CombinerValuesIterator(rawIter, keyClass, valClass, comparator);
  
  while (values.moveToNext()) {
    combiner.reduce(values.getKey(), values.getValues().iterator(), collector, reporter);
  }
}
 
Developer: apache, Project: tez, Lines: 20, Source: MRCombiner.java

Example 8: open

import org.apache.hadoop.mapred.Reducer; // import the required package/class
@SuppressWarnings("unchecked")
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	// Configure both wrapped Hadoop functions with the deserialized JobConf.
	this.reducer.configure(jobConf);
	this.combiner.configure(jobConf);

	this.reporter = new HadoopDummyReporter();
	// The reducer's input key type (type parameter 0) drives the key serializer.
	Class<KEYIN> inKeyClass = (Class<KEYIN>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 0);
	TypeSerializer<KEYIN> keySerializer = TypeExtractor.getForClass(inKeyClass).createSerializer(getRuntimeContext().getExecutionConfig());
	this.valueIterator = new HadoopTupleUnwrappingIterator<>(keySerializer);
	this.combineCollector = new HadoopOutputCollector<>();
	this.reduceCollector = new HadoopOutputCollector<>();
}
 
Developer: axbaretto, Project: flink, Lines: 15, Source: HadoopReduceCombineFunction.java

Example 9: getProducedType

import org.apache.hadoop.mapred.Reducer; // import the required package/class
@SuppressWarnings("unchecked")
@Override
public TypeInformation<Tuple2<KEYOUT, VALUEOUT>> getProducedType() {
	Class<KEYOUT> outKeyClass = (Class<KEYOUT>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 2);
	Class<VALUEOUT> outValClass = (Class<VALUEOUT>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 3);

	final TypeInformation<KEYOUT> keyTypeInfo = TypeExtractor.getForClass(outKeyClass);
	final TypeInformation<VALUEOUT> valueTypeInfo = TypeExtractor.getForClass(outValClass);
	return new TupleTypeInfo<>(keyTypeInfo, valueTypeInfo);
}
 
Developer: axbaretto, Project: flink, Lines: 11, Source: HadoopReduceCombineFunction.java
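
To clarify the positional indices used above: TypeExtractor.getParameterType resolves a type variable of the Reducer interface by its declaration position. For a hypothetical AvgReducer implements Reducer<Text, IntWritable, Text, DoubleWritable>, the mapping would be:

// TypeExtractor.getParameterType(Reducer.class, AvgReducer.class, 0) -> Text            (KEYIN)
// TypeExtractor.getParameterType(Reducer.class, AvgReducer.class, 1) -> IntWritable     (VALUEIN)
// TypeExtractor.getParameterType(Reducer.class, AvgReducer.class, 2) -> Text            (KEYOUT)
// TypeExtractor.getParameterType(Reducer.class, AvgReducer.class, 3) -> DoubleWritable  (VALUEOUT)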

Example 10: readObject

import org.apache.hadoop.mapred.Reducer; // import the required package/class
@SuppressWarnings("unchecked")
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {

	Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>> reducerClass =
			(Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>>) in.readObject();
	reducer = InstantiationUtil.instantiate(reducerClass);

	Class<Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN>> combinerClass =
			(Class<Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN>>) in.readObject();
	combiner = InstantiationUtil.instantiate(combinerClass);

	jobConf = new JobConf();
	jobConf.readFields(in);
}
 
Developer: axbaretto, Project: flink, Lines: 15, Source: HadoopReduceCombineFunction.java
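
For symmetry, a sketch of the writeObject counterpart implied by this read order (the actual method in HadoopReduceCombineFunction may differ in detail): the two classes are written first, then the JobConf serializes itself via its Writable contract.

// Hedged sketch: mirror of readObject above — classes first, then the JobConf.
private void writeObject(final ObjectOutputStream out) throws IOException {
	out.writeObject(reducer.getClass());
	out.writeObject(combiner.getClass());
	jobConf.write(out);
}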

Example 11: HadoopReduceFunction

import org.apache.hadoop.mapred.Reducer; // import the required package/class
/**
 * Maps a Hadoop Reducer (mapred API) to a non-combinable Flink GroupReduceFunction.
 *
 * @param hadoopReducer The Hadoop Reducer to wrap.
 * @param conf The JobConf that is used to configure the Hadoop Reducer.
 */
public HadoopReduceFunction(Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> hadoopReducer, JobConf conf) {
	if (hadoopReducer == null) {
		throw new NullPointerException("Reducer may not be null.");
	}
	if (conf == null) {
		throw new NullPointerException("JobConf may not be null.");
	}

	this.reducer = hadoopReducer;
	this.jobConf = conf;
}
 
Developer: axbaretto, Project: flink, Lines: 18, Source: HadoopReduceFunction.java
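
A hedged usage sketch of this non-combinable variant, under the same assumptions as the HadoopReduceCombineFunction example earlier (flink-hadoop-compatibility on the classpath, a words DataSet of (Text, LongWritable) pairs):

// Group by the Text key and apply only a wrapped reducer, without a combiner.
DataSet<Tuple2<Text, LongWritable>> counts = words
    .groupBy(0)
    .reduceGroup(new HadoopReduceFunction<Text, LongWritable, Text, LongWritable>(
        new LongSumReducer<Text>(), new JobConf()));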

Example 12: getProducedType

import org.apache.hadoop.mapred.Reducer; // import the required package/class
@SuppressWarnings("unchecked")
@Override
public TypeInformation<Tuple2<KEYOUT, VALUEOUT>> getProducedType() {
	Class<KEYOUT> outKeyClass = (Class<KEYOUT>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 2);
	Class<VALUEOUT> outValClass = (Class<VALUEOUT>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 3);

	final TypeInformation<KEYOUT> keyTypeInfo = TypeExtractor.getForClass(outKeyClass);
	final TypeInformation<VALUEOUT> valueTypeInfo = TypeExtractor.getForClass(outValClass);
	return new TupleTypeInfo<Tuple2<KEYOUT, VALUEOUT>>(keyTypeInfo, valueTypeInfo);
}
 
Developer: axbaretto, Project: flink, Lines: 11, Source: HadoopReduceFunction.java

Example 13: readObject

import org.apache.hadoop.mapred.Reducer; // import the required package/class
@SuppressWarnings("unchecked")
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {

	Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>> reducerClass =
			(Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>>) in.readObject();
	reducer = InstantiationUtil.instantiate(reducerClass);

	jobConf = new JobConf();
	jobConf.readFields(in);
}
 
Developer: axbaretto, Project: flink, Lines: 11, Source: HadoopReduceFunction.java


Note: The org.apache.hadoop.mapred.Reducer examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from community open-source projects; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not reproduce this article without permission.