本文整理匯總了Java中org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopOutputCollector類的典型用法代碼示例。如果您正苦於以下問題:Java HadoopOutputCollector類的具體用法?Java HadoopOutputCollector怎麼用?Java HadoopOutputCollector使用的例子?那麼, 這裏精選的類代碼示例或許可以為您提供幫助。
HadoopOutputCollector類屬於org.apache.flink.hadoopcompatibility.mapred.wrapper包,在下文中一共展示了HadoopOutputCollector類的8個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Java代碼示例。
示例1: open
import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopOutputCollector; //導入依賴的package包/類
/**
 * Initializes the wrapped Hadoop reducer before any records are processed:
 * hands the job configuration to the reducer, installs a no-op reporter and a
 * fresh output collector, and builds the tuple-unwrapping value iterator from
 * a serializer for the reducer's input key type.
 *
 * @param parameters the Flink function configuration, forwarded to the superclass
 * @throws Exception if superclass initialization fails
 */
@SuppressWarnings("unchecked")
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	this.reducer.configure(jobConf);

	this.reporter = new HadoopDummyReporter();
	this.reduceCollector = new HadoopOutputCollector<KEYOUT, VALUEOUT>();

	// The reducer's input key type is its first generic parameter; a serializer
	// for it is needed so the iterator can copy/compare keys while grouping.
	final Class<KEYIN> keyClass =
			(Class<KEYIN>) TypeExtractor.getParameterType(Reducer.class, this.reducer.getClass(), 0);
	final TypeSerializer<KEYIN> serializer =
			TypeExtractor.getForClass(keyClass).createSerializer(getRuntimeContext().getExecutionConfig());
	this.valueIterator = new HadoopTupleUnwrappingIterator<KEYIN, VALUEIN>(serializer);
}
示例2: open
import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopOutputCollector; //導入依賴的package包/類
/**
 * Initializes the wrapped Hadoop reducer and its combiner: both receive the
 * job configuration, a no-op reporter is installed, the tuple-unwrapping value
 * iterator is built from a serializer for the reducer's input key type, and a
 * separate output collector is created for the combine and reduce phases.
 *
 * @param parameters the Flink function configuration, forwarded to the superclass
 * @throws Exception if superclass initialization fails
 */
@SuppressWarnings("unchecked")
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	this.reducer.configure(jobConf);
	this.combiner.configure(jobConf);

	this.reporter = new HadoopDummyReporter();

	// First generic parameter of Reducer is the input key type; its serializer
	// backs the grouping iterator.
	final Class<KEYIN> keyClass =
			(Class<KEYIN>) TypeExtractor.getParameterType(Reducer.class, this.reducer.getClass(), 0);
	final TypeSerializer<KEYIN> serializer =
			TypeExtractor.getForClass(keyClass).createSerializer(getRuntimeContext().getExecutionConfig());
	this.valueIterator = new HadoopTupleUnwrappingIterator<>(serializer);

	// Combine and reduce phases each get their own collector instance.
	this.combineCollector = new HadoopOutputCollector<>();
	this.reduceCollector = new HadoopOutputCollector<>();
}
示例3: open
import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopOutputCollector; //導入依賴的package包/類
/**
 * Sets up the wrapped Hadoop reducer for execution: configures it with the
 * job configuration, creates a dummy reporter and the reduce-side output
 * collector, then derives a serializer for the reducer's input key type and
 * uses it to construct the tuple-unwrapping value iterator.
 *
 * @param parameters the Flink function configuration, forwarded to the superclass
 * @throws Exception if superclass initialization fails
 */
@SuppressWarnings("unchecked")
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	this.reducer.configure(jobConf);

	this.reporter = new HadoopDummyReporter();
	this.reduceCollector = new HadoopOutputCollector<KEYOUT, VALUEOUT>();

	// Extract KEYIN (the reducer's first type parameter) so a key serializer
	// can be created for the grouping iterator.
	final Class<KEYIN> keyClass =
			(Class<KEYIN>) TypeExtractor.getParameterType(Reducer.class, this.reducer.getClass(), 0);
	final TypeSerializer<KEYIN> serializer =
			TypeExtractor.getForClass(keyClass).createSerializer(getRuntimeContext().getExecutionConfig());
	this.valueIterator = new HadoopTupleUnwrappingIterator<KEYIN, VALUEIN>(serializer);
}
示例4: open
import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopOutputCollector; //導入依賴的package包/類
/**
 * Prepares the wrapped Hadoop mapper: passes the job configuration to it and
 * installs a no-op reporter plus a fresh output collector.
 *
 * @param parameters the Flink function configuration, forwarded to the superclass
 * @throws Exception if superclass initialization fails
 */
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);

	// Hadoop mappers expect configure() before the first map() call.
	this.mapper.configure(jobConf);

	this.reporter = new HadoopDummyReporter();
	this.outputCollector = new HadoopOutputCollector<KEYOUT, VALUEOUT>();
}
示例5: open
import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopOutputCollector; //導入依賴的package包/類
/**
 * Prepares both the wrapped Hadoop reducer and combiner: each is configured
 * with the job configuration, a dummy reporter is created, the value iterator
 * is built from a serializer for the reducer's input key type, and distinct
 * output collectors are allocated for the combine and reduce phases.
 *
 * @param parameters the Flink function configuration, forwarded to the superclass
 * @throws Exception if superclass initialization fails
 */
@SuppressWarnings("unchecked")
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	this.reducer.configure(jobConf);
	this.combiner.configure(jobConf);

	this.reporter = new HadoopDummyReporter();

	// KEYIN is the reducer's first generic parameter; its serializer backs the
	// tuple-unwrapping iterator used for key grouping.
	final Class<KEYIN> keyClass =
			(Class<KEYIN>) TypeExtractor.getParameterType(Reducer.class, this.reducer.getClass(), 0);
	final TypeSerializer<KEYIN> serializer =
			TypeExtractor.getForClass(keyClass).createSerializer(getRuntimeContext().getExecutionConfig());
	this.valueIterator = new HadoopTupleUnwrappingIterator<>(serializer);

	this.combineCollector = new HadoopOutputCollector<>();
	this.reduceCollector = new HadoopOutputCollector<>();
}
示例6: open
import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopOutputCollector; //導入依賴的package包/類
/**
 * Initializes the wrapped Hadoop mapper for this task: forwards the job
 * configuration to the mapper and sets up a no-op reporter together with the
 * output collector used to emit map results.
 *
 * @param parameters the Flink function configuration, forwarded to the superclass
 * @throws Exception if superclass initialization fails
 */
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	this.mapper.configure(jobConf);
	this.outputCollector = new HadoopOutputCollector<KEYOUT, VALUEOUT>();
	this.reporter = new HadoopDummyReporter();
}
示例7: open
import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopOutputCollector; //導入依賴的package包/類
/**
 * Prepares the wrapped Hadoop reducer and combiner (older wrapper API):
 * both are configured with the job configuration, a no-op reporter is set,
 * the tuple-unwrapping iterator is built directly from the reducer's input
 * key class, and separate collectors are created for combine and reduce.
 *
 * @param parameters the Flink function configuration, forwarded to the superclass
 * @throws Exception if superclass initialization fails
 */
@SuppressWarnings("unchecked")
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	this.reducer.configure(jobConf);
	this.combiner.configure(jobConf);

	this.reporter = new HadoopDummyReporter();

	// In this API version the iterator takes the key Class itself rather than
	// a TypeSerializer.
	final Class<KEYIN> keyClass =
			(Class<KEYIN>) TypeExtractor.getParameterType(Reducer.class, this.reducer.getClass(), 0);
	this.valueIterator = new HadoopTupleUnwrappingIterator<KEYIN, VALUEIN>(keyClass);

	// Combine output is typed (KEYIN, VALUEIN); final reduce output is (KEYOUT, VALUEOUT).
	this.combineCollector = new HadoopOutputCollector<KEYIN, VALUEIN>();
	this.reduceCollector = new HadoopOutputCollector<KEYOUT, VALUEOUT>();
}
示例8: open
import org.apache.flink.hadoopcompatibility.mapred.wrapper.HadoopOutputCollector; //導入依賴的package包/類
/**
 * Initializes the wrapped Hadoop reducer (older wrapper API): forwards the
 * job configuration, installs a no-op reporter and the reduce-side output
 * collector, and constructs the tuple-unwrapping value iterator directly
 * from the reducer's input key class.
 *
 * @param parameters the Flink function configuration, forwarded to the superclass
 * @throws Exception if superclass initialization fails
 */
@SuppressWarnings("unchecked")
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	this.reducer.configure(jobConf);

	this.reporter = new HadoopDummyReporter();
	this.reduceCollector = new HadoopOutputCollector<KEYOUT, VALUEOUT>();

	// This API version builds the iterator from the key Class (no serializer).
	final Class<KEYIN> keyClass =
			(Class<KEYIN>) TypeExtractor.getParameterType(Reducer.class, this.reducer.getClass(), 0);
	this.valueIterator = new HadoopTupleUnwrappingIterator<KEYIN, VALUEIN>(keyClass);
}