This article collects typical usage examples of the Java class org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopInputSplit. If you are unsure what HadoopInputSplit does or how to use it, the curated examples below should help.
The HadoopInputSplit class belongs to the package org.apache.flink.api.java.hadoop.mapred.wrapper. Five code examples are shown below, ordered by popularity by default.
Example 1: createInputSplits

import org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopInputSplit; // import the required package/class

@Override
public HadoopInputSplit[] createInputSplits(int minNumSplits) throws IOException {
    // Delegate split computation to the wrapped mapred InputFormat, then wrap
    // each Hadoop split so Flink can serialize and ship it to parallel readers.
    org.apache.hadoop.mapred.InputSplit[] splitArray = mapredInputFormat.getSplits(jobConf, minNumSplits);
    HadoopInputSplit[] hiSplit = new HadoopInputSplit[splitArray.length];
    for (int i = 0; i < splitArray.length; i++) {
        hiSplit[i] = new HadoopInputSplit(i, splitArray[i], jobConf);
    }
    return hiSplit;
}
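For context, this method is what runs on the JobManager when a Flink batch job reads a Hadoop mapred source. A minimal sketch of the user-facing side, assuming the flink-hadoop-compatibility module is on the classpath (the class name and input path are placeholders):

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.hadoopcompatibility.HadoopInputs;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.TextInputFormat;

public class HadoopSplitExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // Behind the scenes, Flink calls createInputSplits(...) as in Example 1
        // and ships one HadoopInputSplit to each parallel reader task.
        DataSet<Tuple2<LongWritable, Text>> lines = env.createInput(
                HadoopInputs.readHadoopFile(
                        new TextInputFormat(), LongWritable.class, Text.class,
                        "hdfs:///path/to/input")); // placeholder path

        lines.first(10).print();
    }
}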
Example 2: open

import org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopInputSplit; // import the required package/class

@Override
public void open(HadoopInputSplit split) throws IOException {
    // enforce sequential open() calls
    synchronized (OPEN_MUTEX) {
        // Unwrap the Hadoop split and obtain a record reader for it.
        this.recordReader = this.mapredInputFormat.getRecordReader(
                split.getHadoopInputSplit(), jobConf, new HadoopDummyReporter());
        if (this.recordReader instanceof Configurable) {
            ((Configurable) this.recordReader).setConf(jobConf);
        }
        key = this.recordReader.createKey();
        value = this.recordReader.createValue();
        this.fetched = false;
    }
}
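This open() pairs with a fetch-ahead read loop elsewhere in the same format. A sketch of that pattern, assuming the fields fetched, key, and value from the snippet above plus a hasNext flag that the snippet does not show:

@Override
public boolean reachedEnd() throws IOException {
    if (!fetched) {
        // Eagerly read one record so end-of-input can be reported accurately.
        hasNext = this.recordReader.next(key, value);
        fetched = true;
    }
    return !hasNext;
}

@Override
public Tuple2<K, V> nextRecord(Tuple2<K, V> record) throws IOException {
    if (reachedEnd()) {
        return null;
    }
    record.f0 = key;
    record.f1 = value;
    fetched = false; // force a fresh fetch on the next call
    return record;
}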
Example 3: getInputSplitAssigner

import org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopInputSplit; // import the required package/class

@Override
public InputSplitAssigner getInputSplitAssigner(HadoopInputSplit[] inputSplits) {
    // HadoopInputSplit is locatable (it knows its block hosts), so the
    // locality-aware assigner can hand out splits host by host.
    return new LocatableInputSplitAssigner(inputSplits);
}
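A sketch of how the runtime side consumes such an assigner; the host name and the process(...) helper are placeholders for illustration:

HadoopInputSplit[] splits = format.createInputSplits(4);
InputSplitAssigner assigner = format.getInputSplitAssigner(splits);

InputSplit next;
while ((next = assigner.getNextInputSplit("worker-1", 0)) != null) {
    // Splits whose HDFS blocks live on "worker-1" are preferred;
    // remote splits are handed out once the local ones are exhausted.
    process((HadoopInputSplit) next);
}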
Example 4: getHadoopInputSplit

import org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopInputSplit; // import the required package/class

// Test helper: wraps a mapred FileSplit (built by getFileSplit(), defined
// elsewhere in the test) together with a fresh JobConf.
private HadoopInputSplit getHadoopInputSplit() {
    return new HadoopInputSplit(1, getFileSplit(), new JobConf());
}
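This helper comes from a test; a plausible round-trip check built on it is sketched below. The getFileSplit() body, the test name, and the JUnit assertion are assumptions, not part of the original source:

// Hypothetical companion helper: builds the mapred FileSplit being wrapped.
private FileSplit getFileSplit() {
    return new FileSplit(new Path("/tmp/test-file"), 0, 1024, new String[]{"localhost"});
}

@Test
public void testSplitSurvivesSerialization() throws Exception {
    HadoopInputSplit split = getHadoopInputSplit();

    // HadoopInputSplit supports Java serialization (it writes the wrapped
    // Hadoop split via its Writable methods), so a round trip should work.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
        out.writeObject(split);
    }
    HadoopInputSplit copy;
    try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
        copy = (HadoopInputSplit) in.readObject();
    }
    assertEquals(split.getSplitNumber(), copy.getSplitNumber());
}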
Example 5: open

import org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopInputSplit; // import the required package/class

@Override
public void open(HadoopInputSplit split) throws IOException {
    this.jobConf = split.getJobConf();
    this.flowProcess = new FlinkFlowProcess(this.jobConf, this.getRuntimeContext(), flowNode.getID());

    processBeginTime = System.currentTimeMillis();
    flowProcess.increment(SliceCounters.Process_Begin_Time, processBeginTime);

    try {
        // A TapInputFormat flow node must have exactly one Cascading Tap as its source.
        Set<FlowElement> sources = flowNode.getSourceElements();
        if (sources.size() != 1) {
            throw new RuntimeException("FlowNode for TapInputFormat may only have a single source");
        }
        FlowElement sourceElement = sources.iterator().next();
        if (!(sourceElement instanceof Tap)) {
            throw new RuntimeException("Source of TapInputFormat must be a Tap");
        }
        Tap source = (Tap) sourceElement;

        // Build the stream graph that pipes records from the Tap into the sink stage.
        streamGraph = new SourceStreamGraph(flowProcess, flowNode, source);
        sourceStage = this.streamGraph.getSourceStage();
        sinkStage = this.streamGraph.getSinkStage();

        for (Duct head : streamGraph.getHeads()) {
            LOG.info("sourcing from: " + ((ElementDuct) head).getFlowElement());
        }
        for (Duct tail : streamGraph.getTails()) {
            LOG.info("sinking to: " + ((ElementDuct) tail).getFlowElement());
        }
    }
    catch (Throwable throwable) {
        if (throwable instanceof CascadingException) {
            throw (CascadingException) throwable;
        }
        throw new FlowException("internal error during TapInputFormat configuration", throwable);
    }

    RecordReader<?, ?> recordReader = this.mapredInputFormat.getRecordReader(
            split.getHadoopInputSplit(), jobConf, new HadoopDummyReporter());

    // Hadoop record readers may expose either of two configuration hooks.
    if (recordReader instanceof Configurable) {
        ((Configurable) recordReader).setConf(jobConf);
    }
    else if (recordReader instanceof JobConfigurable) {
        ((JobConfigurable) recordReader).configure(jobConf);
    }

    try {
        this.sourceStage.setRecordReader(recordReader);
    } catch (Throwable t) {
        if (t instanceof IOException) {
            throw (IOException) t;
        }
        else {
            throw new RuntimeException(t);
        }
    }
}
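Two details of this open() are worth noting. First, mapred record readers advertise configurability through two different interfaces, so both Configurable and JobConfigurable have to be checked, much like Hadoop's own reflection utilities do. Second, failures are normalized: a CascadingException passes through unchanged, any other error during graph construction becomes a FlowException, and reader-wiring errors surface as IOException or RuntimeException, so callers only ever see the framework's expected exception types.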