本文整理匯總了Java中org.apache.hadoop.mapred.JobConfigurable類的典型用法代碼示例。如果您正苦於以下問題:Java JobConfigurable類的具體用法?Java JobConfigurable怎麽用?Java JobConfigurable使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
JobConfigurable類屬於org.apache.hadoop.mapred包,在下文中一共展示了JobConfigurable類的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: readObject
import org.apache.hadoop.mapred.JobConfigurable; //導入依賴的package包/類
/**
 * Custom deserialization hook: restores the wrapped {@code JobConf} and the Hadoop
 * input split from the stream, re-configuring the split before it reads its fields.
 *
 * @param in the stream positioned at this object's serialized state
 * @throws IOException            if reading from the stream fails
 * @throws ClassNotFoundException if a serialized class cannot be resolved
 */
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    // Restore all default-serializable (parent and final) fields first.
    in.defaultReadObject();

    // JobConf is a Hadoop Writable and knows how to deserialize itself.
    this.jobConf = new JobConf();
    this.jobConf.readFields(in);

    try {
        this.hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitType);
    } catch (Exception e) {
        throw new RuntimeException("Unable to instantiate Hadoop InputSplit", e);
    }

    // Hand the job conf to the split before it deserializes its own fields.
    // Configurable is checked first; JobConfigurable only as a fallback.
    if (this.hadoopInputSplit instanceof Configurable) {
        ((Configurable) this.hadoopInputSplit).setConf(this.jobConf);
    } else if (this.hadoopInputSplit instanceof JobConfigurable) {
        ((JobConfigurable) this.hadoopInputSplit).configure(this.jobConf);
    }
    this.hadoopInputSplit.readFields(in);
}
示例2: configure
import org.apache.hadoop.mapred.JobConfigurable; //導入依賴的package包/類
@SuppressWarnings("unchecked")
@Override
public void configure(Configuration config) {
    // Translate the Flink configuration into a Hadoop JobConf.
    this.jobConf = HadoopUtil.asJobConfInstance(FlinkConfigConverter.toHadoopConfig(config));

    // Pin the user-code class loader on the job conf.
    // Not strictly needed on Flink >= 0.10, but harmless and safer on older versions.
    this.jobConf.setClassLoader(this.getClass().getClassLoader());

    this.mapredInputFormat = this.jobConf.getInputFormat();

    // Give JobConfigurable input formats a chance to initialize themselves.
    if (this.mapredInputFormat instanceof JobConfigurable) {
        ((JobConfigurable) this.mapredInputFormat).configure(this.jobConf);
    }
}
示例3: configure
import org.apache.hadoop.mapred.JobConfigurable; //導入依賴的package包/類
@Override
public void configure(Configuration parameters) {
    // configure() may be entered from several threads; serialize the calls.
    synchronized (CONFIGURE_MUTEX) {
        // Pass the job conf to the mapred InputFormat if it asks for one.
        // Configurable is checked before JobConfigurable, mirroring the original dispatch order.
        final Object format = this.mapredInputFormat;
        if (format instanceof Configurable) {
            ((Configurable) format).setConf(this.jobConf);
        } else if (format instanceof JobConfigurable) {
            ((JobConfigurable) format).configure(this.jobConf);
        }
    }
}
示例4: configure
import org.apache.hadoop.mapred.JobConfigurable; //導入依賴的package包/類
@Override
public void configure(Configuration parameters) {
    // configure() may be entered from several threads; serialize the calls.
    synchronized (CONFIGURE_MUTEX) {
        // Pass the job conf to the mapred OutputFormat if it asks for one.
        // Configurable is checked before JobConfigurable, mirroring the original dispatch order.
        final Object format = this.mapredOutputFormat;
        if (format instanceof Configurable) {
            ((Configurable) format).setConf(this.jobConf);
        } else if (format instanceof JobConfigurable) {
            ((JobConfigurable) format).configure(this.jobConf);
        }
    }
}
示例5: getInputFormat
import org.apache.hadoop.mapred.JobConfigurable; //導入依賴的package包/類
/**
 * Instantiates the {@link InputFormat} named in the given {@link StorageDescriptor}.
 *
 * <p>If the instantiated format is {@link JobConfigurable}, it is configured with a
 * fresh {@code JobConf} built from the Hadoop configuration before being returned.
 *
 * @param sd the storage descriptor whose input-format class name is loaded
 * @return a (possibly configured) instance of the input format
 * @throws IOException if the class cannot be found or instantiated
 */
@SuppressWarnings("unchecked")
public static InputFormat<?, ?> getInputFormat(StorageDescriptor sd) throws IOException {
  try {
    // asSubclass verifies up front that the named class really is an InputFormat,
    // producing a descriptive ClassCastException instead of relying on a blind
    // unchecked cast of the raw Class object.
    Class<? extends InputFormat> inputFormatClass =
        Class.forName(sd.getInputFormat()).asSubclass(InputFormat.class);
    InputFormat<?, ?> inputFormat = ConstructorUtils.invokeConstructor(inputFormatClass);
    if (inputFormat instanceof JobConfigurable) {
      ((JobConfigurable) inputFormat).configure(new JobConf(getHadoopConfiguration()));
    }
    return inputFormat;
  } catch (ReflectiveOperationException re) {
    throw new IOException("Failed to instantiate input format.", re);
  }
}
示例6: open
import org.apache.hadoop.mapred.JobConfigurable; //導入依賴的package包/類
/**
 * Opens this input format for the given split: builds the Cascading source stream
 * graph for the flow node, creates a Hadoop RecordReader for the split, and wires
 * the reader into the graph's source stage.
 *
 * NOTE(review): statement order matters here — the stream graph must exist before
 * the record reader is attached to its source stage.
 */
@Override
public void open(HadoopInputSplit split) throws IOException {
// Use the job configuration that was shipped together with the split.
this.jobConf = split.getJobConf();
this.flowProcess = new FlinkFlowProcess(this.jobConf, this.getRuntimeContext(), flowNode.getID());
// Record the slice start time into Cascading's process counters.
processBeginTime = System.currentTimeMillis();
flowProcess.increment( SliceCounters.Process_Begin_Time, processBeginTime );
try {
// This input format only supports flow nodes with exactly one source Tap.
Set<FlowElement> sources = flowNode.getSourceElements();
if(sources.size() != 1) {
throw new RuntimeException("FlowNode for TapInputFormat may only have a single source");
}
FlowElement sourceElement = sources.iterator().next();
if(!(sourceElement instanceof Tap)) {
throw new RuntimeException("Source of TapInputFormat must be a Tap");
}
Tap source = (Tap)sourceElement;
// Build the stream graph rooted at the single source Tap.
streamGraph = new SourceStreamGraph( flowProcess, flowNode, source );
sourceStage = this.streamGraph.getSourceStage();
sinkStage = this.streamGraph.getSinkStage();
// Log the graph's endpoints for debugging.
for( Duct head : streamGraph.getHeads() ) {
LOG.info("sourcing from: " + ((ElementDuct) head).getFlowElement());
}
for( Duct tail : streamGraph.getTails() ) {
LOG.info("sinking to: " + ((ElementDuct) tail).getFlowElement());
}
}
catch( Throwable throwable ) {
// Re-throw CascadingExceptions as-is; wrap everything else in a FlowException.
if( throwable instanceof CascadingException) {
throw (CascadingException) throwable;
}
throw new FlowException( "internal error during TapInputFormat configuration", throwable );
}
// Create the record reader for this split and hand it the job conf if it asks
// for one (Configurable takes precedence over JobConfigurable).
RecordReader<?, ?> recordReader = this.mapredInputFormat.getRecordReader(split.getHadoopInputSplit(), jobConf, new HadoopDummyReporter());
if (recordReader instanceof Configurable) {
((Configurable) recordReader).setConf(jobConf);
}
else if (recordReader instanceof JobConfigurable) {
((JobConfigurable) recordReader).configure(jobConf);
}
try {
// Attach the reader to the source stage; surface IOExceptions directly,
// wrap any other failure in a RuntimeException.
this.sourceStage.setRecordReader(recordReader);
} catch(Throwable t) {
if(t instanceof IOException) {
throw (IOException)t;
}
else {
throw new RuntimeException(t);
}
}
}