本文整理汇总了Java中com.amazonaws.services.elasticmapreduce.model.AddJobFlowStepsResult类的典型用法代码示例。如果您正苦于以下问题:Java AddJobFlowStepsResult类的具体用法?Java AddJobFlowStepsResult怎么用?Java AddJobFlowStepsResult使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
AddJobFlowStepsResult类属于com.amazonaws.services.elasticmapreduce.model包,在下文中一共展示了AddJobFlowStepsResult类的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: existingClusterSubmitter
import com.amazonaws.services.elasticmapreduce.model.AddJobFlowStepsResult; //导入依赖的package包/类
/**
 * Builds a {@link Submitter} that adds this task's compiled steps to an already
 * running EMR cluster.
 *
 * <p>The submission is executed at most once (tracked in task {@code state}) and
 * retried on non-deterministic {@link AmazonServiceException}s with an interval
 * between 30 seconds and 5 minutes.
 *
 * @param emr          EMR client used to add the job flow steps
 * @param tag          tag used when preparing the remote runner file
 * @param stepCompiler compiles the step configurations to submit
 * @param clusterId    id of the existing cluster (job flow) to submit to
 * @param filer        stages local files to S3 before submission
 * @return a submitter yielding the submitted step ids for {@code clusterId}
 */
private Submitter existingClusterSubmitter(AmazonElasticMapReduce emr, String tag, StepCompiler stepCompiler, String clusterId, Filer filer)
{
    return () -> {
        List<String> submittedStepIds = pollingRetryExecutor(state, "submission")
                .retryUnless(AmazonServiceException.class, Aws::isDeterministicException)
                .withRetryInterval(DurationInterval.of(Duration.ofSeconds(30), Duration.ofMinutes(5)))
                .runOnce(new TypeReference<List<String>>() {}, store -> {
                    // Prepare the runner file, then compile the steps against it.
                    RemoteFile runnerFile = prepareRunner(filer, tag);
                    stepCompiler.compile(runnerFile);
                    // Upload staged files to S3 before the steps can reference them.
                    filer.stageFiles();
                    AddJobFlowStepsRequest submitRequest = new AddJobFlowStepsRequest()
                            .withJobFlowId(clusterId)
                            .withSteps(stepCompiler.stepConfigs());
                    int stepCount = submitRequest.getSteps().size();
                    logger.info("Submitting {} EMR step(s) to {}", stepCount, clusterId);
                    AddJobFlowStepsResult submitResult = emr.addJobFlowSteps(submitRequest);
                    logSubmittedSteps(clusterId, stepCount,
                            i -> submitRequest.getSteps().get(i).getName(),
                            i -> submitResult.getStepIds().get(i));
                    return ImmutableList.copyOf(submitResult.getStepIds());
                });
        return SubmissionResult.ofExistingCluster(clusterId, submittedStepIds);
    };
}
示例2: fireEMRJob
import com.amazonaws.services.elasticmapreduce.model.AddJobFlowStepsResult; //导入依赖的package包/类
/**
 * Submits a single Spark step (executed via {@code command-runner.jar}) to an
 * existing EMR cluster and returns the id of the submitted step.
 *
 * @param paramsStr comma-separated arguments passed to command-runner.jar
 *                  (e.g. "spark-submit,--class,Main,s3://bucket/app.jar")
 * @param clusterId id of the running EMR cluster (job flow) to submit to
 * @return the id of the submitted step
 * @throws IllegalStateException if the AWS_REGION environment variable is not set
 */
protected String fireEMRJob(String paramsStr, String clusterId) {
    AmazonElasticMapReduceClient emr = new AmazonElasticMapReduceClient();
    // System.getenv returns null when AWS_REGION is unset, which would fail deep
    // inside Regions.fromName — fail fast with a clear message instead.
    String regionName = System.getenv("AWS_REGION");
    if (regionName == null) {
        throw new IllegalStateException("AWS_REGION environment variable is not set");
    }
    emr.setRegion(Region.getRegion(Regions.fromName(regionName)));

    // NOTE(review): the original built an unused Application("Spark") and an
    // "Enable debugging" StepConfig that was never added to the request; both
    // were dead code and have been removed.

    // command-runner.jar runs the given arguments as a command on the master node.
    HadoopJarStepConfig sparkStepConf = new HadoopJarStepConfig()
            .withJar("command-runner.jar")
            .withArgs(paramsStr.split(","));

    final StepConfig sparkStep = new StepConfig()
            .withName("Spark Step")
            .withActionOnFailure("CONTINUE")
            .withHadoopJarStep(sparkStepConf);

    // withSteps(StepConfig...) varargs replaces the double-brace-initialized
    // ArrayList (an anonymous inner class that pins the enclosing instance).
    AddJobFlowStepsRequest request = new AddJobFlowStepsRequest(clusterId)
            .withSteps(sparkStep);
    AddJobFlowStepsResult result = emr.addJobFlowSteps(request);
    return result.getStepIds().get(0);
}