This page collects typical usage examples of the Java method org.apache.flink.runtime.jobmanager.scheduler.Scheduler.newInstanceAvailable. If you are wondering what Scheduler.newInstanceAvailable does, how to call it, or what it looks like in practice, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.flink.runtime.jobmanager.scheduler.Scheduler.
The following presents 12 code examples of Scheduler.newInstanceAvailable, sorted by popularity by default.
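All twelve examples share the same setup pattern: create a Scheduler backed by an execution context, register one or more Instance objects via newInstanceAvailable so their task slots become schedulable, and then hand the scheduler to an ExecutionGraph. The minimal sketch below distills that pattern from the examples; the test helpers (ExecutionGraphTestUtils, SimpleActorGateway, TestingUtils) and the NUM_TASKS constant come from Flink's test utilities exactly as they are used in the snippets that follow.

// A test Instance offering NUM_TASKS slots, backed by a mock actor gateway.
Instance instance = ExecutionGraphTestUtils.getInstance(
    new ActorTaskManagerGateway(
        new SimpleActorGateway(TestingUtils.directExecutionContext())),
    NUM_TASKS);

// The Scheduler tracks available task slots; registering the instance makes its slots schedulable.
Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
scheduler.newInstanceAvailable(instance);

// The scheduler is then passed to an ExecutionGraph, which requests slots from it
// once scheduleForExecution() is called (see the examples below).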
Example 1: createExecutionGraph
import org.apache.flink.runtime.jobmanager.scheduler.Scheduler; // import the package/class this method depends on
private static Tuple2<ExecutionGraph, Instance> createExecutionGraph(RestartStrategy restartStrategy, boolean isSpy) throws Exception {
    Instance instance = ExecutionGraphTestUtils.getInstance(
        new ActorTaskManagerGateway(
            new SimpleActorGateway(TestingUtils.directExecutionContext())),
        NUM_TASKS);

    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    scheduler.newInstanceAvailable(instance);

    JobVertex sender = newJobVertex("Task", NUM_TASKS, NoOpInvokable.class);
    JobGraph jobGraph = new JobGraph("Pointwise job", sender);

    ExecutionGraph eg = newExecutionGraph(restartStrategy, scheduler);
    if (isSpy) {
        eg = spy(eg);
    }
    eg.attachJobGraph(jobGraph.getVerticesSortedTopologicallyFromSources());
    assertEquals(JobStatus.CREATED, eg.getState());

    eg.scheduleForExecution();
    assertEquals(JobStatus.RUNNING, eg.getState());

    return new Tuple2<>(eg, instance);
}
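Example 1 relies on two helpers of the surrounding test class, newJobVertex and newExecutionGraph, which are not shown on this page. A plausible sketch of them, pieced together from the inline equivalents in Examples 7 and 9 (the exact bodies in the original test class may differ), is:

// Creates a JobVertex with the given name, parallelism, and invokable class
// (the same three calls appear inline in Example 7).
private static JobVertex newJobVertex(String name, int parallelism, Class<? extends AbstractInvokable> invokable) {
    JobVertex vertex = new JobVertex(name);
    vertex.setInvokableClass(invokable);
    vertex.setParallelism(parallelism);
    return vertex;
}

// Builds an ExecutionGraph wired to the given restart strategy and scheduler,
// mirroring the constructor call used in Example 9.
private static ExecutionGraph newExecutionGraph(RestartStrategy restartStrategy, Scheduler scheduler) throws Exception {
    return new ExecutionGraph(
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        new JobID(),
        "Test job",
        new Configuration(),
        new SerializedValue<>(new ExecutionConfig()),
        AkkaUtils.getDefaultTimeout(),
        restartStrategy,
        scheduler);
}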
Example 2: testFailExecutionGraphAfterCancel
import org.apache.flink.runtime.jobmanager.scheduler.Scheduler; // import the package/class this method depends on
/**
 * Tests that it is possible to fail a graph via a call to
 * {@link ExecutionGraph#failGlobal(Throwable)} after cancellation.
 */
@Test
public void testFailExecutionGraphAfterCancel() throws Exception {
    Instance instance = ExecutionGraphTestUtils.getInstance(
        new ActorTaskManagerGateway(
            new SimpleActorGateway(TestingUtils.directExecutionContext())),
        2);

    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    scheduler.newInstanceAvailable(instance);

    JobVertex vertex = newJobVertex("Test Vertex", 1, NoOpInvokable.class);

    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(
        Integer.MAX_VALUE, Integer.MAX_VALUE));
    JobGraph jobGraph = new JobGraph("Test Job", vertex);
    jobGraph.setExecutionConfig(executionConfig);

    ExecutionGraph eg = newExecutionGraph(new InfiniteDelayRestartStrategy(), scheduler);
    eg.attachJobGraph(jobGraph.getVerticesSortedTopologicallyFromSources());
    assertEquals(JobStatus.CREATED, eg.getState());

    eg.scheduleForExecution();
    assertEquals(JobStatus.RUNNING, eg.getState());

    // Fail right after cancel (for example with concurrent slot release)
    eg.cancel();
    assertEquals(JobStatus.CANCELLING, eg.getState());

    eg.failGlobal(new Exception("Test Exception"));
    assertEquals(JobStatus.FAILING, eg.getState());

    Execution execution = eg.getAllExecutionVertices().iterator().next().getCurrentExecutionAttempt();
    execution.cancelingComplete();
    assertEquals(JobStatus.RESTARTING, eg.getState());
}
Example 3: createSchedulerWithInstances
import org.apache.flink.runtime.jobmanager.scheduler.Scheduler; // import the package/class this method depends on
private Scheduler createSchedulerWithInstances(int num, TaskManagerGateway taskManagerGateway) {
    final Scheduler scheduler = new Scheduler(executor);
    final Instance[] instances = new Instance[num];

    for (int i = 0; i < instances.length; i++) {
        instances[i] = createInstance(taskManagerGateway, 55443 + i);
        scheduler.newInstanceAvailable(instances[i]);
    }

    return scheduler;
}
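Example 3 calls a createInstance helper that is not included on this page. A hypothetical implementation consistent with how the example uses it (one slot per instance, the port becoming the data port of the TaskManagerLocation) might look like the following; the resource figures and the exact constructor arguments are assumptions, not the original code:

// Hypothetical helper: a one-slot Instance whose TaskManagerLocation listens on the given data port.
private Instance createInstance(TaskManagerGateway taskManagerGateway, int port) {
    final HardwareDescription resources = new HardwareDescription(4, 1_000_000_000L, 500_000_000L, 400_000_000L);
    final TaskManagerLocation location = new TaskManagerLocation(
        ResourceID.generate(), InetAddress.getLoopbackAddress(), port);
    return new Instance(taskManagerGateway, location, new InstanceID(), resources, 1);
}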
Example 4: testNoResourceAvailableFailure
import org.apache.flink.runtime.jobmanager.scheduler.Scheduler; // import the package/class this method depends on
/**
 * Tests that a blocking batch job fails if there are not enough resources left to schedule the
 * succeeding tasks. This test case is related to [FLINK-4296] where finished producing tasks
 * swallow the fail exception when scheduling a consumer task.
 */
@Test
public void testNoResourceAvailableFailure() throws Exception {
    final JobID jobId = new JobID();
    JobVertex v1 = new JobVertex("source");
    JobVertex v2 = new JobVertex("sink");

    int dop1 = 1;
    int dop2 = 1;

    v1.setParallelism(dop1);
    v2.setParallelism(dop2);

    v1.setInvokableClass(BatchTask.class);
    v2.setInvokableClass(BatchTask.class);

    v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);

    Scheduler scheduler = new Scheduler(TestingUtils.directExecutionContext());
    for (int i = 0; i < dop1; i++) {
        scheduler.newInstanceAvailable(
            ExecutionGraphTestUtils.getInstance(
                new ActorTaskManagerGateway(
                    new ExecutionGraphTestUtils.SimpleActorGateway(
                        TestingUtils.directExecutionContext()))));
    }

    final JobInformation jobInformation = new DummyJobInformation(
        jobId,
        "failing test job");

    // execution graph that executes actions synchronously
    ExecutionGraph eg = new ExecutionGraph(
        jobInformation,
        new DirectScheduledExecutorService(),
        TestingUtils.defaultExecutor(),
        AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(),
        new RestartAllStrategy.Factory(),
        scheduler,
        ExecutionGraph.class.getClassLoader(),
        blobWriter);
    checkJobOffloaded(eg);

    eg.setQueuedSchedulingAllowed(false);

    List<JobVertex> ordered = Arrays.asList(v1, v2);
    eg.attachJobGraph(ordered);

    assertEquals(dop1, scheduler.getNumberOfAvailableSlots());

    // schedule, this triggers mock deployment
    eg.scheduleForExecution();

    ExecutionAttemptID attemptID = eg.getJobVertex(v1.getID()).getTaskVertices()[0].getCurrentExecutionAttempt().getAttemptId();
    eg.updateState(new TaskExecutionState(jobId, attemptID, ExecutionState.RUNNING));
    eg.updateState(new TaskExecutionState(jobId, attemptID, ExecutionState.FINISHED, null));

    assertEquals(JobStatus.FAILED, eg.getState());
}
Example 5: setupExecution
import org.apache.flink.runtime.jobmanager.scheduler.Scheduler; // import the package/class this method depends on
private Tuple2<ExecutionGraph, Map<ExecutionAttemptID, Execution>> setupExecution(JobVertex v1, int dop1, JobVertex v2, int dop2) throws Exception {
    final JobID jobId = new JobID();

    v1.setParallelism(dop1);
    v2.setParallelism(dop2);

    v1.setInvokableClass(BatchTask.class);
    v2.setInvokableClass(BatchTask.class);

    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    for (int i = 0; i < dop1 + dop2; i++) {
        scheduler.newInstanceAvailable(
            ExecutionGraphTestUtils.getInstance(
                new ActorTaskManagerGateway(
                    new ExecutionGraphTestUtils.SimpleActorGateway(
                        TestingUtils.directExecutionContext()))));
    }

    final JobInformation jobInformation = new DummyJobInformation(
        jobId,
        "some job");

    // execution graph that executes actions synchronously
    ExecutionGraph eg = new ExecutionGraph(
        jobInformation,
        new DirectScheduledExecutorService(),
        TestingUtils.defaultExecutor(),
        AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(),
        new RestartAllStrategy.Factory(),
        scheduler,
        ExecutionGraph.class.getClassLoader(),
        blobWriter);
    checkJobOffloaded(eg);

    eg.setQueuedSchedulingAllowed(false);

    List<JobVertex> ordered = Arrays.asList(v1, v2);
    eg.attachJobGraph(ordered);

    assertEquals(dop1 + dop2, scheduler.getNumberOfAvailableSlots());

    // schedule, this triggers mock deployment
    eg.scheduleForExecution();

    Map<ExecutionAttemptID, Execution> executions = eg.getRegisteredExecutions();
    assertEquals(dop1 + dop2, executions.size());

    return new Tuple2<>(eg, executions);
}
Example 6: testConstraintsAfterRestart
import org.apache.flink.runtime.jobmanager.scheduler.Scheduler; // import the package/class this method depends on
@Test
public void testConstraintsAfterRestart() throws Exception {
    // setting up
    Instance instance = ExecutionGraphTestUtils.getInstance(
        new ActorTaskManagerGateway(
            new SimpleActorGateway(TestingUtils.directExecutionContext())),
        NUM_TASKS);

    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    scheduler.newInstanceAvailable(instance);

    JobVertex groupVertex = newJobVertex("Task1", NUM_TASKS, NoOpInvokable.class);
    JobVertex groupVertex2 = newJobVertex("Task2", NUM_TASKS, NoOpInvokable.class);

    SlotSharingGroup sharingGroup = new SlotSharingGroup();
    groupVertex.setSlotSharingGroup(sharingGroup);
    groupVertex2.setSlotSharingGroup(sharingGroup);
    groupVertex.setStrictlyCoLocatedWith(groupVertex2);

    // initiate and schedule job
    JobGraph jobGraph = new JobGraph("Pointwise job", groupVertex, groupVertex2);
    ExecutionGraph eg = newExecutionGraph(new FixedDelayRestartStrategy(1, 0L), scheduler);
    eg.attachJobGraph(jobGraph.getVerticesSortedTopologicallyFromSources());
    assertEquals(JobStatus.CREATED, eg.getState());

    eg.scheduleForExecution();
    assertEquals(JobStatus.RUNNING, eg.getState());

    // sanity checks
    validateConstraints(eg);

    // restart automatically
    restartAfterFailure(eg, new FiniteDuration(2, TimeUnit.MINUTES), false);

    // checking execution vertex properties
    validateConstraints(eg);

    haltExecution(eg);
}
Example 7: testFailWhileRestarting
import org.apache.flink.runtime.jobmanager.scheduler.Scheduler; // import the package/class this method depends on
@Test
public void testFailWhileRestarting() throws Exception {
    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());

    Instance instance = ExecutionGraphTestUtils.getInstance(
        new ActorTaskManagerGateway(
            new SimpleActorGateway(TestingUtils.directExecutionContext())),
        NUM_TASKS);
    scheduler.newInstanceAvailable(instance);

    // Blocking program
    ExecutionGraph executionGraph = new ExecutionGraph(
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        new JobID(),
        "TestJob",
        new Configuration(),
        new SerializedValue<>(new ExecutionConfig()),
        AkkaUtils.getDefaultTimeout(),
        // We want to manually control the restart and delay
        new InfiniteDelayRestartStrategy(),
        scheduler);

    JobVertex jobVertex = new JobVertex("NoOpInvokable");
    jobVertex.setInvokableClass(NoOpInvokable.class);
    jobVertex.setParallelism(NUM_TASKS);

    JobGraph jobGraph = new JobGraph("TestJob", jobVertex);

    executionGraph.attachJobGraph(jobGraph.getVerticesSortedTopologicallyFromSources());
    assertEquals(JobStatus.CREATED, executionGraph.getState());

    executionGraph.scheduleForExecution();
    assertEquals(JobStatus.RUNNING, executionGraph.getState());

    // Kill the instance and wait for the job to restart
    instance.markDead();

    Deadline deadline = TestingUtils.TESTING_DURATION().fromNow();
    while (deadline.hasTimeLeft() &&
        executionGraph.getState() != JobStatus.RESTARTING) {
        Thread.sleep(100);
    }
    assertEquals(JobStatus.RESTARTING, executionGraph.getState());

    // The restarting should not fail with an ordinary exception
    executionGraph.failGlobal(new Exception("Test exception"));
    assertEquals(JobStatus.RESTARTING, executionGraph.getState());

    // but it should fail when sending a SuppressRestartsException
    executionGraph.failGlobal(new SuppressRestartsException(new Exception("Test exception")));
    assertEquals(JobStatus.FAILED, executionGraph.getState());

    // The restart has been aborted
    executionGraph.restart(executionGraph.getGlobalModVersion());
    assertEquals(JobStatus.FAILED, executionGraph.getState());
}
Example 8: testFailExecutionAfterCancel
import org.apache.flink.runtime.jobmanager.scheduler.Scheduler; // import the package/class this method depends on
/**
 * Tests that a graph is not restarted after cancellation via a call to
 * {@link ExecutionGraph#failGlobal(Throwable)}. This can happen when a slot is
 * released concurrently with cancellation.
 */
@Test
public void testFailExecutionAfterCancel() throws Exception {
    Instance instance = ExecutionGraphTestUtils.getInstance(
        new ActorTaskManagerGateway(
            new SimpleActorGateway(TestingUtils.directExecutionContext())),
        2);

    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    scheduler.newInstanceAvailable(instance);

    JobVertex vertex = newJobVertex("Test Vertex", 1, NoOpInvokable.class);

    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(
        Integer.MAX_VALUE, Integer.MAX_VALUE));
    JobGraph jobGraph = new JobGraph("Test Job", vertex);
    jobGraph.setExecutionConfig(executionConfig);

    ExecutionGraph eg = newExecutionGraph(new InfiniteDelayRestartStrategy(), scheduler);
    eg.attachJobGraph(jobGraph.getVerticesSortedTopologicallyFromSources());
    assertEquals(JobStatus.CREATED, eg.getState());

    eg.scheduleForExecution();
    assertEquals(JobStatus.RUNNING, eg.getState());

    // Fail right after cancel (for example with concurrent slot release)
    eg.cancel();
    for (ExecutionVertex v : eg.getAllExecutionVertices()) {
        v.getCurrentExecutionAttempt().fail(new Exception("Test Exception"));
    }
    assertEquals(JobStatus.CANCELED, eg.getTerminationFuture().get());

    Execution execution = eg.getAllExecutionVertices().iterator().next().getCurrentExecutionAttempt();
    execution.cancelingComplete();
    assertEquals(JobStatus.CANCELED, eg.getState());
}
Example 9: testSuspendWhileRestarting
import org.apache.flink.runtime.jobmanager.scheduler.Scheduler; // import the package/class this method depends on
/**
 * Tests that a suspend call while restarting a job will abort the restarting.
 *
 * @throws Exception
 */
@Test
public void testSuspendWhileRestarting() throws Exception {
    final Time timeout = Time.of(1, TimeUnit.MINUTES);

    Instance instance = ExecutionGraphTestUtils.getInstance(
        new ActorTaskManagerGateway(
            new SimpleActorGateway(TestingUtils.directExecutionContext())),
        NUM_TASKS);

    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    scheduler.newInstanceAvailable(instance);

    JobVertex sender = new JobVertex("Task");
    sender.setInvokableClass(NoOpInvokable.class);
    sender.setParallelism(NUM_TASKS);

    JobGraph jobGraph = new JobGraph("Pointwise job", sender);

    ControllableRestartStrategy controllableRestartStrategy = new ControllableRestartStrategy(timeout);

    ExecutionGraph eg = new ExecutionGraph(
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        new JobID(),
        "Test job",
        new Configuration(),
        new SerializedValue<>(new ExecutionConfig()),
        AkkaUtils.getDefaultTimeout(),
        controllableRestartStrategy,
        scheduler);

    eg.attachJobGraph(jobGraph.getVerticesSortedTopologicallyFromSources());
    assertEquals(JobStatus.CREATED, eg.getState());

    eg.scheduleForExecution();
    assertEquals(JobStatus.RUNNING, eg.getState());

    instance.markDead();

    controllableRestartStrategy.getReachedCanRestart().await(timeout.toMilliseconds(), TimeUnit.MILLISECONDS);
    assertEquals(JobStatus.RESTARTING, eg.getState());

    eg.suspend(new Exception("Test exception"));
    assertEquals(JobStatus.SUSPENDED, eg.getState());

    controllableRestartStrategy.unlockRestart();
    controllableRestartStrategy.getRestartDone().await(timeout.toMilliseconds(), TimeUnit.MILLISECONDS);
    assertEquals(JobStatus.SUSPENDED, eg.getState());
}
Example 10: testMultiRegionFailoverAtSameTime
import org.apache.flink.runtime.jobmanager.scheduler.Scheduler; // import the package/class this method depends on
/**
 * Tests that when two failover regions fail over at the same time, they do not influence each other.
 *
 * @throws Exception
 */
@Test
public void testMultiRegionFailoverAtSameTime() throws Exception {
    Instance instance = ExecutionGraphTestUtils.getInstance(
        new ActorTaskManagerGateway(
            new SimpleActorGateway(TestingUtils.directExecutionContext())),
        16);

    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    scheduler.newInstanceAvailable(instance);

    final JobID jobId = new JobID();
    final String jobName = "Test Job Sample Name";

    JobVertex v1 = new JobVertex("vertex1");
    JobVertex v2 = new JobVertex("vertex2");
    JobVertex v3 = new JobVertex("vertex3");
    JobVertex v4 = new JobVertex("vertex4");

    v1.setParallelism(2);
    v2.setParallelism(2);
    v3.setParallelism(2);
    v4.setParallelism(2);

    v1.setInvokableClass(AbstractInvokable.class);
    v2.setInvokableClass(AbstractInvokable.class);
    v3.setInvokableClass(AbstractInvokable.class);
    v4.setInvokableClass(AbstractInvokable.class);

    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
    v4.connectNewDataSetAsInput(v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

    List<JobVertex> ordered = Arrays.asList(v1, v2, v3, v4);

    ExecutionGraph eg = new ExecutionGraph(
        new DummyJobInformation(
            jobId,
            jobName),
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        AkkaUtils.getDefaultTimeout(),
        new InfiniteDelayRestartStrategy(10),
        new RestartPipelinedRegionStrategy.Factory(),
        scheduler);
    try {
        eg.attachJobGraph(ordered);
    }
    catch (JobException e) {
        e.printStackTrace();
        fail("Job failed with exception: " + e.getMessage());
    }
    eg.scheduleForExecution();

    RestartPipelinedRegionStrategy strategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();

    ExecutionVertex ev11 = eg.getJobVertex(v1.getID()).getTaskVertices()[0];
    ExecutionVertex ev12 = eg.getJobVertex(v1.getID()).getTaskVertices()[1];
    ExecutionVertex ev31 = eg.getJobVertex(v3.getID()).getTaskVertices()[0];
    ExecutionVertex ev32 = eg.getJobVertex(v3.getID()).getTaskVertices()[1];

    assertEquals(JobStatus.RUNNING, strategy.getFailoverRegion(ev11).getState());
    assertEquals(JobStatus.RUNNING, strategy.getFailoverRegion(ev31).getState());

    ev11.getCurrentExecutionAttempt().fail(new Exception("new fail"));
    ev31.getCurrentExecutionAttempt().fail(new Exception("new fail"));

    assertEquals(JobStatus.CANCELLING, strategy.getFailoverRegion(ev11).getState());
    assertEquals(JobStatus.CANCELLING, strategy.getFailoverRegion(ev31).getState());

    ev32.getCurrentExecutionAttempt().cancelingComplete();
    waitUntilFailoverRegionState(strategy.getFailoverRegion(ev31), JobStatus.RUNNING, 1000);

    ev12.getCurrentExecutionAttempt().cancelingComplete();
    waitUntilFailoverRegionState(strategy.getFailoverRegion(ev11), JobStatus.RUNNING, 1000);
}
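Example 10 uses a waitUntilFailoverRegionState helper that is not shown here. A simple polling sketch consistent with how it is called (region, expected JobStatus, timeout in milliseconds) could look like this; it is an assumption, not the original helper:

// Hypothetical polling helper: waits until the failover region reaches the expected status
// or the timeout (in milliseconds) elapses.
private static void waitUntilFailoverRegionState(FailoverRegion region, JobStatus expected, long timeoutMillis)
        throws InterruptedException, TimeoutException {
    final long deadline = System.currentTimeMillis() + timeoutMillis;
    while (region.getState() != expected) {
        if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException("Failover region did not reach state " + expected);
        }
        Thread.sleep(10);
    }
}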
Example 11: testSucceedingNoticePreceding
import org.apache.flink.runtime.jobmanager.scheduler.Scheduler; // import the package/class this method depends on
/**
 * Tests that if a task reports that the result of its preceding task has failed,
 * the preceding task is considered failed as well and starts to fail over.
 * TODO: as the report part is not finished yet, this case is ignored temporarily
 * @throws Exception
 */
@Ignore
@Test
public void testSucceedingNoticePreceding() throws Exception {
    Instance instance = ExecutionGraphTestUtils.getInstance(
        new ActorTaskManagerGateway(
            new SimpleActorGateway(TestingUtils.directExecutionContext())),
        14);

    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    scheduler.newInstanceAvailable(instance);

    final JobID jobId = new JobID();
    final String jobName = "Test Job Sample Name";

    JobVertex v1 = new JobVertex("vertex1");
    JobVertex v2 = new JobVertex("vertex2");

    v1.setParallelism(1);
    v2.setParallelism(1);

    v1.setInvokableClass(AbstractInvokable.class);
    v2.setInvokableClass(AbstractInvokable.class);

    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

    List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2));

    ExecutionGraph eg = new ExecutionGraph(
        new DummyJobInformation(
            jobId,
            jobName),
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        AkkaUtils.getDefaultTimeout(),
        new InfiniteDelayRestartStrategy(10),
        new FailoverPipelinedRegionWithDirectExecutor(),
        scheduler);
    try {
        eg.attachJobGraph(ordered);
    }
    catch (JobException e) {
        e.printStackTrace();
        fail("Job failed with exception: " + e.getMessage());
    }
    eg.setScheduleMode(ScheduleMode.EAGER);
    eg.scheduleForExecution();

    RestartPipelinedRegionStrategy strategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();

    // ev11 is the single subtask of the producer v1, ev21 the single subtask of the consumer v2
    ExecutionVertex ev11 = eg.getJobVertex(v1.getID()).getTaskVertices()[0];
    ExecutionVertex ev21 = eg.getJobVertex(v2.getID()).getTaskVertices()[0];

    ev21.getCurrentExecutionAttempt().fail(new Exception("Fail with v1"));

    assertEquals(JobStatus.CANCELLING, strategy.getFailoverRegion(ev21).getState());
    assertEquals(JobStatus.CANCELLING, strategy.getFailoverRegion(ev11).getState());
}
Example 12: createSingleRegionExecutionGraph
import org.apache.flink.runtime.jobmanager.scheduler.Scheduler; // import the package/class this method depends on
private static ExecutionGraph createSingleRegionExecutionGraph(RestartStrategy restartStrategy) throws Exception {
    Instance instance = ExecutionGraphTestUtils.getInstance(
        new ActorTaskManagerGateway(
            new SimpleActorGateway(TestingUtils.directExecutionContext())),
        14);

    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    scheduler.newInstanceAvailable(instance);

    final JobID jobId = new JobID();
    final String jobName = "Test Job Sample Name";

    JobVertex v1 = new JobVertex("vertex1");
    JobVertex v2 = new JobVertex("vertex2");
    JobVertex v3 = new JobVertex("vertex3");

    v1.setParallelism(3);
    v2.setParallelism(2);
    v3.setParallelism(2);

    v1.setInvokableClass(AbstractInvokable.class);
    v2.setInvokableClass(AbstractInvokable.class);
    v3.setInvokableClass(AbstractInvokable.class);

    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v3.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v3.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

    List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3));

    ExecutionGraph eg = new ExecutionGraph(
        new DummyJobInformation(
            jobId,
            jobName),
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        AkkaUtils.getDefaultTimeout(),
        restartStrategy,
        new FailoverPipelinedRegionWithDirectExecutor(),
        scheduler);
    try {
        eg.attachJobGraph(ordered);
    }
    catch (JobException e) {
        e.printStackTrace();
        fail("Job failed with exception: " + e.getMessage());
    }

    eg.scheduleForExecution();
    return eg;
}