本文整理汇总了Java中org.apache.flink.runtime.jobgraph.JobGraph.setSnapshotSettings方法的典型用法代码示例。如果您正苦于以下问题:Java JobGraph.setSnapshotSettings方法的具体用法?Java JobGraph.setSnapshotSettings怎么用?Java JobGraph.setSnapshotSettings使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.flink.runtime.jobgraph.JobGraph的用法示例。
在下文中一共展示了JobGraph.setSnapshotSettings方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: createSimpleJobGraph
import org.apache.flink.runtime.jobgraph.JobGraph; //导入方法依赖的package包/类
/**
 * Builds a minimal single-vertex {@link JobGraph} with checkpointing enabled,
 * used as a shared fixture by the surrounding tests.
 *
 * @return a job graph named "test job" containing one no-op vertex that
 *     triggers, acks, and confirms checkpoints
 */
private JobGraph createSimpleJobGraph() {
    final JobVertex vertex = new JobVertex("Vertex");
    vertex.setInvokableClass(NoOpInvokable.class);

    // The single vertex serves as trigger, ack, and confirm target alike.
    final List<JobVertexID> ids = Collections.singletonList(vertex.getID());

    // Checkpoint every 5s with a 5s timeout, no min pause, up to 10 concurrent,
    // exactly-once mode, and no retained checkpoints after termination.
    final CheckpointCoordinatorConfiguration coordinatorConfig =
        new CheckpointCoordinatorConfiguration(
            5000,
            5000,
            0L,
            10,
            CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
            true);

    final JobGraph graph = new JobGraph("test job", vertex);
    graph.setSnapshotSettings(
        new JobCheckpointingSettings(ids, ids, ids, coordinatorConfig, null));
    return graph;
}
示例2: createExecutionGraph
import org.apache.flink.runtime.jobgraph.JobGraph; //导入方法依赖的package包/类
/**
 * Builds an {@link ExecutionGraph} for a vertex-less but checkpointed job,
 * driven by the given configuration.
 *
 * @param configuration the Flink configuration handed to the builder
 * @return the constructed execution graph
 * @throws Exception if the execution graph cannot be built
 */
private ExecutionGraph createExecutionGraph(Configuration configuration) throws Exception {
    final ScheduledExecutorService executor = TestingUtils.defaultExecutor();

    final JobGraph jobGraph = new JobGraph(new JobID(), "test");

    // No vertices participate in checkpointing; only the coordinator config matters here.
    final List<JobVertexID> none = Collections.emptyList();
    jobGraph.setSnapshotSettings(new JobCheckpointingSettings(
        none,
        none,
        none,
        new CheckpointCoordinatorConfiguration(
            100,                // checkpoint interval (ms)
            10 * 60 * 1000,     // checkpoint timeout: 10 minutes
            0,                  // no minimum pause between checkpoints
            1,                  // at most one concurrent checkpoint
            CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
            false),             // at-least-once mode
        null));

    return ExecutionGraphBuilder.buildGraph(
        null,
        jobGraph,
        configuration,
        executor,
        executor,
        new ProgrammedSlotProvider(1),
        getClass().getClassLoader(),
        new StandaloneCheckpointRecoveryFactory(),
        Time.seconds(10),
        new NoRestartStrategy(),
        new UnregisteredMetricsGroup(),
        1,
        blobWriter,
        LoggerFactory.getLogger(getClass()));
}
示例3: testDeserializationOfUserCodeWithUserClassLoader
import org.apache.flink.runtime.jobgraph.JobGraph; //导入方法依赖的package包/类
/**
 * Verifies that checkpointing settings carrying user-code objects (a master
 * hook factory and a custom state backend) whose classes are NOT on the system
 * class path survive job-graph serialization and are deserialized with the
 * user class loader during execution-graph construction.
 */
@Test
public void testDeserializationOfUserCodeWithUserClassLoader() throws Exception {
    final ClassLoader userClassLoader =
        new URLClassLoader(new URL[0], getClass().getClassLoader());
    // An object whose class only the user class loader can resolve.
    final Serializable outOfClassPath =
        CommonTestUtils.createObjectForClassNotInClassPath(userClassLoader);

    final MasterTriggerRestoreHook.Factory[] hooks =
        new MasterTriggerRestoreHook.Factory[] { new TestFactory(outOfClassPath) };

    final JobCheckpointingSettings checkpointingSettings = new JobCheckpointingSettings(
        Collections.<JobVertexID>emptyList(),
        Collections.<JobVertexID>emptyList(),
        Collections.<JobVertexID>emptyList(),
        new CheckpointCoordinatorConfiguration(
            1000L,
            10000L,
            0L,
            1,
            CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
            true),
        new SerializedValue<StateBackend>(new CustomStateBackend(outOfClassPath)),
        new SerializedValue<>(hooks));

    final JobGraph jobGraph = new JobGraph(new JobID(), "test job");
    jobGraph.setSnapshotSettings(checkpointingSettings);

    // Round-trip the job graph through serialization, as distributed
    // execution would, before building the execution graph from the copy.
    final JobGraph copy = CommonTestUtils.createCopySerializable(jobGraph);

    final ExecutionGraph eg = ExecutionGraphBuilder.buildGraph(
        null,
        copy,
        new Configuration(),
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        mock(SlotProvider.class),
        userClassLoader,
        new StandaloneCheckpointRecoveryFactory(),
        Time.seconds(10),
        new NoRestartStrategy(),
        new UnregisteredMetricsGroup(),
        10,
        VoidBlobWriter.getInstance(),
        log);

    // The single master hook must have been registered with the coordinator...
    assertEquals(1, eg.getCheckpointCoordinator().getNumberOfRegisteredMasterHooks());
    // ...and the state backend must deserialize via the user class loader.
    assertTrue(jobGraph.getCheckpointingSettings()
        .getDefaultStateBackend()
        .deserializeValue(userClassLoader) instanceof CustomStateBackend);
}
示例4: testCancelJobWithSavepointFailurePeriodicCheckpoints
import org.apache.flink.runtime.jobgraph.JobGraph; //导入方法依赖的package包/类
/**
 * Tests that a failed savepoint does not cancel the job and new checkpoints are triggered
 * after the failed cancel-with-savepoint.
 */
@Test
public void testCancelJobWithSavepointFailurePeriodicCheckpoints() throws Exception {
    final File savepointTarget = tmpFolder.newFolder();

    // A source that declines savepoints, simulating the behaviour of a
    // failed savepoint.
    final JobVertex sourceVertex = new JobVertex("Source");
    sourceVertex.setInvokableClass(FailOnSavepointSourceTask.class);
    sourceVertex.setParallelism(1);

    final JobGraph jobGraph = new JobGraph("TestingJob", sourceVertex);

    final List<JobVertexID> sourceId = Collections.singletonList(sourceVertex.getID());
    jobGraph.setSnapshotSettings(new JobCheckpointingSettings(
        sourceId,
        sourceId,
        sourceId,
        new CheckpointCoordinatorConfiguration(
            50,                 // aggressive interval so periodic checkpoints resume quickly
            3600000,            // generous timeout: one hour
            0,
            Integer.MAX_VALUE,
            CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
            true),
        null));

    final TestingCluster testingCluster = new TestingCluster(
        new Configuration(),
        highAvailabilityServices,
        true,
        false);

    try {
        testingCluster.start(true);

        final FiniteDuration askTimeout = new FiniteDuration(30, TimeUnit.SECONDS);
        final ActorGateway jobManager = testingCluster.getLeaderGateway(askTimeout);

        testingCluster.submitJobDetached(jobGraph);

        // The savepoint barrier only reaches the task once the source is running.
        Await.ready(
            jobManager.ask(new WaitForAllVerticesToBeRunning(jobGraph.getJobID()), askTimeout),
            askTimeout);

        // Cancel with savepoint. The expected outcome is that cancellation
        // fails due to a failed savepoint. After this, periodic checkpoints
        // should resume.
        final Object cancellationResponse = Await.result(
            jobManager.ask(
                new CancelJobWithSavepoint(jobGraph.getJobID(), savepointTarget.getAbsolutePath()),
                askTimeout),
            askTimeout);

        if (!(cancellationResponse instanceof CancellationFailure)) {
            fail("Unexpected cancellation response from JobManager: " + cancellationResponse);
        } else if (!FailOnSavepointSourceTask.CHECKPOINT_AFTER_SAVEPOINT_LATCH.await(30, TimeUnit.SECONDS)) {
            fail("No checkpoint was triggered after failed savepoint within expected duration");
        }
    } finally {
        testingCluster.stop();
    }
}
示例5: testJobManagerJMXMetricAccess
import org.apache.flink.runtime.jobgraph.JobGraph; //导入方法依赖的package包/类
/**
 * Tests that metrics registered on the JobManager are actually accessible via JMX.
 */
@Test
public void testJobManagerJMXMetricAccess() throws Exception {
    final Deadline deadline = new FiniteDuration(2, TimeUnit.MINUTES).fromNow();

    // Enable the JMX reporter on a fixed port range and pin a predictable
    // job-manager metric scope so the ObjectName below is deterministic.
    final Configuration flinkConfiguration = new Configuration();
    flinkConfiguration.setString(
        ConfigConstants.METRICS_REPORTER_PREFIX + "test." + ConfigConstants.METRICS_REPORTER_CLASS_SUFFIX,
        JMXReporter.class.getName());
    flinkConfiguration.setString(ConfigConstants.METRICS_REPORTER_PREFIX + "test.port", "9060-9075");
    flinkConfiguration.setString(MetricOptions.SCOPE_NAMING_JM_JOB, "jobmanager.<job_name>");

    final TestingCluster flink = new TestingCluster(flinkConfiguration);
    try {
        flink.start();

        final JobVertex sourceJobVertex = new JobVertex("Source");
        sourceJobVertex.setInvokableClass(BlockingInvokable.class);

        final JobGraph jobGraph = new JobGraph("TestingJob", sourceJobVertex);
        final List<JobVertexID> none = Collections.emptyList();
        jobGraph.setSnapshotSettings(new JobCheckpointingSettings(
            none,
            none,
            none,
            new CheckpointCoordinatorConfiguration(
                500,
                500,
                50,
                5,
                CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
                true),
            null));

        flink.waitForActorsToBeAlive();
        flink.submitJobDetached(jobGraph);

        Await.ready(
            flink.getLeaderGateway(deadline.timeLeft())
                .ask(new TestingJobManagerMessages.WaitForAllVerticesToBeRunning(jobGraph.getJobID()), deadline.timeLeft()),
            deadline.timeLeft());

        // Exactly one MBean must be registered under the configured scope...
        final MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer();
        final Set<ObjectName> nameSet = mBeanServer.queryNames(
            new ObjectName("org.apache.flink.jobmanager.job.lastCheckpointSize:job_name=TestingJob,*"),
            null);
        Assert.assertEquals(1, nameSet.size());
        // ...reporting -1 while no checkpoint has completed yet.
        assertEquals(-1L, mBeanServer.getAttribute(nameSet.iterator().next(), "Value"));

        final Future<Object> jobFinished = flink.getLeaderGateway(deadline.timeLeft())
            .ask(new TestingJobManagerMessages.NotifyWhenJobRemoved(jobGraph.getJobID()), deadline.timeLeft());

        BlockingInvokable.unblock();

        // wait til the job has finished
        Await.ready(jobFinished, deadline.timeLeft());
    } finally {
        flink.stop();
    }
}