

Java StreamExecutionEnvironment.enableCheckpointing Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.enableCheckpointing, drawn from open-source projects. If you are wondering what StreamExecutionEnvironment.enableCheckpointing does and how to use it, the curated examples below should help. You may also want to explore other usage examples of org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.


The following shows 15 code examples of the StreamExecutionEnvironment.enableCheckpointing method, sorted by popularity by default.
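Before the collected examples, here is a minimal, self-contained sketch showing the two common overloads of enableCheckpointing together with the related CheckpointConfig settings. It is not taken from any of the projects below; the class name and the interval values are illustrative assumptions, not recommendations.

import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class EnableCheckpointingSketch { // hypothetical class name

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Overload 1: checkpoint every 10 seconds; the mode defaults to EXACTLY_ONCE.
		env.enableCheckpointing(10000);

		// Overload 2: interval plus an explicit checkpointing mode (alternative to the call above):
		// env.enableCheckpointing(10000, CheckpointingMode.AT_LEAST_ONCE);

		// Finer-grained tuning lives on the CheckpointConfig:
		CheckpointConfig config = env.getCheckpointConfig();
		config.setMinPauseBetweenCheckpoints(500); // at least 500 ms between two checkpoints
		config.setCheckpointTimeout(60000);        // abort a checkpoint that runs longer than 60 s

		env.fromElements(1, 2, 3).print();
		env.execute("enableCheckpointing sketch");
	}
}

As the examples below show, intervals in practice range from tens of milliseconds in fault-tolerance tests (Example 4) to several minutes in production-style jobs (Example 8).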

Example 1: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

    // Setup the execution environment
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(1000);

    DataStream<TweetImpression> tweetStream = env.addSource(new TweetSourceFunction(true), "TweetImpression Source w/ duplicates");

    tweetStream
      .keyBy(TweetImpression.getKeySelector())
      .filter(new DedupeFilterFunction(TweetImpression.getKeySelector(), DEDUPE_CACHE_EXPIRATION_TIME_MS))
      .print();

    // execute program
    env.execute();
  }
 
Developer: jgrier, Project: FilteringExample, Lines: 17, Source: DedupeFilteringJob.java

Example 2: testFixedRestartingWhenCheckpointingAndExplicitExecutionRetriesNonZero

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
/**
 * Checks that in a streaming use case where checkpointing is enabled and the number
 * of execution retries is set to 42 and the delay to 1337, fixed delay restarting is used.
 */
@Test
public void testFixedRestartingWhenCheckpointingAndExplicitExecutionRetriesNonZero() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.enableCheckpointing(500);
	env.setNumberOfExecutionRetries(42);
	env.getConfig().setExecutionRetryDelay(1337);

	env.fromElements(1).print();

	StreamGraph graph = env.getStreamGraph();
	JobGraph jobGraph = graph.getJobGraph();

	RestartStrategies.RestartStrategyConfiguration restartStrategy =
		jobGraph.getSerializedExecutionConfig().deserializeValue(getClass().getClassLoader()).getRestartStrategy();

	Assert.assertNotNull(restartStrategy);
	Assert.assertTrue(restartStrategy instanceof RestartStrategies.FixedDelayRestartStrategyConfiguration);
	Assert.assertEquals(42, ((RestartStrategies.FixedDelayRestartStrategyConfiguration) restartStrategy).getRestartAttempts());
	Assert.assertEquals(1337, ((RestartStrategies.FixedDelayRestartStrategyConfiguration) restartStrategy).getDelayBetweenAttemptsInterval().toMilliseconds());
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: RestartStrategyTest.java

Example 3: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);
	env.enableCheckpointing(1000);
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
	env.setStateBackend(new FsStateBackend("file:///" + System.getProperty("java.io.tmpdir") + "/flink/backend"));

	CassandraSink<Tuple2<String, Integer>> sink = CassandraSink.addSink(env.addSource(new MySource()))
		.setQuery("INSERT INTO example.values (id, counter) values (?, ?);")
		.enableWriteAheadLog()
		.setClusterBuilder(new ClusterBuilder() {

			private static final long serialVersionUID = 2793938419775311824L;

			@Override
			public Cluster buildCluster(Cluster.Builder builder) {
				return builder.addContactPoint("127.0.0.1").build();
			}
		})
		.build();

	sink.name("Cassandra Sink").disableChaining().setParallelism(1).uid("hello");

	env.execute();
}
 
Developer: axbaretto, Project: flink, Lines: 26, Source: CassandraTupleWriteAheadSinkExample.java

Example 4: testProgram

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	int PARALLELISM = 12;

	env.enableCheckpointing(20);
	env.setParallelism(PARALLELISM);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	RollingSink<String> sink = new RollingSink<String>(outPath)
			.setBucketer(new NonRollingBucketer())
			.setBatchSize(10000)
			.setValidLengthPrefix("")
			.setPendingPrefix("")
			.setPendingSuffix(PENDING_SUFFIX)
			.setInProgressSuffix(IN_PROGRESS_SUFFIX);

	mapped.addSink(sink);

}
 
Developer: axbaretto, Project: flink, Lines: 27, Source: RollingSinkFaultToleranceITCase.java

Example 5: runPartitioningProgram

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
private static void runPartitioningProgram(int jobManagerPort, int parallelism) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", jobManagerPort);
	env.setParallelism(parallelism);
	env.getConfig().enableObjectReuse();

	env.setBufferTimeout(5L);
	env.enableCheckpointing(1000, CheckpointingMode.AT_LEAST_ONCE);

	env
		.addSource(new TimeStampingSource())
		.map(new IdMapper<Tuple2<Long, Long>>())
		.keyBy(0)
		.addSink(new TimestampingSink());

	env.execute("Partitioning Program");
}
 
Developer: axbaretto, Project: flink, Lines: 17, Source: StreamingScalabilityAndLatency.java

Example 6: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.enableCheckpointing(3000, CheckpointingMode.EXACTLY_ONCE);
	
	final RMQConnectionConfig connectionConfig = new RMQConnectionConfig.Builder()
		    .setHost("localhost")
		    .setPort(5672)
		    .setVirtualHost("/")
		    .setUserName("guest")
		    .setPassword("guest")
		    .build();
	
	final DataStream<String> stream = env
		    .addSource(new RMQSource<String>(
		        connectionConfig,            // config for the RabbitMQ connection
		        "flink-test",                 // name of the RabbitMQ queue to consume
		        true,                        // use correlation ids; can be false if only at-least-once is required
		        new SimpleStringSchema()))   // deserialization schema to turn messages into Java objects
		    .setParallelism(1);              // non-parallel source is only required for exactly-once
	
	stream.rebalance().map(new MapFunction<String, String>() {
		private static final long serialVersionUID = -6867736771747690202L;

		@Override
		public String map(String value) throws Exception {
			return "RabbitMQ and Flink says: " + value;
		}
	}).print();

	env.execute();
}
 
Developer: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 32, Source: FlinkRabbitMQSourceExample.java

Example 7: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        final ParameterTool params = ParameterTool.fromArgs(args);
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.getConfig().setGlobalJobParameters(params);
        env.setParallelism(2);
        env.enableCheckpointing(5000);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        env.setStateBackend(new FsStateBackend("file:///Users/zhouzhou/Binary/flink-1.3.2/testcheckpoints/"));
        RawLogGroupListDeserializer deserializer = new RawLogGroupListDeserializer();
        Properties configProps = new Properties();
        configProps.put(ConfigConstants.LOG_ENDPOINT, sEndpoint);
        configProps.put(ConfigConstants.LOG_ACCESSSKEYID, sAccessKeyId);
        configProps.put(ConfigConstants.LOG_ACCESSKEY, sAccessKey);
        configProps.put(ConfigConstants.LOG_PROJECT, sProject);
        configProps.put(ConfigConstants.LOG_LOGSTORE, sLogstore);
        configProps.put(ConfigConstants.LOG_MAX_NUMBER_PER_FETCH, "10");
        configProps.put(ConfigConstants.LOG_CONSUMER_BEGIN_POSITION, Consts.LOG_FROM_CHECKPOINT);
        configProps.put(ConfigConstants.LOG_CONSUMERGROUP, "23_ots_sla_etl_product");
        DataStream<RawLogGroupList> logTestStream = env.addSource(
                new FlinkLogConsumer<RawLogGroupList>(deserializer, configProps)
        );

        logTestStream.writeAsText("/Users/zhouzhou/Binary/flink-1.3.2/data/newb.txt." + System.nanoTime());
        env.execute("flink log connector");
}
 
Developer: aliyun, Project: aliyun-log-flink-connector, Lines: 29, Source: ConsumerSample.java

Example 8: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    // Read parameters from command line
    final ParameterTool params = ParameterTool.fromArgs(args);

    if(params.getNumberOfParameters() < 4) {
        System.out.println("\nUsage: FlinkReadKafka --read-topic <topic> --write-topic <topic> --bootstrap.servers <kafka brokers> --group.id <groupid>");
        return;
    }


    // setup streaming environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(4, 10000));
    env.enableCheckpointing(300000); // 300 seconds
    env.getConfig().setGlobalJobParameters(params);

    DataStream<String> messageStream = env
            .addSource(new FlinkKafkaConsumer010<>(
                    params.getRequired("read-topic"),
                    new SimpleStringSchema(),
                    params.getProperties())).name("Read from Kafka");

    // setup table environment (created here but not used further in this snippet)
    StreamTableEnvironment sTableEnv = TableEnvironment.getTableEnvironment(env);


    // Write JSON payload back to Kafka topic
    messageStream.addSink(new FlinkKafkaProducer010<>(
                params.getRequired("write-topic"),
                new SimpleStringSchema(),
                params.getProperties())).name("Write To Kafka");

    env.execute("FlinkReadWriteKafka");
}
 
Developer: kgorman, Project: TrafficAnalyzer, Lines: 35, Source: FlinkReadWriteKafka.java

Example 9: exactlyOnceWriteSimulator

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public void exactlyOnceWriteSimulator(final StreamId outStreamId, final StreamUtils streamUtils, int numElements) throws Exception {

		final int checkpointInterval = 100;

		final int restartAttempts = 1;
		final long delayBetweenAttempts = 0L;

		// 30 sec timeout for all
		final long txTimeout = 30 * 1000;
		final long txTimeoutMax = 30 * 1000;
		final long txTimeoutGracePeriod = 30 * 1000;

		final String jobName = "ExactlyOnceSimulator";

		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(parallelism);

		env.enableCheckpointing(checkpointInterval);
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(restartAttempts, delayBetweenAttempts));

		// Pravega Writer
		FlinkPravegaWriter<Integer> pravegaExactlyOnceWriter = streamUtils.newExactlyOnceWriter(outStreamId,
				Integer.class, new IdentityRouter<>());

		env
				.addSource(new IntegerCounterSourceGenerator(numElements))
				.map(new FailingIdentityMapper<>(numElements / parallelism / 2))
				.rebalance()
				.addSink(pravegaExactlyOnceWriter);

		env.execute(jobName);
	}
 
Developer: pravega, Project: nautilus-samples, Lines: 33, Source: EventCounterApp.java

Example 10: standardReadWriteSimulator

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
public void standardReadWriteSimulator(final StreamId inStreamId, final StreamId outStreamId, final StreamUtils streamUtils, int numElements) throws Exception {

		final int checkpointInterval = 100;
		final int taskFailureRestartAttempts = 1;
		final long delayBetweenRestartAttempts = 0L;
		final long startTime = 0L;
		final String jobName = "standardReadWriteSimulator";

		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(parallelism);
		env.enableCheckpointing(checkpointInterval);
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(taskFailureRestartAttempts, delayBetweenRestartAttempts));

		// the Pravega reader
		final FlinkPravegaReader<Integer> pravegaSource = streamUtils.getFlinkPravegaParams().newReader(inStreamId, startTime, Integer.class);

		// Pravega Writer
		FlinkPravegaWriter<Integer> pravegaWriter = streamUtils.getFlinkPravegaParams().newWriter(outStreamId, Integer.class, new IdentityRouter<>());
		pravegaWriter.setPravegaWriterMode(PravegaWriterMode.ATLEAST_ONCE);

		DataStream<Integer> stream = env.addSource(pravegaSource).map(new IdentityMapper<>());

		stream.addSink(pravegaWriter);

		stream.addSink(new IntSequenceExactlyOnceValidator(numElements));

		env.execute(jobName);

	}
 
Developer: pravega, Project: nautilus-samples, Lines: 30, Source: EventCounterApp.java

Example 11: createJobGraphWithOperatorState

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
private static JobGraph createJobGraphWithOperatorState(
		int parallelism, int maxParallelism, OperatorCheckpointMethod checkpointMethod) {

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.getConfig().setMaxParallelism(maxParallelism);
	env.enableCheckpointing(Long.MAX_VALUE);
	env.setRestartStrategy(RestartStrategies.noRestart());

	StateSourceBase.workStartedLatch = new CountDownLatch(parallelism);

	SourceFunction<Integer> src;

	switch (checkpointMethod) {
		case CHECKPOINTED_FUNCTION:
			src = new PartitionedStateSource(false);
			break;
		case CHECKPOINTED_FUNCTION_BROADCAST:
			src = new PartitionedStateSource(true);
			break;
		case LIST_CHECKPOINTED:
			src = new PartitionedStateSourceListCheckpointed();
			break;
		case NON_PARTITIONED:
			src = new NonPartitionedStateSource();
			break;
		default:
			throw new IllegalArgumentException();
	}

	DataStream<Integer> input = env.addSource(src);

	input.addSink(new DiscardingSink<Integer>());

	return env.getStreamGraph().getJobGraph();
}
 
Developer: axbaretto, Project: flink, Lines: 37, Source: RescalingITCase.java

Example 12: testCreateSavepointOnFlink11WithRocksDB

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
/**
	 * This has to be manually executed to create the savepoint on Flink 1.1.
	 */
	@Test
	@Ignore
	public void testCreateSavepointOnFlink11WithRocksDB() throws Exception {

		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
		RocksDBStateBackend rocksBackend =
				new RocksDBStateBackend(new MemoryStateBackend());
//		rocksBackend.enableFullyAsyncSnapshots();
		env.setStateBackend(rocksBackend);
		env.enableCheckpointing(500);
		env.setParallelism(4);
		env.setMaxParallelism(4);

		// create source
		env
				.addSource(new LegacyCheckpointedSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
				.flatMap(new LegacyCheckpointedFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
				.keyBy(0)
				.flatMap(new LegacyCheckpointedFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
				.keyBy(0)
				.flatMap(new KeyedStateSettingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
				.keyBy(0)
				.transform(
						"custom_operator",
						new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
						new CheckpointedUdfOperator(new LegacyCheckpointedFlatMapWithKeyedState())).uid("LegacyCheckpointedOperator")
				.addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));

		executeAndSavepoint(
				env,
				"src/test/resources/stateful-udf-migration-itcase-flink1.1-rocksdb-savepoint",
				new Tuple2<>(EXPECTED_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
	}
 
Developer: axbaretto, Project: flink, Lines: 38, Source: StatefulJobSavepointFrom11MigrationITCase.java

Example 13: createJobGraphWithKeyedAndNonPartitionedOperatorState

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
private static JobGraph createJobGraphWithKeyedAndNonPartitionedOperatorState(
		int parallelism,
		int maxParallelism,
		int fixedParallelism,
		int numberKeys,
		int numberElements,
		boolean terminateAfterEmission,
		int checkpointingInterval) {

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.getConfig().setMaxParallelism(maxParallelism);
	env.enableCheckpointing(checkpointingInterval);
	env.setRestartStrategy(RestartStrategies.noRestart());

	DataStream<Integer> input = env.addSource(new SubtaskIndexNonPartitionedStateSource(
			numberKeys,
			numberElements,
			terminateAfterEmission))
			.setParallelism(fixedParallelism)
			.keyBy(new KeySelector<Integer, Integer>() {
				private static final long serialVersionUID = -7952298871120320940L;

				@Override
				public Integer getKey(Integer value) throws Exception {
					return value;
				}
			});

	SubtaskIndexFlatMapper.workCompletedLatch = new CountDownLatch(numberKeys);

	DataStream<Tuple2<Integer, Integer>> result = input.flatMap(new SubtaskIndexFlatMapper(numberElements));

	result.addSink(new CollectionSink<Tuple2<Integer, Integer>>());

	return env.getStreamGraph().getJobGraph();
}
 
Developer: axbaretto, Project: flink, Lines: 38, Source: RescalingITCase.java

Example 14: testSavepointRestoreFromFlink11FromRocksDB

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
@Test
public void testSavepointRestoreFromFlink11FromRocksDB() throws Exception {

	final int EXPECTED_SUCCESSFUL_CHECKS = 21;

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	// RocksDB state backend backed by an in-memory state backend (test setup only)
	env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
	env.enableCheckpointing(500);
	env.setParallelism(4);
	env.setMaxParallelism(4);

	// create source
	env
			.addSource(new RestoringCheckingSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
			.flatMap(new RestoringCheckingFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
			.keyBy(0)
			.flatMap(new RestoringCheckingFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
			.keyBy(0)
			.flatMap(new KeyedStateCheckingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
			.keyBy(0)
			.transform(
					"custom_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new RestoringCheckingUdfOperator(new RestoringCheckingFlatMapWithKeyedState())).uid("LegacyCheckpointedOperator")
			.addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));

	restoreAndExecute(
			env,
			getResourceFilename("stateful-udf-migration-itcase-flink1.1-savepoint-rocksdb"),
			new Tuple2<>(SUCCESSFUL_CHECK_ACCUMULATOR, EXPECTED_SUCCESSFUL_CHECKS));
}
 
Developer: axbaretto, Project: flink, Lines: 34, Source: StatefulUDFSavepointMigrationITCase.java

Example 15: testSavepointRestoreFromFlink11FromRocksDB

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class this method depends on
@Test
public void testSavepointRestoreFromFlink11FromRocksDB() throws Exception {

	final int expectedSuccessfulChecks = 21;

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	// RocksDB state backend backed by an in-memory state backend (test setup only)
	env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
	env.enableCheckpointing(500);
	env.setParallelism(4);
	env.setMaxParallelism(4);

	// create source
	env
			.addSource(new RestoringCheckingSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
			.flatMap(new RestoringCheckingFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
			.keyBy(0)
			.flatMap(new RestoringCheckingFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
			.keyBy(0)
			.flatMap(new KeyedStateCheckingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
			.keyBy(0)
			.transform(
					"custom_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new RestoringCheckingUdfOperator(new RestoringCheckingFlatMapWithKeyedState())).uid("LegacyCheckpointedOperator")
			.addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));

	restoreAndExecute(
			env,
			getResourceFilename("stateful-udf-migration-itcase-flink1.1-rocksdb-savepoint"),
			new Tuple2<>(SUCCESSFUL_CHECK_ACCUMULATOR, expectedSuccessfulChecks));
}
 
Developer: axbaretto, Project: flink, Lines: 34, Source: StatefulJobSavepointFrom11MigrationITCase.java


Note: The org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.enableCheckpointing examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their developers; copyright remains with the original authors. For distribution and use, refer to each project's license; do not repost without permission.