

Java StreamExecutionEnvironment.setMaxParallelism Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.setMaxParallelism. If you are unsure what StreamExecutionEnvironment.setMaxParallelism does or how to use it, the curated code examples below should help. You can also explore further usage examples of org.apache.flink.streaming.api.environment.StreamExecutionEnvironment, the class that declares this method.


The following shows 13 code examples of the StreamExecutionEnvironment.setMaxParallelism method, ordered by popularity by default.
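Before turning to the examples, here is a minimal, self-contained sketch of the method's typical use (the class name MaxParallelismExample and the concrete values are illustrative assumptions, not drawn from the examples below). The environment-level setMaxParallelism fixes the number of key groups used for keyed state, which bounds how far the job can later be rescaled from a savepoint; the operator-level variant overrides that cap for a single operator, as the sources in the examples below do with setMaxParallelism(1).

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class MaxParallelismExample { // illustrative class name

	public static void main(String[] args) throws Exception {
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Run with 4 parallel subtasks now, but keep 128 key groups so the job
		// can later be restored from a savepoint at any parallelism up to 128.
		env.setParallelism(4);
		env.setMaxParallelism(128);

		env.fromElements(1L, 2L, 3L)
				.map(new MapFunction<Long, Long>() {
					@Override
					public Long map(Long value) {
						return value * 2;
					}
				})
				// Operator-level override: cap this operator at a single key group.
				.setMaxParallelism(1)
				.print();

		env.execute("setMaxParallelism example");
	}
}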

Example 1: testCreateSavepointOnFlink12

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
/**
 * This has to be manually executed to create the savepoint on Flink 1.2.
 */
@Test
@Ignore
public void testCreateSavepointOnFlink12() throws Exception {

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	env.setStateBackend(new MemoryStateBackend());
	env.enableCheckpointing(500);
	env.setParallelism(4);
	env.setMaxParallelism(4);

	env
			.addSource(new LegacyCheckpointedSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
			.flatMap(new LegacyCheckpointedFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
			.keyBy(0)
			.flatMap(new LegacyCheckpointedFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
			.keyBy(0)
			.flatMap(new KeyedStateSettingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
			.keyBy(0)
			.transform(
					"custom_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new CheckpointedUdfOperator(new LegacyCheckpointedFlatMapWithKeyedState())).uid("LegacyCheckpointedOperator")
			.keyBy(0)
			.transform(
					"timely_stateful_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new TimelyStatefulOperator()).uid("TimelyStatefulOperator")
			.addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>());

	executeAndSavepoint(
			env,
			"src/test/resources/" + getSavepointPath(),
			new Tuple2<>(AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
}
 
Developer: axbaretto, Project: flink, Lines of code: 39, Source file: StatefulJobSavepointFrom12MigrationITCase.java

Example 2: testCreateSavepointOnFlink12WithRocksDB

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
/**
 * This has to be manually executed to create the savepoint on Flink 1.2.
 */
@Test
@Ignore
public void testCreateSavepointOnFlink12WithRocksDB() throws Exception {

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	RocksDBStateBackend rocksBackend =
			new RocksDBStateBackend(new MemoryStateBackend());
	env.setStateBackend(rocksBackend);
	env.enableCheckpointing(500);
	env.setParallelism(4);
	env.setMaxParallelism(4);

	env
			.addSource(new LegacyCheckpointedSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
			.flatMap(new LegacyCheckpointedFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
			.keyBy(0)
			.flatMap(new LegacyCheckpointedFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
			.keyBy(0)
			.flatMap(new KeyedStateSettingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
			.keyBy(0)
			.transform(
					"custom_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new CheckpointedUdfOperator(new LegacyCheckpointedFlatMapWithKeyedState())).uid("LegacyCheckpointedOperator")
			.keyBy(0)
			.transform(
					"timely_stateful_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new TimelyStatefulOperator()).uid("TimelyStatefulOperator")
			.addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>());

	executeAndSavepoint(
			env,
			"src/test/resources/" + getRocksDBSavepointPath(),
			new Tuple2<>(AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
}
 
Developer: axbaretto, Project: flink, Lines of code: 41, Source file: StatefulJobSavepointFrom12MigrationITCase.java

Example 3: testCreateSavepointOnFlink11

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
/**
 * This has to be manually executed to create the savepoint on Flink 1.1.
 */
@Test
@Ignore
public void testCreateSavepointOnFlink11() throws Exception {

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	// we only test the memory state backend for now
	env.setStateBackend(new MemoryStateBackend());
	env.enableCheckpointing(500);
	env.setParallelism(4);
	env.setMaxParallelism(4);

	// create source
	env
			.addSource(new LegacyCheckpointedSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
			.flatMap(new LegacyCheckpointedFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
			.keyBy(0)
			.flatMap(new LegacyCheckpointedFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
			.keyBy(0)
			.flatMap(new KeyedStateSettingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
			.keyBy(0)
			.transform(
					"custom_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new CheckpointedUdfOperator(new LegacyCheckpointedFlatMapWithKeyedState())).uid("LegacyCheckpointedOperator")
			.addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));

	executeAndSavepoint(
			env,
			"src/test/resources/stateful-udf-migration-itcase-flink1.1-savepoint",
			new Tuple2<>(EXPECTED_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
}
 
Developer: axbaretto, Project: flink, Lines of code: 36, Source file: StatefulJobSavepointFrom11MigrationITCase.java

Example 4: testCreateSavepointOnFlink11WithRocksDB

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
/**
 * This has to be manually executed to create the savepoint on Flink 1.1.
 */
@Test
@Ignore
public void testCreateSavepointOnFlink11WithRocksDB() throws Exception {

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	RocksDBStateBackend rocksBackend =
			new RocksDBStateBackend(new MemoryStateBackend());
//	rocksBackend.enableFullyAsyncSnapshots();
	env.setStateBackend(rocksBackend);
	env.enableCheckpointing(500);
	env.setParallelism(4);
	env.setMaxParallelism(4);

	// create source
	env
			.addSource(new LegacyCheckpointedSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
			.flatMap(new LegacyCheckpointedFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
			.keyBy(0)
			.flatMap(new LegacyCheckpointedFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
			.keyBy(0)
			.flatMap(new KeyedStateSettingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
			.keyBy(0)
			.transform(
					"custom_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new CheckpointedUdfOperator(new LegacyCheckpointedFlatMapWithKeyedState())).uid("LegacyCheckpointedOperator")
			.addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));

	executeAndSavepoint(
			env,
			"src/test/resources/stateful-udf-migration-itcase-flink1.1-rocksdb-savepoint",
			new Tuple2<>(EXPECTED_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
}
 
Developer: axbaretto, Project: flink, Lines of code: 38, Source file: StatefulJobSavepointFrom11MigrationITCase.java

Example 5: testSavepointRestoreFromFlink11

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
@Test
public void testSavepointRestoreFromFlink11() throws Exception {

	final int expectedSuccessfulChecks = 21;

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	// we only test the memory state backend for now
	env.setStateBackend(new MemoryStateBackend());
	env.enableCheckpointing(500);
	env.setParallelism(4);
	env.setMaxParallelism(4);

	// create source
	env
			.addSource(new RestoringCheckingSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
			.flatMap(new RestoringCheckingFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
			.keyBy(0)
			.flatMap(new RestoringCheckingFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
			.keyBy(0)
			.flatMap(new KeyedStateCheckingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
			.keyBy(0)
			.transform(
					"custom_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new RestoringCheckingUdfOperator(new RestoringCheckingFlatMapWithKeyedState())).uid("LegacyCheckpointedOperator")
			.addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));

	restoreAndExecute(
			env,
			getResourceFilename("stateful-udf-migration-itcase-flink1.1-savepoint"),
			new Tuple2<>(SUCCESSFUL_CHECK_ACCUMULATOR, expectedSuccessfulChecks));
}
 
Developer: axbaretto, Project: flink, Lines of code: 34, Source file: StatefulJobSavepointFrom11MigrationITCase.java

Example 6: testSavepointRestoreFromFlink11FromRocksDB

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
@Test
public void testSavepointRestoreFromFlink11FromRocksDB() throws Exception {

	final int expectedSuccessfulChecks = 21;

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	// use RocksDB on top of the memory state backend
	env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
	env.enableCheckpointing(500);
	env.setParallelism(4);
	env.setMaxParallelism(4);

	// create source
	env
			.addSource(new RestoringCheckingSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
			.flatMap(new RestoringCheckingFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
			.keyBy(0)
			.flatMap(new RestoringCheckingFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
			.keyBy(0)
			.flatMap(new KeyedStateCheckingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
			.keyBy(0)
			.transform(
					"custom_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new RestoringCheckingUdfOperator(new RestoringCheckingFlatMapWithKeyedState())).uid("LegacyCheckpointedOperator")
			.addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));

	restoreAndExecute(
			env,
			getResourceFilename("stateful-udf-migration-itcase-flink1.1-rocksdb-savepoint"),
			new Tuple2<>(SUCCESSFUL_CHECK_ACCUMULATOR, expectedSuccessfulChecks));
}
 
Developer: axbaretto, Project: flink, Lines of code: 34, Source file: StatefulJobSavepointFrom11MigrationITCase.java

Example 7: testCreateSavepointOnFlink11WithRocksDB

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
/**
 * This has to be manually executed to create the savepoint on Flink 1.1.
 */
@Test
@Ignore
public void testCreateSavepointOnFlink11WithRocksDB() throws Exception {

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	RocksDBStateBackend rocksBackend =
			new RocksDBStateBackend(new MemoryStateBackend());
//	rocksBackend.enableFullyAsyncSnapshots();
	env.setStateBackend(rocksBackend);
	env.enableCheckpointing(500);
	env.setParallelism(4);
	env.setMaxParallelism(4);

	// create source
	env
			.addSource(new LegacyCheckpointedSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
			.flatMap(new LegacyCheckpointedFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
			.keyBy(0)
			.flatMap(new LegacyCheckpointedFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
			.keyBy(0)
			.flatMap(new KeyedStateSettingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
			.keyBy(0)
			.transform(
					"custom_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new CheckpointedUdfOperator(new LegacyCheckpointedFlatMapWithKeyedState())).uid("LegacyCheckpointedOperator")
			.addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));

	executeAndSavepoint(
			env,
			"src/test/resources/stateful-udf-migration-itcase-flink1.1-savepoint-rocksdb",
			new Tuple2<>(EXPECTED_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
}
 
Developer: axbaretto, Project: flink, Lines of code: 38, Source file: StatefulUDFSavepointMigrationITCase.java

Example 8: testSavepointRestoreFromFlink11

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
@Test
public void testSavepointRestoreFromFlink11() throws Exception {

	final int EXPECTED_SUCCESSFUL_CHECKS = 21;

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	// we only test the memory state backend for now
	env.setStateBackend(new MemoryStateBackend());
	env.enableCheckpointing(500);
	env.setParallelism(4);
	env.setMaxParallelism(4);

	// create source
	env
			.addSource(new RestoringCheckingSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
			.flatMap(new RestoringCheckingFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
			.keyBy(0)
			.flatMap(new RestoringCheckingFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
			.keyBy(0)
			.flatMap(new KeyedStateCheckingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
			.keyBy(0)
			.transform(
					"custom_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new RestoringCheckingUdfOperator(new RestoringCheckingFlatMapWithKeyedState())).uid("LegacyCheckpointedOperator")
			.addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));

	restoreAndExecute(
			env,
			getResourceFilename("stateful-udf-migration-itcase-flink1.1-savepoint"),
			new Tuple2<>(SUCCESSFUL_CHECK_ACCUMULATOR, EXPECTED_SUCCESSFUL_CHECKS));
}
 
Developer: axbaretto, Project: flink, Lines of code: 34, Source file: StatefulUDFSavepointMigrationITCase.java

Example 9: testSavepointRestoreFromFlink11FromRocksDB

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
@Test
public void testSavepointRestoreFromFlink11FromRocksDB() throws Exception {

	final int EXPECTED_SUCCESSFUL_CHECKS = 21;

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	// use RocksDB on top of the memory state backend
	env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
	env.enableCheckpointing(500);
	env.setParallelism(4);
	env.setMaxParallelism(4);

	// create source
	env
			.addSource(new RestoringCheckingSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
			.flatMap(new RestoringCheckingFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
			.keyBy(0)
			.flatMap(new RestoringCheckingFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
			.keyBy(0)
			.flatMap(new KeyedStateCheckingFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
			.keyBy(0)
			.transform(
					"custom_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new RestoringCheckingUdfOperator(new RestoringCheckingFlatMapWithKeyedState())).uid("LegacyCheckpointedOperator")
			.addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>(EXPECTED_ELEMENTS_ACCUMULATOR));

	restoreAndExecute(
			env,
			getResourceFilename("stateful-udf-migration-itcase-flink1.1-savepoint-rocksdb"),
			new Tuple2<>(SUCCESSFUL_CHECK_ACCUMULATOR, EXPECTED_SUCCESSFUL_CHECKS));
}
 
Developer: axbaretto, Project: flink, Lines of code: 34, Source file: StatefulUDFSavepointMigrationITCase.java

Example 10: testSavepointRestoreFromFlink12

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
@Test
public void testSavepointRestoreFromFlink12() throws Exception {

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setRestartStrategy(RestartStrategies.noRestart());
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	env.setStateBackend(new MemoryStateBackend());
	env.enableCheckpointing(500);
	env.setParallelism(4);
	env.setMaxParallelism(4);

	env
			.addSource(new CheckingRestoringSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
			.flatMap(new CheckingRestoringFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
			.keyBy(0)
			.flatMap(new CheckingRestoringFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
			.keyBy(0)
			.flatMap(new CheckingKeyedStateFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
			.keyBy(0)
			.transform(
					"custom_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new CheckingRestoringUdfOperator(new CheckingRestoringFlatMapWithKeyedStateInOperator())).uid("LegacyCheckpointedOperator")
			.keyBy(0)
			.transform(
					"timely_stateful_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new CheckingTimelyStatefulOperator()).uid("TimelyStatefulOperator")
			.addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>());

	restoreAndExecute(
			env,
			getResourceFilename(getSavepointPath()),
			new Tuple2<>(CheckingRestoringSource.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, 1),
			new Tuple2<>(CheckingRestoringFlatMap.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(CheckingRestoringFlatMapWithKeyedState.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(CheckingKeyedStateFlatMap.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(CheckingRestoringUdfOperator.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(CheckingRestoringFlatMapWithKeyedStateInOperator.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESS_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_EVENT_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESSING_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
}
 
Developer: axbaretto, Project: flink, Lines of code: 45, Source file: StatefulJobSavepointFrom12MigrationITCase.java

Example 11: testSavepointRestoreFromFlink12FromRocksDB

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
@Test
public void testSavepointRestoreFromFlink12FromRocksDB() throws Exception {

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setRestartStrategy(RestartStrategies.noRestart());
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()));
	env.enableCheckpointing(500);
	env.setParallelism(4);
	env.setMaxParallelism(4);

	env
			.addSource(new CheckingRestoringSource(NUM_SOURCE_ELEMENTS)).setMaxParallelism(1).uid("LegacyCheckpointedSource")
			.flatMap(new CheckingRestoringFlatMap()).startNewChain().uid("LegacyCheckpointedFlatMap")
			.keyBy(0)
			.flatMap(new CheckingRestoringFlatMapWithKeyedState()).startNewChain().uid("LegacyCheckpointedFlatMapWithKeyedState")
			.keyBy(0)
			.flatMap(new CheckingKeyedStateFlatMap()).startNewChain().uid("KeyedStateSettingFlatMap")
			.keyBy(0)
			.transform(
					"custom_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new CheckingRestoringUdfOperator(new CheckingRestoringFlatMapWithKeyedStateInOperator())).uid("LegacyCheckpointedOperator")
			.keyBy(0)
			.transform(
					"timely_stateful_operator",
					new TypeHint<Tuple2<Long, Long>>() {}.getTypeInfo(),
					new CheckingTimelyStatefulOperator()).uid("TimelyStatefulOperator")
			.addSink(new AccumulatorCountingSink<Tuple2<Long, Long>>());

	restoreAndExecute(
			env,
			getResourceFilename(getRocksDBSavepointPath()),
			new Tuple2<>(CheckingRestoringSource.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, 1),
			new Tuple2<>(CheckingRestoringFlatMap.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(CheckingRestoringFlatMapWithKeyedState.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(CheckingKeyedStateFlatMap.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(CheckingRestoringUdfOperator.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(CheckingRestoringFlatMapWithKeyedStateInOperator.SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESS_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_EVENT_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(CheckingTimelyStatefulOperator.SUCCESSFUL_PROCESSING_TIME_CHECK_ACCUMULATOR, NUM_SOURCE_ELEMENTS),
			new Tuple2<>(AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, NUM_SOURCE_ELEMENTS));
}
 
Developer: axbaretto, Project: flink, Lines of code: 45, Source file: StatefulJobSavepointFrom12MigrationITCase.java

Example 12: doTestTumblingTimeWindowWithKVState

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
public void doTestTumblingTimeWindowWithKVState(int maxParallelism) {
	final int NUM_ELEMENTS_PER_KEY = 3000;
	final int WINDOW_SIZE = 100;
	final int NUM_KEYS = 100;
	FailingSource.reset();

	try {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment(
				"localhost", cluster.getLeaderRPCPort());

		env.setParallelism(PARALLELISM);
		env.setMaxParallelism(maxParallelism);
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
		env.enableCheckpointing(100);
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 0));
		env.getConfig().disableSysoutLogging();
		env.setStateBackend(this.stateBackend);

		env
				.addSource(new FailingSource(NUM_KEYS, NUM_ELEMENTS_PER_KEY, NUM_ELEMENTS_PER_KEY / 3))
				.rebalance()
				.keyBy(0)
				.timeWindow(Time.of(WINDOW_SIZE, MILLISECONDS))
				.apply(new RichWindowFunction<Tuple2<Long, IntType>, Tuple4<Long, Long, Long, IntType>, Tuple, TimeWindow>() {

					private boolean open = false;

					private ValueState<Integer> count;

					@Override
					public void open(Configuration parameters) {
						assertEquals(PARALLELISM, getRuntimeContext().getNumberOfParallelSubtasks());
						open = true;
						count = getRuntimeContext().getState(
								new ValueStateDescriptor<>("count", Integer.class, 0));
					}

					@Override
					public void apply(
							Tuple tuple,
							TimeWindow window,
							Iterable<Tuple2<Long, IntType>> values,
							Collector<Tuple4<Long, Long, Long, IntType>> out) throws Exception {

						// the window count state starts with the key, so that we get
						// different count results for each key
						if (count.value() == 0) {
							count.update(tuple.<Long>getField(0).intValue());
						}

						// validate that the function has been opened properly
						assertTrue(open);

						count.update(count.value() + 1);
						out.collect(new Tuple4<>(tuple.<Long>getField(0), window.getStart(), window.getEnd(), new IntType(count.value())));
					}
				})
				.addSink(new CountValidatingSink(NUM_KEYS, NUM_ELEMENTS_PER_KEY / WINDOW_SIZE)).setParallelism(1);


		tryExecute(env, "Tumbling Window Test");
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Developer: axbaretto, Project: flink, Lines of code: 68, Source file: EventTimeWindowCheckpointingITCase.java

Example 13: testSlidingTimeWindow

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
@Test
public void testSlidingTimeWindow() {
	final int NUM_ELEMENTS_PER_KEY = 3000;
	final int WINDOW_SIZE = 1000;
	final int WINDOW_SLIDE = 100;
	final int NUM_KEYS = 100;
	FailingSource.reset();

	try {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment(
				"localhost", cluster.getLeaderRPCPort());

		env.setMaxParallelism(2 * PARALLELISM);
		env.setParallelism(PARALLELISM);
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
		env.enableCheckpointing(100);
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 0));
		env.getConfig().disableSysoutLogging();
		env.setStateBackend(this.stateBackend);

		env
				.addSource(new FailingSource(NUM_KEYS, NUM_ELEMENTS_PER_KEY, NUM_ELEMENTS_PER_KEY / 3))
				.rebalance()
				.keyBy(0)
				.timeWindow(Time.of(WINDOW_SIZE, MILLISECONDS), Time.of(WINDOW_SLIDE, MILLISECONDS))
				.apply(new RichWindowFunction<Tuple2<Long, IntType>, Tuple4<Long, Long, Long, IntType>, Tuple, TimeWindow>() {

					private boolean open = false;

					@Override
					public void open(Configuration parameters) {
						assertEquals(PARALLELISM, getRuntimeContext().getNumberOfParallelSubtasks());
						open = true;
					}

					@Override
					public void apply(
							Tuple tuple,
							TimeWindow window,
							Iterable<Tuple2<Long, IntType>> values,
							Collector<Tuple4<Long, Long, Long, IntType>> out) {

						// validate that the function has been opened properly
						assertTrue(open);

						int sum = 0;
						long key = -1;

						for (Tuple2<Long, IntType> value : values) {
							sum += value.f1.value;
							key = value.f0;
						}
						out.collect(new Tuple4<>(key, window.getStart(), window.getEnd(), new IntType(sum)));
					}
				})
				.addSink(new ValidatingSink(NUM_KEYS, NUM_ELEMENTS_PER_KEY / WINDOW_SLIDE)).setParallelism(1);


		tryExecute(env, "Tumbling Window Test");
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Developer: axbaretto, Project: flink, Lines of code: 66, Source file: EventTimeWindowCheckpointingITCase.java


Note: The examples of the org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.setMaxParallelism method in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to each project's License before distributing or using the code; do not reproduce this article without permission.