

Java AbstractStreamOperatorTestHarness Class Code Examples

This article collects typical usage examples of the Java class org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness. If you are wondering what AbstractStreamOperatorTestHarness is for or how to use it, the curated examples below should help.


AbstractStreamOperatorTestHarness belongs to the org.apache.flink.streaming.util package. The sections below show 15 code examples of the class, sorted by popularity by default.
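
Before the examples, here is a minimal sketch of the typical harness lifecycle: wrap an operator, call setup() and open(), take a snapshot, then feed that snapshot into a fresh harness to exercise the restore path. This is an illustrative sketch rather than code from the projects below; MySource is a hypothetical placeholder, and the OperatorStateHandles-based snapshot/restore calls match the Flink 1.x-era API used throughout the examples.

import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.api.operators.StreamSource;
import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles;
import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness;

public class HarnessLifecycleSketch {

	// Hypothetical source used only for illustration.
	private static class MySource implements SourceFunction<String> {
		@Override
		public void run(SourceContext<String> ctx) throws Exception {
			ctx.collect("hello");
		}

		@Override
		public void cancel() {}
	}

	public static void main(String[] args) throws Exception {
		StreamSource<String, MySource> operator = new StreamSource<>(new MySource());

		// (maxParallelism, parallelism, subtaskIndex) = (1, 1, 0), the same
		// constructor arguments most examples below use
		AbstractStreamOperatorTestHarness<String> harness =
				new AbstractStreamOperatorTestHarness<>(operator, 1, 1, 0);

		harness.setup();
		harness.open();

		// take a snapshot (checkpointId, timestamp) ...
		OperatorStateHandles snapshot = harness.snapshot(0L, 0L);

		// ... and restore it into a second harness around a fresh operator
		AbstractStreamOperatorTestHarness<String> restored =
				new AbstractStreamOperatorTestHarness<>(
						new StreamSource<>(new MySource()), 1, 1, 0);
		restored.setup();
		restored.initializeState(snapshot);
		restored.open();

		restored.close();
		harness.close();
	}
}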

Example 1: testRestoreFromEmptyStateWithPartitions

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
/**
 * Test restoring from an empty state taken using Flink 1.2, when some partitions could be
 * found for topics.
 */
@Test
public void testRestoreFromEmptyStateWithPartitions() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction = new DummyFlinkKafkaConsumer<>(partitions);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
			new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();
	// restore state from binary snapshot file
	testHarness.initializeState(
			OperatorSnapshotUtil.readStateHandle(
					OperatorSnapshotUtil.getResourceFilename("kafka-consumer-migration-test-flink1.2-empty-state-snapshot")));
	testHarness.open();

	// the expected state in "kafka-consumer-migration-test-flink1.2-empty-state-snapshot";
	// since the state is empty, the consumer should reflect on the startup mode to determine start offsets.
	final HashMap<KafkaTopicPartition, Long> expectedSubscribedPartitionsWithStartOffsets = new HashMap<>();
	for (KafkaTopicPartition partition : PARTITION_STATE.keySet()) {
		expectedSubscribedPartitionsWithStartOffsets.put(partition, KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
	}

	// assert that partitions were subscribed and match the expected list
	assertNotNull(consumerFunction.getSubscribedPartitionsToStartOffsets());
	assertFalse(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
	Assert.assertEquals(expectedSubscribedPartitionsWithStartOffsets, consumerFunction.getSubscribedPartitionsToStartOffsets());

	assertNull(consumerFunction.getRestoredState());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Developer: axbaretto, Project: flink, Lines: 43, Source: FlinkKafkaConsumerBaseFrom12MigrationTest.java

Example 2: testRestoreWithEmptyState

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
@Test
public void testRestoreWithEmptyState() throws Exception {
	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(mock(KinesisDataFetcher.class));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	MigrationTestUtil.restoreFromSnapshot(
		testHarness,
		"src/test/resources/kinesis-consumer-migration-test-flink" + testMigrateVersion + "-empty-snapshot", testMigrateVersion);
	testHarness.open();

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Developer: axbaretto, Project: flink, Lines: 22, Source: FlinkKinesisConsumerMigrationTest.java

Example 3: testRestore

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
@Test
public void testRestore() throws Exception {
	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(mock(KinesisDataFetcher.class));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	MigrationTestUtil.restoreFromSnapshot(
		testHarness,
		"src/test/resources/kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot", testMigrateVersion);
	testHarness.open();

	// assert that state is correctly restored
	assertNotNull(consumerFunction.getRestoredState());
	assertEquals(1, consumerFunction.getRestoredState().size());
	assertEquals(TEST_STATE, consumerFunction.getRestoredState());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: FlinkKinesisConsumerMigrationTest.java

Example 4: testRestoreFromEmptyStateNoPartitions

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
/**
 * Test restoring from a legacy empty state, when no partitions could be found for topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
	final DummyFlinkKafkaConsumer<String> consumerFunction =
			new DummyFlinkKafkaConsumer<>(
				Collections.<KafkaTopicPartition>emptyList(),
				FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	MigrationTestUtil.restoreFromSnapshot(
		testHarness,
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"),
		testMigrateVersion);

	testHarness.open();

	// assert that the subscribed-partitions map exists but is empty
	assertNotNull(consumerFunction.getSubscribedPartitionsToStartOffsets());
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Developer: axbaretto, Project: flink, Lines: 39, Source: FlinkKafkaConsumerBaseMigrationTest.java

Example 5: testRestoreFailsWithNonEmptyPreFlink13StatesIfDiscoveryEnabled

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
/**
 * Test that restoring from savepoints taken before Flink 1.3 fails if partition discovery is enabled.
 */
@Test
public void testRestoreFailsWithNonEmptyPreFlink13StatesIfDiscoveryEnabled() throws Exception {
	assumeTrue(testMigrateVersion == MigrationVersion.v1_3 || testMigrateVersion == MigrationVersion.v1_2);

	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(partitions, 1000L); // discovery enabled

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file; should fail since discovery is enabled
	try {
		MigrationTestUtil.restoreFromSnapshot(
			testHarness,
			OperatorSnapshotUtil.getResourceFilename(
				"kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"),
			testMigrateVersion);

		fail("Restore from savepoints from version before Flink 1.3.x should have failed if discovery is enabled.");
	} catch (Exception e) {
		if (testMigrateVersion == MigrationVersion.v1_1) {
			Assert.assertTrue(e.getCause() instanceof IllegalArgumentException);
		} else {
			Assert.assertTrue(e instanceof IllegalArgumentException);
		}
	}
}
 
Developer: axbaretto, Project: flink, Lines: 40, Source: FlinkKafkaConsumerBaseMigrationTest.java

Example 6: createTestHarness

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
private static <T> AbstractStreamOperatorTestHarness<T> createTestHarness(
	SourceFunction<T> source, int numSubtasks, int subtaskIndex) throws Exception {

	AbstractStreamOperatorTestHarness<T> testHarness =
		new AbstractStreamOperatorTestHarness<>(
			new StreamSource<>(source), Short.MAX_VALUE / 2, numSubtasks, subtaskIndex);

	testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

	return testHarness;
}
 
Developer: axbaretto, Project: flink, Lines: 12, Source: FlinkKafkaConsumerBaseTest.java
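
A hypothetical call site for this helper, showing how the subtask parameters line up when simulating a source with parallelism 2 (dummySource stands in for any SourceFunction<String>):

	// one harness per simulated subtask: (source, numSubtasks, subtaskIndex)
	AbstractStreamOperatorTestHarness<String> subtask0 = createTestHarness(dummySource, 2, 0);
	AbstractStreamOperatorTestHarness<String> subtask1 = createTestHarness(dummySource, 2, 1);
	subtask0.open();
	subtask1.open();

The Short.MAX_VALUE / 2 passed as max parallelism presumably exercises key-group assignment against a realistic upper bound instead of tying it to the subtask count.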

Example 7: testRestoreFromEmptyStateNoPartitions

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
/**
 * Test restoring from a legacy empty state, when no partitions could be found for topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
	final DummyFlinkKafkaConsumer<String> consumerFunction =
			new DummyFlinkKafkaConsumer<>(Collections.<KafkaTopicPartition>emptyList());

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();
	// restore state from binary snapshot file
	testHarness.initializeState(
			OperatorSnapshotUtil.readStateHandle(
					OperatorSnapshotUtil.getResourceFilename("kafka-consumer-migration-test-flink1.2-empty-state-snapshot")));
	testHarness.open();

	// assert that the subscribed-partitions map exists but is empty
	assertNotNull(consumerFunction.getSubscribedPartitionsToStartOffsets());
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// assert that no state was restored
	assertNull(consumerFunction.getRestoredState());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Developer: axbaretto, Project: flink, Lines: 33, Source: FlinkKafkaConsumerBaseFrom12MigrationTest.java

Example 8: testRestore

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
/**
 * Test restoring from a non-empty state taken using Flink 1.2, when some partitions could be
 * found for topics.
 */
@Test
public void testRestore() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction = new DummyFlinkKafkaConsumer<>(partitions);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
			new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();
	// restore state from binary snapshot file
	testHarness.initializeState(
			OperatorSnapshotUtil.readStateHandle(
					OperatorSnapshotUtil.getResourceFilename("kafka-consumer-migration-test-flink1.2-snapshot")));
	testHarness.open();

	// assert that partitions were subscribed and match the expected list
	assertNotNull(consumerFunction.getSubscribedPartitionsToStartOffsets());
	assertFalse(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// on restore, subscribedPartitionsToStartOffsets should be identical to the restored state
	Assert.assertEquals(PARTITION_STATE, consumerFunction.getSubscribedPartitionsToStartOffsets());

	// assert that state is correctly restored from the legacy checkpoint
	assertNotNull(consumerFunction.getRestoredState());
	Assert.assertEquals(PARTITION_STATE, consumerFunction.getRestoredState());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Developer: axbaretto, Project: flink, Lines: 40, Source: FlinkKafkaConsumerBaseFrom12MigrationTest.java

Example 9: testRestoreFromFlink11WithEmptyStateNoPartitions

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
/** Test restoring from a legacy empty state, when no partitions could be found for topics. */
@Test
public void testRestoreFromFlink11WithEmptyStateNoPartitions() throws Exception {
	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(Collections.<KafkaTopicPartition>emptyList());

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();
	// restore state from binary snapshot file using legacy method
	testHarness.initializeStateFromLegacyCheckpoint(
		getResourceFilename("kafka-consumer-migration-test-flink1.1-empty-state-snapshot"));
	testHarness.open();

	// assert that the subscribed-partitions map exists but is empty
	Assert.assertNotNull(consumerFunction.getSubscribedPartitionsToStartOffsets());
	Assert.assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// assert that no state was restored
	Assert.assertNull(consumerFunction.getRestoredState());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Developer: axbaretto, Project: flink, Lines: 30, Source: FlinkKafkaConsumerBaseFrom11MigrationTest.java

Example 10: testRestoreFromFlink11WithEmptyStateWithPartitions

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
/** Test restoring from an empty state taken using Flink 1.1, when some partitions could be found for topics. */
@Test
public void testRestoreFromFlink11WithEmptyStateWithPartitions() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>();
	partitions.add(new KafkaTopicPartition("abc", 13));
	partitions.add(new KafkaTopicPartition("def", 7));

	final DummyFlinkKafkaConsumer<String> consumerFunction = new DummyFlinkKafkaConsumer<>(partitions);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();
	// restore state from binary snapshot file using legacy method
	testHarness.initializeStateFromLegacyCheckpoint(
		getResourceFilename("kafka-consumer-migration-test-flink1.1-empty-state-snapshot"));
	testHarness.open();

	// the expected state in "kafka-consumer-migration-test-flink1.1-empty-state-snapshot";
	// since the state is empty, the consumer should reflect on the startup mode to determine start offsets.
	final HashMap<KafkaTopicPartition, Long> expectedSubscribedPartitionsWithStartOffsets = new HashMap<>();
	expectedSubscribedPartitionsWithStartOffsets.put(new KafkaTopicPartition("abc", 13), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
	expectedSubscribedPartitionsWithStartOffsets.put(new KafkaTopicPartition("def", 7), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);

	// assert that partitions were subscribed and match the expected list
	Assert.assertNotNull(consumerFunction.getSubscribedPartitionsToStartOffsets());
	Assert.assertFalse(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
	Assert.assertEquals(expectedSubscribedPartitionsWithStartOffsets, consumerFunction.getSubscribedPartitionsToStartOffsets());

	// assert that no state was restored
	Assert.assertNull(consumerFunction.getRestoredState());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Developer: axbaretto, Project: flink, Lines: 41, Source: FlinkKafkaConsumerBaseFrom11MigrationTest.java

Example 11: testUDFReturningNull

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
@Test
public void testUDFReturningNull() throws Exception {
	TestUserFunction userFunction = new TestUserFunction(null);
	AbstractStreamOperatorTestHarness<Integer> testHarness =
			new AbstractStreamOperatorTestHarness<>(new StreamMap<>(userFunction), 1, 1, 0);
	testHarness.open();
	OperatorStateHandles snapshot = testHarness.snapshot(0L, 0L);
	testHarness.initializeState(snapshot);
	Assert.assertTrue(userFunction.isRestored());
}
 
Developer: axbaretto, Project: flink, Lines: 11, Source: ListCheckpointedTest.java

Example 12: testUDFReturningEmpty

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
@Test
public void testUDFReturningEmpty() throws Exception {
	TestUserFunction userFunction = new TestUserFunction(Collections.<Integer>emptyList());
	AbstractStreamOperatorTestHarness<Integer> testHarness =
			new AbstractStreamOperatorTestHarness<>(new StreamMap<>(userFunction), 1, 1, 0);
	testHarness.open();
	OperatorStateHandles snapshot = testHarness.snapshot(0L, 0L);
	testHarness.initializeState(snapshot);
	Assert.assertTrue(userFunction.isRestored());
}
 
Developer: axbaretto, Project: flink, Lines: 11, Source: ListCheckpointedTest.java

Example 13: testUDFReturningData

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
@Test
public void testUDFReturningData() throws Exception {
	TestUserFunction userFunction = new TestUserFunction(Arrays.asList(1, 2, 3));
	AbstractStreamOperatorTestHarness<Integer> testHarness =
			new AbstractStreamOperatorTestHarness<>(new StreamMap<>(userFunction), 1, 1, 0);
	testHarness.open();
	OperatorStateHandles snapshot = testHarness.snapshot(0L, 0L);
	testHarness.initializeState(snapshot);
	Assert.assertTrue(userFunction.isRestored());
}
 
Developer: axbaretto, Project: flink, Lines: 11, Source: ListCheckpointedTest.java
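
The TestUserFunction shared by Examples 11-13 is not shown on this page. Here is a hypothetical reconstruction of what such a function could look like, assuming it implements ListCheckpointed<Integer> and records whether restoreState() was invoked; the actual class in Flink's ListCheckpointedTest may differ in detail.

import java.util.List;

import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.streaming.api.checkpoint.ListCheckpointed;

// Hypothetical sketch for illustration; not the verbatim Flink test class.
public class TestUserFunction extends RichMapFunction<Integer, Integer>
		implements ListCheckpointed<Integer> {

	private final List<Integer> expectedState;
	private boolean restored;

	public TestUserFunction(List<Integer> expectedState) {
		this.expectedState = expectedState;
	}

	@Override
	public Integer map(Integer value) {
		return value;
	}

	@Override
	public List<Integer> snapshotState(long checkpointId, long timestamp) {
		// returning null or an empty list is legal; Examples 11 and 12
		// exercise exactly those two cases
		return expectedState;
	}

	@Override
	public void restoreState(List<Integer> state) {
		// a null snapshot comes back as an empty list, so this is still called
		restored = true;
	}

	public boolean isRestored() {
		return restored;
	}
}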

Example 14: testValueEmission

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
/**
 * Creates an {@link UnboundedSourceWrapper} that has one or multiple readers per source.
 * If numSplits > numTasks, one source will manage multiple readers.
 */
@Test
public void testValueEmission() throws Exception {
  final int numElements = 20;
  final Object checkpointLock = new Object();
  PipelineOptions options = PipelineOptionsFactory.create();

  // this source will emit exactly NUM_ELEMENTS across all parallel readers
  // and then stall; we check below that we indeed receive NUM_ELEMENTS elements
  TestCountingSource source = new TestCountingSource(numElements);
  UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark> flinkWrapper =
      new UnboundedSourceWrapper<>("stepName", options, source, numSplits);

  assertEquals(numSplits, flinkWrapper.getSplitSources().size());

  StreamSource<WindowedValue<
      ValueWithRecordId<KV<Integer, Integer>>>,
      UnboundedSourceWrapper<
          KV<Integer, Integer>,
          TestCountingSource.CounterMark>> sourceOperator = new StreamSource<>(flinkWrapper);

  AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
      testHarness =
      new AbstractStreamOperatorTestHarness<>(
          sourceOperator,
          numTasks /* max parallelism */,
          numTasks /* parallelism */,
          0 /* subtask index */);

  testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

  try {
    testHarness.open();
    sourceOperator.run(checkpointLock,
        new TestStreamStatusMaintainer(),
        new Output<StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>>() {
          private int count = 0;

          @Override
          public void emitWatermark(Watermark watermark) {
          }

          @Override
          public <X> void collect(OutputTag<X> outputTag, StreamRecord<X> streamRecord) {
            collect((StreamRecord) streamRecord);
          }

          @Override
          public void emitLatencyMarker(LatencyMarker latencyMarker) {
          }

          @Override
          public void collect(StreamRecord<WindowedValue<
              ValueWithRecordId<KV<Integer, Integer>>>> windowedValueStreamRecord) {

            count++;
            if (count >= numElements) {
              throw new SuccessException();
            }
          }

          @Override
          public void close() {

          }
        });
  } catch (SuccessException e) {

    assertEquals(Math.max(1, numSplits / numTasks), flinkWrapper.getLocalSplitSources().size());

    // success
    return;
  }
  fail("Read terminated without producing expected number of outputs");
}
 
Developer: apache, Project: beam, Lines: 80, Source: UnboundedSourceWrapperTest.java

Example 15: testNullCheckpoint

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the required package/class
@Test
public void testNullCheckpoint() throws Exception {
  final int numElements = 20;
  PipelineOptions options = PipelineOptionsFactory.create();

  TestCountingSource source = new TestCountingSource(numElements) {
    @Override
    public Coder<CounterMark> getCheckpointMarkCoder() {
      return null;
    }
  };

  UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark> flinkWrapper =
      new UnboundedSourceWrapper<>("stepName", options, source, numSplits);

  StreamSource<
      WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
      UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
      sourceOperator = new StreamSource<>(flinkWrapper);

  AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
      testHarness =
      new AbstractStreamOperatorTestHarness<>(
          sourceOperator,
          numTasks /* max parallelism */,
          numTasks /* parallelism */,
          0 /* subtask index */);

  testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

  testHarness.open();

  OperatorStateHandles snapshot = testHarness.snapshot(0, 0);

  UnboundedSourceWrapper<
      KV<Integer, Integer>, TestCountingSource.CounterMark> restoredFlinkWrapper =
      new UnboundedSourceWrapper<>(
          "stepName", options, new TestCountingSource(numElements), numSplits);

  StreamSource<
      WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
      UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
      restoredSourceOperator =
      new StreamSource<>(restoredFlinkWrapper);

  // set parallelism to 1 to ensure that our testing operator gets all checkpointed state
  AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
      restoredTestHarness =
      new AbstractStreamOperatorTestHarness<>(
          restoredSourceOperator,
          numTasks /* max parallelism */,
          1 /* parallelism */,
          0 /* subtask index */);

  restoredTestHarness.setup();
  restoredTestHarness.initializeState(snapshot);
  restoredTestHarness.open();

  // when the source checkpointed null, we don't re-initialize the splits;
  // that is, we will have no splits
  assertEquals(0, restoredFlinkWrapper.getLocalSplitSources().size());

}
 
Developer: apache, Project: beam, Lines: 64, Source: UnboundedSourceWrapperTest.java


Note: the org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various authors; copyright remains with the original authors, and distribution or use must comply with each project's license. Do not reproduce without permission.