

Java AbstractStreamOperatorTestHarness.open Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness.open. If you are wondering what AbstractStreamOperatorTestHarness.open does, how to use it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness.


Below are 15 code examples of the AbstractStreamOperatorTestHarness.open method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
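All of the examples below follow the same harness lifecycle: wrap an operator in the harness, call setup(), optionally restore state, call open(), then snapshot or close. The following minimal sketch shows that skeleton in isolation; the identity StreamMap operator and class/test names are purely illustrative, assuming Flink's streaming test utilities of the same era (where snapshot returns OperatorStateHandles) are on the classpath:

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.operators.StreamMap;
import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles;
import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness;
import org.junit.Test;

public class HarnessLifecycleSketch {
	@Test
	public void testHarnessLifecycle() throws Exception {
		// wrap a trivial identity operator; constructor arguments are
		// (operator, maxParallelism, parallelism, subtaskIndex)
		AbstractStreamOperatorTestHarness<Integer> harness =
			new AbstractStreamOperatorTestHarness<>(
				new StreamMap<>((MapFunction<Integer, Integer>) value -> value), 1, 1, 0);

		harness.setup();   // prepare the mock runtime environment
		harness.open();    // invokes the wrapped operator's open()

		// take a checkpoint; a fresh harness could restore it via initializeState(snapshot)
		OperatorStateHandles snapshot = harness.snapshot(0L, 0L);

		harness.close();
	}
}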

Example 1: testRestore

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
@Test
public void testRestore() throws Exception {
	final DummyFlinkKinesisConsumer<String> consumerFunction = new DummyFlinkKinesisConsumer<>(mock(KinesisDataFetcher.class));

	StreamSource<String, DummyFlinkKinesisConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setup();
	MigrationTestUtil.restoreFromSnapshot(
		testHarness,
		"src/test/resources/kinesis-consumer-migration-test-flink" + testMigrateVersion + "-snapshot", testMigrateVersion);
	testHarness.open();

	// assert that state is correctly restored
	assertNotEquals(null, consumerFunction.getRestoredState());
	assertEquals(1, consumerFunction.getRestoredState().size());
	assertEquals(TEST_STATE, consumerFunction.getRestoredState());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Author: axbaretto, Project: flink, Lines: 25, Source: FlinkKinesisConsumerMigrationTest.java

Example 2: testRestoreFromEmptyStateNoPartitions

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
/**
 * Test restoring from a legacy empty state, when no partitions could be found for the topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
	final DummyFlinkKafkaConsumer<String> consumerFunction =
			new DummyFlinkKafkaConsumer<>(
				Collections.<KafkaTopicPartition>emptyList(),
				FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	MigrationTestUtil.restoreFromSnapshot(
		testHarness,
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"),
		testMigrateVersion);

	testHarness.open();

	// assert that no partitions were found and the list is empty
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState().isEmpty());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Author: axbaretto, Project: flink, Lines: 39, Source: FlinkKafkaConsumerBaseMigrationTest.java

Example 3: testUDFReturningNull

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
@Test
public void testUDFReturningNull() throws Exception {
	TestUserFunction userFunction = new TestUserFunction(null);
	AbstractStreamOperatorTestHarness<Integer> testHarness =
			new AbstractStreamOperatorTestHarness<>(new StreamMap<>(userFunction), 1, 1, 0);
	testHarness.open();
	OperatorStateHandles snapshot = testHarness.snapshot(0L, 0L);
	testHarness.initializeState(snapshot);
	Assert.assertTrue(userFunction.isRestored());
}
 
Author: axbaretto, Project: flink, Lines: 11, Source: ListCheckpointedTest.java

Example 4: testRestoreFromEmptyStateNoPartitions

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
/**
 * Test restoring from a legacy empty state, when no partitions could be found for the topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
	final DummyFlinkKafkaConsumer<String> consumerFunction =
			new DummyFlinkKafkaConsumer<>(Collections.<KafkaTopicPartition>emptyList());

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();
	// restore state from binary snapshot file
	testHarness.initializeState(
			OperatorSnapshotUtil.readStateHandle(
					OperatorSnapshotUtil.getResourceFilename("kafka-consumer-migration-test-flink1.2-empty-state-snapshot")));
	testHarness.open();

	// assert that no partitions were found and the list is empty
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// assert that no state was restored
	assertTrue(consumerFunction.getRestoredState() == null);

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Author: axbaretto, Project: flink, Lines: 33, Source: FlinkKafkaConsumerBaseFrom12MigrationTest.java

Example 5: testUDFReturningData

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
@Test
public void testUDFReturningData() throws Exception {
	TestUserFunction userFunction = new TestUserFunction(Arrays.asList(1, 2, 3));
	AbstractStreamOperatorTestHarness<Integer> testHarness =
			new AbstractStreamOperatorTestHarness<>(new StreamMap<>(userFunction), 1, 1, 0);
	testHarness.open();
	OperatorStateHandles snapshot = testHarness.snapshot(0L, 0L);
	testHarness.initializeState(snapshot);
	Assert.assertTrue(userFunction.isRestored());
}
 
Author: axbaretto, Project: flink, Lines: 11, Source: ListCheckpointedTest.java

Example 6: testRestoreFromFlink11WithEmptyStateNoPartitions

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
/** Test restoring from a legacy empty state, when no partitions could be found for the topics. */
@Test
public void testRestoreFromFlink11WithEmptyStateNoPartitions() throws Exception {
	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(Collections.<KafkaTopicPartition>emptyList());

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator = new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();
	// restore state from binary snapshot file using legacy method
	testHarness.initializeStateFromLegacyCheckpoint(
		getResourceFilename("kafka-consumer-migration-test-flink1.1-empty-state-snapshot"));
	testHarness.open();

	// assert that no partitions were found and is empty
	Assert.assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	Assert.assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// assert that no state was restored
	Assert.assertTrue(consumerFunction.getRestoredState() == null);

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Author: axbaretto, Project: flink, Lines: 30, Source: FlinkKafkaConsumerBaseFrom11MigrationTest.java

Example 7: testRestoreFromFlink11WithEmptyStateWithPartitions

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
/** Test restoring from an empty state taken using Flink 1.1, when some partitions could be found for topics. */
@Test
public void testRestoreFromFlink11WithEmptyStateWithPartitions() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>();
	partitions.add(new KafkaTopicPartition("abc", 13));
	partitions.add(new KafkaTopicPartition("def", 7));

	final DummyFlinkKafkaConsumer<String> consumerFunction = new DummyFlinkKafkaConsumer<>(partitions);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();
	// restore state from binary snapshot file using legacy method
	testHarness.initializeStateFromLegacyCheckpoint(
		getResourceFilename("kafka-consumer-migration-test-flink1.1-empty-state-snapshot"));
	testHarness.open();

	// the expected state in "kafka-consumer-migration-test-flink1.1-empty-state-snapshot";
	// since the state is empty, the consumer should fall back on the startup mode to determine start offsets.
	final HashMap<KafkaTopicPartition, Long> expectedSubscribedPartitionsWithStartOffsets = new HashMap<>();
	expectedSubscribedPartitionsWithStartOffsets.put(new KafkaTopicPartition("abc", 13), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
	expectedSubscribedPartitionsWithStartOffsets.put(new KafkaTopicPartition("def", 7), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);

	// assert that partitions were found and match the expected list
	Assert.assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	Assert.assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
	Assert.assertEquals(expectedSubscribedPartitionsWithStartOffsets, consumerFunction.getSubscribedPartitionsToStartOffsets());

	// assert that no state was restored
	Assert.assertTrue(consumerFunction.getRestoredState() == null);

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Author: axbaretto, Project: flink, Lines: 41, Source: FlinkKafkaConsumerBaseFrom11MigrationTest.java

Example 8: testValueEmission

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
/**
 * Creates a {@link UnboundedSourceWrapper} that has one or multiple readers per source.
 * If numSplits > numTasks, a single source will manage multiple readers.
 */
@Test
public void testValueEmission() throws Exception {
  final int numElements = 20;
  final Object checkpointLock = new Object();
  PipelineOptions options = PipelineOptionsFactory.create();

  // this source will emit exactly NUM_ELEMENTS across all parallel readers,
  // afterwards it will stall. We check whether we also receive NUM_ELEMENTS
  // elements later.
  TestCountingSource source = new TestCountingSource(numElements);
  UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark> flinkWrapper =
      new UnboundedSourceWrapper<>("stepName", options, source, numSplits);

  assertEquals(numSplits, flinkWrapper.getSplitSources().size());

  StreamSource<WindowedValue<
      ValueWithRecordId<KV<Integer, Integer>>>,
      UnboundedSourceWrapper<
          KV<Integer, Integer>,
          TestCountingSource.CounterMark>> sourceOperator = new StreamSource<>(flinkWrapper);

  AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
      testHarness =
      new AbstractStreamOperatorTestHarness<>(
          sourceOperator,
          numTasks /* max parallelism */,
          numTasks /* parallelism */,
          0 /* subtask index */);

  testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

  try {
    testHarness.open();
    sourceOperator.run(checkpointLock,
        new TestStreamStatusMaintainer(),
        new Output<StreamRecord<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>>() {
          private int count = 0;

          @Override
          public void emitWatermark(Watermark watermark) {
          }

          @Override
          public <X> void collect(OutputTag<X> outputTag, StreamRecord<X> streamRecord) {
            collect((StreamRecord) streamRecord);
          }

          @Override
          public void emitLatencyMarker(LatencyMarker latencyMarker) {
          }

          @Override
          public void collect(StreamRecord<WindowedValue<
              ValueWithRecordId<KV<Integer, Integer>>>> windowedValueStreamRecord) {

            count++;
            if (count >= numElements) {
              throw new SuccessException();
            }
          }

          @Override
          public void close() {

          }
        });
  } catch (SuccessException e) {

    assertEquals(Math.max(1, numSplits / numTasks), flinkWrapper.getLocalSplitSources().size());

    // success
    return;
  }
  fail("Read terminated without producing expected number of outputs");
}
 
Author: apache, Project: beam, Lines: 80, Source: UnboundedSourceWrapperTest.java

Example 9: testNullCheckpoint

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
@Test
public void testNullCheckpoint() throws Exception {
  final int numElements = 20;
  PipelineOptions options = PipelineOptionsFactory.create();

  TestCountingSource source = new TestCountingSource(numElements) {
    @Override
    public Coder<CounterMark> getCheckpointMarkCoder() {
      return null;
    }
  };

  UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark> flinkWrapper =
      new UnboundedSourceWrapper<>("stepName", options, source, numSplits);

  StreamSource<
      WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
      UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
      sourceOperator = new StreamSource<>(flinkWrapper);

  AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
      testHarness =
      new AbstractStreamOperatorTestHarness<>(
          sourceOperator,
          numTasks /* max parallelism */,
          numTasks /* parallelism */,
          0 /* subtask index */);

  testHarness.setTimeCharacteristic(TimeCharacteristic.EventTime);

  testHarness.open();

  OperatorStateHandles snapshot = testHarness.snapshot(0, 0);

  UnboundedSourceWrapper<
      KV<Integer, Integer>, TestCountingSource.CounterMark> restoredFlinkWrapper =
      new UnboundedSourceWrapper<>(
          "stepName", options, new TestCountingSource(numElements), numSplits);

  StreamSource<
      WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>,
      UnboundedSourceWrapper<KV<Integer, Integer>, TestCountingSource.CounterMark>>
      restoredSourceOperator =
      new StreamSource<>(restoredFlinkWrapper);

  // set parallelism to 1 to ensure that our testing operator gets all checkpointed state
  AbstractStreamOperatorTestHarness<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>>
      restoredTestHarness =
      new AbstractStreamOperatorTestHarness<>(
          restoredSourceOperator,
          numTasks /* max parallelism */,
          1 /* parallelism */,
          0 /* subtask index */);

  restoredTestHarness.setup();
  restoredTestHarness.initializeState(snapshot);
  restoredTestHarness.open();

  // when the source checkpointed null, we don't re-initialize the splits, i.e. we
  // will have no splits.
  assertEquals(0, restoredFlinkWrapper.getLocalSplitSources().size());

}
 
Author: apache, Project: beam, Lines: 64, Source: UnboundedSourceWrapperTest.java

Example 10: testCheckpointAndRestore

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
@Test
public void testCheckpointAndRestore() {
	try {
		final int numElements = 10000;

		List<Integer> data = new ArrayList<Integer>(numElements);
		List<Integer> result = new ArrayList<Integer>(numElements);

		for (int i = 0; i < numElements; i++) {
			data.add(i);
		}

		final FromElementsFunction<Integer> source = new FromElementsFunction<>(IntSerializer.INSTANCE, data);
		StreamSource<Integer, FromElementsFunction<Integer>> src = new StreamSource<>(source);
		AbstractStreamOperatorTestHarness<Integer> testHarness =
			new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);
		testHarness.open();

		final SourceFunction.SourceContext<Integer> ctx = new ListSourceContext<Integer>(result, 2L);

		final Throwable[] error = new Throwable[1];

		// run the source asynchronously
		Thread runner = new Thread() {
			@Override
			public void run() {
				try {
					source.run(ctx);
				}
				catch (Throwable t) {
					error[0] = t;
				}
			}
		};
		runner.start();

		// wait for a bit
		Thread.sleep(1000);

		// make a checkpoint
		List<Integer> checkpointData = new ArrayList<>(numElements);
		OperatorStateHandles handles = null;
		synchronized (ctx.getCheckpointLock()) {
			handles = testHarness.snapshot(566, System.currentTimeMillis());
			checkpointData.addAll(result);
		}

		// cancel the source
		source.cancel();
		runner.join();

		// check for errors
		if (error[0] != null) {
			System.err.println("Error in asynchronous source runner");
			error[0].printStackTrace();
			fail("Error in asynchronous source runner");
		}

		final FromElementsFunction<Integer> sourceCopy = new FromElementsFunction<>(IntSerializer.INSTANCE, data);
		StreamSource<Integer, FromElementsFunction<Integer>> srcCopy = new StreamSource<>(sourceCopy);
		AbstractStreamOperatorTestHarness<Integer> testHarnessCopy =
			new AbstractStreamOperatorTestHarness<>(srcCopy, 1, 1, 0);
		testHarnessCopy.setup();
		testHarnessCopy.initializeState(handles);
		testHarnessCopy.open();

		// recovery run
		SourceFunction.SourceContext<Integer> newCtx = new ListSourceContext<>(checkpointData);

		sourceCopy.run(newCtx);

		assertEquals(data, checkpointData);
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Author: axbaretto, Project: flink, Lines: 79, Source: FromElementsFunctionTest.java

Example 11: testRestoreFromEmptyStateWithPartitions

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
/**
 * Test restoring from an empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestoreFromEmptyStateWithPartitions() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
			new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	MigrationTestUtil.restoreFromSnapshot(
		testHarness,
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"),
		testMigrateVersion);

	testHarness.open();

	// the expected state in "kafka-consumer-migration-test-flink1.2-snapshot-empty-state";
	// all new partitions after the snapshot are considered as partitions that were created while the
	// consumer wasn't running, and should start from the earliest offset.
	final HashMap<KafkaTopicPartition, Long> expectedSubscribedPartitionsWithStartOffsets = new HashMap<>();
	for (KafkaTopicPartition partition : PARTITION_STATE.keySet()) {
		expectedSubscribedPartitionsWithStartOffsets.put(partition, KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET);
	}

	// assert that partitions were found and match the expected list
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
	assertEquals(expectedSubscribedPartitionsWithStartOffsets, consumerFunction.getSubscribedPartitionsToStartOffsets());

	// the new partitions should have been considered as restored state
	assertTrue(consumerFunction.getRestoredState() != null);
	assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());
	for (Map.Entry<KafkaTopicPartition, Long> expectedEntry : expectedSubscribedPartitionsWithStartOffsets.entrySet()) {
		assertEquals(expectedEntry.getValue(), consumerFunction.getRestoredState().get(expectedEntry.getKey()));
	}

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Author: axbaretto, Project: flink, Lines: 54, Source: FlinkKafkaConsumerBaseMigrationTest.java

Example 12: testRestore

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
/**
 * Test restoring from a non-empty state taken using a previous Flink version, when some partitions could be
 * found for topics.
 */
@Test
public void testRestore() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

	final DummyFlinkKafkaConsumer<String> consumerFunction =
		new DummyFlinkKafkaConsumer<>(partitions, FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
			new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
			new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();

	// restore state from binary snapshot file
	MigrationTestUtil.restoreFromSnapshot(
		testHarness,
		OperatorSnapshotUtil.getResourceFilename(
			"kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"),
		testMigrateVersion);

	testHarness.open();

	// assert that partitions were found and match the expected list
	assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// on restore, subscribedPartitionsToStartOffsets should be identical to the restored state
	assertEquals(PARTITION_STATE, consumerFunction.getSubscribedPartitionsToStartOffsets());

	// assert that state is correctly restored from legacy checkpoint
	assertTrue(consumerFunction.getRestoredState() != null);
	assertEquals(PARTITION_STATE, consumerFunction.getRestoredState());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Author: axbaretto, Project: flink, Lines: 45, Source: FlinkKafkaConsumerBaseMigrationTest.java

Example 13: writeMonitoringSourceSnapshot

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
/**
 * Manually run this to write binary snapshot data. Remove @Ignore to run.
 */
@Ignore
@Test
public void writeMonitoringSourceSnapshot() throws Exception {

	File testFolder = tempFolder.newFolder();

	long fileModTime = Long.MIN_VALUE;
	for (int i = 0; i < 1; i++) {
		Tuple2<File, String> file = createFileAndFillWithData(testFolder, "file", i, "This is test line.");
		fileModTime = file.f0.lastModified();
	}

	TextInputFormat format = new TextInputFormat(new Path(testFolder.getAbsolutePath()));

	final ContinuousFileMonitoringFunction<String> monitoringFunction =
		new ContinuousFileMonitoringFunction<>(format, FileProcessingMode.PROCESS_CONTINUOUSLY, 1, INTERVAL);

	StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src =
		new StreamSource<>(monitoringFunction);

	final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
			new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);

	testHarness.open();

	final Throwable[] error = new Throwable[1];

	final OneShotLatch latch = new OneShotLatch();

	// run the source asynchronously
	Thread runner = new Thread() {
		@Override
		public void run() {
			try {
				monitoringFunction.run(new DummySourceContext() {
					@Override
					public void collect(TimestampedFileInputSplit element) {
						latch.trigger();
					}

					@Override
					public void markAsTemporarilyIdle() {

					}
				});
			}
			catch (Throwable t) {
				t.printStackTrace();
				error[0] = t;
			}
		}
	};
	runner.start();

	if (!latch.isTriggered()) {
		latch.await();
	}

	final OperatorStateHandles snapshot;
	synchronized (testHarness.getCheckpointLock()) {
		snapshot = testHarness.snapshot(0L, 0L);
	}

	OperatorSnapshotUtil.writeStateHandle(
			snapshot,
			"src/test/resources/monitoring-function-migration-test-" + fileModTime + "-flink1.2-snapshot");

	monitoringFunction.cancel();
	runner.join();

	testHarness.close();
}
 
Author: axbaretto, Project: flink, Lines: 76, Source: ContinuousFileProcessingFrom12MigrationTest.java

Example 14: testRestoreFromFlink11

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
/** Test restoring from a non-empty state taken using Flink 1.1, when some partitions could be found for topics. */
@Test
public void testRestoreFromFlink11() throws Exception {
	final List<KafkaTopicPartition> partitions = new ArrayList<>();
	partitions.add(new KafkaTopicPartition("abc", 13));
	partitions.add(new KafkaTopicPartition("def", 7));

	final DummyFlinkKafkaConsumer<String> consumerFunction = new DummyFlinkKafkaConsumer<>(partitions);

	StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
		new StreamSource<>(consumerFunction);

	final AbstractStreamOperatorTestHarness<String> testHarness =
		new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

	testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	testHarness.setup();
	// restore state from binary snapshot file using legacy method
	testHarness.initializeStateFromLegacyCheckpoint(
		getResourceFilename("kafka-consumer-migration-test-flink1.1-snapshot"));
	testHarness.open();

	// the expected state in "kafka-consumer-migration-test-flink1.1-snapshot"
	final HashMap<KafkaTopicPartition, Long> expectedState = new HashMap<>();
	expectedState.put(new KafkaTopicPartition("abc", 13), 16768L);
	expectedState.put(new KafkaTopicPartition("def", 7), 987654321L);

	// assert that partitions were found and match the expected list
	Assert.assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
	Assert.assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

	// on restore, subscribedPartitionsToStartOffsets should be identical to the restored state
	Assert.assertEquals(expectedState, consumerFunction.getSubscribedPartitionsToStartOffsets());

	// assert that state is correctly restored from legacy checkpoint
	Assert.assertTrue(consumerFunction.getRestoredState() != null);
	Assert.assertEquals(expectedState, consumerFunction.getRestoredState());

	consumerOperator.close();
	consumerOperator.cancel();
}
 
Author: axbaretto, Project: flink, Lines: 43, Source: FlinkKafkaConsumerBaseFrom11MigrationTest.java

Example 15: testFunctionRestore

import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; // import the dependent package/class
@Test
public void testFunctionRestore() throws Exception {
	String testBasePath = hdfsURI + "/" + UUID.randomUUID() + "/";

	org.apache.hadoop.fs.Path path = null;
	long fileModTime = Long.MIN_VALUE;
	for (int i = 0; i < 1; i++) {
		Tuple2<org.apache.hadoop.fs.Path, String> file = createFileAndFillWithData(testBasePath, "file", i, "This is test line.");
		path = file.f0;
		fileModTime = hdfs.getFileStatus(file.f0).getModificationTime();
	}

	TextInputFormat format = new TextInputFormat(new Path(testBasePath));

	final ContinuousFileMonitoringFunction<String> monitoringFunction =
		new ContinuousFileMonitoringFunction<>(format, FileProcessingMode.PROCESS_CONTINUOUSLY, 1, INTERVAL);

	StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src =
		new StreamSource<>(monitoringFunction);

	final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
		new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);
	testHarness.open();

	final Throwable[] error = new Throwable[1];

	final OneShotLatch latch = new OneShotLatch();

	final DummySourceContext sourceContext = new DummySourceContext() {
		@Override
		public void collect(TimestampedFileInputSplit element) {
			latch.trigger();
		}
	};

	// run the source asynchronously
	Thread runner = new Thread() {
		@Override
		public void run() {
			try {
				monitoringFunction.run(sourceContext);
			}
			catch (Throwable t) {
				t.printStackTrace();
				error[0] = t;
			}
		}
	};
	runner.start();

	// first condition for the source to have updated its state: emit at least one element
	if (!latch.isTriggered()) {
		latch.await();
	}

	// second condition for the source to have updated its state: it's not on the lock anymore,
	// this means it has processed all the splits and updated its state.
	synchronized (sourceContext.getCheckpointLock()) {}

	OperatorStateHandles snapshot = testHarness.snapshot(0, 0);
	monitoringFunction.cancel();
	runner.join();

	testHarness.close();

	final ContinuousFileMonitoringFunction<String> monitoringFunctionCopy =
		new ContinuousFileMonitoringFunction<>(format, FileProcessingMode.PROCESS_CONTINUOUSLY, 1, INTERVAL);

	StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> srcCopy =
		new StreamSource<>(monitoringFunctionCopy);

	AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarnessCopy =
		new AbstractStreamOperatorTestHarness<>(srcCopy, 1, 1, 0);
	testHarnessCopy.initializeState(snapshot);
	testHarnessCopy.open();

	Assert.assertNull(error[0]);
	Assert.assertEquals(fileModTime, monitoringFunctionCopy.getGlobalModificationTime());

	hdfs.delete(path, false);
}
 
Author: axbaretto, Project: flink, Lines: 82, Source: ContinuousFileProcessingTest.java


Note: The org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness.open method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.