

Java OperatorStateHandles Class Code Examples

This article collects typical usage examples of the Java class org.apache.flink.streaming.runtime.tasks.OperatorStateHandles. If you are wondering what OperatorStateHandles is for or how to use it, the curated examples below may help.


The OperatorStateHandles class belongs to the org.apache.flink.streaming.runtime.tasks package. Fifteen code examples of the class are shown below, sorted by popularity by default.
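Before the individual examples, here is a minimal sketch of the snapshot/restore round trip that most of them follow. It is not one of the collected examples: the StreamMap operator, the element values, and the class name are illustrative assumptions, and it relies on the OneInputStreamOperatorTestHarness test utility (shipped in the flink-streaming-java test jar) from the same Flink version as the examples below.

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.operators.StreamMap;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles;
import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;

public class OperatorStateHandlesRoundTripSketch {

	public static void main(String[] args) throws Exception {
		// 1) run an operator in a test harness and take a snapshot of its state
		OneInputStreamOperatorTestHarness<String, String> harness =
			new OneInputStreamOperatorTestHarness<>(
				new StreamMap<>((MapFunction<String, String>) String::toUpperCase));
		harness.setup();
		harness.open();
		harness.processElement(new StreamRecord<>("hello", 0L));
		OperatorStateHandles snapshot = harness.snapshot(0L, 0L);
		harness.close();

		// 2) restore a fresh operator instance from that snapshot:
		//    setup() -> initializeState(...) -> open(), as in the migration tests below
		OneInputStreamOperatorTestHarness<String, String> restored =
			new OneInputStreamOperatorTestHarness<>(
				new StreamMap<>((MapFunction<String, String>) String::toUpperCase));
		restored.setup();
		restored.initializeState(snapshot);
		restored.open();
		restored.close();
	}
}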

Example 1: writeSinglePatternAfterMigrationSnapshot

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeSinglePatternAfterMigrationSnapshot() throws Exception {

	KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {
		private static final long serialVersionUID = -4873366487571254798L;

		@Override
		public Integer getKey(Event value) throws Exception {
			return value.getId();
		}
	};

	final Event startEvent1 = new Event(42, "start", 1.0);

	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
			new KeyedOneInputStreamOperatorTestHarness<>(
					getKeyedCepOpearator(false, new SinglePatternNFAFactory()),
					keySelector,
					BasicTypeInfo.INT_TYPE_INFO);

	try {
		harness.setup();
		harness.open();
		harness.processWatermark(new Watermark(5));

		// do snapshot and save to file
		OperatorStateHandles snapshot = harness.snapshot(0L, 0L);
		OperatorSnapshotUtil.writeStateHandle(snapshot,
			"src/test/resources/cep-migration-single-pattern-afterwards-flink" + flinkGenerateSavepointVersion + "-snapshot");
	} finally {
		harness.close();
	}
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 38, Source: CEPMigrationTest.java

Example 2: testLogTimeoutAlmostReachedWarningDuringRecovery

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
@Test
public void testLogTimeoutAlmostReachedWarningDuringRecovery() throws Exception {
	clock.setEpochMilli(0);

	final long transactionTimeout = 1000;
	final double warningRatio = 0.5;
	sinkFunction.setTransactionTimeout(transactionTimeout);
	sinkFunction.enableTransactionTimeoutWarnings(warningRatio);

	harness.open();

	final OperatorStateHandles snapshot = harness.snapshot(0, 1);
	final long elapsedTime = (long) ((double) transactionTimeout * warningRatio + 2);
	clock.setEpochMilli(elapsedTime);
	harness.initializeState(snapshot);

	final List<String> logMessages =
		loggingEvents.stream().map(LoggingEvent::getRenderedMessage).collect(Collectors.toList());

	assertThat(
		logMessages,
		hasItem(containsString("has been open for 502 ms. " +
			"This is close to or even exceeding the transaction timeout of 1000 ms.")));
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 25, Source: TwoPhaseCommitSinkFunctionTest.java

Example 3: snapshot

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
/**
 * Calls {@link StreamOperator#snapshotState(long, long, CheckpointOptions)}.
 */
public OperatorStateHandles snapshot(long checkpointId, long timestamp) throws Exception {

	OperatorSnapshotResult operatorStateResult = operator.snapshotState(
		checkpointId,
		timestamp,
		CheckpointOptions.forCheckpoint());

	KeyedStateHandle keyedManaged = FutureUtil.runIfNotDoneAndGet(operatorStateResult.getKeyedStateManagedFuture());
	KeyedStateHandle keyedRaw = FutureUtil.runIfNotDoneAndGet(operatorStateResult.getKeyedStateRawFuture());

	OperatorStateHandle opManaged = FutureUtil.runIfNotDoneAndGet(operatorStateResult.getOperatorStateManagedFuture());
	OperatorStateHandle opRaw = FutureUtil.runIfNotDoneAndGet(operatorStateResult.getOperatorStateRawFuture());

	return new OperatorStateHandles(
		0,
		keyedManaged != null ? Collections.singletonList(keyedManaged) : null,
		keyedRaw != null ? Collections.singletonList(keyedRaw) : null,
		opManaged != null ? Collections.singletonList(opManaged) : null,
		opRaw != null ? Collections.singletonList(opRaw) : null);
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 24, Source: AbstractStreamOperatorTestHarness.java
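The handles returned by this method are what the migration tests in this article persist with OperatorSnapshotUtil.writeStateHandle. The following is a hedged counterpart sketch, assuming the readStateHandle helper from the same test utilities and the same Flink version: it reads such a file back and feeds it to initializeState. The class name, snapshot path, and StreamMap operator are placeholders for illustration.

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.operators.StreamMap;
import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles;
import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;
import org.apache.flink.streaming.util.OperatorSnapshotUtil;

public class ReadSnapshotSketch {

	public static void main(String[] args) throws Exception {
		// counterpart of OperatorSnapshotUtil.writeStateHandle(...) used in the examples;
		// the snapshot path below is a placeholder, not one of the real test resources
		OperatorStateHandles snapshot = OperatorSnapshotUtil.readStateHandle(
			"src/test/resources/some-previously-written-snapshot");

		OneInputStreamOperatorTestHarness<String, String> harness =
			new OneInputStreamOperatorTestHarness<>(
				new StreamMap<>((MapFunction<String, String>) String::toUpperCase));
		harness.setup();
		harness.initializeState(snapshot); // restore before open(), as in the migration tests
		harness.open();
		harness.close();
	}
}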

Example 4: writeSnapshot

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
/**
 * Manually run this to write binary snapshot data. Remove @Ignore to run.
 */
@Ignore
@Test
public void writeSnapshot() throws Exception {

	final File outDir = tempFolder.newFolder();

	BucketingSink<String> sink = new BucketingSink<String>(outDir.getAbsolutePath())
		.setWriter(new StringWriter<String>())
		.setBatchSize(5)
		.setPartPrefix(PART_PREFIX)
		.setInProgressPrefix("")
		.setPendingPrefix("")
		.setValidLengthPrefix("")
		.setInProgressSuffix(IN_PROGRESS_SUFFIX)
		.setPendingSuffix(PENDING_SUFFIX)
		.setValidLengthSuffix(VALID_LENGTH_SUFFIX);

	OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

	testHarness.setup();
	testHarness.open();

	testHarness.processElement(new StreamRecord<>("test1", 0L));
	testHarness.processElement(new StreamRecord<>("test2", 0L));

	checkFs(outDir, 1, 1, 0, 0);

	testHarness.processElement(new StreamRecord<>("test3", 0L));
	testHarness.processElement(new StreamRecord<>("test4", 0L));
	testHarness.processElement(new StreamRecord<>("test5", 0L));

	checkFs(outDir, 1, 4, 0, 0);

	OperatorStateHandles snapshot = testHarness.snapshot(0L, 0L);

	OperatorSnapshotUtil.writeStateHandle(snapshot, "src/test/resources/bucketing-sink-migration-test-flink" + flinkGenerateSavepointVersion + "-snapshot");
	testHarness.close();
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 43, Source: BucketingSinkMigrationTest.java

Example 5: writeSnapshot

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
/**
 * Manually run this to write binary snapshot data. Remove @Ignore to run.
 */
@Ignore
@Test
public void writeSnapshot() throws Exception {

	final File outDir = tempFolder.newFolder();

	BucketingSink<String> sink = new BucketingSink<String>(outDir.getAbsolutePath())
		.setWriter(new StringWriter<String>())
		.setBatchSize(5)
		.setPartPrefix(PART_PREFIX)
		.setInProgressPrefix("")
		.setPendingPrefix("")
		.setValidLengthPrefix("")
		.setInProgressSuffix(IN_PROGRESS_SUFFIX)
		.setPendingSuffix(PENDING_SUFFIX)
		.setValidLengthSuffix(VALID_LENGTH_SUFFIX);

	OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

	testHarness.setup();
	testHarness.open();

	testHarness.processElement(new StreamRecord<>("test1", 0L));
	testHarness.processElement(new StreamRecord<>("test2", 0L));

	checkFs(outDir, 1, 1, 0, 0);

	testHarness.processElement(new StreamRecord<>("test3", 0L));
	testHarness.processElement(new StreamRecord<>("test4", 0L));
	testHarness.processElement(new StreamRecord<>("test5", 0L));

	checkFs(outDir, 1, 4, 0, 0);

	OperatorStateHandles snapshot = testHarness.snapshot(0L, 0L);

	OperatorSnapshotUtil.writeStateHandle(snapshot, "src/test/resources/bucketing-sink-migration-test-flink1.2-snapshot");
	testHarness.close();
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 43, Source: BucketingSinkFrom12MigrationTest.java

Example 6: writeSinglePatternAfterMigrationSnapshot

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeSinglePatternAfterMigrationSnapshot() throws Exception {

	KeySelector<Event, Integer> keySelector = new KeySelector<Event, Integer>() {
		private static final long serialVersionUID = -4873366487571254798L;

		@Override
		public Integer getKey(Event value) throws Exception {
			return value.getId();
		}
	};

	final Event startEvent1 = new Event(42, "start", 1.0);

	OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
			new KeyedOneInputStreamOperatorTestHarness<>(
					new KeyedCEPPatternOperator<>(
							Event.createTypeSerializer(),
							false,
							IntSerializer.INSTANCE,
							new SinglePatternNFAFactory(),
							true),
					keySelector,
					BasicTypeInfo.INT_TYPE_INFO);

	try {
		harness.setup();
		harness.open();
		harness.processWatermark(new Watermark(5));

		// do snapshot and save to file
		OperatorStateHandles snapshot = harness.snapshot(0L, 0L);
		OperatorSnapshotUtil.writeStateHandle(snapshot,
			"src/test/resources/cep-migration-single-pattern-afterwards-flink1.2-snapshot");
	} finally {
		harness.close();
	}
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 43, Source: CEPFrom12MigrationTest.java

Example 7: testUDFReturningNull

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
@Test
public void testUDFReturningNull() throws Exception {
	TestUserFunction userFunction = new TestUserFunction(null);
	AbstractStreamOperatorTestHarness<Integer> testHarness =
			new AbstractStreamOperatorTestHarness<>(new StreamMap<>(userFunction), 1, 1, 0);
	testHarness.open();
	OperatorStateHandles snapshot = testHarness.snapshot(0L, 0L);
	testHarness.initializeState(snapshot);
	Assert.assertTrue(userFunction.isRestored());
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 11, Source: ListCheckpointedTest.java

Example 8: testUDFReturningEmpty

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
@Test
public void testUDFReturningEmpty() throws Exception {
	TestUserFunction userFunction = new TestUserFunction(Collections.<Integer>emptyList());
	AbstractStreamOperatorTestHarness<Integer> testHarness =
			new AbstractStreamOperatorTestHarness<>(new StreamMap<>(userFunction), 1, 1, 0);
	testHarness.open();
	OperatorStateHandles snapshot = testHarness.snapshot(0L, 0L);
	testHarness.initializeState(snapshot);
	Assert.assertTrue(userFunction.isRestored());
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 11, Source: ListCheckpointedTest.java

Example 9: testUDFReturningData

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
@Test
public void testUDFReturningData() throws Exception {
	TestUserFunction userFunction = new TestUserFunction(Arrays.asList(1, 2, 3));
	AbstractStreamOperatorTestHarness<Integer> testHarness =
			new AbstractStreamOperatorTestHarness<>(new StreamMap<>(userFunction), 1, 1, 0);
	testHarness.open();
	OperatorStateHandles snapshot = testHarness.snapshot(0L, 0L);
	testHarness.initializeState(snapshot);
	Assert.assertTrue(userFunction.isRestored());
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 11, Source: ListCheckpointedTest.java

Example 10: testIgnoreCommitExceptionDuringRecovery

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
@Test
public void testIgnoreCommitExceptionDuringRecovery() throws Exception {
	clock.setEpochMilli(0);

	harness.open();
	harness.processElement("42", 0);

	final OperatorStateHandles snapshot = harness.snapshot(0, 1);
	harness.notifyOfCompletedCheckpoint(1);

	final long transactionTimeout = 1000;
	sinkFunction.setTransactionTimeout(transactionTimeout);
	sinkFunction.ignoreFailuresAfterTransactionTimeout();
	throwException.set(true);

	try {
		harness.initializeState(snapshot);
		fail("Expected exception not thrown");
	} catch (RuntimeException e) {
		assertEquals("Expected exception", e.getMessage());
	}

	clock.setEpochMilli(transactionTimeout + 1);
	harness.initializeState(snapshot);

	assertExactlyOnce(Collections.singletonList("42"));
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 28, Source: TwoPhaseCommitSinkFunctionTest.java

Example 11: writeSessionWindowsWithCountTriggerInMintConditionSnapshot

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeSessionWindowsWithCountTriggerInMintConditionSnapshot() throws Exception {

	final int sessionSize = 3;

	TypeInformation<Tuple2<String, Integer>> inputType = TypeInfoParser.parse("Tuple2<String, Integer>");

	ListStateDescriptor<Tuple2<String, Integer>> stateDesc = new ListStateDescriptor<>("window-contents",
			inputType.createSerializer(new ExecutionConfig()));

	WindowOperator<String, Tuple2<String, Integer>, Iterable<Tuple2<String, Integer>>, Tuple3<String, Long, Long>, TimeWindow> operator = new WindowOperator<>(
			EventTimeSessionWindows.withGap(Time.seconds(sessionSize)),
			new TimeWindow.Serializer(),
			new TupleKeySelector(),
			BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
			stateDesc,
			new InternalIterableWindowFunction<>(new SessionWindowFunction()),
			PurgingTrigger.of(CountTrigger.of(4)),
			0,
			null /* late data output tag */);

	OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple3<String, Long, Long>> testHarness =
			new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector(), BasicTypeInfo.STRING_TYPE_INFO);

	testHarness.setup();
	testHarness.open();

	// do snapshot and save to file
	OperatorStateHandles snapshot = testHarness.snapshot(0, 0);
	OperatorSnapshotUtil.writeStateHandle(
		snapshot,
		"src/test/resources/win-op-migration-test-session-with-stateful-trigger-mint-flink" + flinkGenerateSavepointVersion + "-snapshot");

	testHarness.close();
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 40, Source: WindowOperatorMigrationTest.java

Example 12: writeSessionWindowsWithCountTriggerInMintConditionSnapshot

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
/**
 * Manually run this to write binary snapshot data.
 */
@Ignore
@Test
public void writeSessionWindowsWithCountTriggerInMintConditionSnapshot() throws Exception {

	final int sessionSize = 3;

	TypeInformation<Tuple2<String, Integer>> inputType = TypeInfoParser.parse("Tuple2<String, Integer>");

	ListStateDescriptor<Tuple2<String, Integer>> stateDesc = new ListStateDescriptor<>("window-contents",
			inputType.createSerializer(new ExecutionConfig()));

	WindowOperator<String, Tuple2<String, Integer>, Iterable<Tuple2<String, Integer>>, Tuple3<String, Long, Long>, TimeWindow> operator = new WindowOperator<>(
			EventTimeSessionWindows.withGap(Time.seconds(sessionSize)),
			new TimeWindow.Serializer(),
			new TupleKeySelector(),
			BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new ExecutionConfig()),
			stateDesc,
			new InternalIterableWindowFunction<>(new SessionWindowFunction()),
			PurgingTrigger.of(CountTrigger.of(4)),
			0,
			null /* late data output tag */);

	OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Tuple3<String, Long, Long>> testHarness =
			new KeyedOneInputStreamOperatorTestHarness<>(operator, new TupleKeySelector(), BasicTypeInfo.STRING_TYPE_INFO);

	testHarness.setup();
	testHarness.open();

	// do snapshot and save to file
	OperatorStateHandles snapshot = testHarness.snapshot(0, 0);
	OperatorSnapshotUtil.writeStateHandle(snapshot, "src/test/resources/win-op-migration-test-session-with-stateful-trigger-mint-flink1.2-snapshot");

	testHarness.close();
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 38, Source: WindowOperatorFrom12MigrationTest.java

Example 13: restore

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
/**
 * Resumes execution from the provided {@link OperatorStateHandles}. This is used to test recovery after a failure.
 */
public void restore(OperatorStateHandles stateHandles) throws Exception {
	Preconditions.checkArgument(!isOpen,
		"You are trying to restore() while the operator is still open. " +
			"Please call close() first.");

	testHarness.setup();
	testHarness.initializeState(stateHandles);
	openOperator();
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 13, Source: WindowingTestHarness.java

Example 14: initializeState

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
@Override
public void initializeState(OperatorStateHandles operatorStateHandles) throws Exception {
	// only read the managed keyed state if a restore handle was actually provided
	if (operatorStateHandles != null) {
		restoredKeyedState = operatorStateHandles.getManagedKeyedState();
	}

	super.initializeState(operatorStateHandles);
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 9, Source: KeyedTwoInputStreamOperatorTestHarness.java

Example 15: initializeState

import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles; // import the required package/class
@Override
public void initializeState(OperatorStateHandles operatorStateHandles) throws Exception {
	if (operatorStateHandles != null) {
		int numKeyGroups = getEnvironment().getTaskInfo().getMaxNumberOfParallelSubtasks();
		int numSubtasks = getEnvironment().getTaskInfo().getNumberOfParallelSubtasks();
		int subtaskIndex = getEnvironment().getTaskInfo().getIndexOfThisSubtask();

		// create a new OperatorStateHandles that only contains the state for our key-groups

		List<KeyGroupRange> keyGroupPartitions = StateAssignmentOperation.createKeyGroupPartitions(
				numKeyGroups,
				numSubtasks);

		KeyGroupRange localKeyGroupRange =
				keyGroupPartitions.get(subtaskIndex);

		restoredKeyedState = null;
		Collection<KeyedStateHandle> managedKeyedState = operatorStateHandles.getManagedKeyedState();
		if (managedKeyedState != null) {

			// if we have migration handles, don't reshuffle state and preserve
			// the migration tag
			if (hasMigrationHandles(managedKeyedState)) {
				List<KeyedStateHandle> result = new ArrayList<>(managedKeyedState.size());
				result.addAll(managedKeyedState);
				restoredKeyedState = result;
			} else {
				restoredKeyedState = StateAssignmentOperation.getKeyedStateHandles(
						managedKeyedState,
						localKeyGroupRange);
			}
		}
	}

	super.initializeState(operatorStateHandles);
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 37, Source: KeyedOneInputStreamOperatorTestHarness.java


Note: The org.apache.flink.streaming.runtime.tasks.OperatorStateHandles class examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; the copyright of the source code belongs to its original authors. Please follow the corresponding project's license when distributing or using the code, and do not reproduce this article without permission.