本文整理汇总了Java中org.apache.flink.runtime.state.AbstractStateBackend类的典型用法代码示例。如果您正苦于以下问题:Java AbstractStateBackend类的具体用法?Java AbstractStateBackend怎么用?Java AbstractStateBackend使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
AbstractStateBackend类属于org.apache.flink.runtime.state包,在下文中一共展示了AbstractStateBackend类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: parameters
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
@Parameterized.Parameters
public static Collection<AbstractStateBackend> parameters() throws IOException {
    // Each file-based backend gets its own directory under a shared temporary folder.
    final TemporaryFolder tempFolder = new TemporaryFolder();
    tempFolder.create();

    // Heap-backed backends, with and without asynchronous snapshots.
    final MemoryStateBackend memSync = new MemoryStateBackend(MAX_MEM_STATE_SIZE, false);
    final MemoryStateBackend memAsync = new MemoryStateBackend(MAX_MEM_STATE_SIZE, true);

    // File-system backends, with and without asynchronous snapshots.
    final FsStateBackend fsSync =
            new FsStateBackend("file://" + tempFolder.newFolder().getAbsolutePath(), false);
    final FsStateBackend fsAsync =
            new FsStateBackend("file://" + tempFolder.newFolder().getAbsolutePath(), true);

    // RocksDB backends: full snapshots vs. incremental checkpoints.
    final RocksDBStateBackend rocksFull =
            new RocksDBStateBackend(new MemoryStateBackend(MAX_MEM_STATE_SIZE), false);
    rocksFull.setDbStoragePath(tempFolder.newFolder().getAbsolutePath());

    final RocksDBStateBackend rocksIncremental =
            new RocksDBStateBackend(new MemoryStateBackend(MAX_MEM_STATE_SIZE), true);
    rocksIncremental.setDbStoragePath(tempFolder.newFolder().getAbsolutePath());

    return Arrays.asList(memSync, memAsync, fsSync, fsAsync, rocksFull, rocksIncremental);
}
示例2: materialize
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
@Override
public StateHandle<DataInputView> materialize() throws Exception {
    // Layout: numWindows, then per window: windowKey, numEntries, then (key, value) pairs.
    final AbstractStateBackend.CheckpointStateOutputView out =
            backend.createCheckpointStateOutputView(checkpointId, timestamp);

    out.writeInt(stateSnapshot.size());
    for (Map.Entry<Long, Map<Long, Long>> windowEntry : stateSnapshot.entrySet()) {
        out.writeLong(windowEntry.getKey());

        final Map<Long, Long> perKeyState = windowEntry.getValue();
        out.writeInt(perKeyState.size());
        for (Map.Entry<Long, Long> stateEntry : perKeyState.entrySet()) {
            out.writeLong(stateEntry.getKey());
            out.writeLong(stateEntry.getValue());
        }
    }
    // Closing flushes the stream and yields the handle for the written checkpoint data.
    return out.closeAndGetHandle();
}
示例3: testCallsForwardedToNonPartitionedBackend
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
@Test
public void testCallsForwardedToNonPartitionedBackend() throws Exception {
    // The RocksDB backend must delegate stream-factory creation to the wrapped backend.
    final AbstractStateBackend delegate = mock(AbstractStateBackend.class);
    final RocksDBStateBackend rocksDbBackend = new RocksDBStateBackend(delegate);

    final Environment env = getMockEnvironment();
    rocksDbBackend.createStreamFactory(env.getJobID(), "foobar");

    verify(delegate, times(1)).createStreamFactory(any(JobID.class), anyString());
}
示例4: setStateBackend
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
/**
 * Sets the state backend that defines how to store and checkpoint state.
 *
 * @param backend the backend to use; must not be {@code null}
 * @return this environment, to allow call chaining
 * @deprecated Use {@link #setStateBackend(StateBackend)} instead.
 */
@Deprecated
@PublicEvolving
public StreamExecutionEnvironment setStateBackend(AbstractStateBackend backend) {
    final AbstractStateBackend checked = Preconditions.checkNotNull(backend);
    this.defaultStateBackend = checked;
    return this;
}
示例5: DataInputViewAsynchronousStateHandle
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
/**
 * Creates an asynchronous state handle over the given window-state snapshot.
 *
 * @param checkpointId  id of the checkpoint this handle belongs to
 * @param timestamp     timestamp of the checkpoint
 * @param stateSnapshot per-window, per-key state to be materialized later
 * @param backend       backend used to create the checkpoint output view
 */
public DataInputViewAsynchronousStateHandle(
        long checkpointId,
        long timestamp,
        Map<Long, Map<Long, Long>> stateSnapshot,
        AbstractStateBackend backend) {
    this.backend = backend;
    this.stateSnapshot = stateSnapshot;
    this.timestamp = timestamp;
    this.checkpointId = checkpointId;
}
示例6: getStateBackend
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
/**
 * State backend used to store Beam's state during computation.
 *
 * <p>Note: only applicable when executing in streaming mode. Per the option
 * description, when this is not set the default backend is read from the
 * Flink configuration instead.
 */
@Description("Sets the state backend to use in streaming mode. "
+ "Otherwise the default is read from the Flink config.")
@JsonIgnore
AbstractStateBackend getStateBackend();
示例7: createStateBackend
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
@Override
protected AbstractStateBackend createStateBackend() throws Exception {
    // Back the test with a file-system checkpoint directory inside a fresh temp folder.
    final String checkpointDir = temporaryFolder.newFolder().toURI().toString();
    return new FsStateBackend(checkpointDir);
}
示例8: createStateBackend
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
@Override
protected AbstractStateBackend createStateBackend() throws Exception {
    // Keep checkpoint state on the heap; no external storage is involved.
    final MemoryStateBackend memoryBackend = new MemoryStateBackend();
    return memoryBackend;
}
示例9: createStateBackend
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
@Override
protected AbstractStateBackend createStateBackend() throws Exception {
    // Point RocksDB's checkpoint URI at a fresh temp directory for this test run.
    final String checkpointDir = temporaryFolder.newFolder().toURI().toString();
    return new RocksDBStateBackend(checkpointDir);
}
示例10: testProgramWithBackend
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
/**
 * Runs the checkpointed sum/count program on the given backend, injecting one
 * failure mid-stream, and verifies the results after recovery.
 */
protected void testProgramWithBackend(AbstractStateBackend stateBackend) throws Exception {
    // Sanity check: the per-source element count must divide evenly among the keys.
    assertEquals("Broken test setup", 0, (NUM_STRINGS / 2) % NUM_KEYS);

    final StreamExecutionEnvironment env = new TestStreamEnvironment(cluster, PARALLELISM);
    env.setParallelism(PARALLELISM);
    env.enableCheckpointing(500);
    env.getConfig().disableSysoutLogging();
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 0L));
    env.setStateBackend(stateBackend);

    // Pick a random point in the 60%-80% range of a subtask's input at which to fail once.
    final int minFailurePos = (int) (0.6 * NUM_STRINGS / PARALLELISM);
    final int maxFailurePos = (int) (0.8 * NUM_STRINGS / PARALLELISM);
    final int failurePos = minFailurePos + new Random().nextInt(maxFailurePos - minFailurePos);

    final DataStream<Integer> firstSource = env.addSource(
            new IntGeneratingSourceFunction(NUM_STRINGS / 2, NUM_STRINGS / 4));
    final DataStream<Integer> secondSource = env.addSource(
            new IntGeneratingSourceFunction(NUM_STRINGS / 2, NUM_STRINGS / 4));

    firstSource.union(secondSource)
            .keyBy(new IdentityKeySelector<Integer>())
            .map(new OnceFailingPartitionedSum(failurePos))
            .keyBy(0)
            .addSink(new CounterSink());

    env.execute();

    // After recovery, every key must be present exactly once with the correct totals.
    assertEquals(NUM_KEYS, CounterSink.ALL_COUNTS.size());
    assertEquals(NUM_KEYS, OnceFailingPartitionedSum.ALL_SUMS.size());

    for (Entry<Integer, Long> sum : OnceFailingPartitionedSum.ALL_SUMS.entrySet()) {
        assertEquals((long) sum.getKey() * NUM_STRINGS / NUM_KEYS, sum.getValue().longValue());
    }
    for (long count : CounterSink.ALL_COUNTS.values()) {
        assertEquals(NUM_STRINGS / NUM_KEYS, count);
    }
}
示例11: testCleanupOfSnapshotsInFailureCase
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
/**
 * Test that the snapshot files are cleaned up in case of a failure during the snapshot
 * procedure.
 */
@Test
public void testCleanupOfSnapshotsInFailureCase() throws Exception {
long checkpointId = 1L;
long timestamp = 42L;
Environment env = new DummyEnvironment("test task", 1, 0);
// Stub a stream factory whose checkpoint output stream fails on every write.
CheckpointStreamFactory.CheckpointStateOutputStream outputStream = mock(CheckpointStreamFactory.CheckpointStateOutputStream.class);
CheckpointStreamFactory checkpointStreamFactory = mock(CheckpointStreamFactory.class);
AbstractStateBackend stateBackend = mock(AbstractStateBackend.class);
final IOException testException = new IOException("Test exception");
doReturn(checkpointStreamFactory).when(stateBackend).createStreamFactory(any(JobID.class), anyString());
// Injected failure: any write to the checkpoint stream throws the test exception.
doThrow(testException).when(outputStream).write(anyInt());
doReturn(outputStream).when(checkpointStreamFactory).createCheckpointStateOutputStream(eq(checkpointId), eq(timestamp));
RocksDBStateBackend backend = new RocksDBStateBackend(stateBackend);
backend.setDbStoragePath(temporaryFolder.newFolder().toURI().toString());
AbstractKeyedStateBackend<Void> keyedStateBackend = backend.createKeyedStateBackend(
env,
new JobID(),
"test operator",
VoidSerializer.INSTANCE,
1,
new KeyGroupRange(0, 0),
null);
try {
keyedStateBackend.restore(null);
// register a state so that the state backend has to checkpoint something
keyedStateBackend.getPartitionedState(
"namespace",
StringSerializer.INSTANCE,
new ValueStateDescriptor<>("foobar", String.class));
// Trigger the snapshot; running its future must surface the injected failure.
RunnableFuture<KeyedStateHandle> snapshotFuture = keyedStateBackend.snapshot(
checkpointId, timestamp, checkpointStreamFactory, CheckpointOptions.forCheckpoint());
try {
FutureUtil.runIfNotDoneAndGet(snapshotFuture);
fail("Expected an exception to be thrown here.");
} catch (ExecutionException e) {
// The cause must be exactly the injected IOException, not some wrapper.
Assert.assertEquals(testException, e.getCause());
}
// The failed checkpoint stream must have been closed, i.e. cleaned up, by the backend.
verify(outputStream).close();
} finally {
IOUtils.closeQuietly(keyedStateBackend);
keyedStateBackend.dispose();
}
}
示例12: testQueryUnknownKey
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
/**
 * Tests the failure response with {@link UnknownKeyOrNamespace} as cause
 * on queries for non-existing keys.
 */
@Test
public void testQueryUnknownKey() throws Exception {
    final KvStateRegistry kvStateRegistry = new KvStateRegistry();
    final AtomicKvStateRequestStats requestStats = new AtomicKvStateRequestStats();

    final KvStateServerHandler serverHandler =
            new KvStateServerHandler(kvStateRegistry, TEST_THREAD_POOL, requestStats);
    final EmbeddedChannel embeddedChannel = new EmbeddedChannel(getFrameDecoder(), serverHandler);

    final int numKeyGroups = 1;
    final AbstractStateBackend abstractBackend = new MemoryStateBackend();
    final DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
    dummyEnv.setKvStateRegistry(kvStateRegistry);

    final KeyedStateBackend<Integer> backend = abstractBackend.createKeyedStateBackend(
            dummyEnv,
            new JobID(),
            "test_op",
            IntSerializer.INSTANCE,
            numKeyGroups,
            new KeyGroupRange(0, 0),
            kvStateRegistry.createTaskRegistry(dummyEnv.getJobID(), dummyEnv.getJobVertexId()));

    final TestRegistryListener registryListener = new TestRegistryListener();
    kvStateRegistry.registerListener(registryListener);

    // Register a queryable state instance (no value is ever written for the queried key).
    final ValueStateDescriptor<Integer> stateDescriptor =
            new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
    stateDescriptor.setQueryable("vanilla");
    backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, stateDescriptor);

    // Build a query for a key that was never written.
    final byte[] keyAndNamespace = KvStateRequestSerializer.serializeKeyAndNamespace(
            1238283,
            IntSerializer.INSTANCE,
            VoidNamespace.INSTANCE,
            VoidNamespaceSerializer.INSTANCE);

    final long requestId = Integer.MAX_VALUE + 22982L;

    assertTrue(registryListener.registrationName.equals("vanilla"));

    final ByteBuf requestBuffer = KvStateRequestSerializer.serializeKvStateRequest(
            embeddedChannel.alloc(),
            requestId,
            registryListener.kvStateId,
            keyAndNamespace);

    // Write the request and wait for the response.
    embeddedChannel.writeInbound(requestBuffer);

    final ByteBuf responseBuffer = (ByteBuf) readInboundBlocking(embeddedChannel);
    responseBuffer.skipBytes(4); // skip frame length

    // Verify the failure response and the recorded statistics.
    assertEquals(KvStateRequestType.REQUEST_FAILURE, KvStateRequestSerializer.deserializeHeader(responseBuffer));
    final KvStateRequestFailure failure = KvStateRequestSerializer.deserializeKvStateRequestFailure(responseBuffer);

    assertEquals(requestId, failure.getRequestId());
    assertTrue("Did not respond with expected failure cause", failure.getCause() instanceof UnknownKeyOrNamespace);

    assertEquals(1, requestStats.getNumRequests());
    assertEquals(1, requestStats.getNumFailed());
}
示例13: testQueryExecutorShutDown
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
/**
 * Tests the failure response on a rejected execution, because the query
 * executor has been closed.
 */
@Test
public void testQueryExecutorShutDown() throws Exception {
    final KvStateRegistry kvStateRegistry = new KvStateRegistry();
    final AtomicKvStateRequestStats requestStats = new AtomicKvStateRequestStats();

    // Hand the handler an executor that is already shut down, so every query is rejected.
    final ExecutorService closedExecutor = Executors.newSingleThreadExecutor();
    closedExecutor.shutdown();
    assertTrue(closedExecutor.isShutdown());

    final KvStateServerHandler serverHandler =
            new KvStateServerHandler(kvStateRegistry, closedExecutor, requestStats);
    final EmbeddedChannel embeddedChannel = new EmbeddedChannel(getFrameDecoder(), serverHandler);

    final int numKeyGroups = 1;
    final AbstractStateBackend abstractBackend = new MemoryStateBackend();
    final DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
    dummyEnv.setKvStateRegistry(kvStateRegistry);

    final KeyedStateBackend<Integer> backend = abstractBackend.createKeyedStateBackend(
            dummyEnv,
            new JobID(),
            "test_op",
            IntSerializer.INSTANCE,
            numKeyGroups,
            new KeyGroupRange(0, 0),
            kvStateRegistry.createTaskRegistry(dummyEnv.getJobID(), dummyEnv.getJobVertexId()));

    final TestRegistryListener registryListener = new TestRegistryListener();
    kvStateRegistry.registerListener(registryListener);

    // Register a queryable state instance so the request can be routed.
    final ValueStateDescriptor<Integer> stateDescriptor =
            new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
    stateDescriptor.setQueryable("vanilla");
    backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, stateDescriptor);

    assertTrue(registryListener.registrationName.equals("vanilla"));

    final ByteBuf requestBuffer = KvStateRequestSerializer.serializeKvStateRequest(
            embeddedChannel.alloc(),
            282872,
            registryListener.kvStateId,
            new byte[0]);

    // Write the request and wait for the response.
    embeddedChannel.writeInbound(requestBuffer);

    final ByteBuf responseBuffer = (ByteBuf) readInboundBlocking(embeddedChannel);
    responseBuffer.skipBytes(4); // skip frame length

    // The handler must answer with a failure caused by the rejected execution.
    assertEquals(KvStateRequestType.REQUEST_FAILURE, KvStateRequestSerializer.deserializeHeader(responseBuffer));
    final KvStateRequestFailure failure = KvStateRequestSerializer.deserializeKvStateRequestFailure(responseBuffer);
    assertTrue(failure.getCause().getMessage().contains("RejectedExecutionException"));

    assertEquals(1, requestStats.getNumRequests());
    assertEquals(1, requestStats.getNumFailed());
}
示例14: testChunkedResponse
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
/**
 * Tests that large responses are chunked.
 */
@Test
public void testChunkedResponse() throws Exception {
KvStateRegistry registry = new KvStateRegistry();
KvStateRequestStats stats = new AtomicKvStateRequestStats();
KvStateServerHandler handler = new KvStateServerHandler(registry, TEST_THREAD_POOL, stats);
EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);
int numKeyGroups = 1;
AbstractStateBackend abstractBackend = new MemoryStateBackend();
DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
dummyEnv.setKvStateRegistry(registry);
AbstractKeyedStateBackend<Integer> backend = abstractBackend.createKeyedStateBackend(
dummyEnv,
new JobID(),
"test_op",
IntSerializer.INSTANCE,
numKeyGroups,
new KeyGroupRange(0, 0),
registry.createTaskRegistry(dummyEnv.getJobID(), dummyEnv.getJobVertexId()));
final TestRegistryListener registryListener = new TestRegistryListener();
registry.registerListener(registryListener);
// Register a queryable value state holding raw byte arrays.
ValueStateDescriptor<byte[]> desc = new ValueStateDescriptor<>("any", BytePrimitiveArraySerializer.INSTANCE);
desc.setQueryable("vanilla");
ValueState<byte[]> state = backend.getPartitionedState(
VoidNamespace.INSTANCE,
VoidNamespaceSerializer.INSTANCE,
desc);
// Store a value twice the channel's write-buffer high-water mark, which should
// force the server to chunk the response rather than send it as one buffer.
byte[] bytes = new byte[2 * channel.config().getWriteBufferHighWaterMark()];
byte current = 0;
for (int i = 0; i < bytes.length; i++) {
bytes[i] = current++;
}
int key = 99812822;
backend.setCurrentKey(key);
state.update(bytes);
// Build the query request for the key that was just written.
byte[] serializedKeyAndNamespace = KvStateRequestSerializer.serializeKeyAndNamespace(
key,
IntSerializer.INSTANCE,
VoidNamespace.INSTANCE,
VoidNamespaceSerializer.INSTANCE);
long requestId = Integer.MAX_VALUE + 182828L;
assertTrue(registryListener.registrationName.equals("vanilla"));
ByteBuf request = KvStateRequestSerializer.serializeKvStateRequest(
channel.alloc(),
requestId,
registryListener.kvStateId,
serializedKeyAndNamespace);
// Write the request and wait for the response
channel.writeInbound(request);
Object msg = readInboundBlocking(channel);
// The oversized payload must come back as a chunked buffer.
assertTrue("Not ChunkedByteBuf", msg instanceof ChunkedByteBuf);
}
示例15: runTestDeclineOnCheckpointError
import org.apache.flink.runtime.state.AbstractStateBackend; //导入依赖的package包/类
/**
 * Runs a task with the given backend and verifies that a checkpoint error is
 * reported as a declined checkpoint while the task itself keeps running.
 */
private void runTestDeclineOnCheckpointError(AbstractStateBackend backend) throws Exception {
    final TestDeclinedCheckpointResponder checkpointResponder = new TestDeclinedCheckpointResponder();
    final Task task = createTask(new FilterOperator(), backend, checkpointResponder, false);

    // Start the task and block until the first checkpoint has been declined.
    task.startTaskThread();
    checkpointResponder.declinedLatch.await();

    // A declined checkpoint must not fail the task.
    Assert.assertEquals(ExecutionState.RUNNING, task.getExecutionState());

    task.cancelExecution();
    task.getExecutingThread().join();
}