This article collects and summarizes typical usage examples of the Java method org.apache.flink.runtime.jobgraph.JobGraph.getJobID. If you are wondering what JobGraph.getJobID does, how to use it, or where to find real-world examples of it, the curated samples below should help. You can also explore the enclosing class, org.apache.flink.runtime.jobgraph.JobGraph, for further context.
The following 15 code examples of JobGraph.getJobID are shown, ordered by popularity by default.
Example 1: runJobBlocking
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
/**
 * This method runs a job in blocking mode. The method returns only after the job
 * completed successfully, or after it failed terminally.
 *
 * @param job  The Flink job to execute
 * @return The result of the job execution
 *
 * @throws JobExecutionException Thrown if anything went amiss during initial job launch,
 *         or if the job terminally failed.
 */
public JobExecutionResult runJobBlocking(JobGraph job) throws JobExecutionException, InterruptedException {
    checkNotNull(job);
    LOG.info("Received job for blocking execution: {} ({})", job.getName(), job.getJobID());

    final BlockingJobSync sync = new BlockingJobSync(job.getJobID(), numJobManagers);

    synchronized (lock) {
        checkState(!shutdown, "mini cluster is shut down");
        checkState(runners == null, "mini cluster can only execute one job at a time");

        this.runners = startJobRunners(job, sync, sync);
    }

    try {
        return sync.getResult();
    }
    finally {
        // always clear the status for the next job
        runners = null;
        clearJobRunningState(job.getJobID());
    }
}
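For context, a minimal caller sketch (not part of the example above; the MiniCluster and MiniClusterConfiguration setup is an assumption and varies between Flink versions): build a trivial JobGraph, submit it, and block until it completes.

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.minicluster.MiniCluster;
import org.apache.flink.runtime.minicluster.MiniClusterConfiguration;
import org.apache.flink.runtime.testtasks.NoOpInvokable;

JobVertex vertex = new JobVertex("noop");
vertex.setInvokableClass(NoOpInvokable.class);   // trivial task from Flink's test utilities

JobGraph job = new JobGraph("blocking-example", vertex);

MiniCluster miniCluster = new MiniCluster(new MiniClusterConfiguration());  // assumed default config
miniCluster.start();
try {
    JobExecutionResult result = miniCluster.runJobBlocking(job);
    System.out.println("Job " + job.getJobID() + " ran for " + result.getNetRuntime() + " ms");
} finally {
    miniCluster.shutdown();
}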
Example 2: addJob
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
public JobManagerJobMetricGroup addJob(JobGraph job) {
    JobID jobId = job.getJobID();
    String jobName = job.getName();
    // get or create a jobs metric group
    JobManagerJobMetricGroup currentJobGroup;
    synchronized (this) {
        if (!isClosed()) {
            currentJobGroup = jobs.get(jobId);

            if (currentJobGroup == null || currentJobGroup.isClosed()) {
                currentJobGroup = new JobManagerJobMetricGroup(registry, this, jobId, jobName);
                jobs.put(jobId, currentJobGroup);
            }
            return currentJobGroup;
        } else {
            return null;
        }
    }
}
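Note that addJob returns null once the parent group has been closed, so callers must guard against that. A hypothetical caller sketch (the variable names and the counter name are illustrative, not from this page):

JobManagerJobMetricGroup jobGroup = jobManagerMetricGroup.addJob(jobGraph);
if (jobGroup != null) {
    // register a job-scoped metric on the returned group
    jobGroup.counter("numRestarts");
} else {
    LOG.warn("Metric group already closed; no metrics reported for job {}", jobGraph.getJobID());
}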
Example 3: createJobManagerRunner
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
protected JobManagerRunner createJobManagerRunner(
        Configuration configuration,
        ResourceID resourceId,
        RpcService rpcService,
        HighAvailabilityServices highAvailabilityServices,
        JobManagerServices jobManagerServices,
        HeartbeatServices heartbeatServices,
        MetricRegistry metricRegistry,
        FatalErrorHandler fatalErrorHandler,
        @Nullable String restAddress) throws Exception {
    final JobGraph jobGraph = retrieveJobGraph(configuration);

    return new JobManagerRunner(
        resourceId,
        jobGraph,
        configuration,
        rpcService,
        highAvailabilityServices,
        heartbeatServices,
        jobManagerServices,
        metricRegistry,
        new TerminatingOnCompleteActions(jobGraph.getJobID()),
        fatalErrorHandler,
        restAddress);
}
Example 4: testStopYarn
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
@Test
public void testStopYarn() throws Exception {
    // this only works if there is no active job at this point
    assertTrue(cluster.getCurrentlyRunningJobsJava().isEmpty());

    // Create a task
    final JobVertex sender = new JobVertex("Sender");
    sender.setParallelism(2);
    sender.setInvokableClass(StoppableInvokable.class);

    final JobGraph jobGraph = new JobGraph("Stoppable streaming test job", sender);
    final JobID jid = jobGraph.getJobID();

    cluster.submitJobDetached(jobGraph);

    // wait for the job to show up
    while (cluster.getCurrentlyRunningJobsJava().isEmpty()) {
        Thread.sleep(10);
    }

    final FiniteDuration testTimeout = new FiniteDuration(2, TimeUnit.MINUTES);
    final Deadline deadline = testTimeout.fromNow();

    while (!cluster.getCurrentlyRunningJobsJava().isEmpty()) {
        try (HttpTestClient client = new HttpTestClient("localhost", port)) {
            // Request the file from the web server
            client.sendGetRequest("/jobs/" + jid + "/yarn-stop", deadline.timeLeft());

            HttpTestClient.SimpleHttpResponse response =
                client.getNextResponse(deadline.timeLeft());

            assertEquals(HttpResponseStatus.OK, response.getStatus());
            assertEquals(MimeTypes.getMimeTypeForExtension("json"), response.getType());
            assertEquals("{}", response.getContent());
        }
        Thread.sleep(20);
    }
}
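The two busy-wait loops above follow a common deadline-polling idiom. A small reusable helper sketch of the same idea (the helper name and signature are illustrative, not part of the test above):

import java.util.concurrent.Callable;
import java.util.concurrent.TimeoutException;
import scala.concurrent.duration.Deadline;

static void pollUntil(Deadline deadline, Callable<Boolean> condition) throws Exception {
    while (deadline.hasTimeLeft()) {
        if (condition.call()) {
            return;               // condition met in time
        }
        Thread.sleep(20);         // back off briefly between attempts
    }
    throw new TimeoutException("condition not met before the deadline expired");
}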
Example 5: testStopYarn
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
@Test
public void testStopYarn() throws Exception {
    // this only works if there is no active job at this point
    assertTrue(cluster.getCurrentlyRunningJobsJava().isEmpty());

    // Create a task
    final JobVertex sender = new JobVertex("Sender");
    sender.setParallelism(2);
    sender.setInvokableClass(StoppableInvokable.class);

    final JobGraph jobGraph = new JobGraph("Stoppable streaming test job", sender);
    final JobID jid = jobGraph.getJobID();

    cluster.submitJobDetached(jobGraph);

    // wait for job to show up
    while (cluster.getCurrentlyRunningJobsJava().isEmpty()) {
        Thread.sleep(10);
    }

    final FiniteDuration testTimeout = new FiniteDuration(2, TimeUnit.MINUTES);
    final Deadline deadline = testTimeout.fromNow();

    while (!cluster.getCurrentlyRunningJobsJava().isEmpty()) {
        try (HttpTestClient client = new HttpTestClient("localhost", port)) {
            // Request the file from the web server
            client.sendGetRequest("/jobs/" + jid + "/yarn-stop", deadline.timeLeft());

            HttpTestClient.SimpleHttpResponse response =
                client.getNextResponse(deadline.timeLeft());

            assertEquals(HttpResponseStatus.OK, response.getStatus());
            assertEquals("application/json; charset=UTF-8", response.getType());
            assertEquals("{}", response.getContent());
        }
        Thread.sleep(20);
    }
}
Example 6: runDetached
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
/**
 * This method executes a job in detached mode. The method returns immediately after the job
 * has been added to the cluster's set of running jobs.
 *
 * @param job  The Flink job to execute
 *
 * @throws JobExecutionException Thrown if anything went amiss during initial job launch,
 *         or if the job terminally failed.
 */
public void runDetached(JobGraph job) throws JobExecutionException {
    checkNotNull(job);
    LOG.info("Received job for detached execution: {} ({})", job.getName(), job.getJobID());

    synchronized (lock) {
        checkState(!shutdown, "mini cluster is shut down");
        checkState(runners == null, "mini cluster can only execute one job at a time");

        DetachedFinalizer finalizer = new DetachedFinalizer(job.getJobID(), numJobManagers);

        this.runners = startJobRunners(job, finalizer, finalizer);
    }
}
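A minimal detached-mode caller sketch (the miniCluster variable is the same assumed setup as in the sketch under Example 1): submission returns immediately, and the JobID read from the graph is the only handle for tracking the job afterwards.

JobGraph job = new JobGraph("detached-example");
miniCluster.runDetached(job);

JobID handle = job.getJobID();   // keep this to query status or cancel the job later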
Example 7: testValueStateShortcut
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
/**
 * Tests simple value state queryable state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The test succeeds after each subtask index is queried with
 * value numElements (the latest element updated the state).
 *
 * <p>This is the same as the simple value state test, but uses the API shortcut.
 */
@Test
public void testValueStateShortcut() throws Exception {
    // Config
    final Deadline deadline = TEST_TIMEOUT.fromNow();
    final long numElements = 1024L;

    final QueryableStateClient client = new QueryableStateClient(
        "localhost",
        Integer.parseInt(QueryableStateOptions.PROXY_PORT_RANGE.defaultValue()));

    JobID jobId = null;
    try {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStateBackend(stateBackend);
        env.setParallelism(maxParallelism);
        // Very important, because cluster is shared between tests and we
        // don't explicitly check that all slots are available before
        // submitting.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

        DataStream<Tuple2<Integer, Long>> source = env
            .addSource(new TestAscendingValueSource(numElements));

        // Value state shortcut
        QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
            source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
                private static final long serialVersionUID = 9168901838808830068L;

                @Override
                public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                    return value.f0;
                }
            }).asQueryableState("matata");

        // Submit the job graph
        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        jobId = jobGraph.getJobID();

        cluster.submitJobDetached(jobGraph);

        final ValueStateDescriptor<Tuple2<Integer, Long>> stateDesc =
            (ValueStateDescriptor<Tuple2<Integer, Long>>) queryableState.getStateDescriptor();
        executeValueQuery(deadline, client, jobId, "matata", stateDesc, numElements);
    } finally {
        // Free cluster resources
        if (jobId != null) {
            CompletableFuture<CancellationSuccess> cancellation = FutureUtils.toJava(
                cluster.getLeaderGateway(deadline.timeLeft())
                    .ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
                    .mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class)));

            cancellation.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        }

        client.shutdown();
    }
}
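The executeValueQuery helper is not shown on this page. For reference, a single query with this client might look like the following sketch (the five-argument getKvState overload shown here is the Flink 1.4-style client API; other versions differ):

CompletableFuture<ValueState<Tuple2<Integer, Long>>> resultFuture =
    client.getKvState(
        jobId,
        "matata",                      // queryable state name registered above
        0,                             // key: subtask index 0
        BasicTypeInfo.INT_TYPE_INFO,   // key type information
        stateDesc);                    // the same descriptor used in the job

Tuple2<Integer, Long> value = resultFuture.get().value();  // blocks until the state server answers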
Example 8: testQueryNonStartedJobState
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
/**
 * Similar test to {@link #testValueState()}, but before submitting the
 * job we already issue one request, which fails.
 */
@Test
public void testQueryNonStartedJobState() throws Exception {
    // Config
    final Deadline deadline = TEST_TIMEOUT.fromNow();
    final int numElements = 1024;

    final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

    JobID jobId = null;
    try {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStateBackend(stateBackend);
        env.setParallelism(maxParallelism);
        // Very important, because cluster is shared between tests and we
        // don't explicitly check that all slots are available before
        // submitting.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

        DataStream<Tuple2<Integer, Long>> source = env
            .addSource(new TestAscendingValueSource(numElements));

        // Value state
        ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>(
            "any",
            source.getType(),
            null);

        QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
            source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
                private static final long serialVersionUID = 7480503339992214681L;

                @Override
                public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                    return value.f0;
                }
            }).asQueryableState("hakuna", valueState);

        // Submit the job graph
        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        jobId = jobGraph.getJobID();

        // Now query
        long expected = numElements;

        // query once
        client.getKvState(
            jobId,
            queryableState.getQueryableStateName(),
            0,
            VoidNamespace.INSTANCE,
            BasicTypeInfo.INT_TYPE_INFO,
            VoidNamespaceTypeInfo.INSTANCE,
            valueState);

        cluster.submitJobDetached(jobGraph);

        executeQuery(deadline, client, jobId, "hakuna", valueState, expected);
    } finally {
        // Free cluster resources
        if (jobId != null) {
            Future<CancellationSuccess> cancellation = cluster
                .getLeaderGateway(deadline.timeLeft())
                .ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
                .mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));

            Await.ready(cancellation, deadline.timeLeft());
        }

        client.shutDown();
    }
}
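The pre-submission query above is expected to fail because the JobManager does not yet know the job. A hypothetical way a caller could observe that failure (resultFuture stands for the captured return value of getKvState, which this test deliberately ignores):

import static org.junit.Assert.fail;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

try {
    resultFuture.get(10, TimeUnit.SECONDS);
    fail("querying state of a job that has not started should not succeed");
} catch (ExecutionException expected) {
    // the future completes exceptionally, e.g. with an "unknown job" style error
}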
Example 9: testValueStateShortcut
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
/**
 * Tests simple value state queryable state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The test succeeds after each subtask index is queried with
 * value numElements (the latest element updated the state).
 *
 * <p>This is the same as the simple value state test, but uses the API shortcut.
 */
@Test
public void testValueStateShortcut() throws Exception {
    // Config
    final Deadline deadline = TEST_TIMEOUT.fromNow();
    final int numElements = 1024;

    final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

    JobID jobId = null;
    try {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStateBackend(stateBackend);
        env.setParallelism(maxParallelism);
        // Very important, because cluster is shared between tests and we
        // don't explicitly check that all slots are available before
        // submitting.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

        DataStream<Tuple2<Integer, Long>> source = env
            .addSource(new TestAscendingValueSource(numElements));

        // Value state shortcut
        QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
            source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
                private static final long serialVersionUID = 9168901838808830068L;

                @Override
                public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                    return value.f0;
                }
            }).asQueryableState("matata");

        // Submit the job graph
        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        jobId = jobGraph.getJobID();

        cluster.submitJobDetached(jobGraph);

        // Now query
        long expected = numElements;

        executeQuery(deadline, client, jobId, "matata",
            queryableState.getValueSerializer(), expected);
    } finally {
        // Free cluster resources
        if (jobId != null) {
            Future<CancellationSuccess> cancellation = cluster
                .getLeaderGateway(deadline.timeLeft())
                .ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
                .mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));

            Await.ready(cancellation, deadline.timeLeft());
        }

        client.shutDown();
    }
}
Example 10: testReducingState
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
/**
 * Tests simple reducing state queryable state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The reducing state instance sums these up. The test succeeds
 * after each subtask index is queried with result n*(n+1)/2.
 */
@Test
public void testReducingState() throws Exception {
    // Config
    final Deadline deadline = TEST_TIMEOUT.fromNow();
    final int numElements = 1024;

    final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

    JobID jobId = null;
    try {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStateBackend(stateBackend);
        env.setParallelism(maxParallelism);
        // Very important, because cluster is shared between tests and we
        // don't explicitly check that all slots are available before
        // submitting.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

        DataStream<Tuple2<Integer, Long>> source = env
            .addSource(new TestAscendingValueSource(numElements));

        // Reducing state
        ReducingStateDescriptor<Tuple2<Integer, Long>> reducingState =
            new ReducingStateDescriptor<>(
                "any",
                new SumReduce(),
                source.getType());

        QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
            source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
                private static final long serialVersionUID = 8470749712274833552L;

                @Override
                public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                    return value.f0;
                }
            }).asQueryableState("jungle", reducingState);

        // Submit the job graph
        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        jobId = jobGraph.getJobID();

        cluster.submitJobDetached(jobGraph);

        // Wait until job is running

        // Now query
        long expected = numElements * (numElements + 1) / 2;

        executeQuery(deadline, client, jobId, "jungle", reducingState, expected);
    } finally {
        // Free cluster resources
        if (jobId != null) {
            Future<CancellationSuccess> cancellation = cluster
                .getLeaderGateway(deadline.timeLeft())
                .ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
                .mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));

            Await.ready(cancellation, deadline.timeLeft());
        }

        client.shutDown();
    }
}
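SumReduce is referenced above but not shown on this page. A plausible implementation, consistent with the expected per-key result n*(n+1)/2, simply sums the second tuple field (a sketch, not the page's verbatim class):

import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.tuple.Tuple2;

private static class SumReduce implements ReduceFunction<Tuple2<Integer, Long>> {
    private static final long serialVersionUID = 1L;

    @Override
    public Tuple2<Integer, Long> reduce(Tuple2<Integer, Long> a, Tuple2<Integer, Long> b) {
        // keep the key, add up the counts
        return Tuple2.of(a.f0, a.f1 + b.f1);
    }
}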
Example 11: testValueState
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
/**
 * Tests simple value state queryable state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The test succeeds after each subtask index is queried with
 * value numElements (the latest element updated the state).
 */
@Test
public void testValueState() throws Exception {
    // Config
    final Deadline deadline = TEST_TIMEOUT.fromNow();
    final int numElements = 1024;

    final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

    JobID jobId = null;
    try {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(NUM_SLOTS);
        // Very important, because cluster is shared between tests and we
        // don't explicitly check that all slots are available before
        // submitting.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

        DataStream<Tuple2<Integer, Long>> source = env
            .addSource(new TestAscendingValueSource(numElements));

        // Value state
        ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>(
            "any",
            source.getType());

        QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
            source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
                @Override
                public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                    return value.f0;
                }
            }).asQueryableState("hakuna", valueState);

        // Submit the job graph
        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        jobId = jobGraph.getJobID();

        cluster.submitJobDetached(jobGraph);

        // Now query
        long expected = numElements;

        executeValueQuery(deadline, client, jobId, queryableState, expected);
    } finally {
        // Free cluster resources
        if (jobId != null) {
            Future<CancellationSuccess> cancellation = cluster
                .getLeaderGateway(deadline.timeLeft())
                .ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
                .mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));

            Await.ready(cancellation, deadline.timeLeft());
        }

        client.shutDown();
    }
}
Example 12: testQueryNonStartedJobState
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
/**
 * Similar test to {@link #testValueState()}, but before submitting the
 * job we already issue one request, which fails.
 */
@Test
public void testQueryNonStartedJobState() throws Exception {
    // Config
    final Deadline deadline = TEST_TIMEOUT.fromNow();
    final int numElements = 1024;

    final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

    JobID jobId = null;
    try {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(NUM_SLOTS);
        // Very important, because cluster is shared between tests and we
        // don't explicitly check that all slots are available before
        // submitting.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

        DataStream<Tuple2<Integer, Long>> source = env
            .addSource(new TestAscendingValueSource(numElements));

        // Value state
        ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>(
            "any",
            source.getType(),
            null);

        QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
            source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
                @Override
                public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                    return value.f0;
                }
            }).asQueryableState("hakuna", valueState);

        // Submit the job graph
        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        jobId = jobGraph.getJobID();

        // Now query
        long expected = numElements;

        // query once
        client.getKvState(jobId, queryableState.getQueryableStateName(), 0,
            KvStateRequestSerializer.serializeKeyAndNamespace(
                0,
                queryableState.getKeySerializer(),
                VoidNamespace.INSTANCE,
                VoidNamespaceSerializer.INSTANCE));

        cluster.submitJobDetached(jobGraph);

        executeValueQuery(deadline, client, jobId, queryableState, expected);
    } finally {
        // Free cluster resources
        if (jobId != null) {
            Future<CancellationSuccess> cancellation = cluster
                .getLeaderGateway(deadline.timeLeft())
                .ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
                .mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));

            Await.ready(cancellation, deadline.timeLeft());
        }

        client.shutDown();
    }
}
Example 13: testValueStateShortcut
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
/**
 * Tests simple value state queryable state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The test succeeds after each subtask index is queried with
 * value numElements (the latest element updated the state).
 *
 * This is the same as the simple value state test, but uses the API shortcut.
 */
@Test
public void testValueStateShortcut() throws Exception {
    // Config
    final Deadline deadline = TEST_TIMEOUT.fromNow();
    final int numElements = 1024;

    final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

    JobID jobId = null;
    try {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(NUM_SLOTS);
        // Very important, because cluster is shared between tests and we
        // don't explicitly check that all slots are available before
        // submitting.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

        DataStream<Tuple2<Integer, Long>> source = env
            .addSource(new TestAscendingValueSource(numElements));

        // Value state shortcut
        QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
            source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
                @Override
                public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                    return value.f0;
                }
            }).asQueryableState("matata");

        // Submit the job graph
        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        jobId = jobGraph.getJobID();

        cluster.submitJobDetached(jobGraph);

        // Now query
        long expected = numElements;

        executeValueQuery(deadline, client, jobId, queryableState, expected);
    } finally {
        // Free cluster resources
        if (jobId != null) {
            Future<CancellationSuccess> cancellation = cluster
                .getLeaderGateway(deadline.timeLeft())
                .ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
                .mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));

            Await.ready(cancellation, deadline.timeLeft());
        }

        client.shutDown();
    }
}
Example 14: testReducingState
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
/**
 * Tests simple reducing state queryable state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The reducing state instance sums these up. The test succeeds
 * after each subtask index is queried with result n*(n+1)/2.
 */
@Test
public void testReducingState() throws Exception {
    // Config
    final Deadline deadline = TEST_TIMEOUT.fromNow();
    final int numElements = 1024;

    final QueryableStateClient client = new QueryableStateClient(cluster.configuration());

    JobID jobId = null;
    try {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(NUM_SLOTS);
        // Very important, because cluster is shared between tests and we
        // don't explicitly check that all slots are available before
        // submitting.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));

        DataStream<Tuple2<Integer, Long>> source = env
            .addSource(new TestAscendingValueSource(numElements));

        // Reducing state
        ReducingStateDescriptor<Tuple2<Integer, Long>> reducingState =
            new ReducingStateDescriptor<>(
                "any",
                new SumReduce(),
                source.getType());

        QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
            source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
                @Override
                public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                    return value.f0;
                }
            }).asQueryableState("jungle", reducingState);

        // Submit the job graph
        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        jobId = jobGraph.getJobID();

        cluster.submitJobDetached(jobGraph);

        // Wait until job is running

        // Now query
        long expected = numElements * (numElements + 1) / 2;

        executeValueQuery(deadline, client, jobId, queryableState, expected);
    } finally {
        // Free cluster resources
        if (jobId != null) {
            Future<CancellationSuccess> cancellation = cluster
                .getLeaderGateway(deadline.timeLeft())
                .ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
                .mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class));

            Await.ready(cancellation, deadline.timeLeft());
        }

        client.shutDown();
    }
}
Example 15: testQueryNonStartedJobState
import org.apache.flink.runtime.jobgraph.JobGraph; // import the package/class this method depends on
/**
 * Similar test to {@link #testValueState()}, but before submitting the
 * job we already issue one request, which fails.
 */
@Test
public void testQueryNonStartedJobState() throws Exception {
    // Config
    final Deadline deadline = TEST_TIMEOUT.fromNow();
    final long numElements = 1024L;

    JobID jobId = null;
    try {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStateBackend(stateBackend);
        env.setParallelism(maxParallelism);
        // Very important, because cluster is shared between tests and we
        // don't explicitly check that all slots are available before
        // submitting.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

        DataStream<Tuple2<Integer, Long>> source = env
            .addSource(new TestAscendingValueSource(numElements));

        // Value state
        ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>(
            "any",
            source.getType(),
            null);

        QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
            source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
                private static final long serialVersionUID = 7480503339992214681L;

                @Override
                public Integer getKey(Tuple2<Integer, Long> value) throws Exception {
                    return value.f0;
                }
            }).asQueryableState("hakuna", valueState);

        // Submit the job graph
        JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        jobId = jobGraph.getJobID();

        // Now query
        long expected = numElements;

        // query once
        client.getKvState(
            jobId,
            queryableState.getQueryableStateName(),
            0,
            VoidNamespace.INSTANCE,
            BasicTypeInfo.INT_TYPE_INFO,
            VoidNamespaceTypeInfo.INSTANCE,
            valueState);

        cluster.submitJobDetached(jobGraph);

        executeValueQuery(deadline, client, jobId, "hakuna", valueState, expected);
    } finally {
        // Free cluster resources
        if (jobId != null) {
            CompletableFuture<CancellationSuccess> cancellation = FutureUtils.toJava(cluster
                .getLeaderGateway(deadline.timeLeft())
                .ask(new JobManagerMessages.CancelJob(jobId), deadline.timeLeft())
                .mapTo(ClassTag$.MODULE$.<CancellationSuccess>apply(CancellationSuccess.class)));

            cancellation.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        }
    }
}
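All of the queryable-state tests above rely on TestAscendingValueSource, which this page does not include. A minimal sketch consistent with the Javadoc descriptions (field and method details are assumptions): each parallel subtask emits (subtaskIndex, 0)..(subtaskIndex, numElements) and then stays alive so the state remains queryable.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;

private static class TestAscendingValueSource extends RichParallelSourceFunction<Tuple2<Integer, Long>> {
    private static final long serialVersionUID = 1L;

    private final long maxValue;
    private volatile boolean running = true;

    TestAscendingValueSource(long maxValue) {
        this.maxValue = maxValue;
    }

    @Override
    public void run(SourceContext<Tuple2<Integer, Long>> ctx) throws Exception {
        int subtaskIndex = getRuntimeContext().getIndexOfThisSubtask();

        for (long value = 0; value <= maxValue && running; value++) {
            synchronized (ctx.getCheckpointLock()) {
                // the latest emitted value per key is what the value state holds
                ctx.collect(Tuple2.of(subtaskIndex, value));
            }
        }

        // keep the task alive so the queryable state stays available
        while (running) {
            Thread.sleep(50);
        }
    }

    @Override
    public void cancel() {
        running = false;
    }
}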
}