This article collects typical usage examples of the Java class org.apache.flink.util.FlinkException. If you are unsure what FlinkException is for or how to use it, the curated examples below should help.
The FlinkException class belongs to the org.apache.flink.util package. The code examples below show how FlinkException is used in practice, ordered by popularity.
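Before the examples: FlinkException is Flink's base class for checked exceptions, so it is created and handled like any other checked exception. The minimal sketch below only shows the message and message-plus-cause constructors that appear throughout the examples; the class and method names in it (FlinkExceptionSketch, loadResource) are made up for illustration.

import java.io.IOException;

import org.apache.flink.util.FlinkException;

public class FlinkExceptionSketch {

    // Wraps a lower-level failure into a FlinkException with a descriptive message.
    static void loadResource(String path) throws FlinkException {
        try {
            // ... some I/O or parsing work that may fail ...
            throw new IOException("simulated failure");
        } catch (IOException e) {
            throw new FlinkException("Could not load resource " + path + '.', e);
        }
    }

    public static void main(String[] args) {
        try {
            loadResource("/tmp/does-not-exist");
        } catch (FlinkException e) {
            // FlinkException is a checked exception, so callers must handle or declare it.
            System.err.println(e.getMessage());
            e.getCause().printStackTrace();
        }
    }
}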
Example 1: triggerCheckpoint
import org.apache.flink.util.FlinkException; // import the required package/class

/**
 * Triggers the checkpoint in the Flink source operator.
 *
 * <p>This method assumes that the {@code checkpointIdentifier} is a string from which the
 * checkpoint ID can be parsed via {@link ReaderCheckpointHook#parseCheckpointId(String)}.
 */
private void triggerCheckpoint(String checkpointIdentifier) throws FlinkException {
    Preconditions.checkState(checkpointTrigger != null, "checkpoint trigger not set");

    log.debug("{} received checkpoint event for {}",
            getRuntimeContext().getTaskNameWithSubtasks(), checkpointIdentifier);

    final long checkpointId;
    try {
        checkpointId = ReaderCheckpointHook.parseCheckpointId(checkpointIdentifier);
    } catch (IllegalArgumentException e) {
        throw new FlinkException("Cannot trigger checkpoint due to invalid Pravega checkpoint name", e.getCause());
    }

    checkpointTrigger.triggerCheckpoint(checkpointId);
}
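For context, the snippet below sketches what a checkpoint-name parser such as ReaderCheckpointHook.parseCheckpointId might look like. It is a hypothetical stand-in, not the actual Pravega connector code, and the "PVG-CHK-" prefix is only an assumed example format. The point is that an unparsable name surfaces as an IllegalArgumentException, which triggerCheckpoint above converts into a FlinkException.

// Hypothetical parser sketch; the real name format is defined by the Pravega connector's
// ReaderCheckpointHook and may differ from the "PVG-CHK-<id>" form assumed here.
static long parseCheckpointId(String checkpointName) {
    final String prefix = "PVG-CHK-";
    if (!checkpointName.startsWith(prefix)) {
        throw new IllegalArgumentException("Not a Flink-created checkpoint name: " + checkpointName);
    }
    try {
        return Long.parseLong(checkpointName.substring(prefix.length()));
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Invalid checkpoint name: " + checkpointName, e);
    }
}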
Example 2: testInvalidAkkaConfiguration
import org.apache.flink.util.FlinkException; // import the required package/class

/**
 * Ensures that Akka configuration parameters can be set.
 */
@Test(expected = FlinkException.class)
public void testInvalidAkkaConfiguration() throws Throwable {
    Configuration config = new Configuration();
    config.setString(AkkaOptions.STARTUP_TIMEOUT, INVALID_STARTUP_TIMEOUT);

    final ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment(
        cluster.getHostname(),
        cluster.getPort(),
        config
    );
    env.getConfig().disableSysoutLogging();

    DataSet<String> result = env.createInput(new TestNonRichInputFormat());
    result.output(new LocalCollectionOutputFormat<>(new ArrayList<String>()));

    try {
        env.execute();
        Assert.fail("Program should not run successfully because of invalid Akka settings.");
    } catch (ProgramInvocationException ex) {
        throw ex.getCause();
    }
}
Example 3: handleRequest
import org.apache.flink.util.FlinkException; // import the required package/class

@Override
public final CompletableFuture<String> handleRequest(AccessExecutionJobVertex jobVertex, Map<String, String> params) {
    final String subtaskNumberString = params.get("subtasknum");
    if (subtaskNumberString == null) {
        return FutureUtils.completedExceptionally(new FlinkException("Subtask number parameter missing"));
    }

    final int subtask;
    try {
        subtask = Integer.parseInt(subtaskNumberString);
    }
    catch (NumberFormatException e) {
        return FutureUtils.completedExceptionally(new FlinkException("Invalid subtask number parameter", e));
    }

    if (subtask < 0 || subtask >= jobVertex.getParallelism()) {
        return FutureUtils.completedExceptionally(new FlinkException("subtask does not exist: " + subtask));
    }

    final AccessExecutionVertex vertex = jobVertex.getTaskVertices()[subtask];
    return handleRequest(vertex, params);
}
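Instead of throwing, the handler above reports bad request parameters by returning an already-failed future. The minimal, self-contained sketch below shows how a caller could unwrap such a failure; the class name and the hand-built future are illustrative stand-ins for the future that handleRequest would return.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

import org.apache.flink.util.FlinkException;

public class HandleRequestCaller {

    public static void main(String[] args) {
        // Stand-in for the future returned by handleRequest(...) when a parameter is
        // invalid; here we complete it exceptionally by hand.
        CompletableFuture<String> responseFuture = new CompletableFuture<>();
        responseFuture.completeExceptionally(new FlinkException("Subtask number parameter missing"));

        responseFuture
            .thenAccept(response -> System.out.println("Response: " + response))
            .exceptionally(failure -> {
                // Dependent stages may wrap the original exception in a CompletionException.
                Throwable cause = failure instanceof CompletionException ? failure.getCause() : failure;
                if (cause instanceof FlinkException) {
                    System.err.println("Request rejected: " + cause.getMessage());
                } else {
                    System.err.println("Unexpected failure: " + cause);
                }
                return null;
            });
    }
}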
Example 4: getNumberOfAvailableSlotsForGroup
import org.apache.flink.util.FlinkException; // import the required package/class

CompletableFuture<Integer> getNumberOfAvailableSlotsForGroup(SlotSharingGroupId slotSharingGroupId, JobVertexID jobVertexId) {
    return callAsync(
        () -> {
            final SlotSharingManager multiTaskSlotManager = slotSharingManagers.get(slotSharingGroupId);

            if (multiTaskSlotManager != null) {
                int availableSlots = 0;

                for (SlotSharingManager.MultiTaskSlot multiTaskSlot : multiTaskSlotManager.getResolvedRootSlots()) {
                    if (!multiTaskSlot.contains(jobVertexId)) {
                        availableSlots++;
                    }
                }

                return availableSlots;
            } else {
                throw new FlinkException("No MultiTaskSlotManager registered under " + slotSharingGroupId + '.');
            }
        },
        TestingUtils.infiniteTime());
}
Example 5: triggerSavepoint
import org.apache.flink.util.FlinkException; // import the required package/class

/**
 * Sends a {@link org.apache.flink.runtime.messages.JobManagerMessages.TriggerSavepoint}
 * message to the job manager.
 */
private String triggerSavepoint(ClusterClient<?> clusterClient, JobID jobId, String savepointDirectory) throws FlinkException {
    logAndSysout("Triggering savepoint for job " + jobId + '.');
    CompletableFuture<String> savepointPathFuture = clusterClient.triggerSavepoint(jobId, savepointDirectory);

    logAndSysout("Waiting for response...");

    final String savepointPath;
    try {
        savepointPath = savepointPathFuture.get();
    }
    catch (Exception e) {
        Throwable cause = ExceptionUtils.stripExecutionException(e);
        throw new FlinkException("Triggering a savepoint for the job " + jobId + " failed.", cause);
    }

    logAndSysout("Savepoint completed. Path: " + savepointPath);
    logAndSysout("You can resume your program from this savepoint with the run command.");

    return savepointPath;
}
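The same unwrap-then-wrap pattern from Example 5, stripping the ExecutionException that CompletableFuture.get() adds and rethrowing the real cause inside a FlinkException, can be reused for any blocking wait. A small sketch follows; the class and method names are illustrative, and only ExceptionUtils.stripExecutionException and FlinkException are actual Flink API.

import java.util.concurrent.CompletableFuture;

import org.apache.flink.util.ExceptionUtils;
import org.apache.flink.util.FlinkException;

public class FutureUnwrapSketch {

    // Blocks on the future and converts any failure into a FlinkException whose
    // cause is the real failure rather than the ExecutionException wrapper.
    static String waitForResult(CompletableFuture<String> future, String what) throws FlinkException {
        try {
            return future.get();
        } catch (Exception e) {
            Throwable cause = ExceptionUtils.stripExecutionException(e);
            throw new FlinkException("Waiting for " + what + " failed.", cause);
        }
    }
}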
Example 6: connectToJob
import org.apache.flink.util.FlinkException; // import the required package/class

/**
 * Reattaches to a running job with the given job id.
 *
 * @param jobID The job id of the job to attach to
 * @return The JobListeningContext for the given job
 * @throws JobExecutionException if an error occurs while monitoring the job execution
 */
public JobListeningContext connectToJob(JobID jobID) throws JobExecutionException {
    final ActorSystem actorSystem;
    try {
        actorSystem = actorSystemLoader.get();
    } catch (FlinkException fe) {
        throw new JobExecutionException(
            jobID,
            "Could not start the ActorSystem needed to talk to the JobManager.",
            fe);
    }

    return JobClient.attachToRunningJob(
        jobID,
        flinkConfig,
        actorSystem,
        highAvailabilityServices,
        timeout,
        printStatusDuringExecution);
}
Example 7: removeSubsumed
import org.apache.flink.util.FlinkException; // import the required package/class

/**
 * Removes a subsumed checkpoint from ZooKeeper and drops the state.
 */
private void removeSubsumed(
        final CompletedCheckpoint completedCheckpoint) throws Exception {
    if (completedCheckpoint == null) {
        return;
    }

    ZooKeeperStateHandleStore.RemoveCallback<CompletedCheckpoint> action =
        new ZooKeeperStateHandleStore.RemoveCallback<CompletedCheckpoint>() {
            @Override
            public void apply(@Nullable RetrievableStateHandle<CompletedCheckpoint> value) throws FlinkException {
                if (value != null) {
                    try {
                        completedCheckpoint.discardOnSubsume();
                    } catch (Exception e) {
                        throw new FlinkException("Could not discard the completed checkpoint on subsume.", e);
                    }
                }
            }
        };

    checkpointsInZooKeeper.releaseAndTryRemove(
        checkpointIdToPath(completedCheckpoint.getCheckpointID()),
        action);
}
Example 8: removeShutdown
import org.apache.flink.util.FlinkException; // import the required package/class

/**
 * Removes a checkpoint from ZooKeeper because of job shutdown and drops the state.
 */
private void removeShutdown(
        final CompletedCheckpoint completedCheckpoint,
        final JobStatus jobStatus) throws Exception {
    if (completedCheckpoint == null) {
        return;
    }

    ZooKeeperStateHandleStore.RemoveCallback<CompletedCheckpoint> removeAction = new ZooKeeperStateHandleStore.RemoveCallback<CompletedCheckpoint>() {
        @Override
        public void apply(@Nullable RetrievableStateHandle<CompletedCheckpoint> value) throws FlinkException {
            try {
                completedCheckpoint.discardOnShutdown(jobStatus);
            } catch (Exception e) {
                throw new FlinkException("Could not discard the completed checkpoint on shutdown.", e);
            }
        }
    };

    checkpointsInZooKeeper.releaseAndTryRemove(
        checkpointIdToPath(completedCheckpoint.getCheckpointID()),
        removeAction);
}
Example 9: retrieveCompletedCheckpoint
import org.apache.flink.util.FlinkException; // import the required package/class

private static CompletedCheckpoint retrieveCompletedCheckpoint(Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String> stateHandlePath) throws FlinkException {
    long checkpointId = pathToCheckpointId(stateHandlePath.f1);

    LOG.info("Trying to retrieve checkpoint {}.", checkpointId);

    try {
        return stateHandlePath.f0.retrieveState();
    } catch (ClassNotFoundException cnfe) {
        throw new FlinkException("Could not retrieve checkpoint " + checkpointId + " from state handle under " +
            stateHandlePath.f1 + ". This indicates that you are trying to recover from state written by an " +
            "older Flink version which is not compatible. Try cleaning the state handle store.", cnfe);
    } catch (IOException ioe) {
        throw new FlinkException("Could not retrieve checkpoint " + checkpointId + " from state handle under " +
            stateHandlePath.f1 + ". This indicates that the retrieved state handle is broken. Try cleaning the " +
            "state handle store.", ioe);
    }
}
Example 10: triggerMasterHooks
import org.apache.flink.util.FlinkException; // import the required package/class

/**
 * Triggers all given master hooks and returns state objects for each hook that
 * produced a state.
 *
 * @param hooks The hooks to trigger
 * @param checkpointId The checkpoint ID of the triggering checkpoint
 * @param timestamp The (informational) timestamp for the triggering checkpoint
 * @param executor An executor that can be used for asynchronous I/O calls
 * @param timeout The maximum time that a hook may take to complete
 *
 * @return A list containing all states produced by the hooks
 *
 * @throws FlinkException Thrown, if the hooks throw an exception, or the state
 *                        deserialization fails.
 */
public static List<MasterState> triggerMasterHooks(
        Collection<MasterTriggerRestoreHook<?>> hooks,
        long checkpointId,
        long timestamp,
        Executor executor,
        Time timeout) throws FlinkException {

    final ArrayList<MasterState> states = new ArrayList<>(hooks.size());

    for (MasterTriggerRestoreHook<?> hook : hooks) {
        MasterState state = triggerHook(hook, checkpointId, timestamp, executor, timeout);
        if (state != null) {
            states.add(state);
        }
    }

    states.trimToSize();
    return states;
}
Example 11: deserializeState
import org.apache.flink.util.FlinkException; // import the required package/class

private static <T> T deserializeState(MasterState state, MasterTriggerRestoreHook<?> hook) throws FlinkException {
    @SuppressWarnings("unchecked")
    final MasterTriggerRestoreHook<T> typedHook = (MasterTriggerRestoreHook<T>) hook;
    final String id = hook.getIdentifier();

    try {
        final SimpleVersionedSerializer<T> deserializer = typedHook.createCheckpointDataSerializer();
        if (deserializer == null) {
            throw new FlinkException("null serializer for state of hook " + hook.getIdentifier());
        }

        return deserializer.deserialize(state.version(), state.bytes());
    }
    catch (Throwable t) {
        throw new FlinkException("Cannot deserialize state for master hook '" + id + '\'', t);
    }
}
Example 12: restoreHook
import org.apache.flink.util.FlinkException; // import the required package/class

private static <T> void restoreHook(
        final Object state,
        final MasterTriggerRestoreHook<?> hook,
        final long checkpointId) throws FlinkException {

    @SuppressWarnings("unchecked")
    final T typedState = (T) state;

    @SuppressWarnings("unchecked")
    final MasterTriggerRestoreHook<T> typedHook = (MasterTriggerRestoreHook<T>) hook;

    try {
        typedHook.restoreCheckpoint(checkpointId, typedState);
    }
    catch (FlinkException e) {
        throw e;
    }
    catch (Throwable t) {
        // catch all here, including Errors that may come from dependency and classpath issues
        ExceptionUtils.rethrowIfFatalError(t);
        throw new FlinkException("Error while calling restoreCheckpoint on checkpoint hook '"
            + hook.getIdentifier() + '\'', t);
    }
}
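Examples 11 and 12 use the same defensive pattern: catch Throwable, let JVM-fatal errors propagate via ExceptionUtils.rethrowIfFatalError, and wrap everything else in a FlinkException. Below is a minimal sketch of that pattern as a reusable helper; the class, interface, and method names are made up for illustration, and only ExceptionUtils and FlinkException are real Flink classes.

import org.apache.flink.util.ExceptionUtils;
import org.apache.flink.util.FlinkException;

public class FatalErrorAwareWrapping {

    // Illustrative callback type; not part of the Flink API.
    interface ThrowingAction {
        void run() throws Exception;
    }

    // Runs the action; lets JVM-fatal errors (e.g. OutOfMemoryError) propagate
    // untouched and wraps every other failure in a FlinkException.
    static void runWrapped(String description, ThrowingAction action) throws FlinkException {
        try {
            action.run();
        } catch (Throwable t) {
            ExceptionUtils.rethrowIfFatalError(t);
            throw new FlinkException("Error while " + description, t);
        }
    }

    public static void main(String[] args) {
        try {
            runWrapped("parsing the user state", () -> {
                throw new IllegalStateException("corrupt state");
            });
        } catch (FlinkException e) {
            System.err.println(e.getMessage() + " caused by " + e.getCause());
        }
    }
}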
Example 13: initializeLocation
import org.apache.flink.util.FlinkException; // import the required package/class

private void initializeLocation(ExecutionVertex vertex, TaskManagerLocation location) throws Exception {
    // we need a bit of reflection magic to initialize the location without going through
    // scheduling paths. we choose to do that, rather than the alternatives:
    //  - mocking the scheduler created fragile tests that break whenever the scheduler is adjusted
    //  - exposing test methods in the ExecutionVertex leads to undesirable setters
    SlotContext slot = new SimpleSlotContext(
        new AllocationID(),
        location,
        0,
        mock(TaskManagerGateway.class));

    SimpleSlot simpleSlot = new SimpleSlot(slot, mock(SlotOwner.class), 0);

    if (!vertex.getCurrentExecutionAttempt().tryAssignResource(simpleSlot)) {
        throw new FlinkException("Could not assign resource.");
    }
}
Example 14: releaseSharedSlot
import org.apache.flink.util.FlinkException; // import the required package/class

/**
 * Called from {@link org.apache.flink.runtime.instance.SharedSlot#releaseSlot(Throwable)}.
 *
 * @param sharedSlot The slot to be released.
 */
void releaseSharedSlot(SharedSlot sharedSlot) {
    synchronized (lock) {
        if (sharedSlot.markCancelled()) {
            // we are releasing this slot

            if (sharedSlot.hasChildren()) {
                final FlinkException cause = new FlinkException("Releasing shared slot parent.");
                // by simply releasing all children, we should eventually release this slot.
                Set<Slot> children = sharedSlot.getSubSlots();
                while (children.size() > 0) {
                    children.iterator().next().releaseSlot(cause);
                }
            }
            else {
                // if there are no children that trigger the release, we trigger it directly
                internalDisposeEmptySharedSlot(sharedSlot);
            }
        }
    }
}