本文整理汇总了Java中org.apache.flink.api.java.ExecutionEnvironment.setRestartStrategy方法的典型用法代码示例。如果您正苦于以下问题:Java ExecutionEnvironment.setRestartStrategy方法的具体用法?Java ExecutionEnvironment.setRestartStrategy怎么用?Java ExecutionEnvironment.setRestartStrategy使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.flink.api.java.ExecutionEnvironment
的用法示例。
在下文中一共展示了ExecutionEnvironment.setRestartStrategy方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: executeTask
import org.apache.flink.api.java.ExecutionEnvironment; //导入方法依赖的package包/类
/**
 * Runs a generate→map→collect pipeline with a fixed-delay restart strategy
 * (zero delay between attempts) and checks the collected result.
 *
 * @param mapper  the map function applied to each generated value
 * @param retries number of restart attempts allowed before the job fails for good
 * @throws Exception if job execution fails beyond the allowed retries
 */
private void executeTask(MapFunction<Long, Long> mapper, int retries) throws Exception {
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(retries, 0));

    final List<Long> collected = env.generateSequence(1, 9).map(mapper).collect();

    MultipleProgramsTestBase.compareResultAsText(collected, "1\n2\n3\n4\n5\n6\n7\n8\n9");
}
示例2: testRestartMultipleTimes
import org.apache.flink.api.java.ExecutionEnvironment; //导入方法依赖的package包/类
@Test
public void testRestartMultipleTimes() {
    try {
        // Sink target: the job writes its single reduced value into this list.
        final List<Long> collected = new ArrayList<Long>();

        final ExecutionEnvironment env =
                ExecutionEnvironment.createRemoteEnvironment("localhost", cluster.getLeaderRPCPort());
        env.setParallelism(4);
        // Allow up to 5 restarts, 100 ms apart, to survive the injected failures.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(5, 100));
        env.getConfig().disableSysoutLogging();

        env.generateSequence(1, 10)
                .rebalance()
                .map(new FailingMapper3<Long>())
                .reduce(new ReduceFunction<Long>() {
                    @Override
                    public Long reduce(Long value1, Long value2) {
                        return value1 + value2;
                    }
                })
                .output(new LocalCollectionOutputFormat<Long>(collected));

        executeAndRunAssertions(env);

        long total = 0;
        for (long element : collected) {
            total += element;
        }
        // 1 + 2 + ... + 10 == 55, so the job read every element despite restarts.
        assertEquals(55, total);
    }
    catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
示例3: receiveParameters
import org.apache.flink.api.java.ExecutionEnvironment; //导入方法依赖的package包/类
/**
 * Reads exactly four configuration records from the Python streamer and applies
 * them to the given execution environment: degree of parallelism, execution
 * mode, restart (retry) policy, and the current environment id.
 *
 * <p>Each record is a {@code Tuple} whose field 0 is the parameter name (matched
 * against the {@code Parameters} enum, case-insensitively) and whose field 1 is
 * the parameter value.
 *
 * @param env the execution environment to configure
 * @throws IOException if reading a record from the streamer fails
 */
private void receiveParameters(ExecutionEnvironment env) throws IOException {
    // The Python side always sends exactly four parameter tuples, in any order.
    for (int x = 0; x < 4; x++) {
        Tuple value = (Tuple) streamer.getRecord(true);
        switch (Parameters.valueOf(((String) value.getField(0)).toUpperCase())) {
            case DOP:
                Integer dop = value.<Integer>getField(1);
                env.setParallelism(dop);
                break;
            case MODE:
                // A truthy MODE flag selects local execution; in that case fall back
                // to the default distributed-cache temp dir instead of a user value.
                if (value.<Boolean>getField(1)) {
                    LOG.info("Local execution specified, using default for {}.", PythonOptions.DC_TMP_DIR);
                    tmpDistributedDir = new Path(PythonOptions.DC_TMP_DIR.defaultValue());
                }
                break;
            case RETRY:
                int retry = value.<Integer>getField(1);
                // Fixed 10 s delay between the requested number of restart attempts.
                env.setRestartStrategy(RestartStrategies.fixedDelayRestart(retry, 10000L));
                break;
            case ID:
                currentEnvironmentID = value.<Integer>getField(1);
                break;
        }
    }
    // A DOP that was never set (or sent as negative) falls back to parallelism 1.
    if (env.getParallelism() < 0) {
        env.setParallelism(1);
    }
}
示例4: testBatchInputWithFailure
import org.apache.flink.api.java.ExecutionEnvironment; //导入方法依赖的package包/类
/**
 * Verifies that the input format reads all records exactly-once in the presence of job failures.
 */
@Test
public void testBatchInputWithFailure() throws Exception {
    final int numElements = 100;

    // set up the stream (3 segments, random name to isolate this test run)
    final String streamName = RandomStringUtils.randomAlphabetic(20);
    SETUP_UTILS.createTestStream(streamName, 3);

    try (
        final EventStreamWriter<Integer> eventWriter = SETUP_UTILS.getIntegerWriter(streamName);

        // create the producer that writes to the stream
        final ThrottledIntegerWriter producer = new ThrottledIntegerWriter(
                eventWriter,
                numElements,
                numElements + 1, // no need to block writer for a batch test
                0
        )
    ) {
        // write batch input; sync() blocks until all events are in the stream
        // before the Flink job is submitted below.
        producer.start();
        producer.sync();

        final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        // Allow exactly one restart so the injected failure can be recovered from.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000L));
        env.setParallelism(3);

        // simple pipeline that reads from Pravega and collects the events;
        // FailOnceMapper throws once (at numElements / 2), forcing a restart.
        List<Integer> integers = env.createInput(
                new FlinkPravegaInputFormat<>(
                        SETUP_UTILS.getControllerUri(),
                        SETUP_UTILS.getScope(),
                        Collections.singleton(streamName),
                        new IntDeserializer()),
                BasicTypeInfo.INT_TYPE_INFO
        ).map(new FailOnceMapper(numElements / 2)).collect();

        // verify that the job did fail, and all events were still read
        Assert.assertTrue(FailOnceMapper.hasFailed());
        Assert.assertEquals(numElements, integers.size());

        // Reset the static failure flag so later tests see a clean state.
        FailOnceMapper.reset();
    }
}
示例5: testTaskManagerFailure
import org.apache.flink.api.java.ExecutionEnvironment; //导入方法依赖的package包/类
/**
 * Submits a throttled generate→rebalance→map→reduce job against a remote
 * JobManager and verifies the final sum after a TaskManager failure/recovery
 * cycle. The mappers coordinate with the test harness through marker files in
 * {@code coordinateDir}: each subtask drops a READY marker once running, then
 * throttles itself (100 ms per element) until the PROCEED marker appears.
 *
 * @param jobManagerPort port of the JobManager to submit against
 * @param coordinateDir  directory shared with the harness for marker files
 * @throws Exception if job execution or the final assertion fails
 */
@Override
public void testTaskManagerFailure(int jobManagerPort, final File coordinateDir) throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment("localhost", jobManagerPort);
    env.setParallelism(PARALLELISM);
    // One restart attempt with a 10 s delay, giving the replacement
    // TaskManager time to come up before the job is retried.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 10000));
    env.getConfig().setExecutionMode(executionMode);
    env.getConfig().disableSysoutLogging();

    final long numElements = 100000L;
    final DataSet<Long> result = env.generateSequence(1, numElements)

            // make sure every mapper is involved (no one is skipped because of lazy split assignment)
            .rebalance()
            // the majority of the behavior is in the MapFunction
            .map(new RichMapFunction<Long, Long>() {

                private final File proceedFile = new File(coordinateDir, PROCEED_MARKER_FILE);

                private boolean markerCreated = false;
                private boolean checkForProceedFile = true;

                @Override
                public Long map(Long value) throws Exception {
                    // Signal readiness exactly once per subtask via a marker file.
                    if (!markerCreated) {
                        int taskIndex = getRuntimeContext().getIndexOfThisSubtask();
                        touchFile(new File(coordinateDir, READY_MARKER_FILE_PREFIX + taskIndex));
                        markerCreated = true;
                    }

                    // check if the proceed file exists
                    if (checkForProceedFile) {
                        if (proceedFile.exists()) {
                            checkForProceedFile = false;
                        } else {
                            // otherwise wait so that we make slow progress
                            Thread.sleep(100);
                        }
                    }
                    return value;
                }
            })
            .reduce(new ReduceFunction<Long>() {
                @Override
                public Long reduce(Long value1, Long value2) {
                    return value1 + value2;
                }
            });

    long sum = result.collect().get(0);
    // Gauss sum 1..numElements — proves every element survived the failure.
    assertEquals(numElements * (numElements + 1L) / 2L, sum);
}