本文整理汇总了Java中org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.setBufferTimeout方法的典型用法代码示例。如果您正苦于以下问题:Java StreamExecutionEnvironment.setBufferTimeout方法的具体用法?Java StreamExecutionEnvironment.setBufferTimeout怎么用?Java StreamExecutionEnvironment.setBufferTimeout使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
的用法示例。
在下文中一共展示了StreamExecutionEnvironment.setBufferTimeout方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: runPartitioningProgram
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; //导入方法依赖的package包/类
/**
 * Builds and executes the partitioning/latency topology against a remote job manager
 * running on localhost.
 *
 * <p>The environment is tuned for low latency: object reuse is enabled and the network
 * buffer timeout is lowered to 5 ms, so records are flushed quickly instead of waiting
 * for buffers to fill. Checkpointing runs every second with at-least-once guarantees.
 *
 * @param jobManagerPort port of the remote job manager on localhost
 * @param parallelism    parallelism applied to the whole job
 * @throws Exception if job submission or execution fails
 */
private static void runPartitioningProgram(int jobManagerPort, int parallelism) throws Exception {
    final StreamExecutionEnvironment environment =
            StreamExecutionEnvironment.createRemoteEnvironment("localhost", jobManagerPort);
    environment.setParallelism(parallelism);
    environment.getConfig().enableObjectReuse();
    // Flush network buffers after at most 5 ms to keep per-record latency low.
    environment.setBufferTimeout(5L);
    environment.enableCheckpointing(1000, CheckpointingMode.AT_LEAST_ONCE);

    // source -> identity map -> keyBy on field 0 -> timestamping sink
    environment
            .addSource(new TimeStampingSource())
            .map(new IdMapper<Tuple2<Long, Long>>())
            .keyBy(0)
            .addSink(new TimestampingSink());

    environment.execute("Partitioning Program");
}
示例2: setupExecutionEnvironment
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; //导入方法依赖的package包/类
/**
 * Creates a {@link StreamExecutionEnvironment} configured for event-time processing,
 * with the network buffer timeout taken from the application configuration.
 *
 * <p>Note: the {@code final} modifier was removed from this method — {@code static}
 * methods cannot be overridden, so {@code final} was redundant (flagged by most linters).
 *
 * @param config application configuration supplying the buffer timeout in milliseconds
 * @return a configured execution environment
 */
public static StreamExecutionEnvironment setupExecutionEnvironment(AppConfiguration config) {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    // Kept for tuning reference: explicit auto-watermark interval override.
    //env.getConfig().setAutoWatermarkInterval(1000000);
    // Trade latency for throughput according to the app-configured flush interval.
    env.setBufferTimeout(config.getBufferTimeout());
    return env;
}
示例3: runMultipleSourcesOnePartitionExactlyOnceTest
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; //导入方法依赖的package包/类
/**
* Tests the proper consumption when having more Flink sources than Kafka partitions, which means
* that some Flink sources will read no partitions.
*/
/**
 * Tests the proper consumption when having more Flink sources than Kafka partitions, which means
 * that some Flink sources will read no partitions.
 *
 * <p>Writes a randomized integer sequence into a 5-partition topic, then reads it back with
 * parallelism 8 (so at least 3 source subtasks get no partition), injects one mapper failure
 * to force a recovery, and validates exactly-once delivery of all elements in the sink.
 */
public void runMultipleSourcesOnePartitionExactlyOnceTest() throws Exception {
final String topic = "manyToOneTopic";
final int numPartitions = 5;
final int numElementsPerPartition = 1000;
final int totalElements = numPartitions * numElementsPerPartition;
// Fail roughly a third of the way through one partition's data to trigger recovery mid-stream.
final int failAfterElements = numElementsPerPartition / 3;
// Parallelism (8) deliberately exceeds the partition count (5): some sources read nothing.
final int parallelism = 8;
createTestTopic(topic, numPartitions, 1);
// Produce the test data into Kafka before starting the consuming topology.
DataGenerators.generateRandomizedIntegerSequence(
StreamExecutionEnvironment.getExecutionEnvironment(),
kafkaServer,
topic,
numPartitions,
numElementsPerPartition,
true);
// run the topology that fails and recovers
DeserializationSchema<Integer> schema =
new TypeInformationSerializationSchema<>(BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.enableCheckpointing(500);
env.setParallelism(parallelism);
// set the number of restarts to one. The failing mapper will fail once, then it's only success exceptions.
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
env.getConfig().disableSysoutLogging();
// Buffer timeout 0 flushes every record immediately — no batching delay in this test.
env.setBufferTimeout(0);
Properties props = new Properties();
props.putAll(standardProps);
props.putAll(secureProps);
FlinkKafkaConsumerBase<Integer> kafkaSource = kafkaServer.getConsumer(topic, schema, props);
// Pipeline: source -> partition-count validation -> failure injection -> exactly-once sink.
// The sink runs with parallelism 1 so it can count the complete element set.
env
.addSource(kafkaSource)
.map(new PartitionValidatingMapper(numPartitions, 1))
.map(new FailingIdentityMapper<Integer>(failAfterElements))
.addSink(new ValidatingExactlyOnceSink(totalElements)).setParallelism(1);
// Reset the static failure flag so the mapper fails exactly once in this run.
FailingIdentityMapper.failedBefore = false;
tryExecute(env, "multi-source-one-partitions exactly once test");
deleteTestTopic(topic);
}
示例4: runMultipleSourcesOnePartitionExactlyOnceTest
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; //导入方法依赖的package包/类
/**
* Tests the proper consumption when having more Flink sources than Kafka partitions, which means
* that some Flink sources will read no partitions.
*/
/**
 * Tests the proper consumption when having more Flink sources than Kafka partitions, which means
 * that some Flink sources will read no partitions.
 *
 * <p>Variant submitting to a remote environment ("localhost", {@code flinkPort}): writes a
 * randomized integer sequence into a 5-partition topic, consumes it with parallelism 8 (so
 * several source subtasks get no partition), injects one mapper failure, and validates
 * exactly-once delivery in the sink.
 */
public void runMultipleSourcesOnePartitionExactlyOnceTest() throws Exception {
final String topic = "manyToOneTopic";
final int numPartitions = 5;
final int numElementsPerPartition = 1000;
final int totalElements = numPartitions * numElementsPerPartition;
// Fail roughly a third of the way through one partition's data to trigger recovery mid-stream.
final int failAfterElements = numElementsPerPartition / 3;
// Parallelism (8) deliberately exceeds the partition count (5): some sources read nothing.
final int parallelism = 8;
createTestTopic(topic, numPartitions, 1);
// Produce the test data into Kafka (via the remote cluster) before starting the consumer.
DataGenerators.generateRandomizedIntegerSequence(
StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort),
kafkaServer,
topic, numPartitions, numElementsPerPartition, true);
// run the topology that fails and recovers
DeserializationSchema<Integer> schema =
new TypeInformationSerializationSchema<>(BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());
StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
env.enableCheckpointing(500);
env.setParallelism(parallelism);
// set the number of restarts to one. The failing mapper will fail once, then it's only success exceptions.
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
env.getConfig().disableSysoutLogging();
// Buffer timeout 0 flushes every record immediately — no batching delay in this test.
env.setBufferTimeout(0);
Properties props = new Properties();
props.putAll(standardProps);
props.putAll(secureProps);
FlinkKafkaConsumerBase<Integer> kafkaSource = kafkaServer.getConsumer(topic, schema, props);
// Pipeline: source -> partition-count validation -> failure injection -> exactly-once sink.
// The sink runs with parallelism 1 so it can count the complete element set.
env
.addSource(kafkaSource)
.map(new PartitionValidatingMapper(numPartitions, 1))
.map(new FailingIdentityMapper<Integer>(failAfterElements))
.addSink(new ValidatingExactlyOnceSink(totalElements)).setParallelism(1);
// Reset the static failure flag so the mapper fails exactly once in this run.
FailingIdentityMapper.failedBefore = false;
tryExecute(env, "multi-source-one-partitions exactly once test");
deleteTestTopic(topic);
}