This article collects typical usage examples of the Java method org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.disableOperatorChaining. If you are wondering what StreamExecutionEnvironment.disableOperatorChaining does and how to use it, the curated code samples below should help. You can also explore further usage of the enclosing class, org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.
Below are 13 code examples of StreamExecutionEnvironment.disableOperatorChaining, ordered by popularity by default.
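For orientation before the examples: by default Flink chains compatible operators (same parallelism, forward connection) into a single task; disableOperatorChaining() switches this off for the whole job, so every operator runs as its own task. That makes per-operator behavior visible (names, metrics, backpressure) at the cost of extra serialization and hand-over between tasks. A minimal, self-contained sketch of the call (class name, input data, and job name are illustrative, not taken from the examples below):

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class DisableChainingDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Without this call, the map below would normally be chained into the
        // source task; with it, source, map, and sink each run as a separate task.
        env.disableOperatorChaining();
        env.fromElements("flink", "operator", "chaining")
            .map(new MapFunction<String, String>() {
                @Override
                public String map(String value) {
                    return value.toUpperCase();
                }
            }).name("Uppercase mapper")
            .print();
        env.execute("DisableChainingDemo");
    }
}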
Example 1: main
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    // parse arguments
    ParameterTool params = ParameterTool.fromPropertiesFile(args[0]);

    // create streaming environment
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // enable event time processing
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    // enable fault-tolerance
    env.enableCheckpointing(1000);

    // enable restarts
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(50, 500L));
    env.setStateBackend(new FsStateBackend("file:///home/robert/flink-workdir/flink-streaming-etl/state-backend"));

    // run each operator separately
    env.disableOperatorChaining();

    // get data from Kafka
    Properties kParams = params.getProperties();
    kParams.setProperty("group.id", UUID.randomUUID().toString());
    DataStream<ObjectNode> inputStream = env.addSource(new FlinkKafkaConsumer09<>(params.getRequired("topic"), new JSONDeserializationSchema(), kParams)).name("Kafka 0.9 Source")
        .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor<ObjectNode>(Time.minutes(1L)) {
            @Override
            public long extractTimestamp(ObjectNode jsonNodes) {
                return jsonNodes.get("timestamp_ms").asLong();
            }
        }).name("Timestamp extractor");

    // filter out records without lang field
    DataStream<ObjectNode> tweetsWithLang = inputStream.filter(jsonNode -> jsonNode.has("user") && jsonNode.get("user").has("lang")).name("Filter records without 'lang' field");

    // select only lang = "en" tweets
    DataStream<ObjectNode> englishTweets = tweetsWithLang.filter(jsonNode -> jsonNode.get("user").get("lang").asText().equals("en")).name("Select 'lang'=en tweets");

    // write to file system
    RollingSink<ObjectNode> rollingSink = new RollingSink<>(params.get("sinkPath", "/home/robert/flink-workdir/flink-streaming-etl/rolling-sink"));
    rollingSink.setBucketer(new DateTimeBucketer("yyyy-MM-dd-HH-mm")); // one bucket per minute
    englishTweets.addSink(rollingSink).name("Rolling FileSystem Sink");

    // build aggregates (count per language) using a 10-second tumbling window:
    DataStream<Tuple3<Long, String, Long>> languageCounts = tweetsWithLang.keyBy(jsonNode -> jsonNode.get("user").get("lang").asText())
        .timeWindow(Time.seconds(10))
        .apply(new Tuple3<>(0L, "", 0L), new JsonFoldCounter(), new CountEmitter()).name("Count per Language (10 seconds tumbling)");

    // write window aggregates to Elasticsearch
    List<InetSocketAddress> transportNodes = ImmutableList.of(new InetSocketAddress(InetAddress.getByName("localhost"), 9300));
    ElasticsearchSink<Tuple3<Long, String, Long>> elasticsearchSink = new ElasticsearchSink<>(params.toMap(), transportNodes, new ESRequest());
    languageCounts.addSink(elasticsearchSink).name("ElasticSearch2 Sink");

    // word-count on the tweet stream
    DataStream<Tuple2<Date, List<Tuple2<String, Long>>>> topWordCount = tweetsWithLang
        // get text from tweets
        .map(tweet -> tweet.get("text").asText()).name("Get text from Tweets")
        // split text into (word, 1) tuples
        .flatMap(new FlatMapFunction<String, Tuple2<String, Long>>() {
            @Override
            public void flatMap(String s, Collector<Tuple2<String, Long>> collector) throws Exception {
                String[] splits = s.split(" ");
                for (String sp : splits) {
                    collector.collect(new Tuple2<>(sp, 1L));
                }
            }
        }).name("Tokenize words")
        // group by word
        .keyBy(0)
        // build 1 min windows, compute every 10 seconds --> count word frequency
        .timeWindow(Time.minutes(1L), Time.seconds(10L)).apply(new WordCountingWindow()).name("Count word frequency (1 min, 10 sec sliding window)")
        // build top n every 10 seconds
        .timeWindowAll(Time.seconds(10L)).apply(new TopNWords(10)).name("TopN Window (10s)");

    // write top Ns to Kafka topic
    topWordCount.addSink(new FlinkKafkaProducer09<>(params.getRequired("wc-topic"), new ListSerSchema(), params.getProperties())).name("Write topN to Kafka");

    env.execute("Streaming ETL");
}
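In this ETL job, disabling chaining means each named stage (Kafka source, filters, rolling sink, windows) appears as its own vertex in the Flink web UI, so per-stage throughput and backpressure can be inspected individually; the trade-off is the serialization overhead between tasks that chaining would otherwise avoid.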
Example 2: testProgram
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
    assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

    env.enableCheckpointing(20);
    env.setParallelism(12);
    env.disableOperatorChaining();

    DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

    DataStream<String> mapped = stream
        .map(new OnceFailingIdentityMapper(NUM_STRINGS));

    BucketingSink<String> sink = new BucketingSink<String>(outPath)
        .setBucketer(new BasePathBucketer<String>())
        .setBatchSize(10000)
        .setValidLengthPrefix("")
        .setPendingPrefix("")
        .setPendingSuffix(PENDING_SUFFIX)
        .setInProgressSuffix(IN_PROGRESS_SUFFIX);

    mapped.addSink(sink);
}
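Fault-tolerance tests like this one plausibly disable chaining so that records and checkpoint barriers actually cross task boundaries instead of being handed along inside one chained task. Note also the combination with .startNewChain() on the source: chaining can be steered per operator as well as globally. A brief sketch of the per-operator variants (the pipeline itself is illustrative, assuming the usual org.apache.flink.api.common.functions imports):

env.fromElements(1, 2, 3)
    .map(new MapFunction<Integer, Integer>() {
        @Override
        public Integer map(Integer value) {
            return value * 2;
        }
    }).startNewChain()        // begin a fresh chain at this operator
    .filter(new FilterFunction<Integer>() {
        @Override
        public boolean filter(Integer value) {
            return value > 2;
        }
    }).disableChaining()      // keep this operator out of any chain
    .print();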
Example 3: testProgram
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
    assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

    env.enableCheckpointing(20);
    env.setParallelism(12);
    env.disableOperatorChaining();

    DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

    DataStream<String> mapped = stream
        .map(new OnceFailingIdentityMapper(NUM_STRINGS));

    RollingSink<String> sink = new RollingSink<String>(outPath)
        .setBucketer(new NonRollingBucketer())
        .setBatchSize(10000)
        .setValidLengthPrefix("")
        .setPendingPrefix("")
        .setPendingSuffix(PENDING_SUFFIX)
        .setInProgressSuffix(IN_PROGRESS_SUFFIX);

    mapped.addSink(sink);
}
Example 4: createJobGraph
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
/**
 * Creates a streaming JobGraph from the StreamEnvironment.
 */
private JobGraph createJobGraph(
        int parallelism,
        int numberOfRetries,
        long restartDelay) {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    env.disableOperatorChaining();
    env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(numberOfRetries, restartDelay));
    env.getConfig().disableSysoutLogging();

    DataStream<Integer> stream = env
        .addSource(new InfiniteTestSource())
        .shuffle()
        .map(new StatefulCounter());

    stream.addSink(new DiscardingSink<Integer>());

    return env.getStreamGraph().getJobGraph();
}
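With chaining disabled (and a shuffle between source and map, which would break the chain there anyway), each of the three operators becomes its own JobVertex; env.getStreamGraph().getJobGraph() materializes that graph without executing the job, presumably so the surrounding test can submit it under the configured restart settings.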
Example 5: testNodeHashIdenticalSources
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
/**
 * Tests that there are no collisions with two identical sources.
 *
 * <pre>
 * [ (src0) ] --\
 *               +--> [ (sink) ]
 * [ (src1) ] --/
 * </pre>
 */
@Test
public void testNodeHashIdenticalSources() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(4);
    env.disableOperatorChaining();

    DataStream<String> src0 = env.addSource(new NoOpSourceFunction());
    DataStream<String> src1 = env.addSource(new NoOpSourceFunction());

    src0.union(src1).addSink(new NoOpSinkFunction());

    JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();

    assertTrue(vertices.get(0).isInputVertex());
    assertTrue(vertices.get(1).isInputVertex());
    assertNotNull(vertices.get(0).getID());
    assertNotNull(vertices.get(1).getID());
    assertNotEquals(vertices.get(0).getID(), vertices.get(1).getID());
}
Example 6: testNodeHashIdenticalNodes
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
/**
 * Tests that there are no collisions with two identical intermediate nodes connected to the
 * same predecessor.
 *
 * <pre>
 *             /-> [ (map) ] -> [ (sink) ]
 * [ (src) ] -+
 *             \-> [ (map) ] -> [ (sink) ]
 * </pre>
 */
@Test
public void testNodeHashIdenticalNodes() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(4);
    env.disableOperatorChaining();

    DataStream<String> src = env.addSource(new NoOpSourceFunction());

    src.map(new NoOpMapFunction()).addSink(new NoOpSinkFunction());
    src.map(new NoOpMapFunction()).addSink(new NoOpSinkFunction());

    JobGraph jobGraph = env.getStreamGraph().getJobGraph();

    Set<JobVertexID> vertexIds = new HashSet<>();
    for (JobVertex vertex : jobGraph.getVertices()) {
        assertTrue(vertexIds.add(vertex.getID()));
    }
}
Example 7: testProgram
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
    assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

    int PARALLELISM = 12;

    env.enableCheckpointing(Long.MAX_VALUE);
    env.setParallelism(PARALLELISM);
    env.disableOperatorChaining();

    DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

    DataStream<String> mapped = stream
        .map(new OnceFailingIdentityMapper(NUM_STRINGS));

    BucketingSink<String> sink = new BucketingSink<String>(outPath)
        .setBucketer(new BasePathBucketer<String>())
        .setBatchSize(5000)
        .setValidLengthPrefix("")
        .setPendingPrefix("");

    mapped.addSink(sink);
}
Example 8: testProgram
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
    assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

    int PARALLELISM = 12;

    env.enableCheckpointing(20);
    env.setParallelism(PARALLELISM);
    env.disableOperatorChaining();

    DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

    DataStream<String> mapped = stream
        .map(new OnceFailingIdentityMapper(NUM_STRINGS));

    BucketingSink<String> sink = new BucketingSink<String>(outPath)
        .setBucketer(new BasePathBucketer<String>())
        .setBatchSize(10000)
        .setValidLengthPrefix("")
        .setPendingPrefix("")
        .setPendingSuffix(PENDING_SUFFIX)
        .setInProgressSuffix(IN_PROGRESS_SUFFIX);

    mapped.addSink(sink);
}
Example 9: testProgram
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
    assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

    int PARALLELISM = 12;

    env.enableCheckpointing(Long.MAX_VALUE);
    env.setParallelism(PARALLELISM);
    env.disableOperatorChaining();

    DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

    DataStream<String> mapped = stream
        .map(new OnceFailingIdentityMapper(NUM_STRINGS));

    RollingSink<String> sink = new RollingSink<String>(outPath)
        .setBucketer(new NonRollingBucketer())
        .setBatchSize(5000)
        .setValidLengthPrefix("")
        .setPendingPrefix("");

    mapped.addSink(sink);
}
Example 10: testProgram
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
    assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

    int PARALLELISM = 12;

    env.enableCheckpointing(20);
    env.setParallelism(PARALLELISM);
    env.disableOperatorChaining();

    DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

    DataStream<String> mapped = stream
        .map(new OnceFailingIdentityMapper(NUM_STRINGS));

    RollingSink<String> sink = new RollingSink<String>(outPath)
        .setBucketer(new NonRollingBucketer())
        .setBatchSize(10000)
        .setValidLengthPrefix("")
        .setPendingPrefix("")
        .setPendingSuffix(PENDING_SUFFIX)
        .setInProgressSuffix(IN_PROGRESS_SUFFIX);

    mapped.addSink(sink);
}
Example 11: main
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().disableSysoutLogging();
    env.enableCheckpointing(CHECKPOINT_INTERVALL);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 10000));
    env.disableOperatorChaining();

    DataStream<String> text = env.addSource(new SimpleStringGenerator());
    text.map(new StatefulMapper()).addSink(new NoOpSink());
    env.setParallelism(1);
    env.execute("Checkpointed Streaming Program");
}
Example 12: main
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().disableSysoutLogging();
    env.enableCheckpointing(CHECKPOINT_INTERVALL);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 100L));
    env.disableOperatorChaining();

    DataStream<String> text = env.addSource(new SimpleStringGenerator());
    text.map(new StatefulMapper()).addSink(new NoOpSink());
    env.setParallelism(1);
    env.execute("Checkpointed Streaming Program");
}
Example 13: testManualHashAssignmentCollisionThrowsException
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import the package/class the method depends on
/**
 * Tests that a collision on the manual hash throws an Exception.
 */
@Test(expected = IllegalArgumentException.class)
public void testManualHashAssignmentCollisionThrowsException() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(4);
    env.disableOperatorChaining();

    env.addSource(new NoOpSourceFunction()).uid("source")
        .map(new NoOpMapFunction()).uid("source") // collision
        .addSink(new NoOpSinkFunction());

    // This call is necessary to generate the job graph
    env.getStreamGraph().getJobGraph();
}
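The IllegalArgumentException here comes from reusing the uid "source" on two different operators. For contrast, a sketch of the intended pattern with one unique uid per operator (the uid strings are illustrative):

env.addSource(new NoOpSourceFunction()).uid("demo-source")
    .map(new NoOpMapFunction()).uid("demo-map")
    .addSink(new NoOpSinkFunction()).uid("demo-sink");

// with unique uids, generating the job graph succeeds
env.getStreamGraph().getJobGraph();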