

Java StreamExecutionEnvironment.disableOperatorChaining Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.disableOperatorChaining. If you are wondering what StreamExecutionEnvironment.disableOperatorChaining does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of its enclosing class, org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.


The examples below show 13 uses of the StreamExecutionEnvironment.disableOperatorChaining method, sorted by popularity by default.
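Before turning to the examples, a quick note on what the method does: by default, Flink chains compatible neighboring operators (for example, a source followed by a map) into a single task to avoid serialization and thread hand-over between them; calling disableOperatorChaining() forces every operator into its own task, which is mainly useful for debugging and for per-operator metrics. Below is a minimal, self-contained sketch of the call (the class name and sample data are ours, not taken from the examples that follow):

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ChainingDemo {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Force each operator into its own task; without this call, the source,
		// map, and print sink below would be fused into a single chained task.
		env.disableOperatorChaining();

		env.fromElements("flink", "operator", "chaining")
			.map(new MapFunction<String, String>() {
				@Override
				public String map(String value) {
					return value.toUpperCase();
				}
			})
			.print();

		env.execute("Chaining disabled demo");
	}
}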

Example 1: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required package/class
public static void main(String[] args) throws Exception {
	// parse arguments
	ParameterTool params = ParameterTool.fromPropertiesFile(args[0]);

	// create streaming environment
	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	// enable event time processing
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

	// enable fault-tolerance
	env.enableCheckpointing(1000);

	// enable restarts
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(50, 500L));

	env.setStateBackend(new FsStateBackend("file:///home/robert/flink-workdir/flink-streaming-etl/state-backend"));

	// run each operator separately
	env.disableOperatorChaining();

	// get data from Kafka
	Properties kParams = params.getProperties();
	kParams.setProperty("group.id", UUID.randomUUID().toString());
	DataStream<ObjectNode> inputStream = env.addSource(new FlinkKafkaConsumer09<>(params.getRequired("topic"), new JSONDeserializationSchema(), kParams)).name("Kafka 0.9 Source")
		.assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor<ObjectNode>(Time.minutes(1L)) {
			@Override
			public long extractTimestamp(ObjectNode jsonNodes) {
				return jsonNodes.get("timestamp_ms").asLong();
			}
		}).name("Timestamp extractor");

	// filter out records without lang field
	DataStream<ObjectNode> tweetsWithLang = inputStream.filter(jsonNode -> jsonNode.has("user") && jsonNode.get("user").has("lang")).name("Filter records without 'lang' field");

	// select only lang = "en" tweets
	DataStream<ObjectNode> englishTweets = tweetsWithLang.filter(jsonNode -> jsonNode.get("user").get("lang").asText().equals("en")).name("Select 'lang'=en tweets");

	// write to file system
	RollingSink<ObjectNode> rollingSink = new RollingSink<>(params.get("sinkPath", "/home/robert/flink-workdir/flink-streaming-etl/rolling-sink"));
	rollingSink.setBucketer(new DateTimeBucketer("yyyy-MM-dd-HH-mm")); // create one bucket per minute
	englishTweets.addSink(rollingSink).name("Rolling FileSystem Sink");

	// build aggregates (count per language) using window (10 seconds tumbling):
	DataStream<Tuple3<Long, String, Long>> languageCounts = tweetsWithLang.keyBy(jsonNode -> jsonNode.get("user").get("lang").asText())
		.timeWindow(Time.seconds(10))
		.apply(new Tuple3<>(0L, "", 0L), new JsonFoldCounter(), new CountEmitter()).name("Count per Language (10 seconds tumbling)");

	// write window aggregate to ElasticSearch
	List<InetSocketAddress> transportNodes = ImmutableList.of(new InetSocketAddress(InetAddress.getByName("localhost"), 9300));
	ElasticsearchSink<Tuple3<Long, String, Long>> elasticsearchSink = new ElasticsearchSink<>(params.toMap(), transportNodes, new ESRequest());

	languageCounts.addSink(elasticsearchSink).name("ElasticSearch2 Sink");

	// word-count on the tweet stream
	DataStream<Tuple2<Date, List<Tuple2<String, Long>>>> topWordCount = tweetsWithLang
		// get text from tweets
		.map(tweet -> tweet.get("text").asText()).name("Get text from Tweets")
		// split text into (word, 1) tuples
		.flatMap(new FlatMapFunction<String, Tuple2<String, Long>>() {
			@Override
			public void flatMap(String s, Collector<Tuple2<String, Long>> collector) throws Exception {
				String[] splits = s.split(" ");
				for (String sp : splits) {
					collector.collect(new Tuple2<>(sp, 1L));
				}
			}
		}).name("Tokenize words")
		// group by word
		.keyBy(0)
		// build 1 min windows, compute every 10 seconds --> count word frequency
		.timeWindow(Time.minutes(1L), Time.seconds(10L)).apply(new WordCountingWindow()).name("Count word frequency (1 min, 10 sec sliding window)")
		// build top n every 10 seconds
		.timeWindowAll(Time.seconds(10L)).apply(new TopNWords(10)).name("TopN Window (10s)");

	// write top Ns to Kafka topic
	topWordCount.addSink(new FlinkKafkaProducer09<>(params.getRequired("wc-topic"), new ListSerSchema(), params.getProperties())).name("Write topN to Kafka");

	env.execute("Streaming ETL");

}
 
Developer ID: rmetzger, Project: flink-streaming-etl, Lines of code: 82, Source: StreamingETL.java

Example 2: testProgram

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required package/class
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	env.enableCheckpointing(20);
	env.setParallelism(12);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	BucketingSink<String> sink = new BucketingSink<String>(outPath)
			.setBucketer(new BasePathBucketer<String>())
			.setBatchSize(10000)
			.setValidLengthPrefix("")
			.setPendingPrefix("")
			.setPendingSuffix(PENDING_SUFFIX)
			.setInProgressSuffix(IN_PROGRESS_SUFFIX);

	mapped.addSink(sink);

}
 
Developer ID: axbaretto, Project: flink, Lines of code: 25, Source: BucketingSinkFaultToleranceITCase.java

Example 3: testProgram

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required package/class
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	env.enableCheckpointing(20);
	env.setParallelism(12);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	RollingSink<String> sink = new RollingSink<String>(outPath)
			.setBucketer(new NonRollingBucketer())
			.setBatchSize(10000)
			.setValidLengthPrefix("")
			.setPendingPrefix("")
			.setPendingSuffix(PENDING_SUFFIX)
			.setInProgressSuffix(IN_PROGRESS_SUFFIX);

	mapped.addSink(sink);

}
 
Developer ID: axbaretto, Project: flink, Lines of code: 25, Source: RollingSinkFaultToleranceITCase.java

Example 4: createJobGraph

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required package/class
/**
 * Creates a streaming JobGraph from the StreamEnvironment.
 */
private JobGraph createJobGraph(
	int parallelism,
	int numberOfRetries,
	long restartDelay) {

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.disableOperatorChaining();
	env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(numberOfRetries, restartDelay));
	env.getConfig().disableSysoutLogging();

	DataStream<Integer> stream = env
		.addSource(new InfiniteTestSource())
		.shuffle()
		.map(new StatefulCounter());

	stream.addSink(new DiscardingSink<Integer>());

	return env.getStreamGraph().getJobGraph();
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 24, Source: SavepointITCase.java

Example 5: testNodeHashIdenticalSources

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required package/class
/**
 * Tests that there are no collisions with two identical sources.
 *
 * <pre>
 * [ (src0) ] --\
 *               +--> [ (sink) ]
 * [ (src1) ] --/
 * </pre>
 */
@Test
public void testNodeHashIdenticalSources() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
	env.setParallelism(4);
	env.disableOperatorChaining();

	DataStream<String> src0 = env.addSource(new NoOpSourceFunction());
	DataStream<String> src1 = env.addSource(new NoOpSourceFunction());

	src0.union(src1).addSink(new NoOpSinkFunction());

	JobGraph jobGraph = env.getStreamGraph().getJobGraph();

	List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();
	assertTrue(vertices.get(0).isInputVertex());
	assertTrue(vertices.get(1).isInputVertex());

	assertNotNull(vertices.get(0).getID());
	assertNotNull(vertices.get(1).getID());

	assertNotEquals(vertices.get(0).getID(), vertices.get(1).getID());
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 32, Source: StreamingJobGraphGeneratorNodeHashTest.java

Example 6: testNodeHashIdenticalNodes

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required package/class
/**
 * Tests that there are no collisions with two identical intermediate nodes connected to the
 * same predecessor.
 *
 * <pre>
 *             /-> [ (map) ] -> [ (sink) ]
 * [ (src) ] -+
 *             \-> [ (map) ] -> [ (sink) ]
 * </pre>
 */
@Test
public void testNodeHashIdenticalNodes() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
	env.setParallelism(4);
	env.disableOperatorChaining();

	DataStream<String> src = env.addSource(new NoOpSourceFunction());

	src.map(new NoOpMapFunction()).addSink(new NoOpSinkFunction());

	src.map(new NoOpMapFunction()).addSink(new NoOpSinkFunction());

	JobGraph jobGraph = env.getStreamGraph().getJobGraph();
	Set<JobVertexID> vertexIds = new HashSet<>();
	for (JobVertex vertex : jobGraph.getVertices()) {
		assertTrue(vertexIds.add(vertex.getID()));
	}
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 29, Source: StreamingJobGraphGeneratorNodeHashTest.java

Example 7: testProgram

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required package/class
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	int PARALLELISM = 12;

	env.enableCheckpointing(Long.MAX_VALUE);
	env.setParallelism(PARALLELISM);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	BucketingSink<String> sink = new BucketingSink<String>(outPath)
			.setBucketer(new BasePathBucketer<String>())
			.setBatchSize(5000)
			.setValidLengthPrefix("")
			.setPendingPrefix("");

	mapped.addSink(sink);

}
 
Developer ID: axbaretto, Project: flink, Lines of code: 25, Source: BucketingSinkFaultTolerance2ITCase.java

Example 8: testProgram

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required package/class
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	int PARALLELISM = 12;

	env.enableCheckpointing(20);
	env.setParallelism(PARALLELISM);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	BucketingSink<String> sink = new BucketingSink<String>(outPath)
			.setBucketer(new BasePathBucketer<String>())
			.setBatchSize(10000)
			.setValidLengthPrefix("")
			.setPendingPrefix("")
			.setPendingSuffix(PENDING_SUFFIX)
			.setInProgressSuffix(IN_PROGRESS_SUFFIX);

	mapped.addSink(sink);

}
 
Developer ID: axbaretto, Project: flink, Lines of code: 27, Source: BucketingSinkFaultToleranceITCase.java

Example 9: testProgram

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required package/class
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	int PARALLELISM = 12;

	env.enableCheckpointing(Long.MAX_VALUE);
	env.setParallelism(PARALLELISM);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	RollingSink<String> sink = new RollingSink<String>(outPath)
			.setBucketer(new NonRollingBucketer())
			.setBatchSize(5000)
			.setValidLengthPrefix("")
			.setPendingPrefix("");

	mapped.addSink(sink);

}
 
Developer ID: axbaretto, Project: flink, Lines of code: 25, Source: RollingSinkFaultTolerance2ITCase.java

Example 10: testProgram

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required package/class
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	int PARALLELISM = 12;

	env.enableCheckpointing(20);
	env.setParallelism(PARALLELISM);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	RollingSink<String> sink = new RollingSink<String>(outPath)
			.setBucketer(new NonRollingBucketer())
			.setBatchSize(10000)
			.setValidLengthPrefix("")
			.setPendingPrefix("")
			.setPendingSuffix(PENDING_SUFFIX)
			.setInProgressSuffix(IN_PROGRESS_SUFFIX);

	mapped.addSink(sink);

}
 
Developer ID: axbaretto, Project: flink, Lines of code: 27, Source: RollingSinkFaultToleranceITCase.java

Example 11: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required package/class
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.getConfig().disableSysoutLogging();
	env.enableCheckpointing(CHECKPOINT_INTERVALL);
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 10000));
	env.disableOperatorChaining();

	DataStream<String> text = env.addSource(new SimpleStringGenerator());
	text.map(new StatefulMapper()).addSink(new NoOpSink());
	env.setParallelism(1);
	env.execute("Checkpointed Streaming Program");
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 13, Source: LegacyCheckpointedStreamingProgram.java

Example 12: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required package/class
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	env.getConfig().disableSysoutLogging();
	env.enableCheckpointing(CHECKPOINT_INTERVALL);
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 100L));
	env.disableOperatorChaining();

	DataStream<String> text = env.addSource(new SimpleStringGenerator());
	text.map(new StatefulMapper()).addSink(new NoOpSink());
	env.setParallelism(1);
	env.execute("Checkpointed Streaming Program");
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 14, Source: CheckpointedStreamingProgram.java

Example 13: testManualHashAssignmentCollisionThrowsException

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required package/class
/**
 * Tests that a collision on the manual hash throws an Exception.
 */
@Test(expected = IllegalArgumentException.class)
public void testManualHashAssignmentCollisionThrowsException() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
	env.setParallelism(4);
	env.disableOperatorChaining();

	env.addSource(new NoOpSourceFunction()).uid("source")
			.map(new NoOpMapFunction()).uid("source") // Collision
			.addSink(new NoOpSinkFunction());

	// This call is necessary to generate the job graph
	env.getStreamGraph().getJobGraph();
}
 
Developer ID: axbaretto, Project: flink, Lines of code: 17, Source: StreamingJobGraphGeneratorNodeHashTest.java
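
For contrast with Example 13, here is a minimal sketch in which each operator gets a unique uid, so generating the job graph succeeds instead of throwing IllegalArgumentException. The pipeline is a stand-in of our own (the test's NoOpSourceFunction, NoOpMapFunction, and NoOpSinkFunction helpers are not public API):

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class UniqueUidDemo {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
		env.setParallelism(4);
		env.disableOperatorChaining();

		env.fromElements(1, 2, 3).uid("source")
			.map(new MapFunction<Integer, Integer>() {
				@Override
				public Integer map(Integer value) {
					return value;
				}
			}).uid("mapper") // unique uid, so no collision with "source"
			.print();

		// Builds the job graph without throwing, unlike the colliding uids above.
		env.getStreamGraph().getJobGraph();
	}
}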


Note: The org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.disableOperatorChaining method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not republish without permission.