Java RichParallelSourceFunction Class Code Examples

This article collects typical usage examples of the Java class org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction. If you are wondering what RichParallelSourceFunction is for and how to use it in practice, the examples selected below should help.


The RichParallelSourceFunction class belongs to the org.apache.flink.streaming.api.functions.source package. Three code examples of the class are shown below, ordered by popularity by default.
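
Before the extracted examples, here is a minimal, self-contained sketch of the class in use. It assumes the SourceFunction-based DataStream API (deprecated in recent Flink releases in favor of the unified Source interface); the class name ParallelCounterSource and the job name are illustrative, not taken from any of the projects below.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;

public class ParallelCounterSource extends RichParallelSourceFunction<Long> {

	// volatile: cancel() is invoked from a different thread than run()
	private volatile boolean running = true;

	@Override
	public void run(SourceContext<Long> ctx) throws Exception {
		// each parallel subtask emits its own disjoint sequence
		long value = getRuntimeContext().getIndexOfThisSubtask();
		long stride = getRuntimeContext().getNumberOfParallelSubtasks();
		while (running) {
			// emit under the checkpoint lock so records and checkpoints do not interleave
			synchronized (ctx.getCheckpointLock()) {
				ctx.collect(value);
				value += stride;
			}
			Thread.sleep(100);
		}
	}

	@Override
	public void cancel() {
		running = false;
	}

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		DataStream<Long> numbers = env.addSource(new ParallelCounterSource()).setParallelism(2);
		numbers.print();
		env.execute("parallel counter");
	}
}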

Example 1: runAutoOffsetResetTest

import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction; // import the required class
public void runAutoOffsetResetTest() throws Exception {
	final String topic = "auto-offset-reset-test";

	final int parallelism = 1;
	final int elementsPerPartition = 50000;

	Properties tprops = new Properties();
	tprops.setProperty("retention.ms", "250");
	kafkaServer.createTestTopic(topic, parallelism, 1, tprops);

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.setRestartStrategy(RestartStrategies.noRestart()); // fail immediately
	env.getConfig().disableSysoutLogging();

	// ----------- add producer dataflow ----------

	DataStream<String> stream = env.addSource(new RichParallelSourceFunction<String>() {

		private volatile boolean running = true; // written by cancel() from another thread

		@Override
		public void run(SourceContext<String> ctx) throws InterruptedException {
			int cnt = getRuntimeContext().getIndexOfThisSubtask() * elementsPerPartition;
			int limit = cnt + elementsPerPartition;

			while (running && !stopProducer && cnt < limit) {
				ctx.collect("element-" + cnt);
				cnt++;
				Thread.sleep(10);
			}
			LOG.info("Stopping producer");
		}

		@Override
		public void cancel() {
			running = false;
		}
	});
	Properties props = new Properties();
	props.putAll(standardProps);
	props.putAll(secureProps);
	kafkaServer.produceIntoKafka(stream, topic, new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), props, null);

	// ----------- add consumer dataflow ----------

	NonContinousOffsetsDeserializationSchema deserSchema = new NonContinousOffsetsDeserializationSchema();
	FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer(topic, deserSchema, props);

	DataStreamSource<String> consuming = env.addSource(source);
	consuming.addSink(new DiscardingSink<String>());

	tryExecute(env, "run auto offset reset test");

	kafkaServer.deleteTestTopic(topic);
}
 
Developer: axbaretto; Project: flink; Lines of code: 57; Source file: KafkaShortRetentionTestBase.java
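
The key to this test is the very short retention.ms=250 on the topic: the broker discards records almost as soon as they are produced, so by the time the consumer asks for its offsets they presumably no longer exist, which forces Kafka's auto.offset.reset handling — the behavior that KafkaShortRetentionTestBase is exercising.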

Example 2: runAutoOffsetResetTest

import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction; // import the required class
public void runAutoOffsetResetTest() throws Exception {
	final String topic = "auto-offset-reset-test";

	final int parallelism = 1;
	final int elementsPerPartition = 50000;

	Properties tprops = new Properties();
	tprops.setProperty("retention.ms", "250");
	kafkaServer.createTestTopic(topic, parallelism, 1, tprops);

	final StreamExecutionEnvironment env =
			StreamExecutionEnvironment.createRemoteEnvironment("localhost", flink.getLeaderRPCPort());
	env.setParallelism(parallelism);
	env.setRestartStrategy(RestartStrategies.noRestart()); // fail immediately
	env.getConfig().disableSysoutLogging();

	// ----------- add producer dataflow ----------

	DataStream<String> stream = env.addSource(new RichParallelSourceFunction<String>() {

		private volatile boolean running = true; // written by cancel() from another thread

		@Override
		public void run(SourceContext<String> ctx) throws InterruptedException {
			int cnt = getRuntimeContext().getIndexOfThisSubtask() * elementsPerPartition;
			int limit = cnt + elementsPerPartition;

			while (running && !stopProducer && cnt < limit) {
				ctx.collect("element-" + cnt);
				cnt++;
				Thread.sleep(10);
			}
			LOG.info("Stopping producer");
		}

		@Override
		public void cancel() {
			running = false;
		}
	});
	Properties props = new Properties();
	props.putAll(standardProps);
	props.putAll(secureProps);
	kafkaServer.produceIntoKafka(stream, topic, new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), props, null);

	// ----------- add consumer dataflow ----------

	NonContinousOffsetsDeserializationSchema deserSchema = new NonContinousOffsetsDeserializationSchema();
	FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer(topic, deserSchema, props);

	DataStreamSource<String> consuming = env.addSource(source);
	consuming.addSink(new DiscardingSink<String>());

	tryExecute(env, "run auto offset reset test");

	kafkaServer.deleteTestTopic(topic);
}
 
Developer: axbaretto; Project: flink; Lines of code: 61; Source file: KafkaShortRetentionTestBase.java
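
Example 2 is the same test as Example 1; the only difference is that it builds the job with StreamExecutionEnvironment.createRemoteEnvironment("localhost", flink.getLeaderRPCPort()), submitting to a test-local cluster instead of using the default execution environment.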

Example 3: main

import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction; // import the required class
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
	final ParameterTool pt = ParameterTool.fromArgs(args);
	see.getConfig().setGlobalJobParameters(pt);
	see.getConfig().enableObjectReuse();

	// see.setParallelism(1);

	DataStreamSource<Integer> src = see.addSource(new RichParallelSourceFunction<Integer>() {

		volatile boolean running = true; // written by cancel() from another thread
		@Override
		public void run(SourceContext<Integer> ctx) throws Exception {
			int i = 0;
			while (running) {
				ctx.collect(i++);
			}
		}

		@Override
		public void cancel() {
			running = false;
		}
	});

	// a second identical map() was chained in here originally; re-enable it to benchmark a longer operator chain
	src.map(new MapFunction<Integer, Integer>() {
		@Override
		public Integer map(Integer s) throws Exception {
			return s;
		}
	}).flatMap(new RichFlatMapFunction<Integer, Integer>() {
		long received = 0;
		long logfreq = pt.getInt("logfreq"); // log a throughput sample every "logfreq" elements
		long lastLog = -1;
		long lastElements = 0;

		@Override
		public void flatMap(Integer in, Collector<Integer> collector) throws Exception {

			received++;
			if (received % logfreq == 0) {
				long now = System.currentTimeMillis();

				if (lastLog == -1) {
					// first sample: just remember the current position
					lastLog = now;
					lastElements = received;
				} else {
					// throughput over the last "logfreq" elements
					long timeDiff = now - lastLog;
					long elementDiff = received - lastElements;
					double ex = (1000 / (double) timeDiff); // scales the per-interval count to per-second
					LOG.info("During the last {} ms, we received {} elements. That's {} elements/second/core.", timeDiff, elementDiff, (long) (elementDiff * ex));
					// reset the measurement window
					lastLog = now;
					lastElements = received;
				}
			}
		}
	});

	see.execute();
}
 
Developer: project-flink; Project: flink-perf; Lines of code: 77; Source file: ChainingSpeed.java
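
This example (ChainingSpeed) is a throughput micro-benchmark for operator chaining: the source emits integers as fast as it can, one or more identity map operators are chained in front of a RichFlatMapFunction, and every logfreq records the flatMap logs the elements/second rate for the last window. Re-enabling the commented-out second map lets you compare chain lengths, and enableObjectReuse() avoids defensive copies between chained operators, which would otherwise add per-record overhead to the measurement.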


Note: The org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets come from open-source projects and their copyright remains with the original authors; consult the corresponding project's license before using or redistributing them. Do not reproduce this article without permission.