

Java StreamExecutionEnvironment.addSource Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.addSource. If you are wondering what StreamExecutionEnvironment.addSource does and how to use it, the curated code examples below should help. You can also explore further usage of the enclosing class, org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.


The following presents 15 code examples of the StreamExecutionEnvironment.addSource method, drawn from open-source projects and sorted by popularity by default.
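Before diving into the examples, here is a minimal, self-contained sketch of the method's contract: addSource attaches a SourceFunction<T> (optionally with a source name) to the environment and returns a DataStreamSource<T> of the source's element type. The class and generator below are hypothetical and target the legacy SourceFunction API that all of the examples on this page use.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;

public class AddSourceSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // addSource wires a user-defined SourceFunction into the job graph;
        // the returned DataStream carries the source's element type.
        DataStream<Long> numbers = env.addSource(new SourceFunction<Long>() {
            private volatile boolean running = true;

            @Override
            public void run(SourceContext<Long> ctx) throws Exception {
                long i = 0;
                while (running && i < 10) {
                    ctx.collect(i++); // emit one element downstream
                }
            }

            @Override
            public void cancel() {
                running = false; // invoked by Flink when the job is cancelled
            }
        }, "number-generator"); // hypothetical source name

        numbers.print();
        env.execute("addSource sketch");
    }
}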

Example 1: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();

    Properties properties = new Properties();
    properties.load(new FileInputStream("src/main/resources/application.properties"));

    Properties mqttProperties = new Properties();

    // client id = a:<Organization_ID>:<App_Id>
    mqttProperties.setProperty(MQTTSource.CLIENT_ID,
            String.format("a:%s:%s",
                    properties.getProperty("Org_ID"),
                    properties.getProperty("App_Id")));

    // mqtt server url = tcp://<Org_ID>.messaging.internetofthings.ibmcloud.com:1883
    mqttProperties.setProperty(MQTTSource.URL,
            String.format("tcp://%s.messaging.internetofthings.ibmcloud.com:1883",
                    properties.getProperty("Org_ID")));

    // topic = iot-2/type/<Device_Type>/id/<Device_ID>/evt/<Event_Id>/fmt/json
    mqttProperties.setProperty(MQTTSource.TOPIC,
            String.format("iot-2/type/%s/id/%s/evt/%s/fmt/json",
                    properties.getProperty("Device_Type"),
                    properties.getProperty("Device_ID"),
                    properties.getProperty("EVENT_ID")));

    mqttProperties.setProperty(MQTTSource.USERNAME, properties.getProperty("API_Key"));
    mqttProperties.setProperty(MQTTSource.PASSWORD, properties.getProperty("APP_Authentication_Token"));


    MQTTSource mqttSource = new MQTTSource(mqttProperties);
    DataStreamSource<String> temperatureDataSource = env.addSource(mqttSource);
    DataStream<String> stream = temperatureDataSource.map((MapFunction<String, String>) s -> s);
    stream.print();

    env.execute("Temperature Analysis");
}
 
Developer: pkhanal, Project: flink-watson-iot-connector, Lines: 38, Source: DeviceDataAnalysis.java

Example 2: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	Properties properties = new Properties();
	properties.setProperty("bootstrap.servers", "localhost:9092");
	properties.setProperty("group.id", "test");

	DataStream<TemperatureEvent> inputEventStream = env.addSource(
			new FlinkKafkaConsumer09<TemperatureEvent>("test", new EventDeserializationSchema(), properties));

	Pattern<TemperatureEvent, ?> warningPattern = Pattern.<TemperatureEvent> begin("first")
			.subtype(TemperatureEvent.class).where(new FilterFunction<TemperatureEvent>() {
				private static final long serialVersionUID = 1L;

				public boolean filter(TemperatureEvent value) {
					return value.getTemperature() >= 26.0;
				}
			}).within(Time.seconds(10));

	DataStream<Alert> patternStream = CEP.pattern(inputEventStream, warningPattern)
			.select(new PatternSelectFunction<TemperatureEvent, Alert>() {
				private static final long serialVersionUID = 1L;

				public Alert select(Map<String, TemperatureEvent> event) throws Exception {

					return new Alert("Temperature Rise Detected:" + event.get("first").getTemperature()
							+ " on machine name:" + event.get("first").getMachineName());
				}

			});

	patternStream.print();
	env.execute("CEP on Temperature Sensor");
}
 
Developer: PacktPublishing, Project: Mastering-Apache-Flink, Lines: 38, Source: KafkaApp.java

Example 3: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
public static void main(String[] args) throws Exception {
	Properties properties = new Properties();
	properties.setProperty("bootstrap.servers", "localhost:9092");
	properties.setProperty("zookeeper.connect", "localhost:2181");
	properties.setProperty("group.id", "test");
	properties.setProperty("auto.offset.reset", "latest");
	FlinkKafkaConsumer08<DeviceEvent> flinkKafkaConsumer08 = new FlinkKafkaConsumer08<>("device-data",
			new DeviceSchema(), properties);

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<DeviceEvent> messageStream = env.addSource(flinkKafkaConsumer08);
	
	Map<String, String> config = new HashMap<>();
	config.put("cluster.name", "my-application");
	// This instructs the sink to emit after every element, otherwise they would be buffered
	config.put("bulk.flush.max.actions", "1");

	List<InetSocketAddress> transportAddresses = new ArrayList<>();
	transportAddresses.add(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9300));

	messageStream.addSink(new ElasticsearchSink<DeviceEvent>(config, transportAddresses, new ESSink()));
	env.execute();
}
 
Developer: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 24, Source: FlinkESConnector.java

Example 4: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
public static void main(String... args) throws Exception {

        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<WikipediaEditEvent> edits = env.addSource(new WikipediaEditsSource());

        edits
            .timeWindowAll(Time.minutes(1))
            .apply(new AllWindowFunction<WikipediaEditEvent, Tuple3<Date, Long, Long>, TimeWindow>() {
                @Override
                public void apply(TimeWindow timeWindow, Iterable<WikipediaEditEvent> iterable, Collector<Tuple3<Date, Long, Long>> collector) throws Exception {
                    long count = 0;
                    long bytesChanged = 0;

                    for (WikipediaEditEvent event : iterable) {
                        count++;
                        bytesChanged += event.getByteDiff();
                    }

                    collector.collect(new Tuple3<>(new Date(timeWindow.getEnd()), count, bytesChanged));
                }
            })
            .print();


        env.execute();
    }
 
Developer: mushketyk, Project: flink-examples, Lines: 27, Source: NumberOfWikiEditsPerWindow.java

Example 5: testEventTimeOrderedWriter

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
@Test
public void testEventTimeOrderedWriter() throws Exception {
    StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.createLocalEnvironment();

    String streamName = "testEventTimeOrderedWriter";
    SETUP_UTILS.createTestStream(streamName, 1);

    DataStreamSource<Integer> dataStream = execEnv
            .addSource(new IntegerGeneratingSource(false, EVENT_COUNT_PER_SOURCE));

    FlinkPravegaWriter<Integer> pravegaSink = new FlinkPravegaWriter<>(
            SETUP_UTILS.getControllerUri(),
            SETUP_UTILS.getScope(),
            streamName,
            new IntSerializer(),
            event -> "fixedkey");

    FlinkPravegaUtils.writeToPravegaInEventTimeOrder(dataStream, pravegaSink, 1);
    Assert.assertNotNull(execEnv.getExecutionPlan());
}
 
Developer: pravega, Project: flink-connectors, Lines: 21, Source: FlinkPravegaWriterITCase.java

Example 6: testMultipleUnboundedPojoStreamSimpleUnion

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
@Test
public void testMultipleUnboundedPojoStreamSimpleUnion() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Event> input1 = env.addSource(new RandomEventSource(2), "input1");
    DataStream<Event> input2 = env.addSource(new RandomEventSource(2), "input2");
    DataStream<Event> input3 = env.addSource(new RandomEventSource(2), "input3");
    DataStream<Event> output = SiddhiCEP
        .define("inputStream1", input1, "id", "name", "price", "timestamp")
        .union("inputStream2", input2, "id", "name", "price", "timestamp")
        .union("inputStream3", input3, "id", "name", "price", "timestamp")
        .cql(
            "from inputStream1 select timestamp, id, name, price insert into outputStream;"
                + "from inputStream2 select timestamp, id, name, price insert into outputStream;"
                + "from inputStream3 select timestamp, id, name, price insert into outputStream;"
        )
        .returns("outputStream", Event.class);

    String resultPath = tempFolder.newFile().toURI().toString();
    output.writeAsText(resultPath, FileSystem.WriteMode.OVERWRITE);
    env.execute();
    assertEquals(6, getLineCount(resultPath));
}
 
Developer: apache, Project: bahir-flink, Lines: 23, Source: SiddhiCEPITCase.java

Example 7: createProducerTopology

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
private void createProducerTopology(StreamExecutionEnvironment env, AMQSinkConfig<String> config) {
    DataStreamSource<String> stream = env.addSource(new SourceFunction<String>() {
        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            for (int i = 0; i < MESSAGES_NUM; i++) {
                ctx.collect("amq-" + i);
            }
        }

        @Override
        public void cancel() {}
    });


    AMQSink<String> sink = new AMQSink<>(config);
    stream.addSink(sink);
}
 
Developer: apache, Project: bahir-flink, Lines: 18, Source: ActiveMQConnectorITCase.java

Example 8: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
public static void main(String[] args) throws Exception {
    ParameterTool tool = ParameterTool.fromArgs(args);

    String topic = tool.getRequired("kafka.topic");

    Properties kafkaConsumerProps = new Properties();
    kafkaConsumerProps.setProperty("bootstrap.servers", tool.getRequired("kafkabroker"));
    kafkaConsumerProps.setProperty("group.id", tool.getRequired("kafka.groupId"));
    kafkaConsumerProps.setProperty("zookeeper.connect", tool.get("zookeeper.host", "localhost:2181"));
    kafkaConsumerProps.setProperty("auto.offset.reset", tool.getBoolean("from-beginning", false) ? "smallest" : "largest");

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStream<String> textStream = env
            .addSource(new FlinkKafkaConsumer08<>(topic, new SimpleStringSchema(), kafkaConsumerProps));

    textStream.flatMap(new LineSplitter())
        .keyBy(0)
        .sum(1)
        .print();

    env.execute("WordCount from Kafka Example");
}
 
Developer: godatadriven, Project: flink-streaming-xke, Lines: 24, Source: KafkaStreamingWordCount.java

Example 9: testDisabledTimestamps

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
/**
 * Checks whether timestamps are properly ignored when they are disabled.
 */
@Test
public void testDisabledTimestamps() throws Exception {
	final int numElements = 10;

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);
	env.setParallelism(PARALLELISM);
	env.getConfig().disableSysoutLogging();

	DataStream<Integer> source1 = env.addSource(new MyNonWatermarkingSource(numElements));
	DataStream<Integer> source2 = env.addSource(new MyNonWatermarkingSource(numElements));

	source1
			.map(new IdentityMap())
			.connect(source2).map(new IdentityCoMap())
			.transform("Custom Operator", BasicTypeInfo.INT_TYPE_INFO, new DisabledTimestampCheckingOperator())
			.addSink(new DiscardingSink<Integer>());

	env.execute();
}
 
Developer: axbaretto, Project: flink, Lines: 25, Source: TimestampITCase.java

Example 10: testTransportClientFails

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
@Test(expected = JobExecutionException.class)
public void testTransportClientFails() throws Exception{
	// This checks whether the TransportClient fails early when there is no cluster to
	// connect to. We don't have such a test for the Node Client version, since that
	// one will block and wait for a cluster to come online.

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	DataStreamSource<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction());

	Map<String, String> config = Maps.newHashMap();
	// This instructs the sink to emit after every element, otherwise they would be buffered
	config.put(ElasticsearchSink.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
	config.put("cluster.name", "my-node-client-cluster");

	// connect to our local node
	config.put("node.local", "true");

	List<TransportAddress> transports = Lists.newArrayList();
	transports.add(new LocalTransportAddress("1"));

	source.addSink(new ElasticsearchSink<>(config, transports, new TestIndexRequestBuilder()));

	env.execute("Elasticsearch Node Client Test");
}
 
Developer: axbaretto, Project: flink, Lines: 26, Source: ElasticsearchSinkITCase.java

Example 11: testUnboundedPojoSourceAndReturnTuple

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
@Test
public void testUnboundedPojoSourceAndReturnTuple() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Event> input = env.addSource(new RandomEventSource(5));

    DataStream<Tuple4<Long, Integer, String, Double>> output = SiddhiCEP
        .define("inputStream", input, "id", "name", "price", "timestamp")
        .cql("from inputStream select timestamp, id, name, price insert into  outputStream")
        .returns("outputStream");

    DataStream<Integer> following = output.map(new MapFunction<Tuple4<Long, Integer, String, Double>, Integer>() {
        @Override
        public Integer map(Tuple4<Long, Integer, String, Double> value) throws Exception {
            return value.f1;
        }
    });
    String resultPath = tempFolder.newFile().toURI().toString();
    following.writeAsText(resultPath, FileSystem.WriteMode.OVERWRITE);
    env.execute();
    assertEquals(5, getLineCount(resultPath));
}
 
Developer: apache, Project: bahir-flink, Lines: 22, Source: SiddhiCEPITCase.java

Example 12: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
public static void main(String[] args) throws Exception {
	ParameterTool parameterTool = ParameterTool.fromArgs(args);
	if (parameterTool.getNumberOfParameters() < 2) {
		System.out.println("Missing parameters!");
		System.out.println("Usage: Kafka --topic <topic> --bootstrap.servers <kafka brokers>");
		return;
	}

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.getConfig().disableSysoutLogging();
	env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(4, 10000));

	// very simple data generator
	DataStream<String> messageStream = env.addSource(new SourceFunction<String>() {
		private static final long serialVersionUID = 6369260445318862378L;
		public boolean running = true;

		@Override
		public void run(SourceContext<String> ctx) throws Exception {
			long i = 0;
			while (this.running) {
				ctx.collect("Element - " + i++);
				Thread.sleep(500);
			}
		}

		@Override
		public void cancel() {
			running = false;
		}
	});

	// write data into Kafka
	messageStream.addSink(new FlinkKafkaProducer08<>(parameterTool.getRequired("topic"), new SimpleStringSchema(), parameterTool.getProperties()));

	env.execute("Write into Kafka example");
}
 
Developer: axbaretto, Project: flink, Lines: 38, Source: WriteIntoKafka.java

Example 13: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
public static void main(String args[]) throws Exception {
	Properties properties = new Properties();
	properties.setProperty("bootstrap.servers", "localhost:9092");
	properties.setProperty("zookeeper.connect", "localhost:2181");
	properties.setProperty("group.id", "test");
	properties.setProperty("auto.offset.reset", "latest");  
	FlinkKafkaConsumer08<String> flinkKafkaConsumer08 = new FlinkKafkaConsumer08<>("flink-test",
			new SimpleStringSchema(), properties);

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<String> messageStream = env.addSource(flinkKafkaConsumer08);

	// print() will write the contents of the stream to the TaskManager's
	// standard out stream. The rebalance() call causes a repartitioning of
	// the data so that all machines see the messages (useful, for example,
	// when "num kafka partitions" < "num flink operators").
	messageStream.rebalance().map(new MapFunction<String, String>() {
		private static final long serialVersionUID = -6867736771747690202L;

		@Override
		public String map(String value) throws Exception {
			return "Kafka and Flink says: " + value;
		}
	}).print();

	env.execute();
}
 
Developer: PacktPublishing, Project: Practical-Real-time-Processing-and-Analytics, Lines: 30, Source: FlinkKafkaSourceExample.java

Example 14: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
public static void main(String... args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<WikipediaEditEvent> edits = env.addSource(new WikipediaEditsSource());

    edits.filter((FilterFunction<WikipediaEditEvent>) edit -> {
        return !edit.isBotEdit() && edit.getByteDiff() > 1000;
    })
    .print();

    env.execute();
}
 
Developer: mushketyk, Project: flink-examples, Lines: 12, Source: FilterWikiEdits.java

Example 15: main

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; // import required for the method
public static void main(String[] args) throws Exception {

        final ParameterTool params = ParameterTool.fromArgs(args);
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.getConfig().setGlobalJobParameters(params);
        env.setParallelism(3);

        DataStream<String> simpleStringStream = env.addSource(new EventsGenerator());

        Properties configProps = new Properties();
        configProps.put(ConfigConstants.LOG_ENDPOINT, sEndpoint);
        configProps.put(ConfigConstants.LOG_ACCESSSKEYID, sAccessKeyId);
        configProps.put(ConfigConstants.LOG_ACCESSKEY, sAccessKey);
        configProps.put(ConfigConstants.LOG_PROJECT, sProject);
        configProps.put(ConfigConstants.LOG_LOGSTORE, sLogstore);

        FlinkLogProducer<String> logProducer = new FlinkLogProducer<String>(new SimpleLogSerializer(), configProps);
        logProducer.setCustomPartitioner(new LogPartitioner<String>() {
            @Override
            public String getHashKey(String element) {
                try {
                    MessageDigest md = MessageDigest.getInstance("MD5");
                    md.update(element.getBytes());
                    String hash = new BigInteger(1, md.digest()).toString(16);
                    // left-pad the hex string to the full 32 digits
                    while (hash.length() < 32) hash = "0" + hash;
                    return hash;
                } catch (NoSuchAlgorithmException e) {
                    // MD5 is always available in the JDK; fall back to a fixed key just in case
                }
                return "0000000000000000000000000000000000000000000000000000000000000000";
            }
        });
        simpleStringStream.addSink(logProducer);

        env.execute("flink log producer");
    }
 
Developer: aliyun, Project: aliyun-log-flink-connector, Lines: 36, Source: ProducerSample.java


Note: The org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.addSource examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Refer to each project's license before distributing or using the code, and please do not reproduce this compilation without permission.