

Java DataStream.print Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.streaming.api.datastream.DataStream.print. If you are wondering what DataStream.print does and how to use it in practice, the curated code examples below may help. You can also explore further usage examples of org.apache.flink.streaming.api.datastream.DataStream, the class this method belongs to.


The sections below present 15 code examples of the DataStream.print method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
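As a quick orientation before the collected examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) of the pattern every example follows: build a DataStream, call print() on it, and execute the job. print() registers a sink that writes each record to standard output; with parallelism greater than 1, Flink prefixes each line with the producing subtask's index (e.g. "2> hello"). On a cluster, the output lands in the TaskManagers' .out files rather than the client console.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class PrintSketch {
    public static void main(String[] args) throws Exception {
        // Local environment, so the printed records appear in this JVM's stdout.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();

        // A small bounded stream, just enough to feed the print() sink.
        DataStream<String> stream = env.fromElements("hello", "world");

        // print() attaches a sink that writes each record to stdout.
        stream.print();

        env.execute("DataStream.print sketch");
    }
}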

Example 1: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();

    Properties properties = new Properties();
    properties.load(new FileInputStream("src/main/resources/application.properties"));

    Properties mqttProperties = new Properties();

    // client id = a:<Organization_ID>:<App_Id>
    mqttProperties.setProperty(MQTTSource.CLIENT_ID,
            String.format("a:%s:%s",
                    properties.getProperty("Org_ID"),
                    properties.getProperty("App_Id")));

    // mqtt server url = tcp://<Org_ID>.messaging.internetofthings.ibmcloud.com:1883
    mqttProperties.setProperty(MQTTSource.URL,
            String.format("tcp://%s.messaging.internetofthings.ibmcloud.com:1883",
                    properties.getProperty("Org_ID")));

    // topic = iot-2/type/<Device_Type>/id/<Device_ID>/evt/<Event_Id>/fmt/json
    mqttProperties.setProperty(MQTTSource.TOPIC,
            String.format("iot-2/type/%s/id/%s/evt/%s/fmt/json",
                    properties.getProperty("Device_Type"),
                    properties.getProperty("Device_ID"),
                    properties.getProperty("EVENT_ID")));

    mqttProperties.setProperty(MQTTSource.USERNAME, properties.getProperty("API_Key"));
    mqttProperties.setProperty(MQTTSource.PASSWORD, properties.getProperty("APP_Authentication_Token"));


    MQTTSource mqttSource = new MQTTSource(mqttProperties);
    DataStreamSource<String> temperatureDataSource = env.addSource(mqttSource);
    DataStream<String> stream = temperatureDataSource.map((MapFunction<String, String>) s -> s);
    stream.print();

    env.execute("Temperature Analysis");
}
 
Developer: pkhanal, Project: flink-watson-iot-connector, Lines: 38, Source: DeviceDataAnalysis.java

Example 2: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	Properties properties = new Properties();
	properties.setProperty("bootstrap.servers", "localhost:9092");
	properties.setProperty("group.id", "test");

	DataStream<TemperatureEvent> inputEventStream = env.addSource(
			new FlinkKafkaConsumer09<TemperatureEvent>("test", new EventDeserializationSchema(), properties));

	Pattern<TemperatureEvent, ?> warningPattern = Pattern.<TemperatureEvent> begin("first")
			.subtype(TemperatureEvent.class).where(new FilterFunction<TemperatureEvent>() {
				private static final long serialVersionUID = 1L;

				@Override
				public boolean filter(TemperatureEvent value) {
					return value.getTemperature() >= 26.0;
				}
			}).within(Time.seconds(10));

	DataStream<Alert> patternStream = CEP.pattern(inputEventStream, warningPattern)
			.select(new PatternSelectFunction<TemperatureEvent, Alert>() {
				private static final long serialVersionUID = 1L;

				public Alert select(Map<String, TemperatureEvent> event) throws Exception {

					return new Alert("Temperature Rise Detected:" + event.get("first").getTemperature()
							+ " on machine name:" + event.get("first").getMachineName());
				}

			});

	patternStream.print();
	env.execute("CEP on Temperature Sensor");
}
 
Developer: PacktPublishing, Project: Mastering-Apache-Flink, Lines: 38, Source: KafkaApp.java

Example 3: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
	see.setParallelism(1);

	Properties kinesisConsumerConfig = new Properties();
	kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_REGION, pt.getRequired("region"));
	kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_ACCESS_KEY_ID, pt.getRequired("accesskey"));
	kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_SECRET_ACCESS_KEY, pt.getRequired("secretkey"));

	DataStream<String> kinesis = see.addSource(new FlinkKinesisConsumer<>(
		"flink-test",
		new SimpleStringSchema(),
		kinesisConsumerConfig));

	kinesis.print();

	see.execute();
}
 
Developer: axbaretto, Project: flink, Lines: 21, Source: ConsumeFromKinesis.java

Example 4: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	SiteToSiteClientConfig clientConfig = new SiteToSiteClient.Builder()
			.url("http://localhost:8080/nifi")
			.portName("Data for Flink")
			.requestBatchCount(5)
			.buildConfig();

	SourceFunction<NiFiDataPacket> nifiSource = new NiFiSource(clientConfig);
	DataStream<NiFiDataPacket> streamSource = env.addSource(nifiSource).setParallelism(2);

	DataStream<String> dataStream = streamSource.map(new MapFunction<NiFiDataPacket, String>() {
		@Override
		public String map(NiFiDataPacket value) throws Exception {
			return new String(value.getContent(), Charset.defaultCharset());
		}
	});

	dataStream.print();
	env.execute();
}
 
Developer: axbaretto, Project: flink, Lines: 23, Source: NiFiSourceTopologyExample.java

Example 5: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<TemperatureEvent> inputEventStream = env.fromElements(new TemperatureEvent("xyz", 22.0),
			new TemperatureEvent("xyz", 20.1), new TemperatureEvent("xyz", 21.1), new TemperatureEvent("xyz", 22.2),
			new TemperatureEvent("xyz", 29.1), new TemperatureEvent("xyz", 22.3), new TemperatureEvent("xyz", 22.1),
			new TemperatureEvent("xyz", 22.4), new TemperatureEvent("xyz", 22.7),
			new TemperatureEvent("xyz", 27.0));

	Pattern<TemperatureEvent, ?> warningPattern = Pattern.<TemperatureEvent> begin("first")
			.subtype(TemperatureEvent.class).where(new FilterFunction<TemperatureEvent>() {
				private static final long serialVersionUID = 1L;

				@Override
				public boolean filter(TemperatureEvent value) {
					return value.getTemperature() >= 26.0;
				}
			}).within(Time.seconds(10));

	DataStream<Alert> patternStream = CEP.pattern(inputEventStream, warningPattern)
			.select(new PatternSelectFunction<TemperatureEvent, Alert>() {
				private static final long serialVersionUID = 1L;

				public Alert select(Map<String, TemperatureEvent> event) throws Exception {

					return new Alert("Temperature Rise Detected");
				}

			});

	patternStream.print();
	env.execute("CEP on Temperature Sensor");
}
 
Developer: PacktPublishing, Project: Mastering-Apache-Flink, Lines: 35, Source: App.java

Example 6: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

		if (args.length != 2){
			System.err.println("USAGE:\nSocketTextStreamWordCount <hostname> <port>");
			return;
		}

		String hostName = args[0];
		Integer port = Integer.parseInt(args[1]);

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment
				.getExecutionEnvironment();

		// get input data
		DataStream<String> text = env.socketTextStream(hostName, port);

		DataStream<Tuple2<String, Integer>> counts =
		// split up the lines in pairs (2-tuples) containing: (word,1)
		text.flatMap(new LineSplitter())
		// group by the tuple field "0" and sum up tuple field "1"
				.keyBy(0)
				.sum(1);

		counts.print();

		// execute program
		env.execute("Java WordCount from SocketTextStream Example");
	}
 
Developer: dineshtrivedi, Project: flink-java-project, Lines: 30, Source: SocketTextStreamWordCount.java

Example 7: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // get CLI parameters
        ParameterTool parameters = ParameterTool.fromArgs(args);
        String topic = parameters.getRequired("topic");
        String groupId = parameters.get("group-id", "flink-kafka-consumer");
        String propertiesFile = parameters.getRequired("env");
        ParameterTool envProperties = ParameterTool.fromPropertiesFile(propertiesFile);
        String schemaRegistryUrl = envProperties.getRequired("registry_url");
        String bootstrapServers = envProperties.getRequired("brokers");
        String zookeeperConnect = envProperties.getRequired("zookeeper");

        // set up the Kafka consumer (source)
        ConfluentAvroDeserializationSchema deserSchema = new ConfluentAvroDeserializationSchema(schemaRegistryUrl);
        Properties kafkaProps = new Properties();
        kafkaProps.setProperty("bootstrap.servers", bootstrapServers);
        kafkaProps.setProperty("zookeeper.connect", zookeeperConnect);
        kafkaProps.setProperty("group.id", groupId);
        FlinkKafkaConsumer08<String> flinkKafkaConsumer = new FlinkKafkaConsumer08<String>(topic, deserSchema, kafkaProps);

        DataStream<String> kafkaStream = env.addSource(flinkKafkaConsumer);

        DataStream<Integer> counts = kafkaStream
                .map(new MapFunction<String, Integer>() {
                    public Integer map(String s) throws Exception {
                        return 1;
                    }
                })
                .timeWindowAll(Time.seconds(3))
                .sum(0);

        counts.print();

        env.execute("Flink Kafka Java Example");
    }
 
Developer: seanpquig, Project: flink-streaming-confluent, Lines: 38, Source: FlinkKafkaExample.java

Example 8: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(final String[] args) throws Exception {

		if (!parseParameters(args)) {
			return;
		}

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// get input data
		final DataStream<Sentence> text = getTextDataStream(env);

		final DataStream<Tuple2<String, Integer>> counts = text
				// split up the lines in pairs (2-tuples) containing: (word,1)
				// this is done by a bolt that is wrapped accordingly
				.transform("BoltTokenizerPojo",
						TypeExtractor.getForObject(new Tuple2<String, Integer>("", 0)),
						new BoltWrapper<Sentence, Tuple2<String, Integer>>(new BoltTokenizerByName()))
				// group by the tuple field "0" and sum up tuple field "1"
				.keyBy(0).sum(1);

		// emit result
		if (fileOutput) {
			counts.writeAsText(outputPath);
		} else {
			counts.print();
		}

		// execute program
		env.execute("Streaming WordCount with POJO bolt tokenizer");
	}
 
Developer: axbaretto, Project: flink, Lines: 32, Source: BoltTokenizerWordCountPojo.java

Example 9: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(final String[] args) throws Exception {

		if (!parseParameters(args)) {
			return;
		}

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// get input data
		final DataStream<Tuple1<String>> text = getTextDataStream(env);

		final DataStream<Tuple2<String, Integer>> counts = text
				// split up the lines in pairs (2-tuples) containing: (word,1)
				// this is done by a Storm bolt that is wrapped accordingly
				.transform(
						"BoltTokenizerWithNames",
						TypeExtractor.getForObject(new Tuple2<String, Integer>("", 0)),
						new BoltWrapper<Tuple1<String>, Tuple2<String, Integer>>(
								new BoltTokenizerByName(), new Fields("sentence")))
				// group by the tuple field "0" and sum up tuple field "1"
				.keyBy(0).sum(1);

		// emit result
		if (fileOutput) {
			counts.writeAsText(outputPath);
		} else {
			counts.print();
		}

		// execute program
		env.execute("Streaming WordCount with schema bolt tokenizer");
	}
 
Developer: axbaretto, Project: flink, Lines: 34, Source: BoltTokenizerWordCountWithNames.java

Example 10: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

		// Checking input parameters
		final ParameterTool params = ParameterTool.fromArgs(args);

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// make parameters available in the web interface
		env.getConfig().setGlobalJobParameters(params);

		// get input data
		DataStream<String> text;
		if (params.has("input")) {
			// read the text file from given input path
			text = env.readTextFile(params.get("input"));
		} else {
			System.out.println("Executing WordCount example with default input data set.");
			System.out.println("Use --input to specify file input.");
			// get default test text data
			text = env.fromElements(WordCountData.WORDS);
		}

		DataStream<Tuple2<String, Integer>> counts =
		// split up the lines in pairs (2-tuples) containing: (word,1)
		text.flatMap(new Tokenizer())
		// group by the tuple field "0" and sum up tuple field "1"
				.keyBy(0).sum(1);

		// emit result
		if (params.has("output")) {
			counts.writeAsText(params.get("output"));
		} else {
			System.out.println("Printing result to stdout. Use --output to specify output path.");
			counts.print();
		}

		// execute program
		env.execute("Streaming WordCount");
	}
 
Developer: axbaretto, Project: flink, Lines: 41, Source: WordCount.java

Example 11: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        // set up the execution environment
        final StreamExecutionEnvironment env
                = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStreamTimeCharacteristic(TimeCharacteristic.IngestionTime);

        // generate stream data that becomes anomalous after 100000 milliseconds
        DataStream<Tuple3<String, Long,Double>> inStream
                = env.addSource(new PoissonFrequencyGenerator1(10000));

        // Choose a History defining what the latest window will be compared to. In this case each new window is compared to the aggregation of the last two windows.
        History hist
                = new HistoryTrailing(2);

        // Choose a distribution the value is supposed to follow and initialize it with a history.
        PoissonFreqAnomaly<String,Tuple3<String,Long,Double>> anomalyDetector
                = new PoissonFreqAnomaly<>(hist);

        // feed the stream into the model and get back a stream of AnomalyResults. For details see the different internal classes defined below.
        DataStream<Tuple2<String,AnomalyResult>> result
                = anomalyDetector.getAnomalySteam(inStream,new KExtract(),Time.seconds(5));

        // print the result
        result.print();

        env.execute("Simple Exponential Example Keyed");
    }
 
Developer: sics-dna, Project: isc4flink, Lines: 29, Source: KeyedPoissonExample.java

Example 12: testSimple

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
@Test
public void testSimple() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);

    final int NUM_CYCLES = 400;
    final int INPUT_GROUP_COUNT = 7; // Days of Week
    List<TestHarness.DayDemoRecord> records = IntStream.range(0, NUM_CYCLES)
            .flatMap(c -> IntStream.range(0, INPUT_GROUP_COUNT))
            .mapToObj(day -> new TestHarness.DayDemoRecord(day))
            .collect(Collectors.toList());

    DataStream<TestHarness.DayDemoRecord> input = env.fromCollection(records);

    DataStream<Tuple3<Integer,Double,Double>> result = HTM
            .learn(input, new TestHarness.DayDemoNetworkFactory())
            .resetOn(new ResetFunction<TestHarness.DayDemoRecord>() {
                @Override
                public boolean reset(TestHarness.DayDemoRecord value) throws Exception {
                    return value.dayOfWeek == 0;
                }
            })
            .select(new InferenceSelectFunction<TestHarness.DayDemoRecord, Tuple3<Integer,Double,Double>>() {
                @Override
                public Tuple3<Integer,Double,Double> select(Tuple2<TestHarness.DayDemoRecord,NetworkInference> inference) throws Exception {
                    return new Tuple3<>(
                            inference.f0.dayOfWeek,
                            (Double) inference.f1.getClassification("dayOfWeek").getMostProbableValue(1),
                            inference.f1.getAnomalyScore());
                }
            });

    result.writeAsCsv(resultPath, FileSystem.WriteMode.OVERWRITE);

    result.print();

    env.execute();
}
 
Developer: htm-community, Project: flink-htm, Lines: 39, Source: HTMIntegrationTest.java

Example 13: testCheckpointWithKeyedStream

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
/**
 * Test the checkpoint behavior of the HTM operator.
 * @throws Exception
 */
@Test
public void testCheckpointWithKeyedStream() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(5000);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));

    DataStream<TestHarness.DayDemoRecord> source = env
            .addSource(new TestHarness.DayDemoRecordSourceFunction(2, true))
            .keyBy("dayOfWeek");

    DataStream<Tuple3<Integer,Double,Double>> result =
            HTM.learn(source, new TestHarness.DayDemoNetworkFactory())
                    .select(new InferenceSelectFunction<TestHarness.DayDemoRecord, Tuple3<Integer,Double,Double>>() {
                        @Override
                        public Tuple3<Integer,Double,Double> select(Tuple2<TestHarness.DayDemoRecord,NetworkInference> inference) throws Exception {
                            return new Tuple3<>(
                                    inference.f0.dayOfWeek,
                                    (Double) inference.f1.getClassification("dayOfWeek").getMostProbableValue(1),
                                    inference.f1.getAnomalyScore());
                        }
                    });

    result.print();

    env.execute();
}
 
Developer: htm-community, Project: flink-htm, Lines: 32, Source: HTMIntegrationTest.java

Example 14: runFailOnNoBrokerTest

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
/**
 * Test that ensures the KafkaConsumer fails properly if the topic doesn't exist
 * and a wrong broker was specified.
 *
 * @throws Exception
 */
public void runFailOnNoBrokerTest() throws Exception {
	try {
		Properties properties = new Properties();

		StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
		see.getConfig().disableSysoutLogging();
		see.setRestartStrategy(RestartStrategies.noRestart());
		see.setParallelism(1);

		// use wrong ports for the consumers
		properties.setProperty("bootstrap.servers", "localhost:80");
		properties.setProperty("zookeeper.connect", "localhost:80");
		properties.setProperty("group.id", "test");
		properties.setProperty("request.timeout.ms", "3000"); // let the test fail fast
		properties.setProperty("socket.timeout.ms", "3000");
		properties.setProperty("session.timeout.ms", "2000");
		properties.setProperty("fetch.max.wait.ms", "2000");
		properties.setProperty("heartbeat.interval.ms", "1000");
		properties.putAll(secureProps);
		FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer("doesntexist", new SimpleStringSchema(), properties);
		DataStream<String> stream = see.addSource(source);
		stream.print();
		see.execute("No broker test");
	} catch (JobExecutionException jee) {
		if (kafkaServer.getVersion().equals("0.9") || kafkaServer.getVersion().equals("0.10") || kafkaServer.getVersion().equals("0.11")) {
			assertTrue(jee.getCause() instanceof TimeoutException);

			TimeoutException te = (TimeoutException) jee.getCause();

			assertEquals("Timeout expired while fetching topic metadata", te.getMessage());
		} else {
			assertTrue(jee.getCause() instanceof RuntimeException);

			RuntimeException re = (RuntimeException) jee.getCause();

			assertTrue(re.getMessage().contains("Unable to retrieve any partitions"));
		}
	}
}
 
Developer: axbaretto, Project: flink, Lines: 46, Source: KafkaConsumerTestBase.java

Example 15: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(final String[] args) throws Exception {

		if (!parseParameters(args)) {
			return;
		}

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// get input data
		final DataStream<String> text = getTextDataStream(env);

		final DataStream<Tuple2<String, Integer>> counts =
				// split up the lines in pairs (2-tuples) containing: (word,1)
				text.flatMap(new Tokenizer())
				// group by the tuple field "0" and sum up tuple field "1"
				.keyBy(0).sum(1);

		// emit result
		if (fileOutput) {
			counts.writeAsText(outputPath);
		} else {
			counts.print();
		}

		// execute program
		env.execute("Streaming WordCount with spout source");
	}
 
Developer: axbaretto, Project: flink, Lines: 29, Source: SpoutSourceWordCount.java


Note: The org.apache.flink.streaming.api.datastream.DataStream.print method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please refer to each project's license before distributing or using the code. Do not reproduce without permission.