

Java DataStream.print Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.streaming.api.datastream.DataStream.print, gathered from open-source projects. If you are wondering what exactly DataStream.print does, how to use it, or what real-world examples look like, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.flink.streaming.api.datastream.DataStream.


The following 15 code examples demonstrate the DataStream.print method, sorted by popularity by default.
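Before the collected examples, here is a minimal, self-contained sketch of the method in isolation (DataStream, StreamExecutionEnvironment, and their methods are real Flink API; the class name and strings are placeholders). print() attaches a sink that writes each element's toString() to standard output, prefixed with the subtask index when the sink runs with a parallelism greater than one; like any sink, it takes effect only once execute() is called.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class PrintMinimalExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // a small in-memory stream; any DataStream behaves the same way
        DataStream<String> words = env.fromElements("flink", "print", "example");

        // print() registers a stdout sink; it does not trigger execution by itself
        words.print();

        // the job actually runs only when execute() is called
        env.execute("DataStream.print minimal example");
    }
}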

Example 1: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();

    Properties properties = new Properties();
    properties.load(new FileInputStream("src/main/resources/application.properties"));

    Properties mqttProperties = new Properties();

    // client id = a:<Organization_ID>:<App_Id>
    mqttProperties.setProperty(MQTTSource.CLIENT_ID,
            String.format("a:%s:%s",
                    properties.getProperty("Org_ID"),
                    properties.getProperty("App_Id")));

    // mqtt server url = tcp://<Org_ID>.messaging.internetofthings.ibmcloud.com:1883
    mqttProperties.setProperty(MQTTSource.URL,
            String.format("tcp://%s.messaging.internetofthings.ibmcloud.com:1883",
                    properties.getProperty("Org_ID")));

    // topic = iot-2/type/<Device_Type>/id/<Device_ID>/evt/<Event_Id>/fmt/json
    mqttProperties.setProperty(MQTTSource.TOPIC,
            String.format("iot-2/type/%s/id/%s/evt/%s/fmt/json",
                    properties.getProperty("Device_Type"),
                    properties.getProperty("Device_ID"),
                    properties.getProperty("EVENT_ID")));

    mqttProperties.setProperty(MQTTSource.USERNAME, properties.getProperty("API_Key"));
    mqttProperties.setProperty(MQTTSource.PASSWORD, properties.getProperty("APP_Authentication_Token"));


    MQTTSource mqttSource = new MQTTSource(mqttProperties);
    DataStreamSource<String> temperatureDataSource = env.addSource(mqttSource);
    DataStream<String> stream = temperatureDataSource.map((MapFunction<String, String>) s -> s);
    stream.print();

    env.execute("Temperature Analysis");
}
 
Developer: pkhanal, Project: flink-watson-iot-connector, Lines: 38, Source: DeviceDataAnalysis.java
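For reference, the application.properties file that Example 1 loads would need entries like the following; the key names are taken from the getProperty calls above, while all values are placeholders, not real credentials:

# hypothetical application.properties for Example 1 (placeholder values)
Org_ID=myorg
App_Id=temperature-app
Device_Type=raspberrypi
Device_ID=sensor-01
EVENT_ID=temperature
API_Key=a-myorg-xxxxxxxxxx
APP_Authentication_Token=xxxxxxxxxx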

Example 2: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	Properties properties = new Properties();
	properties.setProperty("bootstrap.servers", "localhost:9092");
	properties.setProperty("group.id", "test");

	DataStream<TemperatureEvent> inputEventStream = env.addSource(
			new FlinkKafkaConsumer09<TemperatureEvent>("test", new EventDeserializationSchema(), properties));

	Pattern<TemperatureEvent, ?> warningPattern = Pattern.<TemperatureEvent> begin("first")
			.subtype(TemperatureEvent.class).where(new FilterFunction<TemperatureEvent>() {
				private static final long serialVersionUID = 1L;

				public boolean filter(TemperatureEvent value) {
					return value.getTemperature() >= 26.0;
				}
			}).within(Time.seconds(10));

	DataStream<Alert> patternStream = CEP.pattern(inputEventStream, warningPattern)
			.select(new PatternSelectFunction<TemperatureEvent, Alert>() {
				private static final long serialVersionUID = 1L;

				public Alert select(Map<String, TemperatureEvent> event) throws Exception {

					return new Alert("Temperature Rise Detected:" + event.get("first").getTemperature()
							+ " on machine name:" + event.get("first").getMachineName());
				}

			});

	patternStream.print();
	env.execute("CEP on Temperature Sensor");
}
 
Developer: PacktPublishing, Project: Mastering-Apache-Flink, Lines: 38, Source: KafkaApp.java
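The TemperatureEvent type used here and in Example 5 is not shown on this page. Judging from the calls in the two examples (a (String, double) constructor, getTemperature(), getMachineName()), a minimal compatible sketch could look like the following; this is an inferred stand-in, not the project's actual class:

// hypothetical minimal POJO matching the usage in Examples 2 and 5
public class TemperatureEvent implements java.io.Serializable {
    private String machineName;
    private double temperature;

    public TemperatureEvent() {} // no-arg constructor so Flink can treat it as a POJO

    public TemperatureEvent(String machineName, double temperature) {
        this.machineName = machineName;
        this.temperature = temperature;
    }

    public String getMachineName() { return machineName; }
    public void setMachineName(String machineName) { this.machineName = machineName; }
    public double getTemperature() { return temperature; }
    public void setTemperature(double temperature) { this.temperature = temperature; }
}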

Example 3: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
	ParameterTool pt = ParameterTool.fromArgs(args);

	StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
	see.setParallelism(1);

	Properties kinesisConsumerConfig = new Properties();
	kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_REGION, pt.getRequired("region"));
	kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_ACCESS_KEY_ID, pt.getRequired("accesskey"));
	kinesisConsumerConfig.setProperty(ConsumerConfigConstants.AWS_SECRET_ACCESS_KEY, pt.getRequired("secretkey"));

	DataStream<String> kinesis = see.addSource(new FlinkKinesisConsumer<>(
		"flink-test",
		new SimpleStringSchema(),
		kinesisConsumerConfig));

	kinesis.print();

	see.execute();
}
 
Developer: axbaretto, Project: flink, Lines: 21, Source: ConsumeFromKinesis.java
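Since this example reads its configuration with ParameterTool.fromArgs, a run must supply the three required arguments, for example --region eu-west-1 --accesskey <your-access-key> --secretkey <your-secret-key> (the argument names come from the getRequired calls above; the values are placeholders).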

Example 4: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	SiteToSiteClientConfig clientConfig = new SiteToSiteClient.Builder()
			.url("http://localhost:8080/nifi")
			.portName("Data for Flink")
			.requestBatchCount(5)
			.buildConfig();

	SourceFunction<NiFiDataPacket> nifiSource = new NiFiSource(clientConfig);
	DataStream<NiFiDataPacket> streamSource = env.addSource(nifiSource).setParallelism(2);

	DataStream<String> dataStream = streamSource.map(new MapFunction<NiFiDataPacket, String>() {
		@Override
		public String map(NiFiDataPacket value) throws Exception {
			return new String(value.getContent(), Charset.defaultCharset());
		}
	});

	dataStream.print();
	env.execute();
}
 
Developer: axbaretto, Project: flink, Lines: 23, Source: NiFiSourceTopologyExample.java
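This example assumes a NiFi instance at http://localhost:8080/nifi exposing a Site-to-Site output port named "Data for Flink". requestBatchCount(5) caps each Site-to-Site request at five data packets, and the map function simply decodes each packet's payload into a String before the stream is printed.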

Example 5: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<TemperatureEvent> inputEventStream = env.fromElements(new TemperatureEvent("xyz", 22.0),
			new TemperatureEvent("xyz", 20.1), new TemperatureEvent("xyz", 21.1), new TemperatureEvent("xyz", 22.2),
			new TemperatureEvent("xyz", 29.1), new TemperatureEvent("xyz", 22.3), new TemperatureEvent("xyz", 22.1),
			new TemperatureEvent("xyz", 22.4), new TemperatureEvent("xyz", 22.7),
			new TemperatureEvent("xyz", 27.0));

	Pattern<TemperatureEvent, ?> warningPattern = Pattern.<TemperatureEvent> begin("first")
			.subtype(TemperatureEvent.class).where(new FilterFunction<TemperatureEvent>() {
				private static final long serialVersionUID = 1L;

				public boolean filter(TemperatureEvent value) {
					return value.getTemperature() >= 26.0;
				}
			}).within(Time.seconds(10));

	DataStream<Alert> patternStream = CEP.pattern(inputEventStream, warningPattern)
			.select(new PatternSelectFunction<TemperatureEvent, Alert>() {
				private static final long serialVersionUID = 1L;

				public Alert select(Map<String, TemperatureEvent> event) throws Exception {

					return new Alert("Temperature Rise Detected");
				}

			});

	patternStream.print();
	env.execute("CEP on Temperature Sensor");
}
 
Developer: PacktPublishing, Project: Mastering-Apache-Flink, Lines: 35, Source: App.java

Example 6: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {

		if (args.length != 2){
			System.err.println("USAGE:\nSocketTextStreamWordCount <hostname> <port>");
			return;
		}

		String hostName = args[0];
		Integer port = Integer.parseInt(args[1]);

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment
				.getExecutionEnvironment();

		// get input data
		DataStream<String> text = env.socketTextStream(hostName, port);

		DataStream<Tuple2<String, Integer>> counts = text
				// split up the lines in pairs (2-tuples) containing: (word,1)
				.flatMap(new LineSplitter())
				// group by the tuple field "0" and sum up tuple field "1"
				.keyBy(0)
				.sum(1);

		counts.print();

		// execute program
		env.execute("Java WordCount from SocketTextStream Example");
	}
 
Developer: dineshtrivedi, Project: flink-java-project, Lines: 30, Source: SocketTextStreamWordCount.java
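The LineSplitter used above (like the Tokenizer in Examples 10 and 15) is not included in the snippet. It is the conventional word-count FlatMapFunction, which presumably looks roughly like this sketch rather than being the project's exact class:

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

// hypothetical tokenizer matching the usage: one (word, 1) tuple per word
public final class LineSplitter implements FlatMapFunction<String, Tuple2<String, Integer>> {
    @Override
    public void flatMap(String line, Collector<Tuple2<String, Integer>> out) {
        // normalize and split the line into words, emitting (word, 1) pairs
        for (String word : line.toLowerCase().split("\\W+")) {
            if (!word.isEmpty()) {
                out.collect(new Tuple2<>(word, 1));
            }
        }
    }
}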

Example 7: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {

        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // get CLI parameters
        ParameterTool parameters = ParameterTool.fromArgs(args);
        String topic = parameters.getRequired("topic");
        String groupId = parameters.get("group-id", "flink-kafka-consumer");
        String propertiesFile = parameters.getRequired("env");
        ParameterTool envProperties = ParameterTool.fromPropertiesFile(propertiesFile);
        String schemaRegistryUrl = envProperties.getRequired("registry_url");
        String bootstrapServers = envProperties.getRequired("brokers");
        String zookeeperConnect = envProperties.getRequired("zookeeper");

        // set up the Kafka consumer
        ConfluentAvroDeserializationSchema deserSchema = new ConfluentAvroDeserializationSchema(schemaRegistryUrl);
        Properties kafkaProps = new Properties();
        kafkaProps.setProperty("bootstrap.servers", bootstrapServers);
        kafkaProps.setProperty("zookeeper.connect", zookeeperConnect);
        kafkaProps.setProperty("group.id", groupId);
        FlinkKafkaConsumer08<String> flinkKafkaConsumer = new FlinkKafkaConsumer08<String>(topic, deserSchema, kafkaProps);

        DataStream<String> kafkaStream = env.addSource(flinkKafkaConsumer);

        DataStream<Integer> counts = kafkaStream
                .map(new MapFunction<String, Integer>() {
                    public Integer map(String s) throws Exception {
                        return 1;
                    }
                })
                .timeWindowAll(Time.seconds(3))
                .sum(0);

        counts.print();

        env.execute("Flink Kafka Java Example");
    }
 
Developer: seanpquig, Project: flink-streaming-confluent, Lines: 38, Source: FlinkKafkaExample.java

Example 8: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(final String[] args) throws Exception {

		if (!parseParameters(args)) {
			return;
		}

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// get input data
		final DataStream<Sentence> text = getTextDataStream(env);

		final DataStream<Tuple2<String, Integer>> counts = text
				// split up the lines in pairs (2-tuples) containing: (word,1)
				// this is done by a bolt that is wrapped accordingly
				.transform("BoltTokenizerPojo",
						TypeExtractor.getForObject(new Tuple2<String, Integer>("", 0)),
						new BoltWrapper<Sentence, Tuple2<String, Integer>>(new BoltTokenizerByName()))
				// group by the tuple field "0" and sum up tuple field "1"
				.keyBy(0).sum(1);

		// emit result
		if (fileOutput) {
			counts.writeAsText(outputPath);
		} else {
			counts.print();
		}

		// execute program
		env.execute("Streaming WordCount with POJO bolt tokenizer");
	}
 
Developer: axbaretto, Project: flink, Lines: 32, Source: BoltTokenizerWordCountPojo.java

Example 9: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(final String[] args) throws Exception {

		if (!parseParameters(args)) {
			return;
		}

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// get input data
		final DataStream<Tuple1<String>> text = getTextDataStream(env);

		final DataStream<Tuple2<String, Integer>> counts = text
				// split up the lines in pairs (2-tuples) containing: (word,1)
				// this is done by a Storm bolt that is wrapped accordingly
				.transform(
						"BoltTokenizerWithNames",
						TypeExtractor.getForObject(new Tuple2<String, Integer>("", 0)),
						new BoltWrapper<Tuple1<String>, Tuple2<String, Integer>>(
								new BoltTokenizerByName(), new Fields("sentence")))
				// group by the tuple field "0" and sum up tuple field "1"
				.keyBy(0).sum(1);

		// emit result
		if (fileOutput) {
			counts.writeAsText(outputPath);
		} else {
			counts.print();
		}

		// execute program
		env.execute("Streaming WordCount with schema bolt tokenizer");
	}
 
Developer: axbaretto, Project: flink, Lines: 34, Source: BoltTokenizerWordCountWithNames.java

Example 10: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {

		// Checking input parameters
		final ParameterTool params = ParameterTool.fromArgs(args);

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// make parameters available in the web interface
		env.getConfig().setGlobalJobParameters(params);

		// get input data
		DataStream<String> text;
		if (params.has("input")) {
			// read the text file from given input path
			text = env.readTextFile(params.get("input"));
		} else {
			System.out.println("Executing WordCount example with default input data set.");
			System.out.println("Use --input to specify file input.");
			// get default test text data
			text = env.fromElements(WordCountData.WORDS);
		}

		DataStream<Tuple2<String, Integer>> counts = text
				// split up the lines in pairs (2-tuples) containing: (word,1)
				.flatMap(new Tokenizer())
				// group by the tuple field "0" and sum up tuple field "1"
				.keyBy(0).sum(1);

		// emit result
		if (params.has("output")) {
			counts.writeAsText(params.get("output"));
		} else {
			System.out.println("Printing result to stdout. Use --output to specify output path.");
			counts.print();
		}

		// execute program
		env.execute("Streaming WordCount");
	}
 
Developer: axbaretto, Project: flink, Lines: 41, Source: WordCount.java

Example 11: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {

        // set up the execution environment
        final StreamExecutionEnvironment env
                = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStreamTimeCharacteristic(TimeCharacteristic.IngestionTime);

        // generate stream data that will become anomalous after 100000 milliseconds
        DataStream<Tuple3<String, Long,Double>> inStream
                = env.addSource(new PoissonFrequencyGenerator1(10000));

        // Choose a History defining what the latest window will be compared to.
        // Here each new window is compared to the aggregation of the last two windows.
        History hist
                = new HistoryTrailing(2);

        // Choose a distribution the value is supposed to follow and initialize it with a history.
        PoissonFreqAnomaly<String,Tuple3<String,Long,Double>> anomalyDetector
                = new PoissonFreqAnomaly<>(hist);

        // feed the stream into the model and get back a stream of AnomalyResults.
        // For details see the helper classes (e.g. KExtract) defined in the original source file.
        DataStream<Tuple2<String,AnomalyResult>> result
                = anomalyDetector.getAnomalySteam(inStream,new KExtract(),Time.seconds(5));

        // print the result
        result.print();

        env.execute("Simple Exponential Example Keyed");
    }
 
Developer: sics-dna, Project: isc4flink, Lines: 29, Source: KeyedPoissonExample.java

Example 12: testSimple

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Test
public void testSimple() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);

    final int NUM_CYCLES = 400;
    final int INPUT_GROUP_COUNT = 7; // Days of Week
    List<TestHarness.DayDemoRecord> records = IntStream.range(0, NUM_CYCLES)
            .flatMap(c -> IntStream.range(0, INPUT_GROUP_COUNT))
            .mapToObj(day -> new TestHarness.DayDemoRecord(day))
            .collect(Collectors.toList());

    DataStream<TestHarness.DayDemoRecord> input = env.fromCollection(records);

    DataStream<Tuple3<Integer,Double,Double>> result = HTM
            .learn(input, new TestHarness.DayDemoNetworkFactory())
            .resetOn(new ResetFunction<TestHarness.DayDemoRecord>() {
                @Override
                public boolean reset(TestHarness.DayDemoRecord value) throws Exception {
                    return value.dayOfWeek == 0;
                }
            })
            .select(new InferenceSelectFunction<TestHarness.DayDemoRecord, Tuple3<Integer,Double,Double>>() {
                @Override
                public Tuple3<Integer,Double,Double> select(Tuple2<TestHarness.DayDemoRecord,NetworkInference> inference) throws Exception {
                    return new Tuple3<>(
                            inference.f0.dayOfWeek,
                            (Double) inference.f1.getClassification("dayOfWeek").getMostProbableValue(1),
                            inference.f1.getAnomalyScore());
                }
            });

    result.writeAsCsv(resultPath, FileSystem.WriteMode.OVERWRITE);

    result.print();

    env.execute();
}
 
Developer: htm-community, Project: flink-htm, Lines: 39, Source: HTMIntegrationTest.java

Example 13: testCheckpointWithKeyedStream

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
/**
 * Test the checkpoint behavior of the HTM operator.
 * @throws Exception
 */
@Test
public void testCheckpointWithKeyedStream() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(5000);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));

    DataStream<TestHarness.DayDemoRecord> source = env
            .addSource(new TestHarness.DayDemoRecordSourceFunction(2, true))
            .keyBy("dayOfWeek");

    DataStream<Tuple3<Integer,Double,Double>> result =
            HTM.learn(source, new TestHarness.DayDemoNetworkFactory())
                    .select(new InferenceSelectFunction<TestHarness.DayDemoRecord, Tuple3<Integer,Double,Double>>() {
                        @Override
                        public Tuple3<Integer,Double,Double> select(Tuple2<TestHarness.DayDemoRecord,NetworkInference> inference) throws Exception {
                            return new Tuple3<>(
                                    inference.f0.dayOfWeek,
                                    (Double) inference.f1.getClassification("dayOfWeek").getMostProbableValue(1),
                                    inference.f1.getAnomalyScore());
                        }
                    });

    result.print();

    env.execute();
}
 
Developer: htm-community, Project: flink-htm, Lines: 32, Source: HTMIntegrationTest.java

Example 14: runFailOnNoBrokerTest

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
/**
 * Test that ensures the KafkaConsumer fails properly if the topic doesn't exist
 * and a wrong broker was specified.
 *
 * @throws Exception
 */
public void runFailOnNoBrokerTest() throws Exception {
	try {
		Properties properties = new Properties();

		StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
		see.getConfig().disableSysoutLogging();
		see.setRestartStrategy(RestartStrategies.noRestart());
		see.setParallelism(1);

		// use wrong ports for the consumers
		properties.setProperty("bootstrap.servers", "localhost:80");
		properties.setProperty("zookeeper.connect", "localhost:80");
		properties.setProperty("group.id", "test");
		properties.setProperty("request.timeout.ms", "3000"); // let the test fail fast
		properties.setProperty("socket.timeout.ms", "3000");
		properties.setProperty("session.timeout.ms", "2000");
		properties.setProperty("fetch.max.wait.ms", "2000");
		properties.setProperty("heartbeat.interval.ms", "1000");
		properties.putAll(secureProps);
		FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer("doesntexist", new SimpleStringSchema(), properties);
		DataStream<String> stream = see.addSource(source);
		stream.print();
		see.execute("No broker test");
	} catch (JobExecutionException jee) {
		if (kafkaServer.getVersion().equals("0.9") || kafkaServer.getVersion().equals("0.10") || kafkaServer.getVersion().equals("0.11")) {
			assertTrue(jee.getCause() instanceof TimeoutException);

			TimeoutException te = (TimeoutException) jee.getCause();

			assertEquals("Timeout expired while fetching topic metadata", te.getMessage());
		} else {
			assertTrue(jee.getCause() instanceof RuntimeException);

			RuntimeException re = (RuntimeException) jee.getCause();

			assertTrue(re.getMessage().contains("Unable to retrieve any partitions"));
		}
	}
}
 
Developer: axbaretto, Project: flink, Lines: 46, Source: KafkaConsumerTestBase.java
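In this test, print() is attached only so the topology has a sink and the source actually gets scheduled; the real assertion is on the JobExecutionException expected when no broker is reachable at the deliberately wrong ports.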

Example 15: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(final String[] args) throws Exception {

		if (!parseParameters(args)) {
			return;
		}

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// get input data
		final DataStream<String> text = getTextDataStream(env);

		final DataStream<Tuple2<String, Integer>> counts =
				// split up the lines in pairs (2-tuples) containing: (word,1)
				text.flatMap(new Tokenizer())
				// group by the tuple field "0" and sum up tuple field "1"
				.keyBy(0).sum(1);

		// emit result
		if (fileOutput) {
			counts.writeAsText(outputPath);
		} else {
			counts.print();
		}

		// execute program
		env.execute("Streaming WordCount with spout source");
	}
 
Developer: axbaretto, Project: flink, Lines: 29, Source: SpoutSourceWordCount.java


Note: the org.apache.flink.streaming.api.datastream.DataStream.print examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their developers; copyright of the source code remains with the original authors, and distribution or use should follow the License of the corresponding project. Please do not reproduce without permission.