

Java DataStream.writeAsText Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.streaming.api.datastream.DataStream.writeAsText. If you have been wondering what DataStream.writeAsText does, how to use it, or what working examples look like, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.flink.streaming.api.datastream.DataStream.


The following presents 14 code examples of the DataStream.writeAsText method, collected from open-source projects and ordered by popularity by default.
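Before the individual examples, here is a minimal self-contained sketch of the method (the output path /tmp/numbers-out and the job name are illustrative assumptions, not taken from the examples below). writeAsText writes each element's toString() as one line of text; with a sink parallelism greater than 1, the path becomes a directory of part files, and FileSystem.WriteMode.OVERWRITE replaces output left over from a previous run.

import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class WriteAsTextSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // a small bounded stream, just to have something to write out
        DataStream<Long> numbers = env.fromElements(1L, 2L, 3L, 4L, 5L);

        // one line per element; OVERWRITE replaces existing output files.
        // Forcing the sink to parallelism 1 yields a single plain text file
        // instead of a directory of numbered part files.
        numbers.writeAsText("/tmp/numbers-out", FileSystem.WriteMode.OVERWRITE)
            .setParallelism(1);

        env.execute("writeAsText sketch");
    }
}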

Example 1: testUnboundedPojoSourceButReturnInvalidTupleType

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
@Test(expected = InvalidTypesException.class)
public void testUnboundedPojoSourceButReturnInvalidTupleType() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Event> input = env.addSource(new RandomEventSource(5).closeDelay(1500));

    DataStream<Tuple5<Long, Integer, String, Double, Long>> output = SiddhiCEP
        .define("inputStream", input, "id", "name", "price", "timestamp")
        .cql("from inputStream select timestamp, id, name, price insert into  outputStream")
        .returns("outputStream");

    DataStream<Long> following = output.map(new MapFunction<Tuple5<Long, Integer, String, Double, Long>, Long>() {
        @Override
        public Long map(Tuple5<Long, Integer, String, Double, Long> value) throws Exception {
            return value.f0;
        }
    });

    String resultPath = tempFolder.newFile().toURI().toString();
    following.writeAsText(resultPath, FileSystem.WriteMode.OVERWRITE);
    env.execute();
    assertEquals(5, getLineCount(resultPath));
}
 
Developer ID: apache, Project: bahir-flink, Lines of code: 24, Source: SiddhiCEPITCase.java
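The Siddhi tests in this article rely on a getLineCount helper defined elsewhere in SiddhiCEPITCase. A minimal sketch of such a helper, assuming resultPath is a local file:// URI as produced by tempFolder.newFile().toURI().toString(), could look like this:

// Hypothetical sketch of the getLineCount helper used by the tests above;
// assumes resultPath is a file:// URI pointing at a single local output file.
private static int getLineCount(String resultPath) throws java.io.IOException {
    java.nio.file.Path path = java.nio.file.Paths.get(java.net.URI.create(resultPath));
    return java.nio.file.Files.readAllLines(path).size();
}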

Example 2: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
public static void main(String[] args) throws Exception {
    final String input = "C:\\dev\\github\\clojured-taxi-rides\\resources\\datasets\\nycTaxiRides.gz";

    final int maxEventDelay = 60;       // events are out of order by max 60 seconds
    final int servingSpeedFactor = 600; // events of 10 minutes are served in 1 second

    // set up streaming execution environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    // start the data generator
    DataStream<TaxiRide> rides = env.addSource(
            new TaxiRideSource(input, maxEventDelay, servingSpeedFactor));

    DataStream<TaxiRide> filteredRides = rides
            // filter out rides that do not start or stop in NYC
            .filter(new NYCFilter());

    // print the filtered stream
    //filteredRides.print();
    // a proper file URI uses forward slashes, even on Windows
    filteredRides.writeAsText("file:///C:/Users/ht/rides_java.txt");

    // run the cleansing pipeline
    env.execute("Taxi Ride Cleansing");
}
 
Developer ID: thr0n, Project: clojured-taxi-rides, Lines of code: 26, Source: RideCleansing.java
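The NYCFilter used in Example 2 is not shown. Assuming the GeoUtils helper from the Flink training exercises (which tests whether a longitude/latitude pair lies within New York City), a plausible sketch is:

// Hypothetical sketch of the NYCFilter referenced above; GeoUtils.isInNYC
// is assumed to come from the Flink training-exercises utilities.
public static class NYCFilter implements FilterFunction<TaxiRide> {
    @Override
    public boolean filter(TaxiRide taxiRide) throws Exception {
        // keep only rides that both start and end within NYC
        return GeoUtils.isInNYC(taxiRide.startLon, taxiRide.startLat)
            && GeoUtils.isInNYC(taxiRide.endLon, taxiRide.endLat);
    }
}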

Example 3: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
@SuppressWarnings("Convert2Lambda")
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment streamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<String> dataStream = streamExecutionEnvironment.readTextFile("file:///tmp/flink-esper-input");
    
    EsperStream<String> esperStream = Esper.pattern(dataStream, "select bytes from String");

    DataStream<String> result = esperStream.select(new EsperSelectFunction<String>() {
        private static final long serialVersionUID = 7093943872082195786L;

        @Override
        public String select(EventBean eventBean) throws Exception {
            return new String((byte[]) eventBean.get("bytes"));
        }
    });

    result.writeAsText("file:///tmp/flink-esper-output");

    streamExecutionEnvironment.execute("Simple Flink Esper Example");
}
 
Developer ID: phil3k3, Project: flink-esper, Lines of code: 21, Source: FlinkTestClass.java

Example 4: testUnboundedTupleSourceAndReturnTuple

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
@Test
public void testUnboundedTupleSourceAndReturnTuple() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<Tuple4<Integer, String, Double, Long>> input = env
		.addSource(new RandomTupleSource(5).closeDelay(1500)).keyBy(1);

	DataStream<Tuple4<Long, Integer, String, Double>> output = SiddhiCEP
		.define("inputStream", input, "id", "name", "price", "timestamp")
		.cql("from inputStream select timestamp, id, name, price insert into  outputStream")
		.returns("outputStream");

	String resultPath = tempFolder.newFile().toURI().toString();
	output.writeAsText(resultPath, FileSystem.WriteMode.OVERWRITE);
	env.execute();
	assertEquals(5, getLineCount(resultPath));
}
 
Developer ID: haoch, Project: flink-siddhi, Lines of code: 17, Source: SiddhiCEPITCase.java

Example 5: testTriggerUndefinedStreamException

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
@Test(expected = UndefinedStreamException.class)
public void testTriggerUndefinedStreamException() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Event> input1 = env.addSource(new RandomEventSource(5), "input1");

    SiddhiCEP cep = SiddhiCEP.getSiddhiEnvironment(env);
    cep.registerStream("inputStream1", input1.keyBy("id"), "id", "name", "price", "timestamp");

    DataStream<Map<String, Object>> output = cep
        .from("inputStream1").union("inputStream2")
        .cql(
            "from inputStream1#window.length(5) as s1 "
                + "join inputStream2#window.time(500) as s2 "
                + "on s1.id == s2.id "
                + "select s1.timestamp as t, s1.name as n, s1.price as p1, s2.price as p2 "
                + "insert into JoinStream;"
        )
        .returnAsMap("JoinStream");

    String resultPath = tempFolder.newFile().toURI().toString();
    output.writeAsText(resultPath, FileSystem.WriteMode.OVERWRITE);
    env.execute();
}
 
Developer ID: apache, Project: bahir-flink, Lines of code: 24, Source: SiddhiCEPITCase.java

Example 6: testMultipleUnboundedPojoStreamUnionAndJoinWithWindow

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * @see <a href="https://docs.wso2.com/display/CEP300/Joins">https://docs.wso2.com/display/CEP300/Joins</a>
 */
@Test
public void testMultipleUnboundedPojoStreamUnionAndJoinWithWindow() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<Event> input1 = env.addSource(new RandomEventSource(5), "input1");
	DataStream<Event> input2 = env.addSource(new RandomEventSource(5), "input2");

	DataStream<? extends Map> output = SiddhiCEP
		.define("inputStream1", input1.keyBy("id"), "id", "name", "price", "timestamp")
		.union("inputStream2", input2.keyBy("id"), "id", "name", "price", "timestamp")
		.cql(
			"from inputStream1#window.length(5) as s1 "
				+ "join inputStream2#window.time(500) as s2 "
				+ "on s1.id == s2.id "
				+ "select s1.timestamp as t, s1.name as n, s1.price as p1, s2.price as p2 "
				+ "insert into JoinStream;"
		)
		.returnAsMap("JoinStream");

	String resultPath = tempFolder.newFile().toURI().toString();
	output.writeAsText(resultPath, FileSystem.WriteMode.OVERWRITE);
	env.execute();
	assertEquals(5, getLineCount(resultPath));
}
 
Developer ID: haoch, Project: flink-siddhi, Lines of code: 27, Source: SiddhiCEPITCase.java

Example 7: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
public static void main(final String[] args) throws Exception {

		if (!parseParameters(args)) {
			return;
		}

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// get input data
		final DataStream<String> text = getTextDataStream(env);

		final DataStream<String> exclaimed = text
				.map(new ExclamationMap())
				.map(new ExclamationMap());

		// emit result
		if (fileOutput) {
			exclaimed.writeAsText(outputPath);
		} else {
			exclaimed.print();
		}

		// execute program
		env.execute("Streaming Exclamation with Storm spout source");
	}
 
Developer ID: axbaretto, Project: flink, Lines of code: 27, Source: ExclamationWithSpout.java
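Example 7's ExclamationMap is defined elsewhere in ExclamationWithSpout; a sketch of such a map function (the exact string appended is an assumption) is:

// Sketch of the ExclamationMap used above: appends exclamation marks to
// every record that passes through the map.
private static class ExclamationMap implements MapFunction<String, String> {
    @Override
    public String map(String value) throws Exception {
        return value + "!!!";
    }
}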

Example 8: testUnboundedPojoStreamSimplePatternMatch

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
/**
 * @see <a href="https://docs.wso2.com/display/CEP300/Patterns">https://docs.wso2.com/display/CEP300/Patterns</a>
 */
@Test
public void testUnboundedPojoStreamSimplePatternMatch() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    DataStream<Event> input1 = env.addSource(new RandomEventSource(5).closeDelay(1500), "input1");
    DataStream<Event> input2 = env.addSource(new RandomEventSource(5).closeDelay(1500), "input2");

    DataStream<Map<String, Object>> output = SiddhiCEP
        .define("inputStream1", input1.keyBy("name"), "id", "name", "price", "timestamp")
        .union("inputStream2", input2.keyBy("name"), "id", "name", "price", "timestamp")
        .cql(
            "from every s1 = inputStream1[id == 2] "
                + " -> s2 = inputStream2[id == 3] "
                + "select s1.id as id_1, s1.name as name_1, s2.id as id_2, s2.name as name_2 "
                + "insert into outputStream"
        )
        .returnAsMap("outputStream");

    String resultPath = tempFolder.newFile().toURI().toString();
    output.writeAsText(resultPath, FileSystem.WriteMode.OVERWRITE);
    env.execute();
    assertEquals(1, getLineCount(resultPath));
    compareResultsByLinesInMemory("{id_1=2, name_1=test_event, id_2=3, name_2=test_event}", resultPath);
}
 
Developer ID: apache, Project: bahir-flink, Lines of code: 29, Source: SiddhiCEPITCase.java

Example 9: testUnboundedPojoStreamAndReturnPojo

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
@Test
public void testUnboundedPojoStreamAndReturnPojo() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Event> input = env.addSource(new RandomEventSource(5));
    // assignTimestampsAndWatermarks returns a new stream; the result must be
    // reassigned, otherwise the timestamp assignment is silently discarded
    input = input.assignTimestampsAndWatermarks(new AscendingTimestampExtractor<Event>() {
        @Override
        public long extractAscendingTimestamp(Event element) {
            return element.getTimestamp();
        }
    });

    DataStream<Event> output = SiddhiCEP
        .define("inputStream", input, "id", "name", "price", "timestamp")
        .cql("from inputStream select timestamp, id, name, price insert into  outputStream")
        .returns("outputStream", Event.class);

    String resultPath = tempFolder.newFile().toURI().toString();
    output.writeAsText(resultPath, FileSystem.WriteMode.OVERWRITE);
    env.execute();
    assertEquals(5, getLineCount(resultPath));
}
 
Developer ID: apache, Project: bahir-flink, Lines of code: 22, Source: SiddhiCEPITCase.java
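The Event type used throughout the Siddhi examples comes from the projects' test sources. A minimal compatible POJO, with field types inferred from the Tuple5<Long, Integer, String, Double, Long> mapping of (timestamp, id, name, price) in Example 1, might look like this:

// Hypothetical minimal Event POJO compatible with the Siddhi examples above;
// the real class lives in the flink-siddhi/bahir-flink test sources.
public static class Event {
    private int id;
    private String name;
    private double price;
    private long timestamp;

    public Event() {} // Flink POJOs require a public no-argument constructor

    public int getId() { return id; }
    public void setId(int id) { this.id = id; }
    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
    public double getPrice() { return price; }
    public void setPrice(double price) { this.price = price; }
    public long getTimestamp() { return timestamp; }
    public void setTimestamp(long timestamp) { this.timestamp = timestamp; }
}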

Example 10: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
public static void main(String[] args) throws Exception {

        final ParameterTool params = ParameterTool.fromArgs(args);
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.getConfig().setGlobalJobParameters(params);
        env.setParallelism(2);
        env.enableCheckpointing(5000);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        env.setStateBackend(new FsStateBackend("file:///Users/zhouzhou/Binary/flink-1.3.2/testcheckpoints/"));
        RawLogGroupListDeserializer deserializer = new RawLogGroupListDeserializer();
        Properties configProps = new Properties();
        configProps.put(ConfigConstants.LOG_ENDPOINT, sEndpoint);
        configProps.put(ConfigConstants.LOG_ACCESSSKEYID, sAccessKeyId);
        configProps.put(ConfigConstants.LOG_ACCESSKEY, sAccessKey);
        configProps.put(ConfigConstants.LOG_PROJECT, sProject);
        configProps.put(ConfigConstants.LOG_LOGSTORE, sLogstore);
        configProps.put(ConfigConstants.LOG_MAX_NUMBER_PER_FETCH, "10");
        configProps.put(ConfigConstants.LOG_CONSUMER_BEGIN_POSITION, Consts.LOG_FROM_CHECKPOINT);
        configProps.put(ConfigConstants.LOG_CONSUMERGROUP, "23_ots_sla_etl_product");
        DataStream<RawLogGroupList> logTestStream = env.addSource(
                new FlinkLogConsumer<RawLogGroupList>(deserializer, configProps)
        );

        logTestStream.writeAsText("/Users/zhouzhou/Binary/flink-1.3.2/data/newb.txt." + System.nanoTime());
        env.execute("flink log connector");
    }
 
Developer ID: aliyun, Project: aliyun-log-flink-connector, Lines of code: 29, Source: ConsumerSample.java

Example 11: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
public static void main(String[] args) throws Exception {

        final int popThreshold = 20; // threshold for popular places

        // set up streaming execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
        env.getConfig().setAutoWatermarkInterval(1000);

        // configure the Kafka consumer
        Properties kafkaProps = new Properties();
        kafkaProps.setProperty("zookeeper.connect", LOCAL_ZOOKEEPER_HOST);
        kafkaProps.setProperty("bootstrap.servers", LOCAL_KAFKA_BROKER);
        kafkaProps.setProperty("group.id", RIDE_SPEED_GROUP);
        // always read the Kafka topic from the start
        kafkaProps.setProperty("auto.offset.reset", "earliest");

        // create a Kafka consumer
        FlinkKafkaConsumer09<TaxiRide> consumer = new FlinkKafkaConsumer09<>(
                "cleansedRides",
                new TaxiRideSchema(),
                kafkaProps);
        // assign a timestamp extractor to the consumer
        consumer.assignTimestampsAndWatermarks(new TaxiRideTSExtractor());

        // create a TaxiRide data stream
        DataStream<TaxiRide> rides = env.addSource(consumer);

        // find popular places
        DataStream<Tuple5<Float, Float, Long, Boolean, Integer>> popularPlaces = rides
                // match ride to grid cell and event type (start or end)
                .map(new GridCellMatcher())
                // partition by cell id and event type
                .keyBy(0, 1)
                // build sliding window
                .timeWindow(Time.minutes(15), Time.minutes(5))
                // count ride events in window
                .apply(new RideCounter())
                // filter by popularity threshold
                .filter(new FilterFunction<Tuple4<Integer, Long, Boolean, Integer>>() {
                    @Override
                    public boolean filter(Tuple4<Integer, Long, Boolean, Integer> count) throws Exception {
                        return count.f3 >= popThreshold;
                    }
                })
                // map grid cell to coordinates
                .map(new GridToCoordinates());

        //popularPlaces.print();
        // a proper file URI uses forward slashes, even on Windows
        popularPlaces.writeAsText("file:///C:/Users/ht/kafka_java.txt");

        // execute the transformation pipeline
        env.execute("Popular Places from Kafka");
    }
 
Developer ID: thr0n, Project: clojured-taxi-rides, Lines of code: 55, Source: PopularPlacesFromKafka.java

Example 12: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
public static void main(String[] args) throws Exception {

		// Checking input parameters
		final ParameterTool params = ParameterTool.fromArgs(args);

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// make parameters available in the web interface
		env.getConfig().setGlobalJobParameters(params);

		// get input data
		DataStream<String> text;
		if (params.has("input")) {
			// read the text file from given input path
			text = env.readTextFile(params.get("input"));
		} else {
			System.out.println("Executing WordCount example with default input data set.");
			System.out.println("Use --input to specify file input.");
			// get default test text data
			text = env.fromElements(WordCountData.WORDS);
		}

		DataStream<Tuple2<String, Integer>> counts =
		// split up the lines in pairs (2-tuples) containing: (word,1)
		text.flatMap(new Tokenizer())
		// group by the tuple field "0" and sum up tuple field "1"
				.keyBy(0).sum(1);

		// emit result
		if (params.has("output")) {
			counts.writeAsText(params.get("output"));
		} else {
			System.out.println("Printing result to stdout. Use --output to specify output path.");
			counts.print();
		}

		// execute program
		env.execute("Streaming WordCount");
	}
 
Developer ID: axbaretto, Project: flink, Lines of code: 41, Source: WordCount.java
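The Tokenizer in Example 12 follows the standard Flink WordCount example; a sketch consistent with that example (not necessarily the verbatim class from this repository) is:

// Sketch of the Tokenizer referenced above: splits each line into lowercase
// words on non-word characters and emits a (word, 1) pair per word.
public static final class Tokenizer implements FlatMapFunction<String, Tuple2<String, Integer>> {
    @Override
    public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
        for (String token : value.toLowerCase().split("\\W+")) {
            if (token.length() > 0) {
                out.collect(new Tuple2<>(token, 1));
            }
        }
    }
}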

Example 13: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
public static void main(final String[] args) throws Exception {

		if (!parseParameters(args)) {
			return;
		}

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// get input data
		final DataStream<Sentence> text = getTextDataStream(env);

		final DataStream<Tuple2<String, Integer>> counts = text
				// split up the lines in pairs (2-tuples) containing: (word,1)
				// this is done by a bolt that is wrapped accordingly
				.transform("BoltTokenizerPojo",
						TypeExtractor.getForObject(new Tuple2<String, Integer>("", 0)),
						new BoltWrapper<Sentence, Tuple2<String, Integer>>(new BoltTokenizerByName()))
				// group by the tuple field "0" and sum up tuple field "1"
				.keyBy(0).sum(1);

		// emit result
		if (fileOutput) {
			counts.writeAsText(outputPath);
		} else {
			counts.print();
		}

		// execute program
		env.execute("Streaming WordCount with POJO bolt tokenizer");
	}
 
Developer ID: axbaretto, Project: flink, Lines of code: 32, Source: BoltTokenizerWordCountPojo.java

Example 14: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class on which the method depends
public static void main(final String[] args) throws Exception {

		if (!parseParameters(args)) {
			return;
		}

		// set up the execution environment
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// get input data
		final DataStream<Tuple1<String>> text = getTextDataStream(env);

		final DataStream<Tuple2<String, Integer>> counts = text
				// split up the lines in pairs (2-tuples) containing: (word,1)
				// this is done by a Storm bolt that is wrapped accordingly
				.transform(
						"BoltTokenizerWithNames",
						TypeExtractor.getForObject(new Tuple2<String, Integer>("", 0)),
						new BoltWrapper<Tuple1<String>, Tuple2<String, Integer>>(
								new BoltTokenizerByName(), new Fields("sentence")))
				// group by the tuple field "0" and sum up tuple field "1"
				.keyBy(0).sum(1);

		// emit result
		if (fileOutput) {
			counts.writeAsText(outputPath);
		} else {
			counts.print();
		}

		// execute program
		env.execute("Streaming WordCount with schema bolt tokenizer");
	}
 
Developer ID: axbaretto, Project: flink, Lines of code: 34, Source: BoltTokenizerWordCountWithNames.java


Note: the org.apache.flink.streaming.api.datastream.DataStream.writeAsText examples in this article were collected from open-source code hosted on platforms such as GitHub and MSDocs. The code fragments are copyright their original authors; consult each project's license before using or redistributing them.