本文整理汇总了Java中org.apache.flink.streaming.api.datastream.DataStream.writeAsCsv方法的典型用法代码示例。如果您正苦于以下问题:Java DataStream.writeAsCsv方法的具体用法?Java DataStream.writeAsCsv怎么用?Java DataStream.writeAsCsv使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.flink.streaming.api.datastream.DataStream的用法示例。
在下文中一共展示了DataStream.writeAsCsv方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testSimple
import org.apache.flink.streaming.api.datastream.DataStream; //导入方法依赖的package包/类
@Test
public void testSimple() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);

    // Feed the network several hundred full passes over the week so it can
    // learn the repeating day-of-week pattern.
    final int cycles = 400;
    final int daysPerCycle = 7; // one record per day of the week

    List<TestHarness.DayDemoRecord> records = IntStream.range(0, cycles)
            .flatMap(cycle -> IntStream.range(0, daysPerCycle))
            .mapToObj(TestHarness.DayDemoRecord::new)
            .collect(Collectors.toList());
    DataStream<TestHarness.DayDemoRecord> input = env.fromCollection(records);

    // Learn over the input, resetting sequence state at the start of each
    // week, and project out (day, predicted value, anomaly score).
    // NOTE: anonymous classes (not lambdas) are kept so Flink's type
    // extraction can resolve the generic parameters.
    DataStream<Tuple3<Integer,Double,Double>> result = HTM
            .learn(input, new TestHarness.DayDemoNetworkFactory())
            .resetOn(new ResetFunction<TestHarness.DayDemoRecord>() {
                @Override
                public boolean reset(TestHarness.DayDemoRecord value) throws Exception {
                    // A new week begins whenever the day index wraps to 0.
                    return value.dayOfWeek == 0;
                }
            })
            .select(new InferenceSelectFunction<TestHarness.DayDemoRecord, Tuple3<Integer,Double,Double>>() {
                @Override
                public Tuple3<Integer,Double,Double> select(Tuple2<TestHarness.DayDemoRecord,NetworkInference> inference) throws Exception {
                    return new Tuple3<>(
                            inference.f0.dayOfWeek,
                            (Double) inference.f1.getClassification("dayOfWeek").getMostProbableValue(1),
                            inference.f1.getAnomalyScore());
                }
            });

    // Persist for later verification, echo to stdout, and run the job.
    result.writeAsCsv(resultPath, FileSystem.WriteMode.OVERWRITE);
    result.print();
    env.execute();
}
示例2: testProgram
import org.apache.flink.streaming.api.datastream.DataStream; //导入方法依赖的package包/类
@Override
protected void testProgram() throws Exception {
    // Classic streaming word count over the bundled sample text,
    // written to resultPath via the CSV sink under test.
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStream<String> lines = env.fromElements(WordCountData.TEXT);

    DataStream<Tuple2<String, Integer>> wordCounts = lines
            .flatMap(new Tokenizer()) // emit (word, 1) pairs
            .keyBy(0)                 // group by the word itself
            .sum(1);                  // running count per word

    wordCounts.writeAsCsv(resultPath);
    env.execute("WriteAsCsvTest");
}
示例3: main
import org.apache.flink.streaming.api.datastream.DataStream; //导入方法依赖的package包/类
public static void main(String[] args) throws Exception {
    // Bail out early if the command-line arguments are unusable.
    if (!parseParameters(args)) {
        return;
    }

    // set up the execution environment
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // get input data
    DataStream<String> lines = getTextDataStream(env);

    DataStream<Tuple2<String, Integer>> wordCounts = lines
            // normalize to lower case and split each line into tokens
            .map(line -> line.toLowerCase().split("\\W+"))
            // emit a (word, 1) pair for every non-empty token
            .flatMap((String[] tokens, Collector<Tuple2<String, Integer>> out) -> {
                for (String token : tokens) {
                    if (!token.isEmpty()) {
                        out.collect(new Tuple2<>(token, 1));
                    }
                }
            })
            // group by the word (tuple field 0) and sum the counts (field 1)
            .keyBy(0)
            .sum(1);

    // emit result: file sink when requested, stdout otherwise
    if (fileOutput) {
        wordCounts.writeAsCsv(outputPath);
    } else {
        wordCounts.print();
    }

    // execute program
    env.execute("Streaming WordCount Example");
}