本文整理匯總了Java中org.apache.flink.streaming.api.datastream.DataStream.writeAsCsv方法的典型用法代碼示例。如果您正苦於以下問題:Java DataStream.writeAsCsv方法的具體用法?Java DataStream.writeAsCsv怎麽用?Java DataStream.writeAsCsv使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.flink.streaming.api.datastream.DataStream的用法示例。
在下文中一共展示了DataStream.writeAsCsv方法的3個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Java代碼示例。
示例1: testSimple
import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
@Test
public void testSimple() throws Exception {
// Run the whole pipeline with parallelism 1 so a single task writes the CSV output.
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(1);
final int NUM_CYCLES = 400;
final int INPUT_GROUP_COUNT = 7; // Days of Week
// Build the bounded input: NUM_CYCLES repetitions of the day indices 0..6,
// one DayDemoRecord per day, in order.
List<TestHarness.DayDemoRecord> records = IntStream.range(0, NUM_CYCLES)
.flatMap(c -> IntStream.range(0, INPUT_GROUP_COUNT))
.mapToObj(day -> new TestHarness.DayDemoRecord(day))
.collect(Collectors.toList());
DataStream<TestHarness.DayDemoRecord> input = env.fromCollection(records);
// Learn an HTM network over the record stream, resetting at the start of each
// cycle (dayOfWeek == 0), then project every inference to a
// (dayOfWeek, most-probable classified value, anomaly score) tuple.
// NOTE(review): anonymous classes (not lambdas) are used here, presumably so
// Flink's type extraction can resolve the generic parameters — keep as-is.
DataStream<Tuple3<Integer,Double,Double>> result = HTM
.learn(input, new TestHarness.DayDemoNetworkFactory())
.resetOn(new ResetFunction<TestHarness.DayDemoRecord>() {
@Override
public boolean reset(TestHarness.DayDemoRecord value) throws Exception {
return value.dayOfWeek == 0;
}
})
.select(new InferenceSelectFunction<TestHarness.DayDemoRecord, Tuple3<Integer,Double,Double>>() {
@Override
public Tuple3<Integer,Double,Double> select(Tuple2<TestHarness.DayDemoRecord,NetworkInference> inference) throws Exception {
return new Tuple3<>(
inference.f0.dayOfWeek,
(Double) inference.f1.getClassification("dayOfWeek").getMostProbableValue(1),
inference.f1.getAnomalyScore());
}
});
// Persist the tuples as CSV (overwriting any output from a previous run),
// echo them to stdout, and run the job.
result.writeAsCsv(resultPath, FileSystem.WriteMode.OVERWRITE);
result.print();
env.execute();
}
示例2: testProgram
import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
@Override
protected void testProgram() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // Source: the bundled word-count sample text as a one-element stream.
    DataStream<String> lines = env.fromElements(WordCountData.TEXT);

    // Tokenize into (word, 1) pairs, then sum the counts per word.
    DataStream<Tuple2<String, Integer>> wordCounts =
            lines.flatMap(new Tokenizer())
                 .keyBy(0)
                 .sum(1);

    // Sink under test: write the running counts to resultPath as CSV.
    wordCounts.writeAsCsv(resultPath);

    env.execute("WriteAsCsvTest");
}
示例3: main
import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    if (!parseParameters(args)) {
        return;
    }

    // Obtain the streaming execution environment.
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // Source: either a text file or the built-in sample data, per the parsed parameters.
    DataStream<String> lines = getTextDataStream(env);

    DataStream<Tuple2<String, Integer>> wordCounts = lines
            // lower-case each line and break it on non-word characters
            .map(line -> line.toLowerCase().split("\\W+"))
            // emit a (word, 1) pair for every non-empty token
            .flatMap((String[] tokens, Collector<Tuple2<String, Integer>> out) -> {
                for (String token : tokens) {
                    if (token.length() > 0) {
                        out.collect(new Tuple2<>(token, 1));
                    }
                }
            })
            // group by the word (tuple field 0) and sum the counts (tuple field 1)
            .keyBy(0)
            .sum(1);

    // Sink: CSV file when an output path was given, otherwise stdout.
    if (fileOutput) {
        wordCounts.writeAsCsv(outputPath);
    } else {
        wordCounts.print();
    }

    env.execute("Streaming WordCount Example");
}