Java DataStream.map Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.streaming.api.datastream.DataStream.map. If you are unsure what DataStream.map does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.flink.streaming.api.datastream.DataStream.


Below are 13 code examples of the DataStream.map method, sorted by popularity by default.
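
Before diving in, here is a minimal, self-contained sketch of the map pattern that every example below follows. It is not taken from any of the cited projects; the element values and job name are illustrative:

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class MapSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // map applies a user function to each element, producing a new DataStream.
        DataStream<String> asText = env.fromElements(1, 2, 3)
                .map(new MapFunction<Integer, String>() {
                    @Override
                    public String map(Integer value) {
                        return "value=" + value;
                    }
                });
        asText.print();
        env.execute("map sketch");
    }
}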

Example 1: testUnboundedPojoSourceButReturnInvalidTupleType

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
@Test(expected = InvalidTypesException.class)
public void testUnboundedPojoSourceButReturnInvalidTupleType() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Event> input = env.addSource(new RandomEventSource(5).closeDelay(1500));

    DataStream<Tuple5<Long, Integer, String, Double, Long>> output = SiddhiCEP
        .define("inputStream", input, "id", "name", "price", "timestamp")
        .cql("from inputStream select timestamp, id, name, price insert into  outputStream")
        .returns("outputStream");

    DataStream<Long> following = output.map(new MapFunction<Tuple5<Long, Integer, String, Double, Long>, Long>() {
        @Override
        public Long map(Tuple5<Long, Integer, String, Double, Long> value) throws Exception {
            return value.f0;
        }
    });

    String resultPath = tempFolder.newFile().toURI().toString();
    following.writeAsText(resultPath, FileSystem.WriteMode.OVERWRITE);
    env.execute();
    assertEquals(5, getLineCount(resultPath));
}
 
Developer: apache, Project: bahir-flink, Lines of code: 24, Source: SiddhiCEPITCase.java

Example 2: testUnboundedPojoSourceAndReturnTuple

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
@Test
public void testUnboundedPojoSourceAndReturnTuple() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<Event> input = env.addSource(new RandomEventSource(5));

	DataStream<Tuple4<Long, Integer, String, Double>> output = SiddhiCEP
		.define("inputStream", input, "id", "name", "price", "timestamp")
		.cql("from inputStream select timestamp, id, name, price insert into  outputStream")
		.returns("outputStream");

	DataStream<Integer> following = output.map(new MapFunction<Tuple4<Long, Integer, String, Double>, Integer>() {
		@Override
		public Integer map(Tuple4<Long, Integer, String, Double> value) throws Exception {
			return value.f1;
		}
	});
	String resultPath = tempFolder.newFile().toURI().toString();
	following.writeAsText(resultPath, FileSystem.WriteMode.OVERWRITE);
	env.execute();
	assertEquals(5, getLineCount(resultPath));
}
 
Developer: haoch, Project: flink-siddhi, Lines of code: 22, Source: SiddhiCEPITCase.java
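
For reference, the anonymous MapFunction in the two Siddhi examples above can also be written as a lambda. Because Java erases the tuple's generic parameters, the lambda form usually needs an explicit returns(...) hint; the sketch below targets the same output stream but is not part of the original tests:

// Requires: import org.apache.flink.api.common.typeinfo.Types;
// Lambda equivalent of the anonymous MapFunction above. Types.INT restores
// the result type information that erasure strips from the lambda.
DataStream<Integer> following = output
        .map(value -> value.f1)
        .returns(Types.INT);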

Example 3: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	SiteToSiteClientConfig clientConfig = new SiteToSiteClient.Builder()
			.url("http://localhost:8080/nifi")
			.portName("Data for Flink")
			.requestBatchCount(5)
			.buildConfig();

	SourceFunction<NiFiDataPacket> nifiSource = new NiFiSource(clientConfig);
	DataStream<NiFiDataPacket> streamSource = env.addSource(nifiSource).setParallelism(2);

	DataStream<String> dataStream = streamSource.map(new MapFunction<NiFiDataPacket, String>() {
		@Override
		public String map(NiFiDataPacket value) throws Exception {
			return new String(value.getContent(), Charset.defaultCharset());
		}
	});

	dataStream.print();
	env.execute();
}
 
Developer: axbaretto, Project: flink, Lines of code: 23, Source: NiFiSourceTopologyExample.java

Example 4: testProgram

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	env.enableCheckpointing(20);
	env.setParallelism(12);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	BucketingSink<String> sink = new BucketingSink<String>(outPath)
			.setBucketer(new BasePathBucketer<String>())
			.setBatchSize(10000)
			.setValidLengthPrefix("")
			.setPendingPrefix("")
			.setPendingSuffix(PENDING_SUFFIX)
			.setInProgressSuffix(IN_PROGRESS_SUFFIX);

	mapped.addSink(sink);

}
 
Developer: axbaretto, Project: flink, Lines of code: 25, Source: BucketingSinkFaultToleranceITCase.java
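
OnceFailingIdentityMapper is defined elsewhere in the Flink test sources and is not shown on this page. As a purely hypothetical reconstruction (the real class may differ), it would be an identity map that throws exactly once, so that the sink's checkpoint-recovery path is exercised:

// Hypothetical sketch, not the actual Flink test class.
// Requires: import org.apache.flink.api.common.functions.RichMapFunction;
public static class OnceFailingIdentityMapper extends RichMapFunction<String, String> {
	private static volatile boolean hasFailed = false;
	private final long failAfter;
	private long count;

	public OnceFailingIdentityMapper(long numElements) {
		this.failAfter = numElements / 2;
	}

	@Override
	public String map(String value) throws Exception {
		// Fail once, roughly mid-stream, then act as the identity function.
		if (!hasFailed && ++count >= failAfter) {
			hasFailed = true;
			throw new RuntimeException("Artificial failure for recovery testing");
		}
		return value;
	}
}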

Example 5: testProgram

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	env.enableCheckpointing(20);
	env.setParallelism(12);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	RollingSink<String> sink = new RollingSink<String>(outPath)
			.setBucketer(new NonRollingBucketer())
			.setBatchSize(10000)
			.setValidLengthPrefix("")
			.setPendingPrefix("")
			.setPendingSuffix(PENDING_SUFFIX)
			.setInProgressSuffix(IN_PROGRESS_SUFFIX);

	mapped.addSink(sink);

}
 
Developer: axbaretto, Project: flink, Lines of code: 25, Source: RollingSinkFaultToleranceITCase.java

Example 6: testProgram

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	int PARALLELISM = 12;

	env.enableCheckpointing(Long.MAX_VALUE);
	env.setParallelism(PARALLELISM);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	BucketingSink<String> sink = new BucketingSink<String>(outPath)
			.setBucketer(new BasePathBucketer<String>())
			.setBatchSize(5000)
			.setValidLengthPrefix("")
			.setPendingPrefix("");

	mapped.addSink(sink);

}
 
Developer: axbaretto, Project: flink, Lines of code: 25, Source: BucketingSinkFaultTolerance2ITCase.java

Example 7: testProgram

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	int PARALLELISM = 12;

	env.enableCheckpointing(20);
	env.setParallelism(PARALLELISM);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	BucketingSink<String> sink = new BucketingSink<String>(outPath)
			.setBucketer(new BasePathBucketer<String>())
			.setBatchSize(10000)
			.setValidLengthPrefix("")
			.setPendingPrefix("")
			.setPendingSuffix(PENDING_SUFFIX)
			.setInProgressSuffix(IN_PROGRESS_SUFFIX);

	mapped.addSink(sink);

}
 
Developer: axbaretto, Project: flink, Lines of code: 27, Source: BucketingSinkFaultToleranceITCase.java

Example 8: testProgram

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	int PARALLELISM = 12;

	env.enableCheckpointing(Long.MAX_VALUE);
	env.setParallelism(PARALLELISM);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	RollingSink<String> sink = new RollingSink<String>(outPath)
			.setBucketer(new NonRollingBucketer())
			.setBatchSize(5000)
			.setValidLengthPrefix("")
			.setPendingPrefix("");

	mapped.addSink(sink);

}
 
Developer: axbaretto, Project: flink, Lines of code: 25, Source: RollingSinkFaultTolerance2ITCase.java

Example 9: testProgram

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	int PARALLELISM = 12;

	env.enableCheckpointing(20);
	env.setParallelism(PARALLELISM);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	RollingSink<String> sink = new RollingSink<String>(outPath)
			.setBucketer(new NonRollingBucketer())
			.setBatchSize(10000)
			.setValidLengthPrefix("")
			.setPendingPrefix("")
			.setPendingSuffix(PENDING_SUFFIX)
			.setInProgressSuffix(IN_PROGRESS_SUFFIX);

	mapped.addSink(sink);

}
 
Developer: axbaretto, Project: flink, Lines of code: 27, Source: RollingSinkFaultToleranceITCase.java

Example 10: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

		final StreamExecutionEnvironment env =
				StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(1);
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

		// generate the sawtooth wave datapoints
		// a total of 40 steps, with 100 ms interval between each point = 4 sec period
		DataStream<UnkeyedDataPoint> originalSawTooth =
				env.addSource(new SawtoothSource(100, 40, 1));

		// attach key to the generated sawtooth
		DataStream<KeyedDataPoint> sawtoothKeyed = originalSawTooth
				.map(new AttachKeyToDataPoint("sawtooth"));

		// map the generated sawtooth to a sine wave,
		// and also attach key to the sine wave
		DataStream<KeyedDataPoint> sinewaveKeyed = originalSawTooth
				.map(new SawtoothToSinewave())
				.map(new AttachKeyToDataPoint("sinewave"));

		// we want to send both sawtooth and sine to InfluxDB, so union
		DataStream<KeyedDataPoint> completeKeyedStream =
				sawtoothKeyed.union(sinewaveKeyed);
		completeKeyedStream.addSink(new InfluxDBSink("sensors"));

		// windowing to sum up the datapoint values of the waves (key by "sawtooth" and "sinewave")
		completeKeyedStream
				.keyBy("key")
				.timeWindow(Time.seconds(4)) // 40 data points, 100 ms interval = 4 seconds
				.sum("value")
			.addSink(new InfluxDBSink("sensors-summed"));

		env.execute();
	}
 
Developer: flink-taiwan, Project: jcconf2016-workshop, Lines of code: 37, Source: SinewaveSum.java
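
Note how the sine wave above is produced by chaining two map calls: each map returns a new DataStream, so small single-purpose functions compose cleanly. Here is a hedged generic sketch of the same chaining pattern (the conversion and labelling steps are illustrative, not from the workshop code):

// Requires: import org.apache.flink.api.common.typeinfo.Types;
DataStream<String> labelled = env.fromElements(212.0, 98.6)
		.map(f -> (f - 32) / 1.8)     // step 1: transform the value
		.returns(Types.DOUBLE)
		.map(c -> "celsius=" + c)     // step 2: attach a label
		.returns(Types.STRING);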

Example 11: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    List<String> dataList = new ArrayList<>();
    for (int i = 0; i < N; ++i) {
        String id = "server" + String.valueOf(i);
        dataList.add("cpu#" + id);
        dataList.add("mem#" + id);
        dataList.add("disk#" + id);
    }
    DataStream<String> source = env.fromElements(dataList.toArray(new String[0]));

    DataStream<InfluxDBPoint> dataStream = source.map(
            new RichMapFunction<String, InfluxDBPoint>() {
                @Override
                public InfluxDBPoint map(String s) throws Exception {
                    String[] input = s.split("#");

                    String measurement = input[0];
                    long timestamp = System.currentTimeMillis();

                    HashMap<String, String> tags = new HashMap<>();
                    tags.put("host", input[1]);
                    tags.put("region", "region#" + String.valueOf(input[1].hashCode() % 20));

                    HashMap<String, Object> fields = new HashMap<>();
                    fields.put("value1", input[1].hashCode() % 100);
                    fields.put("value2", input[1].hashCode() % 50);

                    return new InfluxDBPoint(measurement, timestamp, tags, fields);
                }
            }
    );

    InfluxDBConfig influxDBConfig = InfluxDBConfig.builder("http://localhost:8086", "root", "root", "db_flink_test")
            .batchActions(1000)
            .flushDuration(100, TimeUnit.MILLISECONDS)
            .enableGzip(true)
            .build();

    dataStream.addSink(new InfluxDBSink(influxDBConfig));

    env.execute("InfluxDB Sink Example");
}
 
Developer: apache, Project: bahir-flink, Lines of code: 49, Source: InfluxDBSinkExample.java
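
The InfluxDB example uses RichMapFunction rather than the plain MapFunction seen earlier. The rich variant adds open()/close() lifecycle hooks and access to the runtime context, which matters when the mapping needs per-task setup. A minimal hedged sketch (the timestamp-formatting task is illustrative):

// Requires: import org.apache.flink.api.common.functions.RichMapFunction;
//           import org.apache.flink.configuration.Configuration;
public static class TimestampingMapper extends RichMapFunction<String, String> {
    // SimpleDateFormat is not thread-safe; creating it in open() gives each
    // parallel task instance its own copy.
    private transient java.text.SimpleDateFormat formatter;

    @Override
    public void open(Configuration parameters) {
        formatter = new java.text.SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
    }

    @Override
    public String map(String value) {
        return formatter.format(new java.util.Date()) + " " + value;
    }
}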

Example 12: testNonRollingSequenceFileWithoutCompressionWriter

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
/**
 * This tests {@link SequenceFileWriter}
 * with non-rolling output and without compression.
 */
@Test
public void testNonRollingSequenceFileWithoutCompressionWriter() throws Exception {
	final int numElements = 20;
	final String outPath = hdfsURI + "/seq-no-comp-non-rolling-out";
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(2);

	DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(numElements))
			.broadcast()
			.filter(new OddEvenFilter());

	DataStream<Tuple2<IntWritable, Text>> mapped = source.map(new MapFunction<Tuple2<Integer, String>, Tuple2<IntWritable, Text>>() {
		private static final long serialVersionUID = 1L;

		@Override
		public Tuple2<IntWritable, Text> map(Tuple2<Integer, String> value) throws Exception {
			return Tuple2.of(new IntWritable(value.f0), new Text(value.f1));
		}
	});

	RollingSink<Tuple2<IntWritable, Text>> sink = new RollingSink<Tuple2<IntWritable, Text>>(outPath)
			.setWriter(new SequenceFileWriter<IntWritable, Text>())
			.setBucketer(new NonRollingBucketer())
			.setPartPrefix("part")
			.setPendingPrefix("")
			.setPendingSuffix("");

	mapped.addSink(sink);

	env.execute("RollingSink String Write Test");

	FSDataInputStream inStream = dfs.open(new Path(outPath + "/part-0-0"));

	SequenceFile.Reader reader = new SequenceFile.Reader(inStream,
			1000,
			0,
			100000,
			new Configuration());

	IntWritable intWritable = new IntWritable();
	Text txt = new Text();

	for (int i = 0; i < numElements; i += 2) {
		reader.next(intWritable, txt);
		Assert.assertEquals(i, intWritable.get());
		Assert.assertEquals("message #" + i, txt.toString());
	}

	reader.close();
	inStream.close();

	inStream = dfs.open(new Path(outPath + "/part-1-0"));

	reader = new SequenceFile.Reader(inStream,
			1000,
			0,
			100000,
			new Configuration());

	for (int i = 1; i < numElements; i += 2) {
		reader.next(intWritable, txt);
		Assert.assertEquals(i, intWritable.get());
		Assert.assertEquals("message #" + i, txt.toString());
	}

	reader.close();
	inStream.close();
}
 
Developer: axbaretto, Project: flink, Lines of code: 73, Source: RollingSinkITCase.java

Example 13: testNonRollingSequenceFileWithCompressionWriter

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
/**
 * This tests {@link SequenceFileWriter}
 * with non-rolling output but with compression.
 */
@Test
public void testNonRollingSequenceFileWithCompressionWriter() throws Exception {
	final int numElements = 20;
	final String outPath = hdfsURI + "/seq-non-rolling-out";
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(2);

	DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(numElements))
			.broadcast()
			.filter(new OddEvenFilter());

	DataStream<Tuple2<IntWritable, Text>> mapped = source.map(new MapFunction<Tuple2<Integer, String>, Tuple2<IntWritable, Text>>() {
		private static final long serialVersionUID = 1L;

		@Override
		public Tuple2<IntWritable, Text> map(Tuple2<Integer, String> value) throws Exception {
			return Tuple2.of(new IntWritable(value.f0), new Text(value.f1));
		}
	});

	RollingSink<Tuple2<IntWritable, Text>> sink = new RollingSink<Tuple2<IntWritable, Text>>(outPath)
			.setWriter(new SequenceFileWriter<IntWritable, Text>("Default", SequenceFile.CompressionType.BLOCK))
			.setBucketer(new NonRollingBucketer())
			.setPartPrefix("part")
			.setPendingPrefix("")
			.setPendingSuffix("");

	mapped.addSink(sink);

	env.execute("RollingSink String Write Test");

	FSDataInputStream inStream = dfs.open(new Path(outPath + "/part-0-0"));

	SequenceFile.Reader reader = new SequenceFile.Reader(inStream,
			1000,
			0,
			100000,
			new Configuration());

	IntWritable intWritable = new IntWritable();
	Text txt = new Text();

	for (int i = 0; i < numElements; i += 2) {
		reader.next(intWritable, txt);
		Assert.assertEquals(i, intWritable.get());
		Assert.assertEquals("message #" + i, txt.toString());
	}

	reader.close();
	inStream.close();

	inStream = dfs.open(new Path(outPath + "/part-1-0"));

	reader = new SequenceFile.Reader(inStream,
			1000,
			0,
			100000,
			new Configuration());

	for (int i = 1; i < numElements; i += 2) {
		reader.next(intWritable, txt);
		Assert.assertEquals(i, intWritable.get());
		Assert.assertEquals("message #" + i, txt.toString());
	}

	reader.close();
	inStream.close();
}
 
Developer: axbaretto, Project: flink, Lines of code: 73, Source: RollingSinkITCase.java


Note: the org.apache.flink.streaming.api.datastream.DataStream.map examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For redistribution and use, refer to the license of the corresponding project; do not reproduce without permission.