

Java DataStream.map Method Code Examples

This article collects typical usage examples of the Java method org.apache.flink.streaming.api.datastream.DataStream.map, gathered from open-source projects. If you are unsure what DataStream.map does or how to use it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.flink.streaming.api.datastream.DataStream.


Below are 11 code examples of the DataStream.map method, ordered by popularity.
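Before diving into the project examples, here is a minimal, self-contained sketch of the method's basic usage (the class name MapExample and the sample data are illustrative, not taken from any of the projects below, and exact APIs can differ slightly between Flink versions). map applies a one-to-one transformation to each stream element; with an anonymous MapFunction the result type is inferred from the class signature, while a Java lambda usually needs an explicit returns(...) hint because of type erasure:

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class MapExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<String> words = env.fromElements("flink", "map", "example");

        // Anonymous MapFunction: the result type is inferred from the signature.
        DataStream<Integer> lengths = words.map(new MapFunction<String, Integer>() {
            @Override
            public Integer map(String value) {
                return value.length();
            }
        });

        // Lambda/method-reference variant: declare the result type explicitly
        // so it survives type erasure.
        DataStream<Integer> lengthsFromLambda = words.map(String::length).returns(Types.INT);

        lengths.print();
        lengthsFromLambda.print();
        env.execute("DataStream.map example");
    }
}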

Example 1: testUnboundedPojoSourceButReturnInvalidTupleType

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Test(expected = InvalidTypesException.class)
public void testUnboundedPojoSourceButReturnInvalidTupleType() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Event> input = env.addSource(new RandomEventSource(5).closeDelay(1500));

    DataStream<Tuple5<Long, Integer, String, Double, Long>> output = SiddhiCEP
        .define("inputStream", input, "id", "name", "price", "timestamp")
        .cql("from inputStream select timestamp, id, name, price insert into  outputStream")
        .returns("outputStream");

    DataStream<Long> following = output.map(new MapFunction<Tuple5<Long, Integer, String, Double, Long>, Long>() {
        @Override
        public Long map(Tuple5<Long, Integer, String, Double, Long> value) throws Exception {
            return value.f0;
        }
    });

    String resultPath = tempFolder.newFile().toURI().toString();
    following.writeAsText(resultPath, FileSystem.WriteMode.OVERWRITE);
    env.execute();
    assertEquals(5, getLineCount(resultPath));
}
 
Developer: apache | Project: bahir-flink | Lines: 24 | Source: SiddhiCEPITCase.java
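A note on why this test expects a failure: the CQL statement selects only four fields (timestamp, id, name, price), while the result stream is declared as a five-field Tuple5, so Flink's type extraction should fail with the InvalidTypesException named in the @Test annotation before the job produces any output; the map and the assertion after env.execute() are not expected to be reached.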

Example 2: testUnboundedPojoSourceAndReturnTuple

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Test
public void testUnboundedPojoSourceAndReturnTuple() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	DataStream<Event> input = env.addSource(new RandomEventSource(5));

	DataStream<Tuple4<Long, Integer, String, Double>> output = SiddhiCEP
		.define("inputStream", input, "id", "name", "price", "timestamp")
		.cql("from inputStream select timestamp, id, name, price insert into  outputStream")
		.returns("outputStream");

	DataStream<Integer> following = output.map(new MapFunction<Tuple4<Long, Integer, String, Double>, Integer>() {
		@Override
		public Integer map(Tuple4<Long, Integer, String, Double> value) throws Exception {
			return value.f1;
		}
	});
	String resultPath = tempFolder.newFile().toURI().toString();
	following.writeAsText(resultPath, FileSystem.WriteMode.OVERWRITE);
	env.execute();
	assertEquals(5, getLineCount(resultPath));
}
 
Developer: haoch | Project: flink-siddhi | Lines: 22 | Source: SiddhiCEPITCase.java
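This is the matching positive case for Example 1: the four selected fields line up with the declared Tuple4, so the job runs to completion and the test asserts that the five generated events yield five lines of output.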

Example 3: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	SiteToSiteClientConfig clientConfig = new SiteToSiteClient.Builder()
			.url("http://localhost:8080/nifi")
			.portName("Data for Flink")
			.requestBatchCount(5)
			.buildConfig();

	SourceFunction<NiFiDataPacket> nifiSource = new NiFiSource(clientConfig);
	DataStream<NiFiDataPacket> streamSource = env.addSource(nifiSource).setParallelism(2);

	DataStream<String> dataStream = streamSource.map(new MapFunction<NiFiDataPacket, String>() {
		@Override
		public String map(NiFiDataPacket value) throws Exception {
			return new String(value.getContent(), Charset.defaultCharset());
		}
	});

	dataStream.print();
	env.execute();
}
 
Developer: axbaretto | Project: flink | Lines: 23 | Source: NiFiSourceTopologyExample.java

Example 4: testProgram

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	env.enableCheckpointing(20);
	env.setParallelism(12);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	BucketingSink<String> sink = new BucketingSink<String>(outPath)
			.setBucketer(new BasePathBucketer<String>())
			.setBatchSize(10000)
			.setValidLengthPrefix("")
			.setPendingPrefix("")
			.setPendingSuffix(PENDING_SUFFIX)
			.setInProgressSuffix(IN_PROGRESS_SUFFIX);

	mapped.addSink(sink);

}
 
Developer: axbaretto | Project: flink | Lines: 25 | Source: BucketingSinkFaultToleranceITCase.java
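This and the following testProgram variants all follow the same fault-tolerance pattern: a generating source, an identity mapper that deliberately fails once (OnceFailingIdentityMapper), and a file sink. With checkpointing every 20 ms the job restarts after the injected failure, and the pending/in-progress suffixes mark files the sink has not yet committed; the surrounding test harness (not shown here) presumably verifies that the output is still exactly-once.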

Example 5: testProgram

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	env.enableCheckpointing(20);
	env.setParallelism(12);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	RollingSink<String> sink = new RollingSink<String>(outPath)
			.setBucketer(new NonRollingBucketer())
			.setBatchSize(10000)
			.setValidLengthPrefix("")
			.setPendingPrefix("")
			.setPendingSuffix(PENDING_SUFFIX)
			.setInProgressSuffix(IN_PROGRESS_SUFFIX);

	mapped.addSink(sink);

}
 
Developer: axbaretto | Project: flink | Lines: 25 | Source: RollingSinkFaultToleranceITCase.java

Example 6: testProgram

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	int PARALLELISM = 12;

	env.enableCheckpointing(Long.MAX_VALUE);
	env.setParallelism(PARALLELISM);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	BucketingSink<String> sink = new BucketingSink<String>(outPath)
			.setBucketer(new BasePathBucketer<String>())
			.setBatchSize(5000)
			.setValidLengthPrefix("")
			.setPendingPrefix("");

	mapped.addSink(sink);

}
 
Developer: axbaretto | Project: flink | Lines: 25 | Source: BucketingSinkFaultTolerance2ITCase.java

Example 7: testProgram

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
@Override
public void testProgram(StreamExecutionEnvironment env) {
	assertTrue("Broken test setup", NUM_STRINGS % 40 == 0);

	int PARALLELISM = 12;

	env.enableCheckpointing(Long.MAX_VALUE);
	env.setParallelism(PARALLELISM);
	env.disableOperatorChaining();

	DataStream<String> stream = env.addSource(new StringGeneratingSourceFunction(NUM_STRINGS)).startNewChain();

	DataStream<String> mapped = stream
			.map(new OnceFailingIdentityMapper(NUM_STRINGS));

	RollingSink<String> sink = new RollingSink<String>(outPath)
			.setBucketer(new NonRollingBucketer())
			.setBatchSize(5000)
			.setValidLengthPrefix("")
			.setPendingPrefix("");

	mapped.addSink(sink);

}
 
Developer: axbaretto | Project: flink | Lines: 25 | Source: RollingSinkFaultTolerance2ITCase.java

Example 8: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {

		final StreamExecutionEnvironment env =
				StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(1);
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

		// generate the sawtooth wave datapoints
		// a total of 40 steps, with 100 ms interval between each point = 4 sec period
		DataStream<UnkeyedDataPoint> originalSawTooth =
				env.addSource(new SawtoothSource(100, 40, 1));

		// attach key to the generated sawtooth
		DataStream<KeyedDataPoint> sawtoothKeyed = originalSawTooth
				.map(new AttachKeyToDataPoint("sawtooth"));

		// map the generated sawtooth to a sine wave,
		// and also attach key to the sine wave
		DataStream<KeyedDataPoint> sinewaveKeyed = originalSawTooth
				.map(new SawtoothToSinewave())
				.map(new AttachKeyToDataPoint("sinewave"));

		// we want to send both sawtooth and sine to InfluxDB, so union
		DataStream<KeyedDataPoint> completeKeyedStream =
				sawtoothKeyed.union(sinewaveKeyed);
		completeKeyedStream.addSink(new InfluxDBSink("sensors"));

		// windowing to sum up the datapoint values of the waves (key by "sawtooth" and "sinewave")
		completeKeyedStream
				.keyBy("key")
				.timeWindow(Time.seconds(4)) // 40 data points, 100 ms interval = 4 seconds
				.sum("value")
			.addSink(new InfluxDBSink("sensors-summed"));

		env.execute();
	}
 
Developer: flink-taiwan | Project: jcconf2016-workshop | Lines: 37 | Source: SinewaveSum.java

Example 9: main

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    List<String> dataList = new ArrayList<>();
    for (int i = 0; i < N; ++i) {
        String id = "server" + String.valueOf(i);
        dataList.add("cpu#" + id);
        dataList.add("mem#" + id);
        dataList.add("disk#" + id);
    }
    DataStream<String> source = env.fromElements(dataList.toArray(new String[0]));


    DataStream<InfluxDBPoint> dataStream = source.map(
            new RichMapFunction<String, InfluxDBPoint>() {
                @Override
                public InfluxDBPoint map(String s) throws Exception {
                    String[] input = s.split("#");

                    String measurement = input[0];
                    long timestamp = System.currentTimeMillis();

                    HashMap<String, String> tags = new HashMap<>();
                    tags.put("host", input[1]);
                    tags.put("region", "region#" + String.valueOf(input[1].hashCode() % 20));

                    HashMap<String, Object> fields = new HashMap<>();
                    fields.put("value1", input[1].hashCode() % 100);
                    fields.put("value2", input[1].hashCode() % 50);

                    return new InfluxDBPoint(measurement, timestamp, tags, fields);
                }
            }
    );

    InfluxDBConfig influxDBConfig = InfluxDBConfig.builder("http://localhost:8086", "root", "root", "db_flink_test")
            .batchActions(1000)
            .flushDuration(100, TimeUnit.MILLISECONDS)
            .enableGzip(true)
            .build();

    dataStream.addSink(new InfluxDBSink(influxDBConfig));

    env.execute("InfluxDB Sink Example");
}
 
Developer: apache | Project: bahir-flink | Lines: 49 | Source: InfluxDBSinkExample.java
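Two details of the sink configuration are worth noting: batchActions(1000) together with flushDuration(100, TimeUnit.MILLISECONDS) makes the sink buffer points and flush either once 1000 have accumulated or every 100 ms, whichever comes first, and enableGzip(true) compresses the HTTP payloads sent to InfluxDB.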

Example 10: testNonRollingSequenceFileWithoutCompressionWriter

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
/**
 * This tests {@link SequenceFileWriter}
 * with non-rolling output and without compression.
 */
@Test
public void testNonRollingSequenceFileWithoutCompressionWriter() throws Exception {
	final int numElements = 20;
	final String outPath = hdfsURI + "/seq-no-comp-non-rolling-out";
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(2);

	DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(numElements))
			.broadcast()
			.filter(new OddEvenFilter());

	DataStream<Tuple2<IntWritable, Text>> mapped =  source.map(new MapFunction<Tuple2<Integer, String>, Tuple2<IntWritable, Text>>() {
		private static final long serialVersionUID = 1L;

		@Override
		public Tuple2<IntWritable, Text> map(Tuple2<Integer, String> value) throws Exception {
			return Tuple2.of(new IntWritable(value.f0), new Text(value.f1));
		}
	});

	RollingSink<Tuple2<IntWritable, Text>> sink = new RollingSink<Tuple2<IntWritable, Text>>(outPath)
			.setWriter(new SequenceFileWriter<IntWritable, Text>())
			.setBucketer(new NonRollingBucketer())
			.setPartPrefix("part")
			.setPendingPrefix("")
			.setPendingSuffix("");

	mapped.addSink(sink);

	env.execute("RollingSink String Write Test");

	FSDataInputStream inStream = dfs.open(new Path(outPath + "/part-0-0"));

	SequenceFile.Reader reader = new SequenceFile.Reader(inStream,
			1000,
			0,
			100000,
			new Configuration());

	IntWritable intWritable = new IntWritable();
	Text txt = new Text();

	for (int i = 0; i < numElements; i += 2) {
		reader.next(intWritable, txt);
		Assert.assertEquals(i, intWritable.get());
		Assert.assertEquals("message #" + i, txt.toString());
	}

	reader.close();
	inStream.close();

	inStream = dfs.open(new Path(outPath + "/part-1-0"));

	reader = new SequenceFile.Reader(inStream,
			1000,
			0,
			100000,
			new Configuration());

	for (int i = 1; i < numElements; i += 2) {
		reader.next(intWritable, txt);
		Assert.assertEquals(i, intWritable.get());
		Assert.assertEquals("message #" + i, txt.toString());
	}

	reader.close();
	inStream.close();
}
 
Developer: axbaretto | Project: flink | Lines: 73 | Source: RollingSinkITCase.java
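The verification half of this test also explains the broadcast().filter(new OddEvenFilter()) construction: with parallelism 2, each subtask receives a broadcast copy of every element and the filter keeps only the even or only the odd values, so part-0-0 should contain messages 0, 2, 4, ... and part-1-0 messages 1, 3, 5, ..., which is exactly what the two read loops assert.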

Example 11: testNonRollingSequenceFileWithCompressionWriter

import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class the method depends on
/**
 * This tests {@link SequenceFileWriter}
 * with non-rolling output but with compression.
 */
@Test
public void testNonRollingSequenceFileWithCompressionWriter() throws Exception {
	final int numElements = 20;
	final String outPath = hdfsURI + "/seq-non-rolling-out";
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(2);

	DataStream<Tuple2<Integer, String>> source = env.addSource(new TestSourceFunction(numElements))
			.broadcast()
			.filter(new OddEvenFilter());

	DataStream<Tuple2<IntWritable, Text>> mapped =  source.map(new MapFunction<Tuple2<Integer, String>, Tuple2<IntWritable, Text>>() {
		private static final long serialVersionUID = 1L;

		@Override
		public Tuple2<IntWritable, Text> map(Tuple2<Integer, String> value) throws Exception {
			return Tuple2.of(new IntWritable(value.f0), new Text(value.f1));
		}
	});

	RollingSink<Tuple2<IntWritable, Text>> sink = new RollingSink<Tuple2<IntWritable, Text>>(outPath)
			.setWriter(new SequenceFileWriter<IntWritable, Text>("Default", SequenceFile.CompressionType.BLOCK))
			.setBucketer(new NonRollingBucketer())
			.setPartPrefix("part")
			.setPendingPrefix("")
			.setPendingSuffix("");

	mapped.addSink(sink);

	env.execute("RollingSink String Write Test");

	FSDataInputStream inStream = dfs.open(new Path(outPath + "/part-0-0"));

	SequenceFile.Reader reader = new SequenceFile.Reader(inStream,
			1000,
			0,
			100000,
			new Configuration());

	IntWritable intWritable = new IntWritable();
	Text txt = new Text();

	for (int i = 0; i < numElements; i += 2) {
		reader.next(intWritable, txt);
		Assert.assertEquals(i, intWritable.get());
		Assert.assertEquals("message #" + i, txt.toString());
	}

	reader.close();
	inStream.close();

	inStream = dfs.open(new Path(outPath + "/part-1-0"));

	reader = new SequenceFile.Reader(inStream,
			1000,
			0,
			100000,
			new Configuration());

	for (int i = 1; i < numElements; i += 2) {
		reader.next(intWritable, txt);
		Assert.assertEquals(i, intWritable.get());
		Assert.assertEquals("message #" + i, txt.toString());
	}

	reader.close();
	inStream.close();
}
 
Developer: axbaretto | Project: flink | Lines: 73 | Source: RollingSinkITCase.java


Note: The org.apache.flink.streaming.api.datastream.DataStream.map examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original authors, and copyright in the code remains with those authors; consult the corresponding project's license before using or distributing the code. Please do not reproduce this article without permission.