This article collects typical usage examples of the Java class org.apache.flink.streaming.api.datastream.DataStream. If you are asking yourself what the DataStream class does, how to use it, or where to find working examples, the curated code samples below may help.
The DataStream class belongs to the org.apache.flink.streaming.api.datastream package. A total of 15 code examples of the DataStream class are shown below, sorted by popularity by default.
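All 15 examples follow the same DataStream lifecycle: obtain an execution environment, attach a source, apply transformations, add a sink, and call execute(). Here is a minimal, self-contained sketch of that pattern, assuming a socket source on localhost:9999 (the host, port, and job name are placeholders, not taken from any example below):

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class DataStreamSkeleton {
    public static void main(String[] args) throws Exception {
        // 1. Obtain the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 2. Attach a source (localhost:9999 is a placeholder assumption)
        DataStream<String> lines = env.socketTextStream("localhost", 9999);

        // 3. Apply a transformation
        DataStream<String> upper = lines.map(new MapFunction<String, String>() {
            @Override
            public String map(String value) {
                return value.toUpperCase();
            }
        });

        // 4. Add a sink
        upper.print();

        // 5. Trigger execution
        env.execute("DataStream Skeleton");
    }
}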
Example 1: main
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();

    Properties properties = new Properties();
    properties.load(new FileInputStream("src/main/resources/application.properties"));

    Properties mqttProperties = new Properties();

    // client id = a:<Organization_ID>:<App_Id>
    mqttProperties.setProperty(MQTTSource.CLIENT_ID,
            String.format("a:%s:%s",
                    properties.getProperty("Org_ID"),
                    properties.getProperty("App_Id")));

    // mqtt server url = tcp://<Org_ID>.messaging.internetofthings.ibmcloud.com:1883
    mqttProperties.setProperty(MQTTSource.URL,
            String.format("tcp://%s.messaging.internetofthings.ibmcloud.com:1883",
                    properties.getProperty("Org_ID")));

    // topic = iot-2/type/<Device_Type>/id/<Device_ID>/evt/<Event_Id>/fmt/json
    mqttProperties.setProperty(MQTTSource.TOPIC,
            String.format("iot-2/type/%s/id/%s/evt/%s/fmt/json",
                    properties.getProperty("Device_Type"),
                    properties.getProperty("Device_ID"),
                    properties.getProperty("EVENT_ID")));

    mqttProperties.setProperty(MQTTSource.USERNAME, properties.getProperty("API_Key"));
    mqttProperties.setProperty(MQTTSource.PASSWORD, properties.getProperty("APP_Authentication_Token"));

    MQTTSource mqttSource = new MQTTSource(mqttProperties);
    DataStreamSource<String> temperatureDataSource = env.addSource(mqttSource);
    DataStream<String> stream = temperatureDataSource.map((MapFunction<String, String>) s -> s);
    stream.print();

    env.execute("Temperature Analysis");
}
Example 2: testUnboundedPojoSourceButReturnInvalidTupleType
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
@Test(expected = InvalidTypesException.class)
public void testUnboundedPojoSourceButReturnInvalidTupleType() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Event> input = env.addSource(new RandomEventSource(5).closeDelay(1500));

    // Declaring a Tuple5 return type for a query that selects only four fields
    // triggers the expected InvalidTypesException.
    DataStream<Tuple5<Long, Integer, String, Double, Long>> output = SiddhiCEP
            .define("inputStream", input, "id", "name", "price", "timestamp")
            .cql("from inputStream select timestamp, id, name, price insert into outputStream")
            .returns("outputStream");

    DataStream<Long> following = output.map(new MapFunction<Tuple5<Long, Integer, String, Double, Long>, Long>() {
        @Override
        public Long map(Tuple5<Long, Integer, String, Double, Long> value) throws Exception {
            return value.f0;
        }
    });

    String resultPath = tempFolder.newFile().toURI().toString();
    following.writeAsText(resultPath, FileSystem.WriteMode.OVERWRITE);
    env.execute();
    assertEquals(5, getLineCount(resultPath));
}
Example 3: main
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "test");

    DataStream<TemperatureEvent> inputEventStream = env.addSource(
            new FlinkKafkaConsumer09<TemperatureEvent>("test", new EventDeserializationSchema(), properties));

    // match any temperature reading of 26.0 or more within a 10-second window
    Pattern<TemperatureEvent, ?> warningPattern = Pattern.<TemperatureEvent>begin("first")
            .subtype(TemperatureEvent.class).where(new FilterFunction<TemperatureEvent>() {
                private static final long serialVersionUID = 1L;

                @Override
                public boolean filter(TemperatureEvent value) {
                    return value.getTemperature() >= 26.0;
                }
            }).within(Time.seconds(10));

    DataStream<Alert> patternStream = CEP.pattern(inputEventStream, warningPattern)
            .select(new PatternSelectFunction<TemperatureEvent, Alert>() {
                private static final long serialVersionUID = 1L;

                @Override
                public Alert select(Map<String, TemperatureEvent> event) throws Exception {
                    return new Alert("Temperature Rise Detected:" + event.get("first").getTemperature()
                            + " on machine name:" + event.get("first").getMachineName());
                }
            });

    patternStream.print();
    env.execute("CEP on Temperature Sensor");
}
Example 4: main
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
public static void main(String[] args) throws Exception {
    final String input = "C:\\dev\\github\\clojured-taxi-rides\\resources\\datasets\\nycTaxiRides.gz";

    final int maxEventDelay = 60;       // events are out of order by max 60 seconds
    final int servingSpeedFactor = 600; // events of 10 minutes are served in 1 second

    // set up streaming execution environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    // start the data generator
    DataStream<TaxiRide> rides = env.addSource(
            new TaxiRideSource(input, maxEventDelay, servingSpeedFactor));

    DataStream<TaxiRide> filteredRides = rides
            // filter out rides that do not start or stop in NYC
            .filter(new NYCFilter());

    // print the filtered stream
    //filteredRides.print();
    filteredRides.writeAsText("file:///C:/Users/ht/rides_java.txt");

    // run the cleansing pipeline
    env.execute("Taxi Ride Cleansing");
}
Example 5: main
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
public static void main(String[] args) throws Exception {
    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("zookeeper.connect", "localhost:2181");
    properties.setProperty("group.id", "test");
    properties.setProperty("auto.offset.reset", "latest");

    FlinkKafkaConsumer08<DeviceEvent> flinkKafkaConsumer08 = new FlinkKafkaConsumer08<>("device-data",
            new DeviceSchema(), properties);

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<DeviceEvent> messageStream = env.addSource(flinkKafkaConsumer08);

    Map<String, String> config = new HashMap<>();
    config.put("cluster.name", "my-application");
    // This instructs the sink to emit after every element, otherwise they would be buffered
    config.put("bulk.flush.max.actions", "1");

    List<InetSocketAddress> transportAddresses = new ArrayList<>();
    transportAddresses.add(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 9300));

    messageStream.addSink(new ElasticsearchSink<DeviceEvent>(config, transportAddresses, new ESSink()));

    env.execute();
}
Developer: PacktPublishing | Project: Practical-Real-time-Processing-and-Analytics | Lines: 24 | Source: FlinkESConnector.java
Example 6: main
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
public static void main(String... args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStream<WikipediaEditEvent> edits = env.addSource(new WikipediaEditsSource());

    edits
            .timeWindowAll(Time.minutes(1))
            .apply(new AllWindowFunction<WikipediaEditEvent, Tuple3<Date, Long, Long>, TimeWindow>() {
                @Override
                public void apply(TimeWindow timeWindow, Iterable<WikipediaEditEvent> iterable, Collector<Tuple3<Date, Long, Long>> collector) throws Exception {
                    long count = 0;
                    long bytesChanged = 0;
                    for (WikipediaEditEvent event : iterable) {
                        count++;
                        bytesChanged += event.getByteDiff();
                    }
                    // emit one (window end, edit count, bytes changed) triple per minute
                    collector.collect(new Tuple3<>(new Date(timeWindow.getEnd()), count, bytesChanged));
                }
            })
            .print();

    env.execute();
}
Example 7: main
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
@SuppressWarnings("Convert2Lambda")
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment streamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStream<String> dataStream = streamExecutionEnvironment.readTextFile("file:///tmp/flink-esper-input");

    EsperStream<String> esperStream = Esper.pattern(dataStream, "select bytes from String");

    DataStream<String> result = esperStream.select(new EsperSelectFunction<String>() {
        private static final long serialVersionUID = 7093943872082195786L;

        @Override
        public String select(EventBean eventBean) throws Exception {
            return new String((byte[]) eventBean.get("bytes"));
        }
    });

    result.writeAsText("file:///tmp/flink-esper-output");

    streamExecutionEnvironment.execute("Simple Flink Esper Example");
}
Example 8: setupKayedRawMessagesStream
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
/***
 * Set up the keyed stream of a raw stream.
 *
 * @param env the streaming execution environment
 * @param parsingConfig the configuration used to parse the raw AIS messages
 * @return the keyed stream of raw messages, grouped by message ID
 */
private static KeyedStream<Tuple3<String, Long, String>, Tuple> setupKayedRawMessagesStream(
        final StreamExecutionEnvironment env, String parsingConfig) {
    DataStream<Tuple3<String, Long, String>> rawStream =
            env.addSource(
                    new FileLinesStreamSource(configs.getStringProp("aisDataSetFilePath"), parsingConfig, true))
                    .flatMap(new RawStreamMapper(parsingConfig)).setParallelism(1);

    // assign the timestamp of the AIS messages based on their timestamps
    DataStream<Tuple3<String, Long, String>> rawStreamWithTimeStamp =
            rawStream.assignTimestampsAndWatermarks(new RawMessageTimestampAssigner());

    // Construct the keyed stream (i.e., trajectories stream) of the raw messages by grouping them
    // based on the message ID (MMSI for vessels)
    KeyedStream<Tuple3<String, Long, String>, Tuple> keyedAisMessagesStream =
            rawStreamWithTimeStamp.keyBy(0).process(new RawMessagesSorter()).keyBy(0);
    return keyedAisMessagesStream;
}
Example 9: writeEnrichedStream
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
private static void writeEnrichedStream(DataStream<AisMessage> enrichedAisMessagesStream,
        String parsingConfig, boolean writeOutputStreamToFile, String outputLineDelimiter,
        String outputPath, String outputStreamTopic) throws IOException {
    if (writeOutputStreamToFile) {
        enrichedAisMessagesStream.map(new AisMessagesToCsvMapper(outputLineDelimiter)).writeAsText(
                outputPath, WriteMode.OVERWRITE);
        return;
    }

    // Write to Kafka
    Properties producerProps = AppUtils.getKafkaProducerProperties();

    FlinkKafkaProducer010Configuration<AisMessage> myProducerConfig =
            FlinkKafkaProducer010.writeToKafkaWithTimestamps(enrichedAisMessagesStream,
                    outputStreamTopic, new AisMessageCsvSchema(parsingConfig, outputLineDelimiter),
                    producerProps);
    myProducerConfig.setLogFailuresOnly(false);
    myProducerConfig.setFlushOnCheckpoint(true);
}
Example 10: main
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
public static void main(String[] args) throws Exception {
    // create execution environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "flink_consumer");

    DataStream<String> stream = env
            .addSource(new FlinkKafkaConsumer09<>("flink-demo", new SimpleStringSchema(), properties));

    stream.map(new MapFunction<String, String>() {
        private static final long serialVersionUID = -6867736771747690202L;

        @Override
        public String map(String value) throws Exception {
            return "Stream Value: " + value;
        }
    }).print();

    env.execute();
}
Example 11: setupCustomerSinks
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
private static DataStreamSink<CustomerImpression>[] setupCustomerSinks(DataStream<CustomerImpression> msgStream) {
    // Split the stream into multiple streams by customer Id
    SplitStream<CustomerImpression> splitStream = msgStream.split(customerOutputSelector());

    // Tie a separate sink to each fork of the split stream
    List<Customer> customers = Customer.getAllCustomers();
    DataStreamSink<CustomerImpression>[] customerSinks = new DataStreamSink[customers.size()];
    int i = 0;
    for (Customer customer : customers) {
        customerSinks[i++] = splitStream
                .select(customerStreamName(customer))
                .addSink(new CustomerSinkFunction(customer));
    }
    return customerSinks;
}
Example 12: testMergingWindowsWithEvictor
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
@Test
@SuppressWarnings("rawtypes")
public void testMergingWindowsWithEvictor() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.IngestionTime);

    DataStream<Tuple2<String, Integer>> source = env.fromElements(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

    DataStream<Tuple3<String, String, Integer>> window1 = source
            .windowAll(EventTimeSessionWindows.withGap(Time.seconds(5)))
            .evictor(CountEvictor.of(5))
            .process(new TestProcessAllWindowFunction());

    OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>> transform =
            (OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>>) window1.getTransformation();
    OneInputStreamOperator<Tuple2<String, Integer>, Tuple3<String, String, Integer>> operator = transform.getOperator();
    Assert.assertTrue(operator instanceof WindowOperator);
    WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
            (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;

    Assert.assertTrue(winOperator.getTrigger() instanceof EventTimeTrigger);
    Assert.assertTrue(winOperator.getWindowAssigner() instanceof EventTimeSessionWindows);
    Assert.assertTrue(winOperator.getStateDescriptor() instanceof ListStateDescriptor);

    processElementAndEnsureOutput(winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1));
}
Example 13: main
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
public static void main(String[] args) throws Exception {
    ParameterTool params = ParameterTool.fromArgs(args);
    final String nycTaxiRidesPath = params.get("nycTaxiRidesPath");

    final int maxEventDelay = 60;       // events are out of order by max 60 seconds
    final int servingSpeedFactor = 600; // events of 10 minutes are served in 1 second

    // set up streaming execution environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // start the data generator
    DataStream<TaxiRide> rides = env.addSource(
            new TaxiRideSource(nycTaxiRidesPath, maxEventDelay, servingSpeedFactor));

    // ===============================================================================
    //   1. clean up `rides`, so that the output stream only contains events
    //      with valid geo coordinates within NYC.
    //   2. print out the result stream to console
    // ===============================================================================

    // run the cleansing pipeline
    env.execute("Taxi Ride Cleansing");
}
Example 14: main
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    SiteToSiteClientConfig clientConfig = new SiteToSiteClient.Builder()
            .url("http://localhost:8080/nifi")
            .portName("Data for Flink")
            .requestBatchCount(5)
            .buildConfig();

    SourceFunction<NiFiDataPacket> nifiSource = new NiFiSource(clientConfig);
    DataStream<NiFiDataPacket> streamSource = env.addSource(nifiSource).setParallelism(2);

    DataStream<String> dataStream = streamSource.map(new MapFunction<NiFiDataPacket, String>() {
        @Override
        public String map(NiFiDataPacket value) throws Exception {
            return new String(value.getContent(), Charset.defaultCharset());
        }
    });

    dataStream.print();

    env.execute();
}
Example 15: testInheritOverride
import org.apache.flink.streaming.api.datastream.DataStream; // import the required package/class
@Test
public void testInheritOverride() {
    // verify that we can explicitly disable inheritance of the input slot sharing groups
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    FilterFunction<Long> dummyFilter = new FilterFunction<Long>() {
        @Override
        public boolean filter(Long value) {
            return false;
        }
    };

    DataStream<Long> src1 = env.generateSequence(1, 10).slotSharingGroup("group-1");
    DataStream<Long> src2 = env.generateSequence(1, 10).slotSharingGroup("group-1");

    // this should not inherit group but be in "default"
    src1.union(src2).filter(dummyFilter).slotSharingGroup("default");

    JobGraph jobGraph = env.getStreamGraph().getJobGraph();

    List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();
    assertEquals(vertices.get(0).getSlotSharingGroup(), vertices.get(1).getSlotSharingGroup());
    assertNotEquals(vertices.get(0).getSlotSharingGroup(), vertices.get(2).getSlotSharingGroup());
    assertNotEquals(vertices.get(1).getSlotSharingGroup(), vertices.get(2).getSlotSharingGroup());
}