This article collects typical usage examples of the Java method org.apache.flink.streaming.api.datastream.DataStream.transform. If you are unsure how DataStream.transform works or how to use it, the curated code examples below may help. You can also explore the containing class, org.apache.flink.streaming.api.datastream.DataStream, for more context.
The following presents five code examples of the DataStream.transform method, sorted by popularity by default.
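Before the examples, a minimal self-contained sketch of the pattern they all share may help: transform(name, outputType, operator) wires a custom StreamOperator into the dataflow graph. The UpperCaseOperator below is hypothetical and not taken from any of the examples; it only illustrates the call shape.

import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;

public class TransformSketch {

    // A hypothetical one-input operator that upper-cases each element.
    private static class UpperCaseOperator
            extends AbstractStreamOperator<String>
            implements OneInputStreamOperator<String, String> {

        @Override
        public void processElement(StreamRecord<String> element) throws Exception {
            // Forward the record with its value replaced; the timestamp is kept.
            output.collect(element.replace(element.getValue().toUpperCase()));
        }
    }

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<String> source = env.fromElements("a", "b", "c");

        // transform(name, outputType, operator) inserts the custom operator.
        source.transform("UpperCase", BasicTypeInfo.STRING_TYPE_INFO, new UpperCaseOperator())
              .print();

        env.execute("transform sketch");
    }
}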
Example 1: testOutputTypeConfigurationWithOneInputTransformation
import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
/**
* Test whether an {@link OutputTypeConfigurable} implementation gets called with the correct
* output type. In this test case the output type must be BasicTypeInfo.INT_TYPE_INFO.
*
* @throws Exception
*/
@Test
public void testOutputTypeConfigurationWithOneInputTransformation() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStream<Integer> source = env.fromElements(1, 10);

    OutputTypeConfigurableOperationWithOneInput outputTypeConfigurableOperation = new OutputTypeConfigurableOperationWithOneInput();

    DataStream<Integer> result = source.transform(
        "Single input and output type configurable operation",
        BasicTypeInfo.INT_TYPE_INFO,
        outputTypeConfigurableOperation);

    result.addSink(new DiscardingSink<Integer>());

    // Building the StreamGraph is what triggers the setOutputType() callback.
    env.getStreamGraph();

    assertEquals(BasicTypeInfo.INT_TYPE_INFO, outputTypeConfigurableOperation.getTypeInformation());
}
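The helper operator this test relies on is not shown on this page. A minimal sketch of what such an OutputTypeConfigurable operator could look like, reconstructed from how the test uses it (the class body below is an assumption, not the original source):

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
import org.apache.flink.streaming.api.operators.OutputTypeConfigurable;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;

// Hypothetical reconstruction: an identity operator that records the output
// type Flink pushes into it while the StreamGraph is generated.
class OutputTypeConfigurableOperationWithOneInput
        extends AbstractStreamOperator<Integer>
        implements OneInputStreamOperator<Integer, Integer>, OutputTypeConfigurable<Integer> {

    private TypeInformation<Integer> typeInformation;

    @Override
    public void setOutputType(TypeInformation<Integer> outTypeInfo, ExecutionConfig executionConfig) {
        // Invoked during StreamGraph generation (triggered by env.getStreamGraph()
        // in the test above), before the job is ever executed.
        this.typeInformation = outTypeInfo;
    }

    public TypeInformation<Integer> getTypeInformation() {
        return typeInformation;
    }

    @Override
    public void processElement(StreamRecord<Integer> element) throws Exception {
        // Pass elements through unchanged; only the type callback matters here.
        output.collect(element);
    }
}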
Example 2: testOperatorChainedToSource
import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
/**
* Note: this test fails if we don't check for exceptions in the source contexts and do not
* synchronize in the source contexts.
*/
@Test
public void testOperatorChainedToSource() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(timeCharacteristic);
    env.setParallelism(1);

    DataStream<String> source = env.addSource(new InfiniteTestSource());

    source.transform("Custom Operator", BasicTypeInfo.STRING_TYPE_INFO, new TimerOperator(ChainingStrategy.ALWAYS));

    // The job is expected to fail with a TimerException that wraps a
    // RuntimeException carrying the marker message "TEST SUCCESS".
    boolean testSuccess = false;
    try {
        env.execute("Timer test");
    } catch (JobExecutionException e) {
        if (e.getCause() instanceof TimerException) {
            TimerException te = (TimerException) e.getCause();
            if (te.getCause() instanceof RuntimeException) {
                RuntimeException re = (RuntimeException) te.getCause();
                if (re.getMessage().equals("TEST SUCCESS")) {
                    testSuccess = true;
                } else {
                    throw e;
                }
            } else {
                throw e;
            }
        } else {
            throw e;
        }
    }

    Assert.assertTrue(testSuccess);
}
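Both this test and the next depend on an InfiniteTestSource that the page does not show. A minimal sketch of what it might look like, based on how the tests use it; the loop body, emitted value, and sleep interval are assumptions:

import org.apache.flink.streaming.api.functions.source.SourceFunction;

// Hypothetical reconstruction of InfiniteTestSource: it emits elements until
// cancelled and holds the checkpoint lock while emitting, which is the
// synchronization the Javadoc note above refers to.
class InfiniteTestSource implements SourceFunction<String> {

    private volatile boolean running = true;

    @Override
    public void run(SourceContext<String> ctx) throws Exception {
        while (running) {
            // Emit under the checkpoint lock so emission cannot interleave
            // unsafely with checkpoints or timer callbacks.
            synchronized (ctx.getCheckpointLock()) {
                ctx.collect("hello");
            }
            Thread.sleep(1);
        }
    }

    @Override
    public void cancel() {
        running = false;
    }
}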
Example 3: testOneInputOperatorWithoutChaining
import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
/**
* Note: this test fails if we don't check for exceptions in the source contexts and do not
* synchronize in the source contexts.
*/
@Test
public void testOneInputOperatorWithoutChaining() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(timeCharacteristic);
    env.setParallelism(1);

    DataStream<String> source = env.addSource(new InfiniteTestSource());

    source.transform("Custom Operator", BasicTypeInfo.STRING_TYPE_INFO, new TimerOperator(ChainingStrategy.NEVER));

    boolean testSuccess = false;
    try {
        env.execute("Timer test");
    } catch (JobExecutionException e) {
        if (e.getCause() instanceof TimerException) {
            TimerException te = (TimerException) e.getCause();
            if (te.getCause() instanceof RuntimeException) {
                RuntimeException re = (RuntimeException) te.getCause();
                if (re.getMessage().equals("TEST SUCCESS")) {
                    testSuccess = true;
                } else {
                    throw e;
                }
            } else {
                throw e;
            }
        } else {
            throw e;
        }
    }

    Assert.assertTrue(testSuccess);
}
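The other shared helper, TimerOperator, is also not shown. A sketch of what it could look like, assuming the processing-time timer API of the Flink version these tests target; the 10 ms delay and the per-element timer registration are illustrative choices, not taken from the original:

import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
import org.apache.flink.streaming.api.operators.ChainingStrategy;
import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
import org.apache.flink.streaming.runtime.tasks.ProcessingTimeCallback;

// Hypothetical reconstruction of TimerOperator: it registers a processing-time
// timer for each element and fails with the marker message "TEST SUCCESS" when
// a timer fires. An exception thrown from a timer callback surfaces wrapped in
// a TimerException, which is exactly the chain the two tests unwrap.
class TimerOperator extends AbstractStreamOperator<String>
        implements OneInputStreamOperator<String, String> {

    TimerOperator(ChainingStrategy chainingStrategy) {
        setChainingStrategy(chainingStrategy);
    }

    @Override
    public void processElement(StreamRecord<String> element) throws Exception {
        getProcessingTimeService().registerTimer(
            getProcessingTimeService().getCurrentProcessingTime() + 10,
            new ProcessingTimeCallback() {
                @Override
                public void onProcessingTime(long timestamp) throws Exception {
                    throw new RuntimeException("TEST SUCCESS");
                }
            });
        output.collect(element);
    }
}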
Example 4: createDataStream
import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
@SuppressWarnings("unchecked")
public static <OUT> DataStream<OUT> createDataStream(SiddhiOperatorContext context, DataStream<Tuple2<String, Object>> namedStream) {
    return namedStream.transform(context.getName(), context.getOutputStreamType(), new SiddhiStreamOperator(context));
}
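The @SuppressWarnings("unchecked") is needed because SiddhiStreamOperator is instantiated without generic parameters, so the compiler cannot verify the OUT type. The same pattern can be sketched generically; the class and method names below are hypothetical and only illustrate the shape of such a typed factory around transform():

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.operators.OneInputStreamOperator;

public final class TransformFactory {

    private TransformFactory() {
    }

    // Hypothetical typed wrapper: the operator arrives as a raw type, so the
    // compiler cannot prove the OUT parameter and the unchecked warning has to
    // be suppressed, mirroring createDataStream above.
    @SuppressWarnings({"unchecked", "rawtypes"})
    public static <IN, OUT> DataStream<OUT> applyOperator(
            DataStream<IN> input,
            String operatorName,
            TypeInformation<OUT> outputType,
            OneInputStreamOperator rawOperator) {
        return input.transform(operatorName, outputType, rawOperator);
    }
}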
Example 5: writeToKafkaWithTimestamps
import org.apache.flink.streaming.api.datastream.DataStream; // import the package/class this method depends on
/**
 * Creates a FlinkKafkaProducer for a given topic. The sink writes the elements of a DataStream to
 * the topic.
 *
 * This method allows writing timestamps to Kafka; it follows approach (b) (see above).
 *
 * @param inStream The stream to write to Kafka
 * @param topicId The name of the target topic
 * @param serializationSchema A serializable serialization schema for turning user objects into a Kafka-consumable byte[], supporting key/value messages
 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers' is the only required property.
 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
 */
public static <T> FlinkKafkaProducer010Configuration<T> writeToKafkaWithTimestamps(DataStream<T> inStream,
        String topicId,
        KeyedSerializationSchema<T> serializationSchema,
        Properties producerConfig,
        KafkaPartitioner<T> customPartitioner) {
    GenericTypeInfo<Object> objectTypeInfo = new GenericTypeInfo<>(Object.class);
    FlinkKafkaProducer010<T> kafkaProducer = new FlinkKafkaProducer010<>(topicId, serializationSchema, producerConfig, customPartitioner);
    SingleOutputStreamOperator<Object> transformation = inStream.transform("FlinkKafkaProducer 0.10.x", objectTypeInfo, kafkaProducer);
    return new FlinkKafkaProducer010Configuration<>(transformation, kafkaProducer);
}
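A hedged usage sketch for this factory method; the broker address, topic name, and the trivial partitioner below are illustrative placeholders, not part of the original:

import java.util.Properties;

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer010;
import org.apache.flink.streaming.connectors.kafka.partitioner.KafkaPartitioner;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchemaWrapper;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

public class KafkaSinkSketch {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<String> stream = env.fromElements("a", "b", "c");

        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092"); // placeholder broker

        // A hypothetical partitioner that routes every record to partition 0.
        KafkaPartitioner<String> partitioner = new KafkaPartitioner<String>() {
            @Override
            public int partition(String next, byte[] serializedKey, byte[] serializedValue, int numPartitions) {
                return 0;
            }
        };

        FlinkKafkaProducer010.FlinkKafkaProducer010Configuration<String> config =
                FlinkKafkaProducer010.writeToKafkaWithTimestamps(
                        stream,
                        "example-topic", // placeholder topic
                        new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()),
                        props,
                        partitioner);

        // The returned configuration can then be tuned, e.g. to attach event
        // timestamps to the Kafka records (the point of "approach (b)" above).
        config.setWriteTimestampToKafka(true);

        env.execute("kafka sink sketch");
    }
}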