This article collects typical usage examples of the Java class org.apache.flink.api.java.tuple.Tuple3. If you are wondering what the Tuple3 class is for, how to use it, or where to find worked examples, the code samples selected here may help.
The Tuple3 class belongs to the org.apache.flink.api.java.tuple package. 15 code examples are shown below, sorted by popularity by default.
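Before the examples, a minimal sketch of Tuple3 itself (this snippet is illustrative, not taken from the examples below): Flink tuples are fixed-arity containers with public, mutable, positional fields f0, f1, f2, plus a Tuple3.of(...) convenience factory.

import org.apache.flink.api.java.tuple.Tuple3;

public class Tuple3Basics {
    public static void main(String[] args) {
        // build an (Integer, Long, String) triple
        Tuple3<Integer, Long, String> t = Tuple3.of(1, 1L, "Hi");
        Integer id = t.f0;      // positional field access
        t.f2 = "Hello";         // fields are public and mutable
        System.out.println(t);  // prints (1,1,Hello)
    }
}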
Example 1: testPassingConfigurationObject
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
@Test
public void testPassingConfigurationObject() throws Exception {
    /*
     * Test passing configuration object.
     */
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.getSmall3TupleDataSet(env);
    Configuration conf = new Configuration();
    conf.setInteger(TEST_KEY, TEST_VALUE);
    DataSet<Tuple3<Integer, Long, String>> bcMapDs = ds
            .map(new RichMapper2()).withParameters(conf);
    List<Tuple3<Integer, Long, String>> result = bcMapDs.collect();
    String expected = "1,1,Hi\n"
            + "2,2,Hello\n"
            + "3,2,Hello world";
    compareResultAsTuples(result, expected);
}
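RichMapper2 itself is not shown in this snippet; what matters is that withParameters(conf) hands the Configuration to the rich function's open() method in the DataSet API. A minimal sketch of what such a mapper typically looks like (the identity map and the check in open() are assumptions, not the original class):

public static class RichMapper2 extends RichMapFunction<Tuple3<Integer, Long, String>, Tuple3<Integer, Long, String>> {
    @Override
    public void open(Configuration parameters) throws Exception {
        // the Configuration passed via withParameters(conf) arrives here
        int testValue = parameters.getInteger(TEST_KEY, -1);
        if (testValue != TEST_VALUE) {
            throw new IllegalStateException("configuration was not passed to the mapper");
        }
    }

    @Override
    public Tuple3<Integer, Long, String> map(Tuple3<Integer, Long, String> value) {
        return value;
    }
}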
Example 2: transformation
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
/**
 * Data transformation.
 * Groups by trackId, sums the number of occurrences, sorts the output,
 * and keeps the top N elements defined by the user.
 * @param input dataset of (trackId, count, TagEvent) tuples
 * @return the top-N chart results
 */
@Override
public DataSet<ChartsResult> transformation(DataSet<?> input) {
    log.info("Transformation Phase. Computing the tags");
    return input
            .groupBy(0) // Grouping by trackId
            .sum(1) // Sum the occurrences of each grouped item
            .sortPartition(1, Order.DESCENDING).setParallelism(1) // Sort by count
            .first(pipelineConf.args.getLimit())
            .map(t -> {
                Tuple3<Long, Integer, TagEvent> tuple = (Tuple3<Long, Integer, TagEvent>) t;
                return new ChartsResult(tuple.f0, tuple.f1, tuple.f2);
            })
            .returns(new TypeHint<ChartsResult>(){});
}
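One note on the .returns(...) call above: a Java lambda erases its generic output type, so the TypeHint is what tells Flink that the map produces ChartsResult. As a hedged alternative sketch, the same step written with an anonymous MapFunction lets Flink's type extractor read the output type from the class itself, making the hint unnecessary:

.map(new MapFunction<Object, ChartsResult>() {
    @Override
    public ChartsResult map(Object t) {
        @SuppressWarnings("unchecked")
        Tuple3<Long, Integer, TagEvent> tuple = (Tuple3<Long, Integer, TagEvent>) t;
        return new ChartsResult(tuple.f0, tuple.f1, tuple.f2);
    }
})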
Example 3: testInputOfCombinerIsSortedForCombinableGroupReduceWithGroupSorting
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
@Test
public void testInputOfCombinerIsSortedForCombinableGroupReduceWithGroupSorting() throws Exception {
    /*
     * check that input of combiner is also sorted for combinable groupReduce with group sorting
     */
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.get3TupleDataSet(env);
    DataSet<Tuple3<Integer, Long, String>> reduceDs = ds
            .groupBy(1).sortGroup(0, Order.ASCENDING).reduceGroup(new OrderCheckingCombinableReduce());
    List<Tuple3<Integer, Long, String>> result = reduceDs.collect();
    String expected = "1,1,Hi\n" +
            "2,2,Hello\n" +
            "4,3,Hello world, how are you?\n" +
            "7,4,Comment#1\n" +
            "11,5,Comment#5\n" +
            "16,6,Comment#10\n";
    compareResultAsTuples(result, expected);
}
Example 4: main
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
public static void main(String... args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<WikipediaEditEvent> edits = env.addSource(new WikipediaEditsSource());
    edits
            .timeWindowAll(Time.minutes(1))
            .apply(new AllWindowFunction<WikipediaEditEvent, Tuple3<Date, Long, Long>, TimeWindow>() {
                @Override
                public void apply(TimeWindow timeWindow, Iterable<WikipediaEditEvent> iterable, Collector<Tuple3<Date, Long, Long>> collector) throws Exception {
                    long count = 0;
                    long bytesChanged = 0;
                    for (WikipediaEditEvent event : iterable) {
                        count++;
                        bytesChanged += event.getByteDiff();
                    }
                    collector.collect(new Tuple3<>(new Date(timeWindow.getEnd()), count, bytesChanged));
                }
            })
            .print();
    env.execute();
}
Example 5: cleansingTest
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
/**
 * Test to validate the cleansing method.
 * We generate a DataSet with 10 TagEvents and modify 2 items to force bad data.
 * The assertion checks that only the proper number of items is obtained after
 * the cleansing process.
 * @throws Exception
 */
@Test
public void cleansingTest() throws Exception {
    String[] args = {"-c", "chart", "-l", "3"};
    argsParser = ArgsParser.builder(args);
    PipelineChartsConf pipelineConf = new PipelineChartsConf(config, argsParser);
    SimpleChartsPipeline pipeline = new SimpleChartsPipeline(pipelineConf);
    List<TagEvent> mockCollection = TagEventUtils.getMockData(10);
    mockCollection.set(0, new TagEvent(0L, "xxx", "yy", "zz"));
    mockCollection.set(4, new TagEvent(99L, "xxx", "yy", ""));
    DataSet<TagEvent> mockDataset = pipeline.getEnv().fromCollection(mockCollection);
    DataSet<Tuple3<Long, Integer, TagEvent>> clean = pipeline.cleansing(mockDataset);
    assertEquals(9, clean.count());
}
Example 6: main
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
public static void main(String[] args) throws Exception {
    // set up the execution environment
    final StreamExecutionEnvironment env = new StreamExecutionEnvBuilder().build();
    // Get the json config for parsing the raw input stream
    String parsingConfig = AppUtils.getParsingJsonConfig();
    KeyedStream<Tuple3<String, Long, String>, Tuple> kaydRawMessagesStream =
            setupKayedRawMessagesStream(env, parsingConfig);
    String outputStreamTopicName = configs.getStringProp("inputStreamTopicName");
    double streamDelayScale = configs.getDoubleProp("streamDelayScale");
    Properties producerProps = AppUtils.getKafkaProducerProperties();
    // replay the stream
    kaydRawMessagesStream.map(new StreamPlayer(streamDelayScale, outputStreamTopicName,
            producerProps)).setParallelism(1);
    // execute program
    env.execute("datAcron In-Situ Processing AIS Message Stream Simulator"
            + AppUtils.getAppVersion());
}
Example 7: setupKayedRawMessagesStream
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
/**
 * Set up the keyed stream of a raw stream.
 *
 * @param env the stream execution environment
 * @param parsingConfig the JSON config for parsing the raw input stream
 * @return the raw messages stream, keyed by message ID
 */
private static KeyedStream<Tuple3<String, Long, String>, Tuple> setupKayedRawMessagesStream(
        final StreamExecutionEnvironment env, String parsingConfig) {
    DataStream<Tuple3<String, Long, String>> rawStream =
            env.addSource(new FileLinesStreamSource(configs.getStringProp("aisDataSetFilePath"), parsingConfig, true))
                    .flatMap(new RawStreamMapper(parsingConfig)).setParallelism(1);
    // assign the timestamp of the AIS messages based on their timestamps
    DataStream<Tuple3<String, Long, String>> rawStreamWithTimeStamp =
            rawStream.assignTimestampsAndWatermarks(new RawMessageTimestampAssigner());
    // Construct the keyed stream (i.e., trajectories stream) of the raw messages by grouping them
    // based on the message ID (MMSI for vessels)
    KeyedStream<Tuple3<String, Long, String>, Tuple> kaydAisMessagesStream =
            rawStreamWithTimeStamp.keyBy(0).process(new RawMessagesSorter()).keyBy(0);
    return kaydAisMessagesStream;
}
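keyBy(0) keys by tuple position, which is why the key type in these signatures is the untyped Tuple. A hedged equivalent sketch using a KeySelector, which yields a typed key instead (functionally the same grouping by message ID):

KeyedStream<Tuple3<String, Long, String>, String> keyedById =
        rawStreamWithTimeStamp.keyBy(new KeySelector<Tuple3<String, Long, String>, String>() {
            @Override
            public String getKey(Tuple3<String, Long, String> message) {
                return message.f0; // the message ID (MMSI for vessels)
            }
        });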
Example 8: processElement
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
@Override
public void processElement(Tuple3<String, Long, String> message, Context context,
        Collector<Tuple3<String, Long, String>> out) throws Exception {
    TimerService timerService = context.timerService();
    if (context.timestamp() > timerService.currentWatermark()) {
        PriorityQueue<Tuple3<String, Long, String>> queue = queueState.value();
        if (queue == null) {
            queue = new PriorityQueue<>(15, new RawMessageTuplesComparator());
        }
        queue.add(message);
        queueState.update(queue);
        // register a timer to be fired when the watermark passes this message timestamp
        timerService.registerEventTimeTimer(message.f1);
    } else {
        String outOfOrderErrorMessage = "out of order message: " + message.f2;
        logger.info(outOfOrderErrorMessage);
        throw new Exception(outOfOrderErrorMessage);
    }
}
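processElement only buffers messages and registers event-time timers; the matching onTimer callback that drains the buffer is not shown in this snippet. A minimal sketch of what it typically looks like in this buffering-sorter pattern, assuming queueState is a ValueState<PriorityQueue<Tuple3<String, Long, String>>> ordered by the f1 timestamp as the code above suggests:

@Override
public void onTimer(long timestamp, OnTimerContext context,
        Collector<Tuple3<String, Long, String>> out) throws Exception {
    PriorityQueue<Tuple3<String, Long, String>> queue = queueState.value();
    if (queue == null) {
        return;
    }
    // emit every buffered message whose timestamp is now covered by the watermark
    Tuple3<String, Long, String> head = queue.peek();
    while (head != null && head.f1 <= timestamp) {
        out.collect(queue.poll());
        head = queue.peek();
    }
    queueState.update(queue);
}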
Example 9: parseRawLine
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
/**
 * Extract the message id & timestamp.
 *
 * @param value the raw input line
 * @return Tuple3<String, Long, String>(id, timestamp, value), or null if the
 *         timestamp cannot be parsed
 */
private Tuple3<String, Long, String> parseRawLine(String value) {
    String delimiter = parsingConfigs.getString("delimiter");
    String[] fieldsValue = value.split(delimiter);
    // get message id
    int idIndex = parsingConfigs.getInt("id");
    String id = fieldsValue[idIndex];
    // get message timestamp
    int timestampIndex = parsingConfigs.getInt("timestamp");
    long timestamp;
    try {
        timestamp = Long.parseLong(fieldsValue[timestampIndex]);
    } catch (NumberFormatException ex) {
        logger.error(ex.getMessage());
        return null;
    }
    return new Tuple3<String, Long, String>(id, timestamp, value);
}
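Returning null above marks a bad record rather than raising an error, so the caller must filter it out. A hedged sketch of how a flatMap such as the RawStreamMapper from Example 7 might consume parseRawLine (this wiring is an assumption, not the project's actual code):

@Override
public void flatMap(String line, Collector<Tuple3<String, Long, String>> out) throws Exception {
    Tuple3<String, Long, String> parsed = parseRawLine(line);
    if (parsed != null) {
        out.collect(parsed); // silently drop lines with an unparseable timestamp
    }
}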
Example 10: getSimulatedTimeDelayBetweenRawMessages
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
/**
 * Find the delay between the new raw message and the last received one.
 *
 * @param rawMessageTuple the new raw message as an (id, timestamp, value) tuple
 * @return the time difference between the two messages, scaled by the simulation waiting scale
 * @throws Exception if the timestamp state cannot be accessed
 */
private long getSimulatedTimeDelayBetweenRawMessages(Tuple3<String, Long, String> rawMessageTuple)
        throws Exception {
    // access the state value
    long currentPointTimestamp = rawMessageTuple.f1;
    long lastPointTimeStamp =
            lastTimestamp.value() == null ? currentPointTimestamp : lastTimestamp.value();
    lastTimestamp.update(currentPointTimestamp);
    long delay = (long) ((currentPointTimestamp - lastPointTimeStamp) * simulationWaitingScale);
    if (delay < 0) {
        String errorMessage =
                "negative delay " + delay + " for " + rawMessageTuple + ", previous timestamp " + lastPointTimeStamp;
        logger.error(errorMessage);
    }
    return delay;
}
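A quick worked example of the delay computation, with illustrative values only: a message arriving 400 ms after the previous one, replayed at half speed.

long lastPointTimeStamp = 1_000L;
long currentPointTimestamp = 1_400L;
double simulationWaitingScale = 0.5;
long delay = (long) ((currentPointTimestamp - lastPointTimeStamp) * simulationWaitingScale);
// delay == 200: the simulator waits 200 ms before emitting this message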
Example 11: testTupleSortingNestedParallelism1
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
@Test
public void testTupleSortingNestedParallelism1() throws Exception {
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Tuple2<Integer, Integer>, String, Integer>> ds =
            CollectionDataSets.getGroupSortedNestedTupleDataSet2(env);
    ds.writeAsText(resultPath)
            .sortLocalOutput("f0.f1", Order.ASCENDING)
            .sortLocalOutput("f1", Order.DESCENDING)
            .setParallelism(1);
    env.execute();
    String expected =
            "((2,1),a,3)\n" +
            "((2,2),b,4)\n" +
            "((1,2),a,1)\n" +
            "((3,3),c,5)\n" +
            "((1,3),a,2)\n" +
            "((3,6),c,6)\n" +
            "((4,9),c,7)\n";
    compareResultsByLinesInMemoryWithStrictOrder(expected, resultPath);
}
Example 12: createSerializer
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
@SuppressWarnings("unchecked")
@Override
protected TupleSerializer<Tuple3<Tuple2<String, Double>, Tuple2<Long, Long>, Tuple2<Integer, Long>>> createSerializer() {
    return new TupleSerializer<Tuple3<Tuple2<String, Double>, Tuple2<Long, Long>, Tuple2<Integer, Long>>>(
            (Class<Tuple3<Tuple2<String, Double>, Tuple2<Long, Long>, Tuple2<Integer, Long>>>) (Class<?>) Tuple3.class,
            new TypeSerializer[]{
                    new TupleSerializer<Tuple2<String, Double>>(
                            (Class<Tuple2<String, Double>>) (Class<?>) Tuple2.class,
                            new TypeSerializer[]{
                                    StringSerializer.INSTANCE,
                                    DoubleSerializer.INSTANCE}),
                    new TupleSerializer<Tuple2<Long, Long>>(
                            (Class<Tuple2<Long, Long>>) (Class<?>) Tuple2.class,
                            new TypeSerializer[]{
                                    LongSerializer.INSTANCE,
                                    LongSerializer.INSTANCE}),
                    new TupleSerializer<Tuple2<Integer, Long>>(
                            (Class<Tuple2<Integer, Long>>) (Class<?>) Tuple2.class,
                            new TypeSerializer[]{
                                    IntSerializer.INSTANCE,
                                    LongSerializer.INSTANCE})
            });
}
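To show what the nested serializer above is for, here is a hedged round-trip sketch. It assumes Flink's DataOutputSerializer and DataInputDeserializer (org.apache.flink.core.memory) as in-memory views; the record values are illustrative:

TupleSerializer<Tuple3<Tuple2<String, Double>, Tuple2<Long, Long>, Tuple2<Integer, Long>>> serializer = createSerializer();
Tuple3<Tuple2<String, Double>, Tuple2<Long, Long>, Tuple2<Integer, Long>> record =
        Tuple3.of(Tuple2.of("a", 1.0), Tuple2.of(1L, 2L), Tuple2.of(3, 4L));
// write the record into an in-memory output view
DataOutputSerializer output = new DataOutputSerializer(64);
serializer.serialize(record, output);
// read it back and check the round trip
DataInputDeserializer input = new DataInputDeserializer(output.getSharedBuffer(), 0, output.length());
Tuple3<Tuple2<String, Double>, Tuple2<Long, Long>, Tuple2<Integer, Long>> copy = serializer.deserialize(input);
assertEquals(record, copy);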
Example 13: testAllGroupCombineIdentity
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
@Test
public void testAllGroupCombineIdentity() throws Exception {
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.get3TupleDataSet(env);
    DataSet<Tuple3<Integer, Long, String>> reduceDs = ds
            // combine
            .combineGroup(new IdentityFunction())
            // fully reduce
            .reduceGroup(new IdentityFunction());
    List<Tuple3<Integer, Long, String>> result = reduceDs.collect();
    compareResultAsTuples(result, identityResult);
}
Example 14: testLeftOuterJoinOnTuplesWithKeyPositions
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
private void testLeftOuterJoinOnTuplesWithKeyPositions(JoinHint hint) throws Exception {
    /*
     * UDF Join on tuples with key field positions
     */
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds1 = CollectionDataSets.getSmall3TupleDataSet(env);
    DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 = CollectionDataSets.getSmall5TupleDataSet(env);
    DataSet<Tuple2<String, String>> joinDs =
            ds1.leftOuterJoin(ds2, hint)
                    .where(0)
                    .equalTo(0)
                    .with(new T3T5FlatJoin());
    List<Tuple2<String, String>> result = joinDs.collect();
    String expected = "Hi,Hallo\n" +
            "Hello,Hallo Welt\n" +
            "Hello,Hallo Welt wie\n" +
            "Hello world,null\n";
    compareResultAsTuples(result, expected);
}
Example 15: testCorrectnessOfDistinctOnTuplesWithKeyFieldSelector
import org.apache.flink.api.java.tuple.Tuple3; // import the required package/class
@Test
public void testCorrectnessOfDistinctOnTuplesWithKeyFieldSelector() throws Exception {
    /*
     * check correctness of distinct on tuples with key field selector
     */
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.getSmall3TupleDataSet(env);
    DataSet<Tuple3<Integer, Long, String>> distinctDs = ds.union(ds).distinct(0, 1, 2);
    List<Tuple3<Integer, Long, String>> result = distinctDs.collect();
    String expected = "1,1,Hi\n" +
            "2,2,Hello\n" +
            "3,2,Hello world\n";
    compareResultAsTuples(result, expected);
}