This article collects typical usages of the Java class org.apache.kafka.streams.kstream.KStream. If you are wondering what KStream is for and how to use it, the curated examples below should help.
The KStream class belongs to the org.apache.kafka.streams.kstream package. The 15 code examples that follow are ordered by popularity.
Example 1: main
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
public static void main(String[] args) throws CertificateException, NoSuchAlgorithmException,
        KeyStoreException, IOException, URISyntaxException {
    Properties streamsConfig = new AggregatorConfig().getProperties();
    final StreamsBuilder builder = new StreamsBuilder();
    // the input topic carries plain String keys and values; windowed keys only
    // appear after the windowed count below
    final KStream<String, String> words =
            builder.stream(String.format("%swords", HEROKU_KAFKA_PREFIX));
    words
            .groupBy((key, word) -> word)
            .windowedBy(TimeWindows.of(TimeUnit.SECONDS.toMillis(10)))
            .count(Materialized.as("windowed-counts"))
            .toStream()
            .process(PostgresSink::new);
    final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfig);
    streams.cleanUp();
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
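PostgresSink itself is not part of this excerpt; it is passed as a ProcessorSupplier, so it must implement the Processor API over the windowed counts emitted by toStream(). A minimal sketch of what such a sink could look like (the JDBC details are assumptions, not the original code):

public class PostgresSink implements Processor<Windowed<String>, Long> {
    @Override
    public void init(ProcessorContext context) {
        // open the JDBC connection / prepare the upsert statement here
    }
    @Override
    public void process(Windowed<String> word, Long count) {
        // e.g. upsert (word, window start, count) into a Postgres table
    }
    @Override
    public void punctuate(long timestamp) { } // deprecated no-op in this API version
    @Override
    public void close() {
        // release the JDBC connection
    }
}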
Example 2: buildStreams
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
private KStreamBuilder buildStreams() {
    KStreamBuilder builder = new KStreamBuilder();
    KStream<String, JsonNode> salesAggregated =
            builder.stream(Serdes.String(), JSON_SERDE, "sales-aggregated");
    salesAggregated.foreach(Aggregator::markAsProcessedInDatastore);
    KStream<String, JsonNode> salesRaw =
            builder.stream(Serdes.String(), JSON_SERDE, "sales-raw");
    salesRaw.filterNot(Aggregator::isAlreadyProcessed)
            .groupBy(Aggregator::groupByUserId, Serdes.String(), JSON_SERDE)
            .aggregate(() -> null, Aggregator::aggregateValues, JSON_SERDE, "aggregated-interm")
            .to(Serdes.String(), JSON_SERDE, "sales-aggregated");
    return builder;
}
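This example uses the pre-1.0 KStreamBuilder DSL, where serdes are passed positionally to each operator. From Kafka 1.0 on, the same pipeline would be written against StreamsBuilder with Consumed, Serialized/Materialized and Produced; a rough equivalent (JSON_SERDE as in the original):

StreamsBuilder builder = new StreamsBuilder();
builder.stream("sales-raw", Consumed.with(Serdes.String(), JSON_SERDE))
        .filterNot(Aggregator::isAlreadyProcessed)
        .groupBy(Aggregator::groupByUserId, Serialized.with(Serdes.String(), JSON_SERDE))
        .aggregate(() -> null, Aggregator::aggregateValues,
                Materialized.with(Serdes.String(), JSON_SERDE))
        .toStream()
        .to("sales-aggregated", Produced.with(Serdes.String(), JSON_SERDE));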
Example 3: main
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
public static void main(String[] args) {
    StreamsBuilder builder = new StreamsBuilder();
    KStream<Long, byte[]> order = builder.stream(MallConstants.ORDER_COMMITED_TOPIC);
    KeyValueMapper<Long, byte[], Long> userSelector = new KeyValueMapper<Long, byte[], Long>() {
        @Override
        public Long apply(Long key, byte[] value) {
            Order o = JsonSerializable.decode(value, Order.class);
            return null == o ? null : o.getUserId();
        }
    };
    // 30 days; note that plain int arithmetic (30 * 24 * 60 * 60 * 1000) would overflow
    long sizeMs = TimeUnit.DAYS.toMillis(30);
    order.groupBy(userSelector, Serialized.with(Serdes.Long(), Serdes.ByteArray()))
            .windowedBy(TimeWindows.of(sizeMs))
            .aggregate(() -> 0L, (aggKey, value, aggregate) -> aggregate + 1L)
            .toStream()
            .to(OrderConstants.USER_ORDER_COUNT_TOPIC);
}
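Order and JsonSerializable come from the surrounding project and are not shown. Assuming a Jackson-based implementation, the decode helper called above could look roughly like this (a hypothetical sketch):

public final class JsonSerializable {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // returns null on bad input, which the mapper above treats as "no user id"
    public static <T> T decode(byte[] value, Class<T> type) {
        try {
            return value == null ? null : MAPPER.readValue(value, type);
        } catch (IOException e) {
            return null;
        }
    }
}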
Example 4: main
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
public static void main(String[] args) throws Exception {
    Properties kafkaStreamProperties = new Properties();
    kafkaStreamProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-stream-wordCount");
    kafkaStreamProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    kafkaStreamProperties.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");
    kafkaStreamProperties.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    kafkaStreamProperties.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    Serde<String> stringSerde = Serdes.String();
    Serde<Long> longSerde = Serdes.Long();
    KStreamBuilder streamTopology = new KStreamBuilder();
    KStream<String, String> topicRecords = streamTopology.stream(stringSerde, stringSerde, "input");
    KStream<String, Long> wordCounts = topicRecords
            .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
            .map((key, word) -> new KeyValue<>(word, word))
            .countByKey("Count")
            .toStream();
    wordCounts.to(stringSerde, longSerde, "wordCount");
    KafkaStreams streamManager = new KafkaStreams(streamTopology, kafkaStreamProperties);
    streamManager.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streamManager::close));
}
Author: PacktPublishing | Project: Building-Data-Streaming-Applications-with-Apache-Kafka | Lines: 26 | Source: KafkaStreamWordCount.java
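countByKey is the 0.10.0-era shorthand and was dropped in later releases; from 0.10.1 onwards the counting step above is expressed through groupByKey(). A sketch of the replacement for the middle of the topology (same topics and serdes):

KStream<String, Long> wordCounts = topicRecords
        .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
        .map((key, word) -> new KeyValue<>(word, word))
        .groupByKey()
        .count("Count")
        .toStream();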
Example 5: main
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
public static void main(String[] args) throws Exception {
    Properties kafkaStreamProperties = new Properties();
    kafkaStreamProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "IP-Fraud-Detection");
    kafkaStreamProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    kafkaStreamProperties.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");
    kafkaStreamProperties.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    kafkaStreamProperties.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    Serde<String> stringSerde = Serdes.String();
    KStreamBuilder fraudDetectionTopology = new KStreamBuilder();
    KStream<String, String> ipRecords = fraudDetectionTopology.stream(stringSerde, stringSerde, propertyReader.getPropertyValue("topic"));
    KStream<String, String> fraudIpRecords = ipRecords
            .filter((k, v) -> isFraud(v));
    fraudIpRecords.to(propertyReader.getPropertyValue("output_topic"));
    KafkaStreams streamManager = new KafkaStreams(fraudDetectionTopology, kafkaStreamProperties);
    streamManager.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streamManager::close));
}
Author: PacktPublishing | Project: Building-Data-Streaming-Applications-with-Apache-Kafka | Lines: 25 | Source: IPFraudKafkaStreamApp.java
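isFraud and propertyReader are members of the book's class and are not shown here. The gist of the check is a lookup of the record's IP prefix against a pre-loaded set of fraudulent ranges; a purely illustrative sketch (field name and parsing logic assumed):

private static boolean isFraud(String record) {
    // assume the log line starts with the client IP
    String ip = record.split(" ")[0];
    String[] octets = ip.split("\\.");
    String prefix = octets[0] + "." + octets[1] + "." + octets[2];
    return fraudIpRanges.contains(prefix); // fraudIpRanges: a cached Set<String> of bad ranges
}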
Example 6: branch
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
@Override
@SuppressWarnings("unchecked")
public KStream<K, V>[] branch(Predicate<? super K, ? super V>... predicates) {
    if (predicates.length == 0) {
        throw new IllegalArgumentException("you must provide at least one predicate");
    }
    for (Predicate<? super K, ? super V> predicate : predicates) {
        Objects.requireNonNull(predicate, "predicates can't have null values");
    }
    String branchName = topology.newName(BRANCH_NAME);
    topology.addProcessor(branchName, new KStreamBranch(predicates.clone()), this.name);
    KStream<K, V>[] branchChildren = (KStream<K, V>[]) Array.newInstance(KStream.class, predicates.length);
    for (int i = 0; i < predicates.length; i++) {
        String childName = topology.newName(BRANCHCHILD_NAME);
        topology.addProcessor(childName, new KStreamPassThrough<K, V>(), branchName);
        branchChildren[i] = new KStreamImpl<>(topology, childName, sourceNodes, this.repartitionRequired);
    }
    return branchChildren;
}
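This is the internal KStreamImpl implementation. From the caller's perspective, branch splits one stream into an array of disjoint substreams, one per predicate, with each record routed to the first predicate it matches; for a hypothetical KStream<String, Integer> called numbers:

KStream<String, Integer>[] branches = numbers.branch(
        (key, value) -> value % 2 == 0,  // branches[0]: even values
        (key, value) -> value > 0,       // branches[1]: odd positive values
        (key, value) -> true             // branches[2]: everything else
);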
Example 7: merge
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
public static <K, V> KStream<K, V> merge(KStreamBuilder topology, KStream<K, V>[] streams) {
    if (streams == null || streams.length == 0) {
        throw new IllegalArgumentException("Parameter <streams> must not be null or empty");
    }
    String name = topology.newName(MERGE_NAME);
    String[] parentNames = new String[streams.length];
    Set<String> allSourceNodes = new HashSet<>();
    boolean requireRepartitioning = false;
    for (int i = 0; i < streams.length; i++) {
        KStreamImpl stream = (KStreamImpl) streams[i];
        parentNames[i] = stream.name;
        requireRepartitioning |= stream.repartitionRequired;
        allSourceNodes.addAll(stream.sourceNodes);
    }
    topology.addProcessor(name, new KStreamPassThrough<>(), parentNames);
    return new KStreamImpl<>(topology, name, allSourceNodes, requireRepartitioning);
}
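Callers reach this helper through the old KStreamBuilder API, which merges several streams into one with no ordering guarantee between them. An illustrative call (topic names assumed):

KStream<String, String> clicksEu = builder.stream("clicks-eu");
KStream<String, String> clicksUs = builder.stream("clicks-us");
KStream<String, String> allClicks = builder.merge(clicksEu, clicksUs);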
Example 8: leftJoin
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
@Override
public <V1, R> KStream<K, R> leftJoin(
        final KStream<K, V1> other,
        final ValueJoiner<? super V, ? super V1, ? extends R> joiner,
        final JoinWindows windows,
        final Serde<K> keySerde,
        final Serde<V> thisValSerde,
        final Serde<V1> otherValueSerde) {
    return doJoin(other,
            joiner,
            windows,
            keySerde,
            thisValSerde,
            otherValueSerde,
            new KStreamImplJoin(true, false));
}
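Seen from the caller, this overload pairs each left-stream record with right-stream records whose key matches inside the join window, and keeps left records with no match, in which case the joiner receives null for the right value. An illustrative call (stream names and serdes assumed):

KStream<String, String> enriched = orders.leftJoin(
        payments,
        (order, payment) -> order + "/" + (payment == null ? "unpaid" : payment),
        JoinWindows.of(TimeUnit.MINUTES.toMillis(5)),
        Serdes.String(),   // key serde
        Serdes.String(),   // this (left) value serde
        Serdes.String());  // other (right) value serde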
Example 9: mapMapJoin
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
private ExpectedOutputOnTopic mapMapJoin() throws Exception {
    final KStream<Integer, Integer> mapMapStream = streamOne.map(
            new KeyValueMapper<Long, Integer, KeyValue<Long, Integer>>() {
                @Override
                public KeyValue<Long, Integer> apply(final Long key, final Integer value) {
                    if (value == null) {
                        return new KeyValue<>(null, null);
                    }
                    return new KeyValue<>(key + value, value);
                }
            }).map(keyMapper);
    final String outputTopic = "map-map-join-" + testNo;
    doJoin(mapMapStream, streamTwo, outputTopic);
    return new ExpectedOutputOnTopic(expectedStreamOneTwoJoin, outputTopic);
}
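keyMapper is a fixture of the test class and is not shown; given the declared types (it turns the KStream<Long, Integer> into a KStream<Integer, Integer>), it is presumably along these lines:

final KeyValueMapper<Long, Integer, KeyValue<Integer, Integer>> keyMapper =
        new KeyValueMapper<Long, Integer, KeyValue<Integer, Integer>>() {
            @Override
            public KeyValue<Integer, Integer> apply(final Long key, final Integer value) {
                // assumed: promote the value to be the key as well
                return new KeyValue<>(value, value);
            }
        };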
Example 10: mapBothStreamsAndLeftJoin
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
private ExpectedOutputOnTopic mapBothStreamsAndLeftJoin() throws Exception {
    final KStream<Integer, Integer> map1 = streamOne.map(keyMapper);
    final KStream<Integer, String> map2 = streamTwo.map(MockKeyValueMapper.<Integer, String>NoOpKeyValueMapper());
    final String outputTopic = "left-join-" + testNo;
    CLUSTER.createTopic(outputTopic);
    map1.leftJoin(map2,
            TOSTRING_JOINER,
            getJoinWindow(),
            Serdes.Integer(),
            Serdes.Integer(),
            Serdes.String())
            .filterNot(new Predicate<Integer, String>() {
                @Override
                public boolean test(Integer key, String value) {
                    // drop left-only results, i.e. records whose right side joined as "null"
                    return value.substring(2).equals("null");
                }
            })
            .to(Serdes.Integer(), Serdes.String(), outputTopic);
    return new ExpectedOutputOnTopic(expectedStreamOneTwoJoin, outputTopic);
}
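TOSTRING_JOINER is also defined in the test fixture. Judging from the substring(2) check above, it renders both sides into one short string, so a left-only result reads like "1+null"; presumably something like:

final ValueJoiner<Object, Object, String> TOSTRING_JOINER =
        new ValueJoiner<Object, Object, String>() {
            @Override
            public String apply(final Object value1, final Object value2) {
                // assumed format: "<left>+<right>"
                return value1 + "+" + value2;
            }
        };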
Example 11: createCountStream
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
/**
 * Creates a typical word count topology.
 */
private KafkaStreams createCountStream(final String inputTopic, final String outputTopic, final Properties streamsConfiguration) {
    final KStreamBuilder builder = new KStreamBuilder();
    final Serde<String> stringSerde = Serdes.String();
    final KStream<String, String> textLines = builder.stream(stringSerde, stringSerde, inputTopic);
    final KGroupedStream<String, String> groupedByWord = textLines
            .flatMapValues(new ValueMapper<String, Iterable<String>>() {
                @Override
                public Iterable<String> apply(final String value) {
                    return Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+"));
                }
            })
            .groupBy(MockKeyValueMapper.<String, String>SelectValueMapper());
    // Create a state store for the all-time word count
    groupedByWord.count("word-count-store-" + inputTopic).to(Serdes.String(), Serdes.Long(), outputTopic);
    // Create a windowed state store that holds the word count per 1-minute window
    groupedByWord.count(TimeWindows.of(WINDOW_SIZE), "windowed-word-count-store-" + inputTopic);
    return new KafkaStreams(builder, streamsConfiguration);
}
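The two stores are named so that tests can query them while the application runs. A minimal interactive-queries lookup against the all-time store (API available since Kafka 0.10.1; the word "hello" is illustrative):

ReadOnlyKeyValueStore<String, Long> store = streams.store(
        "word-count-store-" + inputTopic,
        QueryableStoreTypes.<String, Long>keyValueStore());
Long occurrences = store.get("hello"); // null if the word has not been counted yet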
Example 12: shouldObserveStreamElements
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
@Test
public void shouldObserveStreamElements() {
    final KStreamBuilder builder = new KStreamBuilder();
    final KStream<Integer, String> stream = builder.stream(intSerd, stringSerd, topicName);
    final List<KeyValue<Integer, String>> peekObserved = new ArrayList<>(), streamObserved = new ArrayList<>();
    stream.peek(collect(peekObserved)).foreach(collect(streamObserved));
    driver = new KStreamTestDriver(builder);
    final List<KeyValue<Integer, String>> expected = new ArrayList<>();
    for (int key = 0; key < 32; key++) {
        final String value = "V" + key;
        driver.process(topicName, key, value);
        expected.add(new KeyValue<>(key, value));
    }
    assertEquals(expected, peekObserved);
    assertEquals(expected, streamObserved);
}
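The collect helper belongs to the test class and is not shown; it evidently builds a ForeachAction that appends every record to the given list, roughly:

private static <K, V> ForeachAction<K, V> collect(final List<KeyValue<K, V>> into) {
    return new ForeachAction<K, V>() {
        @Override
        public void apply(final K key, final V value) {
            into.add(new KeyValue<>(key, value));
        }
    };
}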
Example 13: testPrintKeyValueWithName
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
@Test
public void testPrintKeyValueWithName() {
    final KStreamPrint<Integer, String> kStreamPrint = new KStreamPrint<>(new PrintForeachAction(printWriter, "test-stream"), intSerd, stringSerd);
    final List<KeyValue<Integer, String>> inputRecords = Arrays.asList(
            new KeyValue<>(0, "zero"),
            new KeyValue<>(1, "one"),
            new KeyValue<>(2, "two"),
            new KeyValue<>(3, "three"));
    final String[] expectedResult = {"[test-stream]: 0, zero", "[test-stream]: 1, one", "[test-stream]: 2, two", "[test-stream]: 3, three"};
    final KStreamBuilder builder = new KStreamBuilder();
    final KStream<Integer, String> stream = builder.stream(intSerd, stringSerd, topicName);
    stream.process(kStreamPrint);
    driver = new KStreamTestDriver(builder);
    for (KeyValue<Integer, String> record : inputRecords) {
        driver.process(topicName, record.key, record.value);
    }
    printWriter.flush();
    final String[] flushedLines = new String(byteOutStream.toByteArray(), Charset.forName("UTF-8")).split("\n");
    for (int i = 0; i < flushedLines.length; i++) {
        assertEquals(expectedResult[i], flushedLines[i]); // expected value comes first in JUnit
    }
}
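PrintForeachAction is an internal helper; the expected strings show its output format, one "[label]: key, value" line per record. In spirit it behaves like:

final ForeachAction<Integer, String> printAction = new ForeachAction<Integer, String>() {
    @Override
    public void apply(final Integer key, final String value) {
        // assumed: same format the assertions above check for
        printWriter.println("[test-stream]: " + key + ", " + value);
    }
};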
Example 14: testFilter
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
@Test
public void testFilter() {
    KStreamBuilder builder = new KStreamBuilder();
    final int[] expectedKeys = new int[]{1, 2, 3, 4, 5, 6, 7};
    MockProcessorSupplier<Integer, String> processor = new MockProcessorSupplier<>();
    KStream<Integer, String> stream = builder.stream(Serdes.Integer(), Serdes.String(), topicName);
    stream.filter(isMultipleOfThree).process(processor);
    driver = new KStreamTestDriver(builder);
    for (int expectedKey : expectedKeys) {
        driver.process(topicName, expectedKey, "V" + expectedKey);
    }
    // only the records with keys 3 and 6 pass the filter
    assertEquals(2, processor.processed.size());
}
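isMultipleOfThree is a field of the test class; since exactly the records with keys 3 and 6 out of 1..7 survive the filter, it is presumably:

private final Predicate<Integer, String> isMultipleOfThree = new Predicate<Integer, String>() {
    @Override
    public boolean test(final Integer key, final String value) {
        // assumed: keep records whose key is divisible by three
        return (key % 3) == 0;
    }
};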
Example 15: testFilterNot
import org.apache.kafka.streams.kstream.KStream; // import of the required package/class
@Test
public void testFilterNot() {
    KStreamBuilder builder = new KStreamBuilder();
    final int[] expectedKeys = new int[]{1, 2, 3, 4, 5, 6, 7};
    MockProcessorSupplier<Integer, String> processor = new MockProcessorSupplier<>();
    KStream<Integer, String> stream = builder.stream(Serdes.Integer(), Serdes.String(), topicName);
    stream.filterNot(isMultipleOfThree).process(processor);
    driver = new KStreamTestDriver(builder);
    for (int expectedKey : expectedKeys) {
        driver.process(topicName, expectedKey, "V" + expectedKey);
    }
    // the five records whose keys are not multiples of three pass through
    assertEquals(5, processor.processed.size());
}