This article collects typical code examples of the Java method org.apache.kafka.streams.kstream.KStream.foreach. If you have been wondering what KStream.foreach does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples for the containing class, org.apache.kafka.streams.kstream.KStream.
Seven code examples of KStream.foreach are shown below, sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system surface better Java code examples.
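Before diving into the examples, note what the method actually does: KStream.foreach is a terminal operation that invokes a side-effecting ForeachAction on every record and returns void, so nothing can be chained after it. A minimal sketch, assuming a hypothetical input topic "events" and String serdes, written against the same pre-1.0 KStreamBuilder API the examples below use:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KStreamBuilder;

KStreamBuilder builder = new KStreamBuilder();
// "events" is a placeholder topic name; foreach only performs side effects
KStream<String, String> events = builder.stream(Serdes.String(), Serdes.String(), "events");
events.foreach((key, value) -> System.out.println(key + " -> " + value));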
Example 1: buildStreams
import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
private KStreamBuilder buildStreams() {
    KStreamBuilder builder = new KStreamBuilder();

    // Side effect only: mark already-aggregated records as processed in the datastore
    KStream<String, JsonNode> salesAggregated =
            builder.stream(Serdes.String(), JSON_SERDE, "sales-aggregated");
    salesAggregated.foreach(Aggregator::markAsProcessedInDatastore);

    // Aggregate unprocessed raw sales per user and write the result back to "sales-aggregated"
    KStream<String, JsonNode> salesRaw =
            builder.stream(Serdes.String(), JSON_SERDE, "sales-raw");
    salesRaw.filterNot(Aggregator::isAlreadyProcessed)
            .groupBy(Aggregator::groupByUserId, Serdes.String(), JSON_SERDE)
            .aggregate(() -> null, Aggregator::aggregateValues, JSON_SERDE, "aggregated-interm")
            .to(Serdes.String(), JSON_SERDE, "sales-aggregated");

    return builder;
}
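buildStreams() only describes the topology; nothing runs until the builder is handed to KafkaStreams. A minimal sketch of starting it (the application id and bootstrap address are placeholders):

Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "sales-aggregator");   // placeholder
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // placeholder

KafkaStreams streams = new KafkaStreams(buildStreams(), new StreamsConfig(props));
streams.start();
Runtime.getRuntime().addShutdownHook(new Thread(streams::close));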
Example 2: testForeach
import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
@Test
public void testForeach() {
    // Given
    List<KeyValue<Integer, String>> inputRecords = Arrays.asList(
        new KeyValue<>(0, "zero"),
        new KeyValue<>(1, "one"),
        new KeyValue<>(2, "two"),
        new KeyValue<>(3, "three")
    );
    List<KeyValue<Integer, String>> expectedRecords = Arrays.asList(
        new KeyValue<>(0, "ZERO"),
        new KeyValue<>(2, "ONE"),
        new KeyValue<>(4, "TWO"),
        new KeyValue<>(6, "THREE")
    );
    final List<KeyValue<Integer, String>> actualRecords = new ArrayList<>();
    ForeachAction<Integer, String> action =
        new ForeachAction<Integer, String>() {
            @Override
            public void apply(Integer key, String value) {
                actualRecords.add(new KeyValue<>(key * 2, value.toUpperCase(Locale.ROOT)));
            }
        };

    // When
    KStreamBuilder builder = new KStreamBuilder();
    KStream<Integer, String> stream = builder.stream(intSerde, stringSerde, topicName);
    stream.foreach(action);

    // Then
    driver = new KStreamTestDriver(builder);
    for (KeyValue<Integer, String> record : inputRecords) {
        driver.process(topicName, record.key, record.value);
    }
    assertEquals(expectedRecords.size(), actualRecords.size());
    for (int i = 0; i < expectedRecords.size(); i++) {
        KeyValue<Integer, String> expectedRecord = expectedRecords.get(i);
        KeyValue<Integer, String> actualRecord = actualRecords.get(i);
        assertEquals(expectedRecord, actualRecord);
    }
}
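A note on this pattern: because foreach is terminal, the test can only observe the side effects collected in actualRecords; nothing flows downstream. Newer Kafka Streams versions also provide KStream.peek, which takes the same ForeachAction but returns the stream, so the side effect can be combined with further processing. A minimal sketch reusing the test's serdes and topic name ("output-topic" is a placeholder):

// peek runs the callback and forwards records unchanged, unlike the terminal foreach
KStream<Integer, String> stream = builder.stream(intSerde, stringSerde, topicName);
stream.peek((key, value) -> System.out.println("saw " + key + " = " + value))
      .mapValues(value -> value.toUpperCase(Locale.ROOT))
      .to(intSerde, stringSerde, "output-topic");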
Example 3: aggregateAndPublish
import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
public void aggregateAndPublish(final KStream<K, T> in, final Producer<K, T> kafkaProducer, final String topic) {
    // Feed every record into the window aggregation as a side effect
    in.foreach((k, t) -> aggregate(k, t));
    addAction(new WindowAggregationAction<K, T>() {
        @Override
        public void onExpire(final Stream<KeyValue<K, T>> aggregatedStream, final Set<K> expiredKeys) {
            // java.util.stream pipelines are lazy: the original trailing
            // .map(pr -> kafkaProducer.send(pr)) never executed, so use a
            // terminal forEach to actually send the records
            aggregatedStream.map(kv -> new ProducerRecord<K, T>(topic, kv.key, kv.value))
                            .forEach(kafkaProducer::send);
        }
    });
}
Example 4: QueuedSchemaKStream
import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
private QueuedSchemaKStream(final Schema schema,
                            final KStream kstream,
                            final Field keyField,
                            final List<SchemaKStream> sourceSchemaKStreams,
                            final Type type,
                            final FunctionRegistry functionRegistry,
                            final Optional<Integer> limit,
                            final OutputNode outputNode,
                            final SchemaRegistryClient schemaRegistryClient) {
    super(schema, kstream, keyField, sourceSchemaKStreams, type, functionRegistry, schemaRegistryClient);
    setOutputNode(outputNode);
    kstream.foreach(new QueuedSchemaKStream.QueuePopulator(rowQueue, limit));
}
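This constructor comes from KSQL: the QueuePopulator is a ForeachAction that pushes each row into rowQueue so a query consumer can poll results, honoring an optional LIMIT. A plausible sketch of the idea, not the verbatim KSQL source (GenericRow is KSQL's row type; field and type names are inferred from the constructor above):

import java.util.Optional;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.ForeachAction;

// Plausible sketch: enqueue each row for the query consumer, stopping at the LIMIT
static final class QueuePopulator<K> implements ForeachAction<K, GenericRow> {
    private final BlockingQueue<KeyValue<K, GenericRow>> queue;
    private final Optional<Integer> limit;
    private final AtomicInteger counter = new AtomicInteger();

    QueuePopulator(final BlockingQueue<KeyValue<K, GenericRow>> queue,
                   final Optional<Integer> limit) {
        this.queue = queue;
        this.limit = limit;
    }

    @Override
    public void apply(final K key, final GenericRow row) {
        if (limit.isPresent() && counter.incrementAndGet() > limit.get()) {
            return; // LIMIT reached: drop further rows in this sketch
        }
        queue.offer(new KeyValue<>(key, row));
    }
}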
Example 5: shouldKStreamGlobalKTableLeftJoin
import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
@Test
public void shouldKStreamGlobalKTableLeftJoin() throws Exception {
    final KStream<String, String> streamTableJoin = stream.leftJoin(globalTable, keyMapper, joiner);
    streamTableJoin.foreach(foreachAction);

    produceInitialGlobalTableValues();
    startStreams();
    produceTopicValues(inputStream);

    final Map<String, String> expected = new HashMap<>();
    expected.put("a", "1+A");
    expected.put("b", "2+B");
    expected.put("c", "3+C");
    expected.put("d", "4+D");
    // "e" has no match in the global table yet, so the left join emits null
    expected.put("e", "5+null");
    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return results.equals(expected);
        }
    }, 30000L, "waiting for initial values");

    produceGlobalTableValues();

    final ReadOnlyKeyValueStore<Long, String> replicatedStore =
            kafkaStreams.store(globalStore, QueryableStoreTypes.<Long, String>keyValueStore());
    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return "J".equals(replicatedStore.get(5L));
        }
    }, 30000L, "waiting for data in replicated store");

    produceTopicValues(inputStream);

    expected.put("a", "1+F");
    expected.put("b", "2+G");
    expected.put("c", "3+H");
    expected.put("d", "4+I");
    expected.put("e", "5+J");
    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return results.equals(expected);
        }
    }, 30000L, "waiting for final values");
}
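The helpers stream, globalTable, keyMapper, joiner, foreachAction, and results are fields of the test class and are not shown in this snippet. A plausible reconstruction of foreachAction, inferred from how the test polls results (not the verbatim source; the ConcurrentHashMap is an assumption, since stream threads write while the test thread reads):

final Map<String, String> results = new ConcurrentHashMap<>();
// Collect every joined record so the TestConditions above can compare against "expected"
final ForeachAction<String, String> foreachAction = new ForeachAction<String, String>() {
    @Override
    public void apply(final String key, final String value) {
        results.put(key, value);
    }
};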
Example 6: shouldKStreamGlobalKTableJoin
import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
@Test
public void shouldKStreamGlobalKTableJoin() throws Exception {
    final KStream<String, String> streamTableJoin = stream.join(globalTable, keyMapper, joiner);
    streamTableJoin.foreach(foreachAction);

    produceInitialGlobalTableValues();
    startStreams();
    produceTopicValues(inputStream);

    // Unlike the left join, the inner join drops "e" until key 5 exists in the global table
    final Map<String, String> expected = new HashMap<>();
    expected.put("a", "1+A");
    expected.put("b", "2+B");
    expected.put("c", "3+C");
    expected.put("d", "4+D");
    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return results.equals(expected);
        }
    }, 30000L, "waiting for initial values");

    produceGlobalTableValues();

    final ReadOnlyKeyValueStore<Long, String> replicatedStore =
            kafkaStreams.store(globalStore, QueryableStoreTypes.<Long, String>keyValueStore());
    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return "J".equals(replicatedStore.get(5L));
        }
    }, 30000L, "waiting for data in replicated store");

    produceTopicValues(inputStream);

    expected.put("a", "1+F");
    expected.put("b", "2+G");
    expected.put("c", "3+H");
    expected.put("d", "4+I");
    expected.put("e", "5+J"); // now that key 5 has a value, "e" joins too
    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return results.equals(expected);
        }
    }, 30000L, "waiting for final values");
}
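Likewise, keyMapper and joiner are defined elsewhere in the test class. Plausible reconstructions, inferred from the expected "streamValue+tableValue" results and the Long-keyed global store (again, not the verbatim source):

// Map each stream record to the global table's Long key; here the stream value
// ("1".."5") is assumed to double as the lookup key
final KeyValueMapper<String, String, Long> keyMapper =
        new KeyValueMapper<String, String, Long>() {
            @Override
            public Long apply(final String key, final String value) {
                return Long.valueOf(value);
            }
        };
// Concatenate the stream value and the (possibly null, for the left join) table value
final ValueJoiner<String, String, String> joiner =
        new ValueJoiner<String, String, String>() {
            @Override
            public String apply(final String streamValue, final String tableValue) {
                return streamValue + "+" + tableValue;
            }
        };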
Example 7: main
import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
public static void main(String[] args) throws IOException {
    String zookeeperServer = args[0];
    String kafkaServer = args[1];
    String rawMetricsTopic = args[2];
    String aggMetricsTopic = args[3];

    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "metrics-aggregator");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServer);
    props.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, zookeeperServer);
    props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, "1");
    props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1 * 1000);
    props.put(StreamsConfig.TIMESTAMP_EXTRACTOR_CLASS_CONFIG, MetricTimeExtractor.class.getName());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    StreamsConfig config = new StreamsConfig(props);

    Serde<String> appIdSerde = Serdes.String();
    Serde<String> metricNameSerde = Serdes.String();
    CounterMetricSerde metricSerde = new CounterMetricSerde();

    KStreamBuilder builder = new KStreamBuilder();

    // --- First topology: aggregate raw metrics by metric name
    KTable<String, CounterMetric> metricsStream = builder.table(appIdSerde, metricSerde, rawMetricsTopic, "raw-metrics");
    // FIXME: debug output
    metricsStream.foreach((key, value) -> System.out.println("RECEIVED - " + key + " --> " + value));

    KStream<String, CounterMetric> metricValueStream = metricsStream
            .groupBy((key, value) -> new KeyValue<>(value.getName(), value), metricNameSerde, metricSerde)
            .reduce(CounterMetric::add, CounterMetric::subtract, "aggregates")
            .toStream();
    // FIXME: debug output
    metricValueStream.foreach((key, value) -> System.out.println("OUTPUT - " + key + " --> " + value));
    metricValueStream.to(metricNameSerde, metricSerde, aggMetricsTopic);

    // --- Second topology: forward aggregated metrics to Graphite
    GraphiteReporter graphite = GraphiteReporter.builder()
            .hostname("localhost")
            .port(2003)
            .build();
    KStream<String, CounterMetric> aggMetricsStream = builder.stream(metricNameSerde, metricSerde, aggMetricsTopic);
    aggMetricsStream.foreach((key, metric) -> graphite.send(metric));

    // ---
    KafkaStreams streams = new KafkaStreams(builder, config);
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
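GraphiteReporter and CounterMetricSerde are project-specific classes here, not part of Kafka. As a rough illustration of what the reporter might do, the sketch below writes Graphite's plaintext protocol (one "path value epochSeconds" line per sample over TCP, default port 2003); the getName()/getValue() accessors on CounterMetric are assumptions:

import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.net.Socket;
import java.nio.charset.StandardCharsets;

// Hypothetical sketch of a minimal plaintext-protocol Graphite reporter
public final class GraphiteReporter {
    private final String hostname;
    private final int port;

    private GraphiteReporter(final String hostname, final int port) {
        this.hostname = hostname;
        this.port = port;
    }

    public static Builder builder() {
        return new Builder();
    }

    public void send(final CounterMetric metric) {
        // One short-lived connection per send keeps the sketch simple;
        // a real reporter would pool or batch connections
        try (Socket socket = new Socket(hostname, port);
             Writer out = new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)) {
            final long epochSeconds = System.currentTimeMillis() / 1000L;
            out.write(metric.getName() + " " + metric.getValue() + " " + epochSeconds + "\n");
            out.flush();
        } catch (final IOException e) {
            // Metrics are best-effort: log and drop rather than crash the stream thread
            System.err.println("Failed to report metric: " + e.getMessage());
        }
    }

    public static final class Builder {
        private String hostname = "localhost";
        private int port = 2003;

        public Builder hostname(final String hostname) { this.hostname = hostname; return this; }
        public Builder port(final int port) { this.port = port; return this; }
        public GraphiteReporter build() { return new GraphiteReporter(hostname, port); }
    }
}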