

Java KStream.foreach Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.streams.kstream.KStream.foreach. If you are wondering what KStream.foreach does, how to use it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples of org.apache.kafka.streams.kstream.KStream, the class this method belongs to.


The following presents 7 code examples of the KStream.foreach method, sorted by popularity by default.
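Before the individual examples, here is a minimal sketch of the method itself: KStream.foreach is a terminal operation that applies a side-effecting action to each record and returns nothing, so no further operators can be chained after it. The sketch assumes the pre-1.0 KStreamBuilder API that the examples on this page use; the class name ForeachSketch, the topic name "input-topic", and the broker address are placeholders for illustration, not taken from any project below.

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KStreamBuilder;

public class ForeachSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "foreach-sketch");    // placeholder application id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker address

        KStreamBuilder builder = new KStreamBuilder();
        KStream<String, String> stream =
            builder.stream(Serdes.String(), Serdes.String(), "input-topic"); // placeholder topic

        // foreach runs the given action once per record; it returns void,
        // so it ends the processing chain for this branch of the topology.
        stream.foreach((key, value) -> System.out.println(key + " -> " + value));

        KafkaStreams streams = new KafkaStreams(builder, props);
        streams.start();
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
}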

Example 1: buildStreams

import org.apache.kafka.streams.kstream.KStream; // import the package/class the method depends on
private KStreamBuilder buildStreams() {
    KStreamBuilder builder = new KStreamBuilder();

    // Side effect: mark every aggregated record as processed in the datastore.
    KStream<String, JsonNode> salesAggregated =
        builder.stream(Serdes.String(), JSON_SERDE, "sales-aggregated");

    salesAggregated.foreach(Aggregator::markAsProcessedInDatastore);

    // Aggregate unprocessed raw sales by user id and write the result back
    // to the "sales-aggregated" topic consumed above.
    KStream<String, JsonNode> salesRaw =
        builder.stream(Serdes.String(), JSON_SERDE, "sales-raw");

    salesRaw.filterNot(Aggregator::isAlreadyProcessed)
        .groupBy(Aggregator::groupByUserId, Serdes.String(), JSON_SERDE)
        .aggregate(() -> null, Aggregator::aggregateValues, JSON_SERDE, "aggregated-interm")
        .to(Serdes.String(), JSON_SERDE, "sales-aggregated");

    return builder;
}
 
Author: kevinsimard, Project: kafka-streams-aggregator, Lines: 19, Source: Aggregator.java

Example 2: testForeach

import org.apache.kafka.streams.kstream.KStream; // import the package/class the method depends on
@Test
public void testForeach() {
    // Given
    List<KeyValue<Integer, String>> inputRecords = Arrays.asList(
        new KeyValue<>(0, "zero"),
        new KeyValue<>(1, "one"),
        new KeyValue<>(2, "two"),
        new KeyValue<>(3, "three")
    );

    List<KeyValue<Integer, String>> expectedRecords = Arrays.asList(
        new KeyValue<>(0, "ZERO"),
        new KeyValue<>(2, "ONE"),
        new KeyValue<>(4, "TWO"),
        new KeyValue<>(6, "THREE")
    );

    final List<KeyValue<Integer, String>> actualRecords = new ArrayList<>();
    ForeachAction<Integer, String> action =
        new ForeachAction<Integer, String>() {
            @Override
            public void apply(Integer key, String value) {
                actualRecords.add(new KeyValue<>(key * 2, value.toUpperCase(Locale.ROOT)));
            }
        };

    // When
    KStreamBuilder builder = new KStreamBuilder();
    KStream<Integer, String> stream = builder.stream(intSerde, stringSerde, topicName);
    stream.foreach(action);

    // Then
    driver = new KStreamTestDriver(builder);
    for (KeyValue<Integer, String> record: inputRecords) {
        driver.process(topicName, record.key, record.value);
    }

    assertEquals(expectedRecords.size(), actualRecords.size());
    for (int i = 0; i < expectedRecords.size(); i++) {
        KeyValue<Integer, String> expectedRecord = expectedRecords.get(i);
        KeyValue<Integer, String> actualRecord = actualRecords.get(i);
        assertEquals(expectedRecord, actualRecord);
    }
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 45, Source: KStreamForeachTest.java

Example 3: aggregateAndPublish

import org.apache.kafka.streams.kstream.KStream; // import the package/class the method depends on
public void aggregateAndPublish(final KStream<K, T> in, final Producer<K, T> kafkaProducer, final String topic) {
    // Feed every incoming record into the window aggregation.
    in.foreach((k, t) -> aggregate(k, t));
    addAction(new WindowAggregationAction<K, T>() {
        @Override
        public void onExpire(final Stream<KeyValue<K, T>> aggregatedStream, final Set<K> expiredKeys) {
            // forEach is the terminal operation here; ending the pipeline with
            // map(...) alone would never execute, since java.util.stream pipelines are lazy.
            aggregatedStream.map(kv -> new ProducerRecord<K, T>(topic, kv.key, kv.value))
                .forEach(kafkaProducer::send);
        }
    });
}
 
Author: nickman, Project: HeliosStreams, Lines: 12, Source: WindowAggregation.java

Example 4: QueuedSchemaKStream

import org.apache.kafka.streams.kstream.KStream; // import the package/class the method depends on
private QueuedSchemaKStream(final Schema schema,
                            final KStream kstream,
                            final Field keyField,
                            final List<SchemaKStream> sourceSchemaKStreams,
                            final Type type,
                            final FunctionRegistry functionRegistry,
                            final Optional<Integer> limit,
                            final OutputNode outputNode,
                            final SchemaRegistryClient schemaRegistryClient) {
  super(schema, kstream, keyField, sourceSchemaKStreams, type, functionRegistry, schemaRegistryClient);
  setOutputNode(outputNode);
  // Push every row flowing through the stream into rowQueue, honoring the optional limit.
  kstream.foreach(new QueuedSchemaKStream.QueuePopulator(rowQueue, limit));
}
 
Author: confluentinc, Project: ksql, Lines: 14, Source: QueuedSchemaKStream.java

Example 5: shouldKStreamGlobalKTableLeftJoin

import org.apache.kafka.streams.kstream.KStream; // import the package/class the method depends on
@Test
public void shouldKStreamGlobalKTableLeftJoin() throws Exception {
    final KStream<String, String> streamTableJoin = stream.leftJoin(globalTable, keyMapper, joiner);
    streamTableJoin.foreach(foreachAction);
    produceInitialGlobalTableValues();
    startStreams();
    produceTopicValues(inputStream);

    final Map<String, String> expected = new HashMap<>();
    expected.put("a", "1+A");
    expected.put("b", "2+B");
    expected.put("c", "3+C");
    expected.put("d", "4+D");
    expected.put("e", "5+null");

    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return results.equals(expected);
        }
    }, 30000L, "waiting for initial values");


    produceGlobalTableValues();

    final ReadOnlyKeyValueStore<Long, String> replicatedStore = kafkaStreams.store(globalStore, QueryableStoreTypes.<Long, String>keyValueStore());

    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return "J".equals(replicatedStore.get(5L));
        }
    }, 30000, "waiting for data in replicated store");
    produceTopicValues(inputStream);

    expected.put("a", "1+F");
    expected.put("b", "2+G");
    expected.put("c", "3+H");
    expected.put("d", "4+I");
    expected.put("e", "5+J");

    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return results.equals(expected);
        }
    }, 30000L, "waiting for final values");
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 49, Source: GlobalKTableIntegrationTest.java

Example 6: shouldKStreamGlobalKTableJoin

import org.apache.kafka.streams.kstream.KStream; // import the package/class the method depends on
@Test
public void shouldKStreamGlobalKTableJoin() throws Exception {
    final KStream<String, String> streamTableJoin = stream.join(globalTable, keyMapper, joiner);
    streamTableJoin.foreach(foreachAction);
    produceInitialGlobalTableValues();
    startStreams();
    produceTopicValues(inputStream);

    final Map<String, String> expected = new HashMap<>();
    expected.put("a", "1+A");
    expected.put("b", "2+B");
    expected.put("c", "3+C");
    expected.put("d", "4+D");

    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return results.equals(expected);
        }
    }, 30000L, "waiting for initial values");


    produceGlobalTableValues();

    final ReadOnlyKeyValueStore<Long, String> replicatedStore = kafkaStreams.store(globalStore, QueryableStoreTypes.<Long, String>keyValueStore());

    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return "J".equals(replicatedStore.get(5L));
        }
    }, 30000, "waiting for data in replicated store");

    produceTopicValues(inputStream);

    expected.put("a", "1+F");
    expected.put("b", "2+G");
    expected.put("c", "3+H");
    expected.put("d", "4+I");
    expected.put("e", "5+J");

    TestUtils.waitForCondition(new TestCondition() {
        @Override
        public boolean conditionMet() {
            return results.equals(expected);
        }
    }, 30000L, "waiting for final values");
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 49, Source: GlobalKTableIntegrationTest.java

Example 7: main

import org.apache.kafka.streams.kstream.KStream; // import the package/class the method depends on
public static void main(String[] args) throws IOException {

    String zookeeperServer = args[0];
    String kafkaServer = args[1];
    String rawMetricsTopic = args[2];
    String aggMetricsTopic = args[3];

    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "metrics-aggregator");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServer);
    props.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, zookeeperServer);
    props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, "1");
    props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1 * 1000);
    props.put(StreamsConfig.TIMESTAMP_EXTRACTOR_CLASS_CONFIG, MetricTimeExtractor.class.getName());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    StreamsConfig config = new StreamsConfig(props);

    Serde<String> appIdSerde = Serdes.String();
    Serde<String> metricNameSerde = Serdes.String();
    CounterMetricSerde metricSerde = new CounterMetricSerde();

    KStreamBuilder builder = new KStreamBuilder();

    // --- First topology

    KTable<String, CounterMetric> metricsStream = builder.table(appIdSerde, metricSerde, rawMetricsTopic, "raw-metrics");

    // FIXME
    metricsStream.foreach((key, value) -> System.out.println("RECEIVED - " + key + " --> " + value));

    KStream<String, CounterMetric> metricValueStream = metricsStream
            .groupBy((key, value) -> new KeyValue<>(value.getName(), value), metricNameSerde, metricSerde)
            .reduce(CounterMetric::add, CounterMetric::subtract, "aggregates")
            .toStream();

    // FIXME
    metricValueStream.foreach((key, value) -> System.out.println("OUTPUT - " + key + " --> " + value));

    metricValueStream.to(metricNameSerde, metricSerde, aggMetricsTopic);

    // --- Second topology

    GraphiteReporter graphite = GraphiteReporter.builder()
            .hostname("localhost")
            .port(2003)
            .build();

    KStream<String, CounterMetric> aggMetricsStream = builder.stream(metricNameSerde, metricSerde, aggMetricsTopic);
    aggMetricsStream.foreach((key, metric) -> graphite.send(metric));

    // ---

    KafkaStreams streams = new KafkaStreams(builder, config);
    streams.start();

    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
 
Author: aseigneurin, Project: kafka-sandbox, Lines: 58, Source: MetricsAggregator.java


Note: The org.apache.kafka.streams.kstream.KStream.foreach method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not repost without permission.