Java KStreamBuilder.stream Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.streams.kstream.KStreamBuilder.stream, gathered from open-source code. If you are wondering what KStreamBuilder.stream does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.kafka.streams.kstream.KStreamBuilder.


The 15 code examples of the KStreamBuilder.stream method shown below are ordered by popularity by default.
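
Before diving into the examples, here is a minimal sketch of the two most commonly used overloads. The topic name is a placeholder; note also that KStreamBuilder belongs to the pre-1.0 Streams API and was later superseded by StreamsBuilder.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KStreamBuilder;

KStreamBuilder builder = new KStreamBuilder();

// Overload 1: rely on the default key/value serdes configured in StreamsConfig
KStream<String, String> withDefaults = builder.stream("input-topic");

// Overload 2: supply explicit serdes for this particular source
KStream<String, String> withSerdes = builder.stream(Serdes.String(), Serdes.String(), "input-topic");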

Example 1: main

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
public static void main(String[] args) throws Exception {
    Properties kafkaStreamProperties = new Properties();
    kafkaStreamProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "IP-Fraud-Detection");
    kafkaStreamProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    kafkaStreamProperties.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");
    kafkaStreamProperties.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    kafkaStreamProperties.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

    Serde<String> stringSerde = Serdes.String();

    KStreamBuilder fraudDetectionTopology = new KStreamBuilder();

    KStream<String, String> ipRecords = fraudDetectionTopology.stream(stringSerde, stringSerde, propertyReader.getPropertyValue("topic"));

    KStream<String, String> fraudIpRecords = ipRecords
            .filter((k, v) -> isFraud(v));

    fraudIpRecords.to(propertyReader.getPropertyValue("output_topic"));

    KafkaStreams streamManager = new KafkaStreams(fraudDetectionTopology, kafkaStreamProperties);
    streamManager.start();

    Runtime.getRuntime().addShutdownHook(new Thread(streamManager::close));
}
 
Developer: PacktPublishing, Project: Building-Data-Streaming-Applications-with-Apache-Kafka, Lines: 25, Source: IPFraudKafkaStreamApp.java
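
Example 1 references two members of its enclosing class that the excerpt omits: propertyReader, which resolves the "topic" and "output_topic" names from a properties file, and isFraud(String). Their real bodies are not shown; a hypothetical stand-in for the fraud check, just to make the sketch self-contained, could be:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Hypothetical stand-in -- the real project loads flagged IPs from configuration.
private static final Set<String> FRAUD_IPS =
        new HashSet<>(Arrays.asList("10.12.14.16", "10.12.14.17"));

private static boolean isFraud(String ipRecord) {
    // assume each record starts with the client IP, e.g. "10.12.14.16 GET /index.html"
    String ip = ipRecord.split(" ")[0];
    return FRAUD_IPS.contains(ip);
}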

Example 2: testFilterNot

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
@Test
public void testFilterNot() {
    KStreamBuilder builder = new KStreamBuilder();
    final int[] expectedKeys = new int[]{1, 2, 3, 4, 5, 6, 7};

    KStream<Integer, String> stream;
    MockProcessorSupplier<Integer, String> processor;

    processor = new MockProcessorSupplier<>();
    stream = builder.stream(Serdes.Integer(), Serdes.String(), topicName);
    stream.filterNot(isMultipleOfThree).process(processor);

    driver = new KStreamTestDriver(builder);
    for (int expectedKey : expectedKeys) {
        driver.process(topicName, expectedKey, "V" + expectedKey);
    }

    assertEquals(5, processor.processed.size());
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 20, Source: KStreamFilterTest.java
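
Both this test and Example 5 below use an isMultipleOfThree predicate defined elsewhere in KStreamFilterTest. Its body is not part of the excerpt, but the expected counts (2 of the keys 1..7 survive filter, 5 survive filterNot) imply a definition along these lines:

import org.apache.kafka.streams.kstream.Predicate;

private final Predicate<Integer, String> isMultipleOfThree = new Predicate<Integer, String>() {
    @Override
    public boolean test(Integer key, String value) {
        return (key % 3) == 0; // of the keys 1..7, only 3 and 6 match
    }
};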

Example 3: joinTopology

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
public static KStreamBuilder joinTopology(KStreamBuilder builder) {
    KStream<String, Integer> kStreamA = builder.stream(stringSerde, integerSerde, INPUT_TOPIC_A);
    KStream<String, Integer> kStreamB = builder.stream(stringSerde, integerSerde, INPUT_TOPIC_B);

    KTable<String, Integer> table = kStreamA
        .groupByKey(stringSerde, integerSerde)
        .aggregate(() -> 0, (k, v, t) -> v, integerSerde, STORAGE_NAME);

    kStreamB
        .leftJoin(table, (v1, v2) -> v1 + v2, stringSerde, integerSerde)
        .to(stringSerde, integerSerde, OUTPUT_TOPIC_A);

    kStreamB
        .leftJoin(table, (v1, v2) -> v1 - v2, stringSerde, integerSerde)
        .to(stringSerde, integerSerde, OUTPUT_TOPIC_B);

    return builder;
}
 
Developer: carlosmenezes, Project: mockafka, Lines: 19, Source: TopologyUtil.java
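
joinTopology assumes several constants from the enclosing TopologyUtil class. The names come from the example; the values below are assumptions chosen only to make the sketch compile:

import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;

static final Serde<String> stringSerde = Serdes.String();
static final Serde<Integer> integerSerde = Serdes.Integer();
static final String INPUT_TOPIC_A = "input-topic-a";   // assumed value
static final String INPUT_TOPIC_B = "input-topic-b";   // assumed value
static final String OUTPUT_TOPIC_A = "output-topic-a"; // assumed value
static final String OUTPUT_TOPIC_B = "output-topic-b"; // assumed value
static final String STORAGE_NAME = "aggregate-store";  // assumed value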

Example 4: main

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
public static void main(String[] args) {

    Properties config = new Properties();
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-starter-app");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

    KStreamBuilder builder = new KStreamBuilder();

    KStream<String, String> kStream = builder.stream("streams-file-input");
    // do stuff -- the transformation is elided; see the sketch after this example
    kStream.to("streams-wordcount-output");

    KafkaStreams streams = new KafkaStreams(builder, config);
    streams.cleanUp(); // only do this in dev - not in prod
    streams.start();

    // print the topology
    System.out.println(streams.toString());

    // shutdown hook to correctly close the streams application
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
 
Developer: kaiwaehner, Project: kafka-streams-machine-learning-examples, Lines: 27, Source: StreamsStarterApp.java
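
The "do stuff" placeholder in the code above elides the real transformation. Judging by the output topic name, a word count is what belongs there; a hedged sketch against the 0.11-era API, which would replace the pass-through kStream.to(...) line:

import java.util.Arrays;
import org.apache.kafka.streams.kstream.KTable;

KTable<String, Long> counts = kStream
        .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+"))) // line -> words
        .groupBy((key, word) -> word)   // re-key each record by the word itself
        .count("Counts");               // per-word count, backed by a store named "Counts"

counts.toStream().to(Serdes.String(), Serdes.Long(), "streams-wordcount-output");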

Example 5: testFilter

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
@Test
public void testFilter() {
    KStreamBuilder builder = new KStreamBuilder();
    final int[] expectedKeys = new int[]{1, 2, 3, 4, 5, 6, 7};

    KStream<Integer, String> stream;
    MockProcessorSupplier<Integer, String> processor;

    processor = new MockProcessorSupplier<>();
    stream = builder.stream(Serdes.Integer(), Serdes.String(), topicName);
    stream.filter(isMultipleOfThree).process(processor);

    driver = new KStreamTestDriver(builder);
    for (int expectedKey : expectedKeys) {
        driver.process(topicName, expectedKey, "V" + expectedKey);
    }

    assertEquals(2, processor.processed.size());
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 20, Source: KStreamFilterTest.java

Example 6: before

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
@Before
public void before() throws InterruptedException {
    testNo++;
    String applicationId = "kstream-repartition-join-test-" + testNo;
    builder = new KStreamBuilder();
    createTopics();
    streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, COMMIT_INTERVAL_MS);
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
    streamsConfiguration.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 3);
    streamsConfiguration.put(IntegrationTestUtils.INTERNAL_LEAVE_GROUP_ON_CLOSE, true);

    streamOne = builder.stream(Serdes.Long(), Serdes.Integer(), streamOneInput);
    streamTwo = builder.stream(Serdes.Integer(), Serdes.String(), streamTwoInput);
    streamFour = builder.stream(Serdes.Integer(), Serdes.String(), streamFourInput);

    keyMapper = MockKeyValueMapper.SelectValueKeyValueMapper();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 22, Source: KStreamRepartitionJoinTest.java
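
MockKeyValueMapper is a Kafka test utility not shown in the excerpt. SelectValueKeyValueMapper() presumably returns a mapper that promotes each record's value to its key, roughly:

import org.apache.kafka.streams.kstream.KeyValueMapper;

// Sketch of what MockKeyValueMapper.SelectValueKeyValueMapper() is assumed to return:
KeyValueMapper<Long, Integer, Integer> keyMapper = new KeyValueMapper<Long, Integer, Integer>() {
    @Override
    public Integer apply(final Long key, final Integer value) {
        return value; // the value becomes the new key
    }
};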

Example 7: testPrintKeyValueWithName

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
@Test
public void testPrintKeyValueWithName() {
    final KStreamPrint<Integer, String> kStreamPrint = new KStreamPrint<>(new PrintForeachAction(printWriter, "test-stream"), intSerd, stringSerd);

    final List<KeyValue<Integer, String>> inputRecords = Arrays.asList(
            new KeyValue<>(0, "zero"),
            new KeyValue<>(1, "one"),
            new KeyValue<>(2, "two"),
            new KeyValue<>(3, "three"));
    
    final String[] expectedResult = {"[test-stream]: 0, zero", "[test-stream]: 1, one", "[test-stream]: 2, two", "[test-stream]: 3, three"};
    
    final KStreamBuilder builder = new KStreamBuilder();
    final KStream<Integer, String> stream = builder.stream(intSerd, stringSerd, topicName);
    stream.process(kStreamPrint);
    
    driver = new KStreamTestDriver(builder);
    for (KeyValue<Integer, String> record: inputRecords) {
        driver.process(topicName, record.key, record.value);
    }
    printWriter.flush();
    final String[] flushedLines = new String(byteOutStream.toByteArray(), Charset.forName("UTF-8")).split("\n");
    for (int i = 0; i < flushedLines.length; i++) {
        assertEquals(expectedResult[i], flushedLines[i]); // expected value first, per JUnit convention
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 27, Source: KStreamPrintTest.java
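
This test relies on fixture fields (byteOutStream, printWriter, intSerd, stringSerd) declared elsewhere in KStreamPrintTest. Plausible initializations, assumed here for completeness:

import java.io.ByteArrayOutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;

private final ByteArrayOutputStream byteOutStream = new ByteArrayOutputStream();
private final PrintWriter printWriter =
        new PrintWriter(new OutputStreamWriter(byteOutStream, StandardCharsets.UTF_8));
private final Serde<Integer> intSerd = Serdes.Integer();
private final Serde<String> stringSerd = Serdes.String();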

Example 8: testFlatMapValues (despite its name, the test exercises mapValues)

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
@Test
public void testFlatMapValues() {
    KStreamBuilder builder = new KStreamBuilder();

    ValueMapper<CharSequence, Integer> mapper =
        new ValueMapper<CharSequence, Integer>() {
            @Override
            public Integer apply(CharSequence value) {
                return value.length();
            }
        };

    final int[] expectedKeys = {1, 10, 100, 1000};

    KStream<Integer, String> stream;
    MockProcessorSupplier<Integer, Integer> processor = new MockProcessorSupplier<>();
    stream = builder.stream(intSerde, stringSerde, topicName);
    stream.mapValues(mapper).process(processor);

    driver = new KStreamTestDriver(builder);
    for (int expectedKey : expectedKeys) {
        driver.process(topicName, expectedKey, Integer.toString(expectedKey));
    }

    assertEquals(4, processor.processed.size());

    String[] expected = {"1:1", "10:2", "100:3", "1000:4"};

    for (int i = 0; i < expected.length; i++) {
        assertEquals(expected[i], processor.processed.get(i));
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 33, Source: KStreamMapValuesTest.java
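
Incidentally, with Java 8 lambdas the anonymous ValueMapper above collapses to a single line:

stream.mapValues(value -> value.length()).process(processor);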

Example 9: main

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
public static void main(String[] args) throws Exception {

    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streaming-example");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1500);

    // To consume data produced before the process started:
    // props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    // props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);

    KStreamBuilder builder = new KStreamBuilder();

    KStream<String, String> source = builder.stream("data-in");

    KStream<String, String> stats = source.groupByKey()
            .aggregate(KafkaStreamingStatistics::new,
                (k, v, clusterstats) -> clusterstats.add(v),
                TimeWindows.of(60000).advanceBy(10000),
                Serdes.serdeFrom(new MySerde(), new MySerde()),
                "data-store")
            .toStream((key, value) -> key.key().toString() + " " + key.window().start())
            .mapValues((job) -> job.computeAvgTime().toString());

    stats.to(Serdes.String(), Serdes.String(), "data-out");

    KafkaStreams streams = new KafkaStreams(builder, props);

    streams.cleanUp();
    streams.start();

    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
 
Developer: ebi-wp, Project: kafka-streams-api-websockets, Lines: 36, Source: KafkaStreamingMain.java
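
MySerde and KafkaStreamingStatistics are project classes the excerpt does not include. From the call sites, the aggregate must expose a fluent add(String) and a computeAvgTime(); a purely hypothetical minimal version, assuming numeric payloads:

// Purely hypothetical -- the real class in the project computes richer statistics.
public class KafkaStreamingStatistics {
    private double totalTime;
    private long count;

    public KafkaStreamingStatistics add(final String value) {
        totalTime += Double.parseDouble(value); // assumes the payload is a numeric timing
        count++;
        return this;
    }

    public Double computeAvgTime() {
        return count == 0 ? 0.0 : totalTime / count;
    }
}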

Example 10: testMap

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
@Test
public void testMap() {
    KStreamBuilder builder = new KStreamBuilder();

    KeyValueMapper<Integer, String, KeyValue<String, Integer>> mapper =
        new KeyValueMapper<Integer, String, KeyValue<String, Integer>>() {
            @Override
            public KeyValue<String, Integer> apply(Integer key, String value) {
                return KeyValue.pair(value, key);
            }
        };

    final int[] expectedKeys = new int[]{0, 1, 2, 3};

    KStream<Integer, String> stream = builder.stream(intSerde, stringSerde, topicName);
    MockProcessorSupplier<String, Integer> processor;

    processor = new MockProcessorSupplier<>();
    stream.map(mapper).process(processor);

    driver = new KStreamTestDriver(builder);
    for (int expectedKey : expectedKeys) {
        driver.process(topicName, expectedKey, "V" + expectedKey);
    }

    assertEquals(4, processor.processed.size());

    String[] expected = new String[]{"V0:0", "V1:1", "V2:2", "V3:3"};

    for (int i = 0; i < expected.length; i++) {
        assertEquals(expected[i], processor.processed.get(i));
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 34, Source: KStreamMapTest.java

Example 11: testFlatMap

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
@Test
public void testFlatMap() {
    KStreamBuilder builder = new KStreamBuilder();

    KeyValueMapper<Number, Object, Iterable<KeyValue<String, String>>> mapper =
        new KeyValueMapper<Number, Object, Iterable<KeyValue<String, String>>>() {
            @Override
            public Iterable<KeyValue<String, String>> apply(Number key, Object value) {
                ArrayList<KeyValue<String, String>> result = new ArrayList<>();
                for (int i = 0; i < key.intValue(); i++) {
                    result.add(KeyValue.pair(Integer.toString(key.intValue() * 10 + i), value.toString()));
                }
                return result;
            }
        };

    final int[] expectedKeys = {0, 1, 2, 3};

    KStream<Integer, String> stream;
    MockProcessorSupplier<String, String> processor;

    processor = new MockProcessorSupplier<>();
    stream = builder.stream(Serdes.Integer(), Serdes.String(), topicName);
    stream.flatMap(mapper).process(processor);

    driver = new KStreamTestDriver(builder);
    for (int expectedKey : expectedKeys) {
        driver.process(topicName, expectedKey, "V" + expectedKey);
    }

    assertEquals(6, processor.processed.size());

    String[] expected = {"10:V1", "20:V2", "21:V2", "30:V3", "31:V3", "32:V3"};

    for (int i = 0; i < expected.length; i++) {
        assertEquals(expected[i], processor.processed.get(i));
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 39, Source: KStreamFlatMapTest.java

Example 12: createKafkaStreamsKStreamKStreamJoin

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
private KafkaStreams createKafkaStreamsKStreamKStreamJoin(Properties streamConfig, String kStreamTopic1,
                                                          String kStreamTopic2, final CountDownLatch latch) {
    final KStreamBuilder builder = new KStreamBuilder();

    final KStream<Long, byte[]> input1 = builder.stream(kStreamTopic1);
    final KStream<Long, byte[]> input2 = builder.stream(kStreamTopic2);
    final long timeDifferenceMs = 10000L;

    input1.leftJoin(input2, VALUE_JOINER, JoinWindows.of(timeDifferenceMs)).foreach(new CountDownAction(latch));

    return createKafkaStreamsWithExceptionHandler(builder, streamConfig);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 13, Source: SimpleBenchmark.java
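
This benchmark and Example 14 below lean on two SimpleBenchmark helpers that the excerpts omit. Their shapes can be inferred from the call sites; plausible sketches follow (the join result itself is irrelevant to the benchmark, so any merge will do):

import java.util.concurrent.CountDownLatch;
import org.apache.kafka.streams.kstream.ForeachAction;
import org.apache.kafka.streams.kstream.ValueJoiner;

// A joiner whose output value is unimportant for the benchmark (sketch):
static final ValueJoiner<byte[], byte[], byte[]> VALUE_JOINER =
        new ValueJoiner<byte[], byte[], byte[]>() {
            @Override
            public byte[] apply(final byte[] value1, final byte[] value2) {
                return value1 != null ? value1 : value2;
            }
        };

// Counts records down toward zero so the benchmark knows when to stop (sketch):
static final class CountDownAction<K, V> implements ForeachAction<K, V> {
    private final CountDownLatch latch;

    CountDownAction(final CountDownLatch latch) {
        this.latch = latch;
    }

    @Override
    public void apply(final K key, final V value) {
        latch.countDown();
    }
}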

Example 13: testForeach

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
@Test
public void testForeach() {
    // Given
    List<KeyValue<Integer, String>> inputRecords = Arrays.asList(
        new KeyValue<>(0, "zero"),
        new KeyValue<>(1, "one"),
        new KeyValue<>(2, "two"),
        new KeyValue<>(3, "three")
    );

    List<KeyValue<Integer, String>> expectedRecords = Arrays.asList(
        new KeyValue<>(0, "ZERO"),
        new KeyValue<>(2, "ONE"),
        new KeyValue<>(4, "TWO"),
        new KeyValue<>(6, "THREE")
    );

    final List<KeyValue<Integer, String>> actualRecords = new ArrayList<>();
    ForeachAction<Integer, String> action =
        new ForeachAction<Integer, String>() {
            @Override
            public void apply(Integer key, String value) {
                actualRecords.add(new KeyValue<>(key * 2, value.toUpperCase(Locale.ROOT)));
            }
        };

    // When
    KStreamBuilder builder = new KStreamBuilder();
    KStream<Integer, String> stream = builder.stream(intSerde, stringSerde, topicName);
    stream.foreach(action);

    // Then
    driver = new KStreamTestDriver(builder);
    for (KeyValue<Integer, String> record: inputRecords) {
        driver.process(topicName, record.key, record.value);
    }

    assertEquals(expectedRecords.size(), actualRecords.size());
    for (int i = 0; i < expectedRecords.size(); i++) {
        KeyValue<Integer, String> expectedRecord = expectedRecords.get(i);
        KeyValue<Integer, String> actualRecord = actualRecords.get(i);
        assertEquals(expectedRecord, actualRecord);
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 45, Source: KStreamForeachTest.java

Example 14: createCountStreams

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
private KafkaStreams createCountStreams(Properties streamConfig, String topic, final CountDownLatch latch) {
    final KStreamBuilder builder = new KStreamBuilder();
    final KStream<Integer, byte[]> input = builder.stream(topic);

    input.groupByKey()
        .count("tmpStoreName").foreach(new CountDownAction(latch));

    return new KafkaStreams(builder, streamConfig);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 10, Source: SimpleBenchmark.java

Example 15: shouldThrowExceptionOverlappingTopic

import org.apache.kafka.streams.kstream.KStreamBuilder; // required import
@Test(expected = TopologyBuilderException.class)
public void shouldThrowExceptionOverlappingTopic() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    // NOTE: this would realistically get caught when building the topology; the test is for completeness
    builder.stream(KStreamBuilder.AutoOffsetReset.EARLIEST, Pattern.compile("topic-[A-D]_1"));
    builder.stream(KStreamBuilder.AutoOffsetReset.LATEST, Pattern.compile("topic-\\d_1"));
    builder.stream(KStreamBuilder.AutoOffsetReset.LATEST, TOPIC_A_1, TOPIC_Z_1);

    builder.latestResetTopicsPattern();
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 11, Source: KStreamsFineGrainedAutoResetIntegrationTest.java
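
TOPIC_A_1 and TOPIC_Z_1 are constants of the test class. Values along these lines (assumed) would trigger the conflict the test expects: topic-A_1 is matched by the EARLIEST pattern yet also subscribed explicitly with LATEST, and that overlap is what throws the TopologyBuilderException:

// Assumed values, chosen to produce the overlapping subscription:
private static final String TOPIC_A_1 = "topic-A_1";
private static final String TOPIC_Z_1 = "topic-Z_1";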


Note: The org.apache.kafka.streams.kstream.KStreamBuilder.stream examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code fragments were selected from open-source projects contributed by their respective authors, who retain copyright; for distribution and use, consult each project's license. Do not reproduce without permission.