

Java KStream.to Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.streams.kstream.KStream.to, gathered from open-source projects. If you are wondering what KStream.to does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the containing class, org.apache.kafka.streams.kstream.KStream.


Fifteen code examples of the KStream.to method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
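All of the examples below target the pre-1.0 (0.10.x/0.11.x) Streams API, where KStream.to writes every record of the stream to a Kafka topic and returns void. For orientation, here is a minimal sketch of the two overloads most of the examples call; the topic names are placeholders, not names taken from the examples.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KStreamBuilder;

public class KStreamToSketch {
    public static void main(String[] args) {
        KStreamBuilder builder = new KStreamBuilder();
        KStream<String, String> stream = builder.stream("source-topic");

        // Overload 1: rely on the default key/value serdes from StreamsConfig.
        stream.to("sink-topic");

        // Overload 2: override the serdes for this one sink, e.g. when the
        // stream's types differ from the configured defaults.
        stream.to(Serdes.String(), Serdes.String(), "sink-topic");
    }
}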

Example 1: main

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Properties kafkaStreamProperties = new Properties();
    kafkaStreamProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-stream-wordCount");
    kafkaStreamProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    kafkaStreamProperties.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");
    kafkaStreamProperties.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    kafkaStreamProperties.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

    Serde<String> stringSerde = Serdes.String();
    Serde<Long> longSerde = Serdes.Long();

    KStreamBuilder streamTopology = new KStreamBuilder();
    KStream<String, String> topicRecords = streamTopology.stream(stringSerde, stringSerde, "input");
    KStream<String, Long> wordCounts = topicRecords
            .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
            .map((key, word) -> new KeyValue<>(word, word))
            .countByKey("Count")
            .toStream();
    wordCounts.to(stringSerde, longSerde, "wordCount");

    KafkaStreams streamManager = new KafkaStreams(streamTopology, kafkaStreamProperties);
    streamManager.start();

    Runtime.getRuntime().addShutdownHook(new Thread(streamManager::close));
}
 
Author: PacktPublishing, Project: Building-Data-Streaming-Applications-with-Apache-Kafka, Lines: 26, Source: KafkaStreamWordCount.java
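To exercise the word-count topology above, you need text records on its "input" topic. The following is a minimal producer sketch using the standard Java client API; the class name and payload string are made up for illustration.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class WordCountInputProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // try-with-resources flushes and closes the producer on exit.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("input", "hello kafka streams hello world"));
        }
    }
}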

Example 2: main

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Properties kafkaStreamProperties = new Properties();
    kafkaStreamProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "IP-Fraud-Detection");
    kafkaStreamProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    kafkaStreamProperties.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");
    kafkaStreamProperties.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    kafkaStreamProperties.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

    Serde<String> stringSerde = Serdes.String();

    KStreamBuilder fraudDetectionTopology = new KStreamBuilder();

    KStream<String, String> ipRecords = fraudDetectionTopology.stream(stringSerde, stringSerde, propertyReader.getPropertyValue("topic"));

    KStream<String, String> fraudIpRecords = ipRecords
            .filter((k, v) -> isFraud(v));

    fraudIpRecords.to(propertyReader.getPropertyValue("output_topic"));

    KafkaStreams streamManager = new KafkaStreams(fraudDetectionTopology, kafkaStreamProperties);
    streamManager.start();

    Runtime.getRuntime().addShutdownHook(new Thread(streamManager::close));
}
 
Author: PacktPublishing, Project: Building-Data-Streaming-Applications-with-Apache-Kafka, Lines: 25, Source: IPFraudKafkaStreamApp.java

Example 3: main

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
public static void main(String[] args) {

        Properties config = new Properties();
        config.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-starter-app");
        config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        KStreamBuilder builder = new KStreamBuilder();

        KStream<String, String> kStream = builder.stream("streams-file-input");
        // do stuff
        kStream.to("streams-wordcount-output");

        KafkaStreams streams = new KafkaStreams(builder, config);
        streams.cleanUp(); // only do this in dev - not in prod
        streams.start();

        // print the topology
        System.out.println(streams.toString());

        // shutdown hook to correctly close the streams application
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));

}
 
Author: kaiwaehner, Project: kafka-streams-machine-learning-examples, Lines: 27, Source: StreamsStarterApp.java

Example 4: main

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streaming-example");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1500);

//        To get data produced before process started
//        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
//        props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);

        KStreamBuilder builder = new KStreamBuilder();

        KStream<String, String> source = builder.stream("data-in");

        KStream<String, String> stats = source.groupByKey()
                .aggregate(KafkaStreamingStatistics::new,
                    (k, v, clusterstats) -> clusterstats.add(v),
                    TimeWindows.of(60000).advanceBy(10000),
                    Serdes.serdeFrom(new MySerde(), new MySerde()),
                    "data-store")
                .toStream((key, value) -> key.key().toString() + " " + key.window().start())
                .mapValues((job) -> job.computeAvgTime().toString());

        stats.to(Serdes.String(), Serdes.String(), "data-out");

        KafkaStreams streams = new KafkaStreams(builder, props);

        streams.cleanUp();
        streams.start();

        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
 
Author: ebi-wp, Project: kafka-streams-api-websockets, Lines: 36, Source: KafkaStreamingMain.java

Example 5: shouldThrowStreamsExceptionNoResetSpecified

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
@Test
public void shouldThrowStreamsExceptionNoResetSpecified() throws Exception {
    Properties props = new Properties();
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");

    Properties localConfig = StreamsTestUtils.getStreamsConfig(
            "testAutoOffsetWithNone",
            CLUSTER.bootstrapServers(),
            STRING_SERDE_CLASSNAME,
            STRING_SERDE_CLASSNAME,
            props);

    final KStreamBuilder builder = new KStreamBuilder();
    final KStream<String, String> exceptionStream = builder.stream(NOOP);

    exceptionStream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);

    KafkaStreams streams = new KafkaStreams(builder, localConfig);

    final TestingUncaughtExceptionHandler uncaughtExceptionHandler = new TestingUncaughtExceptionHandler();

    final TestCondition correctExceptionThrownCondition = new TestCondition() {
        @Override
        public boolean conditionMet() {
            return uncaughtExceptionHandler.correctExceptionThrown;
        }
    };

    streams.setUncaughtExceptionHandler(uncaughtExceptionHandler);
    streams.start();
    TestUtils.waitForCondition(correctExceptionThrownCondition, "The expected NoOffsetForPartitionException was never thrown");
    streams.close();
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 34, Source: KStreamsFineGrainedAutoResetIntegrationTest.java

Example 6: createKafkaStreamsWithSink

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
private KafkaStreams createKafkaStreamsWithSink(String topic, final CountDownLatch latch) {
    final Properties props = setStreamProperties("simple-benchmark-streams-with-sink");

    KStreamBuilder builder = new KStreamBuilder();

    KStream<Integer, byte[]> source = builder.stream(INTEGER_SERDE, BYTE_SERDE, topic);

    source.to(INTEGER_SERDE, BYTE_SERDE, SINK_TOPIC);
    source.process(new ProcessorSupplier<Integer, byte[]>() {
        @Override
        public Processor<Integer, byte[]> get() {
            return new AbstractProcessor<Integer, byte[]>() {
                @Override
                public void init(ProcessorContext context) {
                }

                @Override
                public void process(Integer key, byte[] value) {
                    processedRecords++;
                    processedBytes += value.length + Integer.SIZE;
                    if (processedRecords == numRecords) {
                        latch.countDown();
                    }
                }

                @Override
                public void punctuate(long timestamp) {
                }

                @Override
                public void close() {
                }
            };
        }
    });

    return createKafkaStreamsWithExceptionHandler(builder, props);
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 39, Source: SimpleBenchmark.java

Example 7: main

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
public static void main( String[] args ) {
    Properties streamsConfig = new Properties();
    // The name must be unique on the Kafka cluster
    streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-example");
    // Brokers
    streamsConfig.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, args[0]);
    // Zookeeper
    streamsConfig.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, args[1]);
    // SerDes for key and values
    streamsConfig.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfig.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

    // Serdes for the word and count
    Serde<String> stringSerde = Serdes.String();
    Serde<Long> longSerde = Serdes.Long();

    KStreamBuilder builder = new KStreamBuilder();
    KStream<String, String> sentences = builder.stream(stringSerde, stringSerde, "test");
    KStream<String, Long> wordCounts = sentences
            .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
            .map((key, word) -> new KeyValue<>(word, word))
            .through("RekeyedIntermediateTopic")
            .countByKey("Counts")
            .toStream();
    wordCounts.to(stringSerde, longSerde, "wordcounts");

    KafkaStreams streams = new KafkaStreams(builder, streamsConfig);
    streams.start();

    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
 
Author: Azure-Samples, Project: hdinsight-kafka-java-get-started, Lines: 32, Source: Stream.java
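A note on Example 7: in this generation of the API, .through(topic) is shorthand for a to(topic) immediately followed by re-reading the topic as a new stream, which is what forces the repartition by word before countByKey. A minimal sketch of that equivalence, reusing the builder and default string serdes from the example (the variable names here are illustrative):

// The single call...
KStream<String, String> rekeyed = wordPairs.through("RekeyedIntermediateTopic");

// ...behaves like writing out and reading back:
wordPairs.to("RekeyedIntermediateTopic");
KStream<String, String> rekeyedAgain = builder.stream("RekeyedIntermediateTopic");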

Example 8: main

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
/** Connects the topic "console" to the topic "exclamated", adding two
 *  exclamation points to the input values.
 *
 *  @param args Not used.
 */
public static void main(String[] args) {

    // Configuration.
    Properties config = new Properties();

    // For the cluster. Assumes everything is local.
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "exclamation-kafka-streams");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    config.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");

    // Serdes.
    config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName());
    config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

    KStreamBuilder builder = new KStreamBuilder();

    // Read the stream from the topic into a KStream.
    KStream<byte[], String> text = builder.stream("console");

    // Apply the transformation.
    KStream<byte[], String> exclamation = text.mapValues(x -> x + "!")
                                              .mapValues(x -> x + "!");

    // Sink it. Uses the configured serializers.
    exclamation.to("exclamated");

    // Build and run.
    KafkaStreams streams = new KafkaStreams(builder, config);

    streams.start();
}
 
Author: timothyrenner, Project: kafka-streams-ex, Lines: 41, Source: ExclamationKafkaStream.java

Example 9: main

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
public static void main(String[] args) {

        String bootstrapServers = System.getenv("KAFKA_BOOTSTRAP_SERVERS");
        LOG.info("KAFKA_BOOTSTRAP_SERVERS = {}", bootstrapServers);

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_NAME);
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        KStreamBuilder builder = new KStreamBuilder();

        KStream<String, String> source = builder.stream(sourceAddress);

        KStream<Windowed<String>, String> max = source
                /*.selectKey((key, value, newKey) -> {
                    return "temp";
                })*/
                .selectKey(new KeyValueMapper<String, String, String>() {
                    @Override
                    public String apply(String key, String value) {
                        return "temp";
                    }
                })
                .groupByKey()
                .reduce((a,b) -> {
                    if (Integer.parseInt(a) > Integer.parseInt(b))
                        return a;
                    else
                        return b;
                }, TimeWindows.of(TimeUnit.SECONDS.toMillis(5000)))
                .toStream();

        WindowedSerializer<String> windowedSerializer = new WindowedSerializer<>(Serdes.String().serializer());
        WindowedDeserializer<String> windowedDeserializer = new WindowedDeserializer<>(Serdes.String().deserializer());
        Serde<Windowed<String>> windowedSerde = Serdes.serdeFrom(windowedSerializer, windowedDeserializer);

        // need to override key serde to Windowed<String> type
        max.to(windowedSerde, Serdes.String(), destinationAddress);

        final KafkaStreams streams = new KafkaStreams(builder, props);

        final CountDownLatch latch = new CountDownLatch(1);

        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread("streams-temperature-shutdown-hook") {
            @Override
            public void run() {
                streams.close();
                latch.countDown();
            }
        });

        try {
            streams.start();
            latch.await();
        } catch (Throwable e) {
            System.exit(1);
        }
        System.exit(0);
}
 
Author: ppatierno, Project: enmasse-iot-demo, Lines: 65, Source: KafkaTemperature.java
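Example 9's comment about overriding the key serde cuts both ways: whatever reads the output topic must decode keys with the matching WindowedDeserializer. Below is a minimal consumer-side sketch; the topic and group names are placeholders, since destinationAddress is defined outside the snippet.

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.kstream.internals.WindowedDeserializer;

public class MaxTemperatureReader {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "max-temperature-reader");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        // The key deserializer mirrors the WindowedSerializer used on the producing side.
        try (KafkaConsumer<Windowed<String>, String> consumer = new KafkaConsumer<>(
                props,
                new WindowedDeserializer<>(Serdes.String().deserializer()),
                Serdes.String().deserializer())) {
            consumer.subscribe(Collections.singletonList("temperature-max"));
            ConsumerRecords<Windowed<String>, String> records = consumer.poll(5000);
            for (ConsumerRecord<Windowed<String>, String> record : records) {
                System.out.println(record.key().key() + " @ " + record.key().window().start()
                        + " -> " + record.value());
            }
        }
    }
}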

Example 10: main

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-pageview-untyped");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, JsonTimestampExtractor.class);
    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);

    // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    KStreamBuilder builder = new KStreamBuilder();

    final Serializer<JsonNode> jsonSerializer = new JsonSerializer();
    final Deserializer<JsonNode> jsonDeserializer = new JsonDeserializer();
    final Serde<JsonNode> jsonSerde = Serdes.serdeFrom(jsonSerializer, jsonDeserializer);

    KStream<String, JsonNode> views = builder.stream(Serdes.String(), jsonSerde, "streams-pageview-input");

    KTable<String, JsonNode> users = builder.table(Serdes.String(), jsonSerde,
        "streams-userprofile-input", "streams-userprofile-store-name");

    KTable<String, String> userRegions = users.mapValues(new ValueMapper<JsonNode, String>() {
        @Override
        public String apply(JsonNode record) {
            return record.get("region").textValue();
        }
    });

    KStream<JsonNode, JsonNode> regionCount = views
            .leftJoin(userRegions, new ValueJoiner<JsonNode, String, JsonNode>() {
                @Override
                public JsonNode apply(JsonNode view, String region) {
                    ObjectNode jNode = JsonNodeFactory.instance.objectNode();

                    return jNode.put("user", view.get("user").textValue())
                            .put("page", view.get("page").textValue())
                            .put("region", region == null ? "UNKNOWN" : region);
                }
            })
            .map(new KeyValueMapper<String, JsonNode, KeyValue<String, JsonNode>>() {
                @Override
                public KeyValue<String, JsonNode> apply(String user, JsonNode viewRegion) {
                    return new KeyValue<>(viewRegion.get("region").textValue(), viewRegion);
                }
            })
            .groupByKey(Serdes.String(), jsonSerde)
            .count(TimeWindows.of(7 * 24 * 60 * 60 * 1000L).advanceBy(1000), "RollingSevenDaysOfPageViewsByRegion")
            // TODO: we can merge this toStream().map(...) with a single toStream(...)
            .toStream()
            .map(new KeyValueMapper<Windowed<String>, Long, KeyValue<JsonNode, JsonNode>>() {
                @Override
                public KeyValue<JsonNode, JsonNode> apply(Windowed<String> key, Long value) {
                    ObjectNode keyNode = JsonNodeFactory.instance.objectNode();
                    keyNode.put("window-start", key.window().start())
                            .put("region", key.key());

                    ObjectNode valueNode = JsonNodeFactory.instance.objectNode();
                    valueNode.put("count", value);

                    return new KeyValue<>((JsonNode) keyNode, (JsonNode) valueNode);
                }
            });

    // write to the result topic
    regionCount.to(jsonSerde, jsonSerde, "streams-pageviewstats-untyped-output");

    KafkaStreams streams = new KafkaStreams(builder, props);
    streams.start();

    // usually the stream application would be running forever,
    // in this example we just let it run for some time and stop since the input data is finite.
    Thread.sleep(5000L);

    streams.close();
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 76, Source: PageViewUntypedDemo.java

Example 11: testRegexMatchesTopicsAWhenCreated

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
@Test
public void testRegexMatchesTopicsAWhenCreated() throws Exception {

    final Serde<String> stringSerde = Serdes.String();
    final List<String> expectedFirstAssignment = Arrays.asList("TEST-TOPIC-1");
    final List<String> expectedSecondAssignment = Arrays.asList("TEST-TOPIC-1", "TEST-TOPIC-2");

    final StreamsConfig streamsConfig = new StreamsConfig(streamsConfiguration);

    CLUSTER.createTopic("TEST-TOPIC-1");

    final KStreamBuilder builder = new KStreamBuilder();

    final KStream<String, String> pattern1Stream = builder.stream(Pattern.compile("TEST-TOPIC-\\d"));

    pattern1Stream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);

    final KafkaStreams streams = new KafkaStreams(builder, streamsConfiguration);

    final Field streamThreadsField = streams.getClass().getDeclaredField("threads");
    streamThreadsField.setAccessible(true);
    final StreamThread[] streamThreads = (StreamThread[]) streamThreadsField.get(streams);
    final StreamThread originalThread = streamThreads[0];

    final TestStreamThread testStreamThread = new TestStreamThread(builder, streamsConfig,
        new DefaultKafkaClientSupplier(),
        originalThread.applicationId, originalThread.clientId, originalThread.processId, new Metrics(), Time.SYSTEM);

    final TestCondition oneTopicAdded = new TestCondition() {
        @Override
        public boolean conditionMet() {
            return testStreamThread.assignedTopicPartitions.equals(expectedFirstAssignment);
        }
    };

    streamThreads[0] = testStreamThread;
    streams.start();

    TestUtils.waitForCondition(oneTopicAdded, STREAM_TASKS_NOT_UPDATED);

    CLUSTER.createTopic("TEST-TOPIC-2");

    final TestCondition secondTopicAdded = new TestCondition() {
        @Override
        public boolean conditionMet() {
            return testStreamThread.assignedTopicPartitions.equals(expectedSecondAssignment);
        }
    };

    TestUtils.waitForCondition(secondTopicAdded, STREAM_TASKS_NOT_UPDATED);

    streams.close();
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 54, Source: RegexSourceIntegrationTest.java

Example 12: testRegexMatchesTopicsAWhenDeleted

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
@Test
public void testRegexMatchesTopicsAWhenDeleted() throws Exception {

    final Serde<String> stringSerde = Serdes.String();
    final List<String> expectedFirstAssignment = Arrays.asList("TEST-TOPIC-A", "TEST-TOPIC-B");
    final List<String> expectedSecondAssignment = Arrays.asList("TEST-TOPIC-B");

    final StreamsConfig streamsConfig = new StreamsConfig(streamsConfiguration);

    CLUSTER.createTopics("TEST-TOPIC-A", "TEST-TOPIC-B");

    final KStreamBuilder builder = new KStreamBuilder();

    final KStream<String, String> pattern1Stream = builder.stream(Pattern.compile("TEST-TOPIC-[A-Z]"));

    pattern1Stream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);

    final KafkaStreams streams = new KafkaStreams(builder, streamsConfiguration);

    final Field streamThreadsField = streams.getClass().getDeclaredField("threads");
    streamThreadsField.setAccessible(true);
    final StreamThread[] streamThreads = (StreamThread[]) streamThreadsField.get(streams);
    final StreamThread originalThread = streamThreads[0];

    final TestStreamThread testStreamThread = new TestStreamThread(builder, streamsConfig,
        new DefaultKafkaClientSupplier(),
        originalThread.applicationId, originalThread.clientId, originalThread.processId, new Metrics(), Time.SYSTEM);

    streamThreads[0] = testStreamThread;

    final TestCondition bothTopicsAdded = new TestCondition() {
        @Override
        public boolean conditionMet() {
            return testStreamThread.assignedTopicPartitions.equals(expectedFirstAssignment);
        }
    };
    streams.start();

    TestUtils.waitForCondition(bothTopicsAdded, STREAM_TASKS_NOT_UPDATED);

    CLUSTER.deleteTopic("TEST-TOPIC-A");

    final TestCondition oneTopicRemoved = new TestCondition() {
        @Override
        public boolean conditionMet() {
            return testStreamThread.assignedTopicPartitions.equals(expectedSecondAssignment);
        }
    };

    TestUtils.waitForCondition(oneTopicRemoved, STREAM_TASKS_NOT_UPDATED);

    streams.close();
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 54, Source: RegexSourceIntegrationTest.java

Example 13: testShouldReadFromRegexAndNamedTopics

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
@Test
public void testShouldReadFromRegexAndNamedTopics() throws Exception {

    final String topic1TestMessage = "topic-1 test";
    final String topic2TestMessage = "topic-2 test";
    final String topicATestMessage = "topic-A test";
    final String topicCTestMessage = "topic-C test";
    final String topicYTestMessage = "topic-Y test";
    final String topicZTestMessage = "topic-Z test";


    final Serde<String> stringSerde = Serdes.String();

    final KStreamBuilder builder = new KStreamBuilder();

    final KStream<String, String> pattern1Stream = builder.stream(Pattern.compile("topic-\\d"));
    final KStream<String, String> pattern2Stream = builder.stream(Pattern.compile("topic-[A-D]"));
    final KStream<String, String> namedTopicsStream = builder.stream(TOPIC_Y, TOPIC_Z);

    pattern1Stream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
    pattern2Stream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
    namedTopicsStream.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);

    final KafkaStreams streams = new KafkaStreams(builder, streamsConfiguration);
    streams.start();

    final Properties producerConfig = TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class);

    IntegrationTestUtils.produceValuesSynchronously(TOPIC_1, Arrays.asList(topic1TestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(TOPIC_2, Arrays.asList(topic2TestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(TOPIC_A, Arrays.asList(topicATestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(TOPIC_C, Arrays.asList(topicCTestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(TOPIC_Y, Arrays.asList(topicYTestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(TOPIC_Z, Arrays.asList(topicZTestMessage), producerConfig, mockTime);

    final Properties consumerConfig = TestUtils.consumerConfig(CLUSTER.bootstrapServers(), StringDeserializer.class, StringDeserializer.class);

    final List<String> expectedReceivedValues = Arrays.asList(topicATestMessage, topic1TestMessage, topic2TestMessage, topicCTestMessage, topicYTestMessage, topicZTestMessage);
    final List<KeyValue<String, String>> receivedKeyValues = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, DEFAULT_OUTPUT_TOPIC, 6);
    final List<String> actualValues = new ArrayList<>(6);

    for (final KeyValue<String, String> receivedKeyValue : receivedKeyValues) {
        actualValues.add(receivedKeyValue.value);
    }

    streams.close();
    Collections.sort(actualValues);
    Collections.sort(expectedReceivedValues);
    assertThat(actualValues, equalTo(expectedReceivedValues));
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 51, Source: RegexSourceIntegrationTest.java

Example 14: testMultipleConsumersCanReadFromPartitionedTopic

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
@Test
public void testMultipleConsumersCanReadFromPartitionedTopic() throws Exception {

    final Serde<String> stringSerde = Serdes.String();
    final KStreamBuilder builderLeader = new KStreamBuilder();
    final KStreamBuilder builderFollower = new KStreamBuilder();
    final List<String> expectedAssignment = Arrays.asList(PARTITIONED_TOPIC_1,  PARTITIONED_TOPIC_2);

    final KStream<String, String> partitionedStreamLeader = builderLeader.stream(Pattern.compile("partitioned-\\d"));
    final KStream<String, String> partitionedStreamFollower = builderFollower.stream(Pattern.compile("partitioned-\\d"));


    partitionedStreamLeader.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
    partitionedStreamFollower.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);

    final KafkaStreams partitionedStreamsLeader  = new KafkaStreams(builderLeader, streamsConfiguration);
    final KafkaStreams partitionedStreamsFollower  = new KafkaStreams(builderFollower, streamsConfiguration);

    final StreamsConfig streamsConfig = new StreamsConfig(streamsConfiguration);


    final Field leaderStreamThreadsField = partitionedStreamsLeader.getClass().getDeclaredField("threads");
    leaderStreamThreadsField.setAccessible(true);
    final StreamThread[] leaderStreamThreads = (StreamThread[]) leaderStreamThreadsField.get(partitionedStreamsLeader);
    final StreamThread originalLeaderThread = leaderStreamThreads[0];

    final TestStreamThread leaderTestStreamThread = new TestStreamThread(builderLeader, streamsConfig,
            new DefaultKafkaClientSupplier(),
            originalLeaderThread.applicationId, originalLeaderThread.clientId, originalLeaderThread.processId, new Metrics(), Time.SYSTEM);

    leaderStreamThreads[0] = leaderTestStreamThread;

    final TestCondition bothTopicsAddedToLeader = new TestCondition() {
        @Override
        public boolean conditionMet() {
            return leaderTestStreamThread.assignedTopicPartitions.equals(expectedAssignment);
        }
    };



    final Field followerStreamThreadsField = partitionedStreamsFollower.getClass().getDeclaredField("threads");
    followerStreamThreadsField.setAccessible(true);
    final StreamThread[] followerStreamThreads = (StreamThread[]) followerStreamThreadsField.get(partitionedStreamsFollower);
    final StreamThread originalFollowerThread = followerStreamThreads[0];

    final TestStreamThread followerTestStreamThread = new TestStreamThread(builderFollower, streamsConfig,
            new DefaultKafkaClientSupplier(),
            originalFollowerThread.applicationId, originalFollowerThread.clientId, originalFollowerThread.processId, new Metrics(), Time.SYSTEM);

    followerStreamThreads[0] = followerTestStreamThread;


    final TestCondition bothTopicsAddedToFollower = new TestCondition() {
        @Override
        public boolean conditionMet() {
            return followerTestStreamThread.assignedTopicPartitions.equals(expectedAssignment);
        }
    };

    partitionedStreamsLeader.start();
    TestUtils.waitForCondition(bothTopicsAddedToLeader, "Topics never assigned to leader stream");


    partitionedStreamsFollower.start();
    TestUtils.waitForCondition(bothTopicsAddedToFollower, "Topics never assigned to follower stream");

    partitionedStreamsLeader.close();
    partitionedStreamsFollower.close();

}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 72, Source: RegexSourceIntegrationTest.java

Example 15: shouldOnlyReadForEarliest

import org.apache.kafka.streams.kstream.KStream; // import the package/class this method depends on
private void shouldOnlyReadForEarliest(
    final String topicSuffix,
    final String topic1,
    final String topic2,
    final String topicA,
    final String topicC,
    final String topicY,
    final String topicZ,
    final String outputTopic,
    final List<String> expectedReceivedValues) throws Exception {

    final KStreamBuilder builder = new KStreamBuilder();

    final KStream<String, String> pattern1Stream = builder.stream(KStreamBuilder.AutoOffsetReset.EARLIEST, Pattern.compile("topic-\\d" + topicSuffix));
    final KStream<String, String> pattern2Stream = builder.stream(KStreamBuilder.AutoOffsetReset.LATEST, Pattern.compile("topic-[A-D]" + topicSuffix));
    final KStream<String, String> namedTopicsStream = builder.stream(topicY, topicZ);

    pattern1Stream.to(stringSerde, stringSerde, outputTopic);
    pattern2Stream.to(stringSerde, stringSerde, outputTopic);
    namedTopicsStream.to(stringSerde, stringSerde, outputTopic);

    final Properties producerConfig = TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class);

    IntegrationTestUtils.produceValuesSynchronously(topic1, Collections.singletonList(topic1TestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(topic2, Collections.singletonList(topic2TestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(topicA, Collections.singletonList(topicATestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(topicC, Collections.singletonList(topicCTestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(topicY, Collections.singletonList(topicYTestMessage), producerConfig, mockTime);
    IntegrationTestUtils.produceValuesSynchronously(topicZ, Collections.singletonList(topicZTestMessage), producerConfig, mockTime);

    final Properties consumerConfig = TestUtils.consumerConfig(CLUSTER.bootstrapServers(), StringDeserializer.class, StringDeserializer.class);

    final KafkaStreams streams = new KafkaStreams(builder, streamsConfiguration);
    streams.start();

    final List<KeyValue<String, String>> receivedKeyValues = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, outputTopic, expectedReceivedValues.size());
    final List<String> actualValues = new ArrayList<>(expectedReceivedValues.size());

    for (final KeyValue<String, String> receivedKeyValue : receivedKeyValues) {
        actualValues.add(receivedKeyValue.value);
    }

    streams.close();
    Collections.sort(actualValues);
    Collections.sort(expectedReceivedValues);
    assertThat(actualValues, equalTo(expectedReceivedValues));
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 48, Source: KStreamsFineGrainedAutoResetIntegrationTest.java


Note: The org.apache.kafka.streams.kstream.KStream.to method examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by the community, and copyright in the source code remains with the original authors. When distributing or using them, please follow the license of the corresponding project. Do not reproduce without permission.