

Java TopologyBuilder Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.streams.processor.TopologyBuilder. If you are wondering what TopologyBuilder is for, or how to use it in practice, the curated examples below should help.


The TopologyBuilder class belongs to the org.apache.kafka.streams.processor package. The sections below present 14 code examples for the class, sorted by popularity. (TopologyBuilder is the low-level Processor API builder of Kafka 0.10/0.11; since Kafka 1.0 it has been deprecated in favor of org.apache.kafka.streams.Topology.)
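
Most of the examples below wire a custom `Processor` into the topology via a `ProcessorSupplier`, but none of them show the processor class itself. For background, here is a minimal sketch of the 0.10/0.11 `Processor` contract that these examples build on (the class name and behavior are hypothetical):

```java
import org.apache.kafka.streams.processor.AbstractProcessor;
import org.apache.kafka.streams.processor.ProcessorContext;

// A minimal sketch of a custom processor; UpperCaseProcessor is a hypothetical name.
public class UpperCaseProcessor extends AbstractProcessor<String, String> {

    @Override
    public void init(ProcessorContext context) {
        super.init(context);       // keeps the context available via context()
        context.schedule(10000L);  // optional: request punctuate() roughly every 10s of stream time
    }

    @Override
    public void process(String key, String value) {
        // transform the record and send it to all downstream nodes
        context().forward(key, value.toUpperCase());
    }

    @Override
    public void punctuate(long timestamp) {
        // periodic work goes here (no-op in this sketch)
    }

    @Override
    public void close() {
        // release any resources acquired in init()
    }
}
```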

Example 1: shouldDriveGlobalStore

import org.apache.kafka.streams.processor.TopologyBuilder; // import the required package/class
@SuppressWarnings("unchecked")
@Test
public void shouldDriveGlobalStore() throws Exception {
    final StateStoreSupplier storeSupplier = Stores.create("my-store")
            .withStringKeys().withStringValues().inMemory().disableLogging().build();
    final String global = "global";
    final String topic = "topic";
    final TopologyBuilder topologyBuilder = this.builder
            .addGlobalStore(storeSupplier, global, STRING_DESERIALIZER, STRING_DESERIALIZER, topic, "processor", define(new StatefulProcessor("my-store")));

    driver = new ProcessorTopologyTestDriver(config, topologyBuilder);
    final KeyValueStore<String, String> globalStore = (KeyValueStore<String, String>) topologyBuilder.globalStateStores().get("my-store");
    driver.process(topic, "key1", "value1", STRING_SERIALIZER, STRING_SERIALIZER);
    driver.process(topic, "key2", "value2", STRING_SERIALIZER, STRING_SERIALIZER);
    assertEquals("value1", globalStore.get("key1"));
    assertEquals("value2", globalStore.get("key2"));
}
 
Author: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 18 | Source: ProcessorTopologyTest.java
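
The `StatefulProcessor` used above is a helper defined inside `ProcessorTopologyTest` and not shown here. Judging by the assertions, it simply writes each record into the named store; a plausible sketch (a hedged reconstruction, not the original source):

```java
public class StatefulProcessor extends AbstractProcessor<String, String> {
    private final String storeName;
    private KeyValueStore<String, String> store;

    public StatefulProcessor(String storeName) {
        this.storeName = storeName;
    }

    @SuppressWarnings("unchecked")
    @Override
    public void init(ProcessorContext context) {
        super.init(context);
        // look up the store registered under "my-store" in the test above
        store = (KeyValueStore<String, String>) context.getStateStore(storeName);
    }

    @Override
    public void process(String key, String value) {
        store.put(key, value);  // the test then asserts globalStore.get("key1") equals "value1"
    }
}
```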

Example 2: processingTopologyBuilder

import org.apache.kafka.streams.processor.TopologyBuilder; // import the required package/class
private TopologyBuilder processingTopologyBuilder() {

    TopologyBuilder builder = new TopologyBuilder();

    builder.addSource(SOURCE_NAME, topicName)
            .addProcessor(PROCESSOR_NAME, new ProcessorSupplier() {
                @Override
                public Processor get() {
                    return new TweetStreamProcessor();
                }
            }, SOURCE_NAME);

    LOGGER.info("Kafka streams processing topology ready");

    return builder;
}
 
Author: abhirockzz | Project: accs-kafka-streams | Lines: 17 | Source: KafkaStreamsTweetAnalysisJob.java

Example 3: tasksForState

import org.apache.kafka.streams.processor.TopologyBuilder; // import the required package/class
private Set<TaskId> tasksForState(String applicationId, String storeName, List<TaskId> tasks, Map<Integer, TopologyBuilder.TopicsInfo> topicGroups) {
    final String changelogTopic = ProcessorStateManager.storeChangelogTopic(applicationId, storeName);

    Set<TaskId> ids = new HashSet<>();
    for (Map.Entry<Integer, TopologyBuilder.TopicsInfo> entry : topicGroups.entrySet()) {
        Set<String> stateChangelogTopics = entry.getValue().stateChangelogTopics.keySet();

        if (stateChangelogTopics.contains(changelogTopic)) {
            for (TaskId id : tasks) {
                if (id.topicGroupId == entry.getKey())
                    ids.add(id);
            }
        }
    }
    return ids;
}
 
Author: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 17 | Source: StreamPartitionAssignorTest.java
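
`ProcessorStateManager.storeChangelogTopic` encapsulates the changelog naming convention the helper above matches against; in this Kafka version it is equivalent to:

```java
static String storeChangelogTopic(String applicationId, String storeName) {
    // e.g. applicationId "my-app" + store "Counts" -> "my-app-Counts-changelog"
    return applicationId + "-" + storeName + "-changelog";
}
```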

Example 4: main

import org.apache.kafka.streams.processor.TopologyBuilder; // import the required package/class
public static void main(String[] args) throws IOException {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-wordcount-processor");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka0:19092");
    props.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "zookeeper0:12181/kafka");
    props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.Integer().getClass());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    TopologyBuilder builder = new TopologyBuilder();
    builder.addSource("SOURCE", new StringDeserializer(), new StringDeserializer(), "words")
            .addProcessor("WordCountProcessor", WordCountProcessor::new, "SOURCE")
            // addStateStore with a processor name also connects the store to that processor,
            // which is why the explicit connect call below is left commented out
            .addStateStore(Stores.create("Counts").withStringKeys().withIntegerValues().inMemory().build(), "WordCountProcessor")
//          .connectProcessorAndStateStores("WordCountProcessor", "Counts")
            .addSink("SINK", "count", new StringSerializer(), new IntegerSerializer(), "WordCountProcessor");

    KafkaStreams stream = new KafkaStreams(builder, props);
    stream.start();
    System.in.read();  // run until the user presses Enter
    stream.close();
    stream.cleanUp();
}
 
Author: habren | Project: KafkaExample | Lines: 23 | Source: WordCountTopology.java
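
`WordCountProcessor` itself is not part of this snippet. Given the `Counts` store (String keys, Integer values) and the sink's String/Integer serializers, it plausibly looks like the following sketch (a hedged reconstruction; the splitting rule and punctuate interval are assumptions):

```java
public class WordCountProcessor extends AbstractProcessor<String, String> {
    private KeyValueStore<String, Integer> counts;

    @SuppressWarnings("unchecked")
    @Override
    public void init(ProcessorContext context) {
        super.init(context);
        counts = (KeyValueStore<String, Integer>) context.getStateStore("Counts");
        context.schedule(1000L);  // emit current counts roughly once per second of stream time
    }

    @Override
    public void process(String key, String line) {
        for (String word : line.toLowerCase().split("\\W+")) {
            Integer count = counts.get(word);
            counts.put(word, count == null ? 1 : count + 1);
        }
    }

    @Override
    public void punctuate(long timestamp) {
        // forward every (word, count) pair downstream to the "SINK" node
        try (KeyValueIterator<String, Integer> it = counts.all()) {
            while (it.hasNext()) {
                KeyValue<String, Integer> entry = it.next();
                context().forward(entry.key, entry.value);
            }
        }
    }
}
```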

Example 5: processingTopologyBuilder

import org.apache.kafka.streams.processor.TopologyBuilder; // import the required package/class
private TopologyBuilder processingTopologyBuilder() {
    //create state store
    StateStoreSupplier machineToAvgCPUUsageStore
            = Stores.create(AVG_STORE_NAME)
                    .withStringKeys()
                    .withDoubleValues()
                    .inMemory()
                    .build();

    StateStoreSupplier machineToNumOfRecordsReadStore
            = Stores.create(NUM_RECORDS_STORE_NAME)
                    .withStringKeys()
                    .withIntegerValues()
                    .inMemory()
                    .build();

    TopologyBuilder builder = new TopologyBuilder();

    builder.addSource(SOURCE_NAME, TOPIC_NAME)
            .addProcessor(PROCESSOR_NAME, new ProcessorSupplier() {
                @Override
                public Processor get() {
                    return new CPUCumulativeAverageProcessor();
                }
            }, SOURCE_NAME)
            .addStateStore(machineToAvgCPUUsageStore, PROCESSOR_NAME)
            .addStateStore(machineToNumOfRecordsReadStore, PROCESSOR_NAME);

    LOGGER.info("Kafka streams processing topology ready");

    return builder;
}
 
Author: abhirockzz | Project: docker-kafka-streams | Lines: 33 | Source: CPUMetricStreamHandler.java (an identical copy of this method also appears in abhirockzz/kafka-streams-example)
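
`CPUCumulativeAverageProcessor` is likewise not shown. Given the two stores above (machine → average, machine → record count), its core logic is presumably an incremental mean, newAvg = (avg * n + x) / (n + 1); a hedged sketch (value parsing and field names are assumptions):

```java
public class CPUCumulativeAverageProcessor extends AbstractProcessor<String, String> {
    private KeyValueStore<String, Double> avgStore;
    private KeyValueStore<String, Integer> countStore;

    @SuppressWarnings("unchecked")
    @Override
    public void init(ProcessorContext context) {
        super.init(context);
        avgStore = (KeyValueStore<String, Double>) context.getStateStore(AVG_STORE_NAME);
        countStore = (KeyValueStore<String, Integer>) context.getStateStore(NUM_RECORDS_STORE_NAME);
    }

    @Override
    public void process(String machineId, String cpuReading) {
        double x = Double.parseDouble(cpuReading);
        Integer n = countStore.get(machineId);
        Double avg = avgStore.get(machineId);
        if (n == null || avg == null) {
            n = 0;
            avg = 0.0;
        }
        // incremental mean: avoids keeping every reading around
        double newAvg = (avg * n + x) / (n + 1);
        avgStore.put(machineId, newAvg);
        countStore.put(machineId, n + 1);
    }
}
```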

Example 6: main

import org.apache.kafka.streams.processor.TopologyBuilder; // import the required package/class
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-wordcount-processor");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

    // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    TopologyBuilder builder = new TopologyBuilder();

    builder.addSource("Source", "streams-file-input");

    builder.addProcessor("Process", new MyProcessorSupplier(), "Source");
    builder.addStateStore(Stores.create("Counts").withStringKeys().withIntegerValues().inMemory().build(), "Process");

    builder.addSink("Sink", "streams-wordcount-processor-output", "Process");

    KafkaStreams streams = new KafkaStreams(builder, props);
    streams.start();

    // usually the stream application would be running forever,
    // in this example we just let it run for some time and stop since the input data is finite.
    Thread.sleep(5000L);

    streams.close();
}
 
Author: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 30 | Source: WordCountProcessorDemo.java

Example 7: shouldAddStateStoreToRegexDefinedSource

import org.apache.kafka.streams.processor.TopologyBuilder; // import the required package/class
@Test
public void shouldAddStateStoreToRegexDefinedSource() throws Exception {

    final ProcessorSupplier<String, String> processorSupplier = new MockProcessorSupplier<>();
    final MockStateStoreSupplier stateStoreSupplier = new MockStateStoreSupplier("testStateStore", false);
    final long thirtySecondTimeout = 30 * 1000;

    final TopologyBuilder builder = new TopologyBuilder()
            .addSource("ingest", Pattern.compile("topic-\\d+"))
            .addProcessor("my-processor", processorSupplier, "ingest")
            .addStateStore(stateStoreSupplier, "my-processor");


    final KafkaStreams streams = new KafkaStreams(builder, streamsConfiguration);
    try {
        streams.start();

        final TestCondition stateStoreNameBoundToSourceTopic = new TestCondition() {
            @Override
            public boolean conditionMet() {
                final Map<String, List<String>> stateStoreToSourceTopic = builder.stateStoreNameToSourceTopics();
                final List<String> topicNamesList = stateStoreToSourceTopic.get("testStateStore");
                return topicNamesList != null && !topicNamesList.isEmpty() && topicNamesList.get(0).equals("topic-1");
            }
        };

        TestUtils.waitForCondition(stateStoreNameBoundToSourceTopic, thirtySecondTimeout, "Did not find topic: [topic-1] connected to state store: [testStateStore]");

    } finally {
        streams.close();
    }
}
 
Author: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 33 | Source: RegexSourceIntegrationTest.java

Example 8: shouldAddTopicToEarliestAutoOffsetResetList

import org.apache.kafka.streams.processor.TopologyBuilder; // import the required package/class
@Test
public void shouldAddTopicToEarliestAutoOffsetResetList() {
    final String topicName = "topic-1";
    
    builder.stream(TopologyBuilder.AutoOffsetReset.EARLIEST, topicName);
    
    assertTrue(builder.earliestResetTopicsPattern().matcher(topicName).matches());
    assertFalse(builder.latestResetTopicsPattern().matcher(topicName).matches());
}
 
Author: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 10 | Source: KStreamBuilderTest.java

Example 9: shouldAddTopicToLatestAutoOffsetResetList

import org.apache.kafka.streams.processor.TopologyBuilder; // import the required package/class
@Test
public void shouldAddTopicToLatestAutoOffsetResetList() {
    final String topicName = "topic-1";

    builder.stream(TopologyBuilder.AutoOffsetReset.LATEST, topicName);

    assertTrue(builder.latestResetTopicsPattern().matcher(topicName).matches());
    assertFalse(builder.earliestResetTopicsPattern().matcher(topicName).matches());
}
 
Author: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 10 | Source: KStreamBuilderTest.java

Example 10: shouldAddTableToEarliestAutoOffsetResetList

import org.apache.kafka.streams.processor.TopologyBuilder; // import the required package/class
@Test
public void shouldAddTableToEarliestAutoOffsetResetList() {
    final String topicName = "topic-1";
    final String storeName = "test-store";

    builder.table(TopologyBuilder.AutoOffsetReset.EARLIEST, topicName, storeName);

    assertTrue(builder.earliestResetTopicsPattern().matcher(topicName).matches());
    assertFalse(builder.latestResetTopicsPattern().matcher(topicName).matches());
}
 
Author: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 11 | Source: KStreamBuilderTest.java

Example 11: shouldAddTableToLatestAutoOffsetResetList

import org.apache.kafka.streams.processor.TopologyBuilder; // import the required package/class
@Test
public void shouldAddTableToLatestAutoOffsetResetList() {
    final String topicName = "topic-1";
    final String storeName = "test-store";

    builder.table(TopologyBuilder.AutoOffsetReset.LATEST, topicName, storeName);

    assertTrue(builder.latestResetTopicsPattern().matcher(topicName).matches());
    assertFalse(builder.earliestResetTopicsPattern().matcher(topicName).matches());
}
 
Author: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 11 | Source: KStreamBuilderTest.java

Example 12: shouldAddRegexTopicToEarliestAutoOffsetResetList

import org.apache.kafka.streams.processor.TopologyBuilder; // import the required package/class
@Test
public void shouldAddRegexTopicToEarliestAutoOffsetResetList() {
    final Pattern topicPattern = Pattern.compile("topic-\\d+");
    final String topicTwo = "topic-500000";

    builder.stream(TopologyBuilder.AutoOffsetReset.EARLIEST, topicPattern);

    assertTrue(builder.earliestResetTopicsPattern().matcher(topicTwo).matches());
    assertFalse(builder.latestResetTopicsPattern().matcher(topicTwo).matches());
}
 
Author: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 11 | Source: KStreamBuilderTest.java

Example 13: shouldAddRegexTopicToLatestAutoOffsetResetList

import org.apache.kafka.streams.processor.TopologyBuilder; // import the required package/class
@Test
public void shouldAddRegexTopicToLatestAutoOffsetResetList() {
    final Pattern topicPattern = Pattern.compile("topic-\\d+");
    final String topicTwo = "topic-1000000";

    builder.stream(TopologyBuilder.AutoOffsetReset.LATEST, topicPattern);

    assertTrue(builder.latestResetTopicsPattern().matcher(topicTwo).matches());
    assertFalse(builder.earliestResetTopicsPattern().matcher(topicTwo).matches());
}
 
Author: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 11 | Source: KStreamBuilderTest.java
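
Taken together, Examples 8-13 exercise the per-source offset-reset overloads of `KStreamBuilder` (a `TopologyBuilder` subclass). In application code the same API might be used as in this sketch (topic and store names are assumptions):

```java
KStreamBuilder builder = new KStreamBuilder();

// rebuild the table from the full topic history on first start
KTable<String, String> profiles =
        builder.table(TopologyBuilder.AutoOffsetReset.EARLIEST, "profiles", "profiles-store");

// only consume click events that arrive after the application first starts
KStream<String, String> clicks =
        builder.stream(TopologyBuilder.AutoOffsetReset.LATEST, "clicks");
```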

Example 14: testOnAssignment

import org.apache.kafka.streams.processor.TopologyBuilder; // import the required package/class
@Test
public void testOnAssignment() throws Exception {
    TopicPartition t2p3 = new TopicPartition("topic2", 3);

    TopologyBuilder builder = new TopologyBuilder();
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2");

    UUID uuid = UUID.randomUUID();
    String client1 = "client1";

    StreamThread thread = new StreamThread(builder, config, mockClientSupplier, "test", client1, uuid,
                                           new Metrics(), Time.SYSTEM,
                                           new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);

    partitionAssignor.configure(config.getConsumerConfigs(thread, "test", client1));

    List<TaskId> activeTaskList = Utils.mkList(task0, task3);
    Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    activeTasks.put(task0, Utils.mkSet(t1p0));
    activeTasks.put(task3, Utils.mkSet(t2p3));
    standbyTasks.put(task1, Utils.mkSet(t1p0));
    standbyTasks.put(task2, Utils.mkSet(t2p0));

    AssignmentInfo info = new AssignmentInfo(activeTaskList, standbyTasks, new HashMap<HostInfo, Set<TopicPartition>>());
    PartitionAssignor.Assignment assignment = new PartitionAssignor.Assignment(Utils.mkList(t1p0, t2p3), info.encode());
    partitionAssignor.onAssignment(assignment);

    assertEquals(activeTasks, partitionAssignor.activeTasks());
    assertEquals(standbyTasks, partitionAssignor.standbyTasks());
}
 
Author: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 33 | Source: StreamPartitionAssignorTest.java


Note: the org.apache.kafka.streams.processor.TopologyBuilder examples in this article were compiled from open-source projects hosted on GitHub and similar platforms. Copyright in each snippet remains with its original authors; consult the corresponding project's license before reusing or redistributing the code.