

Java TopologyBuilder.addSource Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.streams.processor.TopologyBuilder.addSource. If you are unsure what exactly TopologyBuilder.addSource does, how to call it, or how it is used in practice, the curated code samples below should help. You can also explore further usage examples of its enclosing class, org.apache.kafka.streams.processor.TopologyBuilder.


The following presents 5 code examples of the TopologyBuilder.addSource method, sorted by popularity by default.
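Before turning to the examples, here is a minimal sketch of the two addSource overloads the samples below rely on; the topic names and deserializers in it are illustrative placeholders, not taken from any of the quoted projects. The short form reads its topics with the default key/value deserializers configured via StreamsConfig, while the long form binds explicit deserializers to that one source node.

import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.streams.processor.TopologyBuilder;

public class AddSourceSketch {
    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();

        // Variant 1: rely on the default key/value deserializers from StreamsConfig
        builder.addSource("Source", "my-input-topic");

        // Variant 2: give this source node its own key/value deserializers
        builder.addSource("TypedSource",
                          new StringDeserializer(),  // key deserializer
                          new StringDeserializer(),  // value deserializer
                          "my-other-topic");
    }
}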

Example 1: main

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.state.Stores;
import org.apache.kafka.streams.processor.TopologyBuilder; // the class that provides addSource
// (MyProcessorSupplier is defined elsewhere in the same demo file)
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-wordcount-processor");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

    // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    TopologyBuilder builder = new TopologyBuilder();

    builder.addSource("Source", "streams-file-input");

    builder.addProcessor("Process", new MyProcessorSupplier(), "Source");
    builder.addStateStore(Stores.create("Counts").withStringKeys().withIntegerValues().inMemory().build(), "Process");

    builder.addSink("Sink", "streams-wordcount-processor-output", "Process");

    KafkaStreams streams = new KafkaStreams(builder, props);
    streams.start();

    // usually the stream application would be running forever,
    // in this example we just let it run for some time and stop since the input data is finite.
    Thread.sleep(5000L);

    streams.close();
}
 
Author: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 30 | File: WordCountProcessorDemo.java
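The "Process" node in Example 1 obtains the "Counts" store inside its init() callback. MyProcessorSupplier itself is not reproduced on this page, so the following is only a sketch of what such a processor could look like under the 0.11.x Processor API; everything except the store and node names is an assumption.

import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.state.KeyValueStore;

// Hypothetical processor showing how the "Counts" store wired above is reached
// from inside the "Process" node (the real class lives in WordCountProcessorDemo.java).
public class MyProcessor implements Processor<String, String> {
    private KeyValueStore<String, Integer> kvStore;

    @Override
    @SuppressWarnings("unchecked")
    public void init(final ProcessorContext context) {
        // look up the store attached via builder.addStateStore(..., "Process")
        kvStore = (KeyValueStore<String, Integer>) context.getStateStore("Counts");
        context.schedule(1000); // request punctuate() roughly once a second (0.11.x API)
    }

    @Override
    public void process(final String key, final String line) {
        for (final String word : line.toLowerCase().split(" ")) {
            final Integer oldCount = kvStore.get(word);
            kvStore.put(word, oldCount == null ? 1 : oldCount + 1);
        }
    }

    @Override
    public void punctuate(final long timestamp) {
        // a real implementation would iterate the store and context.forward(...) the counts
    }

    @Override
    public void close() { }
}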

Example 2: testOnAssignment

import org.apache.kafka.streams.processor.TopologyBuilder; // the class that provides addSource
// (config, mockClientSupplier, partitionAssignor, task0/task3, t1p0/t2p0, etc. are fixtures defined elsewhere in the test class)
@Test
public void testOnAssignment() throws Exception {
    TopicPartition t2p3 = new TopicPartition("topic2", 3);

    TopologyBuilder builder = new TopologyBuilder();
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2");

    UUID uuid = UUID.randomUUID();
    String client1 = "client1";

    StreamThread thread = new StreamThread(builder, config, mockClientSupplier, "test", client1, uuid, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
                                           0);

    partitionAssignor.configure(config.getConsumerConfigs(thread, "test", client1));

    List<TaskId> activeTaskList = Utils.mkList(task0, task3);
    Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    activeTasks.put(task0, Utils.mkSet(t1p0));
    activeTasks.put(task3, Utils.mkSet(t2p3));
    standbyTasks.put(task1, Utils.mkSet(t1p0));
    standbyTasks.put(task2, Utils.mkSet(t2p0));

    AssignmentInfo info = new AssignmentInfo(activeTaskList, standbyTasks, new HashMap<HostInfo, Set<TopicPartition>>());
    PartitionAssignor.Assignment assignment = new PartitionAssignor.Assignment(Utils.mkList(t1p0, t2p3), info.encode());
    partitionAssignor.onAssignment(assignment);

    assertEquals(activeTasks, partitionAssignor.activeTasks());
    assertEquals(standbyTasks, partitionAssignor.standbyTasks());
}
 
Author: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 33 | File: StreamPartitionAssignorTest.java

Example 3: main

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.state.Stores;
import org.apache.kafka.streams.processor.TopologyBuilder; // the class that provides addSource
// (STREAMS is a static KafkaStreams field and CalculateSentiment a processor supplier defined elsewhere in the project;
//  the ZOOKEEPER_CONNECT_CONFIG and KEY/VALUE_SERDE_CLASS_CONFIG settings reflect the older Streams API this project targets)
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "sentiment-analyzer");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");
    props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

    // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    TopologyBuilder builder = new TopologyBuilder();

    builder.addSource("Source", "test");

    builder.addProcessor("Process", new CalculateSentiment(), "Source");
    builder.addStateStore(Stores.create("SentimentAnalysis").withStringKeys().withStringValues().inMemory().build(), "Process");

    builder.addSink("Sink", "test-output", "Process");

    STREAMS = new KafkaStreams(builder, props);
    STREAMS.start();

    Runtime.getRuntime().addShutdownHook(new Thread("MirrorMakerShutdownHook") {
        @Override
        public void run() {
            System.out.println("Closing Calamus sentiment-analyzer.");
            STREAMS.close();
        }
    });
}
 
Author: SinghAsDev | Project: calamus | Lines: 32 | File: SentimentAnalyzer.java

Example 4: before

import org.apache.kafka.streams.processor.TopologyBuilder; // the class that provides addSource
// (topicName, stateDir, taskOne/taskTwo, createStreamsTask(...), configureRestoreConsumer(...), etc. are members of the enclosing test class)
@Before
public void before() throws IOException {
    final TopologyBuilder builder = new TopologyBuilder();
    builder.addSource("the-source", topicName);
    builder.addProcessor("the-processor", new MockProcessorSupplier(), "the-source");
    builder.addStateStore(Stores.create("kv-store")
                              .withStringKeys()
                              .withStringValues().inMemory().build(), "the-processor");

    builder.addStateStore(Stores.create("window-store")
                              .withStringKeys()
                              .withStringValues()
                              .persistent()
                              .windowed(10, 10, 2, false).build(), "the-processor");

    final Properties properties = new Properties();
    final String applicationId = "applicationId";
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    stateDir = TestUtils.tempDirectory();
    final String stateConfigDir = stateDir.getPath();
    properties.put(StreamsConfig.STATE_DIR_CONFIG,
            stateConfigDir);

    final StreamsConfig streamsConfig = new StreamsConfig(properties);
    final MockClientSupplier clientSupplier = new MockClientSupplier();
    configureRestoreConsumer(clientSupplier, "applicationId-kv-store-changelog");
    configureRestoreConsumer(clientSupplier, "applicationId-window-store-changelog");

    builder.setApplicationId(applicationId);
    final ProcessorTopology topology = builder.build(null);
    final Map<TaskId, StreamTask> tasks = new HashMap<>();
    stateDirectory = new StateDirectory(applicationId, stateConfigDir, new MockTime());
    taskOne = createStreamsTask(applicationId, streamsConfig, clientSupplier, topology,
                                new TaskId(0, 0));
    tasks.put(new TaskId(0, 0),
              taskOne);
    taskTwo = createStreamsTask(applicationId, streamsConfig, clientSupplier, topology,
                                new TaskId(0, 1));
    tasks.put(new TaskId(0, 1),
              taskTwo);

    storesAvailable = true;
    provider = new StreamThreadStateStoreProvider(
        new StreamThread(
            builder,
            streamsConfig,
            clientSupplier,
            applicationId,
            "clientId",
            UUID.randomUUID(),
            new Metrics(),
            Time.SYSTEM,
            new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST),
            0) {

            @Override
            public Map<TaskId, StreamTask> tasks() {
                return tasks;
            }

            @Override
            public boolean isInitialized() {
                return storesAvailable;
            }
        });
}
 
Author: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 68 | File: StreamThreadStateStoreProviderTest.java

Example 5: build

import org.apache.kafka.streams.processor.TopologyBuilder; // the class that provides addSource
// (SOURCE, SINK, and processorEntryList are members of the enclosing TopologyFactory class)
@Override
public TopologyBuilder build(
        final String sparqlQuery,
        final String statementsTopic,
        final String resultsTopic,
        final BNodeIdFactory bNodeIdFactory)
        throws MalformedQueryException, TopologyBuilderException {
    requireNonNull(sparqlQuery);
    requireNonNull(statementsTopic);
    requireNonNull(resultsTopic);

    final ParsedQuery parsedQuery = new SPARQLParser().parseQuery(sparqlQuery, null);
    final TopologyBuilder builder = new TopologyBuilder();

    final TupleExpr expr = parsedQuery.getTupleExpr();
    final QueryVisitor visitor = new QueryVisitor(bNodeIdFactory);
    expr.visit(visitor);

    processorEntryList = visitor.getProcessorEntryList();
    final Map<TupleExpr, String> idMap = visitor.getIDs();
    // add source node
    builder.addSource(SOURCE, new StringDeserializer(), new VisibilityStatementDeserializer(), statementsTopic);

    // processing the processor entry list in reverse order means we go from leaf
    // nodes -> parent nodes.
    // So, when the parent processing nodes get added, the upstream
    // processing node will already exist.

    ProcessorEntry entry = null;
    for (int ii = processorEntryList.size() - 1; ii >= 0; ii--) {
        entry = processorEntryList.get(ii);
        //statement patterns need to be connected to the Source.
        if(entry.getNode() instanceof StatementPattern) {
            builder.addProcessor(entry.getID(), entry.getSupplier(), SOURCE);
        } else {
            final List<TupleExpr> parents = entry.getUpstreamNodes();
            final String[] parentIDs = new String[parents.size()];
            for (int id = 0; id < parents.size(); id++) {
                parentIDs[id] = idMap.get(parents.get(id));
            }
            builder.addProcessor(entry.getID(), entry.getSupplier(), parentIDs);
        }

        // Add a state store for any node type that requires one.
        if (entry.getNode() instanceof Join ||  entry.getNode() instanceof LeftJoin || entry.getNode() instanceof Group) {
            // Add a state store for the join processor.
            final StateStoreSupplier joinStoreSupplier =
                    Stores.create( entry.getID() )
                        .withStringKeys()
                        .withValues(new VisibilityBindingSetSerde())
                        .persistent()
                        .build();
            builder.addStateStore(joinStoreSupplier, entry.getID());
        }
    }

    // Add a formatter that converts the ProcessorResults into the output format.
    final SinkEntry<?,?> sinkEntry = visitor.getSinkEntry();
    builder.addProcessor("OUTPUT_FORMATTER", sinkEntry.getFormatterSupplier(), entry.getID());

    // Add the sink.
    builder.addSink(SINK, resultsTopic, sinkEntry.getKeySerializer(), sinkEntry.getValueSerializer(), "OUTPUT_FORMATTER");

    return builder;
}
 
Author: apache | Project: incubator-rya | Lines: 66 | File: TopologyFactory.java


Note: the org.apache.kafka.streams.processor.TopologyBuilder.addSource examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by their respective developers. Copyright of the source code remains with the original authors; please consult each project's License before redistributing or using it. Do not reproduce this article without permission.