This article collects typical usage examples of the Java method org.apache.kafka.streams.processor.TopologyBuilder.addSink. If you are unsure what TopologyBuilder.addSink does or how to call it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.kafka.streams.processor.TopologyBuilder.
Three code examples of the TopologyBuilder.addSink method are shown below, sorted by popularity by default.
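Before the full examples, here is a minimal sketch of the two addSink overloads they use. It is not taken from any example below; the node, topic, and parent names are purely illustrative, and builder is assumed to be a TopologyBuilder:

// Connect a sink node named "Sink" to the parent processor node "Process",
// writing its records to the topic "my-output-topic" using the configured default serializers.
builder.addSink("Sink", "my-output-topic", "Process");

// The same connection, but with explicit key and value serializers for the output topic.
builder.addSink("Sink", "my-output-topic", new StringSerializer(), new StringSerializer(), "Process");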
Example 1: main
import org.apache.kafka.streams.processor.TopologyBuilder; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-wordcount-processor");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

    // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    TopologyBuilder builder = new TopologyBuilder();
    builder.addSource("Source", "streams-file-input");
    builder.addProcessor("Process", new MyProcessorSupplier(), "Source");
    builder.addStateStore(Stores.create("Counts").withStringKeys().withIntegerValues().inMemory().build(), "Process");
    builder.addSink("Sink", "streams-wordcount-processor-output", "Process");

    KafkaStreams streams = new KafkaStreams(builder, props);
    streams.start();

    // usually the stream application would be running forever,
    // in this example we just let it run for some time and stop since the input data is finite.
    Thread.sleep(5000L);

    streams.close();
}
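Example 1 wires the source topic streams-file-input through a processor created by MyProcessorSupplier and into the sink topic streams-wordcount-processor-output. The supplier class itself is not shown above, so the following is only a rough sketch, assuming it counts words into the "Counts" store registered with addStateStore and forwards the updated counts downstream as strings (matching the default String serdes):

import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.ProcessorSupplier;
import org.apache.kafka.streams.state.KeyValueStore;

public class MyProcessorSupplier implements ProcessorSupplier<String, String> {
    @Override
    public Processor<String, String> get() {
        return new Processor<String, String>() {
            private ProcessorContext context;
            private KeyValueStore<String, Integer> counts;

            @Override
            @SuppressWarnings("unchecked")
            public void init(ProcessorContext context) {
                this.context = context;
                // "Counts" was registered via builder.addStateStore(...) and attached to this processor
                this.counts = (KeyValueStore<String, Integer>) context.getStateStore("Counts");
            }

            @Override
            public void process(String key, String line) {
                // split each input line into words and update the per-word count
                for (String word : line.toLowerCase().split("\\W+")) {
                    Integer old = counts.get(word);
                    int updated = (old == null) ? 1 : old + 1;
                    counts.put(word, updated);
                    // forward the updated count to the downstream "Sink" node as a String
                    context.forward(word, Integer.toString(updated));
                }
                context.commit();
            }

            @Override
            public void punctuate(long timestamp) { }

            @Override
            public void close() { }
        };
    }
}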
Example 2: main
import org.apache.kafka.streams.processor.TopologyBuilder; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "sentiment-analyzer");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");
    props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

    // setting offset reset to earliest so that we can re-run the demo code with the same pre-loaded data
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    TopologyBuilder builder = new TopologyBuilder();
    builder.addSource("Source", "test");
    builder.addProcessor("Process", new CalculateSentiment(), "Source");
    builder.addStateStore(Stores.create("SentimentAnalysis").withStringKeys().withStringValues().inMemory().build(), "Process");
    builder.addSink("Sink", "test-output", "Process");

    STREAMS = new KafkaStreams(builder, props);
    STREAMS.start();

    Runtime.getRuntime().addShutdownHook(new Thread("MirrorMakerShutdownHook") {
        @Override
        public void run() {
            System.out.println("Closing Calamus sentiment-analyzer.");
            STREAMS.close();
        }
    });
}
Example 3: build
import org.apache.kafka.streams.processor.TopologyBuilder; // import the package/class the method depends on
@Override
public TopologyBuilder build(
        final String sparqlQuery,
        final String statementsTopic,
        final String resultsTopic,
        final BNodeIdFactory bNodeIdFactory)
        throws MalformedQueryException, TopologyBuilderException {
    requireNonNull(sparqlQuery);
    requireNonNull(statementsTopic);
    requireNonNull(resultsTopic);

    final ParsedQuery parsedQuery = new SPARQLParser().parseQuery(sparqlQuery, null);
    final TopologyBuilder builder = new TopologyBuilder();

    final TupleExpr expr = parsedQuery.getTupleExpr();
    final QueryVisitor visitor = new QueryVisitor(bNodeIdFactory);
    expr.visit(visitor);

    processorEntryList = visitor.getProcessorEntryList();
    final Map<TupleExpr, String> idMap = visitor.getIDs();

    // add source node
    builder.addSource(SOURCE, new StringDeserializer(), new VisibilityStatementDeserializer(), statementsTopic);

    // Processing the processor entry list in reverse order means we go from leaf nodes to parent nodes,
    // so when a parent processing node gets added, its upstream processing nodes will already exist.
    ProcessorEntry entry = null;
    for (int ii = processorEntryList.size() - 1; ii >= 0; ii--) {
        entry = processorEntryList.get(ii);

        // Statement patterns need to be connected to the Source.
        if (entry.getNode() instanceof StatementPattern) {
            builder.addProcessor(entry.getID(), entry.getSupplier(), SOURCE);
        } else {
            final List<TupleExpr> parents = entry.getUpstreamNodes();
            final String[] parentIDs = new String[parents.size()];
            for (int id = 0; id < parents.size(); id++) {
                parentIDs[id] = idMap.get(parents.get(id));
            }
            builder.addProcessor(entry.getID(), entry.getSupplier(), parentIDs);
        }

        // Add a state store for any node type that requires one.
        if (entry.getNode() instanceof Join || entry.getNode() instanceof LeftJoin || entry.getNode() instanceof Group) {
            // Add a state store for the join processor.
            final StateStoreSupplier joinStoreSupplier =
                    Stores.create(entry.getID())
                        .withStringKeys()
                        .withValues(new VisibilityBindingSetSerde())
                        .persistent()
                        .build();
            builder.addStateStore(joinStoreSupplier, entry.getID());
        }
    }

    // Add a formatter that converts the ProcessorResults into the output format.
    final SinkEntry<?, ?> sinkEntry = visitor.getSinkEntry();
    builder.addProcessor("OUTPUT_FORMATTER", sinkEntry.getFormatterSupplier(), entry.getID());

    // Add the sink.
    builder.addSink(SINK, resultsTopic, sinkEntry.getKeySerializer(), sinkEntry.getValueSerializer(), "OUTPUT_FORMATTER");

    return builder;
}
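As in Examples 1 and 2, the TopologyBuilder returned by build would typically be handed to a KafkaStreams instance and started. A minimal sketch follows; the factory variable, application id, and broker address are assumptions for illustration and are not part of the example above:

Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "sparql-query-topology"); // hypothetical application id
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

// "factory" stands in for whatever object implements the build method shown above.
TopologyBuilder builder = factory.build(sparqlQuery, statementsTopic, resultsTopic, bNodeIdFactory);

KafkaStreams streams = new KafkaStreams(builder, props);
streams.start();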