This article collects typical usage examples of the Java class org.apache.kafka.streams.processor.Processor. If you are unsure what the Processor class does or how it is used in practice, the curated examples below should help.
The Processor class belongs to the org.apache.kafka.streams.processor package. 14 code examples are shown below, sorted by popularity.
Example 1: processingTopologyBuilder

import org.apache.kafka.streams.processor.Processor; // import the required package/class

private TopologyBuilder processingTopologyBuilder() {
    TopologyBuilder builder = new TopologyBuilder();
    builder.addSource(SOURCE_NAME, topicName)
           .addProcessor(PROCESSOR_NAME, new ProcessorSupplier() {
               @Override
               public Processor get() {
                   return new TweetStreamProcessor();
               }
           }, SOURCE_NAME);
    LOGGER.info("Kafka streams processing topology ready");
    return builder;
}
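A builder like this is typically handed to a KafkaStreams instance and started. Below is a minimal launch sketch against the older (pre-1.0) API that the example targets; the application id and broker address are hypothetical, since the original class does not show its configuration:

import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;

Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "tweet-stream-app");  // hypothetical id
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker

// The pre-1.0 KafkaStreams constructor accepts a TopologyBuilder directly.
KafkaStreams streams = new KafkaStreams(processingTopologyBuilder(), new StreamsConfig(props));
streams.start();
// ... later, on shutdown:
streams.close();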
Example 2: shutdown

import org.apache.kafka.streams.processor.Processor; // import the required package/class

/**
 * {@inheritDoc}
 * @see com.heliosapm.streams.metrics.processors.StreamedMetricProcessorSupplier#shutdown()
 */
@Override
public void shutdown() {
    log.info(">>>>> Stopping [{}]...", getClass().getSimpleName());
    for (Processor<K, V> p : startedProcessors) {
        try {
            log.info(">>>>> Stopping Processor [{}]...", p.getClass().getSimpleName());
            p.close();
            log.info("<<<<< Processor [{}] Stopped.", p.getClass().getSimpleName());
        } catch (Exception ex) {
            log.error("Failed to close processor [{}]", p, ex);
        }
    }
    startedProcessors.clear();
    log.info("<<<<< Stopped [{}]", getClass().getSimpleName());
}
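The loop above can only release resources that each Processor actually frees in its close() method. A minimal sketch of such a processor, assuming a hypothetical file-writing processor (none of the names below come from the original project):

import java.io.FileNotFoundException;
import java.io.PrintWriter;
import org.apache.kafka.streams.processor.AbstractProcessor;
import org.apache.kafka.streams.processor.ProcessorContext;

public class FileLoggingProcessor extends AbstractProcessor<String, String> {
    private PrintWriter out;

    @Override
    public void init(ProcessorContext context) {
        super.init(context);
        try {
            out = new PrintWriter("records.log"); // hypothetical sink file
        } catch (FileNotFoundException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void process(String key, String value) {
        out.println(key + "=" + value);
    }

    @Override
    public void close() {
        out.close(); // this is what shutdown loops like the one above invoke
    }
}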
Example 3: processingTopologyBuilder

import org.apache.kafka.streams.processor.Processor; // import the required package/class

private TopologyBuilder processingTopologyBuilder() {
    // create the state stores
    StateStoreSupplier machineToAvgCPUUsageStore
            = Stores.create(AVG_STORE_NAME)
                    .withStringKeys()
                    .withDoubleValues()
                    .inMemory()
                    .build();
    StateStoreSupplier machineToNumOfRecordsReadStore
            = Stores.create(NUM_RECORDS_STORE_NAME)
                    .withStringKeys()
                    .withIntegerValues()
                    .inMemory()
                    .build();
    TopologyBuilder builder = new TopologyBuilder();
    builder.addSource(SOURCE_NAME, TOPIC_NAME)
           .addProcessor(PROCESSOR_NAME, new ProcessorSupplier() {
               @Override
               public Processor get() {
                   return new CPUCumulativeAverageProcessor();
               }
           }, SOURCE_NAME)
           .addStateStore(machineToAvgCPUUsageStore, PROCESSOR_NAME)
           .addStateStore(machineToNumOfRecordsReadStore, PROCESSOR_NAME);
    LOGGER.info("Kafka streams processing topology ready");
    return builder;
}
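The two stores are attached to PROCESSOR_NAME, so CPUCumulativeAverageProcessor can look them up through its ProcessorContext. A sketch of how its init() might do so; the processor's actual body is not shown on this page, so the field names here are assumptions:

import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.state.KeyValueStore;

private KeyValueStore<String, Double> avgStore;
private KeyValueStore<String, Integer> countStore;

@Override
@SuppressWarnings("unchecked")
public void init(ProcessorContext context) {
    super.init(context);
    // the names must match those passed to Stores.create(...) above
    avgStore = (KeyValueStore<String, Double>) context.getStateStore(AVG_STORE_NAME);
    countStore = (KeyValueStore<String, Integer>) context.getStateStore(NUM_RECORDS_STORE_NAME);
}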
Example 4: ProcessorNode

import org.apache.kafka.streams.processor.Processor; // import the required package/class

public ProcessorNode(String name, Processor<K, V> processor, Set<String> stateStores) {
    this.name = name;
    this.processor = processor;
    this.children = new ArrayList<>();
    this.stateStores = stateStores;
    this.time = new SystemTime();
}
Example 5: define

import org.apache.kafka.streams.processor.Processor; // import the required package/class

private <K, V> ProcessorSupplier<K, V> define(final Processor<K, V> processor) {
    return new ProcessorSupplier<K, V>() {
        @Override
        public Processor<K, V> get() {
            return processor;
        }
    };
}
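ProcessorSupplier declares a single method, so on Java 8 the same helper collapses to a lambda. Note that either form hands out one shared Processor instance from every get() call, which is only safe when the topology runs with a single stream thread:

private <K, V> ProcessorSupplier<K, V> define(final Processor<K, V> processor) {
    return () -> processor;
}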
Example 6: createKafkaStreams

import org.apache.kafka.streams.processor.Processor; // import the required package/class

private KafkaStreams createKafkaStreams(String topic, final CountDownLatch latch) {
    Properties props = setStreamProperties("simple-benchmark-streams");
    KStreamBuilder builder = new KStreamBuilder();
    KStream<Integer, byte[]> source = builder.stream(INTEGER_SERDE, BYTE_SERDE, topic);
    source.process(new ProcessorSupplier<Integer, byte[]>() {
        @Override
        public Processor<Integer, byte[]> get() {
            return new AbstractProcessor<Integer, byte[]>() {
                @Override
                public void init(ProcessorContext context) {
                }

                @Override
                public void process(Integer key, byte[] value) {
                    processedRecords++;
                    // Integer.SIZE is 32 (bits); the serialized key is 4 bytes, so this overcounts the key width
                    processedBytes += value.length + Integer.SIZE;
                    if (processedRecords == numRecords) {
                        latch.countDown();
                    }
                }

                @Override
                public void punctuate(long timestamp) {
                }

                @Override
                public void close() {
                }
            };
        }
    });
    return createKafkaStreamsWithExceptionHandler(builder, props);
}
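The empty punctuate() above never runs, because punctuation must be requested explicitly. If this benchmark needed periodic work, the pre-1.0 API would schedule it from init(); a sketch, with an arbitrary one-second interval:

import org.apache.kafka.streams.processor.AbstractProcessor;
import org.apache.kafka.streams.processor.ProcessorContext;

public class PeriodicProcessor extends AbstractProcessor<Integer, byte[]> { // hypothetical
    @Override
    public void init(ProcessorContext context) {
        super.init(context);
        context.schedule(1000L); // request punctuate() roughly once per second of stream time
    }

    @Override
    public void process(Integer key, byte[] value) {
        // per-record work goes here
    }

    @Override
    public void punctuate(long timestamp) {
        // periodic work, driven by the schedule registered in init()
    }
}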
Example 7: createKafkaStreamsWithSink

import org.apache.kafka.streams.processor.Processor; // import the required package/class

private KafkaStreams createKafkaStreamsWithSink(String topic, final CountDownLatch latch) {
    final Properties props = setStreamProperties("simple-benchmark-streams-with-sink");
    KStreamBuilder builder = new KStreamBuilder();
    KStream<Integer, byte[]> source = builder.stream(INTEGER_SERDE, BYTE_SERDE, topic);
    source.to(INTEGER_SERDE, BYTE_SERDE, SINK_TOPIC);
    source.process(new ProcessorSupplier<Integer, byte[]>() {
        @Override
        public Processor<Integer, byte[]> get() {
            return new AbstractProcessor<Integer, byte[]>() {
                @Override
                public void init(ProcessorContext context) {
                }

                @Override
                public void process(Integer key, byte[] value) {
                    processedRecords++;
                    // Integer.SIZE is 32 (bits); the serialized key is 4 bytes, so this overcounts the key width
                    processedBytes += value.length + Integer.SIZE;
                    if (processedRecords == numRecords) {
                        latch.countDown();
                    }
                }

                @Override
                public void punctuate(long timestamp) {
                }

                @Override
                public void close() {
                }
            };
        }
    });
    return createKafkaStreamsWithExceptionHandler(builder, props);
}
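The only difference from the previous example is the to() call: to() does not terminate a KStream, so the same source node is forked, with every record both forwarded to SINK_TOPIC and counted by the processor.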
Example 8: get

import org.apache.kafka.streams.processor.Processor; // import the required package/class

/**
 * {@inheritDoc}
 * @see org.apache.kafka.streams.processor.ProcessorSupplier#get()
 */
@Override
public Processor<K, V> get() {
    final AbstractStreamedMetricProcessor<K, V> processor = getProcessor(topicSink, sources);
    startedProcessors.add(processor);
    final String processorBeanName = processor.getClass().getSimpleName() + "#" + processor.getInstanceId();
    processor.setBeanName(processorBeanName);
    ((SingletonBeanRegistry) appCtx.getAutowireCapableBeanFactory()).registerSingleton(processorBeanName, processor);
    appCtx.getAutowireCapableBeanFactory().autowireBean(processor);
    return processor;
}
Example 9: processorDef

import org.apache.kafka.streams.processor.Processor; // import the required package/class

/**
 * Utility method to create a {@link ProcessorSupplier} for a given service.
 *
 * @param processorInstance the processor instance; may not be null
 * @return the {@link ProcessorSupplier} instance; never null
 */
protected static ProcessorSupplier<String, Document> processorDef(ServiceProcessor processorInstance) {
    return new ProcessorSupplier<String, Document>() {
        @Override
        public Processor<String, Document> get() {
            return processorInstance;
        }
    };
}
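A supplier built this way plugs directly into a topology. A usage sketch; the node names, topic, and myServiceProcessor instance are illustrative rather than taken from the original project:

TopologyBuilder builder = new TopologyBuilder();
builder.addSource("requests-source", "service-requests") // hypothetical topic
       .addProcessor("service-node", processorDef(myServiceProcessor), "requests-source");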
Example 10: get

import org.apache.kafka.streams.processor.Processor; // import the required package/class

@Override
public Processor<K, Change<V1>> get() {
    return new KTableKTableLeftJoinProcessor(valueGetterSupplier2.get());
}
Example 11: get

import org.apache.kafka.streams.processor.Processor; // import the required package/class

@Override
public Processor<K, Change<V>> get() {
    return new KTableMapProcessor();
}
Example 12: get

import org.apache.kafka.streams.processor.Processor; // import the required package/class

@Override
public Processor<K1, V1> get() {
    return new KStreamKTableJoinProcessor<>(valueGetterSupplier.get(), mapper, joiner, leftJoin);
}
Example 13: get

import org.apache.kafka.streams.processor.Processor; // import the required package/class

@Override
public Processor<K, Change<V1>> get() {
    return new KTableKTableOuterJoinProcessor(valueGetterSupplier2.get());
}
Example 14: get

import org.apache.kafka.streams.processor.Processor; // import the required package/class

@Override
public Processor<K, V> get() {
    return new KStreamJoinWindowProcessor();
}