本文整理汇总了Java中org.apache.kafka.streams.processor.ProcessorContext类的典型用法代码示例。如果您正苦于以下问题:Java ProcessorContext类的具体用法?Java ProcessorContext怎么用?Java ProcessorContext使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
ProcessorContext类属于org.apache.kafka.streams.processor包,在下文中一共展示了ProcessorContext类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@Override
public void init(ProcessorContext context, StateStore root) {
    this.context = context;

    // The changelog topic name is derived from the application id and the store name.
    keySchema.init(ProcessorStateManager.storeChangelogTopic(context.applicationId(), root.name()));
    segments.openExisting(context);

    // Register with the context (no logging on restore); restored records are
    // replayed straight back into the store as raw key/value bytes.
    context.register(root, false, (key, value) -> put(Bytes.wrap(key), value));

    flush();
    open = true;
}
示例2: initInternal
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@SuppressWarnings("unchecked")
private void initInternal(final ProcessorContext context) {
    this.context = (InternalProcessorContext) context;

    // Build the serdes for the changelog topic; fall back to the context-wide
    // defaults for any serde that was not supplied explicitly.
    final String changelogTopic =
            ProcessorStateManager.storeChangelogTopic(context.applicationId(), underlying.name());
    this.serdes = new StateSerdes<>(changelogTopic,
            keySerde == null ? (Serde<K>) context.keySerde() : keySerde,
            valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde);

    // Each task gets its own named slice of the shared thread cache.
    this.cacheName = context.taskId() + "-" + underlying.name();
    this.cache = this.context.getCache();

    // When dirty entries are flushed out of the cache, write them through and
    // possibly forward them downstream.
    cache.addDirtyEntryFlushListener(cacheName, entries -> {
        for (final ThreadCache.DirtyEntry entry : entries) {
            putAndMaybeForward(entry, (InternalProcessorContext) context);
        }
    });
}
示例3: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@Override
@SuppressWarnings("unchecked")
public void init(ProcessorContext context, StateStore root) {
    // Construct the serdes, using the context-wide defaults when none were given.
    this.serdes = new StateSerdes<>(
            ProcessorStateManager.storeChangelogTopic(context.applicationId(), name),
            keySerde == null ? (Serde<K>) context.keySerde() : keySerde,
            valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde);

    if (root != null) {
        // Register the store (with logging). A null value is a tombstone and is
        // stored as null directly, bypassing the value deserializer.
        context.register(root, true, new StateRestoreCallback() {
            @Override
            public void restore(byte[] key, byte[] value) {
                put(serdes.keyFrom(key), value == null ? null : serdes.valueFrom(value));
            }
        });
    }

    this.open = true;
}
示例4: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@Override
public void init(ProcessorContext context, StateStore root) {
    final String name = name();
    this.context = context;
    this.root = root;
    this.metrics = (StreamsMetricsImpl) context.metrics();

    // One latency/throughput sensor per store operation, all at DEBUG recording level.
    // The repeated addLatencyAndThroughputSensor(...) calls are factored into a helper.
    this.putTime = createOperationSensor(name, "put");
    this.putIfAbsentTime = createOperationSensor(name, "put-if-absent");
    this.getTime = createOperationSensor(name, "get");
    this.deleteTime = createOperationSensor(name, "delete");
    this.putAllTime = createOperationSensor(name, "put-all");
    this.allTime = createOperationSensor(name, "all");
    this.rangeTime = createOperationSensor(name, "range");
    this.flushTime = createOperationSensor(name, "flush");
    this.restoreTime = createOperationSensor(name, "restore");

    // Register and possibly restore the state from the logs, timing the restore.
    metrics.measureLatencyNs(time, initDelegate, this.restoreTime);
}

// Creates a DEBUG-level latency-and-throughput sensor for one operation of this store.
private Sensor createOperationSensor(final String storeName, final String operation) {
    return metrics.addLatencyAndThroughputSensor(metricScope, storeName, operation, Sensor.RecordingLevel.DEBUG);
}
示例5: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@Override
@SuppressWarnings("unchecked")
public void init(ProcessorContext context, StateStore root) {
    inner.init(context, root);

    // Serdes for the changelog; context defaults are used when none were provided.
    final StateSerdes<K, V> serdes = new StateSerdes<>(
            ProcessorStateManager.storeChangelogTopic(context.applicationId(), inner.name()),
            keySerde == null ? (Serde<K>) context.keySerde() : keySerde,
            valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde);
    this.changeLogger = new StoreChangeLogger<>(inner.name(), context, serdes);

    // An LRU-backed inner store silently evicts its eldest entry; hook the
    // eviction so the removal is also recorded in the changelog.
    if (inner instanceof MemoryLRUCache) {
        ((MemoryLRUCache<K, V>) inner).whenEldestRemoved((key, value) -> removed(key));
    }
}
示例6: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@SuppressWarnings("unchecked")
@Override
public void init(ProcessorContext context) {
    super.init(context);
    this.context = context;

    // Fall back to the context-wide serdes for any deserializer that was not supplied.
    if (keyDeserializer == null) {
        keyDeserializer = ensureExtended((Deserializer<K>) context.keySerde().deserializer());
    }
    if (valDeserializer == null) {
        valDeserializer = ensureExtended((Deserializer<V>) context.valueSerde().deserializer());
    }

    // A ChangedDeserializer wraps an inner value deserializer; wire one in when missing.
    if (valDeserializer instanceof ChangedDeserializer
            && ((ChangedDeserializer) valDeserializer).inner() == null) {
        ((ChangedDeserializer) valDeserializer).setInner(context.valueSerde().deserializer());
    }
}
示例7: createKeyValueStore
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@SuppressWarnings("unchecked")
@Override
protected <K, V> KeyValueStore<K, V> createKeyValueStore(
    ProcessorContext context,
    Class<K> keyClass,
    Class<V> valueClass,
    boolean useContextSerdes) {
    // Serdes come either from the processor context or are derived from the classes.
    final StateStoreSupplier supplier = useContextSerdes
            ? Stores.create("my-store").withKeys(context.keySerde()).withValues(context.valueSerde()).inMemory().maxEntries(10).build()
            : Stores.create("my-store").withKeys(keyClass).withValues(valueClass).inMemory().maxEntries(10).build();

    // Materialize the in-memory store (capped at 10 entries) and initialize it.
    final KeyValueStore<K, V> store = (KeyValueStore<K, V>) supplier.get();
    store.init(context, store);
    return store;
}
示例8: createKeyValueStore
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@SuppressWarnings("unchecked")
@Override
protected <K, V> KeyValueStore<K, V> createKeyValueStore(final ProcessorContext context,
                                                         final Class<K> keyClass,
                                                         final Class<V> valueClass,
                                                         final boolean useContextSerdes) {
    // Serdes come either from the processor context or are derived from the classes.
    final Stores.PersistentKeyValueFactory<?, ?> factory = useContextSerdes
            ? Stores.create("my-store").withKeys(context.keySerde()).withValues(context.valueSerde()).persistent()
            : Stores.create("my-store").withKeys(keyClass).withValues(valueClass).persistent();

    // Materialize the persistent store and initialize it.
    final KeyValueStore<K, V> store = (KeyValueStore<K, V>) factory.build().get();
    store.init(context, store);
    return store;
}
示例9: createKeyValueStore
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@SuppressWarnings("unchecked")
@Override
protected <K, V> KeyValueStore<K, V> createKeyValueStore(
    ProcessorContext context,
    Class<K> keyClass,
    Class<V> valueClass,
    boolean useContextSerdes) {
    // Serdes come either from the processor context or are derived from the classes.
    final StateStoreSupplier supplier = useContextSerdes
            ? Stores.create("my-store").withKeys(context.keySerde()).withValues(context.valueSerde()).inMemory().build()
            : Stores.create("my-store").withKeys(keyClass).withValues(valueClass).inMemory().build();

    // Materialize the in-memory store and initialize it.
    final KeyValueStore<K, V> store = (KeyValueStore<K, V>) supplier.get();
    store.init(context, store);
    return store;
}
示例10: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@Override
@SuppressWarnings("unchecked")
public void init(ProcessorContext context) {
    this.context = context;

    // Schedule the punctuator to fire once per 24 hours of stream time.
    final long oneDayMillis = 24L * 60 * 60 * 1000;
    this.context.schedule(oneDayMillis, PunctuationType.STREAM_TIME, this::punctuate);

    this.kvStore = (KeyValueStore<Long, List<OrderGoods>>) context.getStateStore(OrderConstants.GOOD_ORDER_TOPIC);
}
示例11: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@Override
public void init(ProcessorContext pc) {
    this.pc = pc;

    // punctuate() will be invoked every 12 seconds.
    pc.schedule(12000);

    // Resolve the two local state stores registered for this processor.
    this.machineToAvgCPUUsageStore = (KeyValueStore<String, Double>) pc.getStateStore(AVG_STORE_NAME);
    this.machineToNumOfRecordsReadStore = (KeyValueStore<String, Integer>) pc.getStateStore(NUM_RECORDS_STORE_NAME);

    PROC_LOGGER.info("Processor initialized");
}
示例12: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@Override
public void init(ProcessorContext context) {
    // NOTE(review): the author was unsure whether to delegate to the superclass;
    // super.init(context) is currently skipped — confirm the superclass does not
    // require it before relying on this processor.
    //super.init(context);
    // Keep a reference to the processor context for later use.
    this.context = context;
}
示例13: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@Override
@SuppressWarnings("unchecked")
public void init(ProcessorContext context) {
    this.context = context;
    // Punctuation is left disabled:
    // this.context.schedule(1000);

    // The store must have been attached to this processor by the topology.
    final KeyValueStore<String, StreamState> stateStore =
            (KeyValueStore<String, StreamState>) this.context.getStateStore(Consumer.STREAM_STATE);
    this.store = stateStore;
    Objects.requireNonNull(store, "State store can't be null");
}
示例14: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@SuppressWarnings("unchecked")
@Override
public void init(ProcessorContext context) {
    super.init(context);
    // Resolve the window store registered under windowName.
    this.window = (WindowStore<K, V>) context.getStateStore(windowName);
}
示例15: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入依赖的package包/类
@SuppressWarnings("unchecked")
@Override
public void init(ProcessorContext context) {
    super.init(context);
    if (queryableName == null) {
        // Not materialized under a queryable name; nothing further to wire up.
        return;
    }

    // Materialized: resolve the backing store and forward cache-flush results,
    // optionally including old values.
    this.store = (KeyValueStore<K, V>) context.getStateStore(queryableName);
    final ForwardingCacheFlushListener<K, V> flushListener =
            new ForwardingCacheFlushListener<K, V>(context, sendOldValues);
    this.tupleForwarder = new TupleForwarder<>(store, context, flushListener, sendOldValues);
}