本文整理汇总了Java中org.apache.kafka.streams.processor.ProcessorContext.keySerde方法的典型用法代码示例。如果您正苦于以下问题:Java ProcessorContext.keySerde方法的具体用法?Java ProcessorContext.keySerde怎么用?Java ProcessorContext.keySerde使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.kafka.streams.processor.ProcessorContext的用法示例。
在下文中一共展示了ProcessorContext.keySerde方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: initInternal
import org.apache.kafka.streams.processor.ProcessorContext; //导入方法依赖的package包/类
@SuppressWarnings("unchecked")
private void initInternal(final ProcessorContext context) {
    final InternalProcessorContext internalContext = (InternalProcessorContext) context;
    this.context = internalContext;

    // Fall back to the context-wide serdes when none were configured for this store.
    final Serde<K> keys = keySerde == null ? (Serde<K>) context.keySerde() : keySerde;
    final Serde<V> values = valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde;
    final String changelogTopic =
            ProcessorStateManager.storeChangelogTopic(context.applicationId(), underlying.name());
    this.serdes = new StateSerdes<>(changelogTopic, keys, values);

    // Cache entries are namespaced per task and store.
    this.cacheName = context.taskId() + "-" + underlying.name();
    this.cache = internalContext.getCache();

    // When the thread cache flushes dirty entries, write them through to the
    // underlying store and possibly forward them downstream.
    cache.addDirtyEntryFlushListener(cacheName, new ThreadCache.DirtyEntryFlushListener() {
        @Override
        public void apply(final List<ThreadCache.DirtyEntry> entries) {
            for (final ThreadCache.DirtyEntry entry : entries) {
                putAndMaybeForward(entry, internalContext);
            }
        }
    });
}
示例2: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入方法依赖的package包/类
@Override
@SuppressWarnings("unchecked")
public void init(ProcessorContext context, StateStore root) {
    // Build the state serdes, defaulting to the context's serdes when this
    // store was not configured with its own.
    final String changelogTopic =
            ProcessorStateManager.storeChangelogTopic(context.applicationId(), name);
    final Serde<K> keys = keySerde == null ? (Serde<K>) context.keySerde() : keySerde;
    final Serde<V> values = valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde;
    this.serdes = new StateSerdes<>(changelogTopic, keys, values);

    if (root != null) {
        // Register the store with a callback that replays changelog records.
        context.register(root, true, new StateRestoreCallback() {
            @Override
            public void restore(byte[] key, byte[] value) {
                // A null value is a delete marker; skip deserialization to
                // avoid a deserialization error on the tombstone.
                put(serdes.keyFrom(key), value == null ? null : serdes.valueFrom(value));
            }
        });
    }

    this.open = true;
}
示例3: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入方法依赖的package包/类
@Override
@SuppressWarnings("unchecked")
public void init(ProcessorContext context, StateStore root) {
    inner.init(context, root);

    // Construct serdes for the store's changelog topic, falling back to the
    // context-wide serdes when none were configured for this store.
    final String changelogTopic =
            ProcessorStateManager.storeChangelogTopic(context.applicationId(), inner.name());
    final Serde<K> keys = keySerde == null ? (Serde<K>) context.keySerde() : keySerde;
    final Serde<V> values = valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde;
    final StateSerdes<K, V> serdes = new StateSerdes<>(changelogTopic, keys, values);
    this.changeLogger = new StoreChangeLogger<>(inner.name(), context, serdes);

    // When the inner store is an LRU cache, record evictions in the changelog
    // so that removed entries are logged as deletions.
    if (inner instanceof MemoryLRUCache) {
        ((MemoryLRUCache<K, V>) inner).whenEldestRemoved(new MemoryNavigableLRUCache.EldestEntryRemovalListener<K, V>() {
            @Override
            public void apply(K key, V value) {
                removed(key);
            }
        });
    }
}
示例4: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入方法依赖的package包/类
@Override
public void init(ProcessorContext context) {
    this.context = context;
    // Any serde not configured explicitly is inherited from the context.
    this.keySerde = keySerde != null ? keySerde : context.keySerde();
    this.valueSerde = valueSerde != null ? valueSerde : context.valueSerde();
}
示例5: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入方法依赖的package包/类
@Override
@SuppressWarnings("unchecked")
public void init(final ProcessorContext context, final StateStore root) {
    this.context = context;

    // Serdes default to the context's serdes when this store has none of its own.
    final String changelogTopic =
            ProcessorStateManager.storeChangelogTopic(context.applicationId(), bytesStore.name());
    final Serde<K> keys = keySerde == null ? (Serde<K>) context.keySerde() : keySerde;
    final Serde<V> values = valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde;
    serdes = new StateSerdes<>(changelogTopic, keys, values);

    bytesStore.init(context, root);
}
示例6: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入方法依赖的package包/类
@Override
@SuppressWarnings("unchecked")
public void init(final ProcessorContext context, final StateStore root) {
    // The changelog topic name is derived from the application id and store name.
    topic = ProcessorStateManager.storeChangelogTopic(context.applicationId(), bytesStore.name());

    // Key and aggregate serdes fall back to the context-wide serdes when unset.
    final Serde<K> keys = keySerde == null ? (Serde<K>) context.keySerde() : keySerde;
    final Serde<AGG> aggregates = aggSerde == null ? (Serde<AGG>) context.valueSerde() : aggSerde;
    serdes = new StateSerdes<>(topic, keys, aggregates);

    bytesStore.init(context, root);
}
示例7: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入方法依赖的package包/类
@SuppressWarnings("unchecked")
@Override
public void init(final ProcessorContext context, final StateStore root) {
    // Initialize the wrapped bytes store first.
    innerBytes.init(context, root);

    // Then build the serdes, defaulting to the context's serdes when this
    // store was not given its own.
    final String changelogTopic =
            ProcessorStateManager.storeChangelogTopic(context.applicationId(), innerBytes.name());
    final Serde<K> keys = keySerde == null ? (Serde<K>) context.keySerde() : keySerde;
    final Serde<V> values = valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde;
    serdes = new StateSerdes<>(changelogTopic, keys, values);
}
示例8: init
import org.apache.kafka.streams.processor.ProcessorContext; //导入方法依赖的package包/类
@Override
@SuppressWarnings("unchecked")
public void init(ProcessorContext context, StateStore root) {
    // Build the state serdes, falling back to the context-wide serdes when
    // none were configured for this store.
    final String changelogTopic =
            ProcessorStateManager.storeChangelogTopic(context.applicationId(), name);
    final Serde<K> keys = keySerde == null ? (Serde<K>) context.keySerde() : keySerde;
    final Serde<V> values = valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde;
    this.serdes = new StateSerdes<>(changelogTopic, keys, values);

    // Register the store; the callback replays changelog records on restore.
    context.register(root, true, new StateRestoreCallback() {
        @Override
        public void restore(byte[] key, byte[] value) {
            // Mark restoration in progress around the put — presumably so
            // put() behaves differently while restoring; confirm in the
            // enclosing class.
            restoring = true;
            if (value == null) {
                // Null value is a tombstone; do not attempt to deserialize it.
                put(serdes.keyFrom(key), null);
            } else {
                put(serdes.keyFrom(key), serdes.valueFrom(value));
            }
            restoring = false;
        }
    });
}
示例9: openDB
import org.apache.kafka.streams.processor.ProcessorContext; //导入方法依赖的package包/类
/**
 * Opens the RocksDB instance that backs this store.
 *
 * Builds the default RocksDB options (block cache, write buffer, compression,
 * compaction style), applies any user-provided {@code RocksDBConfigSetter}
 * from the application config, constructs the state serdes, and opens the
 * database under {@code <stateDir>/<parentDir>/<name>}.
 */
@SuppressWarnings("unchecked")
public void openDB(ProcessorContext context) {
// initialize the default rocksdb options
final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
tableConfig.setBlockCacheSize(BLOCK_CACHE_SIZE);
tableConfig.setBlockSize(BLOCK_SIZE);
options = new Options();
options.setTableFormatConfig(tableConfig);
options.setWriteBufferSize(WRITE_BUFFER_SIZE);
options.setCompressionType(COMPRESSION_TYPE);
options.setCompactionStyle(COMPACTION_STYLE);
options.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
options.setCreateIfMissing(true);
options.setErrorIfExists(false);
options.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
// this is the recommended way to increase parallelism in RocksDb
// note that the current implementation of setIncreaseParallelism affects the number
// of compaction threads but not flush threads (the latter remains one). Also
// the parallelism value needs to be at least two because of the code in
// https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
// subtracts one from the value passed to determine the number of compaction threads
// (this could be a bug in the RocksDB code and their devs have been contacted).
options.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2));
wOptions = new WriteOptions();
// NOTE(review): WAL is disabled — presumably durability is provided by the
// store's changelog topic instead; confirm against the store's flush logic.
wOptions.setDisableWAL(true);
fOptions = new FlushOptions();
fOptions.setWaitForFlush(true);
// Apply the user's RocksDBConfigSetter (if configured) AFTER the defaults,
// so user settings take precedence. parseType returns null for a null
// config value, which the null check below skips.
final Map<String, Object> configs = context.appConfigs();
final Object configSetterValue = configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
final Class<RocksDBConfigSetter> configSetterClass = (Class<RocksDBConfigSetter>) ConfigDef.parseType(
StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG,
configSetterValue,
ConfigDef.Type.CLASS);
if (configSetterClass != null) {
final RocksDBConfigSetter configSetter = Utils.newInstance(configSetterClass);
configSetter.setConfig(name, options, configs);
}
// we need to construct the serde while opening DB since
// it is also triggered by windowed DB segments without initialization
this.serdes = new StateSerdes<>(
ProcessorStateManager.storeChangelogTopic(context.applicationId(), name),
keySerde == null ? (Serde<K>) context.keySerde() : keySerde,
valueSerde == null ? (Serde<V>) context.valueSerde() : valueSerde);
// Database files live under the task state directory:
// <stateDir>/<parentDir>/<name>.
this.dbDir = new File(new File(context.stateDir(), parentDir), this.name);
try {
// TTL_SECONDS presumably enables RocksDB's TTL mode when set — confirm
// in the private openDB(File, Options, int) overload.
this.db = openDB(this.dbDir, this.options, TTL_SECONDS);
} catch (IOException e) {
// Wrap in the Streams-specific unchecked exception expected by callers.
throw new StreamsException(e);
}
}