This article collects typical usage examples of the Java class org.apache.kafka.common.serialization.Serializer. If you are unsure what the Serializer class does, how to use it, or want to see worked examples, the curated class code samples below may help.
The Serializer class belongs to the org.apache.kafka.common.serialization package. A total of 15 code examples of the Serializer class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
Example 1: createProducer
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
public <K, V> KafkaProducer<K, V> createProducer(Class<? extends Serializer<K>> keySerializer, Class<? extends Serializer<V>> valueSerializer,
Map<String, Object> extraConfig) {
Properties props = new Properties();
props.put("bootstrap.servers", bootstrapServers);
props.put("acks", "all");
props.put("retries", 0);
props.put("batch.size", 16384);
props.put("linger.ms", 1);
props.put("buffer.memory", 33554432);
props.put("key.serializer", keySerializer.getName());
props.put("value.serializer", valueSerializer.getName());
for (Map.Entry<String, Object> extraCfgEntry : extraConfig.entrySet()) {
props.put(extraCfgEntry.getKey(), extraCfgEntry.getValue());
}
return new KafkaProducer<>(props);
}
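A minimal usage sketch of the factory above. It assumes "factory" is an instance of the enclosing class with bootstrapServers already set; the StringSerializer choices and the "demo-topic" name are illustrative, not from the original source.
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Collections;
// Create a String/String producer with no extra configuration overrides.
KafkaProducer<String, String> producer = factory.createProducer(
        StringSerializer.class, StringSerializer.class, Collections.emptyMap());
producer.send(new ProducerRecord<>("demo-topic", "key", "value")); // asynchronous send
producer.close(); // flushes pending records before releasing resources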
Example 2: buildBasicSerializer
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
/**
* Creates a basic serializer from the passed stateless function, with no-op configure and close methods.
*
* @param serializeFunc The function to serialize T to a byte[]
* @param <T> The type of object to serialize
* @return A Serializer&lt;T&gt; that delegates serialization to serializeFunc
*/
public static <T> Serializer<T> buildBasicSerializer(final SerializeFunc<T> serializeFunc) {
return new Serializer<T>() {
@Override
public void configure(final Map<String, ?> configs, final boolean isKey) {
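// no-op: this serializer is stateless and requires no configuration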
}
@Override
public byte[] serialize(final String topic, final T data) {
return serializeFunc.serialize(topic, data);
}
@Override
public void close() {
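// no-op: nothing to release for a stateless serializer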
}
};
}
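Usage sketch: assuming SerializeFunc is a functional interface whose single method matches the serialize(topic, data) delegation above, a lambda is enough to build a serializer. The UTF-8 String example is illustrative.
import org.apache.kafka.common.serialization.Serializer;
import java.nio.charset.StandardCharsets;
// Build a Serializer<String> from a stateless lambda; the topic argument is unused here.
Serializer<String> stringSerializer =
        buildBasicSerializer((topic, data) -> data.getBytes(StandardCharsets.UTF_8));
byte[] bytes = stringSerializer.serialize("any-topic", "hello");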
Example 3: getKafkaProducer
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
/**
* Creates a Kafka producer that is connected to our test server.
* @param <K> Type of message key
* @param <V> Type of message value
* @param keySerializer Class of serializer to be used for keys.
* @param valueSerializer Class of serializer to be used for values.
* @return KafkaProducer configured to produce into the test server.
*/
public <K, V> KafkaProducer<K, V> getKafkaProducer(
final Class<? extends Serializer<K>> keySerializer,
final Class<? extends Serializer<V>> valueSerializer) {
// Build config
final Map<String, Object> kafkaProducerConfig = Maps.newHashMap();
kafkaProducerConfig.put("bootstrap.servers", getKafkaConnectString());
kafkaProducerConfig.put("key.serializer", keySerializer);
kafkaProducerConfig.put("value.serializer", valueSerializer);
kafkaProducerConfig.put("max.in.flight.requests.per.connection", 1);
kafkaProducerConfig.put("retries", 5);
kafkaProducerConfig.put("client.id", getClass().getSimpleName() + " Producer");
kafkaProducerConfig.put("batch.size", 0);
// Create and return Producer.
return new KafkaProducer<>(kafkaProducerConfig);
}
Example 4: to
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
@SuppressWarnings("unchecked")
@Override
public void to(final Serde<K> keySerde, final Serde<V> valSerde, StreamPartitioner<? super K, ? super V> partitioner, final String topic) {
Objects.requireNonNull(topic, "topic can't be null");
final String name = topology.newName(SINK_NAME);
final Serializer<K> keySerializer = keySerde == null ? null : keySerde.serializer();
final Serializer<V> valSerializer = valSerde == null ? null : valSerde.serializer();
if (partitioner == null && keySerializer != null && keySerializer instanceof WindowedSerializer) {
final WindowedSerializer<Object> windowedSerializer = (WindowedSerializer<Object>) keySerializer;
partitioner = (StreamPartitioner<K, V>) new WindowedStreamPartitioner<Object, V>(topic, windowedSerializer);
}
topology.addSink(name, topic, keySerializer, valSerializer, partitioner, this.name);
}
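Usage sketch: this internal overload ultimately backs public calls such as the one below, assuming stream is a KStream<String, Long>; the serde choices and topic name are illustrative.
import org.apache.kafka.common.serialization.Serdes;
// Write the stream to "output-topic" with explicit serdes instead of the configured defaults.
stream.to(Serdes.String(), Serdes.Long(), "output-topic");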
Example 5: configure
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
if (inner == null) {
String propertyName = isKey ? "key.serializer.inner.class" : "value.serializer.inner.class";
Object innerSerializerClass = configs.get(propertyName);
propertyName = (innerSerializerClass == null) ? "serializer.inner.class" : propertyName;
String value = null;
try {
value = (String) configs.get(propertyName);
inner = Serializer.class.cast(Utils.newInstance(value, Serializer.class));
inner.configure(configs, isKey);
} catch (ClassNotFoundException e) {
throw new ConfigException(propertyName, value, "Class " + value + " could not be found.");
}
}
}
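Configuration sketch: given the lookup order above, a key-side wrapper first checks key.serializer.inner.class and only falls back to serializer.inner.class when that entry is absent. A hypothetical config map exercising the fallback; wrapperSerializer stands for an instance of the wrapping serializer shown above.
import java.util.HashMap;
import java.util.Map;
Map<String, Object> configs = new HashMap<>();
// No key.serializer.inner.class entry, so the generic property is used instead.
configs.put("serializer.inner.class", "org.apache.kafka.common.serialization.StringSerializer");
wrapperSerializer.configure(configs, true); // inner becomes a configured StringSerializer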
Example 6: send
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
@Override
public <K, V> void send(final String topic,
final K key,
final V value,
final Long timestamp,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer,
final StreamPartitioner<? super K, ? super V> partitioner) {
Integer partition = null;
if (partitioner != null) {
final List<PartitionInfo> partitions = producer.partitionsFor(topic);
if (partitions.size() > 0) {
partition = partitioner.partition(key, value, partitions.size());
} else {
throw new StreamsException("Could not get partition information for topic '" + topic + "'." +
" This can happen if the topic does not exist.");
}
}
send(topic, key, value, partition, timestamp, keySerializer, valueSerializer);
}
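For reference, a minimal StreamPartitioner sketch of the kind the partitioner parameter above accepts; routing by key hash with a fallback to partition 0 for null keys is an illustrative policy, not the library default.
import org.apache.kafka.streams.processor.StreamPartitioner;
// Pick a partition from the key hash; the mask keeps the hash non-negative.
StreamPartitioner<String, String> byKeyHash = (key, value, numPartitions) ->
        key == null ? 0 : (key.hashCode() & 0x7fffffff) % numPartitions;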
Example 7: init
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
@SuppressWarnings("unchecked")
@Override
public void init(final ProcessorContext context) {
super.init(context);
this.context = context;
// if serializers are null, get the default ones from the context
if (keySerializer == null) {
keySerializer = (Serializer<K>) context.keySerde().serializer();
}
if (valSerializer == null) {
valSerializer = (Serializer<V>) context.valueSerde().serializer();
}
// if value serializers are for {@code Change} values, set the inner serializer when necessary
if (valSerializer instanceof ChangedSerializer &&
((ChangedSerializer) valSerializer).inner() == null) {
((ChangedSerializer) valSerializer).setInner(context.valueSerde().serializer());
}
}
Example 8: addSink
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
/**
* Add a new sink that forwards records from upstream parent processor and/or source nodes to the named Kafka topic.
* The sink will use the specified key and value serializers, and the supplied partitioner.
*
* @param name the unique name of the sink
* @param topic the name of the Kafka topic to which this sink should write its records
* @param keySerializer the {@link Serializer key serializer} used when writing records to the topic; may be null if the sink
* should use the {@link org.apache.kafka.streams.StreamsConfig#DEFAULT_KEY_SERDE_CLASS_CONFIG default key serializer} specified in the
* {@link org.apache.kafka.streams.StreamsConfig stream configuration}
* @param valSerializer the {@link Serializer value serializer} used when writing records to the topic; may be null if the sink
* should use the {@link org.apache.kafka.streams.StreamsConfig#DEFAULT_VALUE_SERDE_CLASS_CONFIG default value serializer} specified in the
* {@link org.apache.kafka.streams.StreamsConfig stream configuration}
* @param partitioner the function that should be used to determine the partition for each record processed by the sink
* @param parentNames the name of one or more source or processor nodes whose output records this sink should consume
* and write to its topic
* @return this builder instance so methods can be chained together; never null
* @see #addSink(String, String, String...)
* @see #addSink(String, String, StreamPartitioner, String...)
* @see #addSink(String, String, Serializer, Serializer, String...)
* @throws TopologyBuilderException if parent processor is not added yet, or if this processor's name is equal to the parent's name
*/
public synchronized final <K, V> TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames) {
Objects.requireNonNull(name, "name must not be null");
Objects.requireNonNull(topic, "topic must not be null");
if (nodeFactories.containsKey(name))
throw new TopologyBuilderException("Processor " + name + " is already added.");
for (final String parent : parentNames) {
if (parent.equals(name)) {
throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself.");
}
if (!nodeFactories.containsKey(parent)) {
throw new TopologyBuilderException("Parent processor " + parent + " is not added yet.");
}
}
nodeFactories.put(name, new SinkNodeFactory<>(name, parentNames, topic, keySerializer, valSerializer, partitioner));
nodeToSinkTopic.put(name, topic);
nodeGrouper.add(name);
nodeGrouper.unite(name, parentNames);
return this;
}
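Usage sketch: wiring a sink through this method, assuming builder is a TopologyBuilder on which an "upstream-processor" node has already been added; all names are illustrative. The cast disambiguates the null partitioner from the String varargs overload.
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.processor.StreamPartitioner;
builder.addSink("sink-node", "output-topic",
        new StringSerializer(), new LongSerializer(),
        (StreamPartitioner<String, Long>) null, // null -> producer's default partitioning
        "upstream-processor");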
Example 9: testWindowedSerializerNoArgConstructors
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
@Test
public void testWindowedSerializerNoArgConstructors() {
Map<String, String> props = new HashMap<>();
// test key[value].serializer.inner.class takes precedence over serializer.inner.class
WindowedSerializer<StringSerializer> windowedSerializer = new WindowedSerializer<>();
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "host:1");
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
props.put("key.serializer.inner.class", "org.apache.kafka.common.serialization.StringSerializer");
props.put("serializer.inner.class", "org.apache.kafka.common.serialization.StringSerializer");
windowedSerializer.configure(props, true);
Serializer<?> inner = windowedSerializer.innerSerializer();
assertNotNull("Inner serializer should not be null", inner);
assertTrue("Inner serializer type should be StringSerializer", inner instanceof StringSerializer);
// test serializer.inner.class
props.put("serializer.inner.class", "org.apache.kafka.common.serialization.ByteArraySerializer");
props.remove("key.serializer.inner.class");
props.remove("value.serializer.inner.class");
WindowedSerializer<?> windowedSerializer1 = new WindowedSerializer<>();
windowedSerializer1.configure(props, false);
Serializer<?> inner1 = windowedSerializer1.innerSerializer();
assertNotNull("Inner serializer should not be null", inner1);
assertTrue("Inner serializer type should be ByteArraySerializer", inner1 instanceof ByteArraySerializer);
}
Example 10: shouldCreateLoggingEnabledStoreWhenWindowStoreLogged
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
@Test
public void shouldCreateLoggingEnabledStoreWhenWindowStoreLogged() throws Exception {
store = createStore(true, false);
final List<ProducerRecord> logged = new ArrayList<>();
final NoOpRecordCollector collector = new NoOpRecordCollector() {
@Override
public <K, V> void send(final String topic,
K key,
V value,
Integer partition,
Long timestamp,
Serializer<K> keySerializer,
Serializer<V> valueSerializer) {
logged.add(new ProducerRecord<K, V>(topic, partition, timestamp, key, value));
}
};
final MockProcessorContext context = new MockProcessorContext(TestUtils.tempDirectory(),
Serdes.String(),
Serdes.String(),
collector,
cache);
context.setTime(1);
store.init(context, store);
store.put("a", "b");
assertFalse(logged.isEmpty());
}
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 27, Source: RocksDBWindowStoreSupplierTest.java
Example 11: shouldNotBeLoggingEnabledStoreWhenLoggingNotEnabled
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
@Test
public void shouldNotBeLoggingEnabledStoreWhenLoggingNotEnabled() throws Exception {
store = createStore(false, false);
final List<ProducerRecord> logged = new ArrayList<>();
final NoOpRecordCollector collector = new NoOpRecordCollector() {
@Override
public <K, V> void send(final String topic,
K key,
V value,
Integer partition,
Long timestamp,
Serializer<K> keySerializer,
Serializer<V> valueSerializer) {
logged.add(new ProducerRecord<K, V>(topic, partition, timestamp, key, value));
}
};
final MockProcessorContext context = new MockProcessorContext(TestUtils.tempDirectory(),
Serdes.String(),
Serdes.String(),
collector,
cache);
context.setTime(1);
store.init(context, store);
store.put("a", "b");
assertTrue(logged.isEmpty());
}
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 27, Source: RocksDBWindowStoreSupplierTest.java
Example 12: before
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
@Before
public void before() {
final NoOpRecordCollector collector = new NoOpRecordCollector() {
@Override
public <K, V> void send(final String topic,
K key,
V value,
Integer partition,
Long timestamp,
Serializer<K> keySerializer,
Serializer<V> valueSerializer) {
sent.put(key, value);
}
};
context = new MockProcessorContext(
TestUtils.tempDirectory(),
Serdes.String(),
Serdes.Long(),
collector,
new ThreadCache("testCache", 0, new MockStreamsMetrics(new Metrics())));
context.setTime(0);
store.init(context, store);
}
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 24, Source: ChangeLoggingKeyValueBytesStoreTest.java
Example 13: shouldCreateLoggingEnabledStoreWhenStoreLogged
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
@Test
public void shouldCreateLoggingEnabledStoreWhenStoreLogged() throws Exception {
store = createStore(true, false);
final List<ProducerRecord> logged = new ArrayList<>();
final NoOpRecordCollector collector = new NoOpRecordCollector() {
@Override
public <K, V> void send(final String topic,
K key,
V value,
Integer partition,
Long timestamp,
Serializer<K> keySerializer,
Serializer<V> valueSerializer) {
logged.add(new ProducerRecord<K, V>(topic, partition, timestamp, key, value));
}
};
final MockProcessorContext context = new MockProcessorContext(TestUtils.tempDirectory(),
Serdes.String(),
Serdes.String(),
collector,
cache);
context.setTime(1);
store.init(context, store);
store.put("a", "b");
assertFalse(logged.isEmpty());
}
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 27, Source: RocksDBKeyValueStoreSupplierTest.java
Example 14: shouldNotBeLoggingEnabledStoreWhenLoggingNotEnabled
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
@Test
public void shouldNotBeLoggingEnabledStoreWhenLoggingNotEnabled() throws Exception {
store = createStore(false, false);
final List<ProducerRecord> logged = new ArrayList<>();
final NoOpRecordCollector collector = new NoOpRecordCollector() {
@Override
public <K, V> void send(final String topic,
K key,
V value,
Integer partition,
Long timestamp,
Serializer<K> keySerializer,
Serializer<V> valueSerializer) {
logged.add(new ProducerRecord<K, V>(topic, partition, timestamp, key, value));
}
};
final MockProcessorContext context = new MockProcessorContext(TestUtils.tempDirectory(),
Serdes.String(),
Serdes.String(),
collector,
cache);
context.setTime(1);
store.init(context, store);
store.put("a", "b");
assertTrue(logged.isEmpty());
}
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 27, Source: RocksDBKeyValueStoreSupplierTest.java
Example 15: defaultSerdeShouldBeConfigured
import org.apache.kafka.common.serialization.Serializer; // import the required package/class
@Test
public void defaultSerdeShouldBeConfigured() {
final Map<String, Object> serializerConfigs = new HashMap<>();
serializerConfigs.put("key.serializer.encoding", "UTF8");
serializerConfigs.put("value.serializer.encoding", "UTF-16");
final Serializer<String> serializer = Serdes.String().serializer();
final String str = "my string for testing";
final String topic = "my topic";
serializer.configure(serializerConfigs, true);
assertEquals("Should get the original string after serialization and deserialization with the configured encoding",
str, streamsConfig.defaultKeySerde().deserializer().deserialize(topic, serializer.serialize(topic, str)));
serializer.configure(serializerConfigs, false);
assertEquals("Should get the original string after serialization and deserialization with the configured encoding",
str, streamsConfig.defaultValueSerde().deserializer().deserialize(topic, serializer.serialize(topic, str)));
}