This page collects typical usage examples of the Java class org.apache.kafka.common.serialization.ByteArraySerializer. If you are unsure what ByteArraySerializer is for or how to use it, the curated class examples below should help.
ByteArraySerializer belongs to the org.apache.kafka.common.serialization package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java samples.
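Before diving into the examples, a minimal, self-contained sketch of the class in action may help: ByteArraySerializer is an identity serializer that hands the producer the byte[] it is given (null stays null), which makes it the usual choice when the payload is already encoded. The broker address and topic name below are placeholders:

import java.nio.charset.StandardCharsets;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class ByteArraySerializerDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        try (Producer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            // the serializer passes these bytes through to the broker unchanged
            producer.send(new ProducerRecord<>("demo-topic", "payload".getBytes(StandardCharsets.UTF_8)));
        }
    }
}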
Example 1: initProducer

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class

private void initProducer(String bootstrapServer) {
    producer = TestUtils.createNewProducer(
            bootstrapServer,
            1,
            60 * 1000L,
            1024L * 1024L,
            0,
            0L,
            5 * 1000L,
            SecurityProtocol.PLAINTEXT,
            null,
            Option$.MODULE$.apply(new Properties()),
            new StringSerializer(),    // key serializer
            new ByteArraySerializer(), // value serializer
            Option$.MODULE$.apply(new Properties()));
}
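The positional arguments of TestUtils.createNewProducer are hard to read. Assuming they map to acks, max block time, buffer memory, retries, linger time, and request timeout, as in the Scala TestUtils of that Kafka era, a rough equivalent using the plain client would be:

private void initProducer(String bootstrapServer) {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
    props.put(ProducerConfig.ACKS_CONFIG, "1");                  // assumed mapping of arg 2
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "60000");      // assumed mapping of arg 3
    props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "1048576");   // assumed mapping of arg 4
    props.put(ProducerConfig.RETRIES_CONFIG, "0");               // assumed mapping of arg 5
    props.put(ProducerConfig.LINGER_MS_CONFIG, "0");             // assumed mapping of arg 6
    props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, "5000"); // assumed mapping of arg 7
    producer = new KafkaProducer<>(props, new StringSerializer(), new ByteArraySerializer());
}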
Example 2: produceRecords

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class
private static void produceRecords(String bootstrapServers) {
    Properties properties = new Properties();
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    Producer<Long, byte[]> producer = new KafkaProducer<>(properties);
    LongStream.rangeClosed(1, 100).boxed()
            .map(number ->
                    new ProducerRecord<>(
                            TOPIC,                                                      // topic
                            number,                                                     // key
                            String.format("record-%s", number.toString()).getBytes()))  // value
            .forEach(record -> producer.send(record));
    producer.close();
}
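For completeness, here is a sketch of a matching consumer for these records; the group id is an assumption, and TOPIC is the same constant used above:

private static void consumeRecords(String bootstrapServers) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group"); // assumed group id
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    try (Consumer<Long, byte[]> consumer = new KafkaConsumer<>(props)) {
        consumer.subscribe(Collections.singleton(TOPIC));
        // one poll is enough for a demo; production code would loop
        for (ConsumerRecord<Long, byte[]> record : consumer.poll(1000)) {
            System.out.printf("%d -> %s%n", record.key(), new String(record.value(), StandardCharsets.UTF_8));
        }
    }
}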
Example 3: produceRecords

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class
private static void produceRecords(String bootstrapServers) {
    Properties properties = new Properties();
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    Producer<String, byte[]> producer = new KafkaProducer<>(properties);
    IntStream.rangeClosed(1, 100).boxed()
            .map(number -> new ProducerRecord<>(
                    TOPIC,                                                                          // topic
                    number.toString(),                                                              // key
                    UserAvroSerdes.serialize(new User(String.format("user-%s", number.toString()))))) // value
            .forEach(record -> producer.send(record));
    producer.close();
}
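UserAvroSerdes is project-specific and its source is not shown here. A plausible sketch of its serialize method, assuming User is an Avro-generated SpecificRecord (the real implementation may differ):

// Hypothetical implementation of UserAvroSerdes.serialize.
public static byte[] serialize(User user) {
    try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
        BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
        new SpecificDatumWriter<>(User.class).write(user, encoder);
        encoder.flush();
        return out.toByteArray();
    } catch (IOException e) {
        throw new RuntimeException("failed to serialize user", e);
    }
}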
Example 4: main

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class

public static void main(String[] args) {
    final ActorSystem system = ActorSystem.create("KafkaProducerSystem");
    final Materializer materializer = ActorMaterializer.create(system);
    final ProducerSettings<byte[], String> producerSettings =
            ProducerSettings
                    .create(system, new ByteArraySerializer(), new StringSerializer())
                    .withBootstrapServers("localhost:9092");
    CompletionStage<Done> done =
            Source.range(1, 100)
                    .map(n -> n.toString())
                    .map(elem ->
                            new ProducerRecord<byte[], String>(
                                    "topic1-ts",
                                    0,                              // partition
                                    Instant.now().getEpochSecond(), // timestamp
                                    null,                           // key
                                    elem))                          // value
                    .runWith(Producer.plainSink(producerSettings), materializer);
    done.whenComplete((d, ex) -> System.out.println("sent"));
}
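Two caveats on this example: Kafka record timestamps are expressed in milliseconds, so Instant.now().getEpochSecond() yields timestamps that land in early 1970 when interpreted as milliseconds, and the ActorSystem is never shut down. A variant of the completion callback that also terminates the system:

done.whenComplete((result, failure) -> {
    if (failure != null) {
        failure.printStackTrace();
    } else {
        System.out.println("sent");
    }
    system.terminate(); // release the actor system once the stream completes
});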
Example 5: produceRecords

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class
private static void produceRecords() {
    Properties properties = new Properties();
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    Producer<Integer, byte[]> producer = new KafkaProducer<>(properties);
    IntStream.rangeClosed(1, 10000).boxed()
            .map(number ->
                    new ProducerRecord<>(
                            TOPIC,
                            1,                                      // key
                            KafkaProducerUtil.createMessage(1000))) // value
            .forEach(record -> {
                producer.send(record);
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            });
    producer.close();
}
Example 6: produceRecords

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class
private static void produceRecords(String bootstrapServers) {
    Properties properties = new Properties();
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    Producer<Integer, byte[]> producer = new KafkaProducer<>(properties);
    IntStream.rangeClosed(1, 10000).boxed()
            .map(number ->
                    new ProducerRecord<>(
                            TOPIC,
                            1,                                      // key
                            KafkaProducerUtil.createMessage(1000))) // value
            .forEach(record -> {
                producer.send(record);
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            });
    producer.close();
}
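KafkaProducerUtil.createMessage(int) is not shown in either snippet. One plausible implementation, assuming it simply builds a random payload of the requested size:

// Hypothetical helper; the project's actual implementation may differ.
public static byte[] createMessage(int size) {
    byte[] payload = new byte[size];
    ThreadLocalRandom.current().nextBytes(payload);
    return payload;
}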
Example 7: setupAndCreateKafkaBasedLog

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class
KafkaBasedLog<String, byte[]> setupAndCreateKafkaBasedLog(String topic, final WorkerConfig config) {
    Map<String, Object> producerProps = new HashMap<>();
    producerProps.putAll(config.originals());
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    producerProps.put(ProducerConfig.RETRIES_CONFIG, Integer.MAX_VALUE);

    Map<String, Object> consumerProps = new HashMap<>();
    consumerProps.putAll(config.originals());
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

    Map<String, Object> adminProps = new HashMap<>(config.originals());
    NewTopic topicDescription = TopicAdmin.defineTopic(topic).
            compacted().
            partitions(1).
            replicationFactor(config.getShort(DistributedConfig.CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG)).
            build();

    return createKafkaBasedLog(topic, producerProps, consumerProps, new ConsumeCallback(), topicDescription, adminProps);
}
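A sketch of how the returned log is typically driven, based on the public methods of Connect's KafkaBasedLog; the topic name, key, and payload are placeholders:

KafkaBasedLog<String, byte[]> configLog = setupAndCreateKafkaBasedLog("connect-configs", config);
configLog.start();                        // replays the topic to the end, then starts the work thread
configLog.send("some-key", new byte[0]);  // appends a record asynchronously
configLog.stop();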
Example 8: testWindowedSerializerNoArgConstructors

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class
@Test
public void testWindowedSerializerNoArgConstructors() {
    Map<String, String> props = new HashMap<>();
    // test key[value].serializer.inner.class takes precedence over serializer.inner.class
    WindowedSerializer<StringSerializer> windowedSerializer = new WindowedSerializer<>();
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "host:1");
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
    props.put("key.serializer.inner.class", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("serializer.inner.class", "org.apache.kafka.common.serialization.StringSerializer");
    windowedSerializer.configure(props, true);
    Serializer<?> inner = windowedSerializer.innerSerializer();
    assertNotNull("Inner serializer should be not null", inner);
    assertTrue("Inner serializer type should be StringSerializer", inner instanceof StringSerializer);
    // test serializer.inner.class
    props.put("serializer.inner.class", "org.apache.kafka.common.serialization.ByteArraySerializer");
    props.remove("key.serializer.inner.class");
    props.remove("value.serializer.inner.class");
    WindowedSerializer<?> windowedSerializer1 = new WindowedSerializer<>();
    windowedSerializer1.configure(props, false);
    Serializer<?> inner1 = windowedSerializer1.innerSerializer();
    assertNotNull("Inner serializer should be not null", inner1);
    assertTrue("Inner serializer type should be ByteArraySerializer", inner1 instanceof ByteArraySerializer);
}
Example 9: setProduceConsumeProperties

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class
private Properties setProduceConsumeProperties(final String clientId) {
    Properties props = new Properties();
    props.put(ProducerConfig.CLIENT_ID_CONFIG, clientId);
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    // the socket buffer needs to be large, especially when running in AWS with
    // high latency; if running locally the default is fine.
    props.put(ProducerConfig.SEND_BUFFER_CONFIG, SOCKET_SIZE_BYTES);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    // the socket buffer needs to be large, especially when running in AWS with
    // high latency; if running locally the default is fine.
    props.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, SOCKET_SIZE_BYTES);
    props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, MAX_POLL_RECORDS);
    return props;
}
Example 10: testConstructorFailureCloseResource

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class
@Test
public void testConstructorFailureCloseResource() {
    Properties props = new Properties();
    props.setProperty(ProducerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "some.invalid.hostname.foo.bar.local:9999");
    props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
    final int oldInitCount = MockMetricsReporter.INIT_COUNT.get();
    final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get();
    try {
        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<byte[], byte[]>(
                props, new ByteArraySerializer(), new ByteArraySerializer());
    } catch (KafkaException e) {
        assertEquals(oldInitCount + 1, MockMetricsReporter.INIT_COUNT.get());
        assertEquals(oldCloseCount + 1, MockMetricsReporter.CLOSE_COUNT.get());
        assertEquals("Failed to construct kafka producer", e.getMessage());
        return;
    }
    fail("should have caught an exception and returned");
}
Example 11: KafkaManager

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class

public KafkaManager(final LoggerContext loggerContext, final String name, final String topic, final String zkServers, final String mail, final String rpc,
                    final String app, final String host, final Property[] properties) {
    super(loggerContext, name);
    this.topic = topic;
    this.zkServers = zkServers;
    this.mail = mail;
    this.rpc = rpc;
    this.app = app;
    this.orginApp = app;
    this.host = host;
    this.checkAndSetConfig(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    this.checkAndSetConfig(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    // Set the partitioner class: the custom KeyModPartitioner sends records with the same key to the same partition
    this.checkAndSetConfig(ProducerConfig.PARTITIONER_CLASS_CONFIG, KeyModPartitioner.class.getName());
    // Parameters from the XML configuration
    for (final Property property : properties) {
        this.config.put(property.getName(), property.getValue());
    }
    // The host is obtained externally because the application is deployed in a container
    this.config.put(ProducerConfig.CLIENT_ID_CONFIG, this.app + Constants.MIDDLE_LINE + this.host + Constants.MIDDLE_LINE + "log4j2");
}
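KeyModPartitioner is a custom class from the host project and is not shown here. A minimal sketch of such a mod-style partitioner; the project's real class may differ:

import java.util.Arrays;
import java.util.Map;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

// Hypothetical sketch: identical key bytes always hash to the same partition.
public class KeyModPartitioner implements Partitioner {
    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionsForTopic(topic).size();
        if (keyBytes == null) {
            return 0; // keyless records fall back to a fixed partition
        }
        return (Arrays.hashCode(keyBytes) & Integer.MAX_VALUE) % numPartitions;
    }

    @Override
    public void close() {
    }

    @Override
    public void configure(Map<String, ?> configs) {
    }
}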
Example 12: testConstructorFailureCloseResource

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class
@Test
public void testConstructorFailureCloseResource() {
    Properties props = new Properties();
    props.setProperty(ProducerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "some.invalid.hostname.foo.bar:9999");
    props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
    final int oldInitCount = MockMetricsReporter.INIT_COUNT.get();
    final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get();
    try {
        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<byte[], byte[]>(
                props, new ByteArraySerializer(), new ByteArraySerializer());
    } catch (KafkaException e) {
        Assert.assertEquals(oldInitCount + 1, MockMetricsReporter.INIT_COUNT.get());
        Assert.assertEquals(oldCloseCount + 1, MockMetricsReporter.CLOSE_COUNT.get());
        Assert.assertEquals("Failed to construct kafka producer", e.getMessage());
        return;
    }
    Assert.fail("should have caught an exception and returned");
}
Example 13: testZeroLengthValue

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class
@Test
public void testZeroLengthValue() throws Exception {
    Properties producerPropertyOverrides = new Properties();
    producerPropertyOverrides.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    try (LiKafkaProducer producer = createProducer(producerPropertyOverrides)) {
        producer.send(new ProducerRecord<>("testZeroLengthValue", "key", new byte[0])).get();
    }
    Properties consumerProps = new Properties();
    consumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    consumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    try (LiKafkaConsumer consumer = createConsumer(consumerProps)) {
        consumer.subscribe(Collections.singleton("testZeroLengthValue"));
        long startMs = System.currentTimeMillis();
        ConsumerRecords records = ConsumerRecords.empty();
        while (records.isEmpty() && System.currentTimeMillis() < startMs + 30000) {
            records = consumer.poll(100);
        }
        assertEquals(1, records.count());
        ConsumerRecord record = (ConsumerRecord) records.iterator().next();
        assertEquals("key", record.key());
        assertEquals(((byte[]) record.value()).length, 0);
    }
}
Example 14: buildIOWriter

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class
@Override
public PTransform<? super PCollection<BeamRecord>, PDone> buildIOWriter() {
    checkArgument(topics != null && topics.size() == 1,
            "Only one topic can be acceptable as output.");
    return new PTransform<PCollection<BeamRecord>, PDone>() {
        @Override
        public PDone expand(PCollection<BeamRecord> input) {
            return input.apply("out_reformat", getPTransformForOutput()).apply("persistent",
                    KafkaIO.<byte[], byte[]>write()
                            .withBootstrapServers(bootstrapServers)
                            .withTopic(topics.get(0))
                            .withKeySerializer(ByteArraySerializer.class)
                            .withValueSerializer(ByteArraySerializer.class));
        }
    };
}
Example 15: InfluxDBKafkaSender

import org.apache.kafka.common.serialization.ByteArraySerializer; // import the required package/class

public InfluxDBKafkaSender(String database, TimeUnit timePrecision, String measurementPrefix) {
    super(database, timePrecision, measurementPrefix);
    int idx = database.indexOf("@");
    String hosts;
    if (idx != -1) {
        topic = database.substring(0, idx);
        hosts = database.substring(idx + 1);
    } else {
        throw new IllegalArgumentException("invalid database format: " + database + ", expected: topic@host1,host2...");
    }
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, hosts);
    props.put(ProducerConfig.CLIENT_ID_CONFIG, KAFKA_CLIENT_ID);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    kafkaProducer = new KafkaProducer<>(props);
}