This page collects typical usage examples of the Java class org.apache.kafka.clients.producer.KafkaProducer. If you have been wondering what KafkaProducer is for, how to use it, or what working code with it looks like, the curated class examples below should help.
The KafkaProducer class belongs to the org.apache.kafka.clients.producer package. 15 code examples of the class are shown below, sorted by popularity by default.
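Before the individual examples, here is a minimal sketch of the typical KafkaProducer lifecycle: build a Properties object, construct the producer, send, and close. The broker address and topic name are illustrative assumptions, not values taken from the examples below.
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
props.put("key.serializer", StringSerializer.class.getName());
props.put("value.serializer", StringSerializer.class.getName());
// try-with-resources closes the producer, which flushes any buffered records
try (Producer<String, String> producer = new KafkaProducer<>(props)) {
    producer.send(new ProducerRecord<>("demo-topic", "key", "value")); // async; returns a Future<RecordMetadata>
}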
Example 1: main
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
public static void main(String[] args) throws InterruptedException, IOException {
    UncaughtExceptionHandling.setup();
    KafkaProducer<ByteBuffer, ByteBuffer> msgProducer = KAFKA_CLIENTS
            .createProducer(ByteBufferSerializer.class, ByteBufferSerializer.class);
    LOG.info("Sending ...");
    for (int i = 0; i < TOTAL_MSGS; i++) {
        ByteBuffer data = ByteBuffer.allocate(4).putInt(i);
        msgProducer.send(new ProducerRecord<>(KMQ_CONFIG.getMsgTopic(), data));
        Thread.sleep(100L); // the method declares InterruptedException, so no wrapping is needed
        LOG.info(String.format("Sent message %d", i));
    }
    msgProducer.close();
    LOG.info("Sent");
}
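The loop above fires and forgets each record, so broker-side failures go unnoticed. A hedged variant of the same send, assuming the msgProducer, KMQ_CONFIG, data, and LOG names from the example, attaches a Callback so delivery errors surface:
msgProducer.send(new ProducerRecord<>(KMQ_CONFIG.getMsgTopic(), data),
        (metadata, exception) -> {
            if (exception != null) {
                LOG.error("Send failed", exception);                     // delivery error reported by the broker/client
            } else {
                LOG.info("Stored at offset " + metadata.offset());       // acknowledged write
            }
        });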
Example 2: KafkaConsumerEvent
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
public KafkaConsumerEvent(String topic) {
    super(0L);
    this.topic = topic;
    Properties props = HeartBeatConfigContainer.getInstance().getKafkaConsumerConfig();
    Properties producerProps = HeartBeatConfigContainer.getInstance().getKafkaProducerConfig();
    try {
        dataConsumer = new KafkaConsumer<>(props);
        partition0 = new TopicPartition(this.topic, 0);
        dataConsumer.assign(Arrays.asList(partition0));
        dataConsumer.seekToEnd(Arrays.asList(partition0));
        KafkaConsumerContainer.getInstances().putConsumer(this.topic, dataConsumer);
        statProducer = new KafkaProducer<>(producerProps);
    } catch (Exception e) {
        e.printStackTrace();
    }
    startTime = System.currentTimeMillis();
}
Example 3: main
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
public static void main(String[] args) throws InterruptedException {
    Properties props = new Properties();
    props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ACKS_CONFIG, "all");
    props.put(RETRIES_CONFIG, 0);
    props.put(BATCH_SIZE_CONFIG, 32000);
    props.put(LINGER_MS_CONFIG, 100);
    props.put(BUFFER_MEMORY_CONFIG, 33554432);
    props.put(KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.LongSerializer");
    props.put(VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.LongSerializer");
    Producer<Long, Long> producer = new KafkaProducer<>(props);
    long t1 = System.currentTimeMillis();
    long i = 0;
    for (; i < 1000000; i++) {
        producer.send(new ProducerRecord<>("produktion", i, i));
    }
    producer.send(new ProducerRecord<>("produktion", -1L, -1L)); // sentinel record marking the end of the stream
    System.out.println("done: " + i + " messages in " + (System.currentTimeMillis() - t1) + " ms");
    producer.close();
}
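One caveat: send() is asynchronous, so the elapsed time printed above mostly measures how fast records were buffered, not delivered. A small sketch of how the timing could be made to cover delivery, assuming the same producer and t1 variables, is to flush first:
producer.flush(); // block until every buffered record has been sent and acknowledged
System.out.println("done: " + i + " messages in " + (System.currentTimeMillis() - t1) + " ms");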
Example 4: RealTimeTradeProducer
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
private RealTimeTradeProducer(int index, String broker, String topic, int tradesPerSecond, int keysFrom, int keysTo)
        throws IOException, URISyntaxException {
    if (tradesPerSecond <= 0) {
        throw new RuntimeException("tradesPerSecond=" + tradesPerSecond);
    }
    this.index = index;
    this.topic = topic;
    this.tradesPerSecond = tradesPerSecond;
    tickers = new String[keysTo - keysFrom];
    Arrays.setAll(tickers, i -> "T-" + Integer.toString(i + keysFrom));
    Properties props = new Properties();
    props.setProperty("bootstrap.servers", broker);
    props.setProperty("key.serializer", LongSerializer.class.getName());
    props.setProperty("value.serializer", TradeSerializer.class.getName());
    this.producer = new KafkaProducer<>(props);
}
Example 5: onScheduled
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
@OnScheduled
public void onScheduled(final ProcessContext context) {
    try {
        topic = context.getProperty(TOPIC).getValue();
        brokerIP = context.getProperty(BROKERIP).getValue();
        props = new Properties();
        props.put("bootstrap.servers", brokerIP);
        props.put("acks", "all");
        props.put("retries", 0);
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producer = new KafkaProducer<>(props);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 6: main
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
public static void main(String[] args) throws Exception {
    CommandLine commandLine = parseCommandLine(args);
    String zkUrl = commandLine.getOptionValue(ZOOKEEPER);
    String topic = commandLine.getOptionValue(TOPIC);
    int numMessages = Integer.parseInt(commandLine.getOptionValue(NUM_MESSAGES));
    Random random = new Random();
    Properties props = OperatorUtil.createKafkaProducerProperties(zkUrl);
    KafkaProducer<byte[], byte[]> kafkaProducer = new KafkaProducer<>(props);
    byte[] key = new byte[16];
    byte[] data = new byte[1024];
    for (int i = 0; i < numMessages; i++) {
        for (int j = 0; j < data.length; j++) {
            data[j] = (byte) random.nextInt();
        }
        ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(
                topic, 0, System.currentTimeMillis(), key, data);
        Future<RecordMetadata> future = kafkaProducer.send(producerRecord);
        future.get(); // block until the record is acknowledged
        if (i % 100 == 0) {
            System.out.println("Wrote " + i + " messages to Kafka");
        }
    }
}
Example 7: doTestNullKeyNoHeader
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
private void doTestNullKeyNoHeader() throws Exception {
    final KafkaChannel channel = startChannel(false);
    Properties props = channel.getProducerProps();
    KafkaProducer<String, byte[]> producer = new KafkaProducer<>(props);
    for (int i = 0; i < 50; i++) {
        ProducerRecord<String, byte[]> data =
                new ProducerRecord<>(topic, null, String.valueOf(i).getBytes());
        producer.send(data).get();
    }
    ExecutorCompletionService<Void> submitterSvc =
            new ExecutorCompletionService<>(Executors.newCachedThreadPool());
    List<Event> events = pullEvents(channel, submitterSvc, 50, false, false);
    wait(submitterSvc, 5);
    List<String> finals = new ArrayList<>(50);
    for (int i = 0; i < 50; i++) {
        finals.add(i, events.get(i).getHeaders().get(KEY_HEADER));
    }
    for (int i = 0; i < 50; i++) {
        Assert.assertNull(finals.get(i));
    }
    channel.stop();
}
Example 8: test_create_tracing_serializer
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
@Test
public void test_create_tracing_serializer() throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", "127.0.0.1:9092"); // a subset of the cluster, used only to bootstrap discovery
    props.put("acks", "all"); // wait for the full commit of the record: slowest, but most durable
    props.put("retries", 3); // number of retries when a request fails
    props.put("batch.size", 16384); // batch size in bytes
    props.put("linger.ms", 1); // by default requests go out immediately even if the buffer has room; waiting 1 ms lets batches fill up and reduces the number of requests sent to the broker
    props.put("buffer.memory", 33554432); // total memory available to the producer for buffering
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    KafkaProducer<String, String> producer = new KafkaProducer<>(props);
    for (int i = 0; i < 1000; i++) {
        producer.send(new ProducerRecord<>("test", "hello", "kafka - " + i));
        Thread.sleep(10000);
    }
}
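String config keys like those above are easy to mistype. As a sketch of the same configuration written against the ProducerConfig constants (the values are unchanged; only the key spelling moves into the library):
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
props.put(ProducerConfig.ACKS_CONFIG, "all");
props.put(ProducerConfig.RETRIES_CONFIG, 3);
props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());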
Example 9: getProducer
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
static Producer<String, String> getProducer(KafkaOutputConfiguration configuration) {
    if (producer == null) {
        synchronized (KafkaOutput.class) {
            if (producer != null) {
                return producer;
            }
            Properties props = new Properties();
            props.put("bootstrap.servers", configuration.getHost() + ":" + configuration.getPort());
            props.put("acks", "all");
            props.put("retries", 0);
            props.put("request.required.acks", "0"); // legacy (pre-0.9) producer setting; the new Java producer only honors "acks"
            props.put("batch.size", 64);
            props.put("linger.ms", 1);
            props.put("buffer.memory", 1024);
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            producer = new KafkaProducer<>(props);
        }
        return producer;
    } else {
        return producer;
    }
}
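A caveat on the double-checked locking above: it is only safe under the Java memory model if the shared field is volatile. A one-line sketch of the assumed field declaration, which the example itself does not show:
private static volatile Producer<String, String> producer; // volatile publication makes the double-checked locking safe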
Example 10: send
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
@Override
public void send(Long k, byte[] v) {
    KafkaProducer<Long, byte[]> p = getWorker();
    p.initTransactions();
    p.beginTransaction();
    Future<RecordMetadata> res = p.send(new ProducerRecord<>(topic, k, v));
    RecordMetadata record;
    try {
        record = res.get();
        offsets.clear();
        offsets.put(new TopicPartition(topic, record.partition()), new OffsetAndMetadata(record.offset()));
        p.sendOffsetsToTransaction(offsets, MallConstants.ORDER_GROUP);
        p.commitTransaction();
    } catch (InterruptedException | ExecutionException e) {
        p.abortTransaction();
    }
}
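For context, Kafka's transactional API expects initTransactions() to be called once per producer instance, after a transactional.id has been configured, rather than before every send as above. A minimal hedged sketch of that lifecycle; the broker address, transactional id, and topic are illustrative assumptions:
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.LongSerializer;

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");   // assumed broker address
props.put("transactional.id", "order-producer-1");  // assumed id; required before initTransactions()
props.put("key.serializer", LongSerializer.class.getName());
props.put("value.serializer", ByteArraySerializer.class.getName());
Producer<Long, byte[]> p = new KafkaProducer<>(props);
p.initTransactions();                                // once per producer instance, at startup
try {
    p.beginTransaction();
    p.send(new ProducerRecord<>("orders", 1L, new byte[] {1})); // "orders" is an illustrative topic
    p.commitTransaction();
} catch (KafkaException e) {
    p.abortTransaction();                            // roll back everything sent in this transaction
}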
Example 11: sendData
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
public void sendData(String data) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("acks", "all");
    props.put("retries", 0);
    props.put("batch.size", 16384);
    props.put("linger.ms", 1);
    props.put("buffer.memory", 33554432);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    Producer<String, String> producer = new KafkaProducer<>(props);
    Map<MetricName, ? extends Metric> metrics = producer.metrics();
    System.out.println(metrics);
    for (int i = 0; i < 100; i++) {
        producer.send(new ProducerRecord<String, String>("video_view", data));
    }
    producer.close();
}
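Printing the whole metrics map is noisy; individual gauges can be picked out by name. A sketch, assuming a client version (1.0 or later) where Metric.metricValue() exists, that pulls Kafka's record-send-rate producer metric out of the metrics map above:
for (Map.Entry<MetricName, ? extends Metric> entry : metrics.entrySet()) {
    if ("record-send-rate".equals(entry.getKey().name())) { // average records sent per second
        System.out.println(entry.getKey().group() + " / " + entry.getKey().name()
                + " = " + entry.getValue().metricValue());
    }
}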
Example 12: shouldWriteThenRead
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
@Test
public void shouldWriteThenRead() throws Exception {
    // Create a consumer
    ConsumerIterator<String, String> it = buildConsumer(Original.topic);
    // Create a producer
    producer = new KafkaProducer<>(producerProps());
    // Send a message
    producer.send(new ProducerRecord<>(Original.topic, "message")).get();
    // Read it back
    MessageAndMetadata<String, String> messageAndMetadata = it.next();
    String value = messageAndMetadata.message();
    assertThat(value, is("message"));
}
Example 13: produceRecords
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
private static void produceRecords(String bootstrapServers) {
    Properties properties = new Properties();
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    Producer<Long, byte[]> producer = new KafkaProducer<>(properties);
    LongStream.rangeClosed(1, 100).boxed()
            .map(number -> new ProducerRecord<>(
                    TOPIC,                                           // topic
                    number,                                          // key
                    String.format("record-%s", number).getBytes()))  // value
            .forEach(record -> producer.send(record));
    producer.close();
}
Example 14: start
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
public void start() throws InterruptedException {
    RandomGenerator random = RandomManager.getRandom();
    Properties props = ConfigUtils.keyValueToProperties(
            "bootstrap.servers", "localhost:" + kafkaPort,
            "key.serializer", "org.apache.kafka.common.serialization.StringSerializer",
            "value.serializer", "org.apache.kafka.common.serialization.StringSerializer",
            "compression.type", "gzip",
            "batch.size", 0,
            "acks", 1,
            "max.request.size", 1 << 26 // TODO
    );
    try (Producer<String, String> producer = new KafkaProducer<>(props)) {
        for (int i = 0; i < howMany; i++) {
            Pair<String, String> datum = datumGenerator.generate(i, random);
            ProducerRecord<String, String> record =
                    new ProducerRecord<>(topic, datum.getFirst(), datum.getSecond());
            producer.send(record);
            log.debug("Sent datum {} = {}", record.key(), record.value());
            if (intervalMsec > 0) {
                Thread.sleep(intervalMsec);
            }
        }
    }
}
Example 15: produceRecords
import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
private static void produceRecords(String bootstrapServers) {
    Properties properties = new Properties();
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    Producer<Integer, byte[]> producer = new KafkaProducer<>(properties);
    IntStream.rangeClosed(1, 10000).boxed()
            .map(number -> new ProducerRecord<>(
                    TOPIC,
                    1,                                      // key: every record uses the same key
                    KafkaProducerUtil.createMessage(1000))) // value
            .forEach(record -> {
                producer.send(record);
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt(); // restore the interrupt flag instead of swallowing it
                }
            });
    producer.close();
}