

Java KafkaProducer Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.clients.producer.KafkaProducer. If you are wondering how KafkaProducer is used, what it is for, or what real-world code with it looks like, the curated class examples below should help.


The KafkaProducer class belongs to the org.apache.kafka.clients.producer package. Fifteen code examples of the class are shown below, sorted by popularity by default.
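All fifteen examples follow the same basic pattern: build a Properties object, construct a KafkaProducer from it, send ProducerRecord instances, and close the producer. Here is a minimal, self-contained sketch of that pattern; the broker address localhost:9092 and the topic demo-topic are placeholders, not taken from any of the projects below.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class MinimalProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        // try-with-resources closes the producer, flushing any buffered records.
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "key", "value")); // placeholder topic
        }
    }
}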

Example 1: main

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
public static void main(String[] args) throws InterruptedException, IOException {
    UncaughtExceptionHandling.setup();

    KafkaProducer<ByteBuffer, ByteBuffer> msgProducer = KAFKA_CLIENTS
            .createProducer(ByteBufferSerializer.class, ByteBufferSerializer.class);

    LOG.info("Sending ...");

    for (int i = 0; i < TOTAL_MSGS; i++) {
        ByteBuffer data = ByteBuffer.allocate(4).putInt(i);
        msgProducer.send(new ProducerRecord<>(KMQ_CONFIG.getMsgTopic(), data));
        Thread.sleep(100L); // pace the sends; the method already declares InterruptedException
        LOG.info(String.format("Sent message %d", i));
    }
    }

    msgProducer.close();

    LOG.info("Sent");
}
 
Developer: softwaremill, Project: kmq, Lines: 20, Source: StandaloneSender.java

Example 2: KafkaConsumerEvent

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
public KafkaConsumerEvent(String topic) {
    super(0L);
    this.topic = topic;
    Properties props = HeartBeatConfigContainer.getInstance().getKafkaConsumerConfig();
    Properties producerProps = HeartBeatConfigContainer.getInstance().getKafkaProducerConfig();
    try {
        dataConsumer = new KafkaConsumer<>(props);
        partition0 = new TopicPartition(this.topic, 0);
        dataConsumer.assign(Arrays.asList(partition0));
        dataConsumer.seekToEnd(Arrays.asList(partition0));
        KafkaConsumerContainer.getInstances().putConsumer(this.topic, dataConsumer);

        statProducer = new KafkaProducer<>(producerProps);
    } catch (Exception e) {
        // Creating the consumer or producer failed; log and continue.
        e.printStackTrace();
    }
    startTime = System.currentTimeMillis();
}
 
Developer: BriData, Project: DBus, Lines: 20, Source: KafkaConsumerEvent.java

Example 3: main

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
public static void main(String[] args) throws InterruptedException {

    Properties props = new Properties();
    props.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ACKS_CONFIG, "all");
    props.put(RETRIES_CONFIG, 0);
    props.put(BATCH_SIZE_CONFIG, 32000);
    props.put(LINGER_MS_CONFIG, 100);
    props.put(BUFFER_MEMORY_CONFIG, 33554432);
    props.put(KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.LongSerializer");
    props.put(VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.LongSerializer");

    Producer<Long, Long> producer = new KafkaProducer<>(props);

    long t1 = System.currentTimeMillis();

    long i = 0;
    for (; i < 1000000; i++) {
        producer.send(new ProducerRecord<>("produktion", i, i));
    }
    // A final record with key and value -1 marks the end of the stream for consumers.
    producer.send(new ProducerRecord<>("produktion", -1L, -1L));
    // "fertig ... Nachrichten in ... ms" is German for "done ... messages in ... ms".
    System.out.println("fertig " + i + " Nachrichten in " + (System.currentTimeMillis() - t1) + " ms");

    producer.close();
}
 
Developer: predic8, Project: apache-kafka-demos, Lines: 27, Source: PerformanceProducer.java

Example 4: RealTimeTradeProducer

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
private RealTimeTradeProducer(int index, String broker, String topic, int tradesPerSecond, int keysFrom, int keysTo) throws IOException,
        URISyntaxException {
    if (tradesPerSecond <= 0) {
        throw new RuntimeException("tradesPerSecond=" + tradesPerSecond);
    }
    this.index = index;
    this.topic = topic;
    this.tradesPerSecond = tradesPerSecond;
    tickers = new String[keysTo - keysFrom];
    Arrays.setAll(tickers, i -> "T-" + Integer.toString(i + keysFrom));
    Properties props = new Properties();
    props.setProperty("bootstrap.servers", broker);
    props.setProperty("key.serializer", LongSerializer.class.getName());
    props.setProperty("value.serializer", TradeSerializer.class.getName());
    this.producer = new KafkaProducer<>(props);
}
 
Developer: hazelcast, Project: big-data-benchmark, Lines: 17, Source: RealTimeTradeProducer.java

Example 5: onScheduled

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
@OnScheduled
public void onScheduled(final ProcessContext context) {
    try {
        topic = context.getProperty(TOPIC).getValue();
        brokerIP = context.getProperty(BROKERIP).getValue();
        props = new Properties();
        props.put("bootstrap.servers", brokerIP);
        props.put("acks", "all");
        props.put("retries", 0);
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producer = new KafkaProducer<>(props);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: dream-lab, Project: echo, Lines: 20, Source: KafkaFlowFilesProducer.java

Example 6: main

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
public static void main(String[] args) throws Exception {
  CommandLine commandLine = parseCommandLine(args);
  String zkUrl = commandLine.getOptionValue(ZOOKEEPER);
  String topic = commandLine.getOptionValue(TOPIC);
  int numMessages = Integer.parseInt(commandLine.getOptionValue(NUM_MESSAGES));

  Random random = new Random();
  Properties props = OperatorUtil.createKafkaProducerProperties(zkUrl);
  KafkaProducer<byte[], byte[]> kafkaProducer = new KafkaProducer<>(props);

  byte[] key = new byte[16];
  byte[] data = new byte[1024];
  for (int i = 0; i < numMessages; i++) {
    for (int j = 0; j < data.length; j++) {
      data[j] = (byte)random.nextInt();
    }
    ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(
        topic, 0, System.currentTimeMillis(), key, data);
    Future<RecordMetadata> future = kafkaProducer.send(producerRecord);
    future.get(); // block on each send, making the writes synchronous
    if (i % 100 == 0) {
      System.out.println("Wrote " + i + " messages to kafka");
    }
  }
}
 
Developer: pinterest, Project: doctorkafka, Lines: 26, Source: KafkaWriter.java

Example 7: doTestNullKeyNoHeader

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
private void doTestNullKeyNoHeader() throws Exception {
  final KafkaChannel channel = startChannel(false);
  Properties props = channel.getProducerProps();
  KafkaProducer<String, byte[]> producer = new KafkaProducer<>(props);

  for (int i = 0; i < 50; i++) {
    ProducerRecord<String, byte[]> data =
        new ProducerRecord<>(topic, null, String.valueOf(i).getBytes());
    producer.send(data).get();
  }
  ExecutorCompletionService<Void> submitterSvc =
      new ExecutorCompletionService<>(Executors.newCachedThreadPool());
  List<Event> events = pullEvents(channel, submitterSvc, 50, false, false);
  wait(submitterSvc, 5);
  List<String> finals = new ArrayList<>(50);
  for (int i = 0; i < 50; i++) {
    finals.add(i, events.get(i).getHeaders().get(KEY_HEADER));
  }
  for (int i = 0; i < 50; i++) {
    Assert.assertNull(finals.get(i));
  }
  channel.stop();
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 25, Source: TestKafkaChannel.java

Example 8: test_create_tracing_serializer

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
@Test
public void test_create_tracing_serializer() throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", "127.0.0.1:9092");//该地址是集群的子集,用来探测集群。
    props.put("acks", "all");// 记录完整提交,最慢的但是最大可能的持久化
    props.put("retries", 3);// 请求失败重试的次数
    props.put("batch.size", 16384);// batch的大小
    props.put("linger.ms", 1);// 默认情况即使缓冲区有剩余的空间,也会立即发送请求,设置一段时间用来等待从而将缓冲区填的更多,单位为毫秒,producer发送数据会延迟1ms,可以减少发送到kafka服务器的请求数据
    props.put("buffer.memory", 33554432);// 提供给生产者缓冲内存总量
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    KafkaProducer<String, String> producer = new KafkaProducer<>(props);
    for (int i = 0; i < 1000; i++) {
        producer.send(new ProducerRecord<>("test", "hello", "kafka - " + i));
        Thread.sleep(10000);
    }
}
 
Developer: YanXs, Project: nighthawk, Lines: 18, Source: KafkaProducerTest.java
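The raw string keys in Example 8 are easy to mistype, and a typo is silently ignored at runtime. As Examples 3 and 13 show, the Kafka client library provides constants in org.apache.kafka.clients.producer.ProducerConfig; a sketch of the same configuration using them (the broker address is a placeholder):

import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

// Same settings as Example 8, but a misspelled key now fails at compile time.
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092"); // placeholder broker
props.put(ProducerConfig.ACKS_CONFIG, "all");
props.put(ProducerConfig.RETRIES_CONFIG, 3);
props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());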

Example 9: getProducer

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
static Producer<String, String> getProducer(KafkaOutputConfiguration configuration) {
    // Lazily creates a shared producer via double-checked locking; for this
    // to be fully thread-safe the 'producer' field should be declared volatile.
    if (producer == null) {
        synchronized (KafkaOutput.class) {
            if (producer != null) {
                return producer;
            }
            Properties props = new Properties();
            props.put("bootstrap.servers", configuration.getHost() + ":" + configuration.getPort());
            props.put("acks", "all");
            props.put("retries", 0);
            props.put("request.required.acks", "0");
            props.put("batch.size", 64);
            props.put("linger.ms", 1);
            props.put("buffer.memory", 1024);
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            producer = new KafkaProducer<>(props);
        }
        return producer;
    } else {
        return producer;
    }
}
 
Developer: DevOpsStudio, Project: Re-Collector, Lines: 24, Source: KafkaOutput.java

Example 10: send

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
@Override
public void send(Long k, byte[] v) {
    KafkaProducer<Long, byte[]> p = getWorker();
    // Note: initTransactions() is meant to be called once per producer,
    // not on every send; see the sketch after this example.
    p.initTransactions();
    p.beginTransaction();
    Future<RecordMetadata> res = p.send(new ProducerRecord<>(topic, k, v));
    RecordMetadata record;
    try {
        record = res.get();
        offsets.clear();
        offsets.put(new TopicPartition(topic, record.partition()), new OffsetAndMetadata(record.offset()));
        p.sendOffsetsToTransaction(offsets, MallConstants.ORDER_GROUP);
        p.commitTransaction();
    } catch (InterruptedException | ExecutionException e) {
        p.abortTransaction();
    }
}
 
Developer: jiumao-org, Project: wechat-mall, Lines: 18, Source: OrderProducer.java
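Example 10 calls initTransactions() inside every send(), but the KafkaProducer API expects it exactly once per producer instance, after construction and before the first beginTransaction(); transactions also require the transactional.id property to be set. A hedged sketch of the conventional shape follows — the transactional.id, broker address, and topic are placeholders, not values from the wechat-mall project:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.LongSerializer;

public static void sendTransactionally(Long key, byte[] value) {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
    props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "order-producer-1"); // placeholder id; required for transactions
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());

    KafkaProducer<Long, byte[]> producer = new KafkaProducer<>(props);
    producer.initTransactions(); // once, right after construction
    try {
        producer.beginTransaction(); // one begin/commit pair per logical batch
        producer.send(new ProducerRecord<>("orders", key, value)); // placeholder topic
        producer.commitTransaction();
    } catch (Exception e) {
        producer.abortTransaction();
    } finally {
        producer.close();
    }
}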

Example 11: sendData

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
public void sendData(String data) {

    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("acks", "all");
    props.put("retries", 0);
    props.put("batch.size", 16384);
    props.put("linger.ms", 1);
    props.put("buffer.memory", 33554432);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

    Producer<String, String> producer = new KafkaProducer<>(props);

    Map<MetricName, ? extends Metric> metrics = producer.metrics();
    System.out.println(metrics);

    for (int i = 0; i < 100; i++) {
        producer.send(new ProducerRecord<>("video_view", data));
    }

    producer.close();
}
 
Developer: alokawi, Project: spark-cassandra-poc, Lines: 24, Source: KafkaDataProducer.java

Example 12: shouldWriteThenRead

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
@Test
public void shouldWriteThenRead() throws Exception {

    //Create a consumer
    ConsumerIterator<String, String> it = buildConsumer(Original.topic);

    //Create a producer
    producer = new KafkaProducer<>(producerProps());

    //send a message
    producer.send(new ProducerRecord<>(Original.topic, "message")).get();

    //read it back
    MessageAndMetadata<String, String> messageAndMetadata = it.next();
    String value = messageAndMetadata.message();
    assertThat(value, is("message"));
}
 
Developer: telstra, Project: open-kilda, Lines: 18, Source: Original.java

Example 13: produceRecords

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
private static void produceRecords(String bootstrapServers) {
    Properties properties = new Properties();
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());

    Producer<Long, byte[]> producer = new KafkaProducer<>(properties);

    LongStream.rangeClosed(1, 100).boxed()
            .map(number ->
                    new ProducerRecord<>(
                            TOPIC, //topic
                            number, //key
                            String.format("record-%s", number.toString()).getBytes())) //value
            .forEach(record -> producer.send(record));
    producer.close();
}
 
Developer: jeqo, Project: talk-kafka-messaging-logs, Lines: 18, Source: ProduceConsumeLongByteArrayRecord.java
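Example 13 sends asynchronously and relies on close() to flush; a per-record failure goes unnoticed. send() also accepts a Callback that is invoked once the record is acknowledged or has definitively failed. A sketch, reusing the producer and record variables from the example above:

// Report the outcome of each send instead of discarding the returned Future.
producer.send(record, (metadata, exception) -> {
    if (exception != null) {
        exception.printStackTrace(); // the record failed after retries were exhausted
    } else {
        System.out.printf("wrote to %s-%d@%d%n",
                metadata.topic(), metadata.partition(), metadata.offset());
    }
});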

Example 14: start

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
public void start() throws InterruptedException {
  RandomGenerator random = RandomManager.getRandom();

  Properties props = ConfigUtils.keyValueToProperties(
      "bootstrap.servers", "localhost:" + kafkaPort,
      "key.serializer", "org.apache.kafka.common.serialization.StringSerializer",
      "value.serializer", "org.apache.kafka.common.serialization.StringSerializer",
      "compression.type", "gzip",
      "batch.size", 0,
      "acks", 1,
      "max.request.size", 1 << 26 // TODO
  );
  try (Producer<String,String> producer = new KafkaProducer<>(props)) {
    for (int i = 0; i < howMany; i++) {
      Pair<String,String> datum = datumGenerator.generate(i, random);
      ProducerRecord<String,String> record =
          new ProducerRecord<>(topic, datum.getFirst(), datum.getSecond());
      producer.send(record);
      log.debug("Sent datum {} = {}", record.key(), record.value());
      if (intervalMsec > 0) {
        Thread.sleep(intervalMsec);
      }
    }
  }
}
 
Developer: oncewang, Project: oryx2, Lines: 26, Source: ProduceData.java

Example 15: produceRecords

import org.apache.kafka.clients.producer.KafkaProducer; // import the required package/class
private static void produceRecords(String bootstrapServers) {
    Properties properties = new Properties();
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());

    Producer<Integer, byte[]> producer = new KafkaProducer<>(properties);

    IntStream.rangeClosed(1, 10000).boxed()
            .map(number ->
                    new ProducerRecord<>(
                            TOPIC,
                            1, // key: constant, so every record lands in the same partition
                            KafkaProducerUtil.createMessage(1000))) // value
            .forEach(record -> {
                producer.send(record);
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt(); // restore the interrupt flag
                }
            });
    producer.close();
}
 
Developer: jeqo, Project: talk-kafka-messaging-logs, Lines: 25, Source: Retention.java


Note: the org.apache.kafka.clients.producer.KafkaProducer class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.