

Java ProducerRecord Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.clients.producer.ProducerRecord. If you are wondering what ProducerRecord is for, how to use it, or what real-world code that uses it looks like, the curated examples below should help.


The ProducerRecord class belongs to the org.apache.kafka.clients.producer package. Fifteen code examples of the class are shown below, sorted by popularity.
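
Before the examples, here is a minimal sketch of the constructor overloads that the snippets below rely on. It assumes String keys and values and a placeholder topic name ("demo-topic"), chosen purely for illustration:

import org.apache.kafka.clients.producer.ProducerRecord;

public class ProducerRecordOverloads {
    public static void main(String[] args) {
        // value only: with a null key the partitioner spreads records across partitions
        ProducerRecord<String, String> r1 = new ProducerRecord<>("demo-topic", "value");
        // key + value: the default partitioner hashes the key to pick a partition
        ProducerRecord<String, String> r2 = new ProducerRecord<>("demo-topic", "key", "value");
        // explicit partition, key and value
        ProducerRecord<String, String> r3 = new ProducerRecord<>("demo-topic", 0, "key", "value");
        // explicit partition, timestamp (epoch millis), key and value
        ProducerRecord<String, String> r4 =
                new ProducerRecord<>("demo-topic", 0, System.currentTimeMillis(), "key", "value");
        System.out.println(r1 + "\n" + r2 + "\n" + r3 + "\n" + r4);
    }
}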

Example 1: testWithString

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
@Ignore
@Test
public void testWithString() throws Exception {
    KafkaComponent kafka = createComponent();
    
    Mono<List<String>> receive = Flux.from(kafka.from(TOPIC2, String.class))
        .take(2)
        .collectList();
    
    Subscriber<ProducerRecord> toTopic = kafka.to(TOPIC2, ProducerRecord.class);
    Flux.just(new ProducerRecord<String, String>(TOPIC2, "1", "test"),
              new ProducerRecord<String, String>(TOPIC2, "1", "test2"))
        .subscribe(toTopic);

    List<String> received = receive.block(Duration.ofSeconds(10));
    Assert.assertEquals(2, received.size());
    Assert.assertEquals("test", received.get(0));
    Assert.assertEquals("test2", received.get(1));
}
 
Author: cschneider, Project: reactive-components, Lines: 20, Source: TestKafka.java

Example 2: testWithConsumerRecord

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
@Ignore
@Test
public void testWithConsumerRecord() throws Exception {
    KafkaComponent kafka = createComponent();
    
    Mono<List<Object>> receive = Flux.from(kafka.from(TOPIC1, ConsumerRecord.class))
        .map(record -> record.value())
        .take(2)
        .collectList();

    Subscriber<ProducerRecord> toTopic = kafka.to(TOPIC1, ProducerRecord.class);
    Flux.just(new ProducerRecord<String, String>(TOPIC1, "1", "test"),
              new ProducerRecord<String, String>(TOPIC1, "1", "test2"))
        .subscribe(toTopic);

    List<Object> received = receive.block(Duration.ofSeconds(10));
    Assert.assertEquals(2, received.size());
    Assert.assertEquals("test", received.get(0));
}
 
Author: cschneider, Project: reactive-components, Lines: 20, Source: TestKafka.java

Example 3: send

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) {
  /*
  // Create wrappedRecord because headers can be read only in record (if record is sent second time)
  ProducerRecord<K, V> wrappedRecord = new ProducerRecord<>(record.topic(),
      record.partition(),
      record.timestamp(),
      record.key(),
      record.value(),
      record.headers());
  */

  try (Scope scope = buildAndInjectSpan(record)) {
    Callback wrappedCallback = new TracingCallback(callback, scope);
    return producer.send(record, wrappedCallback);
  }
}
 
Author: opentracing-contrib, Project: java-kafka-client, Lines: 18, Source: TracingKafkaProducer.java
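
The design here is worth noting: the record itself is passed through unchanged, and only the Callback is wrapped so that the tracing span can be completed once the broker acknowledges the send. The commented-out block preserves the authors' caveat that a record's headers become read-only after it has been sent, so re-sending the same instance would require building a fresh copy.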

Example 4: run

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
public void run() {
    int messageNo = 1;
    while (true) {
        String messageStr = "Message_" + messageNo;
        long startTime = System.currentTimeMillis();
        if (isAsync) { // Send asynchronously
            producer.send(new ProducerRecord<>(topic,
                messageNo,
                messageStr), new DemoCallBack(startTime, messageNo, messageStr));
        } else { // Send synchronously
            try {
                producer.send(new ProducerRecord<>(topic,
                    messageNo,
                    messageStr)).get();
                System.out.println("Sent message: (" + messageNo + ", " + messageStr + ")");
            } catch (InterruptedException | ExecutionException e) {
                e.printStackTrace();
            }
        }
        ++messageNo;
    }
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 23, Source: Producer.java
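
Both branches use the same API: send() is always asynchronous and returns a Future&lt;RecordMetadata&gt;. The "synchronous" branch simply blocks on that future with get(), trading throughput for the certainty that each message was acknowledged before the next one is sent.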

Example 5: send

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
@Override
public <K, V, E> boolean send(Producer<K, V> producer, ProducerRecord<K, V> record, final E event,
                              final FailedDeliveryCallback<E> failedDeliveryCallback) {
    try {
        producer.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null) {
                    failedDeliveryCallback.onFailedDelivery(event, exception);
                }
            }
        });
        return true;
    } catch (BufferExhaustedException e) {
        failedDeliveryCallback.onFailedDelivery(event, e);
        return false;
    }
}
 
Author: wngn123, Project: wngn-jms-kafka, Lines: 19, Source: AsynchronousDeliveryStrategy.java
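
Failures are reported through two distinct paths here: a broker-side error surfaces asynchronously in the Callback's exception argument, while a BufferExhaustedException (the producer's local buffer is full) is thrown synchronously from send() and turns into an immediate false return.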

Example 6: start

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
public void start() throws InterruptedException {
  RandomGenerator random = RandomManager.getRandom();

  Properties props = ConfigUtils.keyValueToProperties(
      "bootstrap.servers", "localhost:" + kafkaPort,
      "key.serializer", "org.apache.kafka.common.serialization.StringSerializer",
      "value.serializer", "org.apache.kafka.common.serialization.StringSerializer",
      "compression.type", "gzip",
      "batch.size", 0,
      "acks", 1,
      "max.request.size", 1 << 26 // TODO
  );
  try (Producer<String,String> producer = new KafkaProducer<>(props)) {
    for (int i = 0; i < howMany; i++) {
      Pair<String,String> datum = datumGenerator.generate(i, random);
      ProducerRecord<String,String> record =
          new ProducerRecord<>(topic, datum.getFirst(), datum.getSecond());
      producer.send(record);
      log.debug("Sent datum {} = {}", record.key(), record.value());
      if (intervalMsec > 0) {
        Thread.sleep(intervalMsec);
      }
    }
  }
}
 
Author: oncewang, Project: oryx2, Lines: 26, Source: ProduceData.java
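
Two of these settings are tuned for the test load: a "batch.size" of 0 disables batching entirely so every datum is sent out immediately, and a "max.request.size" of 1 << 26 raises the cap on a single request to 64 MiB (the TODO suggests the authors considered this a temporary value).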

Example 7: invoke

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
    switch (method.getName()) {
        case SEND_METHOD_NAME: {
            ProducerRecord record = (ProducerRecord) args[0];

            args[0] = new ProducerRecord<>(
                    BaseIntegrationTest.adjustTopicNameForTest(record.topic()),
                    record.partition(),
                    record.timestamp(),
                    record.key(),
                    record.value()
            );
            break;
        }
        case PARTITIONS_FOR_METHOD_NAME: {
            args[0] = BaseIntegrationTest.adjustTopicNameForTest((String) args[0]);
            break;
        }
    }
    return method.invoke(producer, args);
}
 
Author: epam, Project: Lagerta, Lines: 23, Source: KafkaFactoryForTests.java
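
One side effect of rebuilding the record with the five-argument constructor: if the original record carried headers, they are not copied onto the renamed-topic copy. For a test harness that only rewrites topic names this is presumably acceptable, but it is worth remembering when adapting the pattern.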

Example 8: main

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
public static void main(String[] args) {
    Map<String, Object> config = new HashMap<String, Object>();
    config.put("partitioner.class", "com.wngn.kafka.SimpleKeyPartition");
    LatestProducer producer = LatestProducer.getInstance(ProducerConstants.TOPIC_KAFKA_TEST, config);
    ProducerRecord<String, String> record = null;
    long index = 0L;
    boolean controller = true;
    // controller is cleared immediately, so the loop body runs exactly once
    while (controller) {
        controller = false;
        index++;
        System.out.println(index + "------------");
        try {
            String message = "message_" + index;
            RecordMetadata recordMetadata = producer.sendWithSync("1", message);
            System.out.format("PARTITION: %d OFFSET: %d\n", recordMetadata.partition(), recordMetadata.offset());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    producer.close();
}
 
Author: wngn123, Project: wngn-jms-kafka, Lines: 23, Source: LatestProducer.java

Example 9: shouldNotBeLoggingEnabledStoreWhenLoggingNotEnabled

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
@Test
public void shouldNotBeLoggingEnabledStoreWhenLoggingNotEnabled() throws Exception {
    store = createStore(false, false);
    final List<ProducerRecord> logged = new ArrayList<>();
    final NoOpRecordCollector collector = new NoOpRecordCollector() {
        @Override
        public <K, V> void send(final String topic,
                                K key,
                                V value,
                                Integer partition,
                                Long timestamp,
                                Serializer<K> keySerializer,
                                Serializer<V> valueSerializer) {
            logged.add(new ProducerRecord<K, V>(topic, partition, timestamp, key, value));
        }
    };
    final MockProcessorContext context = new MockProcessorContext(TestUtils.tempDirectory(),
                                                                  Serdes.String(),
                                                                  Serdes.String(),
                                                                  collector,
                                                                  cache);
    context.setTime(1);
    store.init(context, store);
    store.put("a", "b");
    assertTrue(logged.isEmpty());
}
 
Author: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 27, Source: RocksDBKeyValueStoreSupplierTest.java
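
With logging disabled, init() never wires the store up to a changelog topic, so the overridden send() is never invoked and the logged list stays empty, which is exactly what the final assertion checks.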

Example 10: nextBatch

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
public ConsumerRecords<K, V> nextBatch() {
    List<Future<RecordMetadata>> markerSends = new ArrayList<>();

    // 1. Get messages from topic, in batches
    ConsumerRecords<K, V> records = msgConsumer.poll(msgPollTimeout);
    for (ConsumerRecord<K, V> record : records) {
        // 2. Write a "start" marker. Collecting the future responses.
        markerSends.add(markerProducer.send(
                new ProducerRecord<>(config.getMarkerTopic(),
                        MarkerKey.fromRecord(record),
                        new StartMarker(config.getMsgTimeoutMs()))));
    }

    // Waiting for a confirmation that each start marker has been sent
    markerSends.forEach(f -> {
        try { f.get(); } catch (Exception e) { throw new RuntimeException(e); }
    });

    // 3. after all start markers are sent, commit offsets. This needs to be done as close to writing the
    // start marker as possible, to minimize the number of double re-processed messages in case of failure.
    msgConsumer.commitSync();

    return records;
}
 
Author: softwaremill, Project: kmq, Lines: 25, Source: KmqClient.java
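
The ordering here is deliberate: start markers are written and confirmed before offsets are committed, which yields at-least-once semantics. If the client crashes between the two steps, the uncommitted messages are simply polled and marked again; as the step-3 comment notes, this can double-process a small window of messages rather than lose any.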

Example 11: produceRecords

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
private static void produceRecords(String bootstrapServers) {
    Properties properties = new Properties();
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());

    Producer<Long, byte[]> producer = new KafkaProducer<>(properties);

    LongStream.rangeClosed(1, 100).boxed()
            .map(number ->
                    new ProducerRecord<>(
                            TOPIC, //topic
                            number, //key
                            String.format("record-%s", number.toString()).getBytes())) //value
            .forEach(record -> producer.send(record));
    producer.close();
}
 
Author: jeqo, Project: talk-kafka-messaging-logs, Lines: 18, Source: ProduceConsumeLongByteArrayRecord.java
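
No explicit flush is needed at the end: producer.close() blocks until all previously sent records have been transmitted, so the hundred buffered sends are guaranteed to go out before the method returns.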

Example 12: publish

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
public void publish(BrokerStats brokerStats) throws IOException {
  try {
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    BinaryEncoder binaryEncoder = avroEncoderFactory.binaryEncoder(stream, null);

    avroEventWriter.write(brokerStats, binaryEncoder);
    binaryEncoder.flush();
    IOUtils.closeQuietly(stream);

    String key = brokerStats.getName() + "_" + System.currentTimeMillis();
    int numPartitions = kafkaProducer.partitionsFor(destTopic).size();
    int partition = brokerStats.getId() % numPartitions;

    Future<RecordMetadata> future = kafkaProducer.send(
        new ProducerRecord(destTopic, partition, key.getBytes(), stream.toByteArray()));
    future.get();

    OpenTsdbMetricConverter.incr("kafka.stats.collector.success", 1, "host=" + HOSTNAME);
  } catch (Exception e) {
    LOG.error("Failure in publish stats", e);
    OpenTsdbMetricConverter.incr("kafka.stats.collector.failure", 1, "host=" + HOSTNAME);
    throw new RuntimeException("Avro serialization failure", e);
  }
}
 
Author: pinterest, Project: doctorkafka, Lines: 25, Source: KafkaAvroPublisher.java
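
The partition is pinned explicitly with getId() % numPartitions so that stats from the same broker always land on the same partition (assuming broker ids are non-negative), and the future.get() call makes the publish synchronous so that delivery failures surface inside the catch block and are counted as metrics.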

Example 13: sendStringMessage

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
public static void  sendStringMessage() throws Exception{
	Properties props = new Properties();
	props.put("bootstrap.servers", servers);
	props.put("acks", "all");
	props.put("retries", 0);
	props.put("batch.size", 16384);
	props.put("linger.ms", 1);
	props.put("buffer.memory", 33554432);
	props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
	props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

	Producer<String, String> producer = new org.apache.kafka.clients.producer.KafkaProducer<>(props);

	// no partition or key specified, so the topic's single default partition receives all messages
	int i=0;
	while(i<1000){
		Thread.sleep(1000L);
		String message = "zhangsan"+i;
		producer.send(new ProducerRecord<>("NL_U_APP_ALARM_APP_STRING",message));
		i++;
		producer.flush();
	}
	producer.close();
}
 
Author: jacktomcat, Project: spark2.0, Lines: 25, Source: KafkaSendMessage.java
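
Calling flush() after every single send() forces each record onto the wire immediately, which defeats the batching configured above via batch.size and linger.ms; that is harmless in a demo loop like this one but would hurt throughput in production code.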

Example 14: checkMulticast

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
private void checkMulticast(KafkaUsage usage, String topic, KafkaSource<Integer> source) {
  List<Integer> resultsA = new ArrayList<>();
  List<Integer> resultsB = new ArrayList<>();
  source
    .transformPayload(i -> i + 1)
    .to(Sink.forEachPayload(resultsB::add));

  source
    .transformPayload(i -> i + 1)
    .to(Sink.forEachPayload(resultsA::add));

  AtomicInteger counter = new AtomicInteger();
  usage.produceIntegers(10, null,
    () -> new ProducerRecord<>(topic, counter.getAndIncrement()));

  await().atMost(1, TimeUnit.MINUTES).until(() -> resultsA.size() >= 10);
  await().atMost(1, TimeUnit.MINUTES).until(() -> resultsB.size() >= 10);
  assertThat(resultsA).containsExactly(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
  assertThat(resultsB).containsExactly(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
}
 
Author: cescoffier, Project: fluid, Lines: 21, Source: KafkaSourceTest.java
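
Both subscriber chains receive every record: the source multicasts to all attached pipelines rather than splitting the stream between them, which is why each list independently contains all ten incremented payloads.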

Example 15: testSource

import org.apache.kafka.clients.producer.ProducerRecord; // import the required package/class
@Test
public void testSource() throws InterruptedException {
  KafkaUsage usage = new KafkaUsage();
  String topic = UUID.randomUUID().toString();
  List<Integer> results = new ArrayList<>();
  KafkaSource<Integer> source = new KafkaSource<>(vertx,
    getKafkaConfig()
      .put("topic", topic)
      .put("value.serializer", IntegerSerializer.class.getName())
      .put("value.deserializer", IntegerDeserializer.class.getName())
  );
  source
    .transformPayload(i -> i + 1)
    .to(Sink.forEachPayload(results::add));

  AtomicInteger counter = new AtomicInteger();
  usage.produceIntegers(10, null,
    () -> new ProducerRecord<>(topic, counter.getAndIncrement()));

  await().atMost(1, TimeUnit.MINUTES).until(() -> results.size() >= 10);
  assertThat(results).containsExactly(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
}
 
Author: cescoffier, Project: fluid, Lines: 23, Source: KafkaSourceTest.java


Note: The org.apache.kafka.clients.producer.ProducerRecord examples in this article were compiled by 纯净天空 from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects, and copyright remains with their original authors; consult each project's License before distributing or reusing the code. Please do not republish without permission.