Java ConsumerRecords.records Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.ConsumerRecords.records, drawn from open-source projects. If you are wondering what ConsumerRecords.records does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore the other methods of org.apache.kafka.clients.consumer.ConsumerRecords for further usage examples.


Six code examples of the ConsumerRecords.records method are shown below, ordered by popularity.
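Before the examples, here is a minimal sketch of the two records() overloads, showing the per-partition and per-topic views of a poll result. The broker address, group id, and topic name "demo" are illustrative, not taken from any example below.

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("group.id", "demo-group");
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
consumer.subscribe(Arrays.asList("demo"));

ConsumerRecords<String, String> records = consumer.poll(100);

// Per-partition view: records(TopicPartition) returns that partition's
// records as a List, in offset order.
for (TopicPartition tp : records.partitions()) {
    List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);
    // process partitionRecords ...
}

// Per-topic view: records(String topic) returns an Iterable over the
// records of every partition of that topic present in this batch.
for (ConsumerRecord<String, String> record : records.records("demo")) {
    // process record ...
}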

Example 1: onConsume

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
@Override
@SuppressWarnings("deprecation")
public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {

    // This will ensure that we get the cluster metadata when onConsume is called for the first time
    // as subsequent compareAndSet operations will fail.
    CLUSTER_ID_BEFORE_ON_CONSUME.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get());

    Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> lst = new ArrayList<>();
        for (ConsumerRecord<String, String> record: records.records(tp)) {
            lst.add(new ConsumerRecord<>(record.topic(), record.partition(), record.offset(),
                                         record.timestamp(), record.timestampType(),
                                         record.checksum(), record.serializedKeySize(),
                                         record.serializedValueSize(),
                                         record.key(), record.value().toUpperCase(Locale.ROOT)));
        }
        recordMap.put(tp, lst);
    }
    return new ConsumerRecords<String, String>(recordMap);
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 23 | Source: MockConsumerInterceptor.java
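An interceptor like this takes effect only once it is registered on the consumer. A minimal sketch of the registration follows; the broker address and group id are illustrative, and the fully qualified interceptor class name is an assumption — substitute the actual package of the class above.

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("group.id", "demo-group");
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
// interceptor.classes takes a comma-separated list of ConsumerInterceptor implementations
props.put("interceptor.classes", "org.apache.kafka.test.MockConsumerInterceptor"); // assumed package
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);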

Example 2: retrieveOneMessage

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
private static ConsumerRecord<byte[], byte[]> retrieveOneMessage(KafkaConsumer<byte[], byte[]> kafkaConsumer,
                                                                 TopicPartition topicPartition,
                                                                 long offset) {
  kafkaConsumer.seek(topicPartition, offset);
  ConsumerRecords<byte[], byte[]> records;
  ConsumerRecord<byte[], byte[]> record = null;
  while (record == null) {
    records = kafkaConsumer.poll(100);
    if (!records.isEmpty()) {
      LOG.debug("records.count() = {}", records.count());
      List<ConsumerRecord<byte[], byte[]>> reclist = records.records(topicPartition);
      if (reclist != null && !reclist.isEmpty()) {
        record = reclist.get(0);
        break;
      } else {
        LOG.info("recList is null or empty");
      }
    }
  }
  return record;
}
 
Developer: pinterest | Project: doctorkafka | Lines: 22 | Source: ReplicaStatsManager.java
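A hypothetical call site for the helper above; since retrieveOneMessage() calls seek(), the partition must be assigned before the call. Broker address, topic, partition, and offset are all illustrative.

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
    TopicPartition tp = new TopicPartition("stats", 0);
    consumer.assign(Collections.singletonList(tp));
    ConsumerRecord<byte[], byte[]> record = retrieveOneMessage(consumer, tp, 42L);
}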

Example 3: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    // bootstrap.servers lists one or more brokers; the whole cluster need not be listed, as the client discovers the remaining brokers automatically.
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    configs.put("group.id", "kafka-test");
    // whether offsets are committed automatically
    configs.put("enable.auto.commit", "false");
    // interval between automatic offset commits (ignored here, since auto-commit is disabled)
    configs.put("auto.commit.interval.ms", "1000");
    configs.put("session.timeout.ms", "30000");

    configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
    // topics to subscribe to; several can be subscribed at once
    consumer.subscribe(Arrays.asList("kafka-test"));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            for (ConsumerRecord<String, String> record : partitionRecords) {
                System.out.println(record.offset() + ": " + record.value());
            }
            /* synchronously commit the offset for this partition */
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
    }
}
 
Developer: wngn123 | Project: wngn-jms-kafka | Lines: 35 | Source: ComsumerDemo3.java

Example 4: main

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
public static void main(String[] args) {
    Map<String, Object> configs = new HashMap<String, Object>();
    // bootstrap.servers lists one or more brokers; the whole cluster need not be listed, as the client discovers the remaining brokers automatically.
    configs.put("bootstrap.servers", "192.168.0.107:9092,192.168.0.108:9092,192.168.0.109:9092");
    configs.put("group.id", "kafka-test");
    // whether offsets are committed automatically
    configs.put("enable.auto.commit", "false");
    // interval between automatic offset commits (ignored here, since auto-commit is disabled)
    configs.put("auto.commit.interval.ms", "1000");
    configs.put("session.timeout.ms", "30000");

    configs.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    configs.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
    String topic = "kafka-test";
    TopicPartition partition0 = new TopicPartition(topic, 0);
    TopicPartition partition1 = new TopicPartition(topic, 1);
    consumer.assign(Arrays.asList(partition0, partition1));
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            for (ConsumerRecord<String, String> record : partitionRecords) {
                System.out.println(record.offset() + ": " + record.value());
            }
            /* synchronously commit the offset for this partition */
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
    }
}
 
Developer: wngn123 | Project: wngn-jms-kafka | Lines: 33 | Source: ComsumerDemo4.java
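Examples 3 and 4 are otherwise identical; they differ only in how the consumer obtains its partitions, and a consumer uses one mode or the other, never both. A side-by-side sketch of the two calls (consumer is the KafkaConsumer from the examples):

// Group management: the broker assigns partitions to the consumers
// sharing a group.id and rebalances them as members come and go.
consumer.subscribe(Arrays.asList("kafka-test"));

// Manual assignment: the consumer is pinned to explicit partitions;
// no group rebalancing takes place.
consumer.assign(Arrays.asList(new TopicPartition("kafka-test", 0),
                              new TopicPartition("kafka-test", 1)));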

Example 5: onRecordsReceived

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
private Map<TopicPartition, OffsetAndMetadata> onRecordsReceived(ConsumerRecords<String, String> records) {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();

    List<RecordSetSummary> summaries = new ArrayList<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);

        if (hasMessageLimit() && consumedMessages + partitionRecords.size() > maxMessages)
            partitionRecords = partitionRecords.subList(0, maxMessages - consumedMessages);

        if (partitionRecords.isEmpty())
            continue;

        long minOffset = partitionRecords.get(0).offset();
        long maxOffset = partitionRecords.get(partitionRecords.size() - 1).offset();

        offsets.put(tp, new OffsetAndMetadata(maxOffset + 1));
        summaries.add(new RecordSetSummary(tp.topic(), tp.partition(),
                partitionRecords.size(), minOffset, maxOffset));

        if (verbose) {
            for (ConsumerRecord<String, String> record : partitionRecords)
                printJson(new RecordData(record));
        }

        consumedMessages += partitionRecords.size();
        if (isFinished())
            break;
    }

    printJson(new RecordsConsumed(records.count(), summaries));
    return offsets;
}
 
Developer: YMCoding | Project: kafka-0.11.0.0-src-with-comment | Lines: 34 | Source: VerifiableConsumer.java

Example 6: testStartJob

import org.apache.kafka.clients.consumer.ConsumerRecords; // import the package/class this method depends on
@Test
public void testStartJob() throws Exception {
  try (MiniKafkaCluster kafkaCluster = new MiniKafkaCluster.Builder().newServer("0").build();
       MiniAthenaXCluster cluster = new MiniAthenaXCluster(StartJobITest.class.getSimpleName())) {
    kafkaCluster.start();
    setUpKafka(kafkaCluster);
    cluster.start();
    AthenaXConfiguration conf = generateConf(cluster);
    ServerContext.INSTANCE.initialize(conf);
    ServerContext.INSTANCE.start();
    try (WebServer server = new WebServer(URI.create("http://localhost:0"))) {
      server.start();
      Configuration.getDefaultApiClient().setBasePath(String.format("http://localhost:%d%s", server.port(), WebServer.BASE_PATH));
      LOG.info("AthenaX server listening on http://localhost:{}", server.port());

      JobsApi api = new JobsApi();
      String uuid = api.allocateNewJob().getJobUuid();
      JobDefinitionDesiredstate state = new JobDefinitionDesiredstate()
          .clusterId("foo")
          .resource(new JobDefinitionResource().vCores(1L).memory(2048L));
      JobDefinition job = new JobDefinition()
          .query("SELECT * FROM input.foo")
          .addDesiredStateItem(state);
      api.updateJob(UUID.fromString(uuid), job);

      try (KafkaConsumer<byte[], byte[]> consumer = getConsumer("observer", brokerAddress())) {
        consumer.subscribe(Collections.singletonList(DEST_TOPIC));
        boolean found = false;
        while (!found) {
          ConsumerRecords<byte[], byte[]> records = consumer.poll(1000);
          for (ConsumerRecord<byte[], byte[]> r : records.records(DEST_TOPIC)) {
            @SuppressWarnings("unchecked")
            Map<String, Object> m = MAPPER.readValue(r.value(), Map.class);
            if ((Integer) m.get("id") == 2) {
              found = true;
            }
          }
        }
        ServerContext.INSTANCE.executor().shutdown();
        ServerContext.INSTANCE.instanceManager().close();
      }
    }
  }
}
 
Developer: uber | Project: AthenaX | Lines: 45 | Source: StartJobITest.java


Note: The org.apache.kafka.clients.consumer.ConsumerRecords.records examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution must follow the corresponding project's license. Please do not reproduce without permission.