Java KafkaConsumer.endOffsets Method Code Examples

This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.KafkaConsumer.endOffsets. If you are unsure what KafkaConsumer.endOffsets does, how to call it, or where it is appropriate, the curated examples below should help. You can also explore further usage examples for the enclosing class, org.apache.kafka.clients.consumer.KafkaConsumer.


Four code examples of the KafkaConsumer.endOffsets method are shown below, ordered by popularity.
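
Before the community examples, here is a minimal, self-contained sketch of the method itself. The broker address and topic name are placeholders rather than values taken from any of the projects below; endOffsets returns, for each partition, the offset of the next record that would be written (i.e. last offset + 1).

import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class EndOffsetsDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("key.deserializer",
            "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer",
            "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // Resolve all partitions of the topic, then ask for their end offsets.
            List<TopicPartition> partitions = consumer.partitionsFor("my-topic").stream() // placeholder topic
                .map(p -> new TopicPartition(p.topic(), p.partition()))
                .collect(Collectors.toList());
            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
            endOffsets.forEach((tp, offset) -> System.out.println(tp + " -> " + offset));
        }
    }
}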

Example 1: getCount

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;

/**
 * Gets the total message count for the topic.
 * <b>WARNING: Don't use with compacted topics</b>: compaction removes records,
 * so endOffset - beginningOffset overcounts the messages actually present.
 */
@SuppressWarnings("unchecked")
public long getCount(String kafkaBrokers, String topic) {
    KafkaConsumer consumer = buildConsumer(kafkaBrokers);
    try {
        Map<String, List<PartitionInfo>> topics = consumer.listTopics();
        List<PartitionInfo> partitionInfos = topics.get(topic);
        if (partitionInfos == null) {
            logger.warn("Partition information was not found for topic {}", topic);
            return 0;
        } else {
            Collection<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo partitionInfo : partitionInfos) {
                partitions.add(new TopicPartition(topic, partitionInfo.partition()));
            }
            // endOffsets returns, per partition, the offset of the next record to be
            // written; beginningOffsets returns the first offset still available.
            Map<TopicPartition, Long> endingOffsets = consumer.endOffsets(partitions);
            Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
            return diffOffsets(beginningOffsets, endingOffsets);
        }
    } finally {
        consumer.close();
    }
}
 
Developer ID: Sixt, Project: ja-micro, Lines of code: 29, Source: TopicMessageCounter.java
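
The snippet above ends with a call to a diffOffsets helper that the excerpt does not show. A plausible reconstruction (an assumption for illustration, not the project's actual code) sums the per-partition gaps between beginning and end offsets:

// Hypothetical reconstruction of the helper used above: the total message count
// is the sum over all partitions of (end offset - beginning offset).
private long diffOffsets(Map<TopicPartition, Long> beginning, Map<TopicPartition, Long> ending) {
    long total = 0;
    for (Map.Entry<TopicPartition, Long> entry : ending.entrySet()) {
        Long begin = beginning.get(entry.getKey());
        if (begin != null) {
            total += entry.getValue() - begin;
        }
    }
    return total;
}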

Example 2: messagesRemaining

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
import org.apache.kafka.common.TopicPartition;
import java.util.Map;
import static java.util.Collections.singleton;

// Remaining messages = (end offset of the partition) - (consumer's current position).
private static long messagesRemaining(KafkaConsumer<String, String> consumer, TopicPartition partition) {
    long currentPosition = consumer.position(partition);
    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(singleton(partition));
    if (endOffsets.containsKey(partition)) {
        return endOffsets.get(partition) - currentPosition;
    }
    return 0;
}
 
Developer ID: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines of code: 9, Source: TransactionalMessageCopier.java
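
A helper like this is typically used to drain a partition until the consumer has caught up with its end offset. Below is a minimal sketch of such a loop; the topic name, poll timeout, and record handling are illustrative assumptions, not part of the original source:

import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

// Consume until the position catches up with the end offset observed on each iteration.
static void drainPartition(KafkaConsumer<String, String> consumer) {
    TopicPartition partition = new TopicPartition("my-topic", 0); // placeholder topic
    consumer.assign(Collections.singleton(partition));
    while (messagesRemaining(consumer, partition) > 0) {
        for (ConsumerRecord<String, String> record : consumer.poll(1000)) {
            System.out.println(record.offset() + ": " + record.value());
        }
    }
}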

Example 3: main

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
public static void main(String[] args) throws Exception {
  CommandLine commandLine = parseCommandLine(args);
  String brokerStatsZk = commandLine.getOptionValue(BROKERSTATS_ZOOKEEPER);
  String brokerStatsTopic = commandLine.getOptionValue(BROKERSTATS_TOPIC);
  String brokerName = commandLine.getOptionValue(BROKERNAME);
  Set<String> brokerNames = new HashSet<>();
  brokerNames.add(brokerName);

  KafkaConsumer kafkaConsumer = KafkaUtils.getKafkaConsumer(brokerStatsZk,
      "org.apache.kafka.common.serialization.ByteArrayDeserializer",
      "org.apache.kafka.common.serialization.ByteArrayDeserializer", 1);

  // Look back 24 hours (86400 seconds) for the start of the processing window.
  long startTimestampInMillis = System.currentTimeMillis() - 86400 * 1000L;
  Map<TopicPartition, Long> offsets = ReplicaStatsManager.getProcessingStartOffsets(
      kafkaConsumer, brokerStatsTopic, startTimestampInMillis);
  kafkaConsumer.unsubscribe();
  kafkaConsumer.assign(offsets.keySet());
  // The end offsets bound the processing window on the right.
  Map<TopicPartition, Long> latestOffsets = kafkaConsumer.endOffsets(offsets.keySet());
  kafkaConsumer.close();

  Map<Long, BrokerStats> brokerStatsMap = new TreeMap<>();
  for (TopicPartition topicPartition : offsets.keySet()) {
    LOG.info("Start processing {}", topicPartition);
    long startOffset = offsets.get(topicPartition);
    long endOffset = latestOffsets.get(topicPartition);

    List<BrokerStats> statsList = processOnePartition(brokerStatsZk, topicPartition,
        startOffset, endOffset, brokerNames);
    for (BrokerStats brokerStats : statsList) {
      brokerStatsMap.put(brokerStats.getTimestamp(), brokerStats);
    }
    LOG.info("Finished processing {}, retrieved {} records", topicPartition, statsList.size());
  }

  for (Map.Entry<Long, BrokerStats> entry: brokerStatsMap.entrySet()) {
    System.out.println(entry.getKey() + " : " + entry.getValue());
  }
}
 
Developer ID: pinterest, Project: doctorkafka, Lines of code: 39, Source: BrokerStatsFilter.java

Example 4: getProcessingStartOffsets

import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
/**
 * Find the start offsets for the processing windows. We use kafka 0.10.1.1, which does not
 * support the KafkaConsumer.offsetsForTimes lookup, so we search backwards from the end
 * offsets until the record timestamp falls before the window start.
 */
public static Map<TopicPartition, Long> getProcessingStartOffsets(KafkaConsumer kafkaConsumer,
                                                                  String brokerStatsTopic,
                                                                  long startTimestampInMillis) {
  List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(brokerStatsTopic);
  LOG.info("Get partition info for {} : {} partitions", brokerStatsTopic, partitionInfos.size());
  List<TopicPartition> topicPartitions = partitionInfos.stream()
      .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
      .collect(Collectors.toList());

  Map<TopicPartition, Long> endOffsets = kafkaConsumer.endOffsets(topicPartitions);
  Map<TopicPartition, Long> beginningOffsets = kafkaConsumer.beginningOffsets(topicPartitions);
  Map<TopicPartition, Long> offsets = new HashMap<>();

  for (TopicPartition tp : topicPartitions) {
    kafkaConsumer.unsubscribe();
    LOG.info("assigning {} to kafkaconsumer", tp);
    List<TopicPartition> tps = new ArrayList<>();
    tps.add(tp);

    kafkaConsumer.assign(tps);
    long endOffset = endOffsets.get(tp);
    long beginningOffset = beginningOffsets.get(tp);
    // Start the search a few records before the end, but never before the beginning.
    long offset = Math.max(endOffset - 10, beginningOffset);
    ConsumerRecord<byte[], byte[]> record = retrieveOneMessage(kafkaConsumer, tp, offset);
    BrokerStats brokerStats = OperatorUtil.deserializeBrokerStats(record);
    if (brokerStats != null) {
      long timestamp = brokerStats.getTimestamp();
      while (timestamp > startTimestampInMillis) {
        offset = Math.max(beginningOffset, offset - 5000);
        record = retrieveOneMessage(kafkaConsumer, tp, offset);
        brokerStats = OperatorUtil.deserializeBrokerStats(record);
        if (brokerStats == null) {
          break;
        }
        timestamp = brokerStats.getTimestamp();
      }
    }
    offsets.put(tp, offset);
    LOG.info("{}: offset = {}, endOffset = {}, # of to-be-processed messages = {}",
        tp, offset, endOffset, endOffset - offset);
  }
  return offsets;
}
 
Developer ID: pinterest, Project: doctorkafka, Lines of code: 48, Source: ReplicaStatsManager.java
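
The backward search above exists because the project could not rely on a timestamp-based lookup. On clients and brokers where KafkaConsumer.offsetsForTimes is usable, the same start offsets can be obtained directly. The following is a sketch under that assumption; the method name startOffsetsByTime is invented for illustration and is not the project's code:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;

// Ask the broker, per partition, for the earliest offset whose record timestamp is
// >= startTimestampInMillis; fall back to the end offset when no such record exists
// (offsetsForTimes returns null for those partitions).
static Map<TopicPartition, Long> startOffsetsByTime(KafkaConsumer<?, ?> consumer,
                                                    String topic,
                                                    long startTimestampInMillis) {
    List<TopicPartition> partitions = consumer.partitionsFor(topic).stream()
        .map(p -> new TopicPartition(p.topic(), p.partition()))
        .collect(Collectors.toList());

    Map<TopicPartition, Long> query = new HashMap<>();
    for (TopicPartition tp : partitions) {
        query.put(tp, startTimestampInMillis);
    }

    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
    Map<TopicPartition, Long> result = new HashMap<>();
    for (Map.Entry<TopicPartition, OffsetAndTimestamp> e : consumer.offsetsForTimes(query).entrySet()) {
        result.put(e.getKey(), e.getValue() != null ? e.getValue().offset() : endOffsets.get(e.getKey()));
    }
    return result;
}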


Note: The org.apache.kafka.clients.consumer.KafkaConsumer.endOffsets method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright in the source code belongs to the original authors, and distribution and use must follow each project's License. Do not reproduce without permission.