This article collects typical usage examples of the Java method org.apache.kafka.clients.consumer.KafkaConsumer.endOffsets. If you are wondering what KafkaConsumer.endOffsets does, how to call it, or what real-world usage looks like, the curated method code examples below should help. You can also explore further usage examples of the containing class, org.apache.kafka.clients.consumer.KafkaConsumer.
Four code examples of KafkaConsumer.endOffsets are shown below, sorted by popularity by default.
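Before the examples, here is a minimal sketch of what endOffsets returns: for each requested partition, the offset one past the last available record (the high watermark under the default read_uncommitted isolation level). The method name endOffsetOf, the broker address, the topic name, and the deserializer choices below are placeholders for illustration only, not part of any example on this page.

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

static long endOffsetOf(String bootstrapServers, String topic, int partition) {
    Properties props = new Properties();
    props.put("bootstrap.servers", bootstrapServers);  // e.g. "localhost:9092" (placeholder)
    props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
        TopicPartition tp = new TopicPartition(topic, partition);
        // endOffsets maps each requested partition to the offset one past its last record.
        Map<TopicPartition, Long> endOffsets = consumer.endOffsets(Collections.singleton(tp));
        return endOffsets.get(tp);
    }
}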
Example 1: getCount
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class that the method depends on
/**
 * Gets the total message count for the topic.
 * <b>WARNING: Don't use with compacted topics</b>
 */
@SuppressWarnings("unchecked")
public long getCount(String kafkaBrokers, String topic) {
    KafkaConsumer consumer = buildConsumer(kafkaBrokers);
    try {
        @SuppressWarnings("unchecked")
        Map<String, List<PartitionInfo>> topics = consumer.listTopics();
        List<PartitionInfo> partitionInfos = topics.get(topic);
        if (partitionInfos == null) {
            logger.warn("Partition information was not found for topic {}", topic);
            return 0;
        } else {
            Collection<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo partitionInfo : partitionInfos) {
                TopicPartition partition = new TopicPartition(topic, partitionInfo.partition());
                partitions.add(partition);
            }
            Map<TopicPartition, Long> endingOffsets = consumer.endOffsets(partitions);
            Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
            return diffOffsets(beginningOffsets, endingOffsets);
        }
    } finally {
        consumer.close();
    }
}
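The buildConsumer and diffOffsets helpers referenced above are not part of this excerpt. Judging from how getCount uses them, diffOffsets presumably sums the per-partition gap between end and beginning offsets; the sketch below is an assumption, not the original implementation. On a compacted topic, log compaction removes records inside that range, so the gap overstates the number of readable messages, which is why the Javadoc warns against using this method there.

import java.util.Map;
import org.apache.kafka.common.TopicPartition;

// Hypothetical sketch of diffOffsets: sum (end offset - beginning offset) over all partitions.
private long diffOffsets(Map<TopicPartition, Long> beginningOffsets, Map<TopicPartition, Long> endingOffsets) {
    long total = 0;
    for (Map.Entry<TopicPartition, Long> entry : beginningOffsets.entrySet()) {
        Long end = endingOffsets.get(entry.getKey());
        if (end != null) {
            total += end - entry.getValue();
        }
    }
    return total;
}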
Example 2: messagesRemaining
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class that the method depends on
private static long messagesRemaining(KafkaConsumer<String, String> consumer, TopicPartition partition) {
    long currentPosition = consumer.position(partition);
    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(singleton(partition));
    if (endOffsets.containsKey(partition)) {
        return endOffsets.get(partition) - currentPosition;
    }
    return 0;
}
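A hypothetical way to put messagesRemaining to work is a loop that drains an assigned partition until nothing is left. The drainPartition name, the poll timeout, and the use of poll(Duration) (available in Kafka clients 2.0 and later) are assumptions for illustration; singleton is java.util.Collections.singleton, as in the example above.

import java.time.Duration;
import static java.util.Collections.singleton;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

// Hypothetical usage: read from the beginning of a partition until its end offset is reached.
private static void drainPartition(KafkaConsumer<String, String> consumer, TopicPartition partition) {
    consumer.assign(singleton(partition));
    consumer.seekToBeginning(singleton(partition));
    while (messagesRemaining(consumer, partition) > 0) {
        // keep polling until the consumer's position catches up with the end offset
        for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofMillis(200))) {
            System.out.println(record.offset() + ": " + record.value());
        }
    }
}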
Example 3: main
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class that the method depends on
public static void main(String[] args) throws Exception {
    CommandLine commandLine = parseCommandLine(args);
    String brokerStatsZk = commandLine.getOptionValue(BROKERSTATS_ZOOKEEPER);
    String brokerStatsTopic = commandLine.getOptionValue(BROKERSTATS_TOPIC);
    String brokerName = commandLine.getOptionValue(BROKERNAME);
    Set<String> brokerNames = new HashSet<>();
    brokerNames.add(brokerName);

    KafkaConsumer kafkaConsumer = KafkaUtils.getKafkaConsumer(brokerStatsZk,
        "org.apache.kafka.common.serialization.ByteArrayDeserializer",
        "org.apache.kafka.common.serialization.ByteArrayDeserializer", 1);

    long startTimestampInMillis = System.currentTimeMillis() - 86400 * 1000L;
    Map<TopicPartition, Long> offsets = ReplicaStatsManager.getProcessingStartOffsets(
        kafkaConsumer, brokerStatsTopic, startTimestampInMillis);
    kafkaConsumer.unsubscribe();
    kafkaConsumer.assign(offsets.keySet());
    Map<TopicPartition, Long> latestOffsets = kafkaConsumer.endOffsets(offsets.keySet());
    kafkaConsumer.close();

    Map<Long, BrokerStats> brokerStatsMap = new TreeMap<>();
    for (TopicPartition topicPartition : offsets.keySet()) {
        LOG.info("Start processing {}", topicPartition);
        long startOffset = offsets.get(topicPartition);
        long endOffset = latestOffsets.get(topicPartition);
        List<BrokerStats> statsList = processOnePartition(brokerStatsZk, topicPartition,
            startOffset, endOffset, brokerNames);
        for (BrokerStats brokerStats : statsList) {
            brokerStatsMap.put(brokerStats.getTimestamp(), brokerStats);
        }
        LOG.info("Finished processing {}, retrieved {} records", topicPartition, statsList.size());
    }

    for (Map.Entry<Long, BrokerStats> entry : brokerStatsMap.entrySet()) {
        System.out.println(entry.getKey() + " : " + entry.getValue());
    }
}
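In short, this driver looks back 24 hours (86400 * 1000 ms), asks getProcessingStartOffsets (shown in Example 4) for per-partition offsets near that timestamp, reads the current end offsets with endOffsets, and then processes each partition between the two boundaries, collecting the resulting BrokerStats into a TreeMap keyed by timestamp so they print in chronological order.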
Example 4: getProcessingStartOffsets
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class that the method depends on
/**
 * Find the start offsets for the processing windows. We use Kafka 0.10.1.1, which does not support
 * looking up offsets by timestamp through the KafkaConsumer API, so the start offsets are located
 * by stepping backwards from each partition's end offset.
 */
public static Map<TopicPartition, Long> getProcessingStartOffsets(KafkaConsumer kafkaConsumer,
                                                                  String brokerStatsTopic,
                                                                  long startTimestampInMillis) {
    List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(brokerStatsTopic);
    LOG.info("Get partition info for {} : {} partitions", brokerStatsTopic, partitionInfos.size());
    List<TopicPartition> topicPartitions = partitionInfos.stream()
        .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
        .collect(Collectors.toList());
    Map<TopicPartition, Long> endOffsets = kafkaConsumer.endOffsets(topicPartitions);
    Map<TopicPartition, Long> beginningOffsets = kafkaConsumer.beginningOffsets(topicPartitions);
    Map<TopicPartition, Long> offsets = new HashMap<>();

    for (TopicPartition tp : topicPartitions) {
        kafkaConsumer.unsubscribe();
        LOG.info("assigning {} to kafkaconsumer", tp);
        List<TopicPartition> tps = new ArrayList<>();
        tps.add(tp);
        kafkaConsumer.assign(tps);

        long endOffset = endOffsets.get(tp);
        long beginningOffset = beginningOffsets.get(tp);
        // Start near the tail of the partition and read one record to get its timestamp.
        long offset = Math.max(endOffset - 10, beginningOffset);
        ConsumerRecord<byte[], byte[]> record = retrieveOneMessage(kafkaConsumer, tp, offset);
        BrokerStats brokerStats = OperatorUtil.deserializeBrokerStats(record);
        if (brokerStats != null) {
            long timestamp = brokerStats.getTimestamp();
            // Step backwards in blocks of 5000 offsets until a record older than the start
            // timestamp is found; stop at the beginning of the partition to avoid looping forever.
            while (timestamp > startTimestampInMillis && offset > beginningOffset) {
                offset = Math.max(beginningOffset, offset - 5000);
                record = retrieveOneMessage(kafkaConsumer, tp, offset);
                brokerStats = OperatorUtil.deserializeBrokerStats(record);
                if (brokerStats == null) {
                    break;
                }
                timestamp = brokerStats.getTimestamp();
            }
        }
        offsets.put(tp, offset);
        LOG.info("{}: offset = {}, endOffset = {}, # of to-be-processed messages = {}",
            tp, offset, endOffset, endOffset - offset);
    }
    return offsets;
}
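The retrieveOneMessage helper is not included in this excerpt. A plausible sketch, assuming it simply seeks the already-assigned consumer to the requested offset and polls until a record arrives (the poll timeout and method body are assumptions, not the original code):

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

// Hypothetical sketch: position the consumer at the requested offset and read a single record.
private static ConsumerRecord<byte[], byte[]> retrieveOneMessage(KafkaConsumer kafkaConsumer,
                                                                 TopicPartition topicPartition,
                                                                 long offset) {
    kafkaConsumer.seek(topicPartition, offset);
    ConsumerRecords<byte[], byte[]> records;
    do {
        // keep polling until at least one record is returned at or after the offset
        records = kafkaConsumer.poll(100L);
    } while (records.isEmpty());
    return records.iterator().next();
}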