This article compiles typical usage examples of the Java method org.apache.kafka.clients.consumer.KafkaConsumer.listTopics. If you have been wondering what KafkaConsumer.listTopics does or how to use it, the curated examples below should help; you can also read further about the enclosing class, org.apache.kafka.clients.consumer.KafkaConsumer.
Five code examples of KafkaConsumer.listTopics are shown below, ordered by popularity.
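Before the examples, here is a minimal, self-contained sketch of the call itself (the broker address is a placeholder): listTopics() asks the cluster for metadata and returns a Map<String, List<PartitionInfo>> keyed by topic name.

import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ListTopicsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());
        // KafkaConsumer implements Closeable, so try-with-resources closes it for us
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            Map<String, List<PartitionInfo>> topics = consumer.listTopics();
            topics.forEach((name, partitions) ->
                    System.out.println(name + " has " + partitions.size() + " partition(s)"));
        }
    }
}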
Example 1: getCount
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
/**
 * Gets the total message count for the topic.
 * <b>WARNING: Don't use with compacted topics</b>
 */
@SuppressWarnings("unchecked")
public long getCount(String kafkaBrokers, String topic) {
    KafkaConsumer consumer = buildConsumer(kafkaBrokers);
    try {
        @SuppressWarnings("unchecked")
        Map<String, List<PartitionInfo>> topics = consumer.listTopics();
        List<PartitionInfo> partitionInfos = topics.get(topic);
        if (partitionInfos == null) {
            logger.warn("Partition information was not found for topic {}", topic);
            return 0;
        } else {
            Collection<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo partitionInfo : partitionInfos) {
                TopicPartition partition = new TopicPartition(topic, partitionInfo.partition());
                partitions.add(partition);
            }
            Map<TopicPartition, Long> endingOffsets = consumer.endOffsets(partitions);
            Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
            return diffOffsets(beginningOffsets, endingOffsets);
        }
    } finally {
        consumer.close();
    }
}
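The helpers buildConsumer and diffOffsets referenced above are not part of this excerpt. A minimal sketch of what they might look like, assuming diffOffsets simply sums the per-partition gap between ending and beginning offsets (both helper bodies are reconstructions, not the original code):

private KafkaConsumer<String, String> buildConsumer(String kafkaBrokers) {
    Properties props = new Properties();
    props.put("bootstrap.servers", kafkaBrokers);
    props.put("group.id", UUID.randomUUID().toString()); // throwaway group, metadata-only consumer
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());
    return new KafkaConsumer<>(props);
}

private long diffOffsets(Map<TopicPartition, Long> beginningOffsets, Map<TopicPartition, Long> endingOffsets) {
    long total = 0;
    for (Map.Entry<TopicPartition, Long> entry : endingOffsets.entrySet()) {
        Long begin = beginningOffsets.get(entry.getKey());
        if (begin != null) {
            total += entry.getValue() - begin; // messages between earliest and latest offset
        }
    }
    return total;
}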
Example 2: verifyTopicsExist
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public boolean verifyTopicsExist(String kafkaBrokers, Set<String> requiredTopics,
                                 boolean checkPartitionCounts) {
    Properties props = new Properties();
    props.put("bootstrap.servers", kafkaBrokers);
    props.put("group.id", UUID.randomUUID().toString());
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    try {
        Map<String, List<PartitionInfo>> topics = consumer.listTopics();
        Set<Integer> partitionCount = new HashSet<>();
        for (String requiredTopic : requiredTopics) {
            List<PartitionInfo> partitions = topics.get(requiredTopic);
            if (partitions == null) {
                logger.info("Required kafka topic {} not present", requiredTopic);
                return false;
            }
            partitionCount.add(partitions.size());
        }
        if (checkPartitionCounts && partitionCount.size() > 1) {
            logger.warn("Partition count mismatch in topics {}",
                    Arrays.toString(requiredTopics.toArray()));
            return false;
        }
        return true;
    } finally {
        consumer.close();
    }
}
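A possible call site, assuming the broker list and topic names come from configuration (the values below are placeholders):

Set<String> required = new HashSet<>(Arrays.asList("orders", "payments"));
if (!verifyTopicsExist("kafka-1:9092,kafka-2:9092", required, true)) {
    throw new IllegalStateException("Required Kafka topics are missing or partition counts do not match");
}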
Example 3: run
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
/**
* When an object implementing interface <code>Runnable</code> is used
* to create a thread, starting the thread causes the object's
* <code>run</code> method to be called in that separately executing
* thread.
* <p>
* The general contract of the method <code>run</code> is that it may
* take any action whatsoever.
*
* @see Thread#run()
*/
@Override
public void run() {
    String group = "kafka-insight-logOffsetListener";
    int sleepTime = 60000;
    KafkaConsumer<byte[], byte[]> kafkaConsumer = null;
    while (true) {
        try {
            if (null == kafkaConsumer) {
                kafkaConsumer = KafkaUtils.createNewKafkaConsumer(brokersInfo, group);
            }
            Map<String, List<PartitionInfo>> topicPartitionsMap = kafkaConsumer.listTopics();
            for (List<PartitionInfo> partitionInfoList : topicPartitionsMap.values()) {
                for (PartitionInfo partitionInfo : partitionInfoList) {
                    TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
                    Collection<TopicPartition> topicPartitions = Arrays.asList(topicPartition);
                    kafkaConsumer.assign(topicPartitions);
                    kafkaConsumer.seekToEnd(topicPartitions);
                    Long logEndOffset = kafkaConsumer.position(topicPartition);
                    logEndOffsetMap.put(topicPartition, logEndOffset);
                }
            }
            Thread.sleep(sleepTime);
        } catch (Exception e) {
            e.printStackTrace();
            if (null != kafkaConsumer) {
                kafkaConsumer.close();
                kafkaConsumer = null;
            }
        }
    }
}
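KafkaUtils.createNewKafkaConsumer is not shown in this excerpt; a plausible sketch, assuming it only wires byte-array deserializers for the given broker list and group id (the settings here are assumptions):

public static KafkaConsumer<byte[], byte[]> createNewKafkaConsumer(String brokersInfo, String group) {
    Properties props = new Properties();
    props.put("bootstrap.servers", brokersInfo);
    props.put("group.id", group);
    props.put("enable.auto.commit", "false"); // the listener only reads offsets, it never commits
    props.put("key.deserializer", ByteArrayDeserializer.class.getName());
    props.put("value.deserializer", ByteArrayDeserializer.class.getName());
    return new KafkaConsumer<>(props);
}

Note that the loop above assigns and seeks to the end of each partition one at a time; the same log-end offsets could also be fetched in a single call with consumer.endOffsets(partitions), as Example 1 does.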
Example 4: main
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
public static void main(String[] args) {
    Map<String, List<PartitionInfo>> topics;
    Properties props = new Properties();
    props.put("bootstrap.servers", "kafka-1:9092,kafka-2:9092,kafka-3:9092");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
    topics = consumer.listTopics();
    consumer.close();
    System.out.println("******************************************");
    System.out.println(" L I S T T O P I C S ");
    System.out.println("******************************************\n");
    for (Map.Entry<String, List<PartitionInfo>> topic : topics.entrySet()) {
        System.out.println("Topic: " + topic.getKey());
        System.out.println("Value: " + topic.getValue() + "\n");
    }
}
Example 5: getTopicPartitionInfoMap
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class this method depends on
private Map<String, List<PartitionInfo>> getTopicPartitionInfoMap() {
    KafkaConsumer kafkaConsumer = KafkaUtils.getKafkaConsumer(zkUrl);
    Map<String, List<PartitionInfo>> topicPartitionInfoMap = kafkaConsumer.listTopics();
    return topicPartitionInfoMap;
}