This page collects typical usage examples of the Java method org.apache.kafka.clients.consumer.KafkaConsumer.listTopics. If you are unsure what KafkaConsumer.listTopics does or how to call it, the curated code samples below should help; you can also look at the enclosing class, org.apache.kafka.clients.consumer.KafkaConsumer, for more context.
The following shows 5 code examples of the KafkaConsumer.listTopics method, ordered by popularity by default.
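Before the examples, here is a minimal, self-contained sketch of how KafkaConsumer.listTopics is typically called. It is not taken from any of the projects below; the broker address is a placeholder.

import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ListTopicsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // listTopics() returns partition metadata for every topic the consumer can see
            Map<String, List<PartitionInfo>> topics = consumer.listTopics();
            topics.forEach((name, partitions) ->
                    System.out.println(name + " has " + partitions.size() + " partition(s)"));
        }
    }
}

Note that a group.id is not required just to list topics, and try-with-resources ensures the consumer is closed.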
Example 1: getCount
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
/**
 * Gets the total message count for the topic.
 * <b>WARNING: Don't use with compacted topics</b>
 */
@SuppressWarnings("unchecked")
public long getCount(String kafkaBrokers, String topic) {
    KafkaConsumer consumer = buildConsumer(kafkaBrokers);
    try {
        @SuppressWarnings("unchecked")
        Map<String, List<PartitionInfo>> topics = consumer.listTopics();
        List<PartitionInfo> partitionInfos = topics.get(topic);
        if (partitionInfos == null) {
            logger.warn("Partition information was not found for topic {}", topic);
            return 0;
        } else {
            Collection<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo partitionInfo : partitionInfos) {
                TopicPartition partition = new TopicPartition(topic, partitionInfo.partition());
                partitions.add(partition);
            }
            Map<TopicPartition, Long> endingOffsets = consumer.endOffsets(partitions);
            Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
            return diffOffsets(beginningOffsets, endingOffsets);
        }
    } finally {
        consumer.close();
    }
}
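Example 1 delegates to a diffOffsets helper that is not shown on this page. A plausible implementation, given how it is called above, sums the per-partition gap between the ending and beginning offsets; the code below is an assumption, not the project's actual helper.

// Hypothetical helper: assumed to sum (endOffset - beginningOffset) across all partitions.
private long diffOffsets(Map<TopicPartition, Long> beginningOffsets,
                         Map<TopicPartition, Long> endingOffsets) {
    long total = 0;
    for (Map.Entry<TopicPartition, Long> entry : endingOffsets.entrySet()) {
        Long begin = beginningOffsets.get(entry.getKey());
        if (begin != null) {
            total += entry.getValue() - begin;
        }
    }
    return total;
}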
Example 2: verifyTopicsExist
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
public boolean verifyTopicsExist(String kafkaBrokers, Set<String> requiredTopics,
        boolean checkPartitionCounts) {
    Properties props = new Properties();
    props.put("bootstrap.servers", kafkaBrokers);
    props.put("group.id", UUID.randomUUID().toString());
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());
    KafkaConsumer consumer = new KafkaConsumer(props);
    try {
        @SuppressWarnings("unchecked")
        Map<String, List<PartitionInfo>> topics = consumer.listTopics();
        Set<Integer> partitionCount = new HashSet<>();
        for (String requiredTopic : requiredTopics) {
            List<PartitionInfo> partitions = topics.get(requiredTopic);
            if (partitions == null) {
                logger.info("Required kafka topic {} not present", requiredTopic);
                return false;
            }
            partitionCount.add(partitions.size());
        }
        if (checkPartitionCounts && partitionCount.size() > 1) {
            logger.warn("Partition count mismatch in topics {}",
                    Arrays.toString(requiredTopics.toArray()));
            return false;
        }
        return true;
    } finally {
        consumer.close();
    }
}
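A hypothetical caller might run this check as a startup gate before consumers are created. The broker address and topic names in this sketch are placeholders, not values from the original project.

// Hypothetical startup check using verifyTopicsExist; broker and topic names are placeholders.
public boolean topicsReady() {
    Set<String> required = new HashSet<>(Arrays.asList("orders", "payments"));
    return verifyTopicsExist("localhost:9092", required, true);
}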
Example 3: run
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
/**
 * When an object implementing interface <code>Runnable</code> is used
 * to create a thread, starting the thread causes the object's
 * <code>run</code> method to be called in that separately executing
 * thread.
 * <p>
 * The general contract of the method <code>run</code> is that it may
 * take any action whatsoever.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    String group = "kafka-insight-logOffsetListener";
    int sleepTime = 60000;
    KafkaConsumer<byte[], byte[]> kafkaConsumer = null;
    while (true) {
        try {
            if (null == kafkaConsumer) {
                kafkaConsumer = KafkaUtils.createNewKafkaConsumer(brokersInfo, group);
            }
            Map<String, List<PartitionInfo>> topicPartitionsMap = kafkaConsumer.listTopics();
            for (List<PartitionInfo> partitionInfoList : topicPartitionsMap.values()) {
                for (PartitionInfo partitionInfo : partitionInfoList) {
                    TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
                    Collection<TopicPartition> topicPartitions = Arrays.asList(topicPartition);
                    kafkaConsumer.assign(topicPartitions);
                    kafkaConsumer.seekToEnd(topicPartitions);
                    Long logEndOffset = kafkaConsumer.position(topicPartition);
                    logEndOffsetMap.put(topicPartition, logEndOffset);
                }
            }
            Thread.sleep(sleepTime);
        } catch (Exception e) {
            e.printStackTrace();
            if (null != kafkaConsumer) {
                kafkaConsumer.close();
                kafkaConsumer = null;
            }
        }
    }
}
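Example 3 assigns and seeks to the end of each partition one at a time. Since the consumer also exposes endOffsets, a leaner variant can fetch the log-end offsets in a single call; the sketch below is an alternative reading of the same loop (assuming only log-end offsets are needed), not the project's actual code.

// Alternative sketch: collect all partitions first, then fetch log-end offsets in bulk.
Map<String, List<PartitionInfo>> topicsMap = kafkaConsumer.listTopics();
List<TopicPartition> allPartitions = new ArrayList<>();
for (List<PartitionInfo> infos : topicsMap.values()) {
    for (PartitionInfo info : infos) {
        allPartitions.add(new TopicPartition(info.topic(), info.partition()));
    }
}
// endOffsets() returns the log-end offset of each partition without assigning or seeking.
logEndOffsetMap.putAll(kafkaConsumer.endOffsets(allPartitions));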
Example 4: main
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
public static void main(String[] args) {
    Map<String, List<PartitionInfo>> topics;

    Properties props = new Properties();
    props.put("bootstrap.servers", "kafka-1:9092,kafka-2:9092,kafka-3:9092");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);

    topics = consumer.listTopics();

    System.out.println("******************************************");
    System.out.println(" L I S T   T O P I C S ");
    System.out.println("******************************************\n");
    for (Map.Entry<String, List<PartitionInfo>> topic : topics.entrySet()) {
        System.out.println("Topic: " + topic.getKey());
        System.out.println("Value: " + topic.getValue() + "\n");
    }
}
Example 5: getTopicPartitionInfoMap
import org.apache.kafka.clients.consumer.KafkaConsumer; // import the package/class the method depends on
private Map<String, List<PartitionInfo>> getTopicPartitionInfoMap() {
    KafkaConsumer kafkaConsumer = KafkaUtils.getKafkaConsumer(zkUrl);
    Map<String, List<PartitionInfo>> topicPartitonInfoMap = kafkaConsumer.listTopics();
    return topicPartitonInfoMap;
}
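Note that listTopics also reports Kafka's internal topics such as __consumer_offsets. If the map returned by Example 5 should contain only application topics, a caller could filter it as in this sketch; the prefix-based rule is an assumption, not part of the original project.

// Hypothetical filtering of internal topics from the listTopics result.
Map<String, List<PartitionInfo>> allTopics = getTopicPartitionInfoMap();
Map<String, List<PartitionInfo>> userTopics = new HashMap<>();
for (Map.Entry<String, List<PartitionInfo>> entry : allTopics.entrySet()) {
    if (!entry.getKey().startsWith("__")) { // skips internal topics such as __consumer_offsets
        userTopics.put(entry.getKey(), entry.getValue());
    }
}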