本文整理汇总了Java中kafka.javaapi.TopicMetadataResponse.topicsMetadata方法的典型用法代码示例。如果您正苦于以下问题:Java TopicMetadataResponse.topicsMetadata方法的具体用法?Java TopicMetadataResponse.topicsMetadata怎么用?Java TopicMetadataResponse.topicsMetadata使用的例子?那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类kafka.javaapi.TopicMetadataResponse的用法示例。
在下文中一共展示了TopicMetadataResponse.topicsMetadata方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getPartitionMetadata
import kafka.javaapi.TopicMetadataResponse; //导入方法依赖的package包/类
/**
 * Looks up the {@link PartitionMetadata} for one partition among the given topics.
 *
 * @param consumer    broker connection used to issue the metadata request
 * @param topics      topic names whose metadata should be fetched
 * @param partitionId id of the partition to locate
 * @return the matching partition metadata, or {@code null} if no partition
 *         matched or the metadata request failed
 */
public static PartitionMetadata getPartitionMetadata(final SimpleConsumer consumer, final List<String> topics, final int partitionId) {
    try {
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);
        for (TopicMetadata topicMetadata : response.topicsMetadata()) {
            for (PartitionMetadata partition : topicMetadata.partitionsMetadata()) {
                if (partition.partitionId() == partitionId) {
                    return partition;
                }
            }
        }
    } catch (Exception e) {
        // Best-effort lookup: log the failure with broker coordinates and fall through to null.
        LOG.warn("Unable to fetch partition meta data from host[{}:{}] [{}:{}]", consumer.host(), consumer.port(), topics, partitionId, e);
    }
    return null;
}
示例2: doesTopicExist
import kafka.javaapi.TopicMetadataResponse; //导入方法依赖的package包/类
/**
 * Checks whether the given topic exists on the broker.
 *
 * <p>The metadata request is sent with an empty topic list, which asks the
 * broker for metadata on all topics; the reply is then scanned for a match.
 * (NOTE(review): presumably this avoids naming the topic in the request so
 * broker-side topic auto-creation is not triggered — confirm against broker
 * configuration.)
 *
 * @param topic topic name to look for
 * @return {@code true} if the broker reports the topic, {@code false} otherwise
 */
public boolean doesTopicExist(String topic) {
    log.debug("Does Topic {} exist?", topic);
    SimpleConsumer consumer = new SimpleConsumer(host, port, soTimeout, bufferSize, clientId);
    try {
        List<String> topics = new ArrayList<>();
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);
        for (TopicMetadata item : response.topicsMetadata()) {
            if (item.topic().equals(topic)) {
                log.debug("Found Topic {}.", topic);
                return true;
            }
        }
        log.debug("Did not find Topic {}.", topic);
        return false;
    } finally {
        // BUG FIX: the consumer was never closed, leaking its broker socket on
        // every call. Close it on all paths, matching the pattern findLeader uses.
        consumer.close();
    }
}
示例3: findTopicMetadata
import kafka.javaapi.TopicMetadataResponse; //导入方法依赖的package包/类
/**
 * Fetches the metadata for exactly one topic, delegating retry handling to
 * {@code execute(Operation, int)}.
 *
 * @param topic   topic name to look up
 * @param retries how many attempts the retry wrapper may make
 * @return the single {@link TopicMetadata} entry returned by the broker
 * @throws Exception if the broker does not return exactly one metadata entry,
 *                   or if all retries are exhausted
 */
public TopicMetadata findTopicMetadata(final String topic, int retries) throws Exception {
    Operation<TopicMetadata> lookup = new Operation<TopicMetadata>() {
        @Override
        public TopicMetadata execute() throws Exception {
            TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList(topic));
            TopicMetadataResponse response = consumer.send(request);
            List<TopicMetadata> found = response.topicsMetadata();
            if (found.size() != 1) {
                throw new Exception("Expect to find 1 topic " + topic + ", but found " + found.size());
            }
            return found.get(0);
        }
    };
    return execute(lookup, retries);
}
示例4: getTopicMetaData
import kafka.javaapi.TopicMetadataResponse; //导入方法依赖的package包/类
/**
 * Returns the {@link TopicMetadata} for each of the given topics, using the
 * shared default {@link SimpleConsumer} from {@code SimpleKafkaHelper}.
 *
 * @param topics topic names to look up
 * @return the metadata list the broker returned for the requested topics
 */
public static List<TopicMetadata> getTopicMetaData(List<String> topics) {
    SimpleConsumer consumer = SimpleKafkaHelper.getDefaultSimpleConsumer();
    TopicMetadataRequest request = new TopicMetadataRequest(topics);
    return consumer.send(request).topicsMetadata();
}
示例5: listTopics
import kafka.javaapi.TopicMetadataResponse; //导入方法依赖的package包/类
/**
 * Lists the names of all topics known to the broker.
 *
 * <p>An empty topic list in the metadata request asks the broker for metadata
 * on every topic.
 *
 * @return topic names reported by the broker (possibly empty, never null)
 */
public List<String> listTopics() {
    log.debug("List Topics");
    SimpleConsumer consumer = new SimpleConsumer(host, port, soTimeout, bufferSize, clientId);
    try {
        // The same list doubles as the (empty) request payload and the result accumulator.
        List<String> topics = new ArrayList<>();
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);
        for (TopicMetadata item : response.topicsMetadata()) {
            topics.add(item.topic());
        }
        log.debug("Found {} Topics", topics.size());
        return topics;
    } finally {
        // BUG FIX: the consumer was never closed, leaking its broker socket on
        // every call. Close it on all paths, matching the pattern findLeader uses.
        consumer.close();
    }
}
示例6: findLeader
import kafka.javaapi.TopicMetadataResponse; //导入方法依赖的package包/类
/**
 * Finds the broker currently acting as leader for the given topic partition
 * by querying the configured seed broker for topic metadata.
 *
 * @param topicPartition topic/partition pair to resolve
 * @return host and port of the leader, or {@code null} if the partition was
 *         not found or no leader is currently elected
 */
private HostAndPort findLeader(TopicPartition topicPartition) {
    SimpleConsumer consumer = null;
    try {
        LOG.debug("looking up leader for topic {} partition {}", topicPartition.getTopic(), topicPartition.getPartition());
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "leaderLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topicPartition.getTopic());
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);
        for (TopicMetadata item : response.topicsMetadata()) {
            for (PartitionMetadata part : item.partitionsMetadata()) {
                if (part.partitionId() == topicPartition.getPartition()) {
                    // BUG FIX: part.leader() is null while a leader election is in
                    // progress (see how getSplits-style callers guard it); the
                    // original dereferenced it unconditionally and threw an NPE.
                    if (part.leader() == null) {
                        LOG.debug("no leader elected yet for topic {} partition {}", topicPartition.getTopic(), topicPartition.getPartition());
                        return null;
                    }
                    return HostAndPort.fromParts(part.leader().host(), part.leader().port());
                }
            }
        }
    } finally {
        // Always release the seed-broker connection.
        if (consumer != null) {
            consumer.close();
        }
    }
    return null;
}
示例7: getNumPartitionsForTopic
import kafka.javaapi.TopicMetadataResponse; //导入方法依赖的package包/类
@Override
public Integer getNumPartitionsForTopic(String topic){
TopicMetadataRequest topicRequest = new TopicMetadataRequest(Arrays.asList(topic));
TopicMetadataResponse topicResponse = simpleConsumer.send(topicRequest);
for (TopicMetadata topicMetadata : topicResponse.topicsMetadata()) {
if (topic.equals(topicMetadata.topic())) {
int partitionSize = topicMetadata.partitionsMetadata().size();
logger.debug("Partition size found ({}) for {} topic", partitionSize, topic);
return partitionSize;
}
}
logger.warn("Metadata info not found!. TOPIC {}", topic);
return null;
}
示例8: getSplits
import kafka.javaapi.TopicMetadataResponse; //导入方法依赖的package包/类
/**
 * Builds the {@link ConnectorSplit}s for a Kafka-backed table: one split per
 * (partition, offset-range) pair, each pinned to that partition's leader broker.
 *
 * <p>Partitions with no current leader (election in progress) are skipped with
 * a warning rather than failing the whole query.
 */
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
KafkaTableHandle kafkaTableHandle = convertLayout(layout).getTable();
// Metadata can come from any node; pick one at random for the initial request.
SimpleConsumer simpleConsumer = consumerManager.getConsumer(selectRandom(nodes));
TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(ImmutableList.of(kafkaTableHandle.getTopicName()));
TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest);
ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
for (TopicMetadata metadata : topicMetadataResponse.topicsMetadata()) {
for (PartitionMetadata part : metadata.partitionsMetadata()) {
log.debug("Adding Partition %s/%s", metadata.topic(), part.partitionId());
Broker leader = part.leader();
if (leader == null) { // Leader election going on...
log.warn("No leader for partition %s/%s found!", metadata.topic(), part.partitionId());
continue;
}
// Offsets must be read from the partition leader, not the random metadata node.
HostAddress partitionLeader = HostAddress.fromParts(leader.host(), leader.port());
SimpleConsumer leaderConsumer = consumerManager.getConsumer(partitionLeader);
// Kafka contains a reverse list of "end - start" pairs for the splits
// NOTE(review): findAllOffsets appears to return offsets in descending order —
// the loop below walks adjacent pairs from the tail so each split covers
// [offsets[i], offsets[i-1]); confirm against findAllOffsets.
List<HostAddress> partitionNodes = ImmutableList.copyOf(Lists.transform(part.isr(), KafkaSplitManager::brokerToHostAddress));
long[] offsets = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId());
for (int i = offsets.length - 1; i > 0; i--) {
KafkaSplit split = new KafkaSplit(
connectorId,
metadata.topic(),
kafkaTableHandle.getKeyDataFormat(),
kafkaTableHandle.getMessageDataFormat(),
part.partitionId(),
offsets[i],
offsets[i - 1],
partitionNodes);
splits.add(split);
}
}
}
return new FixedSplitSource(connectorId, splits.build());
}