本文整理汇总了Java中kafka.javaapi.TopicMetadataResponse类的典型用法代码示例。如果您正苦于以下问题:Java TopicMetadataResponse类的具体用法?Java TopicMetadataResponse怎么用?Java TopicMetadataResponse使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
TopicMetadataResponse类属于kafka.javaapi包,在下文中一共展示了TopicMetadataResponse类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getPartitionMetadata
import kafka.javaapi.TopicMetadataResponse; //导入依赖的package包/类
/**
 * Finds the metadata for a single partition id among the given topics.
 *
 * @param consumer    connected SimpleConsumer used to issue the metadata request
 * @param topics      topics whose metadata should be searched
 * @param partitionId the partition id to look for
 * @return the matching PartitionMetadata, or null when no partition matched
 *         or the request failed (failures are logged, not rethrown)
 */
public static PartitionMetadata getPartitionMetadata(final SimpleConsumer consumer, final List<String> topics, final int partitionId) {
    try {
        // One round trip fetches metadata for every requested topic.
        TopicMetadataResponse response = consumer.send(new TopicMetadataRequest(topics));
        for (TopicMetadata topicMetadata : response.topicsMetadata()) {
            for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                if (partitionMetadata.partitionId() == partitionId) {
                    return partitionMetadata;
                }
            }
        }
    } catch (Exception e) {
        LOG.warn("Unable to fetch partition meta data from host[{}:{}] [{}:{}]", consumer.host(), consumer.port(), topics, partitionId, e);
    }
    return null;
}
示例2: doesTopicExist
import kafka.javaapi.TopicMetadataResponse; //导入依赖的package包/类
/**
 * Checks whether the given topic exists on the broker.
 *
 * @param topic topic name to look for
 * @return true if the broker reports metadata for the topic, false otherwise
 */
public boolean doesTopicExist(String topic) {
    log.debug("Does Topic {} exist?", topic);
    SimpleConsumer consumer = new SimpleConsumer(host, port, soTimeout, bufferSize, clientId);
    try {
        // An empty topic list asks the broker for metadata on ALL topics.
        List<String> topics = new ArrayList<>();
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);
        List<TopicMetadata> metadata = response.topicsMetadata();
        for (TopicMetadata item : metadata) {
            if (item.topic().equals(topic)) {
                log.debug("Found Topic {}.", topic);
                return true;
            }
        }
        log.debug("Did not find Topic {}.", topic);
        return false;
    } finally {
        // Fix: the consumer was previously never closed, leaking a broker
        // connection (socket) on every call.
        consumer.close();
    }
}
示例3: getNumPartitions
import kafka.javaapi.TopicMetadataResponse; //导入依赖的package包/类
/**
 * Returns the number of partitions for the given topic, as reported by the
 * configured seed broker.
 *
 * @param topic topic name to look up
 * @return the topic's partition count
 * @throws RuntimeException if the broker returns metadata for anything other
 *         than exactly one topic
 */
public int getNumPartitions(String topic) {
    SimpleConsumer consumer = null;
    try {
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "partitionLookup");
        List<String> singleTopic = new ArrayList<String>();
        singleTopic.add(topic);
        TopicMetadataResponse response = consumer.send(new TopicMetadataRequest(singleTopic));
        List<TopicMetadata> topicsMetadata = response.topicsMetadata();
        // We asked for one topic, so anything but one entry is a protocol surprise.
        if (topicsMetadata.size() != 1) {
            throw new RuntimeException("Expected one metadata for topic " + topic + " found " +
                topicsMetadata.size());
        }
        return topicsMetadata.get(0).partitionsMetadata().size();
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
示例4: findTopicMetadata
import kafka.javaapi.TopicMetadataResponse; //导入依赖的package包/类
/**
 * Fetches metadata for a single topic, retrying the lookup up to
 * {@code retries} times via the shared execute() retry helper.
 *
 * @param topic   topic whose metadata is requested
 * @param retries number of retry attempts passed to execute()
 * @return the single TopicMetadata entry returned by the broker
 * @throws Exception if the broker does not return exactly one entry, or the
 *         retries are exhausted
 */
public TopicMetadata findTopicMetadata(final String topic, int retries) throws Exception {
    return execute(new Operation<TopicMetadata>() {
        @Override
        public TopicMetadata execute() throws Exception {
            TopicMetadataRequest request =
                new TopicMetadataRequest(Collections.singletonList(topic));
            List<TopicMetadata> metadataList = consumer.send(request).topicsMetadata();
            if (metadataList.size() != 1) {
                throw new Exception("Expect to find 1 topic " + topic + ", but found " + metadataList.size());
            }
            return metadataList.get(0);
        }
    }, retries);
}
示例5: getTopicMetaData
import kafka.javaapi.TopicMetadataResponse; //导入依赖的package包/类
/** Returns the TopicMetadata entries for the given list of topics.
*
* @param topics topic names whose metadata should be fetched
* @return one TopicMetadata entry per topic, as reported by the broker */
public static List<TopicMetadata> getTopicMetaData(List<String> topics) {
// NOTE(review): the default consumer is deliberately NOT closed here —
// presumably SimpleKafkaHelper owns and reuses it; confirm before changing.
SimpleConsumer simpleConsumer = SimpleKafkaHelper.getDefaultSimpleConsumer();
TopicMetadataRequest metaDataRequest = new TopicMetadataRequest(topics);
TopicMetadataResponse resp = simpleConsumer.send(metaDataRequest);
List<TopicMetadata> metadatas = resp.topicsMetadata();
return metadatas;
}
示例6: listTopics
import kafka.javaapi.TopicMetadataResponse; //导入依赖的package包/类
/**
 * Lists the names of all topics known to the broker.
 *
 * @return the names of every topic in the cluster
 */
public List<String> listTopics() {
    log.debug("List Topics");
    SimpleConsumer consumer = new SimpleConsumer(host, port, soTimeout, bufferSize, clientId);
    try {
        // An empty topic list asks the broker for metadata on ALL topics.
        TopicMetadataRequest request = new TopicMetadataRequest(new ArrayList<String>());
        TopicMetadataResponse response = consumer.send(request);
        List<String> topics = new ArrayList<>();
        for (TopicMetadata item : response.topicsMetadata()) {
            topics.add(item.topic());
        }
        log.debug("Found {} Topics", topics.size());
        return topics;
    } finally {
        // Fix: the consumer was previously never closed, leaking a broker
        // connection (socket) on every call.
        consumer.close();
    }
}
示例7: findLeader
import kafka.javaapi.TopicMetadataResponse; //导入依赖的package包/类
/**
 * Locates the broker that currently leads the given topic partition.
 *
 * @param topicPartition topic/partition pair to resolve
 * @return host and port of the partition leader, or null when the partition
 *         was not found in the metadata response
 */
private HostAndPort findLeader(TopicPartition topicPartition) {
    SimpleConsumer consumer = null;
    try {
        LOG.debug("looking up leader for topic {} partition {}", topicPartition.getTopic(), topicPartition.getPartition());
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "leaderLookup");
        List<String> singleTopic = new ArrayList<String>();
        singleTopic.add(topicPartition.getTopic());
        TopicMetadataResponse response = consumer.send(new TopicMetadataRequest(singleTopic));
        for (TopicMetadata topicMetadata : response.topicsMetadata()) {
            for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                if (partitionMetadata.partitionId() != topicPartition.getPartition()) {
                    continue;
                }
                return HostAndPort.fromParts(
                    partitionMetadata.leader().host(), partitionMetadata.leader().port());
            }
        }
        return null;
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
示例8: getNumPartitionsForTopic
import kafka.javaapi.TopicMetadataResponse; //导入依赖的package包/类
@Override
public Integer getNumPartitionsForTopic(String topic){
TopicMetadataRequest topicRequest = new TopicMetadataRequest(Arrays.asList(topic));
TopicMetadataResponse topicResponse = simpleConsumer.send(topicRequest);
for (TopicMetadata topicMetadata : topicResponse.topicsMetadata()) {
if (topic.equals(topicMetadata.topic())) {
int partitionSize = topicMetadata.partitionsMetadata().size();
logger.debug("Partition size found ({}) for {} topic", partitionSize, topic);
return partitionSize;
}
}
logger.warn("Metadata info not found!. TOPIC {}", topic);
return null;
}
示例9: send
import kafka.javaapi.TopicMetadataResponse; //导入依赖的package包/类
@Override
public TopicMetadataResponse send(TopicMetadataRequest request) {
java.util.List<String> topics = request.topics();
TopicMetadata[] topicMetadataArray = new TopicMetadata[topics.size()];
for (int i = 0; i < topicMetadataArray.length; i++) {
String topic = topics.get(i);
if (!topic.equals(topicName)) {
topicMetadataArray[i] = new TopicMetadata(topic, null, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
} else {
PartitionMetadata[] partitionMetadataArray = new PartitionMetadata[partitionCount];
for (int j = 0; j < partitionCount; j++) {
java.util.List<BrokerEndPoint> emptyJavaList = Collections.emptyList();
List<BrokerEndPoint> emptyScalaList = JavaConversions.asScalaBuffer(emptyJavaList).toList();
partitionMetadataArray[j] = new PartitionMetadata(j, Some.apply(brokerArray[partitionLeaderIndices[j]]),
emptyScalaList, emptyScalaList, Errors.NONE.code());
}
Seq<PartitionMetadata> partitionsMetadata = List.fromArray(partitionMetadataArray);
topicMetadataArray[i] = new TopicMetadata(topic, partitionsMetadata, Errors.NONE.code());
}
}
Seq<BrokerEndPoint> brokers = List.fromArray(brokerArray);
Seq<TopicMetadata> topicsMetadata = List.fromArray(topicMetadataArray);
return new TopicMetadataResponse(new kafka.api.TopicMetadataResponse(brokers, topicsMetadata, -1));
}
示例10: getSplits
import kafka.javaapi.TopicMetadataResponse; //导入依赖的package包/类
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
KafkaTableHandle kafkaTableHandle = convertLayout(layout).getTable();
SimpleConsumer simpleConsumer = consumerManager.getConsumer(selectRandom(nodes));
TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(ImmutableList.of(kafkaTableHandle.getTopicName()));
TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest);
ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
for (TopicMetadata metadata : topicMetadataResponse.topicsMetadata()) {
for (PartitionMetadata part : metadata.partitionsMetadata()) {
log.debug("Adding Partition %s/%s", metadata.topic(), part.partitionId());
Broker leader = part.leader();
if (leader == null) { // Leader election going on...
log.warn("No leader for partition %s/%s found!", metadata.topic(), part.partitionId());
continue;
}
HostAddress partitionLeader = HostAddress.fromParts(leader.host(), leader.port());
SimpleConsumer leaderConsumer = consumerManager.getConsumer(partitionLeader);
// Kafka contains a reverse list of "end - start" pairs for the splits
List<HostAddress> partitionNodes = ImmutableList.copyOf(Lists.transform(part.isr(), KafkaSplitManager::brokerToHostAddress));
long[] offsets = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId());
for (int i = offsets.length - 1; i > 0; i--) {
KafkaSplit split = new KafkaSplit(
connectorId,
metadata.topic(),
kafkaTableHandle.getKeyDataFormat(),
kafkaTableHandle.getMessageDataFormat(),
part.partitionId(),
offsets[i],
offsets[i - 1],
partitionNodes);
splits.add(split);
}
}
}
return new FixedSplitSource(connectorId, splits.build());
}
示例11: getPartitionCount
import kafka.javaapi.TopicMetadataResponse; //导入依赖的package包/类
/**
 * Returns the partition count for a topic, retrying transient Kafka errors
 * until timeoutMillis elapses.
 *
 * Retry policy, driven by the metadata error code:
 *  - NONE: return the partition count;
 *  - LEADER_NOT_AVAILABLE: sleep 100 ms and retry (leader election in progress);
 *  - INVALID_TOPIC_EXCEPTION: fail immediately (bad topic name);
 *  - UNKNOWN_TOPIC_OR_PARTITION: retry up to 10 times (topic creation can lag),
 *    then fail;
 *  - anything else: retry up to 10 times, then rethrow via
 *    exceptionForKafkaErrorCode.
 *
 * NOTE(review): TimeoutException is thrown without a throws clause, so it is
 * presumably a project-local unchecked type, not java.util.concurrent's — verify.
 */
public synchronized int getPartitionCount(String topic, long timeoutMillis) {
int unknownTopicReplyCount = 0;
final int MAX_UNKNOWN_TOPIC_REPLY_COUNT = 10;
int kafkaErrorCount = 0;
final int MAX_KAFKA_ERROR_COUNT = 10;
final long endTime = System.currentTimeMillis() + timeoutMillis;
while(System.currentTimeMillis() < endTime) {
// Try to get into a state where we're connected to Kafka
while (!_currentState.isConnectedToKafkaBroker() && System.currentTimeMillis() < endTime) {
_currentState.process();
}
if (endTime <= System.currentTimeMillis() && !_currentState.isConnectedToKafkaBroker()) {
throw new TimeoutException("Failed to get the partition count for topic " + topic + " within " + timeoutMillis
+ " ms");
}
// Send the metadata request to Kafka
TopicMetadataResponse topicMetadataResponse = null;
try {
topicMetadataResponse = _simpleConsumer.send(new TopicMetadataRequest(Collections.singletonList(topic)));
} catch (Exception e) {
// Let the state machine record the failure (likely reconnect) and retry.
_currentState.handleConsumerException(e);
continue;
}
// We requested one topic; assumes the broker echoes at least one entry back.
final TopicMetadata topicMetadata = topicMetadataResponse.topicsMetadata().get(0);
final short errorCode = topicMetadata.errorCode();
if (errorCode == Errors.NONE.code()) {
return topicMetadata.partitionsMetadata().size();
} else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) {
// If there is no leader, it'll take some time for a new leader to be elected, wait 100 ms before retrying
Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
} else if (errorCode == Errors.INVALID_TOPIC_EXCEPTION.code()) {
throw new RuntimeException("Invalid topic name " + topic);
} else if (errorCode == Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) {
if (MAX_UNKNOWN_TOPIC_REPLY_COUNT < unknownTopicReplyCount) {
throw new RuntimeException("Topic " + topic + " does not exist");
} else {
// Kafka topic creation can sometimes take some time, so we'll retry after a little bit
unknownTopicReplyCount++;
Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
}
} else {
// Retry after a short delay
kafkaErrorCount++;
if (MAX_KAFKA_ERROR_COUNT < kafkaErrorCount) {
throw exceptionForKafkaErrorCode(errorCode);
}
Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
}
}
// Deadline passed without a definitive answer from Kafka.
throw new TimeoutException();
}