本文整理汇总了Java中kafka.javaapi.PartitionMetadata类的典型用法代码示例。如果您正苦于以下问题:Java PartitionMetadata类的具体用法?Java PartitionMetadata怎么用?Java PartitionMetadata使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
PartitionMetadata类属于kafka.javaapi包,在下文中一共展示了PartitionMetadata类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getPartitionMetadata
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
/**
 * Fetches the metadata for a single partition by sending a topic-metadata request
 * through the given consumer and scanning the response for a matching partition id.
 *
 * @param consumer    connected SimpleConsumer used to issue the metadata request
 * @param topics      topics to request metadata for
 * @param partitionId partition id to look up
 * @return the matching PartitionMetadata, or {@code null} if not found or on error
 */
public static PartitionMetadata getPartitionMetadata(final SimpleConsumer consumer, final List<String> topics, final int partitionId) {
    try {
        TopicMetadataResponse response = consumer.send(new TopicMetadataRequest(topics));
        for (TopicMetadata topicMetadata : response.topicsMetadata()) {
            for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                if (partitionMetadata.partitionId() == partitionId) {
                    return partitionMetadata;
                }
            }
        }
    } catch (Exception e) {
        // Best-effort lookup: log and fall through to the null return.
        LOG.warn("Unable to fetch partition meta data from host[{}:{}] [{}:{}]", consumer.host(), consumer.port(), topics, partitionId, e);
    }
    return null;
}
示例2: updateLeaderMap
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
/**
 * Refreshes the partition-to-leader map by querying each known broker for the
 * metadata of the audit topics. Failures against one broker are logged and the
 * next broker is tried; entries for unreachable partitions are left unchanged.
 */
private void updateLeaderMap() {
    for (String broker : brokerList) {
        try {
            SimpleConsumer consumer = getSimpleConsumer(broker);
            TopicMetadataRequest req = new TopicMetadataRequest(auditTopics);
            kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
            List<TopicMetadata> metaData = resp.topicsMetadata();
            for (TopicMetadata tmd : metaData) {
                for (PartitionMetadata pmd : tmd.partitionsMetadata()) {
                    // A partition can temporarily have no leader (e.g. during a
                    // leader election). Skip it rather than NPE inside getHostPort(),
                    // which would abort processing of the remaining partitions.
                    if (pmd.leader() == null) {
                        continue;
                    }
                    TopicAndPartition topicAndPartition = new TopicAndPartition(tmd.topic(), pmd.partitionId());
                    partitionLeader.put(topicAndPartition, getHostPort(pmd.leader()));
                }
            }
        } catch (Exception e) {
            logger.warn("Got exception to get metadata from broker=" + broker, e);
        }
    }
}
示例3: getOffset
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
/**
 * Queries the partition's leader broker for the latest available offset
 * (whichTime = -1, one offset) and wraps it in an OffsetInfo.
 *
 * @param topic     topic name
 * @param partition metadata identifying the partition and its leader
 * @return OffsetInfo holding the latest offset for the partition
 */
private static OffsetInfo getOffset(String topic, PartitionMetadata partition) {
    Broker leader = partition.leader();
    SimpleConsumer consumer = new SimpleConsumer(leader.host(), leader.port(), 10000, 1000000,
        "com.rekko.newrelic.storm.kafka");
    try {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition.partitionId());
        // -1 == latest offset; ask for exactly one offset back.
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
            new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(-1, 1));
        OffsetRequest req = new OffsetRequest(requestInfo, (short) 0, "com.rekko.newrelic.storm.kafka");
        OffsetResponse resp = consumer.getOffsetsBefore(req);
        OffsetInfo result = new OffsetInfo();
        result.offset = resp.offsets(topic, partition.partitionId())[0];
        return result;
    } finally {
        // Always release the short-lived consumer's connection.
        consumer.close();
    }
}
示例4: findNewLeader
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
/**
 * Polls for a new partition leader, retrying up to 3 times with a 1-second pause.
 *
 * @param brokerInfo broker to query for leader metadata
 * @param topic      topic name
 * @param partition  partition id
 * @return host name of the new leader
 * @throws Exception if no leader is found after all retries
 */
private String findNewLeader(KafkaBrokerInfo brokerInfo, String topic, int partition) throws Exception {
    for (int i = 0; i < 3; i++) {
        PartitionMetadata metadata = findLeader(brokerInfo, topic, partition);
        // Retry while metadata is unavailable or the partition has no elected leader.
        if (metadata != null && metadata.leader() != null) {
            return metadata.leader().host();
        }
        try {
            Thread.sleep(1000);
        } catch (InterruptedException ie) {
            // Fix: don't swallow the interrupt — restore the thread's interrupt
            // status so callers can observe and react to the interruption.
            Thread.currentThread().interrupt();
        }
    }
    throw new Exception("Unable to find new leader after Broker failure. Exiting");
}
示例5: findNewLeader
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
/**
 * Polls for a new partition leader, retrying up to 3 times with a 1-second pause.
 *
 * @param brokerInfoList brokers to query for leader metadata
 * @param topic          topic name
 * @param partition      partition id
 * @return host name of the new leader
 * @throws Exception if no leader is found after all retries
 */
private String findNewLeader(List<KafkaBrokerInfo> brokerInfoList, String topic, int partition) throws Exception {
    for (int i = 0; i < 3; i++) {
        PartitionMetadata metadata = findLeader(brokerInfoList, topic, partition);
        // Retry while metadata is unavailable or the partition has no elected leader.
        if (metadata != null && metadata.leader() != null) {
            return metadata.leader().host();
        }
        try {
            Thread.sleep(1000);
        } catch (InterruptedException ie) {
            // Fix: don't swallow the interrupt — restore the thread's interrupt
            // status so callers can observe and react to the interruption.
            Thread.currentThread().interrupt();
        }
    }
    throw new Exception("Unable to find new leader after Broker failure. Exiting");
}
示例6: getPartitionOffset
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
/**
 * Returns a map of "topic-partition" to offset for every partition of the topic,
 * reading the offset at {@code whichTime} from each partition's leader broker.
 *
 * @param brokers   broker host-to-port map used to discover partition leaders
 * @param topic     topic name
 * @param whichTime offset timestamp selector (e.g. -1 latest, -2 earliest)
 * @return map keyed by "topic-partition" with the offset for each partition
 */
private static Map<String, Long> getPartitionOffset(Map<String, Integer> brokers,
    String topic, long whichTime) {
    Map<String, Long> ret = new HashMap<String, Long>();
    TreeMap<Integer, PartitionMetadata> metadatas = findLeader(brokers, topic);
    // Fix: offsets are longs; accumulating them in an int silently truncates
    // and can overflow for large topics.
    long sum = 0;
    for (Entry<Integer, PartitionMetadata> entry : metadatas.entrySet()) {
        int partition = entry.getKey();
        String leadBroker = entry.getValue().leader().host();
        long readOffset = getLastOffset(leadBroker, entry.getValue().leader().port(),
            topic, partition, whichTime);
        sum += readOffset;
        ret.put(topic + "-" + partition, readOffset);
    }
    LOG.info("topic( {} ) records sum: {}", topic, sum);
    return ret;
}
示例7: refreshTopicMetadata
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
/**
 * Refreshes the leader information for the given partition by asking each broker
 * in turn for the topic's metadata; the first broker that returns metadata wins.
 */
private void refreshTopicMetadata(KafkaPartition partition) {
    for (String broker : KafkaWrapper.this.getBrokers()) {
        List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
        if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
            TopicMetadata topicMetadata = topicMetadataList.get(0);
            for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                if (partitionMetadata.partitionId() == partition.getId()) {
                    // Fix: the leader can be null while an election is in progress;
                    // previously this dereferenced it unconditionally and threw NPE.
                    if (partitionMetadata.leader() != null) {
                        partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
                            partitionMetadata.leader().port());
                    }
                    break;
                }
            }
            break;
        }
    }
}
示例8: execute
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
@Override
public ResultSet execute(ConsumerConnection connection, String... params) throws TopicNotFoundException, NoPartitionsException
{
List<PartitionMetadata> metadata = null;
List<String> columns = new ArrayList<String>();
List<Integer> types = new ArrayList<Integer>();
List<String> typeClassNames = new ArrayList<String>();
columns.add("leader");
types.add(Types.BINARY);
typeClassNames.add("kafka.cluster.Broker");
columns.add("partitionId");
types.add(Types.INTEGER);
typeClassNames.add("java.lang.Integer");
columns.add("sizeInBytes");
types.add(Types.INTEGER);
typeClassNames.add("java.lang.Integer");
metadata = connection.getAllPartsMetadata();
return new PartitionMetadataResultSet(metadata, new ResultSetMetaData(columns, types, typeClassNames));
}
示例9: main1
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
/**
 * Demo entry point: fetches metadata for the "applog" topic and prints each
 * partition's id, ISR, leader, and replica set to stdout.
 */
public static void main1(String[] args) throws Exception{
    List<String> topics = new ArrayList<String>();
    topics.add("applog");
    for (TopicMetadata meta : getTopicMetaData(topics)) {
        System.out.println(meta.topic());
        List<PartitionMetadata> partitions = meta.partitionsMetadata();
        System.out.println(partitions.size());
        for (PartitionMetadata partition : partitions) {
            System.out.println("id: " + partition.partitionId());
            System.out.println("isr: " + partition.isr());
            System.out.println("leader: " + partition.leader());
            System.out.println("replicas: " + partition.replicas());
        }
    }
}
示例10: findNewLeader
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
/**
 * Finds the new leader for a partition after a broker failure, retrying up to
 * 3 times. On the first attempt an unchanged leader is treated as "not yet
 * recovered" and we sleep; on later attempts the same host is accepted.
 *
 * @param a_oldLeader host of the previous leader
 * @param a_topic     topic name
 * @param a_partition partition id
 * @param a_port      broker port to query
 * @return host name of the new leader
 * @throws Exception if no leader is found after all retries
 */
private String findNewLeader(String a_oldLeader, String a_topic, int a_partition, int a_port) throws Exception {
    for (int i = 0; i < 3; i++) {
        boolean goToSleep = false;
        PartitionMetadata metadata = findLeader(m_replicaBrokers, a_port, a_topic, a_partition);
        if (metadata == null || metadata.leader() == null) {
            goToSleep = true;
        } else if (a_oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
            // first time through if the leader hasn't changed give ZooKeeper a second to recover
            // second time, assume the broker did recover before failover, or it was a non-Broker issue
            goToSleep = true;
        } else {
            return metadata.leader().host();
        }
        if (goToSleep) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ie) {
                // Fix: restore interrupt status instead of silently swallowing it.
                Thread.currentThread().interrupt();
            }
        }
    }
    LOG.info("Unable to find new leader after Broker failure. Exiting");
    throw new Exception("Unable to find new leader after Broker failure. Exiting");
}
示例11: findNewLeader
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
/**
 * Attempts (up to 3 times) to discover the new leader for a partition after the
 * old leader failed. The first attempt treats an unchanged leader as "not yet
 * recovered" and waits; later attempts accept the same host.
 *
 * @param oldLeader previous leader's host and port
 * @param topic     topic name
 * @param partition partition id
 * @return host and port of the new leader
 * @throws StageException if no leader could be found after all retries
 */
private HostAndPort findNewLeader(HostAndPort oldLeader, String topic, int partition) throws StageException {
    //try 3 times to find a new leader
    for (int attempt = 0; attempt < 3; attempt++) {
        PartitionMetadata metadata = getPartitionMetadata(replicaBrokers, topic, partition);
        boolean leaderUsable =
            metadata != null
            && metadata.leader() != null
            && !(oldLeader.getHostText().equalsIgnoreCase(metadata.leader().host()) && attempt == 0);
        if (leaderUsable) {
            return HostAndPort.fromParts(metadata.leader().host(), metadata.leader().port());
        }
        //leader has not yet changed or is unknown, give zookeeper sometime
        ThreadUtil.sleep(ONE_SECOND);
    }
    LOG.error(KafkaErrors.KAFKA_21.getMessage());
    throw new StageException(KafkaErrors.KAFKA_21);
}
示例12: findNewLeader
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
/**
 * Finds the new leader for a partition after a broker failure, retrying up to
 * 3 times. On the first attempt an unchanged leader is treated as "not yet
 * recovered" and we sleep; on later attempts the same host is accepted.
 *
 * @param a_oldLeader host of the previous leader
 * @param a_topic     topic name
 * @param a_partition partition id
 * @param a_port      broker port to query
 * @return host name of the new leader
 * @throws Exception if no leader is found after all retries
 */
private String findNewLeader(String a_oldLeader, String a_topic, int a_partition, int a_port) throws Exception {
    for (int i = 0; i < 3; i++) {
        boolean goToSleep = false;
        PartitionMetadata metadata = findLeader(m_replicaBrokers, a_port, a_topic, a_partition);
        if (metadata == null || metadata.leader() == null) {
            goToSleep = true;
        } else if (a_oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
            // first time through if the leader hasn't changed give ZooKeeper a second to recover
            // second time, assume the broker did recover before failover, or it was a non-Broker issue
            goToSleep = true;
        } else {
            return metadata.leader().host();
        }
        if (goToSleep) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ie) {
                // Fix: restore interrupt status instead of silently swallowing it.
                Thread.currentThread().interrupt();
            }
        }
    }
    System.out.println("Unable to find new leader after Broker failure. Exiting");
    throw new Exception("Unable to find new leader after Broker failure. Exiting");
}
示例13: findNewLeader
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
/**
 * Blocks until a new leader for the current topic/partition is found, retrying
 * every second. On the first attempt an unchanged leader is rejected (give
 * ZooKeeper time to recover); afterwards the same broker is accepted. Every
 * 5th retry after the 3rd, the replica-broker list is widened to all brokers.
 *
 * @param oldLeader previously known leader, or null if none
 * @return the (possibly unchanged) current leader broker
 * @throws InterruptedException if interrupted while sleeping between retries
 */
private Broker findNewLeader(Broker oldLeader) throws InterruptedException {
    long retryCnt = 0;
    while (true) {
        PartitionMetadata metadata = findLeader();
        logger.debug("findNewLeader - meta leader {}, previous leader {}", metadata, oldLeader);
        // Accept the leader when it exists AND either (a) we had no previous
        // leader, (b) it differs from the old leader, or (c) this is a retry
        // (retryCnt != 0) so the old broker is assumed to have recovered.
        if (metadata != null && metadata.leader() != null && (oldLeader == null ||
            (!(oldLeader.host().equalsIgnoreCase(metadata.leader().host()) &&
                (oldLeader.port() == metadata.leader().port())) || retryCnt != 0))) {
            // first time through if the leader hasn't changed give ZooKeeper a second to recover
            // second time, assume the broker did recover before failover, or it was a non-Broker issue
            logger.info("findNewLeader - using new leader {} from meta data, previous leader {}", metadata.leader(), oldLeader);
            return metadata.leader();
        }
        //TODO: backoff retry
        Thread.sleep(1000L);
        retryCnt ++;
        // if could not find the leader for current replicaBrokers, let's try to find one via allBrokers
        if (retryCnt >= 3 && (retryCnt - 3) % 5 == 0) {
            // Fix: corrected typo "can nof" in the warning message.
            logger.warn("cannot find leader for {} - {} after {} retries", topic, partitionId, retryCnt);
            replicaBrokers.clear();
            replicaBrokers.addAll(allBrokers);
        }
    }
}
示例14: getPartitionsForTopic
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
/**
 * Builds the list of KafkaPartition descriptors for a topic. The whole topic is
 * rejected (empty list) if ANY partition has null metadata or no leader, so
 * callers never see a partially-usable topic.
 *
 * @param topicMetadata metadata response for one topic
 * @return partitions with id, topic name, and leader info; empty if any partition is unusable
 */
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
    List<KafkaPartition> partitions = Lists.newArrayList();
    for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
        if (null == partitionMetadata) {
            LOG.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
            return Collections.emptyList();
        }
        if (null == partitionMetadata.leader()) {
            // Fix: corrected typo "metatada" -> "metadata" in the error message.
            LOG.error(
                "Ignoring topic with null partition leader " + topicMetadata.topic() + " metadata=" + partitionMetadata);
            return Collections.emptyList();
        }
        partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
            .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
            .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
    }
    return partitions;
}
示例15: getPartitionsForTopic
import kafka.javaapi.PartitionMetadata; //导入依赖的package包/类
/**
 * Builds the list of KafkaPartition descriptors for a topic. The whole topic is
 * rejected (empty list) if ANY partition has null metadata or no leader, so
 * callers never see a partially-usable topic.
 *
 * @param topicMetadata metadata response for one topic
 * @return partitions with id, topic name, and leader info; empty if any partition is unusable
 */
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
    List<KafkaPartition> partitions = Lists.newArrayList();
    for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
        if (null == partitionMetadata) {
            log.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
            return Collections.emptyList();
        }
        if (null == partitionMetadata.leader()) {
            // Fix: corrected typo "metatada" -> "metadata" in the error message.
            log.error("Ignoring topic with null partition leader " + topicMetadata.topic() + " metadata="
                + partitionMetadata);
            return Collections.emptyList();
        }
        partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
            .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
            .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
    }
    return partitions;
}