本文整理汇总了Java中kafka.javaapi.PartitionMetadata.partitionId方法的典型用法代码示例。如果您正苦于以下问题:Java PartitionMetadata.partitionId方法的具体用法?Java PartitionMetadata.partitionId怎么用?Java PartitionMetadata.partitionId使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类kafka.javaapi.PartitionMetadata
的用法示例。
在下文中一共展示了PartitionMetadata.partitionId方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getPartitionMetadata
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/**
 * Looks up the metadata of one partition across the given topics.
 *
 * @param consumer    an already-connected SimpleConsumer used to issue the metadata request
 * @param topics      topics whose metadata is queried
 * @param partitionId the partition number to search for
 * @return the first matching PartitionMetadata, or null if no topic reports that
 *         partition or the request fails (the failure is logged, not rethrown)
 */
public static PartitionMetadata getPartitionMetadata(final SimpleConsumer consumer, final List<String> topics, final int partitionId) {
try {
TopicMetadataResponse response = consumer.send(new TopicMetadataRequest(topics));
for (TopicMetadata topicMetadata : response.topicsMetadata()) {
for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
if (partitionMetadata.partitionId() == partitionId) {
return partitionMetadata;
}
}
}
} catch (Exception e) {
// Best-effort lookup: log and fall through to the null return.
LOG.warn("Unable to fetch partition meta data from host[{}:{}] [{}:{}]", consumer.host(), consumer.port(), topics, partitionId, e);
}
return null;
}
示例2: updateLeaderMap
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/**
 * Refreshes the partition-to-leader cache by asking every known broker for the
 * metadata of the audit topics. Each (topic, partition) key is mapped to its
 * leader's host:port; brokers that fail are logged and skipped so later brokers
 * can still contribute entries.
 */
private void updateLeaderMap() {
for (String broker : brokerList) {
try {
SimpleConsumer consumer = getSimpleConsumer(broker);
kafka.javaapi.TopicMetadataResponse response = consumer.send(new TopicMetadataRequest(auditTopics));
for (TopicMetadata topicMetadata : response.topicsMetadata()) {
for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
TopicAndPartition key = new TopicAndPartition(topicMetadata.topic(), partitionMetadata.partitionId());
partitionLeader.put(key, getHostPort(partitionMetadata.leader()));
}
}
} catch (Exception e) {
// Keep iterating: one unreachable broker must not abort the refresh.
logger.warn("Got exception to get metadata from broker=" + broker, e);
}
}
}
示例3: getOffset
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/**
 * Fetches the latest offset (-1 = LatestTime, one offset requested) for the
 * given partition by connecting directly to its leader broker.
 *
 * @param topic     topic the partition belongs to
 * @param partition metadata of the partition; its leader() is dialed
 * @return an OffsetInfo carrying the newest available offset
 */
private static OffsetInfo getOffset(String topic, PartitionMetadata partition) {
Broker leader = partition.leader();
SimpleConsumer consumer = new SimpleConsumer(leader.host(), leader.port(), 10000, 1000000,
"com.rekko.newrelic.storm.kafka");
try {
Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
requestInfo.put(new TopicAndPartition(topic, partition.partitionId()),
new PartitionOffsetRequestInfo(-1, 1));
OffsetRequest request = new OffsetRequest(requestInfo, (short) 0, "com.rekko.newrelic.storm.kafka");
OffsetResponse response = consumer.getOffsetsBefore(request);
OffsetInfo result = new OffsetInfo();
result.offset = response.offsets(topic, partition.partitionId())[0];
return result;
} finally {
// Always release the socket, even if the offset request throws.
consumer.close();
}
}
示例4: refreshTopicMetadata
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/**
 * Re-resolves the leader broker for the given partition. Brokers are tried in
 * order; the first one that returns topic metadata is authoritative (the loop
 * stops after it, whether or not the partition id was found in its answer).
 */
private void refreshTopicMetadata(KafkaPartition partition) {
for (String broker : KafkaWrapper.this.getBrokers()) {
List<TopicMetadata> metadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
if (metadataList == null || metadataList.isEmpty()) {
continue; // this broker had nothing; ask the next one
}
for (PartitionMetadata metadata : metadataList.get(0).partitionsMetadata()) {
if (metadata.partitionId() == partition.getId()) {
partition.setLeader(metadata.leader().id(), metadata.leader().host(),
metadata.leader().port());
break;
}
}
break; // first broker with metadata wins
}
}
示例5: refreshTopicMetadata
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/**
 * Refreshes the partition's leader by querying the configured brokers one at a
 * time. The first broker that answers with metadata ends the search; if that
 * answer contains the partition id, the leader (id/host/port) is updated.
 */
private void refreshTopicMetadata(KafkaPartition partition) {
for (String broker : this.brokers) {
List<TopicMetadata> metadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
if (metadataList == null || metadataList.isEmpty()) {
continue; // no answer from this broker; try the next
}
for (PartitionMetadata metadata : metadataList.get(0).partitionsMetadata()) {
if (metadata.partitionId() == partition.getId()) {
partition.setLeader(metadata.leader().id(), metadata.leader().host(),
metadata.leader().port());
break;
}
}
break; // stop after the first broker that returned metadata
}
}
示例6: findLeader
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/**
 * Finds the metadata entry for the given partition of a topic, using the
 * broker described by brokerInfo to obtain the partition list.
 *
 * @param brokerInfo broker to query for the topic's partition list
 * @param topic      topic name
 * @param partition  partition id to locate
 * @return the matching PartitionMetadata, or null if the partition is not listed
 */
private PartitionMetadata findLeader(KafkaBrokerInfo brokerInfo, String topic, int partition) {
// Fix: removed the dead local `returnMetaData`, which was assigned null and never used.
for (PartitionMetadata part : getPartitionList(brokerInfo, topic)) {
if (part.partitionId() == partition) {
return part;
}
}
return null;
}
示例7: findLeader
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/**
 * Finds the metadata entry for the given partition of a topic, using the
 * broker at brokerHost:brokerPort to obtain the partition list.
 *
 * @param brokerHost host of the broker to query
 * @param brokerPort port of the broker to query
 * @param topic      topic name
 * @param partition  partition id to locate
 * @return the matching PartitionMetadata, or null if the partition is not listed
 */
private PartitionMetadata findLeader(String brokerHost, int brokerPort, String topic, int partition) {
// Fix: removed the dead local `returnMetaData`, which was assigned null and never used.
for (PartitionMetadata part : getPartitionList(brokerHost, brokerPort, topic)) {
if (part.partitionId() == partition) {
return part;
}
}
return null;
}
示例8: getPartitionForTopic
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/**
 * Looks up the metadata of a single partition for a topic via the broker list.
 *
 * @param brokerList brokers to consult for partition metadata
 * @param topic      topic name
 * @param partition  partition id to locate
 * @return the matching PartitionMetadata, or null when the topic is unknown or
 *         the partition id is not present
 */
public static PartitionMetadata getPartitionForTopic(Set<String> brokerList, String topic, int partition)
{
List<PartitionMetadata> pmds = getPartitionsForTopic(brokerList, topic);
if (pmds != null) {
for (PartitionMetadata pmd : pmds) {
if (pmd.partitionId() == partition) {
return pmd;
}
}
}
return null;
}
示例9: initializeLastProcessingOffset
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/**
 * Reads the last message produced to every partition of this operator's topic
 * and caches it in {@code lastMsgs} keyed by partition id.
 * <p>
 * For each partition it dials the leader broker, asks for the latest offset,
 * then fetches from {@code readOffset - 1} to obtain the final message.
 *
 * @throws RuntimeException if the topic metadata cannot be retrieved
 */
private void initializeLastProcessingOffset()
{
// read last received kafka message
TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());
if (tm == null) {
throw new RuntimeException("Failed to retrieve topic metadata");
}
partitionNum = tm.partitionsMetadata().size();
lastMsgs = new HashMap<Integer, Pair<byte[],byte[]>>(partitionNum);
for (PartitionMetadata pm : tm.partitionsMetadata()) {
String leadBroker = pm.leader().host();
int port = pm.leader().port();
String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
try {
long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName);
FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();
FetchResponse fetchResponse = consumer.fetch(req);
for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {
Message m = messageAndOffset.message();
ByteBuffer payload = m.payload();
// NOTE(review): m.key() may be null for messages produced without a key —
// that would NPE below exactly as in the original; confirm producers always set keys.
ByteBuffer key = m.key();
byte[] valueBytes = new byte[payload.limit()];
byte[] keyBytes = new byte[key.limit()];
payload.get(valueBytes);
key.get(keyBytes);
lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
}
} finally {
// Fix: the consumer was never closed, leaking one socket per partition on every call.
consumer.close();
}
}
}
示例10: findLeader
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/**
 * Asks the seed broker for topic metadata and returns the host/port of the
 * leader of the given topic-partition.
 *
 * @param topicPartition the topic and partition whose leader is wanted
 * @return the leader's HostAndPort, or null if no broker reported it
 */
private HostAndPort findLeader(TopicPartition topicPartition) {
SimpleConsumer consumer = null;
try {
LOG.debug("looking up leader for topic {} partition {}", topicPartition.getTopic(), topicPartition.getPartition());
consumer = createConsumer(
mConfig.getKafkaSeedBrokerHost(),
mConfig.getKafkaSeedBrokerPort(),
"leaderLookup");
List<String> topics = new ArrayList<String>();
topics.add(topicPartition.getTopic());
TopicMetadataResponse response = consumer.send(new TopicMetadataRequest(topics));
for (TopicMetadata topicMetadata : response.topicsMetadata()) {
for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
if (partitionMetadata.partitionId() == topicPartition.getPartition()) {
return HostAndPort.fromParts(partitionMetadata.leader().host(), partitionMetadata.leader().port());
}
}
}
} finally {
// Close the short-lived lookup consumer whether or not a leader was found.
if (consumer != null) {
consumer.close();
}
}
return null;
}
示例11: storeMetadata
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/**
 * Caches the partition metadata under its topic, keyed by partition id, in
 * {@code topicMetadataMap} (topic -> partitionId -> PartitionMetadata).
 *
 * @param topic topic the partition belongs to
 * @param p     metadata to store; its partitionId() is the inner-map key
 */
private void storeMetadata(String topic, PartitionMetadata p) {
Integer id = Integer.valueOf(p.partitionId());
Map<Integer, PartitionMetadata> m;
// Fix: the outer map is keyed by topic (see get(topic)/put(topic, m) below), but the
// original checked containsKey(id) with the partition Integer. That check was always
// false, so every call replaced the topic's map with a fresh one, discarding all
// previously stored partitions for the topic.
if (topicMetadataMap.containsKey(topic)) {
LOG.info("already crreated a partitionMap. Just retrieve it."); //xxx
m = topicMetadataMap.get(topic);
} else {
LOG.info("making a new partitionMap"); //xxx
m = new HashMap<Integer, PartitionMetadata>();
topicMetadataMap.put(topic, m);
}
m.put(id, p);
}
示例12: findPartitionMetadata
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/**
 * Finds the metadata of a single partition within a topic's metadata.
 *
 * @param topic     topic to look up via findTopicMetadata
 * @param partition partition id to locate
 * @return the matching PartitionMetadata, or null if the topic does not list it
 * @throws Exception propagated from findTopicMetadata
 */
public PartitionMetadata findPartitionMetadata(String topic, int partition) throws Exception {
for (PartitionMetadata candidate : findTopicMetadata(topic).partitionsMetadata()) {
if (candidate.partitionId() == partition) {
return candidate;
}
}
return null;
}
示例13: findPartition
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/**
 * Returns the metadata of the requested partition from the given topic metadata.
 *
 * @param topicMetadata topic metadata to search
 * @param partition     partition id to locate
 * @return the matching PartitionMetadata (never null)
 * @throws RuntimeException if the partition id is not present
 */
PartitionMetadata findPartition(TopicMetadata topicMetadata, int partition) {
for (PartitionMetadata candidate : topicMetadata.partitionsMetadata()) {
if (candidate.partitionId() == partition) {
return candidate;
}
}
throw new RuntimeException("Cannot find the partition " + partition);
}
示例14: findPartition
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/**
 * Returns the metadata of the requested partition from the given topic metadata.
 *
 * @param topicMetadata topic metadata to search
 * @param partition     partition id to locate
 * @return the matching PartitionMetadata (never null)
 * @throws RuntimeException if the partition id is not present
 */
PartitionMetadata findPartition(TopicMetadata topicMetadata, int partition) {
// Fix: the parameter was declared as `partion` while the body referenced
// `partition`, which does not compile; the name is now consistent.
for (PartitionMetadata sel : topicMetadata.partitionsMetadata()) {
if (sel.partitionId() == partition)
return sel;
}
throw new RuntimeException("Cannot find the partition " + partition);
}
示例15: getAllTopicPartition
import kafka.javaapi.PartitionMetadata; //导入方法依赖的package包/类
/***
 * Dedupe the partition metadata from all brokers
 *
 * Collects topic metadata from every broker, applies the configured
 * white/black lists, and keeps only the first occurrence of each
 * (topic, partition) pair. Because the javaapi metadata objects are
 * read-only wrappers, deduped partitions are rebuilt as scala-side
 * kafka.api metadata and re-wrapped for the return value.
 *
 * @return Deduped topic metadata
 */
public List<kafka.javaapi.TopicMetadata> getAllTopicPartition()
{
List<kafka.javaapi.TopicMetadata> topicMetadataList = getMetaDataFromAllBrokers();
// Tracks (topic, partition) pairs already emitted, across all brokers' answers.
HashSet<TopicPartition> exploredTopicPartition = new HashSet<TopicPartition>();
List<kafka.javaapi.TopicMetadata> ret = new ArrayList<TopicMetadata>();
// Filter any white list topics
HashSet<String> whiteListTopics = new HashSet<String>(m_mDProps.whiteListTopics);
if (!whiteListTopics.isEmpty()) {
topicMetadataList = filterWhitelistTopics(topicMetadataList, whiteListTopics);
}
// Filter all blacklist topics
HashSet<String> blackListTopics = new HashSet<String>(m_mDProps.blackListTopics);
String regex = "";
if (!blackListTopics.isEmpty()) {
regex = createTopicRegEx(blackListTopics);
}
for (TopicMetadata item : topicMetadataList)
{
// With no blacklist the regex stays "" and only an empty topic name could match.
if (Pattern.matches(regex, item.topic())) {
m_logger.debug("Discarding topic (blacklisted): " + item.topic());
continue;
}
// Rebuild the partition list on the scala (kafka.api) side, keeping only
// partitions not seen from an earlier broker.
List<kafka.api.PartitionMetadata> pml = new ArrayList<kafka.api.PartitionMetadata>();
for (PartitionMetadata part : item.partitionsMetadata())
{
if (!exploredTopicPartition.contains(new TopicPartition(item.topic(), part.partitionId())))
{
// Copy the javaapi wrapper's fields into a scala-side PartitionMetadata;
// leader may be absent, hence Option.apply (null -> None).
kafka.api.PartitionMetadata pm =
new kafka.api.PartitionMetadata(
part.partitionId(),
Option.apply(part.leader()),
JavaConversions.asScalaBuffer(part.replicas()).toList(),
JavaConversions.asScalaBuffer(part.isr()).toList(),
part.errorCode());
pml.add(pm);
exploredTopicPartition.add(new TopicPartition(item.topic(), part.partitionId()));
}
}
// Only emit the topic if it contributed at least one previously-unseen partition.
if (pml.size() > 0)
{
kafka.api.TopicMetadata tm =
new kafka.api.TopicMetadata(
item.topic(),
JavaConversions.asScalaBuffer(pml).toList(),
item.errorCode());
ret.add(new kafka.javaapi.TopicMetadata(tm));
}
}
Collections.sort(ret, new TopicMetadataComparator());
return ret;
}
}