This article collects and summarizes typical usage examples of the Java method org.apache.kafka.common.PartitionInfo.partition. If you are unsure what PartitionInfo.partition does, how to call it, or want to see it used in context, the curated method examples below may help. You can also explore further usage examples of the enclosing class, org.apache.kafka.common.PartitionInfo.
The following shows 11 code examples of the PartitionInfo.partition method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
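Before the examples, here is a minimal self-contained sketch of the basic pattern: metadata calls such as KafkaConsumer.partitionsFor() return a List<PartitionInfo>, and PartitionInfo.partition() yields the numeric partition id, which is typically combined with the topic name to build a TopicPartition. The broker address and topic name below are placeholders.

import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class PartitionIdDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // one PartitionInfo per partition of the topic, or null if the topic is unknown
            List<PartitionInfo> infos = consumer.partitionsFor("my-topic"); // placeholder topic
            if (infos != null) {
                for (PartitionInfo info : infos) {
                    // partition() returns the numeric partition id
                    TopicPartition tp = new TopicPartition(info.topic(), info.partition());
                    System.out.println(tp);
                }
            }
        }
    }
}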
Example 1: fillInMissingPartitions
import org.apache.kafka.common.PartitionInfo; // import the package/class on which the method depends
/**
* Add an empty load for all the partitions that exist in the current cluster but are missing from the
* metric aggregation result.
*/
private void fillInMissingPartitions(Map<TopicPartition, Snapshot[]> loadSnapshots,
Cluster kafkaCluster,
ClusterModel clusterModel) throws ModelInputException {
// There must be at least one entry; otherwise an exception would have been thrown earlier,
// so we don't need to check hasNext().
Snapshot[] snapshotsForTimestamps = loadSnapshots.values().iterator().next();
Snapshot[] emptyLoadSnapshots = new Snapshot[snapshotsForTimestamps.length];
for (int i = 0; i < emptyLoadSnapshots.length; i++) {
emptyLoadSnapshots[i] = new Snapshot(snapshotsForTimestamps[i].time());
}
for (Node node : kafkaCluster.nodes()) {
for (PartitionInfo partitionInfo : kafkaCluster.partitionsForNode(node.id())) {
TopicPartition tp = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
if (!loadSnapshots.containsKey(tp)) {
populateSnapshots(kafkaCluster, clusterModel, tp, emptyLoadSnapshots);
}
}
}
}
Example 2: getCount
import org.apache.kafka.common.PartitionInfo; // import the package/class on which the method depends
/**
* Gets the total message count for the topic.
* <b>WARNING: Don't use with compacted topics</b>
*/
@SuppressWarnings("unchecked")
public long getCount(String kafkaBrokers, String topic) {
KafkaConsumer consumer = buildConsumer(kafkaBrokers);
try {
Map<String, List<PartitionInfo>> topics = consumer.listTopics();
List<PartitionInfo> partitionInfos = topics.get(topic);
if (partitionInfos == null) {
logger.warn("Partition information was not found for topic {}", topic);
return 0;
} else {
Collection<TopicPartition> partitions = new ArrayList<>();
for (PartitionInfo partitionInfo : partitionInfos) {
TopicPartition partition = new TopicPartition(topic, partitionInfo.partition());
partitions.add(partition);
}
Map<TopicPartition, Long> endingOffsets = consumer.endOffsets(partitions);
Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
return diffOffsets(beginningOffsets, endingOffsets);
}
} finally {
consumer.close();
}
}
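The diffOffsets helper referenced above is not part of the snippet. A minimal sketch, assuming it simply sums the per-partition gaps between ending and beginning offsets (which yields the total message count on non-compacted topics):

// Hedged sketch of the diffOffsets helper referenced above; the original is not
// shown, so this assumes it sums the per-partition (end - beginning) gaps.
private long diffOffsets(Map<TopicPartition, Long> beginningOffsets,
    Map<TopicPartition, Long> endingOffsets) {
  long total = 0;
  for (Map.Entry<TopicPartition, Long> entry : endingOffsets.entrySet()) {
    Long beginning = beginningOffsets.getOrDefault(entry.getKey(), 0L);
    total += entry.getValue() - beginning;
  }
  return total;
}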
Example 3: hasPartition
import org.apache.kafka.common.PartitionInfo; // import the package/class on which the method depends
private boolean hasPartition(final TopicPartition topicPartition) {
final List<PartitionInfo> partitions = partitionInfo.get(topicPartition.topic());
if (partitions == null) {
return false;
}
for (final PartitionInfo partition : partitions) {
if (partition.partition() == topicPartition.partition()) {
return true;
}
}
return false;
}
Example 4: getBrokerLeaderPartitions
import org.apache.kafka.common.PartitionInfo; // import the package/class on which the method depends
public Map<Integer, List<TopicPartition>> getBrokerLeaderPartitions(
Map<String, List<PartitionInfo>> topicPartitionInfoMap) {
Map<Integer, List<TopicPartition>> result = new HashMap<>();
for (String topic : topicPartitionInfoMap.keySet()) {
List<PartitionInfo> partitionInfoList = topicPartitionInfoMap.get(topic);
if (partitionInfoList == null) {
LOG.error("Failed to get partition info for {}", topic);
continue;
}
for (PartitionInfo info : partitionInfoList) {
Node leaderNode = info.leader();
if (leaderNode != null) {
result.putIfAbsent(leaderNode.id(), new ArrayList<>());
TopicPartition topicPartition = new TopicPartition(info.topic(), info.partition());
result.get(leaderNode.id()).add(topicPartition);
}
}
}
return result;
}
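A hypothetical call site for this method: the input map can come straight from KafkaConsumer.listTopics(); the consumer variable below is an assumption for illustration.

// Hypothetical usage: feed the method with a consumer's topic metadata.
Map<String, List<PartitionInfo>> topicPartitionInfoMap = consumer.listTopics();
Map<Integer, List<TopicPartition>> leaderPartitionsByBroker =
    getBrokerLeaderPartitions(topicPartitionInfoMap);
leaderPartitionsByBroker.forEach((brokerId, leaderPartitions) ->
    System.out.println("broker " + brokerId + " leads " + leaderPartitions.size() + " partitions"));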
Example 5: run
import org.apache.kafka.common.PartitionInfo; // import the package/class on which the method depends
/**
* Periodically polls every topic and partition, records the current log-end
* offset of each partition in logEndOffsetMap, and sleeps between rounds.
*
* @see Thread#run()
*/
@Override
public void run() {
String group = "kafka-insight-logOffsetListener";
int sleepTime = 60000;
KafkaConsumer<byte[], byte[]> kafkaConsumer = null;
while (true) {
try {
if (null == kafkaConsumer) {
kafkaConsumer = KafkaUtils.createNewKafkaConsumer(brokersInfo, group);
}
Map<String, List<PartitionInfo>> topicPartitionsMap = kafkaConsumer.listTopics();
for (List<PartitionInfo> partitionInfoList : topicPartitionsMap.values()) {
for (PartitionInfo partitionInfo : partitionInfoList) {
TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
Collection<TopicPartition> topicPartitions = Arrays.asList(topicPartition);
kafkaConsumer.assign(topicPartitions);
kafkaConsumer.seekToEnd(topicPartitions);
Long logEndOffset = kafkaConsumer.position(topicPartition);
logEndOffsetMap.put(topicPartition, logEndOffset);
}
}
Thread.sleep(sleepTime);
} catch (Exception e) {
e.printStackTrace();
if (null != kafkaConsumer) {
kafkaConsumer.close();
kafkaConsumer = null;
}
}
}
}
Example 6: getKafkaOffsets
import org.apache.kafka.common.PartitionInfo; // import the package/class on which the method depends
private Map<TopicPartition, OffsetAndMetadata> getKafkaOffsets(
KafkaConsumer<String, byte[]> client, String topicStr) {
Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
List<PartitionInfo> partitions = client.partitionsFor(topicStr);
for (PartitionInfo partition : partitions) {
TopicPartition key = new TopicPartition(topicStr, partition.partition());
OffsetAndMetadata offsetAndMetadata = client.committed(key);
if (offsetAndMetadata != null) {
offsets.put(key, offsetAndMetadata);
}
}
return offsets;
}
Example 7: getKafkaOffsets
import org.apache.kafka.common.PartitionInfo; // import the package/class on which the method depends
private Map<TopicPartition, OffsetAndMetadata> getKafkaOffsets(
KafkaConsumer<String, byte[]> client) {
Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
List<PartitionInfo> partitions = client.partitionsFor(topicStr);
for (PartitionInfo partition : partitions) {
TopicPartition key = new TopicPartition(topicStr, partition.partition());
OffsetAndMetadata offsetAndMetadata = client.committed(key);
if (offsetAndMetadata != null) {
offsets.put(key, offsetAndMetadata);
}
}
return offsets;
}
Example 8: resetCorrectOffsets
import org.apache.kafka.common.PartitionInfo; // import the package/class on which the method depends
/**
* Reset offsets to the last recorded (processed) positions.
*/
private void resetCorrectOffsets() {
consumer.pause(consumer.assignment());
Map<String, List<PartitionInfo>> topicInfos = consumer.listTopics();
Set<String> topics = topicInfos.keySet();
List<String> expectTopics = new ArrayList<>(topicHandlers.keySet());
for (String topic : topics) {
if (!expectTopics.contains(topic))
continue;
List<PartitionInfo> partitions = topicInfos.get(topic);
for (PartitionInfo partition : partitions) {
try {
// the offset at which we expect to resume
long expectOffsets = consumerContext.getLatestProcessedOffsets(topic,
partition.partition());
TopicPartition topicPartition = new TopicPartition(topic, partition.partition());
OffsetAndMetadata metadata = consumer.committed(topicPartition);
// guard against a missing committed offset to avoid a NullPointerException
if (metadata != null && expectOffsets >= 0 && expectOffsets < metadata.offset()) {
consumer.seek(topicPartition, expectOffsets);
logger.info("seek Topic[{}] partition[{}] from {} to {}", topic,
partition.partition(), metadata.offset(), expectOffsets);
}
} catch (Exception e) {
logger.warn("try seek topic[" + topic + "] partition[" + partition.partition()
+ "] offsets error");
}
}
}
consumer.resume(consumer.assignment());
}
Example 9: recentSnapshotsForTopic
import org.apache.kafka.common.PartitionInfo; // import the package/class on which the method depends
/**
* Get all the snapshots for a topic before the given time, excluding the snapshot window that the given time falls in.
*/
private MetricSampleAggregationResult recentSnapshotsForTopic(String topic,
Cluster cluster,
Map<Long, Map<TopicPartition, AggregatedMetrics>> readyWindowedAggMetrics,
boolean includeAllTopics)
throws NotEnoughSnapshotsException {
List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
MetricSampleAggregationResult result = new MetricSampleAggregationResult(currentGeneration(), includeAllTopics);
for (PartitionInfo partition : partitions) {
TopicPartition tp = new TopicPartition(topic, partition.partition());
Snapshot[] snapshots = new Snapshot[readyWindowedAggMetrics.size()];
int index = 0;
for (Map.Entry<Long, Map<TopicPartition, AggregatedMetrics>> entry : readyWindowedAggMetrics.entrySet()) {
long snapshotWindow = entry.getKey();
SnapshotAndImputation partitionSnapShot =
partitionSnapshotForWindow(tp, cluster, snapshotWindow, snapshotWindow, includeAllTopics, true);
// If there is an invalid partition for the topic, we exclude the topic by returning the result immediately.
// At this point the result does not contain any snapshots, only the partition flaw.
if (partitionSnapShot.snapshot() == null) {
result.discardAllSnapshots();
result.recordPartitionWithSampleFlaw(tp, snapshotWindow, partitionSnapShot.imputation());
return result;
}
// Record the imputation and return the result without snapshots if there have been too many imputations.
if (partitionSnapShot.imputation() != null) {
result.recordPartitionWithSampleFlaw(tp, snapshotWindow, partitionSnapShot.imputation());
if (result.sampleFlaw(tp).size() > _numSnapshots * MAX_SNAPSHOT_PERCENT_WITH_FLAWS) {
LOG.debug("{} already has {} snapshot with flaws, excluding the partition from the model.",
tp, result.sampleFlaw(tp).size());
result.discardAllSnapshots();
return result;
}
}
snapshots[index++] = partitionSnapShot.snapshot();
}
result.addPartitionSnapshots(tp, snapshots);
}
return result;
}
Example 10: getUnderReplicatedPartitions
import org.apache.kafka.common.PartitionInfo; // import the package/class on which the method depends
/**
* Call the Kafka API to get the list of under-replicated partitions.
* When a topic partition loses all of its replicas, it will not have a leader broker.
* We need to handle this special case in detecting under replicated topic partitions.
*/
public static List<PartitionInfo> getUnderReplicatedPartitions(
String zkUrl, List<String> topics,
scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>>
partitionAssignments,
Map<String, Integer> replicationFactors,
Map<String, Integer> partitionCounts) {
List<PartitionInfo> underReplicated = new ArrayList<>();
KafkaConsumer kafkaConsumer = KafkaUtils.getKafkaConsumer(zkUrl);
for (String topic : topics) {
List<PartitionInfo> partitionInfoList = kafkaConsumer.partitionsFor(topic);
if (partitionInfoList == null) {
LOG.error("Failed to get partition info for {}", topic);
continue;
}
int numPartitions = partitionCounts.get(topic);
// When a partition loses all replicas and does not have a live leader,
// KafkaConsumer.partitionsFor(...) will not return info for that partition.
// The noLeaderFlags array is used to detect partitions that have no leader.
boolean[] noLeaderFlags = new boolean[numPartitions];
Arrays.fill(noLeaderFlags, true);
for (PartitionInfo info : partitionInfoList) {
if (info.inSyncReplicas().length < info.replicas().length &&
replicationFactors.get(info.topic()) > info.inSyncReplicas().length) {
underReplicated.add(info);
}
noLeaderFlags[info.partition()] = false;
}
// deal with the partitions that do not have leaders
for (int partitionId = 0; partitionId < numPartitions; partitionId++) {
if (noLeaderFlags[partitionId]) {
Seq<Object> seq = partitionAssignments.get(topic).get().get(partitionId).get();
Node[] nodes = JavaConverters.seqAsJavaList(seq).stream()
.map(val -> new Node((Integer) val, "", -1)).toArray(Node[]::new);
PartitionInfo partitionInfo =
new PartitionInfo(topic, partitionId, null, nodes, new Node[0]);
underReplicated.add(partitionInfo);
}
}
}
return underReplicated;
}
Example 11: OutOfSyncReplica
import org.apache.kafka.common.PartitionInfo; // import the package/class on which the method depends
public OutOfSyncReplica(PartitionInfo partitionInfo) {
this.topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
this.inSyncBrokers = getInSyncReplicas(partitionInfo);
this.outOfSyncBrokers = getOutOfSyncReplicas(partitionInfo);
this.leader = partitionInfo.leader();
}