This article collects typical usage examples of the Java class org.apache.kafka.common.PartitionInfo. If you are wondering what PartitionInfo is for, how to use it, or what real code that uses it looks like, the curated examples below should help.
The PartitionInfo class belongs to the org.apache.kafka.common package. The following sections show 15 code examples of the class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
Example 1: createConsumerAndSubscribe
import org.apache.kafka.common.PartitionInfo; // import the required package/class
/**
 * Create a new KafkaConsumer based on the passed-in ClientConfig, and subscribe to the appropriate
 * partitions.
 */
public KafkaConsumer createConsumerAndSubscribe(final ClientConfig clientConfig) {
  final KafkaConsumer kafkaConsumer = createConsumer(clientConfig);
  // Determine which partitions to subscribe to; for now, take all of them.
  final List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(clientConfig.getTopicConfig().getTopicName());
  // Pull out the partitions and convert them to topic partitions.
  final Collection<TopicPartition> topicPartitions = new ArrayList<>();
  for (final PartitionInfo partitionInfo : partitionInfos) {
    // Skip filtered partitions.
    if (!clientConfig.isPartitionFiltered(partitionInfo.partition())) {
      topicPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
    }
  }
  // Assign them.
  kafkaConsumer.assign(topicPartitions);
  // Return the Kafka consumer.
  return kafkaConsumer;
}
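Outside of this codebase, the same manual-assignment pattern can be reproduced with nothing but the public consumer API. Below is a minimal, self-contained sketch; the bootstrap address and topic name are placeholder assumptions, and there is no ClientConfig-style partition filtering:

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class ManualAssignSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
      // partitionsFor() triggers a metadata fetch if the topic is not cached yet.
      List<TopicPartition> assignment = new ArrayList<>();
      for (PartitionInfo info : consumer.partitionsFor("example-topic")) { // assumed topic name
        assignment.add(new TopicPartition(info.topic(), info.partition()));
      }
      // assign() bypasses consumer-group rebalancing, unlike subscribe().
      consumer.assign(assignment);
    }
  }
}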
Example 2: fillInMissingPartitions
import org.apache.kafka.common.PartitionInfo; // import the required package/class
/**
 * Add an empty load for all the partitions that exist in the current cluster but are missing from
 * the metric aggregation result.
 */
private void fillInMissingPartitions(Map<TopicPartition, Snapshot[]> loadSnapshots,
                                     Cluster kafkaCluster,
                                     ClusterModel clusterModel) throws ModelInputException {
  // There must be at least one entry, otherwise an exception would have been thrown earlier,
  // so we don't need to check hasNext().
  Snapshot[] snapshotsForTimestamps = loadSnapshots.values().iterator().next();
  Snapshot[] emptyLoadSnapshots = new Snapshot[snapshotsForTimestamps.length];
  for (int i = 0; i < emptyLoadSnapshots.length; i++) {
    emptyLoadSnapshots[i] = new Snapshot(snapshotsForTimestamps[i].time());
  }
  for (Node node : kafkaCluster.nodes()) {
    for (PartitionInfo partitionInfo : kafkaCluster.partitionsForNode(node.id())) {
      TopicPartition tp = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
      if (!loadSnapshots.containsKey(tp)) {
        populateSnapshots(kafkaCluster, clusterModel, tp, emptyLoadSnapshots);
      }
    }
  }
}
Example 3: populateSnapshots
import org.apache.kafka.common.PartitionInfo; // import the required package/class
private void populateSnapshots(Cluster kafkaCluster,
                               ClusterModel clusterModel,
                               TopicPartition tp,
                               Snapshot[] leaderLoadSnapshots) throws ModelInputException {
  PartitionInfo partitionInfo = kafkaCluster.partition(tp);
  // If the partition info does not exist, the topic may have been deleted.
  if (partitionInfo != null) {
    for (Node replica : partitionInfo.replicas()) {
      boolean isLeader = partitionInfo.leader() != null && replica.id() == partitionInfo.leader().id();
      String rack = getRackHandleNull(replica);
      // Note that we assume the capacity resolver can still return the broker capacity even if the
      // broker is dead. We need this to get the host resource capacity.
      Map<Resource, Double> brokerCapacity =
          _brokerCapacityConfigResolver.capacityForBroker(rack, replica.host(), replica.id());
      clusterModel.createReplicaHandleDeadBroker(rack, replica.id(), tp, isLeader, brokerCapacity);
      // Push the load snapshots to the replica one by one.
      for (int i = 0; i < leaderLoadSnapshots.length; i++) {
        clusterModel.pushLatestSnapshot(rack, replica.id(), tp,
            isLeader ? leaderLoadSnapshots[i].duplicate() : MonitorUtils.toFollowerSnapshot(leaderLoadSnapshots[i]));
      }
    }
  }
}
Example 4: ensureTopicCreated
import org.apache.kafka.common.PartitionInfo; // import the required package/class
private void ensureTopicCreated(Map<String, ?> config) {
  ZkUtils zkUtils = createZkUtils(config);
  Map<String, List<PartitionInfo>> topics = _consumers.get(0).listTopics();
  long snapshotWindowMs = Long.parseLong((String) config.get(KafkaCruiseControlConfig.LOAD_SNAPSHOT_WINDOW_MS_CONFIG));
  int numSnapshotWindows = Integer.parseInt((String) config.get(KafkaCruiseControlConfig.NUM_LOAD_SNAPSHOTS_CONFIG));
  long retentionMs = (numSnapshotWindows * ADDITIONAL_SNAPSHOT_WINDOW_TO_RETAIN_FACTOR) * snapshotWindowMs;
  Properties props = new Properties();
  props.setProperty(LogConfig.RetentionMsProp(), Long.toString(retentionMs));
  props.setProperty(LogConfig.CleanupPolicyProp(), DEFAULT_CLEANUP_POLICY);
  int replicationFactor = Math.min(2, zkUtils.getAllBrokersInCluster().size());
  if (!topics.containsKey(_partitionMetricSampleStoreTopic)) {
    AdminUtils.createTopic(zkUtils, _partitionMetricSampleStoreTopic, 32, replicationFactor, props, RackAwareMode.Safe$.MODULE$);
  } else {
    AdminUtils.changeTopicConfig(zkUtils, _partitionMetricSampleStoreTopic, props);
  }
  if (!topics.containsKey(_brokerMetricSampleStoreTopic)) {
    AdminUtils.createTopic(zkUtils, _brokerMetricSampleStoreTopic, 32, replicationFactor, props, RackAwareMode.Safe$.MODULE$);
  } else {
    AdminUtils.changeTopicConfig(zkUtils, _brokerMetricSampleStoreTopic, props);
  }
  KafkaCruiseControlUtils.closeZkUtilsWithTimeout(zkUtils, 10000);
}
Example 5: assignPartitions
import org.apache.kafka.common.PartitionInfo; // import the required package/class
@Override
public List<Set<TopicPartition>> assignPartitions(Cluster cluster, int numMetricFetchers) {
  // Create a list to hold the assignment of each metric fetcher.
  List<Set<TopicPartition>> assignments = new ArrayList<>();
  for (int i = 0; i < numMetricFetchers; i++) {
    assignments.add(new HashSet<>());
  }
  int index = 0;
  // The total number of partitions that have been assigned.
  int totalPartitionAssigned = 0;
  for (String topic : cluster.topics()) {
    while (assignments.get(index % numMetricFetchers).size() > totalPartitionAssigned / numMetricFetchers) {
      index++;
    }
    Set<TopicPartition> assignmentForFetcher = assignments.get(index % numMetricFetchers);
    List<PartitionInfo> partitionsForTopic = cluster.partitionsForTopic(topic);
    for (PartitionInfo partitionInfo : partitionsForTopic) {
      assignmentForFetcher.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
    }
    totalPartitionAssigned += partitionsForTopic.size();
  }
  // Print the assignments if the logger is set to debug level or lower.
  maybeDumpAssignments(assignments);
  return assignments;
}
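A hypothetical call site for this assigner might look like the sketch below; the Cluster would normally come from live metadata, and the `metadata` variable is an assumed Metadata instance:

// Hypothetical usage sketch: spread all partitions across three metric fetchers.
Cluster cluster = metadata.fetch(); // 'metadata' is an assumed Metadata instance
List<Set<TopicPartition>> assignments = assignPartitions(cluster, 3);
for (int i = 0; i < assignments.size(); i++) {
  System.out.println("fetcher " + i + " gets " + assignments.get(i).size() + " partitions");
}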
Example 6: getBrokerLeaderPartitions
import org.apache.kafka.common.PartitionInfo; // import the required package/class
public Map<Integer, List<TopicPartition>> getBrokerLeaderPartitions(
    Map<String, List<PartitionInfo>> topicPartitonInfoMap) {
  Map<Integer, List<TopicPartition>> result = new HashMap<>();
  for (String topic : topicPartitonInfoMap.keySet()) {
    List<PartitionInfo> partitionInfoList = topicPartitonInfoMap.get(topic);
    if (partitionInfoList == null) {
      LOG.error("Failed to get partition info for {}", topic);
      continue;
    }
    for (PartitionInfo info : partitionInfoList) {
      Node leaderNode = info.leader();
      if (leaderNode != null) {
        result.putIfAbsent(leaderNode.id(), new ArrayList<>());
        TopicPartition topicPartiton = new TopicPartition(info.topic(), info.partition());
        result.get(leaderNode.id()).add(topicPartiton);
      }
    }
  }
  return result;
}
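The map parameter has exactly the shape returned by KafkaConsumer.listTopics(), so wiring the two together is straightforward. A minimal sketch, assuming a `consumer` instance already exists:

// Hypothetical wiring: group leader partitions by broker id from live metadata.
Map<String, List<PartitionInfo>> topicInfo = consumer.listTopics(); // 'consumer' is assumed
Map<Integer, List<TopicPartition>> leadersByBroker = getBrokerLeaderPartitions(topicInfo);
for (Map.Entry<Integer, List<TopicPartition>> entry : leadersByBroker.entrySet()) {
  System.out.println("broker " + entry.getKey() + " leads " + entry.getValue().size() + " partitions");
}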
Example 7: getMetadata
import org.apache.kafka.common.PartitionInfo; // import the required package/class
private Metadata getMetadata(Collection<TopicPartition> partitions) {
  Node node0 = new Node(0, "localhost", 100, "rack0");
  Node node1 = new Node(1, "localhost", 100, "rack1");
  Node[] nodes = {node0, node1};
  Set<Node> allNodes = new HashSet<>();
  allNodes.add(node0);
  allNodes.add(node1);
  Set<PartitionInfo> parts = new HashSet<>();
  for (TopicPartition tp : partitions) {
    parts.add(new PartitionInfo(tp.topic(), tp.partition(), node0, nodes, nodes));
  }
  Cluster cluster = new Cluster("cluster-id", allNodes, parts, Collections.emptySet(), Collections.emptySet());
  Metadata metadata = new Metadata();
  metadata.update(cluster, Collections.emptySet(), 0);
  return metadata;
}
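As the test helper above shows, PartitionInfo is a plain value object. A quick standalone sketch of constructing one and reading it back; the host and port values are placeholders:

import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class PartitionInfoSketch {
  public static void main(String[] args) {
    Node leader = new Node(0, "localhost", 9092); // placeholder host/port
    Node[] replicas = {leader, new Node(1, "localhost", 9093)};
    // Constructor arguments: topic, partition, leader, replicas, in-sync replicas.
    PartitionInfo info = new PartitionInfo("example-topic", 0, leader, replicas, replicas);
    System.out.println(info.topic() + "-" + info.partition()
        + " led by node " + info.leader().id()
        + ", " + info.inSyncReplicas().length + " in-sync replicas");
  }
}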
Example 8: partitionsFor
import org.apache.kafka.common.PartitionInfo; // import the required package/class
/**
 * Get metadata about the partitions for a given topic. This method will issue a remote call to the server if it
 * does not already have any metadata about the given topic.
 *
 * @param topic The topic to get partition metadata for
 * @return The list of partitions
 * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
 *             function is called
 * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
 *             this function is called
 * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the specified topic
 * @throws org.apache.kafka.common.errors.TimeoutException if the topic metadata could not be fetched before
 *             expiration of the configured request timeout
 * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors
 */
@Override
public List<PartitionInfo> partitionsFor(String topic) {
  acquire();
  try {
    Cluster cluster = this.metadata.fetch();
    List<PartitionInfo> parts = cluster.partitionsForTopic(topic);
    if (!parts.isEmpty())
      return parts;
    Map<String, List<PartitionInfo>> topicMetadata = fetcher.getTopicMetadata(
        new MetadataRequest.Builder(Collections.singletonList(topic), true), requestTimeoutMs);
    return topicMetadata.get(topic);
  } finally {
    release();
  }
}
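From the caller's side, this is the method most applications use to inspect PartitionInfo. A minimal sketch that prints leadership and replication details; the broker address and topic name are assumptions:

// Minimal caller-side sketch of partitionsFor(); values are placeholders.
Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
  for (PartitionInfo info : consumer.partitionsFor("example-topic")) {
    System.out.printf("partition=%d leader=%s replicas=%d isr=%d%n",
        info.partition(),
        info.leader() == null ? "none" : String.valueOf(info.leader().id()),
        info.replicas().length,
        info.inSyncReplicas().length);
  }
}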
Example 9: getCount
import org.apache.kafka.common.PartitionInfo; // import the required package/class
/**
 * Gets the total message count for the topic.
 * <b>WARNING: Don't use with compacted topics</b>
 */
@SuppressWarnings("unchecked")
public long getCount(String kafkaBrokers, String topic) {
  KafkaConsumer consumer = buildConsumer(kafkaBrokers);
  try {
    @SuppressWarnings("unchecked")
    Map<String, List<PartitionInfo>> topics = consumer.listTopics();
    List<PartitionInfo> partitionInfos = topics.get(topic);
    if (partitionInfos == null) {
      logger.warn("Partition information was not found for topic {}", topic);
      return 0;
    } else {
      Collection<TopicPartition> partitions = new ArrayList<>();
      for (PartitionInfo partitionInfo : partitionInfos) {
        TopicPartition partition = new TopicPartition(topic, partitionInfo.partition());
        partitions.add(partition);
      }
      Map<TopicPartition, Long> endingOffsets = consumer.endOffsets(partitions);
      Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
      return diffOffsets(beginningOffsets, endingOffsets);
    }
  } finally {
    consumer.close();
  }
}
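The diffOffsets helper is not shown in this example. A plausible reconstruction (an assumption, not the original author's code) simply sums end-minus-beginning across partitions:

// Hypothetical reconstruction of the diffOffsets() helper referenced above.
private long diffOffsets(Map<TopicPartition, Long> beginning, Map<TopicPartition, Long> ending) {
  long total = 0;
  for (Map.Entry<TopicPartition, Long> entry : ending.entrySet()) {
    Long start = beginning.get(entry.getKey());
    if (start != null) {
      total += entry.getValue() - start; // messages still retained in this partition
    }
  }
  return total;
}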
Example 10: verifyTopicsExist
import org.apache.kafka.common.PartitionInfo; // import the required package/class
public boolean verifyTopicsExist(String kafkaBrokers, Set<String> requiredTopics,
                                 boolean checkPartitionCounts) {
  Properties props = new Properties();
  props.put("bootstrap.servers", kafkaBrokers);
  props.put("group.id", UUID.randomUUID().toString());
  props.put("key.deserializer", StringDeserializer.class.getName());
  props.put("value.deserializer", StringDeserializer.class.getName());
  KafkaConsumer consumer = new KafkaConsumer(props);
  try {
    @SuppressWarnings("unchecked")
    Map<String, List<PartitionInfo>> topics = consumer.listTopics();
    Set<Integer> partitionCount = new HashSet<>();
    for (String requiredTopic : requiredTopics) {
      List<PartitionInfo> partitions = topics.get(requiredTopic);
      if (partitions == null) {
        logger.info("Required kafka topic {} not present", requiredTopic);
        return false;
      }
      partitionCount.add(partitions.size());
    }
    if (checkPartitionCounts && partitionCount.size() > 1) {
      logger.warn("Partition count mismatch in topics {}",
          Arrays.toString(requiredTopics.toArray()));
      return false;
    }
    return true;
  } finally {
    consumer.close();
  }
}
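A hypothetical call site, for instance during service startup; the topic names and broker address are placeholders:

// Hypothetical startup check using the method above.
Set<String> required = new HashSet<>(Arrays.asList("events", "commands"));
if (!verifyTopicsExist("localhost:9092", required, true)) {
  throw new IllegalStateException("Required Kafka topics are missing or have mismatched partition counts");
}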
Example 11: partition
import org.apache.kafka.common.PartitionInfo; // import the required package/class
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
  List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
  int numPartitions = partitions.size();
  if (keyBytes == null) {
    int nextValue = roundRobin.getAndIncrement();
    List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic);
    if (availablePartitions.size() > 0) {
      int part = Utils.toPositive(nextValue) % availablePartitions.size();
      return availablePartitions.get(part).partition();
    } else {
      // No partitions are available; fall back to a possibly unavailable partition.
      return Utils.toPositive(nextValue) % numPartitions;
    }
  } else {
    // Hash the keyBytes to choose a partition.
    return Utils.toPositive(xxHasher.hash(keyBytes, 0, keyBytes.length, SEED)) % numPartitions;
  }
}
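A custom Partitioner like this one is activated through producer configuration. A minimal sketch; the fully qualified class name is a placeholder for wherever the partitioner above actually lives:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

// Hypothetical producer setup registering the custom partitioner above.
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "com.example.XxHashPartitioner"); // placeholder FQCN
KafkaProducer<String, String> producer = new KafkaProducer<>(props);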
Example 12: nullKeyRoundRobinThreeAvailablePartitionsTest
import org.apache.kafka.common.PartitionInfo; // import the required package/class
@Test
public void nullKeyRoundRobinThreeAvailablePartitionsTest() {
  List<PartitionInfo> partitions = new ArrayList<>();
  for (int i = 0; i < 3; i++) {
    partitions.add(new PartitionInfo(null, i, null, null, null));
  }
  when(cluster.availablePartitionsForTopic(anyString())).thenReturn(partitions);
  List<Integer> results = new ArrayList<>();
  for (int i = 0; i < 12; i++) {
    results.add(partitioner.partition("events", null, null,
        null, null, cluster));
  }
  List<Integer> shouldBe = of(0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2);
  assertThat(results).isEqualTo(shouldBe);
}
Example 13: filterOutInReassignmentUrps
import org.apache.kafka.common.PartitionInfo; // import the required package/class
/**
 * Remove the under-replicated partitions that are in the middle of a partition reassignment.
 */
public List<PartitionInfo> filterOutInReassignmentUrps(List<PartitionInfo> urps,
                                                       Map<String, Integer> replicationFactors) {
  List<PartitionInfo> result = new ArrayList<>();
  for (PartitionInfo urp : urps) {
    if (urp.replicas().length <= replicationFactors.get(urp.topic())) {
      // Number of replicas <= replication factor.
      result.add(urp);
    } else {
      // Number of replicas > replication factor. This can happen after
      // a failed partition reassignment.
      Set<Integer> liveReplicas = new HashSet<>();
      for (Node node : urp.replicas()) {
        if (node.host() != null && OperatorUtil.pingKafkaBroker(node.host(), 9092, 5000)) {
          liveReplicas.add(node.id());
        }
      }
      if (liveReplicas.size() < replicationFactors.get(urp.topic())) {
        result.add(urp);
      }
    }
  }
  return result;
}
Example 14: testUpdateWithNeedMetadataForAllTopics
import org.apache.kafka.common.PartitionInfo; // import the required package/class
@Test
public void testUpdateWithNeedMetadataForAllTopics() {
  long time = 0;
  metadata.update(Cluster.empty(), Collections.<String>emptySet(), time);
  metadata.needMetadataForAllTopics(true);
  final List<String> expectedTopics = Collections.singletonList("topic");
  metadata.setTopics(expectedTopics);
  metadata.update(new Cluster(null,
          Collections.singletonList(new Node(0, "host1", 1000)),
          Arrays.asList(
              new PartitionInfo("topic", 0, null, null, null),
              new PartitionInfo("topic1", 0, null, null, null)),
          Collections.<String>emptySet(),
          Collections.<String>emptySet()),
      Collections.<String>emptySet(), 100);
  assertArrayEquals("Metadata got updated with wrong set of topics.",
      expectedTopics.toArray(), metadata.topics().toArray());
  metadata.needMetadataForAllTopics(false);
}
Example 15: newMetadataResponse
import org.apache.kafka.common.PartitionInfo; // import the required package/class
private MetadataResponse newMetadataResponse(String topic, Errors error) {
  List<MetadataResponse.PartitionMetadata> partitionsMetadata = new ArrayList<>();
  if (error == Errors.NONE) {
    for (PartitionInfo partitionInfo : cluster.partitionsForTopic(topic)) {
      partitionsMetadata.add(new MetadataResponse.PartitionMetadata(
          Errors.NONE,
          partitionInfo.partition(),
          partitionInfo.leader(),
          Arrays.asList(partitionInfo.replicas()),
          Arrays.asList(partitionInfo.inSyncReplicas())));
    }
  }
  MetadataResponse.TopicMetadata topicMetadata = new MetadataResponse.TopicMetadata(error, topic, false, partitionsMetadata);
  return new MetadataResponse(cluster.nodes(), null, MetadataResponse.NO_CONTROLLER_ID, Arrays.asList(topicMetadata));
}