This article collects typical usage examples of the Java method org.apache.kafka.common.Cluster.partitionsForTopic. If you have been wondering exactly what Cluster.partitionsForTopic does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.kafka.common.Cluster.
The sections below present 15 code examples of Cluster.partitionsForTopic, sorted by popularity by default.
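Before diving into the examples, here is a minimal, self-contained sketch of what Cluster.partitionsForTopic returns. The broker address, topic name, and single-partition layout are made up for illustration; the six-argument Cluster constructor is the same one used in Example 6 below.

import java.util.Collections;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class PartitionsForTopicDemo {
    public static void main(String[] args) {
        // A hand-built, single-broker cluster; all names here are placeholders.
        Node broker = new Node(0, "localhost", 9092);
        // One partition, led by the only broker, fully in sync.
        PartitionInfo p0 = new PartitionInfo("demo-topic", 0, broker,
                new Node[]{broker}, new Node[]{broker});
        Cluster cluster = new Cluster("demo-cluster-id",
                Collections.singletonList(broker),
                Collections.singletonList(p0),
                Collections.<String>emptySet(),
                Collections.<String>emptySet(),
                broker);
        // Prints "demo-topic-0".
        for (PartitionInfo pi : cluster.partitionsForTopic("demo-topic")) {
            System.out.println(pi.topic() + "-" + pi.partition());
        }
    }
}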
Example 1: assignPartitions
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
@Override
public List<Set<TopicPartition>> assignPartitions(Cluster cluster, int numMetricFetchers) {
    // Create a list of per-fetcher assignment sets, one per metric fetcher.
    List<Set<TopicPartition>> assignments = new ArrayList<>();
    for (int i = 0; i < numMetricFetchers; i++) {
        assignments.add(new HashSet<>());
    }
    int index = 0;
    // The total number of partitions that have been assigned.
    int totalPartitionAssigned = 0;
    for (String topic : cluster.topics()) {
        // Skip fetchers that already hold more than their fair share of partitions.
        while (assignments.get(index % numMetricFetchers).size() > totalPartitionAssigned / numMetricFetchers) {
            index++;
        }
        Set<TopicPartition> assignmentForFetcher = assignments.get(index % numMetricFetchers);
        List<PartitionInfo> partitionsForTopic = cluster.partitionsForTopic(topic);
        for (PartitionInfo partitionInfo : partitionsForTopic) {
            assignmentForFetcher.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
        }
        totalPartitionAssigned += partitionsForTopic.size();
    }
    // Print the assignments if the logger is set to debug level or lower.
    maybeDumpAssignments(assignments);
    return assignments;
}
Example 2: partition
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
    List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
    int numPartitions = partitions.size();
    if (keyBytes != null) {
        int hashCode;
        if (key instanceof Integer || key instanceof Long) {
            // Use Number.intValue() here: a plain (int) cast would throw a
            // ClassCastException when unboxing a Long key.
            hashCode = ((Number) key).intValue();
        } else {
            hashCode = key.hashCode();
        }
        // Clear the sign bit so the modulo result is always non-negative.
        hashCode = hashCode & 0x7fffffff;
        return hashCode % numPartitions;
    } else {
        return 0;
    }
}
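A custom partitioner like this only takes effect once the producer is told about it. Below is a minimal wiring sketch; IntegerKeyPartitioner is a hypothetical name for the class above, and the broker address is a placeholder.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

public class ProducerWiring {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // "partitioner.class" routes every record through the custom partitioner.
        // IntegerKeyPartitioner is a hypothetical name for the Example 2 class.
        props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, IntegerKeyPartitioner.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // Send records as usual; partition() above decides the placement.
        }
    }
}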
Example 3: partition
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
    List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
    int numPartitions = partitions.size();
    if (keyBytes == null) {
        int nextValue = roundRobin.getAndIncrement();
        List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic);
        if (availablePartitions.size() > 0) {
            int part = Utils.toPositive(nextValue) % availablePartitions.size();
            return availablePartitions.get(part).partition();
        } else {
            // No partitions are available, give a non-available partition.
            return Utils.toPositive(nextValue) % numPartitions;
        }
    } else {
        // Hash the keyBytes to choose a partition.
        return Utils.toPositive(xxHasher.hash(keyBytes, 0, keyBytes.length, SEED)) % numPartitions;
    }
}
Example 4: partitionGroups
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
/**
 * Generate tasks with the assigned topic partitions.
 *
 * @param topicGroups group of topics that need to be joined together
 * @param metadata    metadata of the consuming cluster
 * @return The map from generated task ids to the assigned partitions
 */
public Map<TaskId, Set<TopicPartition>> partitionGroups(Map<Integer, Set<String>> topicGroups, Cluster metadata) {
    Map<TaskId, Set<TopicPartition>> groups = new HashMap<>();
    for (Map.Entry<Integer, Set<String>> entry : topicGroups.entrySet()) {
        Integer topicGroupId = entry.getKey();
        Set<String> topicGroup = entry.getValue();
        int maxNumPartitions = maxNumPartitions(metadata, topicGroup);
        for (int partitionId = 0; partitionId < maxNumPartitions; partitionId++) {
            Set<TopicPartition> group = new HashSet<>(topicGroup.size());
            for (String topic : topicGroup) {
                List<PartitionInfo> partitions = metadata.partitionsForTopic(topic);
                if (partitionId < partitions.size()) {
                    group.add(new TopicPartition(topic, partitionId));
                }
            }
            groups.put(new TaskId(topicGroupId, partitionId), Collections.unmodifiableSet(group));
        }
    }
    return Collections.unmodifiableMap(groups);
}
Example 5: shouldSetClusterMetadataOnAssignment
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
@Test
public void shouldSetClusterMetadataOnAssignment() throws Exception {
    final List<TopicPartition> topic = Collections.singletonList(new TopicPartition("topic", 0));
    final Map<HostInfo, Set<TopicPartition>> hostState =
            Collections.singletonMap(new HostInfo("localhost", 80),
                    Collections.singleton(new TopicPartition("topic", 0)));
    final AssignmentInfo assignmentInfo = new AssignmentInfo(Collections.singletonList(new TaskId(0, 0)),
            Collections.<TaskId, Set<TopicPartition>>emptyMap(),
            hostState);
    partitionAssignor.onAssignment(new PartitionAssignor.Assignment(topic, assignmentInfo.encode()));
    final Cluster cluster = partitionAssignor.clusterMetadata();
    final List<PartitionInfo> partitionInfos = cluster.partitionsForTopic("topic");
    final PartitionInfo partitionInfo = partitionInfos.get(0);
    assertEquals(1, partitionInfos.size());
    assertEquals("topic", partitionInfo.topic());
    assertEquals(0, partitionInfo.partition());
}
Example 6: getClusterForCurrentTopics
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
private Cluster getClusterForCurrentTopics(Cluster cluster) {
    Set<String> unauthorizedTopics = new HashSet<>();
    Collection<PartitionInfo> partitionInfos = new ArrayList<>();
    List<Node> nodes = Collections.emptyList();
    Set<String> internalTopics = Collections.emptySet();
    Node controller = null;
    String clusterId = null;
    if (cluster != null) {
        clusterId = cluster.clusterResource().clusterId();
        internalTopics = cluster.internalTopics();
        unauthorizedTopics.addAll(cluster.unauthorizedTopics());
        unauthorizedTopics.retainAll(this.topics.keySet());
        for (String topic : this.topics.keySet()) {
            List<PartitionInfo> partitionInfoList = cluster.partitionsForTopic(topic);
            if (!partitionInfoList.isEmpty()) {
                partitionInfos.addAll(partitionInfoList);
            }
        }
        nodes = cluster.nodes();
        controller = cluster.controller();
    }
    return new Cluster(clusterId, nodes, partitionInfos, unauthorizedTopics, internalTopics, controller);
}
Example 7: partition
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
/**
 * Compute the partition for the given record.
 *
 * @param topic The topic name
 * @param key The key to partition on (or null if no key)
 * @param keyBytes serialized key to partition on (or null if no key)
 * @param value The value to partition on or null
 * @param valueBytes serialized value to partition on or null
 * @param cluster The current cluster metadata
 */
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
    // Fetch the partition metadata for the topic from the Cluster.
    // Keyed records are hashed with Kafka's murmur2, mapped to a non-negative
    // value with toPositive, and then taken modulo the partition count.
    List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
    // Number of partitions.
    int numPartitions = partitions.size();
    // The record has no key: an incrementing counter round-robins records across partitions.
    if (keyBytes == null) {
        // Increment the counter.
        int nextValue = nextValue(topic);
        // Prefer partitions that currently have a leader.
        List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic);
        if (availablePartitions.size() > 0) {
            int part = Utils.toPositive(nextValue) % availablePartitions.size();
            return availablePartitions.get(part).partition();
        } else {
            // No partitions are available, give a non-available partition.
            return Utils.toPositive(nextValue) % numPartitions;
        }
    } else {
        // The record has a key: hash the keyBytes to choose a partition.
        return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
    }
}
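The reason this partitioner masks the hash with Utils.toPositive instead of calling Math.abs is the Integer.MIN_VALUE edge case. A small sketch, assuming toPositive is implemented as a sign-bit mask (number & 0x7fffffff), which is how the Kafka client source defines it:

public class ToPositiveDemo {
    public static void main(String[] args) {
        int h = Integer.MIN_VALUE;           // a value murmur2 can legitimately produce
        System.out.println(Math.abs(h));     // -2147483648 -- still negative!
        System.out.println(h & 0x7fffffff);  // 0 -- safe to use as a modulo operand
    }
}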
Example 8: partitionsFor
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
/**
 * Get metadata about the partitions for a given topic. This method will issue a remote call to the server if it
 * does not already have any metadata about the given topic.
 *
 * @param topic The topic to get partition metadata for
 * @return The list of partitions
 * @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
 *             function is called
 * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
 *             this function is called
 * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the specified topic
 * @throws org.apache.kafka.common.errors.TimeoutException if the topic metadata could not be fetched before
 *             expiration of the configured request timeout
 * @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors
 */
@Override
public List<PartitionInfo> partitionsFor(String topic) {
    acquire();
    try {
        Cluster cluster = this.metadata.fetch();
        List<PartitionInfo> parts = cluster.partitionsForTopic(topic);
        if (!parts.isEmpty())
            return parts;
        // Cache miss: fetch the topic metadata from the brokers.
        Map<String, List<PartitionInfo>> topicMetadata = fetcher.getTopicMetadata(
                new MetadataRequest.Builder(Collections.singletonList(topic), true), requestTimeoutMs);
        return topicMetadata.get(topic);
    } finally {
        release();
    }
}
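From the caller's side, this is the method behind KafkaConsumer.partitionsFor. A minimal usage sketch follows; the broker address, group id, and topic name are placeholders.

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.StringDeserializer;

public class PartitionsForUsage {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Issues a remote metadata request if the topic is not cached yet.
            for (PartitionInfo pi : consumer.partitionsFor("demo-topic")) {
                System.out.println(pi);
            }
        }
    }
}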
Example 9: metadataChanged
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
/**
 * Check whether the metadata has changed.
 */
public static boolean metadataChanged(Cluster prev, Cluster curr) {
    // Has the broker set changed?
    Set<Node> prevNodeSet = new HashSet<>(prev.nodes());
    if (prevNodeSet.size() != curr.nodes().size()) {
        return true;
    }
    prevNodeSet.removeAll(curr.nodes());
    if (!prevNodeSet.isEmpty()) {
        return true;
    }
    // Has the topic set changed?
    if (!prev.topics().equals(curr.topics())) {
        return true;
    }
    // Have any partitions changed?
    for (String topic : prev.topics()) {
        if (!prev.partitionCountForTopic(topic).equals(curr.partitionCountForTopic(topic))) {
            return true;
        }
        for (PartitionInfo prevPartInfo : prev.partitionsForTopic(topic)) {
            PartitionInfo currPartInfo = curr.partition(new TopicPartition(prevPartInfo.topic(), prevPartInfo.partition()));
            if (leaderChanged(prevPartInfo, currPartInfo) || replicaListChanged(prevPartInfo, currPartInfo)) {
                return true;
            }
        }
    }
    return false;
}
Example 10: brokersWithPartitions
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
private Set<Integer> brokersWithPartitions(Cluster kafkaCluster) {
    Set<Integer> allBrokers = new HashSet<>();
    for (String topic : kafkaCluster.topics()) {
        for (PartitionInfo pi : kafkaCluster.partitionsForTopic(topic)) {
            for (Node node : pi.replicas()) {
                allBrokers.add(node.id());
            }
        }
    }
    return allBrokers;
}
Example 11: updateWindowMetricCompleteness
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
private void updateWindowMetricCompleteness(Cluster cluster, long window, String topic) {
    int numValidPartitions = _validPartitionsPerTopicByWindows.get(window).get(topic).size();
    List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
    // The topic may have been deleted, in which case the cluster no longer has it.
    if (partitions != null) {
        int numPartitions = partitions.size();
        _validPartitionsByWindows.compute(window, (w, v) -> {
            int newValue = (v == null ? 0 : v);
            return numValidPartitions == numPartitions ? newValue + numPartitions : newValue;
        });
    }
}
Example 12: partition
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
/**
 * Compute the partition for the given record.
 *
 * @param topic The topic name
 * @param key The key to partition on (or null if no key)
 * @param keyBytes serialized key to partition on (or null if no key)
 * @param value The value to partition on or null
 * @param valueBytes serialized value to partition on or null
 * @param cluster The current cluster metadata
 */
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
    if (key != null && key.toString() != null) {
        System.out.println(topic + " : " + key);
        try {
            // Mask the sign bit so negative numeric keys still map to a valid partition.
            int keyInt = Integer.parseInt(key.toString()) & 0x7fffffff;
            System.out.println("keyInt : " + keyInt);
            List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
            int numPartitions = partitions.size();
            System.out.println("numPartitions : " + numPartitions);
            List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic);
            System.out.println("availablePartitions : " + availablePartitions);
            if (availablePartitions.size() > 0) {
                int part = keyInt % availablePartitions.size();
                System.out.println("part : " + part);
                // Sort the available partition ids so the key-to-partition
                // mapping is deterministic regardless of metadata ordering.
                int[] parts = new int[availablePartitions.size()];
                for (int i = 0; i < availablePartitions.size(); i++) {
                    parts[i] = availablePartitions.get(i).partition();
                }
                Arrays.sort(parts);
                System.out.println("partition : " + parts[part]);
                return parts[part];
            } else {
                return keyInt % numPartitions;
            }
        } catch (Exception e) {
            // Non-numeric key: fall through and route the record to partition 0.
        }
    }
    return 0;
}
Example 13: partition
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
    List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
    int numPartitions = partitions.size();
    int partitionNum;
    try {
        partitionNum = Utils.murmur2(keyBytes);
    } catch (Exception e) {
        // murmur2 throws for a null keyBytes array; fall back to the object hash.
        partitionNum = key.hashCode();
    }
    // Taking the modulo first keeps the argument to Math.abs away from Integer.MIN_VALUE.
    return Math.abs(partitionNum % numPartitions);
}
Example 14: recentSnapshotsForTopic
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
/**
 * Get all the snapshots for a topic before the given time, excluding the snapshot window that the given time falls in.
 */
private MetricSampleAggregationResult recentSnapshotsForTopic(String topic,
                                                              Cluster cluster,
                                                              Map<Long, Map<TopicPartition, AggregatedMetrics>> readyWindowedAggMetrics,
                                                              boolean includeAllTopics)
        throws NotEnoughSnapshotsException {
    List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
    MetricSampleAggregationResult result = new MetricSampleAggregationResult(currentGeneration(), includeAllTopics);
    for (PartitionInfo partition : partitions) {
        TopicPartition tp = new TopicPartition(topic, partition.partition());
        Snapshot[] snapshots = new Snapshot[readyWindowedAggMetrics.size()];
        int index = 0;
        for (Map.Entry<Long, Map<TopicPartition, AggregatedMetrics>> entry : readyWindowedAggMetrics.entrySet()) {
            long snapshotWindow = entry.getKey();
            SnapshotAndImputation partitionSnapShot =
                    partitionSnapshotForWindow(tp, cluster, snapshotWindow, snapshotWindow, includeAllTopics, true);
            // If the topic has an invalid partition, exclude the whole topic by returning the result immediately.
            // At this point the result contains no snapshots, only the partition flaw.
            if (partitionSnapShot.snapshot() == null) {
                result.discardAllSnapshots();
                result.recordPartitionWithSampleFlaw(tp, snapshotWindow, partitionSnapShot.imputation());
                return result;
            }
            // Record the imputation, and return the result without snapshots if there have been too many imputations.
            if (partitionSnapShot.imputation() != null) {
                result.recordPartitionWithSampleFlaw(tp, snapshotWindow, partitionSnapShot.imputation());
                if (result.sampleFlaw(tp).size() > _numSnapshots * MAX_SNAPSHOT_PERCENT_WITH_FLAWS) {
                    LOG.debug("{} already has {} snapshots with flaws, excluding the partition from the model.",
                            tp, result.sampleFlaw(tp).size());
                    result.discardAllSnapshots();
                    return result;
                }
            }
            snapshots[index++] = partitionSnapShot.snapshot();
        }
        result.addPartitionSnapshots(new TopicPartition(partition.topic(), partition.partition()), snapshots);
    }
    return result;
}
Example 15: partition
import org.apache.kafka.common.Cluster; // import the package/class the method depends on
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
    List<PartitionInfo> infos = cluster.partitionsForTopic(topic);
    // Mask the counter's sign bit so the result stays non-negative after the int overflows.
    return (Next.getAndIncrement() & 0x7fffffff) % infos.size();
}