This page collects typical usage examples of the Java class org.apache.kafka.common.TopicPartition. If you have been wondering what TopicPartition is for, how to use it, or where to find working examples, the curated snippets below should help.
The TopicPartition class belongs to the org.apache.kafka.common package. Fifteen code examples are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
Example 1: assignPartition
import org.apache.kafka.common.TopicPartition; // import the required package/class
/**
 * @return the consumer to which the given partition is assigned. The assignment should improve the overall balance
 * of the partition assignments to consumers.
 */
private String assignPartition(TopicPartition partition,
                               TreeSet<String> sortedCurrentSubscriptions,
                               Map<String, List<TopicPartition>> currentAssignment,
                               Map<String, List<TopicPartition>> consumer2AllPotentialPartitions,
                               Map<TopicPartition, String> currentPartitionConsumer) {
    for (String consumer: sortedCurrentSubscriptions) {
        if (consumer2AllPotentialPartitions.get(consumer).contains(partition)) {
            sortedCurrentSubscriptions.remove(consumer);
            currentAssignment.get(consumer).add(partition);
            currentPartitionConsumer.put(partition, consumer);
            sortedCurrentSubscriptions.add(consumer);
            return consumer;
        }
    }
    return null;
}
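The loop above picks the first eligible consumer from sortedCurrentSubscriptions, which only balances load if that set is ordered by current assignment size; that is why the consumer is removed and re-added around the mutation, forcing the TreeSet to re-sort it. A standalone sketch of that ordering trick (topic and consumer names are made up):

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;
import org.apache.kafka.common.TopicPartition;

public class GreedyAssignSketch {
    public static void main(String[] args) {
        // consumer -> partitions currently assigned to it
        Map<String, List<TopicPartition>> assignment = new HashMap<>();
        assignment.put("consumer-a", new ArrayList<>());
        assignment.put("consumer-b", new ArrayList<>(
                Collections.singletonList(new TopicPartition("orders", 0))));

        // Order consumers by assignment size, ties broken by name
        TreeSet<String> sorted = new TreeSet<>(
                Comparator.comparingInt((String c) -> assignment.get(c).size())
                        .thenComparing(Comparator.naturalOrder()));
        sorted.addAll(assignment.keySet());

        // The least-loaded consumer is visited first, so assigning the next
        // partition to it improves overall balance
        System.out.println(sorted.first()); // prints: consumer-a
    }
}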
Example 2: createConsumer
import org.apache.kafka.common.TopicPartition; // import the required package/class
/**
 * createConsumer - create a new consumer with its topic partitions manually assigned
 * @return a consumer positioned according to the rollBack setting
 * @throws Exception if the consumer properties cannot be loaded
 */
private Consumer<String, String> createConsumer() throws Exception {
    Properties props = ConfUtils.getProps(CONSUMER_PROPS);
    Consumer<String, String> consumer = new KafkaConsumer<>(props);

    // Manually assign partition 0 of each topic (no consumer-group rebalancing)
    List<TopicPartition> pts = topics.stream().map(s -> new TopicPartition(s, 0)).collect(Collectors.toList());
    consumer.assign(pts);

    if (rollBack == 0) {
        // No rollback requested: start from the end of each partition
        consumer.seekToEnd(pts);
    } else {
        // Rewind each partition by rollBack offsets from its current position
        for (TopicPartition topicPartition : pts) {
            consumer.seek(topicPartition, consumer.position(topicPartition) - rollBack);
            logger.info("Consumer seeked back " + rollBack + " to position: " + consumer.position(topicPartition));
        }
    }
    return consumer;
}
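For context, a standalone sketch of the same assign-then-seek pattern; the bootstrap address, topic name, and deserializers are placeholders rather than values loaded from CONSUMER_PROPS:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class AssignAndSeekSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("example-topic", 0);
            // assign() opts out of consumer-group rebalancing entirely
            consumer.assign(Collections.singletonList(tp));
            consumer.seekToEnd(Collections.singletonList(tp));
            // position() reports the offset the next poll() would read from
            System.out.println("Start position: " + consumer.position(tp));
        }
    }
}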
Example 3: recordPartitionLag
import org.apache.kafka.common.TopicPartition; // import the required package/class
private void recordPartitionLag(TopicPartition tp, long lag) {
    this.recordsFetchLag.record(lag);

    String name = partitionLagMetricName(tp);
    Sensor recordsLag = this.metrics.getSensor(name);
    if (recordsLag == null) {
        recordsLag = this.metrics.sensor(name);
        recordsLag.add(this.metrics.metricName(name,
                metricsRegistry.partitionRecordsLag.group(),
                metricsRegistry.partitionRecordsLag.description()), new Value());
        recordsLag.add(this.metrics.metricName(name + "-max",
                metricsRegistry.partitionRecordsLagMax.group(),
                metricsRegistry.partitionRecordsLagMax.description()), new Max());
        recordsLag.add(this.metrics.metricName(name + "-avg",
                metricsRegistry.partitionRecordsLagAvg.group(),
                metricsRegistry.partitionRecordsLagAvg.description()), new Avg());
    }
    recordsLag.record(lag);
}
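The Sensor and stat classes used here are public classes from org.apache.kafka.common.metrics. A minimal standalone sketch of the same lazy-sensor pattern, with a hypothetical metric naming scheme:

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;
import org.apache.kafka.common.metrics.stats.Value;

public class LagSensorSketch {
    public static void main(String[] args) {
        Metrics metrics = new Metrics();
        TopicPartition tp = new TopicPartition("orders", 0);
        String name = tp + ".records-lag"; // hypothetical name scheme

        // Create the sensor lazily, as recordPartitionLag() does above
        Sensor lag = metrics.getSensor(name);
        if (lag == null) {
            lag = metrics.sensor(name);
            lag.add(metrics.metricName(name, "fetch", "current lag"), new Value());
            lag.add(metrics.metricName(name + "-max", "fetch", "max lag"), new Max());
            lag.add(metrics.metricName(name + "-avg", "fetch", "avg lag"), new Avg());
        }
        lag.record(42);
        metrics.close();
    }
}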
Example 4: assignNewPartitonsCreatesProcessors
import org.apache.kafka.common.TopicPartition; // import the required package/class
@Test
public void assignNewPartitonsCreatesProcessors() {
    PartitionProcessorFactory ppf = processorFactory();
    AssignedPartitions ap = new AssignedPartitions(ppf);

    Collection<TopicPartition> newPartitions = new ArrayList<>();
    newPartitions.add(new TopicPartition(TOPIC, 3));
    newPartitions.add(new TopicPartition(TOPIC, 1));
    newPartitions.add(new TopicPartition(TOPIC, 99));

    ap.assignNewPartitions(newPartitions);

    assertFalse(ap.allPartitions().isEmpty());
    Set<TopicPartition> partitions = ap.allPartitions();
    assertEquals(3, partitions.size());
    assertTrue(partitions.containsAll(newPartitions));
}
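The containsAll() assertion passes across distinct instances because TopicPartition implements equals() and hashCode() over its (topic, partition) pair:

Set<TopicPartition> set = new HashSet<>();
set.add(new TopicPartition("example-topic", 3));

// Equal by value, not by reference
System.out.println(set.contains(new TopicPartition("example-topic", 3))); // true
System.out.println(set.contains(new TopicPartition("example-topic", 4))); // false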
Example 5: prepareOffsetCommitResponse
import org.apache.kafka.common.TopicPartition; // import the required package/class
private AtomicBoolean prepareOffsetCommitResponse(MockClient client, Node coordinator, final Map<TopicPartition, Long> partitionOffsets) {
    final AtomicBoolean commitReceived = new AtomicBoolean(true);
    Map<TopicPartition, Errors> response = new HashMap<>();
    for (TopicPartition partition : partitionOffsets.keySet())
        response.put(partition, Errors.NONE);

    client.prepareResponseFrom(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            OffsetCommitRequest commitRequest = (OffsetCommitRequest) body;
            for (Map.Entry<TopicPartition, Long> partitionOffset : partitionOffsets.entrySet()) {
                OffsetCommitRequest.PartitionData partitionData = commitRequest.offsetData().get(partitionOffset.getKey());
                // verify that the expected offset has been committed
                if (partitionData.offset != partitionOffset.getValue()) {
                    commitReceived.set(false);
                    return false;
                }
            }
            return true;
        }
    }, offsetCommitResponse(response), coordinator);
    return commitReceived;
}
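A hypothetical flow for asserting on the returned flag, under the same MockClient setup (the client, coordinator, and assertion helpers come from the surrounding test class):

// Enqueue the canned response, drive a commit through the code under test,
// then verify the matcher saw the expected offsets.
AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator,
        Collections.singletonMap(new TopicPartition("topic", 0), 42L));

// ... trigger an offset commit via the consumer under test ...

assertTrue(commitReceived.get());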
Example 6: updateReplicaReassignmentTimestamp
import org.apache.kafka.common.TopicPartition; // import the required package/class
public static void updateReplicaReassignmentTimestamp(String brokerZkUrl,
                                                      ReplicaStat replicaStat) {
    if (!replicaReassignmentTimestamps.containsKey(brokerZkUrl)) {
        replicaReassignmentTimestamps.put(brokerZkUrl, new ConcurrentHashMap<>());
    }
    ConcurrentHashMap<TopicPartition, Long> replicaTimestamps =
        replicaReassignmentTimestamps.get(brokerZkUrl);
    TopicPartition topicPartition = new TopicPartition(
        replicaStat.getTopic(), replicaStat.getPartition());

    // Keep only the newest reassignment timestamp for each partition
    if (!replicaTimestamps.containsKey(topicPartition) ||
        replicaTimestamps.get(topicPartition) < replicaStat.getTimestamp()) {
        replicaTimestamps.put(topicPartition, replicaStat.getTimestamp());
    }
}
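The containsKey/put sequences above are check-then-act and can race if this method runs on several threads. An equivalent sketch using atomic map operations, assuming replicaReassignmentTimestamps is itself a ConcurrentHashMap:

ConcurrentHashMap<TopicPartition, Long> replicaTimestamps =
        replicaReassignmentTimestamps.computeIfAbsent(brokerZkUrl, k -> new ConcurrentHashMap<>());

TopicPartition topicPartition = new TopicPartition(
        replicaStat.getTopic(), replicaStat.getPartition());

// merge() keeps the larger (newer) timestamp atomically
replicaTimestamps.merge(topicPartition, replicaStat.getTimestamp(), Math::max);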
Example 7: shouldDeleteCheckpointFileOnCreationIfEosEnabled
import org.apache.kafka.common.TopicPartition; // import the required package/class
@Test
public void shouldDeleteCheckpointFileOnCreationIfEosEnabled() throws Exception {
    checkpoint.write(Collections.<TopicPartition, Long>emptyMap());
    assertTrue(checkpointFile.exists());

    ProcessorStateManager stateManager = null;
    try {
        stateManager = new ProcessorStateManager(
            taskId,
            noPartitions,
            false,    // isStandby
            stateDirectory,
            Collections.<String, String>emptyMap(),
            changelogReader,
            true);    // eosEnabled
        assertFalse(checkpointFile.exists());
    } finally {
        if (stateManager != null) {
            stateManager.close(null);
        }
    }
}
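The checkpoint and checkpointFile fixtures are not shown. A plausible wiring sketch, assuming the internal Streams OffsetCheckpoint helper and checkpoint file name this test appears to exercise:

// Hypothetical fixture setup; OffsetCheckpoint lives in
// org.apache.kafka.streams.state.internals and CHECKPOINT_FILE_NAME on
// ProcessorStateManager — both internal APIs, so treat this as a sketch.
File checkpointFile = new File(stateDirectory.directoryForTask(taskId),
        ProcessorStateManager.CHECKPOINT_FILE_NAME);
OffsetCheckpoint checkpoint = new OffsetCheckpoint(checkpointFile);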
Example 8: toString
import org.apache.kafka.common.TopicPartition; // import the required package/class
/**
 * Produces a string representation containing useful information about a StreamTask, starting with the given indent.
 * This is useful in debugging scenarios.
 * @return A string representation of the StreamTask instance.
 */
public String toString(final String indent) {
    final StringBuilder sb = new StringBuilder();
    sb.append(indent);
    sb.append("StreamsTask taskId: ");
    sb.append(id);
    sb.append("\n");

    // print topology
    if (topology != null) {
        sb.append(indent).append(topology.toString(indent + "\t"));
    }

    // print assigned partitions
    if (partitions != null && !partitions.isEmpty()) {
        sb.append(indent).append("Partitions [");
        for (final TopicPartition topicPartition : partitions) {
            sb.append(topicPartition.toString()).append(", ");
        }
        sb.setLength(sb.length() - 2); // drop the trailing ", "
        sb.append("]\n");
    }
    return sb.toString();
}
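The manual loop plus setLength() trim can also be expressed with a joining collector; a behavior-equivalent sketch (assuming java.util.stream.Collectors is imported):

// Same output as the loop above: prefix/suffix replace the hand-trimmed ", "
sb.append(partitions.stream()
        .map(TopicPartition::toString)
        .collect(Collectors.joining(", ", indent + "Partitions [", "]\n")));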
Example 9: messagesRemaining
import org.apache.kafka.common.TopicPartition; // import the required package/class
private static long messagesRemaining(KafkaConsumer<String, String> consumer, TopicPartition partition) {
    long currentPosition = consumer.position(partition);
    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(singleton(partition));
    if (endOffsets.containsKey(partition)) {
        return endOffsets.get(partition) - currentPosition;
    }
    return 0;
}
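A hypothetical drain loop built on messagesRemaining(), assuming a consumer already assigned to the partition, a process() handler, and the poll(Duration) API of newer client versions:

// Poll until the consumer has caught up with the partition's end offset
while (messagesRemaining(consumer, partition) > 0) {
    for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofMillis(100))) {
        process(record); // hypothetical per-record handler
    }
}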
Example 10: update
import org.apache.kafka.common.TopicPartition; // import the required package/class
/**
 * Record the stats, and update the topic partition list based on the stats
 *
 * @param stats the broker stats
 */
public void update(BrokerStats stats) {
    // Ignore null, stale, or failed stats
    if (stats == null
        || (latestStats != null && latestStats.getTimestamp() > stats.getTimestamp())
        || stats.getHasFailure()) {
        return;
    }

    brokerName = stats.getName();
    latestStats = stats;
    if (rackId == null) {
        rackId = stats.getRackId() != null ? stats.getRackId() : stats.getAvailabilityZone();
    }

    // TODO: handle null pointer exception properly
    leaderReplicas = stats.getLeaderReplicas().stream().map(tps ->
        new TopicPartition(tps.getTopic(), tps.getPartition())).collect(Collectors.toSet());
    followerReplicas = stats.getFollowerReplicas().stream().map(tps ->
        new TopicPartition(tps.getTopic(), tps.getPartition())).collect(Collectors.toSet());
}
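A null-safe variant of the two stream pipelines, addressing the TODO under the assumption that an empty replica set is an acceptable fallback when stats are incomplete:

// Treat a missing replica list as empty instead of risking an NPE
leaderReplicas = Optional.ofNullable(stats.getLeaderReplicas())
        .orElse(Collections.emptyList())
        .stream()
        .map(tps -> new TopicPartition(tps.getTopic(), tps.getPartition()))
        .collect(Collectors.toSet());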
Example 11: testFetchResponseV4
import org.apache.kafka.common.TopicPartition; // import the required package/class
@Test
public void testFetchResponseV4() {
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData> responseData = new LinkedHashMap<>();
    MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));

    List<FetchResponse.AbortedTransaction> abortedTransactions = asList(
        new FetchResponse.AbortedTransaction(10, 100),
        new FetchResponse.AbortedTransaction(15, 50)
    );
    responseData.put(new TopicPartition("bar", 0), new FetchResponse.PartitionData(Errors.NONE, 100000,
        FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, abortedTransactions, records));
    responseData.put(new TopicPartition("bar", 1), new FetchResponse.PartitionData(Errors.NONE, 900000,
        5, FetchResponse.INVALID_LOG_START_OFFSET, null, records));
    responseData.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData(Errors.NONE, 70000,
        6, FetchResponse.INVALID_LOG_START_OFFSET, Collections.<FetchResponse.AbortedTransaction>emptyList(), records));

    FetchResponse response = new FetchResponse(responseData, 10);
    FetchResponse deserialized = FetchResponse.parse(toBuffer(response.toStruct((short) 4)), (short) 4);
    assertEquals(responseData, deserialized.responseData());
}
Example 12: testOneConsumerMultipleTopics
import org.apache.kafka.common.TopicPartition; // import the required package/class
@Test
public void testOneConsumerMultipleTopics() {
    String topic1 = "topic1";
    String topic2 = "topic2";
    String consumerId = "consumer";

    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic1, 1);
    partitionsPerTopic.put(topic2, 2);

    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic,
        Collections.singletonMap(consumerId, new Subscription(topics(topic1, topic2))));

    assertEquals(Collections.singleton(consumerId), assignment.keySet());
    assertAssignment(partitions(tp(topic1, 0), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumerId));
}
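The tp(), topics(), partitions(), and assertAssignment() helpers are referenced but not shown; plausible sketches consistent with how they are used (treat the exact shapes as assumptions):

private static TopicPartition tp(String topic, int partition) {
    return new TopicPartition(topic, partition);
}

private static List<String> topics(String... topics) {
    return Arrays.asList(topics);
}

private static List<TopicPartition> partitions(TopicPartition... partitions) {
    return Arrays.asList(partitions);
}

private static void assertAssignment(List<TopicPartition> expected, List<TopicPartition> actual) {
    assertEquals(expected, actual);
}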
Example 13: partitionGroups
import org.apache.kafka.common.TopicPartition; // import the required package/class
/**
 * Generate tasks with the assigned topic partitions.
 *
 * @param topicGroups group of topics that need to be joined together
 * @param metadata metadata of the consuming cluster
 * @return The map from generated task ids to the assigned partitions
 */
public Map<TaskId, Set<TopicPartition>> partitionGroups(Map<Integer, Set<String>> topicGroups, Cluster metadata) {
    Map<TaskId, Set<TopicPartition>> groups = new HashMap<>();

    for (Map.Entry<Integer, Set<String>> entry : topicGroups.entrySet()) {
        Integer topicGroupId = entry.getKey();
        Set<String> topicGroup = entry.getValue();

        int maxNumPartitions = maxNumPartitions(metadata, topicGroup);

        for (int partitionId = 0; partitionId < maxNumPartitions; partitionId++) {
            Set<TopicPartition> group = new HashSet<>(topicGroup.size());

            for (String topic : topicGroup) {
                List<PartitionInfo> partitions = metadata.partitionsForTopic(topic);
                if (partitionId < partitions.size()) {
                    group.add(new TopicPartition(topic, partitionId));
                }
            }
            groups.put(new TaskId(topicGroupId, partitionId), Collections.unmodifiableSet(group));
        }
    }

    return Collections.unmodifiableMap(groups);
}
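The maxNumPartitions() helper is not shown; a sketch under the assumption that it simply takes the largest partition count across the co-partitioned topics:

// Hypothetical helper: the widest topic in the group determines how many
// tasks the group needs.
private int maxNumPartitions(Cluster metadata, Set<String> topics) {
    int max = 0;
    for (String topic : topics) {
        List<PartitionInfo> infos = metadata.partitionsForTopic(topic);
        if (infos != null) {
            max = Math.max(max, infos.size());
        }
    }
    return max;
}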
Example 14: next
import org.apache.kafka.common.TopicPartition; // import the required package/class
/**
 * Seek to the next 'page' of records.
 */
public void next() {
    // Get all available partitions
    final List<TopicPartition> topicPartitions = getAllPartitions();

    // Get tail (end) offsets for each partition
    final Map<TopicPartition, Long> tailOffsets = kafkaConsumer.endOffsets(topicPartitions);

    // Loop over each partition
    for (final TopicPartition topicPartition : topicPartitions) {
        // Calculate the start of the next page
        final long tailOffset = tailOffsets.get(topicPartition);
        final long currentOffset = kafkaConsumer.position(topicPartition);
        long newOffset = currentOffset + clientConfig.getMaxResultsPerPartition();

        // Clamp so we never seek past the end of the partition
        if (newOffset > tailOffset) {
            newOffset = tailOffset;
        }
        logger.info("Partition: {} Previous Offset: {} New Offset: {}", topicPartition.partition(), currentOffset, newOffset);

        // Seek to the new offset
        kafkaConsumer.seek(topicPartition, newOffset);
    }
    commit();
}
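Under the same assumptions, the symmetric backwards operation might look like this; beginningOffsets() bounds the rewind the way endOffsets() bounds the advance above:

/**
 * Seek to the previous 'page' of records; a hypothetical counterpart to next().
 */
public void previous() {
    final List<TopicPartition> topicPartitions = getAllPartitions();

    // Head offsets mark the earliest readable position per partition
    final Map<TopicPartition, Long> headOffsets = kafkaConsumer.beginningOffsets(topicPartitions);

    for (final TopicPartition topicPartition : topicPartitions) {
        final long headOffset = headOffsets.get(topicPartition);
        final long currentOffset = kafkaConsumer.position(topicPartition);

        // Step back one page; never rewind past the head offset
        long newOffset = currentOffset - clientConfig.getMaxResultsPerPartition();
        if (newOffset < headOffset) {
            newOffset = headOffset;
        }
        kafkaConsumer.seek(topicPartition, newOffset);
    }
    commit();
}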
Example 15: onMessageConsumed
import org.apache.kafka.common.TopicPartition; // import the required package/class
private synchronized void onMessageConsumed(ConsumerRecord<String, String> record) {
    log.info(String.format("Consumed message: [%s]", record));
    TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());
    if (!messagesByTopicPartition.containsKey(topicPartition)) {
        messagesByTopicPartition.put(topicPartition, new VersionedMessages(Lists.newLinkedList()));
    }
    VersionedMessages versionedMessages = messagesByTopicPartition.get(topicPartition);
    LinkedList<ConsumerRecord<String, String>> messages = versionedMessages.messages;

    // Newest message first; cap the per-partition history at maxTopicMessagesCount
    messages.addFirst(record);
    if (messages.size() > maxTopicMessagesCount) {
        messages.removeLast();
    }
    versionedMessages.version.incrementAndGet();
}
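VersionedMessages is referenced above but not defined in the snippet; a minimal holder consistent with that usage might look like this (the AtomicLong counter and constructor shape are assumptions, with java.util.concurrent.atomic.AtomicLong imported):

// Hypothetical sketch of the missing helper class
private static final class VersionedMessages {
    final AtomicLong version = new AtomicLong(); // bumped on every change
    final LinkedList<ConsumerRecord<String, String>> messages;

    VersionedMessages(LinkedList<ConsumerRecord<String, String>> messages) {
        this.messages = messages;
    }
}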